Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 32
-rw-r--r--  drivers/acpi/acpi_dbg.c | 4
-rw-r--r--  drivers/acpi/acpi_lpss.c | 139
-rw-r--r--  drivers/acpi/acpi_video.c | 14
-rw-r--r--  drivers/acpi/acpica/acapps.h | 3
-rw-r--r--  drivers/acpi/acpica/acdebug.h | 4
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 82
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 15
-rw-r--r--  drivers/acpi/acpica/acmacros.h | 2
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 3
-rw-r--r--  drivers/acpi/acpica/acutils.h | 23
-rw-r--r--  drivers/acpi/acpica/dbexec.c | 110
-rw-r--r--  drivers/acpi/acpica/dbfileio.c | 4
-rw-r--r--  drivers/acpi/acpica/dbinput.c | 145
-rw-r--r--  drivers/acpi/acpica/dscontrol.c | 18
-rw-r--r--  drivers/acpi/acpica/dsfield.c | 28
-rw-r--r--  drivers/acpi/acpica/dsobject.c | 4
-rw-r--r--  drivers/acpi/acpica/dspkginit.c | 21
-rw-r--r--  drivers/acpi/acpica/dsutils.c | 3
-rw-r--r--  drivers/acpi/acpica/dswload.c | 6
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 13
-rw-r--r--  drivers/acpi/acpica/evregion.c | 10
-rw-r--r--  drivers/acpi/acpica/exdump.c | 11
-rw-r--r--  drivers/acpi/acpica/hwtimer.c | 29
-rw-r--r--  drivers/acpi/acpica/hwvalid.c | 14
-rw-r--r--  drivers/acpi/acpica/nsaccess.c | 13
-rw-r--r--  drivers/acpi/acpica/nsconvert.c | 3
-rw-r--r--  drivers/acpi/acpica/nsnames.c | 149
-rw-r--r--  drivers/acpi/acpica/nssearch.c | 1
-rw-r--r--  drivers/acpi/acpica/nsxfeval.c | 9
-rw-r--r--  drivers/acpi/acpica/psargs.c | 2
-rw-r--r--  drivers/acpi/acpica/psobject.c | 10
-rw-r--r--  drivers/acpi/acpica/psutils.c | 14
-rw-r--r--  drivers/acpi/acpica/utdebug.c | 18
-rw-r--r--  drivers/acpi/acpica/utdecode.c | 11
-rw-r--r--  drivers/acpi/acpica/uterror.c | 73
-rw-r--r--  drivers/acpi/acpica/utinit.c | 1
-rw-r--r--  drivers/acpi/acpica/utmath.c | 4
-rw-r--r--  drivers/acpi/acpica/utmutex.c | 9
-rw-r--r--  drivers/acpi/acpica/utnonansi.c | 11
-rw-r--r--  drivers/acpi/acpica/utosi.c | 2
-rw-r--r--  drivers/acpi/acpica/utstrsuppt.c | 42
-rw-r--r--  drivers/acpi/acpica/uttrack.c | 6
-rw-r--r--  drivers/acpi/acpica/utxferror.c | 8
-rw-r--r--  drivers/acpi/apei/erst.c | 2
-rw-r--r--  drivers/acpi/apei/ghes.c | 81
-rw-r--r--  drivers/acpi/battery.c | 36
-rw-r--r--  drivers/acpi/bus.c | 18
-rw-r--r--  drivers/acpi/button.c | 22
-rw-r--r--  drivers/acpi/cppc_acpi.c | 2
-rw-r--r--  drivers/acpi/device_pm.c | 29
-rw-r--r--  drivers/acpi/ec.c | 2
-rw-r--r--  drivers/acpi/ec_sys.c | 2
-rw-r--r--  drivers/acpi/evged.c | 47
-rw-r--r--  drivers/acpi/internal.h | 2
-rw-r--r--  drivers/acpi/nfit/core.c | 9
-rw-r--r--  drivers/acpi/numa.c | 3
-rw-r--r--  drivers/acpi/pci_link.c | 2
-rw-r--r--  drivers/acpi/pmic/intel_pmic_bxtwc.c | 9
-rw-r--r--  drivers/acpi/pmic/intel_pmic_chtdc_ti.c | 5
-rw-r--r--  drivers/acpi/pmic/intel_pmic_chtwc.c | 9
-rw-r--r--  drivers/acpi/pmic/intel_pmic_crc.c | 7
-rw-r--r--  drivers/acpi/pmic/intel_pmic_xpower.c | 7
-rw-r--r--  drivers/acpi/processor_idle.c | 1
-rw-r--r--  drivers/acpi/property.c | 8
-rw-r--r--  drivers/acpi/sleep.c | 16
-rw-r--r--  drivers/acpi/sysfs.c | 26
-rw-r--r--  drivers/acpi/utils.c | 41
-rw-r--r--  drivers/android/binder.c | 46
-rw-r--r--  drivers/ata/Kconfig | 28
-rw-r--r--  drivers/ata/Makefile | 1
-rw-r--r--  drivers/ata/ahci.c | 94
-rw-r--r--  drivers/ata/ahci.h | 3
-rw-r--r--  drivers/ata/ahci_brcm.c | 120
-rw-r--r--  drivers/ata/ata_piix.c | 2
-rw-r--r--  drivers/ata/libata-core.c | 1
-rw-r--r--  drivers/ata/pata_at32.c | 400
-rw-r--r--  drivers/ata/pata_atiixp.c | 4
-rw-r--r--  drivers/ata/pata_it821x.c | 2
-rw-r--r--  drivers/ata/pata_pdc2027x.c | 4
-rw-r--r--  drivers/ata/sata_mv.c | 2
-rw-r--r--  drivers/atm/eni.c | 18
-rw-r--r--  drivers/base/Kconfig | 3
-rw-r--r--  drivers/base/cacheinfo.c | 13
-rw-r--r--  drivers/base/cpu.c | 48
-rw-r--r--  drivers/base/memory.c | 2
-rw-r--r--  drivers/base/power/domain.c | 69
-rw-r--r--  drivers/base/power/main.c | 430
-rw-r--r--  drivers/base/power/power.h | 11
-rw-r--r--  drivers/base/power/runtime.c | 84
-rw-r--r--  drivers/base/power/sysfs.c | 182
-rw-r--r--  drivers/base/power/wakeirq.c | 8
-rw-r--r--  drivers/base/power/wakeup.c | 26
-rw-r--r--  drivers/base/property.c | 111
-rw-r--r--  drivers/base/regmap/Kconfig | 6
-rw-r--r--  drivers/base/regmap/Makefile | 1
-rw-r--r--  drivers/base/regmap/internal.h | 8
-rw-r--r--  drivers/base/regmap/regcache-flat.c | 15
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 12
-rw-r--r--  drivers/base/regmap/regmap-sdw.c | 88
-rw-r--r--  drivers/base/regmap/regmap.c | 47
-rw-r--r--  drivers/bcma/Kconfig | 2
-rw-r--r--  drivers/bcma/driver_pcie2.c | 3
-rw-r--r--  drivers/block/DAC960.c | 160
-rw-r--r--  drivers/block/Kconfig | 4
-rw-r--r--  drivers/block/aoe/aoe.h | 3
-rw-r--r--  drivers/block/aoe/aoecmd.c | 48
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 2
-rw-r--r--  drivers/block/drbd/drbd_main.c | 8
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 3
-rw-r--r--  drivers/block/loop.c | 10
-rw-r--r--  drivers/block/null_blk.c | 294
-rw-r--r--  drivers/block/pktcdvd.c | 12
-rw-r--r--  drivers/block/rbd.c | 18
-rw-r--r--  drivers/block/smart1,2.h | 278
-rw-r--r--  drivers/block/zram/zram_drv.c | 2
-rw-r--r--  drivers/bluetooth/Kconfig | 14
-rw-r--r--  drivers/bluetooth/bluecard_cs.c | 8
-rw-r--r--  drivers/bluetooth/bpa10x.c | 2
-rw-r--r--  drivers/bluetooth/btbcm.c | 1
-rw-r--r--  drivers/bluetooth/btbcm.h | 2
-rw-r--r--  drivers/bluetooth/btintel.c | 157
-rw-r--r--  drivers/bluetooth/btintel.h | 33
-rw-r--r--  drivers/bluetooth/btqcomsmd.c | 3
-rw-r--r--  drivers/bluetooth/btsdio.c | 9
-rw-r--r--  drivers/bluetooth/btusb.c | 162
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 243
-rw-r--r--  drivers/bluetooth/hci_intel.c | 186
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 2
-rw-r--r--  drivers/bluetooth/hci_ll.c | 107
-rw-r--r--  drivers/bluetooth/hci_qca.c | 3
-rw-r--r--  drivers/bluetooth/hci_serdev.c | 1
-rw-r--r--  drivers/bluetooth/hci_vhci.c | 2
-rw-r--r--  drivers/bus/Kconfig | 2
-rw-r--r--  drivers/bus/sunxi-rsb.c | 1
-rw-r--r--  drivers/char/apm-emulation.c | 2
-rw-r--r--  drivers/char/dsp56k.c | 2
-rw-r--r--  drivers/char/dtlk.c | 6
-rw-r--r--  drivers/char/hpet.c | 2
-rw-r--r--  drivers/char/hw_random/Kconfig | 45
-rw-r--r--  drivers/char/hw_random/Makefile | 3
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c | 169
-rw-r--r--  drivers/char/hw_random/bcm63xx-rng.c | 154
-rw-r--r--  drivers/char/hw_random/core.c | 4
-rw-r--r--  drivers/char/hw_random/exynos-trng.c | 235
-rw-r--r--  drivers/char/hw_random/imx-rngc.c | 13
-rw-r--r--  drivers/char/hw_random/mtk-rng.c | 1
-rw-r--r--  drivers/char/hw_random/tpm-rng.c | 50
-rw-r--r--  drivers/char/ipmi/bt-bmc.c | 4
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 4
-rw-r--r--  drivers/char/ipmi/ipmi_dmi.c | 5
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_powernv.c | 6
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 13
-rw-r--r--  drivers/char/ipmi/ipmi_si_platform.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 3
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 8
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c | 4
-rw-r--r--  drivers/char/ppdev.c | 4
-rw-r--r--  drivers/char/random.c | 28
-rw-r--r--  drivers/char/rtc.c | 4
-rw-r--r--  drivers/char/snsc.c | 4
-rw-r--r--  drivers/char/sonypi.c | 2
-rw-r--r--  drivers/char/tpm/Kconfig | 11
-rw-r--r--  drivers/char/tpm/Makefile | 5
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 67
-rw-r--r--  drivers/char/tpm/tpm-interface.c | 231
-rw-r--r--  drivers/char/tpm/tpm.h | 52
-rw-r--r--  drivers/char/tpm/tpm1_eventlog.c | 13
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 12
-rw-r--r--  drivers/char/tpm/tpm2_eventlog.c | 2
-rw-r--r--  drivers/char/tpm/tpm_eventlog.h | 138
-rw-r--r--  drivers/char/tpm/tpm_eventlog_acpi.c (renamed from drivers/char/tpm/tpm_acpi.c) | 4
-rw-r--r--  drivers/char/tpm/tpm_eventlog_efi.c | 66
-rw-r--r--  drivers/char/tpm/tpm_eventlog_of.c (renamed from drivers/char/tpm/tpm_of.c) | 6
-rw-r--r--  drivers/char/tpm/tpm_i2c_infineon.c | 27
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 108
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 193
-rw-r--r--  drivers/char/tpm/tpm_tis_core.h | 16
-rw-r--r--  drivers/char/tpm/tpm_vtpm_proxy.c | 4
-rw-r--r--  drivers/char/tpm/xen-tpmfront.c | 61
-rw-r--r--  drivers/char/virtio_console.c | 4
-rw-r--r--  drivers/char/xillybus/xillybus_core.c | 4
-rw-r--r--  drivers/clk/clk.c | 8
-rw-r--r--  drivers/clk/sunxi/clk-sun9i-mmc.c | 12
-rw-r--r--  drivers/clocksource/Kconfig | 8
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/owl-timer.c | 5
-rw-r--r--  drivers/clocksource/tcb_clksrc.c | 2
-rw-r--r--  drivers/clocksource/timer-of.c | 84
-rw-r--r--  drivers/clocksource/timer-of.h | 1
-rw-r--r--  drivers/clocksource/timer-sprd.c | 159
-rw-r--r--  drivers/clocksource/timer-stm32.c | 358
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 88
-rw-r--r--  drivers/cpufreq/Makefile | 9
-rw-r--r--  drivers/cpufreq/arm_big_little.c | 23
-rw-r--r--  drivers/cpufreq/armada-37xx-cpufreq.c | 241
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 27
-rw-r--r--  drivers/cpufreq/cpufreq.c | 55
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 19
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 3
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 182
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 14
-rw-r--r--  drivers/cpufreq/longhaul.c | 2
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c | 23
-rw-r--r--  drivers/cpufreq/mvebu-cpufreq.c | 16
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c | 143
-rw-r--r--  drivers/cpufreq/qoriq-cpufreq.c | 14
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 193
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c | 51
-rw-r--r--  drivers/cpuidle/governor.c | 5
-rw-r--r--  drivers/crypto/Kconfig | 1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 6
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 131
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_reg_def.h | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_trng.c | 2
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 8
-rw-r--r--  drivers/crypto/bcm/cipher.c | 1
-rw-r--r--  drivers/crypto/bfin_crc.c | 3
-rw-r--r--  drivers/crypto/caam/caamalg.c | 120
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 182
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h | 10
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 68
-rw-r--r--  drivers/crypto/caam/caamhash.c | 73
-rw-r--r--  drivers/crypto/caam/ctrl.c | 4
-rw-r--r--  drivers/crypto/caam/desc.h | 29
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 51
-rw-r--r--  drivers/crypto/caam/intern.h | 1
-rw-r--r--  drivers/crypto/caam/key_gen.c | 30
-rw-r--r--  drivers/crypto/caam/key_gen.h | 30
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 3
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c | 1
-rw-r--r--  drivers/crypto/chelsio/Kconfig | 11
-rw-r--r--  drivers/crypto/chelsio/Makefile | 1
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 540
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h | 15
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 14
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h | 38
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 76
-rw-r--r--  drivers/crypto/chelsio/chcr_ipsec.c | 654
-rw-r--r--  drivers/crypto/exynos-rng.c | 108
-rw-r--r--  drivers/crypto/hifn_795x.c | 1
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 371
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 173
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 136
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 214
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 7
-rw-r--r--  drivers/crypto/marvell/cesa.c | 20
-rw-r--r--  drivers/crypto/n2_core.c | 3
-rw-r--r--  drivers/crypto/nx/nx-842-powernv.c | 4
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 27
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 133
-rw-r--r--  drivers/crypto/s5p-sss.c | 26
-rw-r--r--  drivers/crypto/stm32/Kconfig | 13
-rw-r--r--  drivers/crypto/stm32/Makefile | 5
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 1170
-rw-r--r--  drivers/crypto/stm32/stm32_crc32.c | 2
-rw-r--r--  drivers/devfreq/devfreq.c | 5
-rw-r--r--  drivers/dma-buf/dma-buf.c | 6
-rw-r--r--  drivers/dma-buf/sync_file.c | 2
-rw-r--r--  drivers/dma/amba-pl08x.c | 11
-rw-r--r--  drivers/dma/at_hdmac.c | 4
-rw-r--r--  drivers/dma/bcm2835-dma.c | 10
-rw-r--r--  drivers/dma/cppi41.c | 2
-rw-r--r--  drivers/dma/dma-jz4740.c | 4
-rw-r--r--  drivers/dma/dma-jz4780.c | 10
-rw-r--r--  drivers/dma/dmatest.c | 55
-rw-r--r--  drivers/dma/edma.c | 7
-rw-r--r--  drivers/dma/fsl-edma.c | 28
-rw-r--r--  drivers/dma/img-mdc-dma.c | 17
-rw-r--r--  drivers/dma/imx-sdma.c | 6
-rw-r--r--  drivers/dma/ioat/dma.c | 2
-rw-r--r--  drivers/dma/ioat/init.c | 2
-rw-r--r--  drivers/dma/k3dma.c | 10
-rw-r--r--  drivers/dma/mic_x100_dma.c | 4
-rw-r--r--  drivers/dma/omap-dma.c | 2
-rw-r--r--  drivers/dma/qcom/hidma.c | 41
-rw-r--r--  drivers/dma/qcom/hidma_ll.c | 9
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c | 61
-rw-r--r--  drivers/dma/s3c24xx-dma.c | 11
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 68
-rw-r--r--  drivers/dma/sprd-dma.c | 2
-rw-r--r--  drivers/dma/stm32-dmamux.c | 3
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 19
-rw-r--r--  drivers/dma/ti-dma-crossbar.c | 10
-rw-r--r--  drivers/dma/timb_dma.c | 2
-rw-r--r--  drivers/dma/virt-dma.c | 5
-rw-r--r--  drivers/dma/virt-dma.h | 44
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 302
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c | 179
-rw-r--r--  drivers/edac/Kconfig | 7
-rw-r--r--  drivers/edac/Makefile | 1
-rw-r--r--  drivers/edac/mv64x60_edac.c | 2
-rw-r--r--  drivers/edac/octeon_edac-lmc.c | 1
-rw-r--r--  drivers/edac/ti_edac.c | 341
-rw-r--r--  drivers/extcon/extcon-axp288.c | 39
-rw-r--r--  drivers/extcon/extcon-usbc-cros-ec.c | 142
-rw-r--r--  drivers/firewire/core-cdev.c | 4
-rw-r--r--  drivers/firewire/nosy.c | 4
-rw-r--r--  drivers/firmware/Kconfig | 8
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/arm_sdei.c | 1092
-rw-r--r--  drivers/firmware/efi/Kconfig | 10
-rw-r--r--  drivers/firmware/efi/Makefile | 3
-rw-r--r--  drivers/firmware/efi/capsule-loader.c | 47
-rw-r--r--  drivers/firmware/efi/cper-arm.c | 356
-rw-r--r--  drivers/firmware/efi/cper.c | 122
-rw-r--r--  drivers/firmware/efi/efi.c | 6
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 3
-rw-r--r--  drivers/firmware/efi/libstub/tpm.c | 81
-rw-r--r--  drivers/firmware/efi/tpm.c | 40
-rw-r--r--  drivers/firmware/psci.c | 2
-rw-r--r--  drivers/firmware/psci_checker.c | 46
-rw-r--r--  drivers/gpio/Kconfig | 32
-rw-r--r--  drivers/gpio/Makefile | 3
-rw-r--r--  drivers/gpio/devres.c | 42
-rw-r--r--  drivers/gpio/gpio-74x164.c | 5
-rw-r--r--  drivers/gpio/gpio-adp5520.c | 3
-rw-r--r--  drivers/gpio/gpio-adp5588.c | 2
-rw-r--r--  drivers/gpio/gpio-altera.c | 3
-rw-r--r--  drivers/gpio/gpio-amd8111.c | 2
-rw-r--r--  drivers/gpio/gpio-arizona.c | 2
-rw-r--r--  drivers/gpio/gpio-aspeed.c | 41
-rw-r--r--  drivers/gpio/gpio-ath79.c | 3
-rw-r--r--  drivers/gpio/gpio-axp209.c | 188
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c | 11
-rw-r--r--  drivers/gpio/gpio-brcmstb.c | 5
-rw-r--r--  drivers/gpio/gpio-bt8xx.c | 2
-rw-r--r--  drivers/gpio/gpio-crystalcove.c | 2
-rw-r--r--  drivers/gpio/gpio-cs5535.c | 2
-rw-r--r--  drivers/gpio/gpio-da9052.c | 2
-rw-r--r--  drivers/gpio/gpio-da9055.c | 2
-rw-r--r--  drivers/gpio/gpio-davinci.c | 2
-rw-r--r--  drivers/gpio/gpio-ftgpio010.c | 4
-rw-r--r--  drivers/gpio/gpio-iop.c | 4
-rw-r--r--  drivers/gpio/gpio-it87.c | 2
-rw-r--r--  drivers/gpio/gpio-max732x.c | 6
-rw-r--r--  drivers/gpio/gpio-merrifield.c | 11
-rw-r--r--  drivers/gpio/gpio-mmio.c | 30
-rw-r--r--  drivers/gpio/gpio-mockup.c | 288
-rw-r--r--  drivers/gpio/gpio-omap.c | 39
-rw-r--r--  drivers/gpio/gpio-pcie-idio-24.c | 447
-rw-r--r--  drivers/gpio/gpio-reg.c | 4
-rw-r--r--  drivers/gpio/gpio-stmpe.c | 54
-rw-r--r--  drivers/gpio/gpio-tegra.c | 4
-rw-r--r--  drivers/gpio/gpio-thunderx.c | 4
-rw-r--r--  drivers/gpio/gpio-winbond.c | 732
-rw-r--r--  drivers/gpio/gpio-xgene-sb.c | 2
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 59
-rw-r--r--  drivers/gpio/gpiolib-devprop.c | 17
-rw-r--r--  drivers/gpio/gpiolib-of.c | 122
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 33
-rw-r--r--  drivers/gpio/gpiolib.c | 396
-rw-r--r--  drivers/gpio/gpiolib.h | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 51
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 47
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 38
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 63
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 1
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 52
-rw-r--r--  drivers/gpu/drm/drm_file.c | 4
-rw-r--r--  drivers/gpu/drm/drm_lease.c | 26
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 8
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 5
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 42
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 77
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 322
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vmm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c | 41
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c | 49
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c | 46
-rw-r--r--  drivers/gpu/drm/r128/r128_state.c | 23
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | 20
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c | 9
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/sor.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 5
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 37
-rw-r--r--  drivers/gpu/drm/vc4/vc4_irq.c | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_v3d.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 41
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 2
-rw-r--r--  drivers/hid/Kconfig | 12
-rw-r--r--  drivers/hid/Makefile | 3
-rw-r--r--  drivers/hid/hid-asus.c | 41
-rw-r--r--  drivers/hid/hid-core.c | 934
-rw-r--r--  drivers/hid/hid-cp2112.c | 15
-rw-r--r--  drivers/hid/hid-debug.c | 2
-rw-r--r--  drivers/hid/hid-elecom.c | 78
-rw-r--r--  drivers/hid/hid-elo.c | 6
-rw-r--r--  drivers/hid/hid-generic.c | 68
-rw-r--r--  drivers/hid/hid-holtekff.c | 8
-rw-r--r--  drivers/hid/hid-ids.h | 7
-rw-r--r--  drivers/hid/hid-jabra.c | 58
-rw-r--r--  drivers/hid/hid-multitouch.c | 137
-rw-r--r--  drivers/hid/hid-quirks.c | 1276
-rw-r--r--  drivers/hid/hid-rmi.c | 1
-rw-r--r--  drivers/hid/hid-roccat-kovaplus.c | 2
-rw-r--r--  drivers/hid/hid-roccat.c | 2
-rw-r--r--  drivers/hid/hid-sensor-custom.c | 4
-rw-r--r--  drivers/hid/hid-sony.c | 93
-rw-r--r--  drivers/hid/hidraw.c | 2
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 18
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/hw-ish.h | 1
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/pci-ish.c | 1
-rw-r--r--  drivers/hid/uhid.c | 2
-rw-r--r--  drivers/hid/usbhid/Makefile | 2
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 11
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 402
-rw-r--r--  drivers/hid/usbhid/hiddev.c | 2
-rw-r--r--  drivers/hid/wacom_sys.c | 134
-rw-r--r--  drivers/hid/wacom_wac.c | 67
-rw-r--r--  drivers/hid/wacom_wac.h | 6
-rw-r--r--  drivers/hsi/clients/cmt_speech.c | 8
-rw-r--r--  drivers/hv/hv_utils_transport.c | 2
-rw-r--r--  drivers/hv/ring_buffer.c | 23
-rw-r--r--  drivers/hv/vmbus_drv.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 26
-rw-r--r--  drivers/hwmon/Makefile | 1
-rw-r--r--  drivers/hwmon/aspeed-pwm-tacho.c | 22
-rw-r--r--  drivers/hwmon/coretemp.c | 3
-rw-r--r--  drivers/hwmon/dell-smm-hwmon.c | 54
-rw-r--r--  drivers/hwmon/hih6130.c | 2
-rw-r--r--  drivers/hwmon/hwmon.c | 33
-rw-r--r--  drivers/hwmon/iio_hwmon.c | 3
-rw-r--r--  drivers/hwmon/ina2xx.c | 90
-rw-r--r--  drivers/hwmon/k10temp.c | 1
-rw-r--r--  drivers/hwmon/lm75.c | 2
-rw-r--r--  drivers/hwmon/pmbus/Kconfig | 1
-rw-r--r--  drivers/hwmon/pmbus/ibm-cffps.c | 288
-rw-r--r--  drivers/hwmon/pmbus/ir35221.c | 189
-rw-r--r--  drivers/hwmon/pmbus/lm25066.c | 67
-rw-r--r--  drivers/hwmon/pmbus/max31785.c | 294
-rw-r--r--  drivers/hwmon/pmbus/pmbus.h | 41
-rw-r--r--  drivers/hwmon/pmbus/pmbus_core.c | 273
-rw-r--r--  drivers/hwmon/sht15.c | 1
-rw-r--r--  drivers/hwmon/sht21.c | 2
-rw-r--r--  drivers/hwmon/sht3x.c | 7
-rw-r--r--  drivers/hwmon/w83773g.c | 329
-rw-r--r--  drivers/hwtracing/coresight/of_coresight.c | 15
-rw-r--r--  drivers/hwtracing/stm/ftrace.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-cht-wc.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.h | 2
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 39
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-stm32.h | 3
-rw-r--r--  drivers/i2c/busses/i2c-stm32f4.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-stm32f7.c | 3
-rw-r--r--  drivers/i2c/i2c-core-base.c | 6
-rw-r--r--  drivers/i2c/i2c-core-smbus.c | 13
-rw-r--r--  drivers/iio/adc/Kconfig | 37
-rw-r--r--  drivers/iio/adc/Makefile | 3
-rw-r--r--  drivers/iio/adc/sd_adc_modulator.c | 68
-rw-r--r--  drivers/iio/adc/stm32-dfsdm-adc.c | 1205
-rw-r--r--  drivers/iio/adc/stm32-dfsdm-core.c | 302
-rw-r--r--  drivers/iio/adc/stm32-dfsdm.h | 310
-rw-r--r--  drivers/iio/buffer/Kconfig | 10
-rw-r--r--  drivers/iio/buffer/Makefile | 1
-rw-r--r--  drivers/iio/buffer/industrialio-buffer-cb.c | 11
-rw-r--r--  drivers/iio/buffer/industrialio-hw-consumer.c | 247
-rw-r--r--  drivers/iio/iio_core.h | 2
-rw-r--r--  drivers/iio/industrialio-buffer.c | 2
-rw-r--r--  drivers/iio/industrialio-event.c | 4
-rw-r--r--  drivers/iio/inkern.c | 17
-rw-r--r--  drivers/infiniband/Kconfig | 1
-rw-r--r--  drivers/infiniband/core/Makefile | 2
-rw-r--r--  drivers/infiniband/core/addr.c | 65
-rw-r--r--  drivers/infiniband/core/cache.c | 23
-rw-r--r--  drivers/infiniband/core/cm.c | 227
-rw-r--r--  drivers/infiniband/core/cma.c | 254
-rw-r--r--  drivers/infiniband/core/cma_configfs.c | 2
-rw-r--r--  drivers/infiniband/core/core_priv.h | 54
-rw-r--r--  drivers/infiniband/core/cq.c | 39
-rw-r--r--  drivers/infiniband/core/device.c | 62
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 12
-rw-r--r--  drivers/infiniband/core/iwcm.c | 2
-rw-r--r--  drivers/infiniband/core/iwpm_util.c | 1
-rw-r--r--  drivers/infiniband/core/mad.c | 1
-rw-r--r--  drivers/infiniband/core/netlink.c | 10
-rw-r--r--  drivers/infiniband/core/nldev.c | 450
-rw-r--r--  drivers/infiniband/core/restrack.c | 164
-rw-r--r--  drivers/infiniband/core/roce_gid_mgmt.c | 13
-rw-r--r--  drivers/infiniband/core/sa_query.c | 18
-rw-r--r--  drivers/infiniband/core/security.c | 20
-rw-r--r--  drivers/infiniband/core/sysfs.c | 1
-rw-r--r--  drivers/infiniband/core/ucm.c | 77
-rw-r--r--  drivers/infiniband/core/ucma.c | 23
-rw-r--r--  drivers/infiniband/core/umem.c | 2
-rw-r--r--  drivers/infiniband/core/user_mad.c | 127
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 24
-rw-r--r--  drivers/infiniband/core/uverbs_ioctl.c | 19
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 103
-rw-r--r--  drivers/infiniband/core/uverbs_std_types.c | 3
-rw-r--r--  drivers/infiniband/core/verbs.c | 315
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h | 43
-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.c | 145
-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.h | 39
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 404
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h | 20
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 251
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 463
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h | 78
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 5
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 7
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.c | 9
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c | 141
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.h | 91
-rw-r--r--  drivers/infiniband/hw/bnxt_re/roce_hsi.h | 127
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 27
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 16
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 36
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 6
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 100
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h | 10
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 87
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.h | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/driver.c | 16
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c | 22
-rw-r--r--  drivers/infiniband/hw/hfi1/firmware.c | 64
-rw-r--r--  drivers/infiniband/hw/hfi1/hfi.h | 26
-rw-r--r--  drivers/infiniband/hw/hfi1/init.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/mad.c | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/mmu_rb.c | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/pcie.c | 30
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c | 10
-rw-r--r--  drivers/infiniband/hw/hfi1/rc.c | 12
-rw-r--r--  drivers/infiniband/hw/hfi1/ruc.c | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/uc.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/ud.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 6
-rw-r--r--  drivers/infiniband/hw/hns/Makefile | 2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cmd.c | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cmd.h | 10
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_common.h | 11
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c | 19
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 103
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_eq.c | 759
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_eq.h | 134
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 758
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.h | 44
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 1837
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 283
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 16
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c | 72
-rw-r--r--  drivers/infiniband/hw/i40iw/Kconfig | 1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw.h | 3
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c | 68
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.h | 8
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 25
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_d.h | 1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_hw.c | 3
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c | 13
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.c | 5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.h | 1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_uk.c | 18
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_user.h | 3
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_utils.c | 50
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 19
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 46
-rw-r--r--  drivers/infiniband/hw/mlx5/cmd.c | 11
-rw-r--r--  drivers/infiniband/hw/mlx5/cmd.h | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/cong.c | 83
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c | 23
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 1399
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 115
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 9
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 437
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c | 7
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_user.h | 112
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.h | 3
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 19
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_stats.c | 8
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 10
-rw-r--r--  drivers/infiniband/hw/qedr/main.c | 2
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 125
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 15
-rw-r--r--  drivers/infiniband/hw/qib/qib.h | 8
-rw-r--r--  drivers/infiniband/hw/qib/qib_driver.c | 16
-rw-r--r--  drivers/infiniband/hw/qib/qib_eeprom.c | 3
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c | 82
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_keys.c | 235
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c | 9
-rw-r--r--  drivers/infiniband/hw/qib/qib_ruc.c | 1
-rw-r--r--  drivers/infiniband/hw/qib/qib_uc.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_sysfs.c | 1
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 1
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | 10
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | 18
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 32
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c | 15
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 23
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c | 7
-rw-r--r--  drivers/infiniband/sw/rdmavt/cq.c | 15
-rw-r--r--  drivers/infiniband/sw/rdmavt/mcast.c | 4
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.c | 2
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 28
-rw-r--r--  drivers/infiniband/sw/rdmavt/srq.c | 16
-rw-r--r--  drivers/infiniband/sw/rdmavt/trace.h | 4
-rw-r--r--  drivers/infiniband/sw/rdmavt/trace_qp.h | 42
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.c | 10
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.h | 6
-rw-r--r--  drivers/infiniband/sw/rxe/Kconfig | 4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c | 6
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.h | 6
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h | 1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c | 18
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.h | 1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c | 12
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_recv.c | 3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c | 9
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.h | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 22
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 12
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 123
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 5
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 6
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 16
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 8
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 1
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c | 2
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 795
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 43
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 965
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h | 100
-rw-r--r--  drivers/input/evdev.c | 4
-rw-r--r--  drivers/input/input.c | 2
-rw-r--r--  drivers/input/joydev.c | 2
-rw-r--r--  drivers/input/joystick/analog.c | 2
-rw-r--r--  drivers/input/joystick/xpad.c | 19
-rw-r--r--  drivers/input/misc/hp_sdc_rtc.c | 2
-rw-r--r--  drivers/input/misc/ims-pcu.c | 2
-rw-r--r--  drivers/input/misc/twl4030-vibra.c | 6
-rw-r--r--  drivers/input/misc/twl6040-vibra.c | 3
-rw-r--r--  drivers/input/misc/uinput.c | 2
-rw-r--r--  drivers/input/misc/xen-kbdfront.c | 2
-rw-r--r--  drivers/input/mouse/alps.c | 23
-rw-r--r--  drivers/input/mouse/alps.h | 10
-rw-r--r--  drivers/input/mouse/elantech.c | 2
-rw-r--r--  drivers/input/mouse/synaptics.c | 1
-rw-r--r--  drivers/input/mouse/trackpoint.c | 245
-rw-r--r--  drivers/input/mouse/trackpoint.h | 34
-rw-r--r--  drivers/input/mousedev.c | 4
-rw-r--r--  drivers/input/rmi4/rmi_driver.c | 4
-rw-r--r--  drivers/input/rmi4/rmi_f01.c | 12
-rw-r--r--  drivers/input/serio/serio_raw.c | 4
-rw-r--r--  drivers/input/serio/userio.c | 2
-rw-r--r--  drivers/input/touchscreen/88pm860x-ts.c | 16
-rw-r--r--  drivers/input/touchscreen/elants_i2c.c | 10
-rw-r--r--  drivers/input/touchscreen/hideep.c | 3
-rw-r--r--  drivers/input/touchscreen/of_touchscreen.c | 4
-rw-r--r--  drivers/input/touchscreen/s6sy761.c | 15
-rw-r--r--  drivers/input/touchscreen/stmfts.c | 15
-rw-r--r--  drivers/iommu/amd_iommu.c | 2
-rw-r--r--  drivers/iommu/amd_iommu_v2.c | 1
-rw-r--r--  drivers/iommu/arm-smmu-v3.c | 17
-rw-r--r--  drivers/iommu/intel-iommu.c | 2
-rw-r--r--  drivers/iommu/intel-svm.c | 1
-rw-r--r--  drivers/iommu/intel_irq_remapping.c | 2
-rw-r--r--  drivers/irqchip/Kconfig | 8
-rw-r--r--  drivers/irqchip/Makefile | 1
-rw-r--r--  drivers/irqchip/irq-bcm2836.c | 46
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 4
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 40
-rw-r--r--  drivers/irqchip/irq-goldfish-pic.c | 139
-rw-r--r--  drivers/irqchip/irq-ompic.c | 4
-rw-r--r--  drivers/irqchip/irq-renesas-intc-irqpin.c | 6
-rw-r--r--  drivers/isdn/capi/capi.c | 4
-rw-r--r--  drivers/isdn/divert/divert_procfs.c | 4
-rw-r--r--  drivers/isdn/hardware/eicon/divamnt.c | 4
-rw-r--r--  drivers/isdn/hardware/eicon/divasi.c | 4
-rw-r--r--  drivers/isdn/hardware/eicon/divasmain.c | 2
-rw-r--r--  drivers/isdn/hardware/eicon/divasproc.c | 2
-rw-r--r--  drivers/isdn/hysdn/hysdn_proclog.c | 4
-rw-r--r--  drivers/isdn/i4l/isdn_common.c | 4
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.c | 4
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.h | 2
-rw-r--r--  drivers/isdn/mISDN/l1oip_core.c | 22
-rw-r--r--  drivers/isdn/mISDN/timerdev.c | 4
-rw-r--r--  drivers/leds/Kconfig | 9
-rw-r--r--  drivers/leds/Makefile | 1
-rw-r--r--  drivers/leds/led-core.c | 1
-rw-r--r--  drivers/leds/leds-as3645a.c | 7
-rw-r--r--  drivers/leds/leds-blinkm.c | 4
-rw-r--r--  drivers/leds/leds-lm3692x.c | 393
-rw-r--r--  drivers/leds/leds-lp8860.c | 66
-rw-r--r--  drivers/leds/leds-pm8058.c | 2
-rw-r--r--  drivers/leds/leds-pwm.c | 1
-rw-r--r--  drivers/leds/trigger/Kconfig | 9
-rw-r--r--  drivers/leds/trigger/Makefile | 1
-rw-r--r--  drivers/leds/trigger/ledtrig-netdev.c | 496
-rw-r--r--  drivers/leds/trigger/ledtrig-transient.c | 33
-rw-r--r--  drivers/leds/uleds.c | 2
-rw-r--r--  drivers/lightnvm/Kconfig | 7
-rw-r--r--  drivers/lightnvm/Makefile | 1
-rw-r--r--  drivers/lightnvm/core.c | 462
-rw-r--r--  drivers/lightnvm/pblk-cache.c | 5
-rw-r--r--  drivers/lightnvm/pblk-core.c | 55
-rw-r--r--  drivers/lightnvm/pblk-gc.c | 23
-rw-r--r--  drivers/lightnvm/pblk-init.c | 104
-rw-r--r--  drivers/lightnvm/pblk-map.c | 2
-rw-r--r--  drivers/lightnvm/pblk-rb.c | 111
-rw-r--r--  drivers/lightnvm/pblk-read.c | 35
-rw-r--r--  drivers/lightnvm/pblk-recovery.c | 43
-rw-r--r--  drivers/lightnvm/pblk-rl.c | 54
-rw-r--r--  drivers/lightnvm/pblk-sysfs.c | 15
-rw-r--r--  drivers/lightnvm/pblk-write.c | 23
-rw-r--r--  drivers/lightnvm/pblk.h | 163
-rw-r--r--  drivers/lightnvm/rrpc.c | 1625
-rw-r--r--  drivers/lightnvm/rrpc.h | 290
-rw-r--r--  drivers/macintosh/smu.c | 4
-rw-r--r--  drivers/macintosh/via-pmu.c | 4
-rw-r--r--  drivers/mailbox/mailbox-test.c | 2
-rw-r--r--  drivers/md/Kconfig | 7
-rw-r--r--  drivers/md/Makefile | 1
-rw-r--r--  drivers/md/bcache/alloc.c | 19
-rw-r--r--  drivers/md/bcache/bcache.h | 24
-rw-r--r--  drivers/md/bcache/btree.c | 10
-rw-r--r--  drivers/md/bcache/closure.c | 47
-rw-r--r--  drivers/md/bcache/closure.h | 60
-rw-r--r--  drivers/md/bcache/debug.c | 7
-rw-r--r--  drivers/md/bcache/io.c | 13
-rw-r--r--  drivers/md/bcache/movinggc.c | 2
-rw-r--r--  drivers/md/bcache/request.c | 29
-rw-r--r--  drivers/md/bcache/super.c | 27
-rw-r--r--  drivers/md/bcache/util.c | 34
-rw-r--r--  drivers/md/bcache/util.h | 1
-rw-r--r--  drivers/md/bcache/writeback.c | 203
-rw-r--r--  drivers/md/bcache/writeback.h | 12
-rw-r--r--  drivers/md/dm-bufio.c | 45
-rw-r--r--  drivers/md/dm-cache-target.c | 12
-rw-r--r--  drivers/md/dm-core.h | 5
-rw-r--r--  drivers/md/dm-crypt.c | 26
-rw-r--r--  drivers/md/dm-delay.c | 2
-rw-r--r--  drivers/md/dm-flakey.c | 5
-rw-r--r--  drivers/md/dm-integrity.c | 49
-rw-r--r--  drivers/md/dm-io.c | 3
-rw-r--r--  drivers/md/dm-ioctl.c | 4
-rw-r--r--  drivers/md/dm-kcopyd.c | 6
-rw-r--r--  drivers/md/dm-log-writes.c | 2
-rw-r--r--  drivers/md/dm-mpath.c | 383
-rw-r--r--  drivers/md/dm-queue-length.c | 6
-rw-r--r--  drivers/md/dm-raid.c | 389
-rw-r--r--  drivers/md/dm-rq.c | 34
-rw-r--r--  drivers/md/dm-service-time.c | 6
-rw-r--r--  drivers/md/dm-snap.c | 132
-rw-r--r--  drivers/md/dm-stats.c | 1
-rw-r--r--  drivers/md/dm-table.c | 119
-rw-r--r--  drivers/md/dm-thin-metadata.c | 6
-rw-r--r--  drivers/md/dm-thin.c | 31
-rw-r--r--  drivers/md/dm-unstripe.c | 219
-rw-r--r--  drivers/md/dm-zoned-metadata.c | 3
-rw-r--r--  drivers/md/dm-zoned-target.c | 3
-rw-r--r--  drivers/md/dm.c | 680
-rw-r--r--  drivers/md/dm.h | 4
-rw-r--r--  drivers/md/md.c | 35
-rw-r--r--  drivers/md/md.h | 9
-rw-r--r--  drivers/md/persistent-data/dm-btree.c | 19
-rw-r--r--  drivers/md/raid1.c | 11
-rw-r--r--  drivers/md/raid10.c | 12
-rw-r--r--  drivers/md/raid5-cache.c | 31
-rw-r--r--  drivers/md/raid5-log.h | 30
-rw-r--r--  drivers/md/raid5-ppl.c | 167
-rw-r--r--  drivers/md/raid5.c | 16
-rw-r--r--  drivers/media/cec/cec-api.c | 4
-rw-r--r--  drivers/media/common/saa7146/saa7146_fops.c | 8
-rw-r--r--  drivers/media/common/siano/smsdvb-debugfs.c | 7
-rw-r--r--  drivers/media/dvb-core/dmxdev.c | 8
-rw-r--r--  drivers/media/dvb-core/dvb_ca_en50221.c | 4
-rw-r--r--  drivers/media/dvb-core/dvb_frontend.c | 2
-rw-r--r--  drivers/media/firewire/firedtv-ci.c | 2
-rw-r--r--  drivers/media/media-devnode.c | 2
-rw-r--r--  drivers/media/pci/bt8xx/bttv-driver.c | 12
-rw-r--r--  drivers/media/pci/cx18/cx18-fileops.c | 8
-rw-r--r--  drivers/media/pci/cx18/cx18-fileops.h | 2
-rw-r--r--  drivers/media/pci/ddbridge/ddbridge-core.c | 4
-rw-r--r--  drivers/media/pci/ivtv/ivtv-fileops.c | 10
-rw-r--r--  drivers/media/pci/ivtv/ivtv-fileops.h | 4
-rw-r--r--  drivers/media/pci/meye/meye.c | 4
-rw-r--r--  drivers/media/pci/saa7134/saa7134-video.c | 4
-rw-r--r--  drivers/media/pci/saa7164/saa7164-encoder.c | 6
-rw-r--r--  drivers/media/pci/saa7164/saa7164-vbi.c | 4
-rw-r--r--  drivers/media/pci/ttpci/av7110_av.c | 8
-rw-r--r--  drivers/media/pci/ttpci/av7110_ca.c | 4
-rw-r--r--  drivers/media/pci/zoran/zoran_driver.c | 4
-rw-r--r--  drivers/media/platform/davinci/vpfe_capture.c | 2
-rw-r--r--  drivers/media/platform/exynos-gsc/gsc-m2m.c | 4
-rw-r--r--  drivers/media/platform/fsl-viu.c | 6
-rw-r--r--  drivers/media/platform/m2m-deinterlace.c | 4
-rw-r--r--  drivers/media/platform/mx2_emmaprp.c | 4
-rw-r--r--  drivers/media/platform/omap/omap_vout.c | 2
-rw-r--r--  drivers/media/platform/omap3isp/ispvideo.c | 4
-rw-r--r--  drivers/media/platform/s3c-camif/camif-capture.c | 4
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc.c | 4
-rw-r--r--  drivers/media/platform/sh_veu.c | 2
-rw-r--r--  drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c | 2
-rw-r--r--  drivers/media/platform/soc_camera/soc_camera.c | 4
-rw-r--r--  drivers/media/platform/via-camera.c | 2
-rw-r--r--  drivers/media/platform/vivid/vivid-core.c | 2
-rw-r--r--  drivers/media/platform/vivid/vivid-radio-rx.c | 2
-rw-r--r--  drivers/media/platform/vivid/vivid-radio-rx.h | 2
-rw-r--r--  drivers/media/platform/vivid/vivid-radio-tx.c | 2
-rw-r--r--  drivers/media/platform/vivid/vivid-radio-tx.h | 2
-rw-r--r--  drivers/media/radio/radio-cadet.c | 6
-rw-r--r--  drivers/media/radio/radio-si476x.c | 6
-rw-r--r--  drivers/media/radio/radio-wl1273.c | 2
-rw-r--r--  drivers/media/radio/si470x/radio-si470x-common.c | 6
-rw-r--r--  drivers/media/radio/wl128x/fmdrv_v4l2.c | 2
-rw-r--r--  drivers/media/rc/lirc_dev.c | 4
-rw-r--r--  drivers/media/usb/cpia2/cpia2.h | 2
-rw-r--r--  drivers/media/usb/cpia2/cpia2_core.c | 4
-rw-r--r--  drivers/media/usb/cpia2/cpia2_v4l.c | 4
-rw-r--r--  drivers/media/usb/cx231xx/cx231xx-417.c | 6
-rw-r--r--  drivers/media/usb/cx231xx/cx231xx-video.c | 6
-rw-r--r--  drivers/media/usb/gspca/gspca.c | 6
-rw-r--r--  drivers/media/usb/hdpvr/hdpvr-video.c | 6
-rw-r--r--  drivers/media/usb/pvrusb2/pvrusb2-v4l2.c | 4
-rw-r--r--  drivers/media/usb/stkwebcam/stk-webcam.c | 4
-rw-r--r--  drivers/media/usb/tm6000/tm6000-video.c | 10
-rw-r--r--  drivers/media/usb/uvc/uvc_queue.c | 4
-rw-r--r--  drivers/media/usb/uvc/uvc_v4l2.c | 57
-rw-r--r--  drivers/media/usb/uvc/uvcvideo.h | 2
-rw-r--r--  drivers/media/usb/zr364xx/zr364xx.c | 4
-rw-r--r--  drivers/media/v4l2-core/v4l2-ctrls.c | 2
-rw-r--r--  drivers/media/v4l2-core/v4l2-dev.c | 4
-rw-r--r--  drivers/media/v4l2-core/v4l2-mem2mem.c | 10
-rw-r--r--  drivers/media/v4l2-core/v4l2-subdev.c | 2
-rw-r--r--  drivers/media/v4l2-core/videobuf-core.c | 6
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c | 4
-rw-r--r--  drivers/media/v4l2-core/videobuf2-v4l2.c | 10
-rw-r--r--  drivers/memory/omap-gpmc.c | 163
-rw-r--r--  drivers/memstick/host/Kconfig | 4
-rw-r--r--  drivers/memstick/host/rtsx_pci_ms.c | 2
-rw-r--r--  drivers/memstick/host/rtsx_usb_ms.c | 2
-rw-r--r--  drivers/message/fusion/mptbase.c | 59
-rw-r--r--  drivers/message/fusion/mptctl.c | 25
-rw-r--r--  drivers/message/fusion/mptsas.c | 1
-rw-r--r--  drivers/mfd/Kconfig | 41
-rw-r--r--  drivers/mfd/Makefile | 7
-rw-r--r--  drivers/mfd/ab8500-debugfs.c | 439
-rw-r--r--  drivers/mfd/arizona-irq.c | 4
-rw-r--r--  drivers/mfd/atmel-flexcom.c | 63
-rw-r--r--  drivers/mfd/axp20x.c | 4
-rw-r--r--  drivers/mfd/cros_ec.c | 4
-rw-r--r--  drivers/mfd/cros_ec_dev.c (renamed from drivers/platform/chrome/cros_ec_dev.c) | 8
-rw-r--r--  drivers/mfd/cros_ec_dev.h (renamed from drivers/platform/chrome/cros_ec_dev.h) | 0
-rw-r--r--  drivers/mfd/cros_ec_spi.c | 78
-rw-r--r--  drivers/mfd/intel-lpss.c | 6
-rw-r--r--  drivers/mfd/intel_soc_pmic_core.c | 1
-rw-r--r--  drivers/mfd/kempld-core.c | 2
-rw-r--r--  drivers/mfd/lpc_ich.c | 5
-rw-r--r--  drivers/mfd/max77843.c | 1
-rw-r--r--  drivers/mfd/palmas.c | 10
-rw-r--r--  drivers/mfd/pcf50633-core.c | 2
-rw-r--r--  drivers/mfd/rave-sp.c | 710
-rw-r--r--  drivers/mfd/stm32-lptimer.c | 6
-rw-r--r--  drivers/mfd/stm32-timers.c | 4
-rw-r--r--  drivers/mfd/syscon.c | 19
-rw-r--r--  drivers/mfd/ti_am335x_tscadc.c | 2
-rw-r--r--  drivers/mfd/tmio_core.c | 20
-rw-r--r--  drivers/mfd/twl4030-audio.c | 9
-rw-r--r--  drivers/mfd/twl6040.c | 12
-rw-r--r--  drivers/misc/Kconfig | 5
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/cardreader/Kconfig | 20
-rw-r--r--  drivers/misc/cardreader/Makefile | 4
-rw-r--r--  drivers/misc/cardreader/rtl8411.c (renamed from drivers/mfd/rtl8411.c) | 2
-rw-r--r--  drivers/misc/cardreader/rts5209.c (renamed from drivers/mfd/rts5209.c) | 2
-rw-r--r--  drivers/misc/cardreader/rts5227.c (renamed from drivers/mfd/rts5227.c) | 2
-rw-r--r--  drivers/misc/cardreader/rts5229.c (renamed from drivers/mfd/rts5229.c) | 2
-rw-r--r--  drivers/misc/cardreader/rts5249.c (renamed from drivers/mfd/rts5249.c) | 3
-rw-r--r--  drivers/misc/cardreader/rts5260.c | 748
-rw-r--r--  drivers/misc/cardreader/rts5260.h | 45
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.c (renamed from drivers/mfd/rtsx_pcr.c) | 128
-rw-r--r--  drivers/misc/cardreader/rtsx_pcr.h (renamed from drivers/mfd/rtsx_pcr.h) | 12
-rw-r--r--  drivers/misc/cardreader/rtsx_usb.c (renamed from drivers/mfd/rtsx_usb.c) | 2
-rw-r--r--  drivers/misc/cxl/api.c | 2
-rw-r--r--  drivers/misc/cxl/cxl.h | 2
-rw-r--r--  drivers/misc/cxl/file.c | 4
-rw-r--r--  drivers/misc/cxl/vphb.c | 2
-rw-r--r--  drivers/misc/eeprom/at24.c | 26
-rw-r--r--  drivers/misc/hpilo.c | 2
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d.c | 2
-rw-r--r--  drivers/misc/mei/main.c | 6
-rw-r--r--  drivers/misc/mic/scif/scif_api.c | 7
-rw-r--r--  drivers/misc/mic/scif/scif_epd.h | 2
-rw-r--r--  drivers/misc/mic/scif/scif_fd.c | 2
-rw-r--r--  drivers/misc/mic/vop/vop_vringh.c | 4
-rw-r--r--  drivers/misc/phantom.c | 4
-rw-r--r--  drivers/misc/pti.c | 2
-rw-r--r--  drivers/misc/sgi-gru/grutlbpurge.c | 1
-rw-r--r--  drivers/misc/vmw_vmci/vmci_host.c | 4
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.c | 179
-rw-r--r--  drivers/mmc/core/block.c | 1385
-rw-r--r--  drivers/mmc/core/block.h | 12
-rw-r--r--  drivers/mmc/core/bus.c | 2
-rw-r--r--  drivers/mmc/core/card.h | 2
-rw-r--r--  drivers/mmc/core/core.c | 228
-rw-r--r--  drivers/mmc/core/core.h | 47
-rw-r--r--  drivers/mmc/core/host.h | 6
-rw-r--r--  drivers/mmc/core/mmc.c | 2
-rw-r--r--  drivers/mmc/core/mmc_test.c | 221
-rw-r--r--  drivers/mmc/core/queue.c | 504
-rw-r--r--  drivers/mmc/core/queue.h | 64
-rw-r--r--  drivers/mmc/core/quirks.h | 8
-rw-r--r--  drivers/mmc/core/slot-gpio.c | 22
-rw-r--r--  drivers/mmc/host/Kconfig | 26
-rw-r--r--  drivers/mmc/host/Makefile | 11
-rw-r--r--  drivers/mmc/host/android-goldfish.c | 2
-rw-r--r--  drivers/mmc/host/cqhci.c | 1150
-rw-r--r--  drivers/mmc/host/cqhci.h | 240
-rw-r--r--  drivers/mmc/host/davinci_mmc.c | 17
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 2
-rw-r--r--  drivers/mmc/host/mmci.c | 127
-rw-r--r--  drivers/mmc/host/mmci.h | 6
-rw-r--r--  drivers/mmc/host/renesas_sdhi.h | 22
-rw-r--r--  drivers/mmc/host/renesas_sdhi_core.c | 52
-rw-r--r--  drivers/mmc/host/renesas_sdhi_internal_dmac.c | 15
-rw-r--r--  drivers/mmc/host/renesas_sdhi_sys_dmac.c | 37
-rw-r--r--  drivers/mmc/host/rtsx_pci_sdmmc.c | 2
-rw-r--r--  drivers/mmc/host/rtsx_usb_sdmmc.c | 2
-rw-r--r--  drivers/mmc/host/s3cmci.c | 8
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c | 132
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 36
-rw-r--r--  drivers/mmc/host/sdhci-of-arasan.c | 149
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c | 8
-rw-r--r--  drivers/mmc/host/sdhci-pci-arasan.c | 331
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c | 218
-rw-r--r--  drivers/mmc/host/sdhci-pci.h | 7
-rw-r--r--  drivers/mmc/host/sdhci-spear.c | 4
-rw-r--r--  drivers/mmc/host/sdhci-xenon.c | 7
-rw-r--r--  drivers/mmc/host/sdhci.c | 57
-rw-r--r--  drivers/mmc/host/sdhci.h | 2
-rw-r--r--  drivers/mmc/host/sdhci_f_sdh30.c | 52
-rw-r--r--  drivers/mmc/host/sh_mmcif.c | 2
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 9
-rw-r--r--  drivers/mmc/host/tmio_mmc.c | 23
-rw-r--r--  drivers/mmc/host/tmio_mmc.h | 43
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c | 152
-rw-r--r--  drivers/mtd/devices/docg3.c | 70
-rw-r--r--  drivers/mtd/devices/m25p80.c | 9
-rw-r--r--  drivers/mtd/devices/mchp23k256.c | 18
-rw-r--r--  drivers/mtd/mtdcore.c | 38
-rw-r--r--  drivers/mtd/mtdpart.c | 43
-rw-r--r--  drivers/mtd/mtdswap.c | 5
-rw-r--r--  drivers/mtd/nand/Kconfig | 17
-rw-r--r--  drivers/mtd/nand/Makefile | 1
-rw-r--r--  drivers/mtd/nand/atmel/nand-controller.c | 9
-rw-r--r--  drivers/mtd/nand/bf5xx_nand.c | 6
-rw-r--r--  drivers/mtd/nand/brcmnand/brcmnand.c | 40
-rw-r--r--  drivers/mtd/nand/cafe_nand.c | 52
-rw-r--r--  drivers/mtd/nand/denali.c | 84
-rw-r--r--  drivers/mtd/nand/denali.h | 4
-rw-r--r--  drivers/mtd/nand/denali_pci.c | 4
-rw-r--r--  drivers/mtd/nand/diskonchip.c | 4
-rw-r--r--  drivers/mtd/nand/docg4.c | 21
-rw-r--r--  drivers/mtd/nand/fsl_elbc_nand.c | 10
-rw-r--r--  drivers/mtd/nand/fsl_ifc_nand.c | 13
-rw-r--r--  drivers/mtd/nand/fsmc_nand.c | 9
-rw-r--r--  drivers/mtd/nand/gpio.c | 6
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 117
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-nand.h | 46
-rw-r--r--  drivers/mtd/nand/hisi504_nand.c | 9
-rw-r--r--  drivers/mtd/nand/jz4740_nand.c | 16
-rw-r--r--  drivers/mtd/nand/lpc32xx_mlc.c | 7
-rw-r--r--  drivers/mtd/nand/lpc32xx_slc.c | 33
-rw-r--r--  drivers/mtd/nand/marvell_nand.c | 2896
-rw-r--r--  drivers/mtd/nand/mtk_ecc.c | 126
-rw-r--r--  drivers/mtd/nand/mtk_ecc.h | 3
-rw-r--r--  drivers/mtd/nand/mtk_nand.c | 76
-rw-r--r--  drivers/mtd/nand/nand_base.c | 2309
-rw-r--r--  drivers/mtd/nand/nand_bbt.c | 2
-rw-r--r--  drivers/mtd/nand/nand_hynix.c | 129
-rw-r--r--  drivers/mtd/nand/nand_micron.c | 83
-rw-r--r--  drivers/mtd/nand/nand_samsung.c | 19
-rw-r--r--  drivers/mtd/nand/nand_timings.c | 21
-rw-r--r--  drivers/mtd/nand/omap2.c | 28
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 15
-rw-r--r--  drivers/mtd/nand/qcom_nandc.c | 31
-rw-r--r--  drivers/mtd/nand/r852.c | 11
-rw-r--r--  drivers/mtd/nand/sh_flctl.c | 6
-rw-r--r--  drivers/mtd/nand/sm_common.h | 2
-rw-r--r--  drivers/mtd/nand/sunxi_nand.c | 111
-rw-r--r--  drivers/mtd/nand/tango_nand.c | 27
-rw-r--r--  drivers/mtd/nand/tmio_nand.c | 5
-rw-r--r--  drivers/mtd/nand/vf610_nfc.c | 6
-rw-r--r--  drivers/mtd/onenand/Kconfig | 7
-rw-r--r--  drivers/mtd/onenand/omap2.c | 577
-rw-r--r--  drivers/mtd/onenand/onenand_base.c | 81
-rw-r--r--  drivers/mtd/onenand/samsung.c | 185
-rw-r--r--  drivers/mtd/parsers/sharpslpart.c | 6
-rw-r--r--  drivers/mtd/spi-nor/cadence-quadspi.c | 55
-rw-r--r--  drivers/mtd/spi-nor/fsl-quadspi.c | 8
-rw-r--r--  drivers/mtd/spi-nor/intel-spi.c | 6
-rw-r--r--  drivers/mtd/spi-nor/mtk-quadspi.c | 240
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 75
-rw-r--r--  drivers/mtd/tests/nandbiterrs.c | 2
-rw-r--r--  drivers/mtd/tests/oobtest.c | 21
-rw-r--r--  drivers/mtd/ubi/block.c | 42
-rw-r--r--  drivers/mtd/ubi/build.c | 13
-rw-r--r--  drivers/mtd/ubi/eba.c | 2
-rw-r--r--  drivers/mtd/ubi/fastmap-wl.c | 2
-rw-r--r--  drivers/mtd/ubi/fastmap.c | 5
-rw-r--r--  drivers/mtd/ubi/vmt.c | 15
-rw-r--r--  drivers/mtd/ubi/wl.c | 87
-rw-r--r--  drivers/mtd/ubi/wl.h | 2
-rw-r--r--  drivers/mux/core.c | 4
-rw-r--r--  drivers/net/Kconfig | 11
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/bonding/bond_options.c | 6
-rw-r--r--  drivers/net/caif/caif_hsi.c | 1
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c | 4
-rw-r--r--  drivers/net/can/dev.c | 47
-rw-r--r--  drivers/net/can/flexcan.c | 243
-rw-r--r--  drivers/net/can/m_can/m_can.c | 183
-rw-r--r--  drivers/net/can/rx-offload.c | 2
-rw-r--r--  drivers/net/can/slcan.c | 4
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 7
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 1
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 6
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 9
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 55
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.h | 7
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 30
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 9
-rw-r--r--  drivers/net/can/vcan.c | 2
-rw-r--r--  drivers/net/can/vxcan.c | 18
-rw-r--r--  drivers/net/dsa/Kconfig | 2
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 19
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 7
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 11
-rw-r--r--  drivers/net/dsa/dsa_loop.c | 9
-rw-r--r--  drivers/net/dsa/lan9303-core.c | 138
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 12
-rw-r--r--  drivers/net/dsa/mt7530.c | 288
-rw-r--r--  drivers/net/dsa/mt7530.h | 83
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 94
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 16
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_atu.c | 87
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_vtu.c | 74
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 1
-rw-r--r--  drivers/net/dummy.c | 215
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 90
-rw-r--r--  drivers/net/ethernet/8390/mac8390.c | 33
-rw-r--r--  drivers/net/ethernet/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/Makefile | 2
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 14
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c | 11
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 113
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 6
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_regs_defs.h | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 24
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/Makefile | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 13
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_common.h | 39
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 102
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c | 11
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 110
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 360
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 62
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h | 45
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 412
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h | 26
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_utils.h | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.h | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 463
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h | 18
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h | 65
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 505
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h | 28
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | 64
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 1326
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 544
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 1523
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 416
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 137
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 184
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/ver.h | 8
-rw-r--r--  drivers/net/ethernet/arc/emac.h | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 164
-rw-r--r--  drivers/net/ethernet/arc/emac_rockchip.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 27
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/Makefile | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 814
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 89
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c | 32
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 11936
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 223
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 24
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 42
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 21
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 170
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 758
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 13
-rw-r--r--  drivers/net/ethernet/cavium/Makefile | 1
-rw-r--r--  drivers/net/ethernet/cavium/common/Makefile | 1
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.c | 353
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.h | 70
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c | 7
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h | 36
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 58
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_reg.h | 1
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 31
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 297
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 41
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 6
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 31
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 4
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_xcv.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/Makefile | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c | 24
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 156
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 1173
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c | 82
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h | 43
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 70
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 117
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 306
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 107
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 140
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 607
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 28
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 323
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 286
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 45
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 164
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 35
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 52
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 38
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 28
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.c | 4
-rw-r--r--  drivers/net/ethernet/cirrus/mac89x0.c | 6
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 5
-rw-r--r--  drivers/net/ethernet/cortina/Kconfig | 23
-rw-r--r--  drivers/net/ethernet/cortina/Makefile | 4
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 2593
-rw-r--r--  drivers/net/ethernet/cortina/gemini.h | 958
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 41
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 16
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 28
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 109
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/Makefile | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 88
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 32
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c) | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c) | 405
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h) | 28
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c (renamed from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c) | 347
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 106
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 1588
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 129
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 418
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 64
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 342
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 248
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 1505
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 164
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 181
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 26
-rw-r--r--  drivers/net/ethernet/ibm/emac/emac.h | 17
-rw-r--r--  drivers/net/ethernet/ibm/emac/phy.c | 10
-rw-r--r--  drivers/net/ethernet/ibm/emac/rgmii.c | 43
-rw-r--r--  drivers/net/ethernet/ibm/emac/zmii.c | 38
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 146
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 27
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 11
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 14
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 54
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 9
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 67
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 17
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 30
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 36
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 74
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 192
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 255
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 143
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 51
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_status.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 110
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 54
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 15
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 30
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c | 31
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h10
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_status.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c92
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h54
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h9
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c28
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c228
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c83
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c32
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c72
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c121
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c56
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c941
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h93
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c76
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c589
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c63
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h61
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c372
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c34
-rw-r--r--drivers/net/ethernet/marvell/Kconfig2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c687
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c253
-rw-r--r--drivers/net/ethernet/marvell/skge.c1
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c46
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h96
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dim.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c110
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c402
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c156
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c341
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c282
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c548
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c147
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c216
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c166
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c129
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c214
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c255
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c631
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h103
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c359
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c558
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c315
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c6
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c13
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c38
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile3
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c453
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/fw.h157
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c988
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c297
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h213
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c198
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c159
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c79
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c75
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h71
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c156
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h16
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c127
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c57
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h84
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.c95
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.h28
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h29
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c113
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c135
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h210
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c811
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c111
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c76
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c89
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.h16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c62
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c38
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c10
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c72
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c4
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c94
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c49
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c1167
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h10603
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c68
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c1091
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c58
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c119
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c28
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h190
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h24
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h10
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c47
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c33
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c18
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.h3
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c7
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c13
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c64
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h3
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c70
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h32
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c17
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c309
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h12
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c7
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.c142
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c30
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c65
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c1
-rw-r--r--drivers/net/ethernet/sfc/ef10.c363
-rw-r--r--drivers/net/ethernet/sfc/ef10_regs.h46
-rw-r--r--drivers/net/ethernet/sfc/efx.c66
-rw-r--r--drivers/net/ethernet/sfc/efx.h9
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c6
-rw-r--r--drivers/net/ethernet/sfc/farch.c26
-rw-r--r--drivers/net/ethernet/sfc/io.h19
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h3
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h2453
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c168
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h33
-rw-r--r--drivers/net/ethernet/sfc/nic.h28
-rw-r--r--drivers/net/ethernet/sfc/ptp.c370
-rw-r--r--drivers/net/ethernet/sfc/siena.c12
-rw-r--r--drivers/net/ethernet/sfc/tx.c21
-rw-r--r--drivers/net/ethernet/socionext/Kconfig34
-rw-r--r--drivers/net/ethernet/socionext/Makefile6
-rw-r--r--drivers/net/ethernet/socionext/netsec.c1777
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c1736
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c113
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c69
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c24
-rw-r--r--drivers/net/ethernet/ti/cpsw.c96
-rw-r--r--drivers/net/ethernet/ti/cpsw.h23
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c109
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h1
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c20
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c6
-rw-r--r--drivers/net/geneve.c38
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h60
-rw-r--r--drivers/net/hyperv/netvsc.c73
-rw-r--r--drivers/net/hyperv/netvsc_drv.c111
-rw-r--r--drivers/net/hyperv/rndis_filter.c44
-rw-r--r--drivers/net/ieee802154/adf7242.c90
-rw-r--r--drivers/net/ieee802154/ca8210.c4
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c17
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c33
-rw-r--r--drivers/net/macsec.c68
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/netdevsim/Makefile11
-rw-r--r--drivers/net/netdevsim/bpf.c643
-rw-r--r--drivers/net/netdevsim/netdev.c504
-rw-r--r--drivers/net/netdevsim/netdevsim.h109
-rw-r--r--drivers/net/phy/amd.c2
-rw-r--r--drivers/net/phy/at803x.c48
-rw-r--r--drivers/net/phy/bcm-cygnus.c2
-rw-r--r--drivers/net/phy/bcm63xx.c4
-rw-r--r--drivers/net/phy/bcm7xxx.c6
-rw-r--r--drivers/net/phy/broadcom.c69
-rw-r--r--drivers/net/phy/cicada.c4
-rw-r--r--drivers/net/phy/davicom.c5
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/phy/dp83822.c2
-rw-r--r--drivers/net/phy/dp83848.c2
-rw-r--r--drivers/net/phy/dp83867.c2
-rw-r--r--drivers/net/phy/fixed_phy.c31
-rw-r--r--drivers/net/phy/icplus.c4
-rw-r--r--drivers/net/phy/intel-xway.c12
-rw-r--r--drivers/net/phy/lxt.c5
-rw-r--r--drivers/net/phy/marvell.c618
-rw-r--r--drivers/net/phy/marvell10g.c111
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c2
-rw-r--r--drivers/net/phy/mdio-sun4i.c6
-rw-r--r--drivers/net/phy/mdio-xgene.c21
-rw-r--r--drivers/net/phy/mdio_bus.c100
-rw-r--r--drivers/net/phy/mdio_device.c34
-rw-r--r--drivers/net/phy/meson-gxl.c216
-rw-r--r--drivers/net/phy/micrel.c25
-rw-r--r--drivers/net/phy/microchip.c1
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/phy-c45.c33
-rw-r--r--drivers/net/phy/phy-core.c258
-rw-r--r--drivers/net/phy/phy.c65
-rw-r--r--drivers/net/phy/phy_device.c118
-rw-r--r--drivers/net/phy/phylink.c455
-rw-r--r--drivers/net/phy/qsemi.c2
-rw-r--r--drivers/net/phy/realtek.c105
-rw-r--r--drivers/net/phy/rockchip.c1
-rw-r--r--drivers/net/phy/sfp-bus.c227
-rw-r--r--drivers/net/phy/sfp.c119
-rw-r--r--drivers/net/phy/smsc.c11
-rw-r--r--drivers/net/phy/ste10Xp.c4
-rw-r--r--drivers/net/phy/uPD60620.c1
-rw-r--r--drivers/net/phy/vitesse.c12
-rw-r--r--drivers/net/ppp/ppp_async.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c9
-rw-r--r--drivers/net/ppp/ppp_synctty.c2
-rw-r--r--drivers/net/ppp/pppoe.c11
-rw-r--r--drivers/net/slip/slip.c4
-rw-r--r--drivers/net/tap.c46
-rw-r--r--drivers/net/tun.c462
-rw-r--r--drivers/net/usb/lan78xx.c1
-rw-r--r--drivers/net/usb/qmi_wwan.c8
-rw-r--r--drivers/net/usb/r8152.c13
-rw-r--r--drivers/net/usb/usbnet.c8
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/virtio_net.c235
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h8
-rw-r--r--drivers/net/vrf.c5
-rw-r--r--drivers/net/vxlan.c43
-rw-r--r--drivers/net/wan/cosa.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig9
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c636
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h61
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c342
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h36
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c993
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h225
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c293
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h40
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c29
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h165
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c365
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c600
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h23
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c72
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c230
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h52
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode_i.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h11
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c146
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h115
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c74
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig14
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c133
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c57
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c47
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h15
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c46
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h107
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c39
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c126
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/boot_loader.h9
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c85
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c151
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c15
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.h18
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c234
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c30
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c304
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c20
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c192
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c121
-rw-r--r--drivers/net/wireless/ath/wil6210/pmc.c11
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h112
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c11
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.h15
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c635
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h117
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c17
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c531
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c24
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c487
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h73
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c462
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c216
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/a000.c216
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h262
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c195
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.h87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c232
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c314
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c234
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c131
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h68
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c97
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c61
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c7
-rw-r--r--drivers/net/wireless/mediatek/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/Makefile1
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig10
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile15
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c258
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c78
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c459
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h38
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c112
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c505
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h432
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2.h228
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_core.c88
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c133
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c506
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.c184
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.h68
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c664
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h185
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_init.c875
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.c839
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.h190
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_main.c577
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c453
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h155
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_pci.c110
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_phy.c740
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_regs.h587
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_trace.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_trace.h144
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_tx.c260
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.h71
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c511
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c78
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h44
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c191
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.h19
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c581
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c152
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h23
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c80
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h351
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.c78
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.h4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/util.c8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/util.h4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c32
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c12
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c5
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c164
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c203
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c269
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c206
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c285
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c177
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c663
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h252
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c325
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c52
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.c483
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.h12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c94
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c57
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c34
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h68
-rw-r--r--drivers/net/wireless/ti/wl1251/init.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c41
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c4
-rw-r--r--drivers/net/xen-netfront.c1
-rw-r--r--drivers/nubus/Makefile2
-rw-r--r--drivers/nubus/bus.c117
-rw-r--r--drivers/nubus/nubus.c542
-rw-r--r--drivers/nubus/proc.c281
-rw-r--r--drivers/nvdimm/btt.c201
-rw-r--r--drivers/nvdimm/btt.h47
-rw-r--r--drivers/nvdimm/pfn_devs.c20
-rw-r--r--drivers/nvme/host/Makefile4
-rw-r--r--drivers/nvme/host/core.c152
-rw-r--r--drivers/nvme/host/fabrics.c23
-rw-r--r--drivers/nvme/host/fabrics.h2
-rw-r--r--drivers/nvme/host/fc.c8
-rw-r--r--drivers/nvme/host/lightnvm.c185
-rw-r--r--drivers/nvme/host/multipath.c44
-rw-r--r--drivers/nvme/host/nvme.h21
-rw-r--r--drivers/nvme/host/pci.c282
-rw-r--r--drivers/nvme/host/rdma.c20
-rw-r--r--drivers/nvme/host/trace.c130
-rw-r--r--drivers/nvme/host/trace.h165
-rw-r--r--drivers/nvme/target/Kconfig2
-rw-r--r--drivers/nvme/target/core.c14
-rw-r--r--drivers/nvme/target/fabrics-cmd.c2
-rw-r--r--drivers/nvme/target/fc.c60
-rw-r--r--drivers/nvme/target/fcloop.c246
-rw-r--r--drivers/nvme/target/loop.c3
-rw-r--r--drivers/nvme/target/rdma.c83
-rw-r--r--drivers/nvmem/meson-mx-efuse.c4
-rw-r--r--drivers/of/base.c26
-rw-r--r--drivers/of/of_mdio.c17
-rw-r--r--drivers/of/property.c8
-rw-r--r--drivers/opp/Makefile1
-rw-r--r--drivers/opp/ti-opp-supply.c425
-rw-r--r--drivers/parisc/dino.c10
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/parisc/lba_pci.c33
-rw-r--r--drivers/pci/host/pci-hyperv.c8
-rw-r--r--drivers/pci/host/pcie-rcar.c8
-rw-r--r--drivers/pci/pci-driver.c30
-rw-r--r--drivers/pci/pci.c25
-rw-r--r--drivers/pci/pcie/portdrv_pci.c3
-rw-r--r--drivers/pci/switch/switchtec.c4
-rw-r--r--drivers/perf/Kconfig9
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/arm_dsu_pmu.c843
-rw-r--r--drivers/perf/arm_pmu_platform.c15
-rw-r--r--drivers/perf/arm_spe_pmu.c9
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c31
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c2
-rw-r--r--drivers/phy/phy-core.c4
-rw-r--r--drivers/phy/renesas/Kconfig2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c2
-rw-r--r--drivers/phy/tegra/xusb.c58
-rw-r--r--drivers/pinctrl/Kconfig10
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c16
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c476
-rw-r--r--drivers/pinctrl/pinctrl-single.c5
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c2
-rw-r--r--drivers/platform/chrome/Kconfig10
-rw-r--r--drivers/platform/chrome/Makefile7
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c9
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.h27
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c6
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c5
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c1
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c4
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/surfacepro3_button.c4
-rw-r--r--drivers/platform/x86/wmi.c2
-rw-r--r--drivers/pnp/pnpbios/core.c5
-rw-r--r--drivers/pnp/quirks.c1
-rw-r--r--drivers/power/avs/rockchip-io-domain.c24
-rw-r--r--drivers/power/reset/Kconfig9
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c4
-rw-r--r--drivers/power/reset/imx-snvs-poweroff.c66
-rw-r--r--drivers/power/reset/msm-poweroff.c7
-rw-r--r--drivers/power/reset/zx-reboot.c4
-rw-r--r--drivers/power/supply/ab8500_charger.c6
-rw-r--r--drivers/power/supply/axp20x_ac_power.c8
-rw-r--r--drivers/power/supply/axp288_charger.c290
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c203
-rw-r--r--drivers/power/supply/bq24190_charger.c126
-rw-r--r--drivers/power/supply/bq27xxx_battery.c39
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c2
-rw-r--r--drivers/power/supply/charger-manager.c2
-rw-r--r--drivers/power/supply/cpcap-battery.c4
-rw-r--r--drivers/power/supply/ltc2941-battery-gauge.c25
-rw-r--r--drivers/power/supply/max17042_battery.c54
-rw-r--r--drivers/power/supply/sbs-manager.c2
-rw-r--r--drivers/powercap/intel_rapl.c99
-rw-r--r--drivers/powercap/powercap_sys.c6
-rw-r--r--drivers/pps/pps.c2
-rw-r--r--drivers/ptp/ptp_chardev.c2
-rw-r--r--drivers/ptp/ptp_private.h2
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c2
-rw-r--r--drivers/ras/cec.c2
-rw-r--r--drivers/regulator/Kconfig7
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/core.c383
-rw-r--r--drivers/regulator/internal.h27
-rw-r--r--drivers/regulator/of_regulator.c34
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c84
-rw-r--r--drivers/regulator/sc2731-regulator.c256
-rw-r--r--drivers/regulator/tps65218-regulator.c5
-rw-r--r--drivers/rpmsg/qcom_smd.c4
-rw-r--r--drivers/rpmsg/rpmsg_char.c4
-rw-r--r--drivers/rpmsg/rpmsg_core.c2
-rw-r--r--drivers/rpmsg/rpmsg_internal.h2
-rw-r--r--drivers/rtc/rtc-dev.c2
-rw-r--r--drivers/s390/block/dasd_3990_erp.c10
-rw-r--r--drivers/s390/block/dasd_eer.c4
-rw-r--r--drivers/s390/char/Makefile2
-rw-r--r--drivers/s390/char/monreader.c2
-rw-r--r--drivers/s390/net/Kconfig3
-rw-r--r--drivers/s390/net/lcs.c10
-rw-r--r--drivers/s390/net/lcs.h3
-rw-r--r--drivers/s390/net/qeth_core.h48
-rw-r--r--drivers/s390/net/qeth_core_main.c120
-rw-r--r--drivers/s390/net/qeth_core_mpc.h13
-rw-r--r--drivers/s390/net/qeth_l2.h3
-rw-r--r--drivers/s390/net/qeth_l2_main.c92
-rw-r--r--drivers/s390/net/qeth_l3.h17
-rw-r--r--drivers/s390/net/qeth_l3_main.c546
-rw-r--r--drivers/s390/net/qeth_l3_sys.c189
-rw-r--r--drivers/scsi/3w-9xxx.c26
-rw-r--r--drivers/scsi/3w-9xxx.h2
-rw-r--r--drivers/scsi/3w-sas.c15
-rw-r--r--drivers/scsi/aacraid/aachba.c473
-rw-r--r--drivers/scsi/aacraid/aacraid.h55
-rw-r--r--drivers/scsi/aacraid/commctrl.c6
-rw-r--r--drivers/scsi/aacraid/comminit.c49
-rw-r--r--drivers/scsi/aacraid/commsup.c228
-rw-r--r--drivers/scsi/aacraid/linit.c28
-rw-r--r--drivers/scsi/aacraid/sa.c32
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h567
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c1318
-rw-r--r--drivers/scsi/arm/fas216.c2
-rw-r--r--drivers/scsi/bfa/bfa_core.c2
-rw-r--r--drivers/scsi/bfa/bfa_cs.h6
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h3
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c8
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c3
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h4
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c78
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c62
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c10
-rw-r--r--drivers/scsi/bfa/bfa_port.c15
-rw-r--r--drivers/scsi/bfa/bfa_port.h2
-rw-r--r--drivers/scsi/bfa/bfa_svc.c51
-rw-r--r--drivers/scsi/bfa/bfa_svc.h2
-rw-r--r--drivers/scsi/bfa/bfad.c23
-rw-r--r--drivers/scsi/bfa/bfad_attr.c5
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c16
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c8
-rw-r--r--drivers/scsi/bfa/bfad_im.c6
-rw-r--r--drivers/scsi/bfa/bfad_im.h42
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c60
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c51
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c19
-rw-r--r--drivers/scsi/csiostor/csio_init.c2
-rw-r--r--drivers/scsi/csiostor/csio_init.h1
-rw-r--r--drivers/scsi/csiostor/csio_mb.c6
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c2
-rw-r--r--drivers/scsi/cxlflash/Makefile2
-rw-r--r--drivers/scsi/cxlflash/backend.h41
-rw-r--r--drivers/scsi/cxlflash/common.h8
-rw-r--r--drivers/scsi/cxlflash/cxl_hw.c168
-rw-r--r--drivers/scsi/cxlflash/main.c100
-rw-r--r--drivers/scsi/cxlflash/superpipe.c64
-rw-r--r--drivers/scsi/cxlflash/superpipe.h4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c37
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c22
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c4
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c2
-rw-r--r--drivers/scsi/fnic/fnic_stats.h4
-rw-r--r--drivers/scsi/fnic/fnic_trace.c58
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h46
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c267
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c194
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c332
-rw-r--r--drivers/scsi/hosts.c6
-rw-r--r--drivers/scsi/hpsa.c18
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c34
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c320
-rw-r--r--drivers/scsi/ipr.c4
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/libfc/fc_lport.c4
-rw-r--r--drivers/scsi/libiscsi.c28
-rw-r--r--drivers/scsi/libiscsi_tcp.c9
-rw-r--r--drivers/scsi/libsas/sas_ata.c1
-rw-r--r--drivers/scsi/libsas/sas_discover.c34
-rw-r--r--drivers/scsi/libsas/sas_event.c86
-rw-r--r--drivers/scsi/libsas/sas_expander.c22
-rw-r--r--drivers/scsi/libsas/sas_init.c107
-rw-r--r--drivers/scsi/libsas/sas_internal.h7
-rw-r--r--drivers/scsi/libsas/sas_phy.c69
-rw-r--r--drivers/scsi/libsas/sas_port.c25
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c37
-rw-r--r--drivers/scsi/lpfc/lpfc.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c59
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c65
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c116
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c29
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c294
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c88
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c356
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c71
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c193
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h37
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c175
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c28
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c99
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h6
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c207
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h29
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c33
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c332
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_warpdrive.c33
-rw-r--r--drivers/scsi/osd/osd_initiator.c4
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/ppa.c4
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.c35
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.h2
-rw-r--r--drivers/scsi/qedf/qedf.h4
-rw-r--r--drivers/scsi/qedf/qedf_els.c2
-rw-r--r--drivers/scsi/qedf/qedf_hsi.h68
-rw-r--r--drivers/scsi/qedf/qedf_io.c35
-rw-r--r--drivers/scsi/qedf/qedf_main.c15
-rw-r--r--drivers/scsi/qedf/qedf_version.h8
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c4
-rw-r--r--drivers/scsi/qedi/qedi_fw.c61
-rw-r--r--drivers/scsi/qedi/qedi_fw_api.c139
-rw-r--r--drivers/scsi/qedi/qedi_fw_iscsi.h2
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h5
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c9
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h2
-rw-r--r--drivers/scsi/qedi/qedi_main.c75
-rw-r--r--drivers/scsi/qedi/qedi_version.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h173
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c39
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h32
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c1497
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c1044
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c65
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c56
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c151
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c528
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c761
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c40
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c21
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c12
-rw-r--r--drivers/scsi/scsi_common.c14
-rw-r--r--drivers/scsi/scsi_debug.c724
-rw-r--r--drivers/scsi/scsi_debugfs.c6
-rw-r--r--drivers/scsi/scsi_devinfo.c72
-rw-r--r--drivers/scsi/scsi_dh.c5
-rw-r--r--drivers/scsi/scsi_error.c21
-rw-r--r--drivers/scsi/scsi_lib.c52
-rw-r--r--drivers/scsi/scsi_priv.h15
-rw-r--r--drivers/scsi/scsi_scan.c13
-rw-r--r--drivers/scsi/scsi_sysfs.c13
-rw-r--r--drivers/scsi/scsi_transport_fc.c4
-rw-r--r--drivers/scsi/scsi_transport_spi.c28
-rw-r--r--drivers/scsi/sd.c10
-rw-r--r--drivers/scsi/ses.c11
-rw-r--r--drivers/scsi/sg.c6
-rw-r--r--drivers/scsi/smartpqi/Makefile2
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/storvsc_drv.c7
-rw-r--r--drivers/scsi/ufs/ufshci.h16
-rw-r--r--drivers/scsi/wd719x.c4
-rw-r--r--drivers/spi/spi-armada-3700.c118
-rw-r--r--drivers/spi/spi-atmel.c115
-rw-r--r--drivers/spi/spi-bcm53xx.c26
-rw-r--r--drivers/spi/spi-davinci.c4
-rw-r--r--drivers/spi/spi-dw.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c9
-rw-r--r--drivers/spi/spi-imx.c26
-rw-r--r--drivers/spi/spi-jcore.c4
-rw-r--r--drivers/spi/spi-meson-spicc.c1
-rw-r--r--drivers/spi/spi-orion.c21
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-rspi.c4
-rw-r--r--drivers/spi/spi-s3c64xx.c18
-rw-r--r--drivers/spi/spi-sh-msiof.c128
-rw-r--r--drivers/spi/spi-sirf.c4
-rw-r--r--drivers/spi/spi-sun4i.c2
-rw-r--r--drivers/spi/spi-sun6i.c2
-rw-r--r--drivers/spi/spi-xilinx.c12
-rw-r--r--drivers/ssb/Kconfig11
-rw-r--r--drivers/staging/android/ashmem.c2
-rw-r--r--drivers/staging/android/ion/Kconfig2
-rw-r--r--drivers/staging/android/ion/ion.c4
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c15
-rw-r--r--drivers/staging/ccree/ssi_hash.c2
-rw-r--r--drivers/staging/comedi/comedi_fops.c4
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c2
-rw-r--r--drivers/staging/irda/net/af_irda.c4
-rw-r--r--drivers/staging/irda/net/irnet/irnet_ppp.c8
-rw-r--r--drivers/staging/irda/net/irnet/irnet_ppp.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c23
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c24
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c2
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c2
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c4
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c2
-rw-r--r--drivers/staging/most/aim-cdev/cdev.c4
-rw-r--r--drivers/staging/most/aim-v4l2/video.c4
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c5
-rw-r--r--drivers/staging/pi433/rf69.c2
-rw-r--r--drivers/staging/speakup/speakup_soft.c4
-rw-r--r--drivers/staging/vme/devices/vme_user.c8
-rw-r--r--drivers/target/Kconfig1
-rw-r--r--drivers/target/target_core_pscsi.c4
-rw-r--r--drivers/target/target_core_transport.c46
-rw-r--r--drivers/thermal/cpu_cooling.c201
-rw-r--r--drivers/thunderbolt/nhi.c2
-rw-r--r--drivers/tty/n_gsm.c4
-rw-r--r--drivers/tty/n_hdlc.c6
-rw-r--r--drivers/tty/n_r3964.c6
-rw-r--r--drivers/tty/n_tty.c8
-rw-r--r--drivers/tty/serdev/core.c43
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c24
-rw-r--r--drivers/tty/tty_io.c8
-rw-r--r--drivers/tty/vt/vc_screen.c4
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c2
-rw-r--r--drivers/usb/class/cdc-wdm.c4
-rw-r--r--drivers/usb/class/usblp.c4
-rw-r--r--drivers/usb/class/usbtmc.c4
-rw-r--r--drivers/usb/core/config.c6
-rw-r--r--drivers/usb/core/devices.c2
-rw-r--r--drivers/usb/core/devio.c8
-rw-r--r--drivers/usb/core/quirks.c6
-rw-r--r--drivers/usb/dwc2/core.h4
-rw-r--r--drivers/usb/dwc2/gadget.c42
-rw-r--r--drivers/usb/dwc2/params.c29
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c5
-rw-r--r--drivers/usb/dwc3/gadget.c4
-rw-r--r--drivers/usb/gadget/Kconfig4
-rw-r--r--drivers/usb/gadget/function/f_fs.c4
-rw-r--r--drivers/usb/gadget/function/f_hid.c4
-rw-r--r--drivers/usb/gadget/function/f_ncm.c30
-rw-r--r--drivers/usb/gadget/function/f_printer.c4
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c2
-rw-r--r--drivers/usb/gadget/function/uvc_queue.h2
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c2
-rw-r--r--drivers/usb/gadget/legacy/Kconfig12
-rw-r--r--drivers/usb/gadget/legacy/inode.c4
-rw-r--r--drivers/usb/gadget/udc/core.c28
-rw-r--r--drivers/usb/host/xhci-debugfs.c16
-rw-r--r--drivers/usb/host/xhci-mem.c15
-rw-r--r--drivers/usb/host/xhci-pci.c3
-rw-r--r--drivers/usb/host/xhci-ring.c6
-rw-r--r--drivers/usb/host/xhci.c6
-rw-r--r--drivers/usb/misc/iowarrior.c4
-rw-r--r--drivers/usb/misc/ldusb.c4
-rw-r--r--drivers/usb/misc/legousbtower.c6
-rw-r--r--drivers/usb/misc/usb3503.c2
-rw-r--r--drivers/usb/mon/mon_bin.c12
-rw-r--r--drivers/usb/musb/da8xx.c10
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/option.c17
-rw-r--r--drivers/usb/serial/qcserial.c3
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/storage/unusual_uas.h14
-rw-r--r--drivers/usb/usbip/stub_dev.c3
-rw-r--r--drivers/usb/usbip/stub_main.c5
-rw-r--r--drivers/usb/usbip/stub_rx.c47
-rw-r--r--drivers/usb/usbip/stub_tx.c13
-rw-r--r--drivers/usb/usbip/usbip_common.c33
-rw-r--r--drivers/usb/usbip/usbip_common.h1
-rw-r--r--drivers/usb/usbip/vhci_hcd.c12
-rw-r--r--drivers/usb/usbip/vhci_rx.c23
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c25
-rw-r--r--drivers/usb/usbip/vhci_tx.c3
-rw-r--r--drivers/usb/usbip/vudc_rx.c19
-rw-r--r--drivers/usb/usbip/vudc_tx.c11
-rw-r--r--drivers/vfio/virqfd.c4
-rw-r--r--drivers/vhost/net.c70
-rw-r--r--drivers/vhost/vhost.c25
-rw-r--r--drivers/vhost/vhost.h6
-rw-r--r--drivers/video/backlight/apple_bl.c2
-rw-r--r--drivers/video/backlight/corgi_lcd.c2
-rw-r--r--drivers/video/backlight/tdo24m.c2
-rw-r--r--drivers/video/backlight/tosa_lcd.c2
-rw-r--r--drivers/video/fbdev/macfb.c10
-rw-r--r--drivers/virt/fsl_hypervisor.c4
-rw-r--r--drivers/virtio/virtio_mmio.c43
-rw-r--r--drivers/w1/masters/w1-gpio.c149
-rw-r--r--drivers/w1/w1_netlink.h6
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/rave-sp-wdt.c337
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/balloon.c65
-rw-r--r--drivers/xen/evtchn.c4
-rw-r--r--drivers/xen/gntdev.c8
-rw-r--r--drivers/xen/mcelog.c2
-rw-r--r--drivers/xen/pvcalls-front.c12
-rw-r--r--drivers/xen/swiotlb-xen.c2
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
2354 files changed, 143122 insertions, 57405 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 46505396869e..d650c5b6ec90 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -361,22 +361,6 @@ config ACPI_PCI_SLOT
i.e., segment/bus/device/function tuples, with physical slots in
the system. If you are unsure, say N.
-config X86_PM_TIMER
- bool "Power Management Timer Support" if EXPERT
- depends on X86
- default y
- help
- The Power Management Timer is available on all ACPI-capable systems,
- in most cases even if ACPI is unusable or blacklisted.
-
- This timing source is not affected by power management features
- like aggressive processor idling, throttling, frequency and/or
- voltage scaling, unlike the commonly used Time Stamp Counter
- (TSC) timing source.
-
- You should nearly always say Y here because many modern
- systems require this timer.
-
config ACPI_CONTAINER
bool "Container and Module Devices"
default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU)
@@ -564,3 +548,19 @@ config TPS68470_PMIC_OPREGION
using this, are probed.
endif # ACPI
+
+config X86_PM_TIMER
+ bool "Power Management Timer Support" if EXPERT
+ depends on X86 && (ACPI || JAILHOUSE_GUEST)
+ default y
+ help
+ The Power Management Timer is available on all ACPI-capable systems,
+ in most cases even if ACPI is unusable or blacklisted.
+
+ This timing source is not affected by power management features
+ like aggressive processor idling, throttling, frequency and/or
+ voltage scaling, unlike the commonly used Time Stamp Counter
+ (TSC) timing source.
+
+ You should nearly always say Y here because many modern
+ systems require this timer.
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 3ec05aa1a903..2ff5c8c04e3b 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -718,9 +718,9 @@ again:
return size > 0 ? size : ret;
}
-static unsigned int acpi_aml_poll(struct file *file, poll_table *wait)
+static __poll_t acpi_aml_poll(struct file *file, poll_table *wait)
{
- int masks = 0;
+ __poll_t masks = 0;
poll_wait(file, &acpi_aml_io.wait, wait);
if (acpi_aml_user_readable())
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 7f2b02cc8ea1..2bcffec8dbf0 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -427,6 +427,142 @@ out:
return 0;
}
+struct lpss_device_links {
+ const char *supplier_hid;
+ const char *supplier_uid;
+ const char *consumer_hid;
+ const char *consumer_uid;
+ u32 flags;
+};
+
+/*
+ * The _DEP method is used to identify dependencies but instead of creating
+ * device links for every handle in _DEP, only links in the following list are
+ * created. That is necessary because, in the general case, _DEP can refer to
+ * devices that might not have drivers, or that are on different buses, or where
+ * the supplier is not enumerated until after the consumer is probed.
+ */
+static const struct lpss_device_links lpss_device_links[] = {
+ {"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
+};
+
+static bool hid_uid_match(const char *hid1, const char *uid1,
+ const char *hid2, const char *uid2)
+{
+ return !strcmp(hid1, hid2) && uid1 && uid2 && !strcmp(uid1, uid2);
+}
+
+static bool acpi_lpss_is_supplier(struct acpi_device *adev,
+ const struct lpss_device_links *link)
+{
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ link->supplier_hid, link->supplier_uid);
+}
+
+static bool acpi_lpss_is_consumer(struct acpi_device *adev,
+ const struct lpss_device_links *link)
+{
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ link->consumer_hid, link->consumer_uid);
+}
+
+struct hid_uid {
+ const char *hid;
+ const char *uid;
+};
+
+static int match_hid_uid(struct device *dev, void *data)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct hid_uid *id = data;
+
+ if (!adev)
+ return 0;
+
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ id->hid, id->uid);
+}
+
+static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
+{
+ struct hid_uid data = {
+ .hid = hid,
+ .uid = uid,
+ };
+
+ return bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
+}
+
+static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
+{
+ struct acpi_handle_list dep_devices;
+ acpi_status status;
+ int i;
+
+ if (!acpi_has_method(adev->handle, "_DEP"))
+ return false;
+
+ status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
+ &dep_devices);
+ if (ACPI_FAILURE(status)) {
+ dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
+ return false;
+ }
+
+ for (i = 0; i < dep_devices.count; i++) {
+ if (dep_devices.handles[i] == handle)
+ return true;
+ }
+
+ return false;
+}
+
+static void acpi_lpss_link_consumer(struct device *dev1,
+ const struct lpss_device_links *link)
+{
+ struct device *dev2;
+
+ dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
+ if (!dev2)
+ return;
+
+ if (acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
+ device_link_add(dev2, dev1, link->flags);
+
+ put_device(dev2);
+}
+
+static void acpi_lpss_link_supplier(struct device *dev1,
+ const struct lpss_device_links *link)
+{
+ struct device *dev2;
+
+ dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
+ if (!dev2)
+ return;
+
+ if (acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
+ device_link_add(dev1, dev2, link->flags);
+
+ put_device(dev2);
+}
+
+static void acpi_lpss_create_device_links(struct acpi_device *adev,
+ struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
+ const struct lpss_device_links *link = &lpss_device_links[i];
+
+ if (acpi_lpss_is_supplier(adev, link))
+ acpi_lpss_link_consumer(&pdev->dev, link);
+
+ if (acpi_lpss_is_consumer(adev, link))
+ acpi_lpss_link_supplier(&pdev->dev, link);
+ }
+}
+
static int acpi_lpss_create_device(struct acpi_device *adev,
const struct acpi_device_id *id)
{
@@ -465,6 +601,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
acpi_dev_free_resource_list(&resource_list);
if (!pdata->mmio_base) {
+ /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
+ adev->pnp.type.platform_id = 0;
/* Skip the device, but continue the namespace scan. */
ret = 0;
goto err_out;
@@ -500,6 +638,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev)) {
+ acpi_lpss_create_device_links(adev, pdev);
return 1;
}
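
The hunks above wire up consumer/supplier device links for the LPSS pair listed in lpss_device_links[]. As a minimal sketch of the underlying kernel API (not part of this patch; example_link() is an illustrative name), device_link_add() with DL_FLAG_PM_RUNTIME makes runtime-PM activity on the consumer implicitly power up the supplier first:

#include <linux/device.h>

/* Illustrative only: the core call acpi_lpss_link_consumer() and
 * acpi_lpss_link_supplier() reduce to once the _DEP check passes. */
static void example_link(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier, DL_FLAG_PM_RUNTIME);
	if (!link)
		dev_warn(consumer, "failed to link to supplier %s\n",
			 dev_name(supplier));
}
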
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 0972ec0e2eb8..f53ccc680238 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -80,8 +80,8 @@ MODULE_PARM_DESC(report_key_events,
static bool device_id_scheme = false;
module_param(device_id_scheme, bool, 0444);
-static bool only_lcd = false;
-module_param(only_lcd, bool, 0444);
+static int only_lcd = -1;
+module_param(only_lcd, int, 0444);
static int register_count;
static DEFINE_MUTEX(register_count_mutex);
@@ -2136,6 +2136,16 @@ int acpi_video_register(void)
goto leave;
}
+ /*
+ * We're seeing a lot of bogus backlight interfaces on newer machines
+ * without a LCD such as desktops, servers and HDMI sticks. Checking
+ * the lcd flag fixes this, so enable this on any machines which are
+ * win8 ready (where we also prefer the native backlight driver, so
+ * normally the acpi_video code should not register there anyway).
+ */
+ if (only_lcd == -1)
+ only_lcd = acpi_osi_is_win8();
+
dmi_check_system(video_dmi_table);
ret = acpi_bus_register_driver(&acpi_video_bus);
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 7a1a68b5ac5c..2243c8164b34 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -80,6 +80,9 @@
prefix, ACPICA_COPYRIGHT, \
prefix
+#define ACPI_COMMON_BUILD_TIME \
+ "Build date/time: %s %s\n", __DATE__, __TIME__
+
/* Macros for usage messages */
#define ACPI_USAGE_HEADER(usage) \
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 71743e5252f5..54b8d9df9423 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -223,6 +223,10 @@ void
acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags);
void
+acpi_db_create_execution_thread(char *method_name_arg,
+ char **arguments, acpi_object_type *types);
+
+void
acpi_db_create_execution_threads(char *num_threads_arg,
char *num_loops_arg, char *method_name_arg);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 95eed442703f..45ef3f5dc9ad 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -46,7 +46,7 @@
/*****************************************************************************
*
- * Globals related to the ACPI tables
+ * Globals related to the incoming ACPI tables
*
****************************************************************************/
@@ -87,7 +87,7 @@ ACPI_GLOBAL(u8, acpi_gbl_integer_nybble_width);
/*****************************************************************************
*
- * Mutual exclusion within ACPICA subsystem
+ * Mutual exclusion within the ACPICA subsystem
*
****************************************************************************/
@@ -167,7 +167,7 @@ ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);
ACPI_INIT_GLOBAL(u8, acpi_gbl_namespace_initialized, FALSE);
-/* Misc */
+/* Miscellaneous */
ACPI_GLOBAL(u32, acpi_gbl_original_mode);
ACPI_GLOBAL(u32, acpi_gbl_ns_lookup_count);
@@ -191,10 +191,9 @@ extern const char acpi_gbl_lower_hex_digits[];
extern const char acpi_gbl_upper_hex_digits[];
extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-
/* Lists for tracking memory allocations (debug only) */
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list);
ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list);
ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats);
@@ -203,7 +202,7 @@ ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking);
/*****************************************************************************
*
- * Namespace globals
+ * ACPI Namespace
*
****************************************************************************/
@@ -234,15 +233,20 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
/*****************************************************************************
*
- * Interpreter globals
+ * Interpreter/Parser globals
*
****************************************************************************/
-ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
-
/* Control method single step flag */
ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
+ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
+ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
+
+/* ASL/ASL+ converter */
+
+ACPI_INIT_GLOBAL(u8, acpi_gbl_capture_comments, FALSE);
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
/*****************************************************************************
*
@@ -252,7 +256,6 @@ ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
extern struct acpi_bit_register_info
acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
-
ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a);
ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
@@ -263,7 +266,6 @@ ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
****************************************************************************/
#if (!ACPI_REDUCED_HARDWARE)
-
ACPI_GLOBAL(u8, acpi_gbl_all_gpes_initialized);
ACPI_GLOBAL(struct acpi_gpe_xrupt_info *, acpi_gbl_gpe_xrupt_list_head);
ACPI_GLOBAL(struct acpi_gpe_block_info *,
@@ -272,10 +274,8 @@ ACPI_GLOBAL(acpi_gbl_event_handler, acpi_gbl_global_event_handler);
ACPI_GLOBAL(void *, acpi_gbl_global_event_handler_context);
ACPI_GLOBAL(struct acpi_fixed_event_handler,
acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]);
-
extern struct acpi_fixed_event_info
acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
-
#endif /* !ACPI_REDUCED_HARDWARE */
/*****************************************************************************
@@ -291,14 +291,14 @@ ACPI_GLOBAL(u32, acpi_gpe_count);
ACPI_GLOBAL(u32, acpi_sci_count);
ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]);
-/* Support for dynamic control method tracing mechanism */
+/* Dynamic control method tracing mechanism */
ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
/*****************************************************************************
*
- * Debugger and Disassembler globals
+ * Debugger and Disassembler
*
****************************************************************************/
@@ -326,7 +326,6 @@ ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list);
#endif
#ifdef ACPI_DEBUGGER
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
ACPI_INIT_GLOBAL(acpi_thread_id, acpi_gbl_db_thread_id, ACPI_INVALID_THREAD_ID);
@@ -340,7 +339,6 @@ ACPI_GLOBAL(u32, acpi_gbl_db_console_debug_level);
ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_db_scope_node);
ACPI_GLOBAL(u8, acpi_gbl_db_terminate_loop);
ACPI_GLOBAL(u8, acpi_gbl_db_threads_terminated);
-
ACPI_GLOBAL(char *, acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]);
ACPI_GLOBAL(acpi_object_type, acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]);
@@ -350,32 +348,33 @@ ACPI_GLOBAL(char, acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]);
ACPI_GLOBAL(char, acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]);
ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
-/*
- * Statistic globals
- */
+/* Statistics globals */
+
ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TOTAL_TYPES]);
ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TOTAL_TYPES]);
ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
ACPI_GLOBAL(u32, acpi_gbl_num_objects);
-
#endif /* ACPI_DEBUGGER */
#if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER)
-
ACPI_GLOBAL(const char, *acpi_gbl_pld_panel_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_vertical_position_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_horizontal_position_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]);
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_disasm_flag, FALSE);
-
#endif
-/*
- * Meant for the -ca option.
- */
+/*****************************************************************************
+ *
+ * ACPICA application-specific globals
+ *
+ ****************************************************************************/
+
+/* ASL-to-ASL+ conversion utility (implemented within the iASL compiler) */
+
+#ifdef ACPI_ASL_COMPILER
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_inline_comment, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_end_node_comment, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_open_brace_comment, NULL);
@@ -386,23 +385,18 @@ ACPI_INIT_GLOBAL(char *, acpi_gbl_current_filename, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_parent_filename, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_include_filename, NULL);
-ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
@@ -410,30 +404,18 @@ ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
ACPI_INIT_GLOBAL(struct acpi_comment_addr_node,
*acpi_gbl_comment_addr_list_head, NULL);
-
-ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
-
ACPI_INIT_GLOBAL(struct acpi_file_node, *acpi_gbl_file_tree_root, NULL);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_reg_comment_cache);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_comment_addr_cache);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_file_cache);
-ACPI_INIT_GLOBAL(u8, gbl_capture_comments, FALSE);
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_asl_conversion, FALSE);
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_conv_debug_file, NULL);
-
ACPI_GLOBAL(char, acpi_gbl_table_sig[4]);
-
-/*****************************************************************************
- *
- * Application globals
- *
- ****************************************************************************/
+#endif
#ifdef ACPI_APPLICATION
-
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL);
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_output_file, NULL);
ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE);
@@ -442,16 +424,6 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE);
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_print_lock); /* For print buffer */
ACPI_GLOBAL(char, acpi_gbl_print_buffer[1024]);
-
#endif /* ACPI_APPLICATION */
-/*****************************************************************************
- *
- * Info/help support
- *
- ****************************************************************************/
-
-extern const struct ah_predefined_name asl_predefined_info[];
-extern const struct ah_device_id asl_device_ids[];
-
#endif /* __ACGLOBAL_H__ */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0d45b8bb1678..a56675f0661e 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -622,7 +622,7 @@ struct acpi_control_state {
union acpi_parse_object *predicate_op;
u8 *aml_predicate_start; /* Start of if/while predicate */
u8 *package_end; /* End of if/while block */
- u32 loop_count; /* While() loop counter */
+ u64 loop_timeout; /* While() loop timeout */
};
/*
@@ -1218,16 +1218,17 @@ struct acpi_db_method_info {
acpi_object_type *types;
/*
- * Arguments to be passed to method for the command
- * Threads -
- * the Number of threads, ID of current thread and
- * Index of current thread inside all them created.
+ * Arguments to be passed to method for the commands Threads and
+ * Background. Note, ACPI specifies a maximum of 7 arguments (0 - 6).
+ *
+ * For the Threads command: the Number of threads, the ID of the
+ * current thread, and the Index of the current thread among all
+ * those created.
*/
char init_args;
#ifdef ACPI_DEBUGGER
- acpi_object_type arg_types[4];
+ acpi_object_type arg_types[ACPI_METHOD_NUM_ARGS];
#endif
- char *arguments[4];
+ char *arguments[ACPI_METHOD_NUM_ARGS];
char num_threads_str[11];
char id_of_thread_str[11];
char index_of_thread_str[11];
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index c7f0c96cc00f..128a3d71b598 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -455,7 +455,7 @@
* the plist contains a set of parens to allow variable-length lists.
* These macros are used for both the debug and non-debug versions of the code.
*/
-#define ACPI_ERROR_NAMESPACE(s, e) acpi_ut_namespace_error (AE_INFO, s, e);
+#define ACPI_ERROR_NAMESPACE(s, p, e) acpi_ut_prefixed_namespace_error (AE_INFO, s, p, e);
#define ACPI_ERROR_METHOD(s, n, p, e) acpi_ut_method_error (AE_INFO, s, n, p, e);
#define ACPI_WARN_PREDEFINED(plist) acpi_ut_predefined_warning plist
#define ACPI_INFO_PREDEFINED(plist) acpi_ut_predefined_info plist
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 54a0c51b3e37..2fb1bb78d85c 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -289,6 +289,9 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
u8 no_trailing);
+char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
+ const char *internal_path);
+
char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
acpi_status
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 83b75e9db7ef..b6b29d717824 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -118,9 +118,6 @@ extern const char *acpi_gbl_ptyp_decode[];
#ifndef ACPI_MSG_ERROR
#define ACPI_MSG_ERROR "ACPI Error: "
#endif
-#ifndef ACPI_MSG_EXCEPTION
-#define ACPI_MSG_EXCEPTION "ACPI Exception: "
-#endif
#ifndef ACPI_MSG_WARNING
#define ACPI_MSG_WARNING "ACPI Warning: "
#endif
@@ -129,10 +126,10 @@ extern const char *acpi_gbl_ptyp_decode[];
#endif
#ifndef ACPI_MSG_BIOS_ERROR
-#define ACPI_MSG_BIOS_ERROR "ACPI BIOS Error (bug): "
+#define ACPI_MSG_BIOS_ERROR "Firmware Error (ACPI): "
#endif
#ifndef ACPI_MSG_BIOS_WARNING
-#define ACPI_MSG_BIOS_WARNING "ACPI BIOS Warning (bug): "
+#define ACPI_MSG_BIOS_WARNING "Firmware Warning (ACPI): "
#endif
/*
@@ -233,10 +230,10 @@ u64 acpi_ut_implicit_strtoul64(char *string);
*/
acpi_status acpi_ut_init_globals(void);
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-
const char *acpi_ut_get_mutex_name(u32 mutex_id);
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type);
#endif
@@ -641,9 +638,11 @@ void ut_convert_backslashes(char *pathname);
void acpi_ut_repair_name(char *name);
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) || defined (ACPI_DEBUG_OUTPUT)
u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source);
+void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size);
+
u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source);
u8
@@ -737,9 +736,11 @@ acpi_ut_predefined_bios_error(const char *module_name,
u8 node_flags, const char *format, ...);
void
-acpi_ut_namespace_error(const char *module_name,
- u32 line_number,
- const char *internal_name, acpi_status lookup_status);
+acpi_ut_prefixed_namespace_error(const char *module_name,
+ u32 line_number,
+ union acpi_generic_state *prefix_scope,
+ const char *internal_name,
+ acpi_status lookup_status);
void
acpi_ut_method_error(const char *module_name,
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index 3b30319752f0..ed088fceb18d 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -67,6 +67,8 @@ static acpi_status
acpi_db_execution_walk(acpi_handle obj_handle,
u32 nesting_level, void *context, void **return_value);
+static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context);
+
/*******************************************************************************
*
* FUNCTION: acpi_db_delete_objects
@@ -229,7 +231,7 @@ static acpi_status acpi_db_execute_setup(struct acpi_db_method_info *info)
ACPI_FUNCTION_NAME(db_execute_setup);
- /* Catenate the current scope to the supplied name */
+ /* Concatenate the current scope to the supplied name */
info->pathname[0] = 0;
if ((info->name[0] != '\\') && (info->name[0] != '/')) {
@@ -611,6 +613,112 @@ static void ACPI_SYSTEM_XFACE acpi_db_method_thread(void *context)
/*******************************************************************************
*
+ * FUNCTION: acpi_db_single_execution_thread
+ *
+ * PARAMETERS: context - Method info struct
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Create one thread and execute a method
+ *
+ ******************************************************************************/
+
+static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context)
+{
+ struct acpi_db_method_info *info = context;
+ acpi_status status;
+ struct acpi_buffer return_obj;
+
+ acpi_os_printf("\n");
+
+ status = acpi_db_execute_method(info, &return_obj);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("%s During evaluation of %s\n",
+ acpi_format_exception(status), info->pathname);
+ return;
+ }
+
+ /* Display a return object, if any */
+
+ if (return_obj.length) {
+ acpi_os_printf("Evaluation of %s returned object %p, "
+ "external buffer length %X\n",
+ acpi_gbl_db_method_info.pathname,
+ return_obj.pointer, (u32)return_obj.length);
+
+ acpi_db_dump_external_object(return_obj.pointer, 1);
+ }
+
+ acpi_os_printf("\nBackground thread completed\n%c ",
+ ACPI_DEBUGGER_COMMAND_PROMPT);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_db_create_execution_thread
+ *
+ * PARAMETERS: method_name_arg - Control method to execute
+ * arguments - Array of arguments to the method
+ * types - Corresponding array of object types
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Create a single thread to evaluate a namespace object. Handles
+ * arguments passed on command line for control methods.
+ *
+ ******************************************************************************/
+
+void
+acpi_db_create_execution_thread(char *method_name_arg,
+ char **arguments, acpi_object_type *types)
+{
+ acpi_status status;
+ u32 i;
+
+ memset(&acpi_gbl_db_method_info, 0, sizeof(struct acpi_db_method_info));
+ acpi_gbl_db_method_info.name = method_name_arg;
+ acpi_gbl_db_method_info.init_args = 1;
+ acpi_gbl_db_method_info.args = acpi_gbl_db_method_info.arguments;
+ acpi_gbl_db_method_info.types = acpi_gbl_db_method_info.arg_types;
+
+ /* Set up the method arguments, up to the maximum of 7 (0-6) */
+
+ for (i = 0; (i < ACPI_METHOD_NUM_ARGS) && *arguments; i++) {
+ acpi_gbl_db_method_info.arguments[i] = *arguments;
+ arguments++;
+
+ acpi_gbl_db_method_info.arg_types[i] = *types;
+ types++;
+ }
+
+ status = acpi_db_execute_setup(&acpi_gbl_db_method_info);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ /* Get the NS node; this also verifies that the object exists */
+
+ status = acpi_get_handle(NULL, acpi_gbl_db_method_info.pathname,
+ &acpi_gbl_db_method_info.method);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("%s Could not get handle for %s\n",
+ acpi_format_exception(status),
+ acpi_gbl_db_method_info.pathname);
+ return;
+ }
+
+ status = acpi_os_execute(OSL_DEBUGGER_EXEC_THREAD,
+ acpi_db_single_execution_thread,
+ &acpi_gbl_db_method_info);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ acpi_os_printf("\nBackground thread started\n");
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_db_create_execution_threads
*
* PARAMETERS: num_threads_arg - Number of threads to create
diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c
index 4d81ea291d93..cf9607945704 100644
--- a/drivers/acpi/acpica/dbfileio.c
+++ b/drivers/acpi/acpica/dbfileio.c
@@ -99,8 +99,8 @@ void acpi_db_open_debug_file(char *name)
}
acpi_os_printf("Debug output file %s opened\n", name);
- strncpy(acpi_gbl_db_debug_filename, name,
- sizeof(acpi_gbl_db_debug_filename));
+ acpi_ut_safe_strncpy(acpi_gbl_db_debug_filename, name,
+ sizeof(acpi_gbl_db_debug_filename));
acpi_gbl_db_output_to_file = TRUE;
}
#endif
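
The strncpy() replaced above (psutils.c receives the same treatment later in this diff) does not NUL-terminate the destination when the source is as long as the buffer. A minimal userspace sketch of a safe variant, assuming the same truncate-and-terminate semantics as ACPICA's acpi_ut_safe_strncpy():

#include <string.h>

static void safe_strncpy(char *dest, const char *source, size_t dest_size)
{
	/* Copy at most dest_size bytes, then force NUL termination */
	strncpy(dest, source, dest_size);
	dest[dest_size - 1] = '\0';
}
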
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 2626d79db064..954ca3b981a7 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -136,6 +136,7 @@ enum acpi_ex_debugger_commands {
CMD_UNLOAD,
CMD_TERMINATE,
+ CMD_BACKGROUND,
CMD_THREADS,
CMD_TEST,
@@ -212,6 +213,7 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
{"UNLOAD", 1},
{"TERMINATE", 0},
+ {"BACKGROUND", 1},
{"THREADS", 3},
{"TEST", 1},
@@ -222,9 +224,56 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
/*
* Help for all debugger commands. First argument is the number of lines
* of help to output for the command.
+ *
+ * Note: Some commands are not supported by the kernel-level version of
+ * the debugger.
*/
static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
- {0, "\nGeneral-Purpose Commands:", "\n"},
+ {0, "\nNamespace Access:", "\n"},
+ {1, " Businfo", "Display system bus info\n"},
+ {1, " Disassemble <Method>", "Disassemble a control method\n"},
+ {1, " Find <AcpiName> (? is wildcard)",
+ "Find ACPI name(s) with wildcards\n"},
+ {1, " Integrity", "Validate namespace integrity\n"},
+ {1, " Methods", "Display list of loaded control methods\n"},
+ {1, " Namespace [Object] [Depth]",
+ "Display loaded namespace tree/subtree\n"},
+ {1, " Notify <Object> <Value>", "Send a notification on Object\n"},
+ {1, " Objects [ObjectType]",
+ "Display summary of all objects or just given type\n"},
+ {1, " Owner <OwnerId> [Depth]",
+ "Display loaded namespace by object owner\n"},
+ {1, " Paths", "Display full pathnames of namespace objects\n"},
+ {1, " Predefined", "Check all predefined names\n"},
+ {1, " Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
+ {1, " References <Addr>", "Find all references to object at addr\n"},
+ {1, " Resources [DeviceName]",
+ "Display Device resources (no arg = all devices)\n"},
+ {1, " Set N <NamedObject> <Value>", "Set value for named integer\n"},
+ {1, " Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
+ {1, " Type <Object>", "Display object type\n"},
+
+ {0, "\nControl Method Execution:", "\n"},
+ {1, " Evaluate <Namepath> [Arguments]",
+ "Evaluate object or control method\n"},
+ {1, " Execute <Namepath> [Arguments]", "Synonym for Evaluate\n"},
+#ifdef ACPI_APPLICATION
+ {1, " Background <Namepath> [Arguments]",
+ "Evaluate object/method in a separate thread\n"},
+ {1, " Thread <Threads><Loops><NamePath>",
+ "Spawn threads to execute method(s)\n"},
+#endif
+ {1, " Debug <Namepath> [Arguments]", "Single-Step a control method\n"},
+ {7, " [Arguments] formats:", "Control method argument formats\n"},
+ {1, " Hex Integer", "Integer\n"},
+ {1, " \"Ascii String\"", "String\n"},
+ {1, " (Hex Byte List)", "Buffer\n"},
+ {1, " (01 42 7A BF)", "Buffer example (4 bytes)\n"},
+ {1, " [Package Element List]", "Package\n"},
+ {1, " [0x01 0x1234 \"string\"]",
+ "Package example (3 elements)\n"},
+
+ {0, "\nMiscellaneous:", "\n"},
{1, " Allocations", "Display list of current memory allocations\n"},
{2, " Dump <Address>|<Namepath>", "\n"},
{0, " [Byte|Word|Dword|Qword]",
@@ -248,46 +297,30 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
{1, " Stack", "Display CPU stack usage\n"},
{1, " Tables", "Info about current ACPI table(s)\n"},
{1, " Tables", "Display info about loaded ACPI tables\n"},
+#ifdef ACPI_APPLICATION
+ {1, " Terminate", "Delete namespace and all internal objects\n"},
+#endif
{1, " ! <CommandNumber>", "Execute command from history buffer\n"},
{1, " !!", "Execute last command again\n"},
- {0, "\nNamespace Access Commands:", "\n"},
- {1, " Businfo", "Display system bus info\n"},
- {1, " Disassemble <Method>", "Disassemble a control method\n"},
- {1, " Find <AcpiName> (? is wildcard)",
- "Find ACPI name(s) with wildcards\n"},
- {1, " Integrity", "Validate namespace integrity\n"},
- {1, " Methods", "Display list of loaded control methods\n"},
- {1, " Namespace [Object] [Depth]",
- "Display loaded namespace tree/subtree\n"},
- {1, " Notify <Object> <Value>", "Send a notification on Object\n"},
- {1, " Objects [ObjectType]",
- "Display summary of all objects or just given type\n"},
- {1, " Owner <OwnerId> [Depth]",
- "Display loaded namespace by object owner\n"},
- {1, " Paths", "Display full pathnames of namespace objects\n"},
- {1, " Predefined", "Check all predefined names\n"},
- {1, " Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
- {1, " References <Addr>", "Find all references to object at addr\n"},
- {1, " Resources [DeviceName]",
- "Display Device resources (no arg = all devices)\n"},
- {1, " Set N <NamedObject> <Value>", "Set value for named integer\n"},
- {1, " Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
- {1, " Type <Object>", "Display object type\n"},
+ {0, "\nMethod and Namespace Debugging:", "\n"},
+ {5, " Trace <State> [<Namepath>] [Once]",
+ "Trace control method execution\n"},
+ {1, " Enable", "Enable all messages\n"},
+ {1, " Disable", "Disable tracing\n"},
+ {1, " Method", "Enable method execution messages\n"},
+ {1, " Opcode", "Enable opcode execution messages\n"},
+ {3, " Test <TestName>", "Invoke a debug test\n"},
+ {1, " Objects", "Read/write/compare all namespace data objects\n"},
+ {1, " Predefined",
+ "Validate all ACPI predefined names (_STA, etc.)\n"},
+ {1, " Execute predefined",
+ "Execute all predefined (public) methods\n"},
- {0, "\nControl Method Execution Commands:", "\n"},
+ {0, "\nControl Method Single-Step Execution:", "\n"},
{1, " Arguments (or Args)", "Display method arguments\n"},
{1, " Breakpoint <AmlOffset>", "Set an AML execution breakpoint\n"},
{1, " Call", "Run to next control method invocation\n"},
- {1, " Debug <Namepath> [Arguments]", "Single Step a control method\n"},
- {6, " Evaluate", "Synonym for Execute\n"},
- {5, " Execute <Namepath> [Arguments]", "Execute control method\n"},
- {1, " Hex Integer", "Integer method argument\n"},
- {1, " \"Ascii String\"", "String method argument\n"},
- {1, " (Hex Byte List)", "Buffer method argument\n"},
- {1, " [Package Element List]", "Package method argument\n"},
- {5, " Execute predefined",
- "Execute all predefined (public) methods\n"},
{1, " Go", "Allow method to run to completion\n"},
{1, " Information", "Display info about the current method\n"},
{1, " Into", "Step into (not over) a method call\n"},
@@ -296,41 +329,24 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
{1, " Results", "Display method result stack\n"},
{1, " Set <A|L> <#> <Value>", "Set method data (Arguments/Locals)\n"},
{1, " Stop", "Terminate control method\n"},
- {5, " Trace <State> [<Namepath>] [Once]",
- "Trace control method execution\n"},
- {1, " Enable", "Enable all messages\n"},
- {1, " Disable", "Disable tracing\n"},
- {1, " Method", "Enable method execution messages\n"},
- {1, " Opcode", "Enable opcode execution messages\n"},
{1, " Tree", "Display control method calling tree\n"},
{1, " <Enter>", "Single step next AML opcode (over calls)\n"},
#ifdef ACPI_APPLICATION
- {0, "\nHardware Simulation Commands:", "\n"},
- {1, " EnableAcpi", "Enable ACPI (hardware) mode\n"},
- {1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
- {1, " Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
- {1, " Gpes", "Display info on all GPE devices\n"},
- {1, " Sci", "Generate an SCI\n"},
- {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
-
- {0, "\nFile I/O Commands:", "\n"},
+ {0, "\nFile Operations:", "\n"},
{1, " Close", "Close debug output file\n"},
{1, " Load <Input Filename>", "Load ACPI table from a file\n"},
{1, " Open <Output Filename>", "Open a file for debug output\n"},
{1, " Unload <Namepath>",
"Unload an ACPI table via namespace object\n"},
- {0, "\nUser Space Commands:", "\n"},
- {1, " Terminate", "Delete namespace and all internal objects\n"},
- {1, " Thread <Threads><Loops><NamePath>",
- "Spawn threads to execute method(s)\n"},
-
- {0, "\nDebug Test Commands:", "\n"},
- {3, " Test <TestName>", "Invoke a debug test\n"},
- {1, " Objects", "Read/write/compare all namespace data objects\n"},
- {1, " Predefined",
- "Execute all ACPI predefined names (_STA, etc.)\n"},
+ {0, "\nHardware Simulation:", "\n"},
+ {1, " EnableAcpi", "Enable ACPI (hardware) mode\n"},
+ {1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
+ {1, " Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
+ {1, " Gpes", "Display info on all GPE devices\n"},
+ {1, " Sci", "Generate an SCI\n"},
+ {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
#endif
{0, NULL, NULL}
};
@@ -442,11 +458,15 @@ static void acpi_db_display_help(char *command)
/* No argument to help, display help for all commands */
+ acpi_os_printf("\nSummary of AML Debugger Commands\n\n");
+
while (next->invocation) {
acpi_os_printf("%-38s%s", next->invocation,
next->description);
next++;
}
+ acpi_os_printf("\n");
+
} else {
/* Display help for all commands that match the substring */
@@ -1087,6 +1107,13 @@ acpi_db_command_dispatch(char *input_buffer,
/* acpi_initialize (NULL); */
break;
+ case CMD_BACKGROUND:
+
+ acpi_db_create_execution_thread(acpi_gbl_db_args[1],
+ &acpi_gbl_db_args[2],
+ &acpi_gbl_db_arg_types[2]);
+ break;
+
case CMD_THREADS:
acpi_db_create_execution_threads(acpi_gbl_db_args[1],
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index f470e81b0499..4b6ebc2a2851 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -118,6 +118,8 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
control_state->control.package_end =
walk_state->parser_state.pkg_end;
control_state->control.opcode = op->common.aml_opcode;
+ control_state->control.loop_timeout = acpi_os_get_timer() +
+ (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
/* Push the control state on this walk's control stack */
@@ -206,15 +208,15 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
/* Predicate was true, the body of the loop was just executed */
/*
- * This loop counter mechanism allows the interpreter to escape
- * possibly infinite loops. This can occur in poorly written AML
- * when the hardware does not respond within a while loop and the
- * loop does not implement a timeout.
+ * This infinite loop detection mechanism allows the interpreter
+ * to escape possibly infinite loops. This can occur in poorly
+ * written AML when the hardware does not respond within a while
+ * loop and the loop does not implement a timeout.
*/
- control_state->control.loop_count++;
- if (control_state->control.loop_count >
- acpi_gbl_max_loop_iterations) {
- status = AE_AML_INFINITE_LOOP;
+ if (ACPI_TIME_AFTER(acpi_os_get_timer(),
+ control_state->control.
+ loop_timeout)) {
+ status = AE_AML_LOOP_TIMEOUT;
break;
}
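
The hunk above swaps the old iteration counter for a wall-clock deadline stamped when the While() is entered; acpi_os_get_timer() reports time in 100ns units, so the deadline lands acpi_gbl_max_loop_iterations seconds in the future. A standalone sketch of the same escape mechanism, using clock_gettime() in place of acpi_os_get_timer() (names here are illustrative):

#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static int run_loop_with_deadline(unsigned int max_seconds)
{
	uint64_t deadline = now_ns() + (uint64_t)max_seconds * 1000000000ull;

	for (;;) {
		/* ... evaluate the While() predicate and body here ... */

		if (now_ns() > deadline)
			return -1;	/* analogous to AE_AML_LOOP_TIMEOUT */
	}
}
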
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 7bcf5f5ea029..0cab34a593d5 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -209,7 +209,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
ACPI_IMODE_LOAD_PASS1, flags,
walk_state, &node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
}
@@ -383,7 +384,9 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
walk_state,
&info->connection_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(child->common.
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ child->common.
value.name,
status);
return_ACPI_STATUS(status);
@@ -402,7 +405,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
ACPI_NS_DONT_OPEN_SCOPE,
walk_state, &info->field_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ (char *)&arg->named.name,
status);
return_ACPI_STATUS(status);
} else {
@@ -498,7 +502,8 @@ acpi_ds_create_field(union acpi_parse_object *op,
&region_node);
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.name, status);
return_ACPI_STATUS(status);
}
}
@@ -618,7 +623,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
ACPI_IMODE_LOAD_PASS1, flags,
walk_state, &node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ (char *)&arg->named.name,
status);
if (status != AE_ALREADY_EXISTS) {
return_ACPI_STATUS(status);
@@ -681,7 +687,8 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
&region_node);
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.name, status);
return_ACPI_STATUS(status);
}
}
@@ -695,7 +702,8 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
@@ -765,7 +773,8 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
@@ -778,7 +787,8 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.data_register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 82448551781b..b21fe084ffc8 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -112,7 +112,9 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
acpi_namespace_node,
&(op->common.node)));
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(op->common.value.
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ op->common.value.
string, status);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index 6d487edfe2de..5a602b75084e 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -297,8 +297,10 @@ acpi_ds_init_package_element(u8 object_type,
{
union acpi_operand_object **element_ptr;
+ ACPI_FUNCTION_TRACE(ds_init_package_element);
+
if (!source_object) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/*
@@ -329,7 +331,7 @@ acpi_ds_init_package_element(u8 object_type,
source_object->package.flags |= AOPOBJ_DATA_VALID;
}
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
@@ -352,6 +354,7 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
union acpi_generic_state scope_info;
union acpi_operand_object *element = *element_ptr;
struct acpi_namespace_node *resolved_node;
+ struct acpi_namespace_node *original_node;
char *external_path = NULL;
acpi_object_type type;
@@ -441,6 +444,7 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
* will remain as named references. This behavior is not described
* in the ACPI spec, but it appears to be an oversight.
*/
+ original_node = resolved_node;
status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
if (ACPI_FAILURE(status)) {
return_VOID;
@@ -468,26 +472,27 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
*/
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_THERMAL:
-
- /* TBD: This may not be necesssary */
-
- acpi_ut_add_reference(resolved_node->object);
+ case ACPI_TYPE_METHOD:
break;
case ACPI_TYPE_MUTEX:
- case ACPI_TYPE_METHOD:
case ACPI_TYPE_POWER:
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_EVENT:
case ACPI_TYPE_REGION:
+ /* acpi_ex_resolve_node_to_value gave these an extra reference */
+
+ acpi_ut_remove_reference(original_node->object);
break;
default:
/*
* For all other types - the node was resolved to an actual
- * operand object with a value, return the object
+ * operand object with a value, return the object. Remove
+ * a reference on the existing object.
*/
+ acpi_ut_remove_reference(element);
*element_ptr = (union acpi_operand_object *)resolved_node;
break;
}
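
The reference-count changes above follow one invariant: acpi_ex_resolve_node_to_value() takes an extra reference on some object types, and when the package element pointer is overwritten, the reference held through the old element must be released. A generic standalone sketch of that swap discipline (hand-rolled counter; not the ACPICA implementation):

struct obj {
	int refcount;
};

static void obj_get(struct obj *o) { o->refcount++; }
static void obj_put(struct obj *o) { o->refcount--; }

/* Swap *slot from its current object to new_obj without leaking */
static void replace_element(struct obj **slot, struct obj *new_obj)
{
	obj_get(new_obj);	/* the slot now holds a reference to new_obj */
	obj_put(*slot);		/* drop the reference held via the old element */
	*slot = new_obj;
}
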
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 0dabd9b95684..4c5faf629a83 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -583,7 +583,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(name_string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ name_string, status);
}
}
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index eaa859a89702..5771e4e4a99a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -207,7 +207,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
}
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info, path,
+ status);
return_ACPI_STATUS(status);
}
@@ -375,7 +376,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ path, status);
return_ACPI_STATUS(status);
}
}
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index aad83ef5a4ec..b3d0aaec8203 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -184,11 +184,14 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
if (status == AE_NOT_FOUND) {
status = AE_OK;
} else {
- ACPI_ERROR_NAMESPACE(buffer_ptr,
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ buffer_ptr,
status);
}
#else
- ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ buffer_ptr, status);
#endif
return_ACPI_STATUS(status);
}
@@ -343,7 +346,8 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ buffer_ptr, status);
return_ACPI_STATUS(status);
}
@@ -719,7 +723,8 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
*/
op->common.node = new_node;
} else {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
}
break;
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 28b447ff92df..bb58419f0d61 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -298,6 +298,16 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
acpi_ut_get_region_name(region_obj->region.
space_id)));
+
+ /*
+ * Special case for an EC timeout. These are seen so frequently
+ * that an additional error message is helpful
+ */
+ if ((region_obj->region.space_id == ACPI_ADR_SPACE_EC) &&
+ (status == AE_TIME)) {
+ ACPI_ERROR((AE_INFO,
+ "Timeout from EC hardware or EC device driver"));
+ }
}
if (!(handler_desc->address_space.handler_flags &
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 83398dc4b7c2..b2ff61bdb9a8 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -617,10 +617,11 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
u32 length;
u32 index;
- ACPI_FUNCTION_NAME(ex_dump_operand)
+ ACPI_FUNCTION_NAME(ex_dump_operand);
- /* Check if debug output enabled */
- if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
return;
}
@@ -904,7 +905,7 @@ void
acpi_ex_dump_operands(union acpi_operand_object **operands,
const char *opcode_name, u32 num_operands)
{
- ACPI_FUNCTION_NAME(ex_dump_operands);
+ ACPI_FUNCTION_TRACE(ex_dump_operands);
if (!opcode_name) {
opcode_name = "UNKNOWN";
@@ -928,7 +929,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"**** End operand dump for [%s]\n", opcode_name));
- return;
+ return_VOID;
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index a2f4e25d45b1..5b4282902a83 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -150,10 +150,10 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer)
*
******************************************************************************/
acpi_status
-acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
+acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 *time_elapsed)
{
acpi_status status;
- u32 delta_ticks;
+ u64 delta_ticks;
u64 quotient;
ACPI_FUNCTION_TRACE(acpi_get_timer_duration);
@@ -168,30 +168,29 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
return_ACPI_STATUS(AE_SUPPORT);
}
+ if (start_ticks == end_ticks) {
+ *time_elapsed = 0;
+ return_ACPI_STATUS(AE_OK);
+ }
+
/*
* Compute Tick Delta:
* Handle (max one) timer rollovers on 24-bit versus 32-bit timers.
*/
- if (start_ticks < end_ticks) {
- delta_ticks = end_ticks - start_ticks;
- } else if (start_ticks > end_ticks) {
+ delta_ticks = end_ticks;
+ if (start_ticks > end_ticks) {
if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
/* 24-bit Timer */
- delta_ticks =
- (((0x00FFFFFF - start_ticks) +
- end_ticks) & 0x00FFFFFF);
+ delta_ticks |= (u64)1 << 24;
} else {
/* 32-bit Timer */
- delta_ticks = (0xFFFFFFFF - start_ticks) + end_ticks;
+ delta_ticks |= (u64)1 << 32;
}
- } else { /* start_ticks == end_ticks */
-
- *time_elapsed = 0;
- return_ACPI_STATUS(AE_OK);
}
+ delta_ticks -= start_ticks;
/*
* Compute Duration (Requires a 64-bit multiply and divide):
@@ -199,10 +198,10 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
* time_elapsed (microseconds) =
* (delta_ticks * ACPI_USEC_PER_SEC) / ACPI_PM_TIMER_FREQUENCY;
*/
- status = acpi_ut_short_divide(((u64)delta_ticks) * ACPI_USEC_PER_SEC,
+ status = acpi_ut_short_divide(delta_ticks * ACPI_USEC_PER_SEC,
ACPI_PM_TIMER_FREQUENCY, &quotient, NULL);
- *time_elapsed = (u32) quotient;
+ *time_elapsed = (u32)quotient;
return_ACPI_STATUS(status);
}
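
The rewritten delta computation above folds the 24-bit and 32-bit rollover cases into one path by borrowing an extra bit into a 64-bit value before the subtraction. A standalone sketch with a worked example (ACPI_PM_TIMER_FREQUENCY is 3579545 Hz; timer_is_32bit stands in for the FADT flag test and is an assumption of this sketch):

#include <stdint.h>
#include <stdio.h>

#define PM_TIMER_FREQUENCY 3579545ull	/* PM timer ticks per second */

static uint64_t pm_timer_delta(uint32_t start, uint32_t end, int timer_is_32bit)
{
	uint64_t delta = end;

	if (start > end)	/* handle (at most one) timer rollover */
		delta |= (uint64_t)1 << (timer_is_32bit ? 32 : 24);

	return delta - start;
}

int main(void)
{
	/* 24-bit timer wrapping from 0x00FFFFF0 to 0x10: 0x20 ticks */
	uint64_t ticks = pm_timer_delta(0x00FFFFF0, 0x10, 0);

	printf("%llu ticks = %llu us\n",
	       (unsigned long long)ticks,
	       (unsigned long long)(ticks * 1000000 / PM_TIMER_FREQUENCY));
	return 0;
}
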
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 3094cec4eab4..d1679035d5f3 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -128,14 +128,14 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
acpi_io_address last_address;
const struct acpi_port_info *port_info;
- ACPI_FUNCTION_NAME(hw_validate_io_request);
+ ACPI_FUNCTION_TRACE(hw_validate_io_request);
/* Supported widths are 8/16/32 */
if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
ACPI_ERROR((AE_INFO,
"Bad BitWidth parameter: %8.8X", bit_width));
- return (AE_BAD_PARAMETER);
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
port_info = acpi_protected_ports;
@@ -153,13 +153,13 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
ACPI_ERROR((AE_INFO,
"Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
ACPI_FORMAT_UINT64(address), byte_width));
- return (AE_LIMIT);
+ return_ACPI_STATUS(AE_LIMIT);
}
/* Exit if requested address is not within the protected port table */
if (address > acpi_protected_ports[ACPI_PORT_INFO_ENTRIES - 1].end) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/* Check request against the list of protected I/O ports */
@@ -180,8 +180,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
/* Port illegality may depend on the _OSI calls made by the BIOS */
if (acpi_gbl_osi_data >= port_info->osi_dependency) {
- ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
+ ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
+ "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n",
ACPI_FORMAT_UINT64(address),
byte_width, port_info->name,
port_info->start,
@@ -198,7 +198,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
}
}
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/******************************************************************************
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index f2733f51ca8d..33e652a12fca 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -644,17 +644,18 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
this_node->object;
}
}
-#ifdef ACPI_ASL_COMPILER
- if (!acpi_gbl_disasm_flag &&
- (this_node->flags & ANOBJ_IS_EXTERNAL)) {
- this_node->flags |= IMPLICIT_EXTERNAL;
- }
-#endif
}
/* Special handling for the last segment (num_segments == 0) */
else {
+#ifdef ACPI_ASL_COMPILER
+ if (!acpi_gbl_disasm_flag
+ && (this_node->flags & ANOBJ_IS_EXTERNAL)) {
+ this_node->flags &= ~IMPLICIT_EXTERNAL;
+ }
+#endif
+
/*
* Sanity typecheck of the target object:
*
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 539d775bbc92..d55dcc82f434 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -495,7 +495,8 @@ acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
/* Check if we are resolving a named reference within a package */
- ACPI_ERROR_NAMESPACE(original_object->string.pointer, status);
+ ACPI_ERROR_NAMESPACE(&scope_info,
+ original_object->string.pointer, status);
goto error_exit;
}
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index a410760a0308..22c92d1a24d8 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -49,6 +49,9 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsnames")
+/* Local Prototypes */
+static void acpi_ns_normalize_pathname(char *original_path);
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_external_pathname
@@ -63,6 +66,7 @@ ACPI_MODULE_NAME("nsnames")
* for error and debug statements.
*
******************************************************************************/
+
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
{
char *name_buffer;
@@ -352,3 +356,148 @@ char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
return_PTR(name_buffer);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_build_prefixed_pathname
+ *
+ * PARAMETERS: prefix_scope - Scope/Path that prefixes the internal path
+ * internal_path - Name or path of the namespace node
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Construct a fully qualified pathname from a concatenation of:
+ * 1) Path associated with the prefix_scope namespace node
+ * 2) External path representation of the Internal path
+ *
+ ******************************************************************************/
+
+char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
+ const char *internal_path)
+{
+ acpi_status status;
+ char *full_path = NULL;
+ char *external_path = NULL;
+ char *prefix_path = NULL;
+ u32 prefix_path_length = 0;
+
+ /* If there is a prefix, get the pathname to it */
+
+ if (prefix_scope && prefix_scope->scope.node) {
+ prefix_path =
+ acpi_ns_get_normalized_pathname(prefix_scope->scope.node,
+ TRUE);
+ if (prefix_path) {
+ prefix_path_length = strlen(prefix_path);
+ }
+ }
+
+ status = acpi_ns_externalize_name(ACPI_UINT32_MAX, internal_path,
+ NULL, &external_path);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Merge the prefix path and the path. The extra 2 bytes cover one dot and the trailing null */
+
+ full_path =
+ ACPI_ALLOCATE_ZEROED(prefix_path_length + strlen(external_path) +
+ 2);
+ if (!full_path) {
+ goto cleanup;
+ }
+
+ /* Don't merge if the External path is already fully qualified */
+
+ if (prefix_path && (*external_path != '\\') && (*external_path != '^')) {
+ strcat(full_path, prefix_path);
+ if (prefix_path[1]) {
+ strcat(full_path, ".");
+ }
+ }
+
+ acpi_ns_normalize_pathname(external_path);
+ strcat(full_path, external_path);
+
+cleanup:
+ if (prefix_path) {
+ ACPI_FREE(prefix_path);
+ }
+ if (external_path) {
+ ACPI_FREE(external_path);
+ }
+
+ return (full_path);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_normalize_pathname
+ *
+ * PARAMETERS: original_path - Path to be normalized, in External format
+ *
+ * RETURN: The original path is processed in-place
+ *
+ * DESCRIPTION: Remove trailing underscores from each element of a path.
+ *
+ * For example: \A___.B___.C___ becomes \A.B.C
+ *
+ ******************************************************************************/
+
+static void acpi_ns_normalize_pathname(char *original_path)
+{
+ char *input_path = original_path;
+ char *new_path_buffer;
+ char *new_path;
+ u32 i;
+
+ /* Allocate a temp buffer in which to construct the new path */
+
+ new_path_buffer = ACPI_ALLOCATE_ZEROED(strlen(input_path) + 1);
+ new_path = new_path_buffer;
+ if (!new_path_buffer) {
+ return;
+ }
+
+ /* Special characters may appear at the beginning of the path */
+
+ if (*input_path == '\\') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+
+ while (*input_path == '^') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+
+ /* Remainder of the path */
+
+ while (*input_path) {
+
+ /* Do one nameseg at a time */
+
+ for (i = 0; (i < ACPI_NAME_SIZE) && *input_path; i++) {
+ if ((i == 0) || (*input_path != '_')) { /* First char is allowed to be underscore */
+ *new_path = *input_path;
+ new_path++;
+ }
+
+ input_path++;
+ }
+
+ /* Dot means that there are more namesegs to come */
+
+ if (*input_path == '.') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+ }
+
+ *new_path = 0;
+ strcpy(original_path, new_path_buffer);
+ ACPI_FREE(new_path_buffer);
+}
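
acpi_ns_normalize_pathname() above strips the trailing-underscore padding from each 4-character nameseg while preserving the '\' root prefix, '^' parent prefixes and '.' separators. A standalone sketch of the same walk, with a fixed local buffer standing in for ACPI_ALLOCATE_ZEROED (an assumption of this sketch):

#include <stdio.h>
#include <string.h>

#define NAMESEG_SIZE 4	/* ACPI_NAME_SIZE */

static void normalize_pathname(char *path)
{
	char out[256];
	char *in = path, *o = out;
	int i;

	if (*in == '\\')	/* root prefix */
		*o++ = *in++;
	while (*in == '^')	/* parent prefixes */
		*o++ = *in++;

	while (*in) {
		for (i = 0; i < NAMESEG_SIZE && *in; i++, in++) {
			if (i == 0 || *in != '_')	/* leading '_' is legal */
				*o++ = *in;
		}
		if (*in == '.')	/* more namesegs follow */
			*o++ = *in++;
	}

	*o = '\0';
	strcpy(path, out);
}

int main(void)
{
	char path[] = "\\A___.B___.C___";

	normalize_pathname(path);
	printf("%s\n", path);	/* prints \A.B.C */
	return 0;
}
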
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 5de8957f5ef0..e91dbee9235f 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -417,6 +417,7 @@ acpi_ns_search_and_enter(u32 target_name,
if (flags & ACPI_NS_EXTERNAL ||
(walk_state && walk_state->opcode == AML_SCOPE_OP)) {
new_node->flags |= ANOBJ_IS_EXTERNAL;
+ new_node->flags |= IMPLICIT_EXTERNAL;
}
#endif
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 783f4c838aee..9b51f65823b2 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -61,10 +61,10 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
*
* PARAMETERS: handle - Object handle (optional)
* pathname - Object pathname (optional)
- * external_params - List of parameters to pass to method,
+ * external_params - List of parameters to pass to a method,
* terminated by NULL. May be NULL
* if no parameters are being passed.
- * return_buffer - Where to put method's return value (if
+ * return_buffer - Where to put the object's return value (if
* any). If NULL, no value is returned.
* return_type - Expected type of return object
*
@@ -100,13 +100,14 @@ acpi_evaluate_object_typed(acpi_handle handle,
free_buffer_on_error = TRUE;
}
+ /* Get a handle here, in order to build an error message if needed */
+
+ target_handle = handle;
if (pathname) {
status = acpi_get_handle(handle, pathname, &target_handle);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- } else {
- target_handle = handle;
}
full_pathname = acpi_ns_get_external_pathname(target_handle);
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index eb9dfaca555f..171e2faa7c50 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -361,7 +361,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
/* Final exception check (may have been changed from code above) */
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info, path, status);
if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
ACPI_PARSE_EXECUTE) {
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 0bef6df71bba..c0b179883ff2 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -372,16 +372,10 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
* external declaration opcode. Setting walk_state->Aml to
 * walk_state->parser_state.Aml + 2 increments the
* walk_state->Aml past the object type and the paramcount of the
- * external opcode. For the error message, only print the AML
- * offset. We could attempt to print the name but this may cause
- * a segmentation fault when printing the namepath because the
- * AML may be incorrect.
+ * external opcode.
*/
- acpi_os_printf
- ("// Invalid external declaration at AML offset 0x%x.\n",
- walk_state->aml -
- walk_state->parser_state.aml_start);
walk_state->aml = walk_state->parser_state.aml + 2;
+ walk_state->parser_state.aml = walk_state->aml;
return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
}
#endif
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 02642760cb93..cd59dfe6a47d 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -94,9 +94,11 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
op->common.aml_opcode = opcode;
- ACPI_DISASM_ONLY_MEMBERS(strncpy(op->common.aml_op_name,
- (acpi_ps_get_opcode_info(opcode))->
- name, sizeof(op->common.aml_op_name)));
+ ACPI_DISASM_ONLY_MEMBERS(acpi_ut_safe_strncpy(op->common.aml_op_name,
+ (acpi_ps_get_opcode_info
+ (opcode))->name,
+ sizeof(op->common.
+ aml_op_name)));
}
/*******************************************************************************
@@ -158,10 +160,10 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml)
if (opcode == AML_SCOPE_OP) {
acpi_gbl_current_scope = op;
}
- }
- if (gbl_capture_comments) {
- ASL_CV_TRANSFER_COMMENTS(op);
+ if (acpi_gbl_capture_comments) {
+ ASL_CV_TRANSFER_COMMENTS(op);
+ }
}
return (op);
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 615a885e2ca3..cff7154b7fee 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -163,6 +163,9 @@ acpi_debug_print(u32 requested_debug_level,
{
acpi_thread_id thread_id;
va_list args;
+#ifdef ACPI_APPLICATION
+ int fill_count;
+#endif
/* Check if debug output enabled */
@@ -202,10 +205,21 @@ acpi_debug_print(u32 requested_debug_level,
acpi_os_printf("[%u] ", (u32)thread_id);
}
- acpi_os_printf("[%02ld] ", acpi_gbl_nesting_level);
-#endif
+ fill_count = 48 - acpi_gbl_nesting_level -
+ strlen(acpi_ut_trim_function_name(function_name));
+ if (fill_count < 0) {
+ fill_count = 0;
+ }
+
+ acpi_os_printf("[%02ld] %*s",
+ acpi_gbl_nesting_level, acpi_gbl_nesting_level + 1, " ");
+ acpi_os_printf("%s%*s: ",
+ acpi_ut_trim_function_name(function_name), fill_count,
+ " ");
+#else
acpi_os_printf("%-22.22s: ", acpi_ut_trim_function_name(function_name));
+#endif
va_start(args, format);
acpi_os_vprintf(format, args);
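
[Editor's note] The %*s conversion used in the hunk above prints a run of spaces whose width is computed at run time, which is how the nesting-based indentation and the fixed-column alignment are produced. A hedged stand-alone illustration of the same padding arithmetic (plain printf, fabricated function names):

#include <stdio.h>
#include <string.h>

/* Emulate the trace-line layout: indent by nesting level, then pad the
 * function name out to a fixed column (48, as in the hunk above). */
static void trace_line(unsigned nesting, const char *func, const char *msg)
{
	int fill = 48 - (int)nesting - (int)strlen(func);

	if (fill < 0)
		fill = 0;

	/* "%*s" prints " " right-justified in a field of the given
	 * width, i.e. (width - 1) pad spaces followed by one space. */
	printf("[%02u] %*s", nesting, (int)nesting + 1, " ");
	printf("%s%*s: %s\n", func, fill, " ", msg);
}

int main(void)
{
	trace_line(1, "ns_lookup", "entered");
	trace_line(3, "ns_search_and_enter", "entered");
	return 0;
}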
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 02cd2c2d961a..55debbad487d 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -395,11 +395,6 @@ const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
return (acpi_gbl_ref_class_names[object->reference.class]);
}
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-/*
- * Strings and procedures used for debug only
- */
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_mutex_name
@@ -433,6 +428,12 @@ const char *acpi_ut_get_mutex_name(u32 mutex_id)
return (acpi_gbl_mutex_names[mutex_id]);
}
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+/*
+ * Strings and procedures used for debug only
+ */
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_notify_name
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index e3368186e1c1..42388dcb5ccc 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -182,6 +182,78 @@ acpi_ut_predefined_bios_error(const char *module_name,
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_prefixed_namespace_error
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * prefix_scope - Scope/Path that prefixes the internal path
+ * internal_path - Name or path of the namespace node
+ * lookup_status - Exception code from NS lookup
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print an error message with the full pathname constructed this way:
+ *
+ * prefix_scope_node_full_path.externalized_internal_path
+ *
+ * NOTE: 10/2017: Treat the major ns_lookup errors as firmware errors
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_prefixed_namespace_error(const char *module_name,
+ u32 line_number,
+ union acpi_generic_state *prefix_scope,
+ const char *internal_path,
+ acpi_status lookup_status)
+{
+ char *full_path;
+ const char *message;
+
+ /*
+ * Main cases:
+ * 1) Object creation, object must not already exist
+ * 2) Object lookup, object must exist
+ */
+ switch (lookup_status) {
+ case AE_ALREADY_EXISTS:
+
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+ message = "Failure creating";
+ break;
+
+ case AE_NOT_FOUND:
+
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+ message = "Failure looking up";
+ break;
+
+ default:
+
+ acpi_os_printf(ACPI_MSG_ERROR);
+ message = "Failure looking up";
+ break;
+ }
+
+ /* Concatenate the prefix path and the internal path */
+
+ full_path =
+ acpi_ns_build_prefixed_pathname(prefix_scope, internal_path);
+
+ acpi_os_printf("%s [%s], %s", message,
+ full_path ? full_path : "Could not get pathname",
+ acpi_format_exception(lookup_status));
+
+ if (full_path) {
+ ACPI_FREE(full_path);
+ }
+
+ ACPI_MSG_SUFFIX;
+}
+
+#ifdef __OBSOLETE_FUNCTION
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_namespace_error
*
* PARAMETERS: module_name - Caller's module name (for error output)
@@ -240,6 +312,7 @@ acpi_ut_namespace_error(const char *module_name,
ACPI_MSG_SUFFIX;
ACPI_MSG_REDIRECT_END;
}
+#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 23e766d1691d..45eeb0dcf283 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -206,7 +206,6 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_next_owner_id_offset = 0;
acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
acpi_gbl_osi_mutex = NULL;
- acpi_gbl_max_loop_iterations = ACPI_MAX_LOOP_COUNT;
/* Hardware oriented */
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 5f9c680076c4..2055a858e5f5 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -134,7 +134,7 @@ acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
if ((count & 63) >= 32) {
operand_ovl.part.hi = operand_ovl.part.lo;
- operand_ovl.part.lo ^= operand_ovl.part.lo;
+ operand_ovl.part.lo = 0;
count = (count & 63) - 32;
}
ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi,
@@ -171,7 +171,7 @@ acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
if ((count & 63) >= 32) {
operand_ovl.part.lo = operand_ovl.part.hi;
- operand_ovl.part.hi ^= operand_ovl.part.hi;
+ operand_ovl.part.hi = 0;
count = (count & 63) - 32;
}
ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi,
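
[Editor's note] The two hunks above replace a self-XOR with a plain zero assignment in the 32-bit-half shift helpers. For readers unfamiliar with the pattern: on targets without a native 64-bit shift, a shift by 32 or more moves one 32-bit half into the other, zeroes the vacated half, and then applies the remaining count. A hedged stand-alone sketch of the left-shift case:

#include <stdio.h>
#include <stdint.h>

/* 64-bit left shift composed from 32-bit halves, for platforms where a
 * native 64-bit shift is unavailable. Shift counts are taken mod 64. */
static uint64_t shift_left_64(uint32_t lo, uint32_t hi, unsigned count)
{
	count &= 63;
	if (count >= 32) {
		hi = lo;	/* low half moves into the high half */
		lo = 0;		/* vacated half is zeroed (the fix above) */
		count -= 32;
	}
	if (count) {		/* avoid the undefined ">> 32" when 0 */
		hi = (hi << count) | (lo >> (32 - count));
		lo <<= count;
	}
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* equals (0x80000001ULL << 33) mod 2^64 = 0x200000000 */
	printf("%llx\n",
	       (unsigned long long)shift_left_64(0x80000001u, 0, 33));
	return 0;
}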
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 586354788018..524ba931d5e8 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -286,8 +286,9 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
} else {
ACPI_EXCEPTION((AE_INFO, status,
- "Thread %u could not acquire Mutex [0x%X]",
- (u32)this_thread_id, mutex_id));
+ "Thread %u could not acquire Mutex [%s] (0x%X)",
+ (u32)this_thread_id,
+ acpi_ut_get_mutex_name(mutex_id), mutex_id));
}
return (status);
@@ -322,8 +323,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
*/
if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
ACPI_ERROR((AE_INFO,
- "Mutex [0x%X] is not acquired, cannot release",
- mutex_id));
+ "Mutex [%s] (0x%X) is not acquired, cannot release",
+ acpi_ut_get_mutex_name(mutex_id), mutex_id));
return (AE_NOT_ACQUIRED);
}
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index 792664982ea3..33a0970646df 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -140,7 +140,7 @@ int acpi_ut_stricmp(char *string1, char *string2)
return (c1 - c2);
}
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) || defined (ACPI_DEBUG_OUTPUT)
/*******************************************************************************
*
* FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
@@ -199,4 +199,13 @@ acpi_ut_safe_strncat(char *dest,
strncat(dest, source, max_transfer_length);
return (FALSE);
}
+
+void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size)
+{
+ /* Always terminate destination string */
+
+ strncpy(dest, source, dest_size);
+ dest[dest_size - 1] = 0;
+}
+
#endif
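
[Editor's note] Unlike plain strncpy(), the new helper above guarantees a terminated destination even on truncation (at the cost of not zero-filling the rest of the buffer). A hedged user-space sketch of the same contract:

#include <stdio.h>
#include <string.h>

/* strncpy() leaves dest unterminated when the source fills the buffer;
 * forcing dest[size - 1] = 0 restores the usual C-string invariant. */
static void safe_strncpy(char *dest, const char *source, size_t dest_size)
{
	strncpy(dest, source, dest_size);
	dest[dest_size - 1] = '\0';
}

int main(void)
{
	char buf[8];

	safe_strncpy(buf, "WordLongerThanBuf", sizeof(buf));
	printf("%s\n", buf);	/* "WordLon": truncated but terminated */
	return 0;
}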
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 3175b133c0e4..f6b8dd24b006 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -101,6 +101,8 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */
{"Windows 2013", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
{"Windows 2015", NULL, 0, ACPI_OSI_WIN_10}, /* Windows 10 - Added 03/2015 */
+ {"Windows 2016", NULL, 0, ACPI_OSI_WIN_10_RS1}, /* Windows 10 version 1607 - Added 12/2017 */
+ {"Windows 2017", NULL, 0, ACPI_OSI_WIN_10_RS2}, /* Windows 10 version 1703 - Added 12/2017 */
/* Feature Group Strings */
diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c
index 965fb5cec94f..97f48d71f9e6 100644
--- a/drivers/acpi/acpica/utstrsuppt.c
+++ b/drivers/acpi/acpica/utstrsuppt.c
@@ -52,10 +52,9 @@ static acpi_status
acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit);
static acpi_status
-acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product);
+acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product);
-static acpi_status
-acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum);
+static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum);
/*******************************************************************************
*
@@ -357,7 +356,7 @@ acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
* FUNCTION: acpi_ut_strtoul_multiply64
*
* PARAMETERS: multiplicand - Current accumulated converted integer
- * multiplier - Base/Radix
+ * base - Base/Radix
* out_product - Where the product is returned
*
* RETURN: Status and 64-bit product
@@ -369,33 +368,40 @@ acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
******************************************************************************/
static acpi_status
-acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
+acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product)
{
- u64 val;
+ u64 product;
+ u64 quotient;
/* Exit if either operand is zero */
*out_product = 0;
- if (!multiplicand || !multiplier) {
+ if (!multiplicand || !base) {
return (AE_OK);
}
- /* Check for 64-bit overflow before the actual multiplication */
-
- acpi_ut_short_divide(ACPI_UINT64_MAX, (u32)multiplier, &val, NULL);
- if (multiplicand > val) {
+ /*
+ * Check for 64-bit overflow before the actual multiplication.
+ *
+ * Notes: 64-bit division is often not supported on 32-bit platforms
+	 * (it requires a library function); therefore, ACPICA has a local
+	 * 64-bit divide function. Also, the base is currently only used
+	 * as the radix (8/10/16), so the 64/32 divide will always work.
+ */
+ acpi_ut_short_divide(ACPI_UINT64_MAX, base, &quotient, NULL);
+ if (multiplicand > quotient) {
return (AE_NUMERIC_OVERFLOW);
}
- val = multiplicand * multiplier;
+ product = multiplicand * base;
/* Check for 32-bit overflow if necessary */
- if ((acpi_gbl_integer_bit_width == 32) && (val > ACPI_UINT32_MAX)) {
+ if ((acpi_gbl_integer_bit_width == 32) && (product > ACPI_UINT32_MAX)) {
return (AE_NUMERIC_OVERFLOW);
}
- *out_product = val;
+ *out_product = product;
return (AE_OK);
}
@@ -404,7 +410,7 @@ acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
* FUNCTION: acpi_ut_strtoul_add64
*
* PARAMETERS: addend1 - Current accumulated converted integer
- * addend2 - New hex value/char
+ * digit - New hex value/char
* out_sum - Where sum is returned (Accumulator)
*
* RETURN: Status and 64-bit sum
@@ -415,17 +421,17 @@ acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
*
******************************************************************************/
-static acpi_status acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum)
+static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum)
{
u64 sum;
/* Check for 64-bit overflow before the actual addition */
- if ((addend1 > 0) && (addend2 > (ACPI_UINT64_MAX - addend1))) {
+ if ((addend1 > 0) && (digit > (ACPI_UINT64_MAX - addend1))) {
return (AE_NUMERIC_OVERFLOW);
}
- sum = addend1 + addend2;
+ sum = addend1 + digit;
/* Check for 32-bit overflow if necessary */
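
[Editor's note] The two helpers above implement the standard overflow pre-checks for the accumulate step of a string-to-integer conversion: the multiply is safe iff the accumulator is at most UINT64_MAX / base, and the add is safe iff the digit is at most UINT64_MAX - accumulator. A hedged stand-alone sketch combining both checks:

#include <stdio.h>
#include <stdint.h>

/* Accumulate one digit: value = value * base + digit, failing instead
 * of wrapping on 64-bit overflow (the same pre-checks as above). */
static int accumulate_digit(uint64_t *value, uint32_t base, uint32_t digit)
{
	if (base == 0)
		return -1;
	if (*value > UINT64_MAX / base)		/* multiply would overflow */
		return -1;
	*value *= base;

	if (digit > UINT64_MAX - *value)	/* add would overflow */
		return -1;
	*value += digit;
	return 0;
}

int main(void)
{
	uint64_t v = UINT64_MAX / 16;	/* one hex digit from the top */

	printf("%d\n", accumulate_digit(&v, 16, 0xE));	/* 0: still fits */
	printf("%d\n", accumulate_digit(&v, 16, 0x0));	/* -1: overflows */
	return 0;
}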
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 3c8de88ecbd5..633b4e2c669f 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -402,8 +402,8 @@ acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
allocation->component = component;
allocation->line = line;
- strncpy(allocation->module, module, ACPI_MAX_MODULE_NAME);
- allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0;
+ acpi_ut_safe_strncpy(allocation->module, (char *)module,
+ ACPI_MAX_MODULE_NAME);
if (!element) {
@@ -717,7 +717,7 @@ exit:
if (!num_outstanding) {
ACPI_INFO(("No outstanding allocations"));
} else {
- ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations",
+ ACPI_ERROR((AE_INFO, "%u (0x%X) Outstanding cache allocations",
num_outstanding, num_outstanding));
}
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 950a1e500bfa..9da4f8ef2e77 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -96,8 +96,8 @@ ACPI_EXPORT_SYMBOL(acpi_error)
*
* RETURN: None
*
- * DESCRIPTION: Print "ACPI Exception" message with module/line/version info
- * and decoded acpi_status.
+ * DESCRIPTION: Print an "ACPI Error" message with module/line/version
+ * info as well as decoded acpi_status.
*
******************************************************************************/
void ACPI_INTERNAL_VAR_XFACE
@@ -111,10 +111,10 @@ acpi_exception(const char *module_name,
/* For AE_OK, just print the message */
if (ACPI_SUCCESS(status)) {
- acpi_os_printf(ACPI_MSG_EXCEPTION);
+ acpi_os_printf(ACPI_MSG_ERROR);
} else {
- acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ",
+ acpi_os_printf(ACPI_MSG_ERROR "%s, ",
acpi_format_exception(status));
}
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 6742f6c68034..9bff853e85f3 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1007,7 +1007,7 @@ skip:
/* The record may be cleared by others, try read next record */
if (len == -ENOENT)
goto skip;
- else if (len < sizeof(*rcd)) {
+ else if (len < 0 || len < sizeof(*rcd)) {
rc = -EIO;
goto out;
}
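
[Editor's note] The extra "len < 0" test added above matters because sizeof() yields an unsigned size_t while len here is a signed ssize_t: in "len < sizeof(*rcd)" a negative len is converted to a huge unsigned value, so the "too short" check silently passes. A hedged demonstration of the trap:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	ssize_t len = -2;	/* e.g. an error code from a read helper */

	/* Usual arithmetic conversions turn len into a huge size_t
	 * here, so the length check wrongly evaluates to false. */
	if (len < (size_t)16)
		printf("caught short record\n");
	else
		printf("BUG: negative len slipped through\n");

	/* Checking the sign first, as the fix does, is reliable. */
	if (len < 0 || (size_t)len < 16)
		printf("caught with explicit sign check\n");
	return 0;
}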
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 6402f7fad3bb..1efefe919555 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -410,7 +410,52 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
flags = 0;
if (flags != -1)
- memory_failure_queue(pfn, 0, flags);
+ memory_failure_queue(pfn, flags);
+#endif
+}
+
+/*
+ * PCIe AER errors need to be sent to the AER driver for reporting and
+ * recovery. The GHES severities map to the following AER severities and
+ * require the following handling:
+ *
+ * GHES_SEV_CORRECTABLE -> AER_CORRECTABLE
+ * These need to be reported by the AER driver but no recovery is
+ * necessary.
+ * GHES_SEV_RECOVERABLE -> AER_NONFATAL
+ * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
+ * These both need to be reported and recovered from by the AER driver.
+ * GHES_SEV_PANIC does not make it to this handling since the kernel must
+ * panic.
+ */
+static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
+{
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
+
+ if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
+ pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+ unsigned int devfn;
+ int aer_severity;
+
+ devfn = PCI_DEVFN(pcie_err->device_id.device,
+ pcie_err->device_id.function);
+ aer_severity = cper_severity_to_aer(gdata->error_severity);
+
+ /*
+ * If firmware reset the component to contain
+ * the error, we must reinitialize it before
+ * use, so treat it as a fatal AER error.
+ */
+ if (gdata->flags & CPER_SEC_RESET)
+ aer_severity = AER_FATAL;
+
+ aer_recover_queue(pcie_err->device_id.segment,
+ pcie_err->device_id.bus,
+ devfn, aer_severity,
+ (struct aer_capability_regs *)
+ pcie_err->aer_info);
+ }
#endif
}
@@ -441,38 +486,9 @@ static void ghes_do_proc(struct ghes *ghes,
arch_apei_report_mem_error(sev, mem_err);
ghes_handle_memory_failure(gdata, sev);
}
-#ifdef CONFIG_ACPI_APEI_PCIEAER
else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
- struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
-
- if (sev == GHES_SEV_RECOVERABLE &&
- sec_sev == GHES_SEV_RECOVERABLE &&
- pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
- pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
- unsigned int devfn;
- int aer_severity;
-
- devfn = PCI_DEVFN(pcie_err->device_id.device,
- pcie_err->device_id.function);
- aer_severity = cper_severity_to_aer(gdata->error_severity);
-
- /*
- * If firmware reset the component to contain
- * the error, we must reinitialize it before
- * use, so treat it as a fatal AER error.
- */
- if (gdata->flags & CPER_SEC_RESET)
- aer_severity = AER_FATAL;
-
- aer_recover_queue(pcie_err->device_id.segment,
- pcie_err->device_id.bus,
- devfn, aer_severity,
- (struct aer_capability_regs *)
- pcie_err->aer_info);
- }
-
+ ghes_handle_aer(gdata);
}
-#endif
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
@@ -870,7 +886,6 @@ static void ghes_print_queued_estatus(void)
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
- u32 len, node_len;
llnode = llist_del_all(&ghes_estatus_llist);
/*
@@ -882,8 +897,6 @@ static void ghes_print_queued_estatus(void)
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
llnode);
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- len = cper_estatus_len(estatus);
- node_len = GHES_ESTATUS_NODE_LEN(len);
generic = estatus_node->generic;
ghes_print_estatus(NULL, generic, estatus);
llnode = llnode->next;
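
[Editor's note] The severity mapping described in the ghes_handle_aer() comment above reduces to a small pure function. This is a hedged sketch with local stand-ins for the kernel's AER_* constants and CPER_SEC_RESET flag; in the kernel, cper_severity_to_aer() performs the base translation and the reset flag escalates a recoverable error to fatal because the device must be reinitialized before reuse.

#include <stdio.h>

/* Stand-in values; the kernel uses its own AER_* and CPER_SEC_RESET
 * definitions and the cper_severity_to_aer() translation helper. */
enum aer_sev { AER_CORR, AER_NONFATAL, AER_FATAL };
#define SEC_RESET 0x1	/* "firmware reset the component" section flag */

/* Map an already-translated AER severity plus the section flags to the
 * severity actually queued, mirroring ghes_handle_aer() above. */
static enum aer_sev effective_aer_severity(enum aer_sev sev, unsigned flags)
{
	if (flags & SEC_RESET)	/* reset component: treat as fatal */
		return AER_FATAL;
	return sev;
}

int main(void)
{
	printf("%d\n", effective_aer_severity(AER_NONFATAL, 0));	  /* 1 */
	printf("%d\n", effective_aer_severity(AER_NONFATAL, SEC_RESET)); /* 2 */
	return 0;
}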
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 13e7b56e33ae..19bc440820e6 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -70,6 +70,7 @@ static async_cookie_t async_cookie;
static bool battery_driver_registered;
static int battery_bix_broken_package;
static int battery_notification_delay_ms;
+static int battery_full_discharging;
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -214,9 +215,12 @@ static int acpi_battery_get_property(struct power_supply *psy,
return -ENODEV;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
- else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
+ if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) {
+ if (battery_full_discharging && battery->rate_now == 0)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ } else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (acpi_battery_is_charged(battery))
val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -1166,6 +1170,12 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
return 0;
}
+static int __init battery_full_discharging_quirk(const struct dmi_system_id *d)
+{
+ battery_full_discharging = 1;
+ return 0;
+}
+
static const struct dmi_system_id bat_dmi_table[] __initconst = {
{
.callback = battery_bix_broken_package_quirk,
@@ -1183,6 +1193,22 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
},
},
+ {
+ .callback = battery_full_discharging_quirk,
+ .ident = "ASUS GL502VSK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"),
+ },
+ },
+ {
+ .callback = battery_full_discharging_quirk,
+ .ident = "ASUS UX305LA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"),
+ },
+ },
{},
};
@@ -1237,13 +1263,11 @@ static int acpi_battery_add(struct acpi_device *device)
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
-#endif
if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
-#endif
goto fail;
}
+#endif
printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
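
[Editor's note] The quirk above reports FULL instead of DISCHARGING when the firmware keeps the discharging bit set at a zero discharge rate, as the DMI-matched ASUS models do. The decision reduces to a small pure function; a hedged sketch with local stand-ins for the ACPI state bits and power_supply status values:

#include <stdio.h>

#define BAT_DISCHARGING	0x1	/* stand-ins for ACPI_BATTERY_STATE_* */
#define BAT_CHARGING	0x2
enum ps_status { PS_DISCHARGING, PS_CHARGING, PS_FULL, PS_NOT_CHARGING };

static enum ps_status battery_status(unsigned state, int rate_now,
				     int full_quirk, int is_charged)
{
	if (state & BAT_DISCHARGING) {
		/* Quirked firmware leaves the discharging bit set even
		 * when the battery is actually full (rate == 0 on AC). */
		if (full_quirk && rate_now == 0)
			return PS_FULL;
		return PS_DISCHARGING;
	}
	if (state & BAT_CHARGING)
		return PS_CHARGING;
	if (is_charged)
		return PS_FULL;
	return PS_NOT_CHARGING;
}

int main(void)
{
	printf("%d\n", battery_status(BAT_DISCHARGING, 0, 1, 0)); /* FULL */
	printf("%d\n", battery_status(BAT_DISCHARGING, 0, 0, 0)); /* DISCHG */
	return 0;
}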
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 4d0979e02a28..f87ed3be779a 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -785,6 +785,24 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
}
EXPORT_SYMBOL_GPL(acpi_match_device);
+void *acpi_get_match_data(const struct device *dev)
+{
+ const struct acpi_device_id *match;
+
+ if (!dev->driver)
+ return NULL;
+
+ if (!dev->driver->acpi_match_table)
+ return NULL;
+
+ match = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!match)
+ return NULL;
+
+ return (void *)match->driver_data;
+}
+EXPORT_SYMBOL_GPL(acpi_get_match_data);
+
int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids)
{
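
[Editor's note] The new acpi_get_match_data() walks the driver's match table and hands back the driver_data of the entry that matched the device. The lookup pattern itself is easy to show in isolation; below is a hedged user-space analog with a hypothetical id table (the "TEST000x" HIDs and config values are illustrative, not from the patch):

#include <stdio.h>
#include <string.h>

/* User-space analog of an acpi_device_id table: each entry pairs a
 * hardware ID with opaque per-variant driver data. */
struct id_entry {
	const char *hid;
	const void *driver_data;
};

static const int cfg_a = 1, cfg_b = 2;

static const struct id_entry match_table[] = {
	{ "TEST0001", &cfg_a },	/* hypothetical HIDs */
	{ "TEST0002", &cfg_b },
	{ NULL, NULL }		/* table terminator */
};

/* Return the driver_data of the first entry matching @hid, or NULL --
 * the same shape as acpi_get_match_data() built on acpi_match_device(). */
static const void *get_match_data(const char *hid)
{
	const struct id_entry *e;

	for (e = match_table; e->hid; e++)
		if (!strcmp(e->hid, hid))
			return e->driver_data;
	return NULL;
}

int main(void)
{
	const int *cfg = get_match_data("TEST0002");

	printf("%d\n", cfg ? *cfg : -1);	/* prints 2 */
	return 0;
}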
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index bf8e4d371fa7..e1eee7a60fad 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -30,6 +30,7 @@
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <acpi/button.h>
#define PREFIX "ACPI: "
@@ -76,6 +77,22 @@ static const struct acpi_device_id button_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, button_device_ids);
+/*
+ * Some devices that don't even have a lid have a broken _LID method
+ * (e.g. pointing to a floating GPIO pin) that causes spurious LID events.
+ */
+static const struct dmi_system_id lid_blacklst[] = {
+ {
+ /* GP-electronic T701 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
+ DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
+ },
+ },
+ {}
+};
+
static int acpi_button_add(struct acpi_device *device);
static int acpi_button_remove(struct acpi_device *device);
static void acpi_button_notify(struct acpi_device *device, u32 event);
@@ -210,6 +227,8 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
}
/* Send the platform triggered reliable event */
if (do_update) {
+ acpi_handle_debug(device->handle, "ACPI LID %s\n",
+ state ? "open" : "closed");
input_report_switch(button->input, SW_LID, !state);
input_sync(button->input);
button->last_state = !!state;
@@ -473,6 +492,9 @@ static int acpi_button_add(struct acpi_device *device)
char *name, *class;
int error;
+ if (!strcmp(hid, ACPI_BUTTON_HID_LID) && dmi_check_system(lid_blacklst))
+ return -ENODEV;
+
button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
if (!button)
return -ENOMEM;
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 30e84cc600ae..06ea4749ebd9 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1171,7 +1171,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cpc_register_resource *desired_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
- struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+ struct cppc_pcc_data *pcc_ss_data;
int ret = 0;
if (!cpc_desc || pcc_ss_id < 0) {
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index e4ffaeec9ec2..c4d0a1c912f0 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -990,7 +990,7 @@ void acpi_subsys_complete(struct device *dev)
* the sleep state it is going out of and it has never been resumed till
* now, resume it in case the firmware powered it up.
*/
- if (dev->power.direct_complete && pm_resume_via_firmware())
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_complete);
@@ -1039,10 +1039,28 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
*/
int acpi_subsys_suspend_noirq(struct device *dev)
{
- if (dev_pm_smart_suspend_and_suspended(dev))
+ int ret;
+
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.may_skip_resume = true;
return 0;
+ }
+
+ ret = pm_generic_suspend_noirq(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * If the target system sleep state is suspend-to-idle, it is sufficient
+ * to check whether or not the device's wakeup settings are good for
+ * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
+ * acpi_subsys_complete() to take care of fixing up the device's state
+ * anyway, if need be.
+ */
+ dev->power.may_skip_resume = device_may_wakeup(dev) ||
+ !device_can_wakeup(dev);
- return pm_generic_suspend_noirq(dev);
+ return 0;
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
@@ -1052,6 +1070,9 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
*/
int acpi_subsys_resume_noirq(struct device *dev)
{
+ if (dev_pm_may_skip_resume(dev))
+ return 0;
+
/*
* Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
* during system suspend, so update their runtime PM status to "active"
@@ -1138,7 +1159,7 @@ int acpi_subsys_thaw_noirq(struct device *dev)
* skip all of the subsequent "thaw" callbacks for the device.
*/
if (dev_pm_smart_suspend_and_suspended(dev)) {
- dev->power.direct_complete = true;
+ dev_pm_skip_next_resume_phases(dev);
return 0;
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0252c9b9af3d..d9f38c645e4a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1516,7 +1516,7 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
}
acpi_handle_info(ec->handle,
- "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
+ "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
return ret;
}
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 6c7dd7af789e..dd70d6c2bca0 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -128,7 +128,7 @@ static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
return -ENOMEM;
}
- if (!debugfs_create_x32("gpe", 0444, dev_dir, (u32 *)&first_ec->gpe))
+ if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe))
goto error;
if (!debugfs_create_bool("use_global_lock", 0444, dev_dir,
&first_ec->global_lock))
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index 46f060356a22..f13ba2c07667 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -49,6 +49,11 @@
#define MODULE_NAME "acpi-ged"
+struct acpi_ged_device {
+ struct device *dev;
+ struct list_head event_list;
+};
+
struct acpi_ged_event {
struct list_head node;
struct device *dev;
@@ -76,7 +81,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
unsigned int irq;
unsigned int gsi;
unsigned int irqflags = IRQF_ONESHOT;
- struct device *dev = context;
+ struct acpi_ged_device *geddev = context;
+ struct device *dev = geddev->dev;
acpi_handle handle = ACPI_HANDLE(dev);
acpi_handle evt_handle;
struct resource r;
@@ -102,8 +108,6 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
return AE_ERROR;
}
- dev_info(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
-
event = devm_kzalloc(dev, sizeof(*event), GFP_KERNEL);
if (!event)
return AE_ERROR;
@@ -116,29 +120,58 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
if (r.flags & IORESOURCE_IRQ_SHAREABLE)
irqflags |= IRQF_SHARED;
- if (devm_request_threaded_irq(dev, irq, NULL, acpi_ged_irq_handler,
- irqflags, "ACPI:Ged", event)) {
+ if (request_threaded_irq(irq, NULL, acpi_ged_irq_handler,
+ irqflags, "ACPI:Ged", event)) {
dev_err(dev, "failed to setup event handler for irq %u\n", irq);
return AE_ERROR;
}
+ dev_dbg(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
+ list_add_tail(&event->node, &geddev->event_list);
return AE_OK;
}
static int ged_probe(struct platform_device *pdev)
{
+ struct acpi_ged_device *geddev;
acpi_status acpi_ret;
+ geddev = devm_kzalloc(&pdev->dev, sizeof(*geddev), GFP_KERNEL);
+ if (!geddev)
+ return -ENOMEM;
+
+ geddev->dev = &pdev->dev;
+ INIT_LIST_HEAD(&geddev->event_list);
acpi_ret = acpi_walk_resources(ACPI_HANDLE(&pdev->dev), "_CRS",
- acpi_ged_request_interrupt, &pdev->dev);
+ acpi_ged_request_interrupt, geddev);
if (ACPI_FAILURE(acpi_ret)) {
dev_err(&pdev->dev, "unable to parse the _CRS record\n");
return -EINVAL;
}
+ platform_set_drvdata(pdev, geddev);
return 0;
}
+static void ged_shutdown(struct platform_device *pdev)
+{
+ struct acpi_ged_device *geddev = platform_get_drvdata(pdev);
+ struct acpi_ged_event *event, *next;
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+ free_irq(event->irq, event);
+ list_del(&event->node);
+ dev_dbg(geddev->dev, "GED releasing GSI %u @ IRQ %u\n",
+ event->gsi, event->irq);
+ }
+}
+
+static int ged_remove(struct platform_device *pdev)
+{
+ ged_shutdown(pdev);
+ return 0;
+}
+
static const struct acpi_device_id ged_acpi_ids[] = {
{"ACPI0013"},
{},
@@ -146,6 +179,8 @@ static const struct acpi_device_id ged_acpi_ids[] = {
static struct platform_driver ged_driver = {
.probe = ged_probe,
+ .remove = ged_remove,
+ .shutdown = ged_shutdown,
.driver = {
.name = MODULE_NAME,
.acpi_match_table = ACPI_PTR(ged_acpi_ids),
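
[Editor's note] The GED changes above switch from devm-managed IRQs to explicit request_threaded_irq() plus a per-device event list, so that ged_shutdown() can release every IRQ in a defined order before the device goes away. The bookkeeping pattern (acquire, link onto a list, walk-and-free on teardown) in a hedged user-space analog:

#include <stdio.h>
#include <stdlib.h>

/* One tracked resource per acquired "event", linked for later release --
 * the same shape as struct acpi_ged_event on geddev->event_list above. */
struct event {
	struct event *next;
	int irq;	/* stand-in for the acquired IRQ number */
};

struct ged { struct event *events; };

static int probe_one(struct ged *g, int irq)
{
	struct event *ev = calloc(1, sizeof(*ev));

	if (!ev)
		return -1;
	ev->irq = irq;		/* "request" the resource */
	ev->next = g->events;	/* link it so shutdown can find it */
	g->events = ev;
	return 0;
}

/* Walk-and-free, releasing each resource exactly once; iteration is
 * safe because the next pointer is saved before the node is freed. */
static void shutdown(struct ged *g)
{
	struct event *ev, *next;

	for (ev = g->events; ev; ev = next) {
		next = ev->next;
		printf("releasing IRQ %d\n", ev->irq);
		free(ev);
	}
	g->events = NULL;
}

int main(void)
{
	struct ged g = { 0 };

	probe_one(&g, 11);
	probe_one(&g, 13);
	shutdown(&g);	/* releases 13 then 11 */
	return 0;
}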
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7f43423de43c..1d0a501bc7f0 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -159,7 +159,7 @@ static inline void acpi_early_processor_osc(void) {}
-------------------------------------------------------------------------- */
struct acpi_ec {
acpi_handle handle;
- unsigned long gpe;
+ u32 gpe;
unsigned long command_addr;
unsigned long data_addr;
bool global_lock;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index ff2580e7611d..abeb4df4f22e 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1670,6 +1670,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
dev_name(&adev_dimm->dev));
return -ENXIO;
}
+ /*
+ * Record nfit_mem for the notification path to track back to
+ * the nfit sysfs attributes for this dimm device object.
+ */
+ dev_set_drvdata(&adev_dimm->dev, nfit_mem);
/*
* Until standardization materializes we need to consider 4
@@ -1752,9 +1757,11 @@ static void shutdown_dimm_notify(void *data)
sysfs_put(nfit_mem->flags_attr);
nfit_mem->flags_attr = NULL;
}
- if (adev_dimm)
+ if (adev_dimm) {
acpi_remove_notify_handler(adev_dimm->handle,
ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
+ dev_set_drvdata(&adev_dimm->dev, NULL);
+ }
}
mutex_unlock(&acpi_desc->init_mutex);
}
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 917f1cc0fda4..8ccaae3550d2 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -460,8 +460,7 @@ int __init acpi_numa_init(void)
srat_proc, ARRAY_SIZE(srat_proc), 0);
cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
- acpi_parse_memory_affinity,
- NR_NODE_MEMBLKS);
+ acpi_parse_memory_affinity, 0);
}
/* SLIT: System Locality Information Table */
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index bc3d914dfc3e..85ad679390e3 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -612,7 +612,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_isa_irq_penalty[link->irq.active] +=
PIRQ_PENALTY_PCI_USING;
- printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
+ pr_info("%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
}
diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c
index 90011aad4d20..886ac8b93cd0 100644
--- a/drivers/acpi/pmic/intel_pmic_bxtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c
@@ -400,7 +400,7 @@ static int intel_bxtwc_pmic_opregion_probe(struct platform_device *pdev)
&intel_bxtwc_pmic_opregion_data);
}
-static struct platform_device_id bxt_wc_opregion_id_table[] = {
+static const struct platform_device_id bxt_wc_opregion_id_table[] = {
{ .name = "bxt_wcove_region" },
{},
};
@@ -412,9 +412,4 @@ static struct platform_driver intel_bxtwc_pmic_opregion_driver = {
},
.id_table = bxt_wc_opregion_id_table,
};
-
-static int __init intel_bxtwc_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_bxtwc_pmic_opregion_driver);
-}
-device_initcall(intel_bxtwc_pmic_opregion_driver_init);
+builtin_platform_driver(intel_bxtwc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index 109c1e9c9c7a..f6d73a243d80 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -131,7 +131,4 @@ static struct platform_driver chtdc_ti_pmic_opregion_driver = {
},
.id_table = chtdc_ti_pmic_opregion_id_table,
};
-module_platform_driver(chtdc_ti_pmic_opregion_driver);
-
-MODULE_DESCRIPTION("Dollar Cove TI PMIC opregion driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(chtdc_ti_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c
index 85636d7a9d39..9912422c8185 100644
--- a/drivers/acpi/pmic/intel_pmic_chtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_chtwc.c
@@ -260,11 +260,10 @@ static int intel_cht_wc_pmic_opregion_probe(struct platform_device *pdev)
&intel_cht_wc_pmic_opregion_data);
}
-static struct platform_device_id cht_wc_opregion_id_table[] = {
+static const struct platform_device_id cht_wc_opregion_id_table[] = {
{ .name = "cht_wcove_region" },
{},
};
-MODULE_DEVICE_TABLE(platform, cht_wc_opregion_id_table);
static struct platform_driver intel_cht_wc_pmic_opregion_driver = {
.probe = intel_cht_wc_pmic_opregion_probe,
@@ -273,8 +272,4 @@ static struct platform_driver intel_cht_wc_pmic_opregion_driver = {
},
.id_table = cht_wc_opregion_id_table,
};
-module_platform_driver(intel_cht_wc_pmic_opregion_driver);
-
-MODULE_DESCRIPTION("Intel CHT Whiskey Cove PMIC operation region driver");
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(intel_cht_wc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
index d7f1761ab1bc..7ffa74048107 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -201,9 +201,4 @@ static struct platform_driver intel_crc_pmic_opregion_driver = {
.name = "crystal_cove_pmic",
},
};
-
-static int __init intel_crc_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_crc_pmic_opregion_driver);
-}
-device_initcall(intel_crc_pmic_opregion_driver_init);
+builtin_platform_driver(intel_crc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 6c99d3f81095..316e55174aa9 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -278,9 +278,4 @@ static struct platform_driver intel_xpower_pmic_opregion_driver = {
.name = "axp288_pmic_acpi",
},
};
-
-static int __init intel_xpower_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_xpower_pmic_opregion_driver);
-}
-device_initcall(intel_xpower_pmic_opregion_driver_init);
+builtin_platform_driver(intel_xpower_pmic_opregion_driver);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d50a7b6ccddd..5f0071c7e2e1 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -207,6 +207,7 @@ static void tsc_check_state(int state)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
+ case X86_VENDOR_CENTAUR:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index e26ea209b63e..466d1503aba0 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1271,9 +1271,17 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return 0;
}
+static void *
+acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
+ const struct device *dev)
+{
+ return acpi_get_match_data(dev);
+}
+
#define DECLARE_ACPI_FWNODE_OPS(ops) \
const struct fwnode_operations ops = { \
.device_is_available = acpi_fwnode_device_is_available, \
+ .device_get_match_data = acpi_fwnode_device_get_match_data, \
.property_present = acpi_fwnode_property_present, \
.property_read_int_array = \
acpi_fwnode_property_read_int_array, \
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 8082871b409a..46cde0912762 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -367,10 +367,20 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
{},
};
+static bool ignore_blacklist;
+
+void __init acpi_sleep_no_blacklist(void)
+{
+ ignore_blacklist = true;
+}
+
static void __init acpi_sleep_dmi_check(void)
{
int year;
+ if (ignore_blacklist)
+ return;
+
if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2012)
acpi_nvs_nosave_s3();
@@ -697,7 +707,8 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_ENTRY 5
#define ACPI_LPS0_EXIT 6
-#define ACPI_S2IDLE_FUNC_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
+#define ACPI_LPS0_SCREEN_MASK ((1 << ACPI_LPS0_SCREEN_OFF) | (1 << ACPI_LPS0_SCREEN_ON))
+#define ACPI_LPS0_PLATFORM_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
@@ -900,7 +911,8 @@ static int lps0_device_attach(struct acpi_device *adev,
if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
char bitmask = *(char *)out_obj->buffer.pointer;
- if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) {
+ if ((bitmask & ACPI_LPS0_PLATFORM_MASK) == ACPI_LPS0_PLATFORM_MASK ||
+ (bitmask & ACPI_LPS0_SCREEN_MASK) == ACPI_LPS0_SCREEN_MASK) {
lps0_dsm_func_mask = bitmask;
lps0_device_handle = adev->handle;
/*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 06a150bb35bf..4fc59c3bc673 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -816,14 +816,8 @@ end:
* interface:
* echo unmask > /sys/firmware/acpi/interrupts/gpe00
*/
-
-/*
- * Currently, the GPE flooding prevention only supports to mask the GPEs
- * numbered from 00 to 7f.
- */
-#define ACPI_MASKABLE_GPE_MAX 0x80
-
-static u64 __initdata acpi_masked_gpes;
+#define ACPI_MASKABLE_GPE_MAX 0xFF
+static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
static int __init acpi_gpe_set_masked_gpes(char *val)
{
@@ -831,7 +825,7 @@ static int __init acpi_gpe_set_masked_gpes(char *val)
if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
return -EINVAL;
- acpi_masked_gpes |= ((u64)1<<gpe);
+ set_bit(gpe, acpi_masked_gpes_map);
return 1;
}
@@ -843,15 +837,11 @@ void __init acpi_gpe_apply_masked_gpes(void)
acpi_status status;
u8 gpe;
- for (gpe = 0;
- gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
- gpe++) {
- if (acpi_masked_gpes & ((u64)1<<gpe)) {
- status = acpi_get_gpe_device(gpe, &handle);
- if (ACPI_SUCCESS(status)) {
- pr_info("Masking GPE 0x%x.\n", gpe);
- (void)acpi_mask_gpe(handle, gpe, TRUE);
- }
+ for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
+ status = acpi_get_gpe_device(gpe, &handle);
+ if (ACPI_SUCCESS(status)) {
+ pr_info("Masking GPE 0x%x.\n", gpe);
+ (void)acpi_mask_gpe(handle, gpe, TRUE);
}
}
}
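
[Editor's note] Switching from a u64 mask to DECLARE_BITMAP() lifts the 64-bit ceiling of the old mask: the bitmap is sized in bits and walked with for_each_set_bit() instead of a manual shift loop. A hedged user-space analog of the set/iterate pattern:

#include <stdio.h>
#include <limits.h>

#define MAX_GPE 0xFF	/* new limit from the hunk above */
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_LONGS ((MAX_GPE + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long gpe_map[BITMAP_LONGS];	/* DECLARE_BITMAP analog */

static void set_bit_(unsigned n, unsigned long *map)
{
	map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

static int test_bit_(unsigned n, const unsigned long *map)
{
	return (map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1;
}

int main(void)
{
	unsigned gpe;

	set_bit_(0x6d, gpe_map);	/* a u64 mask could not hold bit 0x6d */
	set_bit_(0x08, gpe_map);

	/* user-space stand-in for for_each_set_bit() */
	for (gpe = 0; gpe < MAX_GPE; gpe++)
		if (test_bit_(gpe, gpe_map))
			printf("Masking GPE 0x%02x\n", gpe);
	return 0;
}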
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 9d49a1acebe3..78db97687f26 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -737,16 +737,17 @@ bool acpi_dev_found(const char *hid)
}
EXPORT_SYMBOL(acpi_dev_found);
-struct acpi_dev_present_info {
+struct acpi_dev_match_info {
+ const char *dev_name;
struct acpi_device_id hid[2];
const char *uid;
s64 hrv;
};
-static int acpi_dev_present_cb(struct device *dev, void *data)
+static int acpi_dev_match_cb(struct device *dev, void *data)
{
struct acpi_device *adev = to_acpi_device(dev);
- struct acpi_dev_present_info *match = data;
+ struct acpi_dev_match_info *match = data;
unsigned long long hrv;
acpi_status status;
@@ -757,6 +758,8 @@ static int acpi_dev_present_cb(struct device *dev, void *data)
strcmp(adev->pnp.unique_id, match->uid)))
return 0;
+ match->dev_name = acpi_dev_name(adev);
+
if (match->hrv == -1)
return 1;
@@ -789,20 +792,44 @@ static int acpi_dev_present_cb(struct device *dev, void *data)
*/
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
{
- struct acpi_dev_present_info match = {};
+ struct acpi_dev_match_info match = {};
struct device *dev;
strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
match.uid = uid;
match.hrv = hrv;
- dev = bus_find_device(&acpi_bus_type, NULL, &match,
- acpi_dev_present_cb);
-
+ dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
return !!dev;
}
EXPORT_SYMBOL(acpi_dev_present);
+/**
+ * acpi_dev_get_first_match_name - Return the name of the first matching ACPI device
+ * @hid: Hardware ID of the device.
+ * @uid: Unique ID of the device, pass NULL to not check _UID
+ * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
+ *
+ * Return device name if a matching device was present
+ * at the moment of invocation, or NULL otherwise.
+ *
+ * See additional information in acpi_dev_present() as well.
+ */
+const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+{
+ struct acpi_dev_match_info match = {};
+ struct device *dev;
+
+ strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
+ match.uid = uid;
+ match.hrv = hrv;
+
+ dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
+ return dev ? match.dev_name : NULL;
+}
+EXPORT_SYMBOL(acpi_dev_get_first_match_name);
+
/*
* acpi_backlight= handling, this is done here rather then in video_detect.c
* because __setup cannot be used in modules.
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index bccec9de0533..cc89d0d2b965 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -482,7 +482,8 @@ enum binder_deferred_state {
* @tsk task_struct for group_leader of process
* (invariant after initialized)
* @files files_struct for process
- * (invariant after initialized)
+ * (protected by @files_lock)
+ * @files_lock mutex to protect @files
* @deferred_work_node: element for binder_deferred_list
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
@@ -530,6 +531,7 @@ struct binder_proc {
int pid;
struct task_struct *tsk;
struct files_struct *files;
+ struct mutex files_lock;
struct hlist_node deferred_work_node;
int deferred_work;
bool is_dead;
@@ -877,20 +879,26 @@ static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
- struct files_struct *files = proc->files;
unsigned long rlim_cur;
unsigned long irqs;
+ int ret;
- if (files == NULL)
- return -ESRCH;
-
- if (!lock_task_sighand(proc->tsk, &irqs))
- return -EMFILE;
-
+ mutex_lock(&proc->files_lock);
+ if (proc->files == NULL) {
+ ret = -ESRCH;
+ goto err;
+ }
+ if (!lock_task_sighand(proc->tsk, &irqs)) {
+ ret = -EMFILE;
+ goto err;
+ }
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
unlock_task_sighand(proc->tsk, &irqs);
- return __alloc_fd(files, 0, rlim_cur, flags);
+ ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
+err:
+ mutex_unlock(&proc->files_lock);
+ return ret;
}
/*
@@ -899,8 +907,10 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
static void task_fd_install(
struct binder_proc *proc, unsigned int fd, struct file *file)
{
+ mutex_lock(&proc->files_lock);
if (proc->files)
__fd_install(proc->files, fd, file);
+ mutex_unlock(&proc->files_lock);
}
/*
@@ -910,9 +920,11 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
int retval;
- if (proc->files == NULL)
- return -ESRCH;
-
+ mutex_lock(&proc->files_lock);
+ if (proc->files == NULL) {
+ retval = -ESRCH;
+ goto err;
+ }
retval = __close_fd(proc->files, fd);
/* can't restart close syscall because file table entry was cleared */
if (unlikely(retval == -ERESTARTSYS ||
@@ -920,7 +932,8 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
retval == -ERESTARTNOHAND ||
retval == -ERESTART_RESTARTBLOCK))
retval = -EINTR;
-
+err:
+ mutex_unlock(&proc->files_lock);
return retval;
}
@@ -4298,7 +4311,7 @@ static int binder_thread_release(struct binder_proc *proc,
return active_transactions;
}
-static unsigned int binder_poll(struct file *filp,
+static __poll_t binder_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct binder_proc *proc = filp->private_data;
@@ -4627,7 +4640,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
ret = binder_alloc_mmap_handler(&proc->alloc, vma);
if (ret)
return ret;
+ mutex_lock(&proc->files_lock);
proc->files = get_files_struct(current);
+ mutex_unlock(&proc->files_lock);
return 0;
err_bad_arg:
@@ -4651,6 +4666,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
+ mutex_init(&proc->files_lock);
INIT_LIST_HEAD(&proc->todo);
proc->default_priority = task_nice(current);
binder_dev = container_of(filp->private_data, struct binder_device,
@@ -4903,9 +4919,11 @@ static void binder_deferred_func(struct work_struct *work)
files = NULL;
if (defer & BINDER_DEFERRED_PUT_FILES) {
+ mutex_lock(&proc->files_lock);
files = proc->files;
if (files)
proc->files = NULL;
+ mutex_unlock(&proc->files_lock);
}
if (defer & BINDER_DEFERRED_FLUSH)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cb5339166563..a7120d621154 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -92,6 +92,25 @@ config SATA_AHCI
If unsure, say N.
+config SATA_MOBILE_LPM_POLICY
+ int "Default SATA Link Power Management policy for mobile chipsets"
+ range 0 4
+ default 0
+ depends on SATA_AHCI
+ help
+	  Select the default SATA Link Power Management (LPM) policy to use
+ for mobile / laptop variants of chipsets / "South Bridges".
+
+ The value set has the following meanings:
+ 0 => Keep firmware settings
+ 1 => Maximum performance
+ 2 => Medium power
+ 3 => Medium power with Device Initiated PM enabled
+ 4 => Minimum power
+
+ Note "Minimum power" is known to cause issues, including disk
+ corruption, with some disks and should not be used.
+
config SATA_AHCI_PLATFORM
tristate "Platform AHCI SATA support"
help
@@ -925,15 +944,6 @@ endif # ATA_BMDMA
comment "PIO-only SFF controllers"
-config PATA_AT32
- tristate "Atmel AVR32 PATA support (Experimental)"
- depends on AVR32 && PLATFORM_AT32AP
- help
- This option enables support for the IDE devices on the
- Atmel AT32AP platform.
-
- If unsure, say N.
-
config PATA_CMD640_PCI
tristate "CMD640 PCI PATA support (Experimental)"
depends on PCI
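
[Editor's note] The five values in the SATA_MOBILE_LPM_POLICY help text above map one-to-one onto the LPM policies the AHCI driver applies at host init for mobile chipsets. A hedged sketch of the mapping; the names are illustrative stand-ins, not the kernel's own policy enum:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's LPM policy identifiers. */
static const char *const lpm_policy_names[] = {
	[0] = "keep firmware settings",
	[1] = "max performance",
	[2] = "medium power",
	[3] = "medium power with DIPM",
	[4] = "min power",	/* known to corrupt data on some disks */
};

int main(void)
{
	int policy = 3;	/* stand-in for CONFIG_SATA_MOBILE_LPM_POLICY */

	if (policy < 0 || policy > 4) {
		fprintf(stderr, "invalid LPM policy %d\n", policy);
		return 1;
	}
	printf("mobile LPM default: %s\n", lpm_policy_names[policy]);
	return 0;
}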
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8daec3e657f8..f1f5a3fbc777 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -96,7 +96,6 @@ obj-$(CONFIG_PATA_VIA) += pata_via.o
obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
# SFF PIO only
-obj-$(CONFIG_PATA_AT32) += pata_at32.o
obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
obj-$(CONFIG_PATA_FALCON) += pata_falcon.o
obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5443cb71d7ba..355a95a83a34 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -65,6 +65,7 @@ enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
+ board_ahci_mobile,
board_ahci_nomsi,
board_ahci_noncq,
board_ahci_nosntf,
@@ -140,6 +141,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_mobile] = {
+ AHCI_HFLAGS (AHCI_HFLAG_IS_MOBILE),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_nomsi] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
.flags = AHCI_FLAG_COMMON,
@@ -252,13 +260,13 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x2929), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292a), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292b), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292c), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292f), board_ahci_mobile }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x294e), board_ahci_mobile }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
@@ -268,9 +276,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_mobile }, /* PCH M AHCI */
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
@@ -293,9 +301,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
- { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_mobile }, /* CPT M RAID */
{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
@@ -304,28 +312,28 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
{ PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
- { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_mobile }, /* Panther M AHCI */
{ PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_mobile }, /* Panther M RAID */
{ PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
- { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_mobile }, /* Lynx M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_mobile }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_mobile }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_mobile }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_mobile }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_mobile }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
@@ -353,26 +361,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
- { PCI_VDEVICE(INTEL, 0x9c83), board_ahci }, /* Wildcat Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_mobile }, /* Wildcat LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_mobile }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_mobile }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_mobile }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
- { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_mobile }, /* 9 Series M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_mobile }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_mobile }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_mobile }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_mobile }, /* Sunrise LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_mobile }, /* Sunrise LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_mobile }, /* Sunrise LP RAID */
{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci_mobile }, /* Sunrise M AHCI */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci_mobile }, /* Sunrise M RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
@@ -386,6 +394,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
+ { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -593,6 +606,9 @@ static int marvell_enable = 1;
module_param(marvell_enable, int, 0644);
MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
+static int mobile_lpm_policy = CONFIG_SATA_MOBILE_LPM_POLICY;
+module_param(mobile_lpm_policy, int, 0644);
+MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
@@ -1728,6 +1744,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = hpriv->em_msg_type;
+ if ((hpriv->flags & AHCI_HFLAG_IS_MOBILE) &&
+ mobile_lpm_policy >= ATA_LPM_UNKNOWN &&
+ mobile_lpm_policy <= ATA_LPM_MIN_POWER)
+ ap->target_lpm_policy = mobile_lpm_policy;
/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
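Note: mobile_lpm_policy only takes effect on ports whose board entry carries AHCI_HFLAG_IS_MOBILE, and only when the value lies inside the libata LPM enum, as the range check above shows. The same check factored into a helper, purely for illustration (the helper name is hypothetical; the patch open-codes the comparison):

    static bool mobile_lpm_policy_valid(int policy)
    {
        /* ATA_LPM_UNKNOWN and ATA_LPM_MIN_POWER bound enum ata_lpm_policy;
         * out-of-range module parameter values are silently ignored and
         * the board default is kept. */
        return policy >= ATA_LPM_UNKNOWN && policy <= ATA_LPM_MIN_POWER;
    }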
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 749fd94441b0..a9d996e17d75 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -251,6 +251,9 @@ enum {
AHCI_HFLAG_YES_ALPM = (1 << 23), /* force ALPM cap on */
AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read
only registers */
+ AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use
+ SATA_MOBILE_LPM_POLICY
+ as default lpm_policy */
/* ap->flags bits */
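Note: the board_ahci_mobile entry referenced throughout the PCI table is not shown in this excerpt; presumably it mirrors board_ahci with the new hflag set, along these lines:

    [board_ahci_mobile] = {
        AHCI_HFLAGS(AHCI_HFLAG_IS_MOBILE),  /* opt in to mobile_lpm_policy */
        .flags      = AHCI_FLAG_COMMON,
        .pio_mask   = ATA_PIO4,
        .udma_mask  = ATA_UDMA6,
        .port_ops   = &ahci_ops,
    },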
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 5936d1679bf3..ea430819c80b 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -70,6 +70,13 @@
(DATA_ENDIAN << DMADESC_ENDIAN_SHIFT) | \
(MMIO_ENDIAN << MMIO_ENDIAN_SHIFT))
+#define BUS_CTRL_ENDIAN_NSP_CONF \
+ (0x02 << DMADATA_ENDIAN_SHIFT | 0x02 << DMADESC_ENDIAN_SHIFT)
+
+#define BUS_CTRL_ENDIAN_CONF_MASK \
+ (0x3 << MMIO_ENDIAN_SHIFT | 0x3 << DMADESC_ENDIAN_SHIFT | \
+ 0x3 << DMADATA_ENDIAN_SHIFT | 0x3 << PIODATA_ENDIAN_SHIFT)
+
enum brcm_ahci_version {
BRCM_SATA_BCM7425 = 1,
BRCM_SATA_BCM7445,
@@ -89,14 +96,6 @@ struct brcm_ahci_priv {
enum brcm_ahci_version version;
};
-static const struct ata_port_info ahci_brcm_port_info = {
- .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
- .link_flags = ATA_LFLAG_NO_DB_DELAY,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_platform_ops,
-};
-
static inline u32 brcm_sata_readreg(void __iomem *addr)
{
/*
@@ -250,20 +249,105 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
static void brcm_sata_init(struct brcm_ahci_priv *priv)
{
void __iomem *ctrl = priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL;
+ u32 data;
/* Configure endianness */
- if (priv->version == BRCM_SATA_NSP) {
- u32 data = brcm_sata_readreg(ctrl);
-
- data &= ~((0x03 << DMADATA_ENDIAN_SHIFT) |
- (0x03 << DMADESC_ENDIAN_SHIFT));
- data |= (0x02 << DMADATA_ENDIAN_SHIFT) |
- (0x02 << DMADESC_ENDIAN_SHIFT);
- brcm_sata_writereg(data, ctrl);
- } else
- brcm_sata_writereg(BUS_CTRL_ENDIAN_CONF, ctrl);
+ data = brcm_sata_readreg(ctrl);
+ data &= ~BUS_CTRL_ENDIAN_CONF_MASK;
+ if (priv->version == BRCM_SATA_NSP)
+ data |= BUS_CTRL_ENDIAN_NSP_CONF;
+ else
+ data |= BUS_CTRL_ENDIAN_CONF;
+ brcm_sata_writereg(data, ctrl);
+}
+
+static unsigned int brcm_ahci_read_id(struct ata_device *dev,
+ struct ata_taskfile *tf, u16 *id)
+{
+ struct ata_port *ap = dev->link->ap;
+ struct ata_host *host = ap->host;
+ struct ahci_host_priv *hpriv = host->private_data;
+ struct brcm_ahci_priv *priv = hpriv->plat_data;
+ void __iomem *mmio = hpriv->mmio;
+ unsigned int err_mask;
+ unsigned long flags;
+ int i, rc;
+ u32 ctl;
+
+ /* Try to read the device ID and, if this fails, proceed with the
+ * recovery sequence below
+ */
+ err_mask = ata_do_dev_read_id(dev, tf, id);
+ if (likely(!err_mask))
+ return err_mask;
+
+ /* Disable host interrupts */
+ spin_lock_irqsave(&host->lock, flags);
+ ctl = readl(mmio + HOST_CTL);
+ ctl &= ~HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /* Perform the SATA PHY reset sequence */
+ brcm_sata_phy_disable(priv, ap->port_no);
+
+ /* Bring the PHY back on */
+ brcm_sata_phy_enable(priv, ap->port_no);
+
+ /* Re-initialize and calibrate the PHY */
+ for (i = 0; i < hpriv->nports; i++) {
+ rc = phy_init(hpriv->phys[i]);
+ if (rc)
+ goto disable_phys;
+
+ rc = phy_calibrate(hpriv->phys[i]);
+ if (rc) {
+ phy_exit(hpriv->phys[i]);
+ goto disable_phys;
+ }
+ }
+
+ /* Re-enable host interrupts */
+ spin_lock_irqsave(&host->lock, flags);
+ ctl = readl(mmio + HOST_CTL);
+ ctl |= HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ata_do_dev_read_id(dev, tf, id);
+
+disable_phys:
+ while (--i >= 0) {
+ phy_power_off(hpriv->phys[i]);
+ phy_exit(hpriv->phys[i]);
+ }
+
+ return AC_ERR_OTHER;
+}
+
+static void brcm_ahci_host_stop(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+
+ ahci_platform_disable_resources(hpriv);
}
+static struct ata_port_operations ahci_brcm_platform_ops = {
+ .inherits = &ahci_ops,
+ .host_stop = brcm_ahci_host_stop,
+ .read_id = brcm_ahci_read_id,
+};
+
+static const struct ata_port_info ahci_brcm_port_info = {
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
+ .link_flags = ATA_LFLAG_NO_DB_DELAY,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_brcm_platform_ops,
+};
+
#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
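Note on the ahci_brcm changes above: brcm_ahci_read_id() works around Broadcom SATA PHYs that can come up in a bad state. If the initial IDENTIFY read fails, it masks host interrupts, power-cycles and recalibrates the port PHYs, then retries exactly once. Stripped of the PHY plumbing, the control flow is:

    err_mask = ata_do_dev_read_id(dev, tf, id);
    if (likely(!err_mask))
        return err_mask;                     /* common case: no recovery */
    /* ...disable IRQs, reset/re-init/calibrate PHYs, re-enable IRQs... */
    return ata_do_dev_read_id(dev, tf, id);  /* single retry */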
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index b702c20fbc2b..7ecb1322a514 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -458,7 +458,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
[ich8_2port_sata_byt] = &ich8_2port_map_db,
};
-static struct pci_bits piix_enable_bits[] = {
+static const struct pci_bits piix_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
};
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8193b38a1cae..3c09122bf038 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4449,6 +4449,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
* https://bugzilla.kernel.org/show_bug.cgi?id=121671
*/
{ "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+ { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
/* Devices we expect to fail diagnostics */
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
deleted file mode 100644
index 9aeb7a6dd4d4..000000000000
--- a/drivers/ata/pata_at32.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * AVR32 SMC/CFC PATA Driver
- *
- * Copyright (C) 2007 Atmel Norway
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- */
-
-#define DEBUG
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <scsi/scsi_host.h>
-#include <linux/ata.h>
-#include <linux/libata.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#include <mach/board.h>
-#include <mach/smc.h>
-
-#define DRV_NAME "pata_at32"
-#define DRV_VERSION "0.0.3"
-
-/*
- * CompactFlash controller memory layout relative to the base address:
- *
- * Attribute memory: 0000 0000 -> 003f ffff
- * Common memory: 0040 0000 -> 007f ffff
- * I/O memory: 0080 0000 -> 00bf ffff
- * True IDE Mode: 00c0 0000 -> 00df ffff
- * Alt IDE Mode: 00e0 0000 -> 00ff ffff
- *
- * Only True IDE and Alt True IDE mode are needed for this driver.
- *
- * True IDE mode => CS0 = 0, CS1 = 1 (cmd, error, stat, etc)
- * Alt True IDE mode => CS0 = 1, CS1 = 0 (ctl, alt_stat)
- */
-#define CF_IDE_OFFSET 0x00c00000
-#define CF_ALT_IDE_OFFSET 0x00e00000
-#define CF_RES_SIZE 2048
-
-/*
- * Define DEBUG_BUS if you are doing debugging of your own EBI -> PATA
- * adaptor with a logic analyzer or similar.
- */
-#undef DEBUG_BUS
-
-/*
- * ATA PIO modes
- *
- * Name | Mb/s | Min cycle time | Mask
- * --------+-------+----------------+--------
- * Mode 0 | 3.3 | 600 ns | 0x01
- * Mode 1 | 5.2 | 383 ns | 0x03
- * Mode 2 | 8.3 | 240 ns | 0x07
- * Mode 3 | 11.1 | 180 ns | 0x0f
- * Mode 4 | 16.7 | 120 ns | 0x1f
- *
- * Alter PIO_MASK below according to table to set maximal PIO mode.
- */
-enum {
- PIO_MASK = ATA_PIO4,
-};
-
-/*
- * Struct containing private information about device.
- */
-struct at32_ide_info {
- unsigned int irq;
- struct resource res_ide;
- struct resource res_alt;
- void __iomem *ide_addr;
- void __iomem *alt_addr;
- unsigned int cs;
- struct smc_config smc;
-};
-
-/*
- * Setup SMC for the given ATA timing.
- */
-static int pata_at32_setup_timing(struct device *dev,
- struct at32_ide_info *info,
- const struct ata_timing *ata)
-{
- struct smc_config *smc = &info->smc;
- struct smc_timing timing;
-
- int active;
- int recover;
-
- memset(&timing, 0, sizeof(struct smc_timing));
-
- /* Total cycle time */
- timing.read_cycle = ata->cyc8b;
-
- /* DIOR <= CFIOR timings */
- timing.nrd_setup = ata->setup;
- timing.nrd_pulse = ata->act8b;
- timing.nrd_recover = ata->rec8b;
-
- /* Convert nanosecond timing to clock cycles */
- smc_set_timing(smc, &timing);
-
- /* Add one extra cycle setup due to signal ring */
- smc->nrd_setup = smc->nrd_setup + 1;
-
- active = smc->nrd_setup + smc->nrd_pulse;
- recover = smc->read_cycle - active;
-
- /* Need at least two cycles recovery */
- if (recover < 2)
- smc->read_cycle = active + 2;
-
- /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
- smc->ncs_read_setup = 1;
- smc->ncs_read_pulse = smc->read_cycle - 2;
-
- /* Write timings same as read timings */
- smc->write_cycle = smc->read_cycle;
- smc->nwe_setup = smc->nrd_setup;
- smc->nwe_pulse = smc->nrd_pulse;
- smc->ncs_write_setup = smc->ncs_read_setup;
- smc->ncs_write_pulse = smc->ncs_read_pulse;
-
- /* Do some debugging output of ATA and SMC timings */
- dev_dbg(dev, "ATA: C=%d S=%d P=%d R=%d\n",
- ata->cyc8b, ata->setup, ata->act8b, ata->rec8b);
-
- dev_dbg(dev, "SMC: C=%d S=%d P=%d NS=%d NP=%d\n",
- smc->read_cycle, smc->nrd_setup, smc->nrd_pulse,
- smc->ncs_read_setup, smc->ncs_read_pulse);
-
- /* Finally, configure the SMC */
- return smc_set_configuration(info->cs, smc);
-}
-
-/*
- * Procedures for libATA.
- */
-static void pata_at32_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing timing;
- struct at32_ide_info *info = ap->host->private_data;
-
- int ret;
-
- /* Compute ATA timing */
- ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0);
- if (ret) {
- dev_warn(ap->dev, "Failed to compute ATA timing %d\n", ret);
- return;
- }
-
- /* Setup SMC to ATA timing */
- ret = pata_at32_setup_timing(ap->dev, info, &timing);
- if (ret) {
- dev_warn(ap->dev, "Failed to setup ATA timing %d\n", ret);
- return;
- }
-}
-
-static struct scsi_host_template at32_sht = {
- ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations at32_port_ops = {
- .inherits = &ata_sff_port_ops,
- .cable_detect = ata_cable_40wire,
- .set_piomode = pata_at32_set_piomode,
-};
-
-static int __init pata_at32_init_one(struct device *dev,
- struct at32_ide_info *info)
-{
- struct ata_host *host;
- struct ata_port *ap;
-
- host = ata_host_alloc(dev, 1);
- if (!host)
- return -ENOMEM;
-
- ap = host->ports[0];
-
- /* Setup ATA bindings */
- ap->ops = &at32_port_ops;
- ap->pio_mask = PIO_MASK;
- ap->flags |= ATA_FLAG_SLAVE_POSS;
-
- /*
- * Since all 8-bit taskfile transfers has to go on the lower
- * byte of the data bus and there is a bug in the SMC that
- * makes it impossible to alter the bus width during runtime,
- * we need to hardwire the address signals as follows:
- *
- * A_IDE(2:0) <= A_EBI(3:1)
- *
- * This makes all addresses on the EBI even, thus all data
- * will be on the lower byte of the data bus. All addresses
- * used by libATA need to be altered according to this.
- */
- ap->ioaddr.altstatus_addr = info->alt_addr + (0x06 << 1);
- ap->ioaddr.ctl_addr = info->alt_addr + (0x06 << 1);
-
- ap->ioaddr.data_addr = info->ide_addr + (ATA_REG_DATA << 1);
- ap->ioaddr.error_addr = info->ide_addr + (ATA_REG_ERR << 1);
- ap->ioaddr.feature_addr = info->ide_addr + (ATA_REG_FEATURE << 1);
- ap->ioaddr.nsect_addr = info->ide_addr + (ATA_REG_NSECT << 1);
- ap->ioaddr.lbal_addr = info->ide_addr + (ATA_REG_LBAL << 1);
- ap->ioaddr.lbam_addr = info->ide_addr + (ATA_REG_LBAM << 1);
- ap->ioaddr.lbah_addr = info->ide_addr + (ATA_REG_LBAH << 1);
- ap->ioaddr.device_addr = info->ide_addr + (ATA_REG_DEVICE << 1);
- ap->ioaddr.status_addr = info->ide_addr + (ATA_REG_STATUS << 1);
- ap->ioaddr.command_addr = info->ide_addr + (ATA_REG_CMD << 1);
-
- /* Set info as private data of ATA host */
- host->private_data = info;
-
- /* Register ATA device and return */
- return ata_host_activate(host, info->irq, ata_sff_interrupt,
- IRQF_SHARED | IRQF_TRIGGER_RISING,
- &at32_sht);
-}
-
-/*
- * This function may come in handy for people analyzing their own
- * EBI -> PATA adaptors.
- */
-#ifdef DEBUG_BUS
-
-static void __init pata_at32_debug_bus(struct device *dev,
- struct at32_ide_info *info)
-{
- const int d1 = 0xff;
- const int d2 = 0x00;
-
- int i;
-
- /* Write 8-bit values (registers) */
- iowrite8(d1, info->alt_addr + (0x06 << 1));
- iowrite8(d2, info->alt_addr + (0x06 << 1));
-
- for (i = 0; i < 8; i++) {
- iowrite8(d1, info->ide_addr + (i << 1));
- iowrite8(d2, info->ide_addr + (i << 1));
- }
-
- /* Write 16 bit values (data) */
- iowrite16(d1, info->ide_addr);
- iowrite16(d1 << 8, info->ide_addr);
-
- iowrite16(d1, info->ide_addr);
- iowrite16(d1 << 8, info->ide_addr);
-}
-
-#endif
-
-static int __init pata_at32_probe(struct platform_device *pdev)
-{
- const struct ata_timing initial_timing =
- {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
-
- struct device *dev = &pdev->dev;
- struct at32_ide_info *info;
- struct ide_platform_data *board = dev_get_platdata(&pdev->dev);
- struct resource *res;
-
- int irq;
- int ret;
-
- if (!board)
- return -ENXIO;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
- /* Retrive IRQ */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- /* Setup struct containing private information */
- info = kzalloc(sizeof(struct at32_ide_info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->irq = irq;
- info->cs = board->cs;
-
- /* Request memory resources */
- info->res_ide.start = res->start + CF_IDE_OFFSET;
- info->res_ide.end = info->res_ide.start + CF_RES_SIZE - 1;
- info->res_ide.name = "ide";
- info->res_ide.flags = IORESOURCE_MEM;
-
- ret = request_resource(res, &info->res_ide);
- if (ret)
- goto err_req_res_ide;
-
- info->res_alt.start = res->start + CF_ALT_IDE_OFFSET;
- info->res_alt.end = info->res_alt.start + CF_RES_SIZE - 1;
- info->res_alt.name = "alt";
- info->res_alt.flags = IORESOURCE_MEM;
-
- ret = request_resource(res, &info->res_alt);
- if (ret)
- goto err_req_res_alt;
-
- /* Setup non-timing elements of SMC */
- info->smc.bus_width = 2; /* 16 bit data bus */
- info->smc.nrd_controlled = 1; /* Sample data on rising edge of NRD */
- info->smc.nwe_controlled = 0; /* Drive data on falling edge of NCS */
- info->smc.nwait_mode = 3; /* NWAIT is in READY mode */
- info->smc.byte_write = 0; /* Byte select access type */
- info->smc.tdf_mode = 0; /* TDF optimization disabled */
- info->smc.tdf_cycles = 0; /* No TDF wait cycles */
-
- /* Setup SMC to ATA timing */
- ret = pata_at32_setup_timing(dev, info, &initial_timing);
- if (ret)
- goto err_setup_timing;
-
- /* Map ATA address space */
- ret = -ENOMEM;
- info->ide_addr = devm_ioremap(dev, info->res_ide.start, 16);
- info->alt_addr = devm_ioremap(dev, info->res_alt.start, 16);
- if (!info->ide_addr || !info->alt_addr)
- goto err_ioremap;
-
-#ifdef DEBUG_BUS
- pata_at32_debug_bus(dev, info);
-#endif
-
- /* Setup and register ATA device */
- ret = pata_at32_init_one(dev, info);
- if (ret)
- goto err_ata_device;
-
- return 0;
-
- err_ata_device:
- err_ioremap:
- err_setup_timing:
- release_resource(&info->res_alt);
- err_req_res_alt:
- release_resource(&info->res_ide);
- err_req_res_ide:
- kfree(info);
-
- return ret;
-}
-
-static int __exit pata_at32_remove(struct platform_device *pdev)
-{
- struct ata_host *host = platform_get_drvdata(pdev);
- struct at32_ide_info *info;
-
- if (!host)
- return 0;
-
- info = host->private_data;
- ata_host_detach(host);
-
- if (!info)
- return 0;
-
- release_resource(&info->res_ide);
- release_resource(&info->res_alt);
-
- kfree(info);
-
- return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:at32_ide");
-
-static struct platform_driver pata_at32_driver = {
- .remove = __exit_p(pata_at32_remove),
- .driver = {
- .name = "at32_ide",
- },
-};
-
-module_platform_driver_probe(pata_at32_driver, pata_at32_probe);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("AVR32 SMC/CFC PATA Driver");
-MODULE_AUTHOR("Kristoffer Nyborg Gregertsen <kngregertsen@norway.atmel.com>");
-MODULE_VERSION(DRV_VERSION);
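Note: removing pata_at32 wholesale is consistent with the AVR32 architecture having been deleted from the tree earlier; with <mach/smc.h> and the AT32 boards gone, the driver had no remaining users and could no longer even be built.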
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 49d705c9f0f7..4d49fd3c927b 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -278,6 +278,10 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
const struct ata_port_info *ppi[] = { &info, &info };
+ /* The SB600 doesn't have the secondary port wired */
+ if (pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE)
+ ppi[1] = &ata_dummy_port_info;
+
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
}
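Note: pointing ppi[1] at ata_dummy_port_info keeps libata from probing, and then repeatedly error-handling, a secondary channel that is not wired up on SB600 parts; the dummy port_info registers the port as a no-op placeholder instead.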
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 7a21edf89e72..8468b300193b 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -683,7 +683,7 @@ static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len)
ioread16_rep(ap->ioaddr.data_addr, buf, len/2);
return (u8 *)buf;
}
- mdelay(1);
+ usleep_range(500, 1000);
}
kfree(buf);
printk(KERN_ERR "it821x_firmware_command: timeout\n");
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 6db2e34bd52f..1a18e675ba9f 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -580,7 +580,7 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
ioread16(mmio_base + PDC_PLL_CTL); /* flush */
/* Wait the PLL circuit to be stable */
- mdelay(30);
+ msleep(30);
#ifdef PDC_DEBUG
/*
@@ -620,7 +620,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
start_time = ktime_get();
/* Let the counter run for 100 ms. */
- mdelay(100);
+ msleep(100);
/* Read the counter values again */
end_count = pdc_read_counter(host);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index cc208b72b199..42d4589b43d4 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -3596,7 +3596,7 @@ static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
hpriv->ops->phy_errata(hpriv, mmio, port_no);
if (IS_GEN_I(hpriv))
- mdelay(1);
+ usleep_range(500, 1000);
}
static void mv_pmp_select(struct ata_port *ap, int pmp)
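Note: all three mdelay() conversions above (pata_it821x, pata_pdc2027x, sata_mv) run in process context, where busy-waiting just burns CPU. The usual guidance from Documentation/timers/timers-howto.txt, in sketch form:

    /* In process (non-atomic) context:
     *   up to ~10 us       : udelay()        -- too short to sleep
     *   ~10 us to ~20 ms   : usleep_range()  -- hrtimer-backed sleep; the
     *                                           min/max range allows timer
     *                                           coalescing
     *   ~10 ms and above   : msleep()        -- jiffies granularity is fine
     * mdelay() busy-waits and should be reserved for atomic context.
     */
    usleep_range(500, 1000);   /* e.g. the old mdelay(1), now 0.5-1 ms asleep */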
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index ce47eb17901d..6470e3c4c990 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -473,7 +473,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
ENI_PRV_POS(skb) = eni_vcc->descr+size+1;
skb_queue_tail(&eni_dev->rx_queue,skb);
eni_vcc->last = skb;
-rx_enqueued++;
+ rx_enqueued++;
}
eni_vcc->descr = here;
eni_out(dma_wr,MID_DMA_WR_RX);
@@ -715,7 +715,7 @@ static void get_service(struct atm_dev *dev)
else eni_dev->slow = vcc;
eni_dev->last_slow = vcc;
}
-putting++;
+ putting++;
ENI_VCC(vcc)->servicing++;
}
}
@@ -744,7 +744,7 @@ static void dequeue_rx(struct atm_dev *dev)
}
EVENT("dequeued (size=%ld,pos=0x%lx)\n",ENI_PRV_SIZE(skb),
ENI_PRV_POS(skb));
-rx_dequeued++;
+ rx_dequeued++;
vcc = ATM_SKB(skb)->vcc;
eni_vcc = ENI_VCC(vcc);
first = 0;
@@ -1174,7 +1174,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
DPRINTK("dma_wr set to %d, tx_pos is now %ld\n",dma_wr,tx->tx_pos);
eni_out(dma_wr,MID_DMA_WR_TX);
skb_queue_tail(&eni_dev->tx_queue,skb);
-queued++;
+ queued++;
return enq_ok;
}
@@ -1195,7 +1195,7 @@ static void poll_tx(struct atm_dev *dev)
if (res == enq_ok) continue;
DPRINTK("re-queuing TX PDU\n");
skb_queue_head(&tx->backlog,skb);
-requeued++;
+ requeued++;
if (res == enq_jam) return;
break;
}
@@ -1232,7 +1232,7 @@ static void dequeue_tx(struct atm_dev *dev)
else dev_kfree_skb_irq(skb);
atomic_inc(&vcc->stats->tx);
wake_up(&eni_dev->tx_wait);
-dma_complete++;
+ dma_complete++;
}
}
@@ -1555,7 +1555,7 @@ static void eni_tasklet(unsigned long data)
}
if (events & MID_TX_COMPLETE) {
EVENT("INT: TX COMPLETE\n",0,0);
-tx_complete++;
+ tx_complete++;
wake_up(&eni_dev->tx_wait);
/* poll_rx ? */
}
@@ -2069,14 +2069,14 @@ static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb)
}
*(u32 *) skb->data = htonl(*(u32 *) skb->data);
}
-submitted++;
+ submitted++;
ATM_SKB(skb)->vcc = vcc;
tasklet_disable(&ENI_DEV(vcc->dev)->task);
res = do_tx(skb);
tasklet_enable(&ENI_DEV(vcc->dev)->task);
if (res == enq_ok) return 0;
skb_queue_tail(&ENI_VCC(vcc)->tx->backlog,skb);
-backlogged++;
+ backlogged++;
tasklet_schedule(&ENI_DEV(vcc->dev)->task);
return 0;
}
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index bdc87907d6a1..2415ad9f6dd4 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -236,6 +236,9 @@ config GENERIC_CPU_DEVICES
config GENERIC_CPU_AUTOPROBE
bool
+config GENERIC_CPU_VULNERABILITIES
+ bool
+
config SOC_BUS
bool
select GLOB
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index eb3af2739537..07532d83be0b 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -186,6 +186,11 @@ static void cache_associativity(struct cacheinfo *this_leaf)
this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
+static bool cache_node_is_unified(struct cacheinfo *this_leaf)
+{
+ return of_property_read_bool(this_leaf->of_node, "cache-unified");
+}
+
static void cache_of_override_properties(unsigned int cpu)
{
int index;
@@ -194,6 +199,14 @@ static void cache_of_override_properties(unsigned int cpu)
for (index = 0; index < cache_leaves(cpu); index++) {
this_leaf = this_cpu_ci->info_list + index;
+ /*
+ * init_cache_level must set up the cache level correctly,
+ * overriding the architecturally specified levels, so
+ * if the type is NONE at this stage, it should be unified
+ */
+ if (this_leaf->type == CACHE_TYPE_NOCACHE &&
+ cache_node_is_unified(this_leaf))
+ this_leaf->type = CACHE_TYPE_UNIFIED;
cache_size(this_leaf);
cache_get_line_size(this_leaf);
cache_nr_sets(this_leaf);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 58a9b608d821..d99038487a0d 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -511,10 +511,58 @@ static void __init cpu_dev_register_generic(void)
#endif
}
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+
+ssize_t __weak cpu_show_meltdown(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+
+static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+ &dev_attr_spectre_v1.attr,
+ &dev_attr_spectre_v2.attr,
+ NULL
+};
+
+static const struct attribute_group cpu_root_vulnerabilities_group = {
+ .name = "vulnerabilities",
+ .attrs = cpu_root_vulnerabilities_attrs,
+};
+
+static void __init cpu_register_vulnerabilities(void)
+{
+ if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpu_root_vulnerabilities_group))
+ pr_err("Unable to register CPU vulnerabilities\n");
+}
+
+#else
+static inline void cpu_register_vulnerabilities(void) { }
+#endif
+
void __init cpu_dev_init(void)
{
if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
panic("Failed to register CPU subsystem");
cpu_dev_register_generic();
+ cpu_register_vulnerabilities();
}
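Note: the __weak stubs make "Not affected" the default for any architecture that selects GENERIC_CPU_VULNERABILITIES but does not override a given file; an architecture reports real state by providing a strong definition. A sketch of such an override, using x86-style names purely for illustration (this hunk adds only the generic stubs):

    ssize_t cpu_show_meltdown(struct device *dev,
                              struct device_attribute *attr, char *buf)
    {
        if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
            return sprintf(buf, "Not affected\n");
        if (boot_cpu_has(X86_FEATURE_PTI))
            return sprintf(buf, "Mitigation: PTI\n");
        return sprintf(buf, "Vulnerable\n");
    }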
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 1d60b58a8c19..fe4b24f05f6a 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -569,7 +569,7 @@ store_hard_offline_page(struct device *dev,
if (kstrtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
- ret = memory_failure(pfn, 0, 0);
+ ret = memory_failure(pfn, 0);
return ret ? ret : count;
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 0c80bea05bcb..528b24149bc7 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1032,15 +1032,12 @@ static int genpd_prepare(struct device *dev)
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
struct generic_pm_domain *genpd;
- int ret;
+ int ret = 0;
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return -EINVAL;
- if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
- return 0;
-
if (poweroff)
ret = pm_generic_poweroff_noirq(dev);
else
@@ -1048,10 +1045,19 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
if (ret)
return ret;
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_suspend(dev);
- if (ret)
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
+ return 0;
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret) {
+ if (poweroff)
+ pm_generic_restore_noirq(dev);
+ else
+ pm_generic_resume_noirq(dev);
return ret;
+ }
}
genpd_lock(genpd);
@@ -1085,7 +1091,7 @@ static int genpd_suspend_noirq(struct device *dev)
static int genpd_resume_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
- int ret = 0;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -1094,21 +1100,21 @@ static int genpd_resume_noirq(struct device *dev)
return -EINVAL;
if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
- return 0;
+ return pm_generic_resume_noirq(dev);
genpd_lock(genpd);
genpd_sync_power_on(genpd, true, 0);
genpd->suspended_count--;
genpd_unlock(genpd);
- if (genpd->dev_ops.stop && genpd->dev_ops.start)
- ret = pm_runtime_force_resume(dev);
-
- ret = pm_generic_resume_noirq(dev);
- if (ret)
- return ret;
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ return ret;
+ }
- return ret;
+ return pm_generic_resume_noirq(dev);
}
/**
@@ -1135,8 +1141,9 @@ static int genpd_freeze_noirq(struct device *dev)
if (ret)
return ret;
- if (genpd->dev_ops.stop && genpd->dev_ops.start)
- ret = pm_runtime_force_suspend(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev))
+ ret = genpd_stop_dev(genpd, dev);
return ret;
}
@@ -1159,8 +1166,9 @@ static int genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_resume(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
if (ret)
return ret;
}
@@ -1217,8 +1225,9 @@ static int genpd_restore_noirq(struct device *dev)
genpd_sync_power_on(genpd, true, 0);
genpd_unlock(genpd);
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_resume(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
if (ret)
return ret;
}
@@ -2199,20 +2208,8 @@ int genpd_dev_pm_attach(struct device *dev)
ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells", 0, &pd_args);
- if (ret < 0) {
- if (ret != -ENOENT)
- return ret;
-
- /*
- * Try legacy Samsung-specific bindings
- * (for backwards compatibility of DT ABI)
- */
- pd_args.args_count = 0;
- pd_args.np = of_parse_phandle(dev->of_node,
- "samsung,power-domain", 0);
- if (!pd_args.np)
- return -ENOENT;
- }
+ if (ret < 0)
+ return ret;
mutex_lock(&gpd_list_lock);
pd = genpd_get_from_provider(&pd_args);
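Note: the same gating pattern recurs in all four genpd noirq callbacks above (suspend, resume, freeze/thaw, restore): the domain stops or starts the device's clocks itself only when runtime PM has not already left the device suspended, rather than round-tripping through pm_runtime_force_suspend()/pm_runtime_force_resume(). The recurring shape, with names as in the patch:

    if (genpd->dev_ops.stop && genpd->dev_ops.start &&
        !pm_runtime_status_suspended(dev)) {
        ret = genpd_stop_dev(genpd, dev);   /* genpd_start_dev() on resume */
        if (ret)
            return ret;
    }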
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index db2f04415927..02a497e7c785 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -18,7 +18,6 @@
*/
#include <linux/device.h>
-#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
@@ -526,6 +525,88 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
/*------------------------- Resume routines -------------------------*/
/**
+ * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
+ * @dev: Target device.
+ *
+ * Make the core skip the "early resume" and "resume" phases for @dev.
+ *
+ * This function can be called by middle-layer code during the "noirq" phase of
+ * system resume if necessary, but not by device drivers.
+ */
+void dev_pm_skip_next_resume_phases(struct device *dev)
+{
+ dev->power.is_late_suspended = false;
+ dev->power.is_suspended = false;
+}
+
+/**
+ * suspend_event - Return a "suspend" message for given "resume" one.
+ * @resume_msg: PM message representing a system-wide resume transition.
+ */
+static pm_message_t suspend_event(pm_message_t resume_msg)
+{
+ switch (resume_msg.event) {
+ case PM_EVENT_RESUME:
+ return PMSG_SUSPEND;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RESTORE:
+ return PMSG_FREEZE;
+ case PM_EVENT_RECOVER:
+ return PMSG_HIBERNATE;
+ }
+ return PMSG_ON;
+}
+
+/**
+ * dev_pm_may_skip_resume - System-wide device resume optimization check.
+ * @dev: Target device.
+ *
+ * Checks whether or not the device may be left in suspend after a system-wide
+ * transition to the working state.
+ */
+bool dev_pm_may_skip_resume(struct device *dev)
+{
+ return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
+}
+
+static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
+static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p);
+
+static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p);
+
+/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -536,8 +617,9 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
*/
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
+ bool skip_resume;
int error = 0;
TRACE_DEVICE(dev);
@@ -551,29 +633,61 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
dpm_wait_for_superior(dev, async);
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
+ skip_resume = dev_pm_may_skip_resume(dev);
+
+ callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
+ if (callback)
+ goto Run;
+
+ if (skip_resume)
+ goto Skip;
+
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ pm_message_t suspend_msg = suspend_event(state);
+
+ /*
+ * If "freeze" callbacks have been skipped during a transition
+ * related to hibernation, the subsequent "thaw" callbacks must
+ * be skipped too or bad things may happen. Otherwise, resume
+ * callbacks are going to be run for the device, so its runtime
+ * PM status must be changed to reflect the new state after the
+ * transition under way.
+ */
+ if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
+ !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
+ if (state.event == PM_EVENT_THAW) {
+ skip_resume = true;
+ goto Skip;
+ } else {
+ pm_runtime_set_active(dev);
+ }
+ }
}
- if (!callback && dev->driver && dev->driver->pm) {
+ if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
dev->power.is_noirq_suspended = false;
- Out:
+ if (skip_resume) {
+ /*
+ * The device is going to be left in suspend, but it might not
+ * have been in runtime suspend before the system suspended, so
+ * its runtime PM status needs to be updated to avoid confusing
+ * the runtime PM framework when runtime PM is enabled for the
+ * device again.
+ */
+ pm_runtime_set_suspended(dev);
+ dev_pm_skip_next_resume_phases(dev);
+ }
+
+Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
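Note: taken together, the noirq resume changes let a device that was left suspended stay suspended: dev_pm_may_skip_resume() consults the must_resume flag computed during suspend, and pm_runtime_set_suspended()/pm_runtime_set_active() rewrite the runtime PM status so the framework's bookkeeping matches the device's real state afterwards. A driver opts in via the new flag, typically together with DPM_FLAG_SMART_SUSPEND (sketch; dev_pm_set_driver_flags() is the existing helper for setting driver flags):

    /* In the driver's probe path: */
    dev_pm_set_driver_flags(&pdev->dev,
                            DPM_FLAG_SMART_SUSPEND | DPM_FLAG_LEAVE_SUSPENDED);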
@@ -666,6 +780,35 @@ void dpm_resume_noirq(pm_message_t state)
dpm_noirq_end();
}
+static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "early power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "early type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "early class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "early bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
@@ -676,8 +819,8 @@ void dpm_resume_noirq(pm_message_t state)
*/
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
int error = 0;
TRACE_DEVICE(dev);
@@ -691,19 +834,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
dpm_wait_for_superior(dev, async);
- if (dev->pm_domain) {
- info = "early power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "early type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "early class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "early bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_resume_early_cb(dev, state, &info);
if (!callback && dev->driver && dev->driver->pm) {
info = "early driver ";
@@ -1074,6 +1205,77 @@ static pm_message_t resume_event(pm_message_t sleep_state)
return PMSG_ON;
}
+static void dpm_superior_set_must_resume(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ if (dev->parent)
+ dev->parent->power.must_resume = true;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ link->supplier->power.must_resume = true;
+
+ device_links_read_unlock(idx);
+}
+
+static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
+static bool device_must_resume(struct device *dev, pm_message_t state,
+ bool no_subsys_suspend_noirq)
+{
+ pm_message_t resume_msg = resume_event(state);
+
+ /*
+ * If all of the device driver's "noirq", "late" and "early" callbacks
+ * are invoked directly by the core, the decision to allow the device to
+ * stay in suspend can be based on its current runtime PM status and its
+ * wakeup settings.
+ */
+ if (no_subsys_suspend_noirq &&
+ !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
+ !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
+ !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
+ return !pm_runtime_status_suspended(dev) &&
+ (resume_msg.event != PM_EVENT_RESUME ||
+ (device_can_wakeup(dev) && !device_may_wakeup(dev)));
+
+ /*
+ * The only safe strategy here is to require that if the device may not
+ * be left in suspend, resume callbacks must be invoked for it.
+ */
+ return !dev->power.may_skip_resume;
+}
+
/**
* __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
@@ -1085,8 +1287,9 @@ static pm_message_t resume_event(pm_message_t sleep_state)
*/
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
+ bool no_subsys_cb = false;
int error = 0;
TRACE_DEVICE(dev);
@@ -1105,30 +1308,40 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
+ if (callback)
+ goto Run;
- if (!callback && dev->driver && dev->driver->pm) {
+ no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
+
+ if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
- if (!error)
- dev->power.is_noirq_suspended = true;
- else
+ if (error) {
async_error = error;
+ goto Complete;
+ }
+
+Skip:
+ dev->power.is_noirq_suspended = true;
+
+ if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
+ dev->power.must_resume = dev->power.must_resume ||
+ atomic_read(&dev->power.usage_count) > 1 ||
+ device_must_resume(dev, state, no_subsys_cb);
+ } else {
+ dev->power.must_resume = true;
+ }
+
+ if (dev->power.must_resume)
+ dpm_superior_set_must_resume(dev);
Complete:
complete_all(&dev->power.completion);
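Note: the usage_count > 1 test above mirrors pm_runtime_need_not_resume() in runtime.c further down: the PM core itself takes one runtime PM reference on every device for the duration of a sleep transition, so a count above 1 means some other party is actively using the device and it must be resumed. And when a device must resume, dpm_superior_set_must_resume() marks its parent and all of its device-link suppliers as must_resume too, since a resumed consumer cannot rely on a suspended supplier.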
@@ -1234,6 +1447,50 @@ int dpm_suspend_noirq(pm_message_t state)
return ret;
}
+static void dpm_propagate_wakeup_to_parent(struct device *dev)
+{
+ struct device *parent = dev->parent;
+
+ if (!parent)
+ return;
+
+ spin_lock_irq(&parent->power.lock);
+
+ if (dev->power.wakeup_path && !parent->power.ignore_children)
+ parent->power.wakeup_path = true;
+
+ spin_unlock_irq(&parent->power.lock);
+}
+
+static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "late type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "late class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "late bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
/**
* __device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
@@ -1244,8 +1501,8 @@ int dpm_suspend_noirq(pm_message_t state)
*/
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
int error = 0;
TRACE_DEVICE(dev);
@@ -1266,30 +1523,29 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- if (dev->pm_domain) {
- info = "late power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "late type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "late class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "late bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_suspend_late_cb(dev, state, &info);
+ if (callback)
+ goto Run;
- if (!callback && dev->driver && dev->driver->pm) {
+ if (dev_pm_smart_suspend_and_suspended(dev) &&
+ !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
info = "late driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
- if (!error)
- dev->power.is_late_suspended = true;
- else
+ if (error) {
async_error = error;
+ goto Complete;
+ }
+ dpm_propagate_wakeup_to_parent(dev);
+
+Skip:
+ dev->power.is_late_suspended = true;
Complete:
TRACE_SUSPEND(error);
@@ -1420,11 +1676,17 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
-static void dpm_clear_suppliers_direct_complete(struct device *dev)
+static void dpm_clear_superiors_direct_complete(struct device *dev)
{
struct device_link *link;
int idx;
+ if (dev->parent) {
+ spin_lock_irq(&dev->parent->power.lock);
+ dev->parent->power.direct_complete = false;
+ spin_unlock_irq(&dev->parent->power.lock);
+ }
+
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
@@ -1485,6 +1747,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dev->power.direct_complete = false;
}
+ dev->power.may_skip_resume = false;
+ dev->power.must_resume = false;
+
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -1528,20 +1793,12 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
End:
if (!error) {
- struct device *parent = dev->parent;
-
dev->power.is_suspended = true;
- if (parent) {
- spin_lock_irq(&parent->power.lock);
-
- dev->parent->power.direct_complete = false;
- if (dev->power.wakeup_path
- && !dev->parent->power.ignore_children)
- dev->parent->power.wakeup_path = true;
+ if (device_may_wakeup(dev))
+ dev->power.wakeup_path = true;
- spin_unlock_irq(&parent->power.lock);
- }
- dpm_clear_suppliers_direct_complete(dev);
+ dpm_propagate_wakeup_to_parent(dev);
+ dpm_clear_superiors_direct_complete(dev);
}
device_unlock(dev);
@@ -1650,8 +1907,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
if (dev->power.syscore)
return 0;
- WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
- !pm_runtime_enabled(dev));
+ WARN_ON(!pm_runtime_enabled(dev) &&
+ dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED));
/*
* If a device's parent goes into runtime suspend at the wrong time,
@@ -1663,7 +1921,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
- dev->power.wakeup_path = device_may_wakeup(dev);
+ dev->power.wakeup_path = false;
if (dev->power.no_pm_callbacks) {
ret = 1; /* Let device go direct_complete */
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 7beee75399d4..21244c53e377 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -41,20 +41,15 @@ extern void dev_pm_disable_wake_irq_check(struct device *dev);
#ifdef CONFIG_PM_SLEEP
-extern int device_wakeup_attach_irq(struct device *dev,
- struct wake_irq *wakeirq);
+extern void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq);
extern void device_wakeup_detach_irq(struct device *dev);
extern void device_wakeup_arm_wake_irqs(void);
extern void device_wakeup_disarm_wake_irqs(void);
#else
-static inline int
-device_wakeup_attach_irq(struct device *dev,
- struct wake_irq *wakeirq)
-{
- return 0;
-}
+static inline void device_wakeup_attach_irq(struct device *dev,
+ struct wake_irq *wakeirq) {}
static inline void device_wakeup_detach_irq(struct device *dev)
{
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 6e89b51ea3d9..8bef3cb2424d 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1613,22 +1613,34 @@ void pm_runtime_drop_link(struct device *dev)
spin_unlock_irq(&dev->power.lock);
}
+static bool pm_runtime_need_not_resume(struct device *dev)
+{
+ return atomic_read(&dev->power.usage_count) <= 1 &&
+ (atomic_read(&dev->power.child_count) == 0 ||
+ dev->power.ignore_children);
+}
+
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
*
* Disable runtime PM so we safely can check the device's runtime PM status and
- * if it is active, invoke it's .runtime_suspend callback to bring it into
- * suspend state. Keep runtime PM disabled to preserve the state unless we
- * encounter errors.
+ * if it is active, invoke its ->runtime_suspend callback to suspend it and
+ * change its runtime PM status field to RPM_SUSPENDED. Also, if the device's
+ * usage and children counters don't indicate that the device was in use before
+ * the system-wide transition under way, decrement its parent's children counter
+ * (if there is a parent). Keep runtime PM disabled to preserve the state
+ * unless we encounter errors.
*
* Typically this function may be invoked from a system suspend callback to make
- * sure the device is put into low power state.
+ * sure the device is put into low power state and it should only be used during
+ * system-wide PM transitions to sleep states. It assumes that the analogous
+ * pm_runtime_force_resume() will be used to resume the device.
*/
int pm_runtime_force_suspend(struct device *dev)
{
int (*callback)(struct device *);
- int ret = 0;
+ int ret;
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev))
@@ -1636,27 +1648,23 @@ int pm_runtime_force_suspend(struct device *dev)
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
- if (!callback) {
- ret = -ENOSYS;
- goto err;
- }
-
- ret = callback(dev);
+ ret = callback ? callback(dev) : 0;
if (ret)
goto err;
/*
- * Increase the runtime PM usage count for the device's parent, in case
- * when we find the device being used when system suspend was invoked.
- * This informs pm_runtime_force_resume() to resume the parent
- * immediately, which is needed to be able to resume its children,
- * when not deferring the resume to be managed via runtime PM.
+ * If the device can stay in suspend after the system-wide transition
+ * to the working state that will follow, drop the children counter of
+ * its parent, but set its status to RPM_SUSPENDED anyway in case this
+ * function will be called again for it in the meantime.
*/
- if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
- pm_runtime_get_noresume(dev->parent);
+ if (pm_runtime_need_not_resume(dev))
+ pm_runtime_set_suspended(dev);
+ else
+ __update_runtime_status(dev, RPM_SUSPENDED);
- pm_runtime_set_suspended(dev);
return 0;
+
err:
pm_runtime_enable(dev);
return ret;
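Note: the split at the end of pm_runtime_force_suspend() is deliberate. pm_runtime_set_suspended() also adjusts the parent's children counter, which is only correct when the device genuinely need not resume; in the other case only the raw status field is rewritten via __update_runtime_status(), so the parent's accounting stays intact for the pm_runtime_force_resume() that is expected to follow.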
@@ -1669,13 +1677,9 @@ EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
*
* Prior invoking this function we expect the user to have brought the device
* into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and brings the device into full power, if it is expected to be
- * used on system resume. To distinguish that, we check whether the runtime PM
- * usage count is greater than 1 (the PM core increases the usage count in the
- * system PM prepare phase), as that indicates a real user (such as a subsystem,
- * driver, userspace, etc.) is using it. If that is the case, the device is
- * expected to be used on system resume as well, so then we resume it. In the
- * other case, we defer the resume to be managed via runtime PM.
+ * those actions and bring the device into full power, if it is expected to be
+ * used on system resume. In the other case, we defer the resume to be managed
+ * via runtime PM.
*
* Typically this function may be invoked from a system resume callback.
*/
@@ -1684,32 +1688,18 @@ int pm_runtime_force_resume(struct device *dev)
int (*callback)(struct device *);
int ret = 0;
- callback = RPM_GET_CALLBACK(dev, runtime_resume);
-
- if (!callback) {
- ret = -ENOSYS;
- goto out;
- }
-
- if (!pm_runtime_status_suspended(dev))
+ if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
goto out;
/*
- * Decrease the parent's runtime PM usage count, if we increased it
- * during system suspend in pm_runtime_force_suspend().
- */
- if (atomic_read(&dev->power.usage_count) > 1) {
- if (dev->parent)
- pm_runtime_put_noidle(dev->parent);
- } else {
- goto out;
- }
+ * The value of the parent's children counter is correct already, so
+ * just update the status of the device.
+ */
+ __update_runtime_status(dev, RPM_ACTIVE);
- ret = pm_runtime_set_active(dev);
- if (ret)
- goto out;
+ callback = RPM_GET_CALLBACK(dev, runtime_resume);
- ret = callback(dev);
+ ret = callback ? callback(dev) : 0;
if (ret) {
pm_runtime_set_suspended(dev);
goto out;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index e153e28b1857..0f651efc58a1 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -108,16 +108,10 @@ static ssize_t control_show(struct device *dev, struct device_attribute *attr,
static ssize_t control_store(struct device * dev, struct device_attribute *attr,
const char * buf, size_t n)
{
- char *cp;
- int len = n;
-
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
device_lock(dev);
- if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
+ if (sysfs_streq(buf, ctrl_auto))
pm_runtime_allow(dev);
- else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
+ else if (sysfs_streq(buf, ctrl_on))
pm_runtime_forbid(dev);
else
n = -EINVAL;
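Note: sysfs_streq() compares two strings while treating a trailing newline in either as end-of-string, so "auto" and "auto\n" both match; that is exactly what a sysfs store wants, and it replaces both the manual memchr()/strncmp() length dance here and the strcmp(buf, "n/a") || strcmp(buf, "n/a\n") pairs below:

    if (sysfs_streq(buf, ctrl_auto))    /* accepts "auto" and "auto\n" */
        pm_runtime_allow(dev);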
@@ -125,9 +119,9 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
return n;
}
-static DEVICE_ATTR(control, 0644, control_show, control_store);
+static DEVICE_ATTR_RW(control);
-static ssize_t rtpm_active_time_show(struct device *dev,
+static ssize_t runtime_active_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
@@ -138,9 +132,9 @@ static ssize_t rtpm_active_time_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_active_time);
-static ssize_t rtpm_suspended_time_show(struct device *dev,
+static ssize_t runtime_suspended_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
@@ -152,9 +146,9 @@ static ssize_t rtpm_suspended_time_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_suspended_time);
-static ssize_t rtpm_status_show(struct device *dev,
+static ssize_t runtime_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *p;
@@ -184,7 +178,7 @@ static ssize_t rtpm_status_show(struct device *dev,
return sprintf(buf, p);
}
-static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+static DEVICE_ATTR_RO(runtime_status);
static ssize_t autosuspend_delay_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
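Note: the show/store renames in this file (rtpm_status_show -> runtime_status_show, pm_qos_resume_latency_show -> pm_qos_resume_latency_us_show, and so on) are not cosmetic: DEVICE_ATTR_RO()/DEVICE_ATTR_RW() derive the callback names from the attribute name. Roughly, DEVICE_ATTR_RO(runtime_status) expands to:

    static struct device_attribute dev_attr_runtime_status = {
        .attr = { .name = "runtime_status", .mode = 0444 },
        .show = runtime_status_show,   /* must be named <attribute>_show */
    };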
@@ -211,26 +205,25 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
return n;
}
-static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
- autosuspend_delay_ms_store);
+static DEVICE_ATTR_RW(autosuspend_delay_ms);
-static ssize_t pm_qos_resume_latency_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s32 value = dev_pm_qos_requested_resume_latency(dev);
if (value == 0)
return sprintf(buf, "n/a\n");
- else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
value = 0;
return sprintf(buf, "%d\n", value);
}
-static ssize_t pm_qos_resume_latency_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
+static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
{
s32 value;
int ret;
@@ -245,7 +238,7 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
if (value == 0)
value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
- } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
+ } else if (sysfs_streq(buf, "n/a")) {
value = 0;
} else {
return -EINVAL;
@@ -256,26 +249,25 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
- pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+static DEVICE_ATTR_RW(pm_qos_resume_latency_us);
-static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
if (value < 0)
return sprintf(buf, "auto\n");
- else if (value == PM_QOS_LATENCY_ANY)
+ if (value == PM_QOS_LATENCY_ANY)
return sprintf(buf, "any\n");
return sprintf(buf, "%d\n", value);
}
-static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
+static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
{
s32 value;
int ret;
@@ -285,9 +277,9 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
if (value < 0)
return -EINVAL;
} else {
- if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+ if (sysfs_streq(buf, "auto"))
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
- else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+ else if (sysfs_streq(buf, "any"))
value = PM_QOS_LATENCY_ANY;
else
return -EINVAL;
@@ -296,8 +288,7 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
- pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
+static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);
static ssize_t pm_qos_no_power_off_show(struct device *dev,
struct device_attribute *attr,
@@ -323,49 +314,39 @@ static ssize_t pm_qos_no_power_off_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_no_power_off, 0644,
- pm_qos_no_power_off_show, pm_qos_no_power_off_store);
+static DEVICE_ATTR_RW(pm_qos_no_power_off);
#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";
-static ssize_t
-wake_show(struct device * dev, struct device_attribute *attr, char * buf)
+static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%s\n", device_can_wakeup(dev)
? (device_may_wakeup(dev) ? _enabled : _disabled)
: "");
}
-static ssize_t
-wake_store(struct device * dev, struct device_attribute *attr,
- const char * buf, size_t n)
+static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
{
- char *cp;
- int len = n;
-
if (!device_can_wakeup(dev))
return -EINVAL;
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
- if (len == sizeof _enabled - 1
- && strncmp(buf, _enabled, sizeof _enabled - 1) == 0)
+ if (sysfs_streq(buf, _enabled))
device_set_wakeup_enable(dev, 1);
- else if (len == sizeof _disabled - 1
- && strncmp(buf, _disabled, sizeof _disabled - 1) == 0)
+ else if (sysfs_streq(buf, _disabled))
device_set_wakeup_enable(dev, 0);
else
return -EINVAL;
return n;
}
-static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
+static DEVICE_ATTR_RW(wakeup);
static ssize_t wakeup_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -379,10 +360,11 @@ static ssize_t wakeup_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_count);
static ssize_t wakeup_active_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -396,11 +378,11 @@ static ssize_t wakeup_active_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active_count);
static ssize_t wakeup_abort_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -414,7 +396,7 @@ static ssize_t wakeup_abort_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_abort_count);
static ssize_t wakeup_expire_count_show(struct device *dev,
struct device_attribute *attr,
@@ -432,10 +414,10 @@ static ssize_t wakeup_expire_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_expire_count);
static ssize_t wakeup_active_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
unsigned int active = 0;
bool enabled = false;
@@ -449,10 +431,11 @@ static ssize_t wakeup_active_show(struct device *dev,
return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active);
-static ssize_t wakeup_total_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_total_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -466,10 +449,10 @@ static ssize_t wakeup_total_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_total_time_ms);
-static ssize_t wakeup_max_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_max_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -483,10 +466,11 @@ static ssize_t wakeup_max_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_max_time_ms);
-static ssize_t wakeup_last_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_last_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -500,12 +484,12 @@ static ssize_t wakeup_last_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_last_time_ms);
#ifdef CONFIG_PM_AUTOSLEEP
-static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -519,40 +503,39 @@ static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444,
- wakeup_prevent_sleep_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_ADVANCED_DEBUG
-static ssize_t rtpm_usagecount_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_usage_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
+static DEVICE_ATTR_RO(runtime_usage);
-static ssize_t rtpm_children_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_active_kids_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", dev->power.ignore_children ?
0 : atomic_read(&dev->power.child_count));
}
+static DEVICE_ATTR_RO(runtime_active_kids);
-static ssize_t rtpm_enabled_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
+ if (dev->power.disable_depth && (dev->power.runtime_auto == false))
return sprintf(buf, "disabled & forbidden\n");
- else if (dev->power.disable_depth)
+ if (dev->power.disable_depth)
return sprintf(buf, "disabled\n");
- else if (dev->power.runtime_auto == false)
+ if (dev->power.runtime_auto == false)
return sprintf(buf, "forbidden\n");
return sprintf(buf, "enabled\n");
}
-
-static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
-static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
-static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+static DEVICE_ATTR_RO(runtime_enabled);
#ifdef CONFIG_PM_SLEEP
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
@@ -566,23 +549,16 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr,
static ssize_t async_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
- char *cp;
- int len = n;
-
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
- if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0)
+ if (sysfs_streq(buf, _enabled))
device_enable_async_suspend(dev);
- else if (len == sizeof _disabled - 1 &&
- strncmp(buf, _disabled, len) == 0)
+ else if (sysfs_streq(buf, _disabled))
device_disable_async_suspend(dev);
else
return -EINVAL;
return n;
}
-static DEVICE_ATTR(async, 0644, async_show, async_store);
+static DEVICE_ATTR_RW(async);
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM_ADVANCED_DEBUG */
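
Reviewer note: every store() conversion in this file follows the same shape, since sysfs_streq() already tolerates the trailing newline that the old memchr()/strncmp() pairs trimmed by hand, and DEVICE_ATTR_RW()/DEVICE_ATTR_RO() rely on the _show/_store suffix convention. A self-contained sketch of the resulting pattern (example and example_state are hypothetical names, not from the patch):

#include <linux/device.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static bool example_state;

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%s\n", example_state ? "enabled" : "disabled");
}

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t n)
{
	if (sysfs_streq(buf, "enabled"))	/* matches "enabled\n" too */
		example_state = true;
	else if (sysfs_streq(buf, "disabled"))
		example_state = false;
	else
		return -EINVAL;
	return n;
}
/* Expands to dev_attr_example wired to example_show/example_store. */
static DEVICE_ATTR_RW(example);
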
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index ae0429827f31..a8ac86e4d79e 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -33,7 +33,6 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
struct wake_irq *wirq)
{
unsigned long flags;
- int err;
if (!dev || !wirq)
return -EINVAL;
@@ -45,12 +44,11 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
return -EEXIST;
}
- err = device_wakeup_attach_irq(dev, wirq);
- if (!err)
- dev->power.wakeirq = wirq;
+ dev->power.wakeirq = wirq;
+ device_wakeup_attach_irq(dev, wirq);
spin_unlock_irqrestore(&dev->power.lock, flags);
- return err;
+ return 0;
}
/**
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 38559f04db2c..ea01621ed769 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -19,6 +19,11 @@
#include "power.h"
+#ifndef CONFIG_SUSPEND
+suspend_state_t pm_suspend_target_state;
+#define pm_suspend_target_state (PM_SUSPEND_ON)
+#endif
+
/*
* If set, the suspend/hibernate code will abort transitions to a sleep state
* if wakeup events are registered during or immediately before the transition.
@@ -268,6 +273,9 @@ int device_wakeup_enable(struct device *dev)
if (!dev || !dev->power.can_wakeup)
return -EINVAL;
+ if (pm_suspend_target_state != PM_SUSPEND_ON)
+ dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
+
ws = wakeup_source_register(dev_name(dev));
if (!ws)
return -ENOMEM;
@@ -291,22 +299,19 @@ EXPORT_SYMBOL_GPL(device_wakeup_enable);
*
* Call under the device's power.lock lock.
*/
-int device_wakeup_attach_irq(struct device *dev,
+void device_wakeup_attach_irq(struct device *dev,
struct wake_irq *wakeirq)
{
struct wakeup_source *ws;
ws = dev->power.wakeup;
- if (!ws) {
- dev_err(dev, "forgot to call call device_init_wakeup?\n");
- return -EINVAL;
- }
+ if (!ws)
+ return;
if (ws->wakeirq)
- return -EEXIST;
+ dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
ws->wakeirq = wakeirq;
- return 0;
}
/**
@@ -448,9 +453,7 @@ int device_init_wakeup(struct device *dev, bool enable)
device_set_wakeup_capable(dev, true);
ret = device_wakeup_enable(dev);
} else {
- if (dev->power.can_wakeup)
- device_wakeup_disable(dev);
-
+ device_wakeup_disable(dev);
device_set_wakeup_capable(dev, false);
}
@@ -464,9 +467,6 @@ EXPORT_SYMBOL_GPL(device_init_wakeup);
*/
int device_set_wakeup_enable(struct device *dev, bool enable)
{
- if (!dev || !dev->power.can_wakeup)
- return -EINVAL;
-
return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
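
Reviewer note: with the can_wakeup check folded into the enable/disable helpers, a driver-side consumer reduces to the sketch below (hypothetical example_* names, error handling trimmed to the essentials):

#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>

static int example_probe(struct platform_device *pdev)
{
	/* Mark the device wakeup-capable and enable wakeup in one call. */
	return device_init_wakeup(&pdev->dev, true);
}

static int example_remove(struct platform_device *pdev)
{
	/* Disable wakeup and clear the capability again on teardown. */
	return device_init_wakeup(&pdev->dev, false);
}
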
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 851b1b6596a4..96aa71c93daf 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
+#include <linux/of_irq.h>
#include <linux/property.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
@@ -997,6 +998,32 @@ fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_get_next_child_node);
/**
+ * fwnode_get_next_available_child_node - Return the next
+ * available child node handle for a node
+ * @fwnode: Firmware node to find the next child node for.
+ * @child: Handle to one of the node's child nodes or a %NULL handle.
+ */
+struct fwnode_handle *
+fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
+{
+ struct fwnode_handle *next_child = child;
+
+ if (!fwnode)
+ return NULL;
+
+ do {
+ next_child = fwnode_get_next_child_node(fwnode, next_child);
+
+ if (!next_child || fwnode_device_is_available(next_child))
+ break;
+ } while (next_child);
+
+ return next_child;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node);
+
+/**
* device_get_next_child_node - Return the next child node handle for a device
* @dev: Device to find the next child node for.
* @child: Handle to one of the device's child nodes or a null handle.
@@ -1126,21 +1153,21 @@ enum dev_dma_attr device_get_dma_attr(struct device *dev)
EXPORT_SYMBOL_GPL(device_get_dma_attr);
/**
- * device_get_phy_mode - Get phy mode for given device
- * @dev: Pointer to the given device
+ * fwnode_get_phy_mode - Get phy mode for given firmware node
+ * @fwnode: Pointer to the given node
*
* The function gets phy interface string from property 'phy-mode' or
* 'phy-connection-type', and return its index in phy_modes table, or errno in
* error case.
*/
-int device_get_phy_mode(struct device *dev)
+int fwnode_get_phy_mode(struct fwnode_handle *fwnode)
{
const char *pm;
int err, i;
- err = device_property_read_string(dev, "phy-mode", &pm);
+ err = fwnode_property_read_string(fwnode, "phy-mode", &pm);
if (err < 0)
- err = device_property_read_string(dev,
+ err = fwnode_property_read_string(fwnode,
"phy-connection-type", &pm);
if (err < 0)
return err;
@@ -1151,13 +1178,27 @@ int device_get_phy_mode(struct device *dev)
return -ENODEV;
}
+EXPORT_SYMBOL_GPL(fwnode_get_phy_mode);
+
+/**
+ * device_get_phy_mode - Get phy mode for given device
+ * @dev: Pointer to the given device
+ *
+ * The function gets phy interface string from property 'phy-mode' or
+ * 'phy-connection-type', and return its index in phy_modes table, or errno in
+ * error case.
+ */
+int device_get_phy_mode(struct device *dev)
+{
+ return fwnode_get_phy_mode(dev_fwnode(dev));
+}
EXPORT_SYMBOL_GPL(device_get_phy_mode);
-static void *device_get_mac_addr(struct device *dev,
+static void *fwnode_get_mac_addr(struct fwnode_handle *fwnode,
const char *name, char *addr,
int alen)
{
- int ret = device_property_read_u8_array(dev, name, addr, alen);
+ int ret = fwnode_property_read_u8_array(fwnode, name, addr, alen);
if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
return addr;
@@ -1165,8 +1206,8 @@ static void *device_get_mac_addr(struct device *dev,
}
/**
- * device_get_mac_address - Get the MAC for a given device
- * @dev: Pointer to the device
+ * fwnode_get_mac_address - Get the MAC from the firmware node
+ * @fwnode: Pointer to the firmware node
* @addr: Address of buffer to store the MAC in
* @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
*
@@ -1187,23 +1228,60 @@ static void *device_get_mac_addr(struct device *dev,
* In this case, the real MAC is in 'local-mac-address', and 'mac-address'
* exists but is all zeros.
*/
-void *device_get_mac_address(struct device *dev, char *addr, int alen)
+void *fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr, int alen)
{
char *res;
- res = device_get_mac_addr(dev, "mac-address", addr, alen);
+ res = fwnode_get_mac_addr(fwnode, "mac-address", addr, alen);
if (res)
return res;
- res = device_get_mac_addr(dev, "local-mac-address", addr, alen);
+ res = fwnode_get_mac_addr(fwnode, "local-mac-address", addr, alen);
if (res)
return res;
- return device_get_mac_addr(dev, "address", addr, alen);
+ return fwnode_get_mac_addr(fwnode, "address", addr, alen);
+}
+EXPORT_SYMBOL(fwnode_get_mac_address);
+
+/**
+ * device_get_mac_address - Get the MAC for a given device
+ * @dev: Pointer to the device
+ * @addr: Address of buffer to store the MAC in
+ * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
+ */
+void *device_get_mac_address(struct device *dev, char *addr, int alen)
+{
+ return fwnode_get_mac_address(dev_fwnode(dev), addr, alen);
}
EXPORT_SYMBOL(device_get_mac_address);
/**
+ * fwnode_irq_get - Get IRQ directly from a fwnode
+ * @fwnode: Pointer to the firmware node
+ * @index: Zero-based index of the IRQ
+ *
+ * Returns the Linux IRQ number on success. Other return values are
+ * determined by the underlying acpi_irq_get()/of_irq_get() operation.
+ */
+int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index)
+{
+ struct device_node *of_node = to_of_node(fwnode);
+ struct resource res;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_OF) && of_node)
+ return of_irq_get(of_node, index);
+
+ ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), index, &res);
+ if (ret)
+ return ret;
+
+ return res.start;
+}
+EXPORT_SYMBOL(fwnode_irq_get);
+
+/**
* device_graph_get_next_endpoint - Get next endpoint firmware node
* @fwnode: Pointer to the parent firmware node
* @prev: Previous endpoint node or %NULL to get the first
@@ -1340,3 +1418,10 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return fwnode_call_int_op(fwnode, graph_parse_endpoint, endpoint);
}
EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
+
+void *device_get_match_data(struct device *dev)
+{
+ return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data,
+ dev);
+}
+EXPORT_SYMBOL_GPL(device_get_match_data);
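
Reviewer note: a plausible caller of the new helpers, walking only the available children of a firmware node and probing their interrupts. This is a sketch, not code from the patch; parent is assumed to come from the caller's context:

#include <linux/property.h>

static int example_count_wired_children(struct fwnode_handle *parent)
{
	struct fwnode_handle *child = NULL;
	int n = 0;

	while ((child = fwnode_get_next_available_child_node(parent, child))) {
		int irq = fwnode_irq_get(child, 0);

		if (irq > 0)	/* positive means a mapped Linux IRQ */
			n++;
	}
	return n;
}
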
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 3a1535d812d8..067073e4beb1 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,7 +6,6 @@
config REGMAP
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
select IRQ_DOMAIN if REGMAP_IRQ
- select REGMAP_HWSPINLOCK if HWSPINLOCK=y
bool
config REGCACHE_COMPRESSED
@@ -39,5 +38,6 @@ config REGMAP_MMIO
config REGMAP_IRQ
bool
-config REGMAP_HWSPINLOCK
- bool
+config REGMAP_SOUNDWIRE
+ tristate
+ depends on SOUNDWIRE_BUS
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 0d298c446108..22d263cca395 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
obj-$(CONFIG_REGMAP_W1) += regmap-w1.o
+obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 8641183cac2f..53785e0e297a 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -77,6 +77,7 @@ struct regmap {
int async_ret;
#ifdef CONFIG_DEBUG_FS
+ bool debugfs_disable;
struct dentry *debugfs;
const char *debugfs_name;
@@ -215,10 +216,17 @@ struct regmap_field {
extern void regmap_debugfs_initcall(void);
extern void regmap_debugfs_init(struct regmap *map, const char *name);
extern void regmap_debugfs_exit(struct regmap *map);
+
+static inline void regmap_debugfs_disable(struct regmap *map)
+{
+ map->debugfs_disable = true;
+}
+
#else
static inline void regmap_debugfs_initcall(void) { }
static inline void regmap_debugfs_init(struct regmap *map, const char *name) { }
static inline void regmap_debugfs_exit(struct regmap *map) { }
+static inline void regmap_debugfs_disable(struct regmap *map) { }
#endif
/* regcache core declarations */
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index 4d2e50bfc726..bc6cd88b8cc6 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -37,9 +37,12 @@ static int regcache_flat_init(struct regmap *map)
cache = map->cache;
- for (i = 0; i < map->num_reg_defaults; i++)
- cache[regcache_flat_get_index(map, map->reg_defaults[i].reg)] =
- map->reg_defaults[i].def;
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ unsigned int reg = map->reg_defaults[i].reg;
+ unsigned int index = regcache_flat_get_index(map, reg);
+
+ cache[index] = map->reg_defaults[i].def;
+ }
return 0;
}
@@ -56,8 +59,9 @@ static int regcache_flat_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
unsigned int *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
- *value = cache[regcache_flat_get_index(map, reg)];
+ *value = cache[index];
return 0;
}
@@ -66,8 +70,9 @@ static int regcache_flat_write(struct regmap *map, unsigned int reg,
unsigned int value)
{
unsigned int *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
- cache[regcache_flat_get_index(map, reg)] = value;
+ cache[index] = value;
return 0;
}
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 36ce3511c733..f3266334063e 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -529,6 +529,18 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
struct regmap_range_node *range_node;
const char *devname = "dummy";
+ /*
+ * Userspace can initiate reads from the hardware over debugfs.
+ * Normally internal regmap structures and buffers are protected with
+ * a mutex or a spinlock, but if the regmap owner decided to disable
+ * all locking mechanisms, this is no longer the case. For safety:
+ * don't create the debugfs entries if locking is disabled.
+ */
+ if (map->debugfs_disable) {
+ dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
+ return;
+ }
+
/* If we don't have the debugfs root yet, postpone init */
if (!regmap_debugfs_root) {
struct regmap_debugfs_node *node;
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
new file mode 100644
index 000000000000..50a66382d87d
--- /dev/null
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-17 Intel Corporation.
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/soundwire/sdw.h>
+#include "internal.h"
+
+static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+ return sdw_write(slave, reg, val);
+}
+
+static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ int read;
+
+ read = sdw_read(slave, reg);
+ if (read < 0)
+ return read;
+
+ *val = read;
+ return 0;
+}
+
+static struct regmap_bus regmap_sdw = {
+ .reg_read = regmap_sdw_read,
+ .reg_write = regmap_sdw_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static int regmap_sdw_config_check(const struct regmap_config *config)
+{
+ /* All register values are 8 bits wide as per the MIPI SoundWire 1.0 spec */
+ if (config->val_bits != 8)
+ return -ENOTSUPP;
+
+ /* Register addresses are 32 bits wide */
+ if (config->reg_bits != 32)
+ return -ENOTSUPP;
+
+ if (config->pad_bits != 0)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ int ret;
+
+ ret = regmap_sdw_config_check(config);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return __regmap_init(&sdw->dev, &regmap_sdw,
+ &sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_sdw);
+
+struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ int ret;
+
+ ret = regmap_sdw_config_check(config);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return __devm_regmap_init(&sdw->dev, &regmap_sdw,
+ &sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw);
+
+MODULE_DESCRIPTION("Regmap SoundWire Module");
+MODULE_LICENSE("GPL v2");
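
Reviewer note: a hypothetical SoundWire slave driver would consume this bus glue roughly as below, assuming the usual devm_regmap_init_sdw() wrapper macro lands in regmap.h alongside __devm_regmap_init_sdw(). The config mirrors regmap_sdw_config_check(): 32-bit register addresses, 8-bit values, no padding.

#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>

static const struct regmap_config example_sdw_regmap = {
	.reg_bits = 32,	/* required by regmap_sdw_config_check() */
	.val_bits = 8,
};

static int example_sdw_probe(struct sdw_slave *slave,
			     const struct sdw_device_id *id)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_sdw(slave, &example_sdw_regmap);
	return PTR_ERR_OR_ZERO(regmap);
}
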
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 8d516a9bfc01..ee302ccdfbc8 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -414,7 +414,6 @@ static unsigned int regmap_parse_64_native(const void *buf)
}
#endif
-#ifdef REGMAP_HWSPINLOCK
static void regmap_lock_hwlock(void *__map)
{
struct regmap *map = __map;
@@ -457,7 +456,11 @@ static void regmap_unlock_hwlock_irqrestore(void *__map)
hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}
-#endif
+
+static void regmap_lock_unlock_none(void *__map)
+{
+
+}
static void regmap_lock_mutex(void *__map)
{
@@ -669,16 +672,26 @@ struct regmap *__regmap_init(struct device *dev,
goto err;
}
- if (config->lock && config->unlock) {
+ if (config->name) {
+ map->name = kstrdup_const(config->name, GFP_KERNEL);
+ if (!map->name) {
+ ret = -ENOMEM;
+ goto err_map;
+ }
+ }
+
+ if (config->disable_locking) {
+ map->lock = map->unlock = regmap_lock_unlock_none;
+ regmap_debugfs_disable(map);
+ } else if (config->lock && config->unlock) {
map->lock = config->lock;
map->unlock = config->unlock;
map->lock_arg = config->lock_arg;
- } else if (config->hwlock_id) {
-#ifdef REGMAP_HWSPINLOCK
+ } else if (config->use_hwlock) {
map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
if (!map->hwlock) {
ret = -ENXIO;
- goto err_map;
+ goto err_name;
}
switch (config->hwlock_mode) {
@@ -697,10 +710,6 @@ struct regmap *__regmap_init(struct device *dev,
}
map->lock_arg = map;
-#else
- ret = -EINVAL;
- goto err_map;
-#endif
} else {
if ((bus && bus->fast_io) ||
config->fast_io) {
@@ -762,14 +771,15 @@ struct regmap *__regmap_init(struct device *dev,
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
map->cache_type = config->cache_type;
- map->name = config->name;
spin_lock_init(&map->async_lock);
INIT_LIST_HEAD(&map->async_list);
INIT_LIST_HEAD(&map->async_free);
init_waitqueue_head(&map->async_waitq);
- if (config->read_flag_mask || config->write_flag_mask) {
+ if (config->read_flag_mask ||
+ config->write_flag_mask ||
+ config->zero_flag_mask) {
map->read_flag_mask = config->read_flag_mask;
map->write_flag_mask = config->write_flag_mask;
} else if (bus) {
@@ -1116,8 +1126,10 @@ err_range:
regmap_range_exit(map);
kfree(map->work_buf);
err_hwlock:
- if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+ if (map->hwlock)
hwspin_lock_free(map->hwlock);
+err_name:
+ kfree_const(map->name);
err_map:
kfree(map);
err:
@@ -1305,8 +1317,9 @@ void regmap_exit(struct regmap *map)
kfree(async->work_buf);
kfree(async);
}
- if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+ if (map->hwlock)
hwspin_lock_free(map->hwlock);
+ kfree_const(map->name);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
@@ -2423,13 +2436,15 @@ static int _regmap_bus_read(void *context, unsigned int reg,
{
int ret;
struct regmap *map = context;
+ void *work_val = map->work_buf + map->format.reg_bytes +
+ map->format.pad_bytes;
if (!map->format.parse_val)
return -EINVAL;
- ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+ ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
if (ret == 0)
- *val = map->format.parse_val(map->work_buf);
+ *val = map->format.parse_val(work_val);
return ret;
}
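
Reviewer note: the regmap.c hunks introduce two mutually exclusive locking knobs in struct regmap_config: disable_locking, which installs the no-op lock/unlock callbacks (and, per the regmap-debugfs.c hunk, suppresses the debugfs entries), and use_hwlock, which replaces the old implicit hwlock_id != 0 test. Illustrative configs, with field values that are examples only:

#include <linux/hwspinlock.h>
#include <linux/regmap.h>

/* Caller guarantees serialization itself, e.g. single-threaded init. */
static const struct regmap_config example_nolock_cfg = {
	.reg_bits = 8,
	.val_bits = 8,
	.disable_locking = true,
};

/* Register file shared with another processor via a hardware spinlock. */
static const struct regmap_config example_hwlock_cfg = {
	.reg_bits = 8,
	.val_bits = 8,
	.use_hwlock = true,
	.hwlock_id = 1,
	.hwlock_mode = HWLOCK_IRQSTATE,
};
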
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 02d78f6cecbb..ba8acca036df 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -55,7 +55,7 @@ config BCMA_DRIVER_PCI
config BCMA_DRIVER_PCI_HOSTMODE
bool "Driver for PCI core working in hostmode"
- depends on MIPS && BCMA_DRIVER_PCI
+ depends on MIPS && BCMA_DRIVER_PCI && PCI_DRIVERS_LEGACY
help
PCI core hostmode operation (external PCI bus).
diff --git a/drivers/bcma/driver_pcie2.c b/drivers/bcma/driver_pcie2.c
index b1a6e327cb23..cf889fc62ac7 100644
--- a/drivers/bcma/driver_pcie2.c
+++ b/drivers/bcma/driver_pcie2.c
@@ -83,7 +83,8 @@ static void bcma_core_pcie2_hw_ltr_war(struct bcma_drv_pcie2 *pcie2)
bcma_core_pcie2_set_ltr_vals(pcie2);
/* TODO:
- si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0); */
+ * si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0);
+ */
/* enable the LTR */
devstsctr2 |= PCIE2_CAP_DEVSTSCTRL2_LTRENAB;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 442e777bdfb2..728075214959 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6619,43 +6619,27 @@ static void DAC960_DestroyProcEntries(DAC960_Controller_T *Controller)
#ifdef DAC960_GAM_MINOR
-/*
- * DAC960_gam_ioctl is the ioctl function for performing RAID operations.
-*/
-
-static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
- unsigned long Argument)
+static long DAC960_gam_get_controller_info(DAC960_ControllerInfo_T __user *UserSpaceControllerInfo)
{
- long ErrorCode = 0;
- if (!capable(CAP_SYS_ADMIN)) return -EACCES;
-
- mutex_lock(&DAC960_mutex);
- switch (Request)
- {
- case DAC960_IOCTL_GET_CONTROLLER_COUNT:
- ErrorCode = DAC960_ControllerCount;
- break;
- case DAC960_IOCTL_GET_CONTROLLER_INFO:
- {
- DAC960_ControllerInfo_T __user *UserSpaceControllerInfo =
- (DAC960_ControllerInfo_T __user *) Argument;
DAC960_ControllerInfo_T ControllerInfo;
DAC960_Controller_T *Controller;
int ControllerNumber;
+ long ErrorCode;
+
if (UserSpaceControllerInfo == NULL)
ErrorCode = -EINVAL;
else ErrorCode = get_user(ControllerNumber,
&UserSpaceControllerInfo->ControllerNumber);
if (ErrorCode != 0)
- break;
+ goto out;
ErrorCode = -ENXIO;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1) {
- break;
+ goto out;
}
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T));
ControllerInfo.ControllerNumber = ControllerNumber;
ControllerInfo.FirmwareType = Controller->FirmwareType;
@@ -6670,12 +6654,12 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
strcpy(ControllerInfo.FirmwareVersion, Controller->FirmwareVersion);
ErrorCode = (copy_to_user(UserSpaceControllerInfo, &ControllerInfo,
sizeof(DAC960_ControllerInfo_T)) ? -EFAULT : 0);
- break;
- }
- case DAC960_IOCTL_V1_EXECUTE_COMMAND:
- {
- DAC960_V1_UserCommand_T __user *UserSpaceUserCommand =
- (DAC960_V1_UserCommand_T __user *) Argument;
+out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v1_execute_command(DAC960_V1_UserCommand_T __user *UserSpaceUserCommand)
+{
DAC960_V1_UserCommand_T UserCommand;
DAC960_Controller_T *Controller;
DAC960_Command_T *Command = NULL;
@@ -6688,39 +6672,41 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
int ControllerNumber, DataTransferLength;
unsigned char *DataTransferBuffer = NULL;
dma_addr_t DataTransferBufferDMA;
+ long ErrorCode;
+
if (UserSpaceUserCommand == NULL) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&UserCommand, UserSpaceUserCommand,
sizeof(DAC960_V1_UserCommand_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ControllerNumber = UserCommand.ControllerNumber;
ErrorCode = -ENXIO;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
ErrorCode = -EINVAL;
if (Controller->FirmwareType != DAC960_V1_Controller)
- break;
+ goto out;
CommandOpcode = UserCommand.CommandMailbox.Common.CommandOpcode;
DataTransferLength = UserCommand.DataTransferLength;
if (CommandOpcode & 0x80)
- break;
+ goto out;
if (CommandOpcode == DAC960_V1_DCDB)
{
if (copy_from_user(&DCDB, UserCommand.DCDB,
sizeof(DAC960_V1_DCDB_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
if (DCDB.Channel >= DAC960_V1_MaxChannels)
- break;
+ goto out;
if (!((DataTransferLength == 0 &&
DCDB.Direction
== DAC960_V1_DCDB_NoDataTransfer) ||
@@ -6730,15 +6716,15 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
(DataTransferLength < 0 &&
DCDB.Direction
== DAC960_V1_DCDB_DataTransferSystemToDevice)))
- break;
+ goto out;
if (((DCDB.TransferLengthHigh4 << 16) | DCDB.TransferLength)
!= abs(DataTransferLength))
- break;
+ goto out;
DCDB_IOBUF = pci_alloc_consistent(Controller->PCIDevice,
sizeof(DAC960_V1_DCDB_T), &DCDB_IOBUFDMA);
if (DCDB_IOBUF == NULL) {
ErrorCode = -ENOMEM;
- break;
+ goto out;
}
}
ErrorCode = -ENOMEM;
@@ -6748,19 +6734,19 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DataTransferLength,
&DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
}
else if (DataTransferLength < 0)
{
DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
-DataTransferLength, &DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
if (copy_from_user(DataTransferBuffer,
UserCommand.DataTransferBuffer,
-DataTransferLength)) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
}
if (CommandOpcode == DAC960_V1_DCDB)
@@ -6837,12 +6823,12 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
if (DCDB_IOBUF != NULL)
pci_free_consistent(Controller->PCIDevice, sizeof(DAC960_V1_DCDB_T),
DCDB_IOBUF, DCDB_IOBUFDMA);
- break;
- }
- case DAC960_IOCTL_V2_EXECUTE_COMMAND:
- {
- DAC960_V2_UserCommand_T __user *UserSpaceUserCommand =
- (DAC960_V2_UserCommand_T __user *) Argument;
+ out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v2_execute_command(DAC960_V2_UserCommand_T __user *UserSpaceUserCommand)
+{
DAC960_V2_UserCommand_T UserCommand;
DAC960_Controller_T *Controller;
DAC960_Command_T *Command = NULL;
@@ -6855,26 +6841,26 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
dma_addr_t DataTransferBufferDMA;
unsigned char *RequestSenseBuffer = NULL;
dma_addr_t RequestSenseBufferDMA;
+ long ErrorCode = -EINVAL;
- ErrorCode = -EINVAL;
if (UserSpaceUserCommand == NULL)
- break;
+ goto out;
if (copy_from_user(&UserCommand, UserSpaceUserCommand,
sizeof(DAC960_V2_UserCommand_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = -ENXIO;
ControllerNumber = UserCommand.ControllerNumber;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
if (Controller->FirmwareType != DAC960_V2_Controller){
ErrorCode = -EINVAL;
- break;
+ goto out;
}
DataTransferLength = UserCommand.DataTransferLength;
ErrorCode = -ENOMEM;
@@ -6884,14 +6870,14 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DataTransferLength,
&DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
}
else if (DataTransferLength < 0)
{
DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
-DataTransferLength, &DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
if (copy_from_user(DataTransferBuffer,
UserCommand.DataTransferBuffer,
-DataTransferLength)) {
@@ -7001,42 +6987,44 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
if (RequestSenseBuffer != NULL)
pci_free_consistent(Controller->PCIDevice, RequestSenseLength,
RequestSenseBuffer, RequestSenseBufferDMA);
- break;
- }
- case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
- {
- DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus =
- (DAC960_V2_GetHealthStatus_T __user *) Argument;
+out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v2_get_health_status(DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus)
+{
DAC960_V2_GetHealthStatus_T GetHealthStatus;
DAC960_V2_HealthStatusBuffer_T HealthStatusBuffer;
DAC960_Controller_T *Controller;
int ControllerNumber;
+ long ErrorCode;
+
if (UserSpaceGetHealthStatus == NULL) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&GetHealthStatus, UserSpaceGetHealthStatus,
sizeof(DAC960_V2_GetHealthStatus_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = -ENXIO;
ControllerNumber = GetHealthStatus.ControllerNumber;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
if (Controller->FirmwareType != DAC960_V2_Controller) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&HealthStatusBuffer,
GetHealthStatus.HealthStatusBuffer,
sizeof(DAC960_V2_HealthStatusBuffer_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = wait_event_interruptible_timeout(Controller->HealthStatusWaitQueue,
!(Controller->V2.HealthStatusBuffer->StatusChangeCounter
@@ -7046,7 +7034,7 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DAC960_MonitoringTimerInterval);
if (ErrorCode == -ERESTARTSYS) {
ErrorCode = -EINTR;
- break;
+ goto out;
}
if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
Controller->V2.HealthStatusBuffer,
@@ -7054,7 +7042,39 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
ErrorCode = -EFAULT;
else
ErrorCode = 0;
- }
+
+out:
+ return ErrorCode;
+}
+
+/*
+ * DAC960_gam_ioctl is the ioctl function for performing RAID operations.
+ */
+
+static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
+ unsigned long Argument)
+{
+ long ErrorCode = 0;
+ void __user *argp = (void __user *)Argument;
+ if (!capable(CAP_SYS_ADMIN)) return -EACCES;
+
+ mutex_lock(&DAC960_mutex);
+ switch (Request)
+ {
+ case DAC960_IOCTL_GET_CONTROLLER_COUNT:
+ ErrorCode = DAC960_ControllerCount;
+ break;
+ case DAC960_IOCTL_GET_CONTROLLER_INFO:
+ ErrorCode = DAC960_gam_get_controller_info(argp);
+ break;
+ case DAC960_IOCTL_V1_EXECUTE_COMMAND:
+ ErrorCode = DAC960_gam_v1_execute_command(argp);
+ break;
+ case DAC960_IOCTL_V2_EXECUTE_COMMAND:
+ ErrorCode = DAC960_gam_v2_execute_command(argp);
+ break;
+ case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
+ ErrorCode = DAC960_gam_v2_get_health_status(argp);
break;
default:
ErrorCode = -ENOTTY;
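
Reviewer note: the DAC960 rework is a mechanical application of a common ioctl cleanup: each case body becomes a helper that takes the __user pointer, break turns into goto out, and the dispatcher shrinks to a flat switch. A generic sketch of the shape (command number and all names are hypothetical):

#include <linux/fs.h>
#include <linux/uaccess.h>

static long example_do_cmd(void __user *argp)
{
	u32 val;

	/* Per-command copy_from_user()/validation lives in the helper. */
	if (copy_from_user(&val, argp, sizeof(val)))
		return -EFAULT;
	return val ? 0 : -EINVAL;
}

static long example_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case 0x1234:		/* hypothetical command number */
		return example_do_cmd(argp);
	default:
		return -ENOTTY;
	}
}
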
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 40579d0cb3d1..ad9b687a236a 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -20,6 +20,10 @@ config BLK_DEV_NULL_BLK
tristate "Null test block driver"
select CONFIGFS_FS
+config BLK_DEV_NULL_BLK_FAULT_INJECTION
+ bool "Support fault injection for Null test block driver"
+ depends on BLK_DEV_NULL_BLK && FAULT_INJECTION
+
config BLK_DEV_FD
tristate "Normal floppy disk support"
depends on ARCH_MAY_HAVE_PC_FDC
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 9220f8e833d0..c0ebda1283cc 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -112,8 +112,7 @@ enum frame_flags {
struct frame {
struct list_head head;
u32 tag;
- struct timeval sent; /* high-res time packet was sent */
- u32 sent_jiffs; /* low-res jiffies-based sent time */
+ ktime_t sent; /* high-res time packet was sent */
ulong waited;
ulong waited_total;
struct aoetgt *t; /* parent target I belong to */
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 812fed069708..540bb60cd071 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -398,8 +398,7 @@ aoecmd_ata_rw(struct aoedev *d)
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -489,8 +488,7 @@ resend(struct aoedev *d, struct frame *f)
skb = skb_clone(skb, GFP_ATOMIC);
if (skb == NULL)
return;
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -499,33 +497,17 @@ resend(struct aoedev *d, struct frame *f)
static int
tsince_hr(struct frame *f)
{
- struct timeval now;
- int n;
+ u64 delta = ktime_to_ns(ktime_sub(ktime_get(), f->sent));
- do_gettimeofday(&now);
- n = now.tv_usec - f->sent.tv_usec;
- n += (now.tv_sec - f->sent.tv_sec) * USEC_PER_SEC;
+ /* delta is normally under 4.2 seconds, avoid 64-bit division */
+ if (likely(delta <= UINT_MAX))
+ return (u32)delta / NSEC_PER_USEC;
- if (n < 0)
- n = -n;
+ /* avoid overflow after 71 minutes */
+ if (delta > ((u64)INT_MAX * NSEC_PER_USEC))
+ return INT_MAX;
- /* For relatively long periods, use jiffies to avoid
- * discrepancies caused by updates to the system time.
- *
- * On system with HZ of 1000, 32-bits is over 49 days
- * worth of jiffies, or over 71 minutes worth of usecs.
- *
- * Jiffies overflow is handled by subtraction of unsigned ints:
- * (gdb) print (unsigned) 2 - (unsigned) 0xfffffffe
- * $3 = 4
- * (gdb)
- */
- if (n > USEC_PER_SEC / 4) {
- n = ((u32) jiffies) - f->sent_jiffs;
- n *= USEC_PER_SEC / HZ;
- }
-
- return n;
+ return div_u64(delta, NSEC_PER_USEC);
}
static int
@@ -589,7 +571,6 @@ reassign_frame(struct frame *f)
nf->waited = 0;
nf->waited_total = f->waited_total;
nf->sent = f->sent;
- nf->sent_jiffs = f->sent_jiffs;
f->skb = skb;
return nf;
@@ -633,8 +614,7 @@ probe(struct aoetgt *t)
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -1432,10 +1412,8 @@ aoecmd_ata_id(struct aoedev *d)
d->timer.function = rexmit_timer;
skb = skb_clone(skb, GFP_ATOMIC);
- if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
- }
+ if (skb)
+ f->sent = ktime_get();
return skb;
}
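
Reviewer note: the tsince_hr() rewrite trades the old timeval/jiffies hybrid for a single monotonic ktime_t and dodges a 64-bit division in the common case. A standalone, testable version of the same arithmetic (userspace C, assuming nothing from the kernel):

#include <limits.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000u

static int ns_delta_to_usec(uint64_t delta_ns)
{
	/* Deltas under ~4.29 s fit in 32 bits: cheap 32-bit divide. */
	if (delta_ns <= UINT_MAX)
		return (uint32_t)delta_ns / NSEC_PER_USEC;

	/* Clamp ancient frames instead of overflowing the int return. */
	if (delta_ns > (uint64_t)INT_MAX * NSEC_PER_USEC)
		return INT_MAX;

	/* Rare slow path: a full 64-bit divide (div_u64 in the kernel). */
	return (int)(delta_ns / NSEC_PER_USEC);
}
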
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index bd97908c766f..9f4e6f502b84 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -953,7 +953,7 @@ static void drbd_bm_endio(struct bio *bio)
struct drbd_bm_aio_ctx *ctx = bio->bi_private;
struct drbd_device *device = ctx->device;
struct drbd_bitmap *b = device->bitmap;
- unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
+ unsigned int idx = bm_page_to_idx(bio_first_page_all(bio));
if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
!bm_test_page_unchanged(b->bm_pages[idx]))
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 4b4697a1f963..0a0394aa1b9c 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1847,7 +1847,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
void *buf, size_t size, unsigned msg_flags)
{
struct kvec iov = {.iov_base = buf, .iov_len = size};
- struct msghdr msg;
+ struct msghdr msg = {.msg_flags = msg_flags | MSG_NOSIGNAL};
int rv, sent = 0;
if (!sock)
@@ -1855,12 +1855,6 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
/* THINK if (signal_pending) return ... ? */
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = msg_flags | MSG_NOSIGNAL;
-
iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
if (sock == connection->data.socket) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index cb2fa63f6bc0..c72dee0ef083 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -516,7 +516,8 @@ static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flag
struct msghdr msg = {
.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
};
- return kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
+ return sock_recvmsg(sock, &msg, msg.msg_flags);
}
static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
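
Reviewer note: both drbd hunks lean on C designated initializers, where every field not named is zero-initialized, so the five explicit NULL/0 assignments dropped from drbd_send() were redundant. Sketch:

#include <linux/socket.h>

static void example_init_msg(void)
{
	/* msg_name, msg_namelen, msg_control, msg_controllen and the
	 * iterator are all zeroed implicitly by the initializer. */
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };

	(void)msg;	/* placeholder so the sketch compiles standalone */
}
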
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bc8e61506968..d5fe720cf149 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1581,9 +1581,8 @@ out:
return err;
}
-static void lo_release(struct gendisk *disk, fmode_t mode)
+static void __lo_release(struct loop_device *lo)
{
- struct loop_device *lo = disk->private_data;
int err;
if (atomic_dec_return(&lo->lo_refcnt))
@@ -1610,6 +1609,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
mutex_unlock(&lo->lo_ctl_mutex);
}
+static void lo_release(struct gendisk *disk, fmode_t mode)
+{
+ mutex_lock(&loop_index_mutex);
+ __lo_release(disk->private_data);
+ mutex_unlock(&loop_index_mutex);
+}
+
static const struct block_device_operations lo_fops = {
.owner = THIS_MODULE,
.open = lo_open,
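
Reviewer note: the loop.c change is the classic "split the body out, take the outer lock in a wrapper" move, used when a release path can race with lookup or teardown through a global index. A generic sketch with hypothetical names:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_index_mutex);

struct example_dev {
	int refcnt;		/* illustrative state the mutex guards */
};

/* Must be called with example_index_mutex held. */
static void __example_release(struct example_dev *dev)
{
	dev->refcnt--;
}

/* Lock-taking wrapper, mirroring lo_release()/__lo_release() above. */
static void example_release(struct example_dev *dev)
{
	mutex_lock(&example_index_mutex);
	__example_release(dev);
	mutex_unlock(&example_index_mutex);
}
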
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ccb9975a97fa..6655893a3a7a 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -12,9 +12,9 @@
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
-#include <linux/lightnvm.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>
+#include <linux/fault-inject.h>
#define SECTOR_SHIFT 9
#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
@@ -27,6 +27,10 @@
#define TICKS_PER_SEC 50ULL
#define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+static DECLARE_FAULT_ATTR(null_timeout_attr);
+#endif
+
static inline u64 mb_per_tick(int mbps)
{
return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
@@ -35,13 +39,13 @@ static inline u64 mb_per_tick(int mbps)
struct nullb_cmd {
struct list_head list;
struct llist_node ll_list;
- call_single_data_t csd;
+ struct __call_single_data csd;
struct request *rq;
struct bio *bio;
unsigned int tag;
+ blk_status_t error;
struct nullb_queue *nq;
struct hrtimer timer;
- blk_status_t error;
};
struct nullb_queue {
@@ -107,7 +111,6 @@ struct nullb_device {
unsigned int hw_queue_depth; /* queue depth */
unsigned int index; /* index of the disk, only valid with a disk */
unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
- bool use_lightnvm; /* register as a LightNVM device */
bool blocking; /* blocking blk-mq device */
bool use_per_node_hctx; /* use per-node allocation for hardware context */
bool power; /* power on/off the device */
@@ -121,7 +124,6 @@ struct nullb {
unsigned int index;
struct request_queue *q;
struct gendisk *disk;
- struct nvm_dev *ndev;
struct blk_mq_tag_set *tag_set;
struct blk_mq_tag_set __tag_set;
unsigned int queue_depth;
@@ -139,7 +141,6 @@ static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
-static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;
enum {
@@ -166,6 +167,11 @@ static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+static char g_timeout_str[80];
+module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), S_IRUGO);
+#endif
+
static int g_queue_mode = NULL_Q_MQ;
static int null_param_store_val(const char *str, int *val, int min, int max)
@@ -208,10 +214,6 @@ static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
-static bool g_use_lightnvm;
-module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO);
-MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
-
static bool g_blocking;
module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
@@ -345,7 +347,6 @@ NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
-NULLB_DEVICE_ATTR(use_lightnvm, bool);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
@@ -455,7 +456,6 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_irqmode,
&nullb_device_attr_hw_queue_depth,
&nullb_device_attr_index,
- &nullb_device_attr_use_lightnvm,
&nullb_device_attr_blocking,
&nullb_device_attr_use_per_node_hctx,
&nullb_device_attr_power,
@@ -573,7 +573,6 @@ static struct nullb_device *null_alloc_dev(void)
dev->blocksize = g_bs;
dev->irqmode = g_irqmode;
dev->hw_queue_depth = g_hw_queue_depth;
- dev->use_lightnvm = g_use_lightnvm;
dev->blocking = g_blocking;
dev->use_per_node_hctx = g_use_per_node_hctx;
return dev;
@@ -1352,6 +1351,12 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
+static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
+{
+ pr_info("null: rq %p timed out\n", rq);
+ return BLK_EH_HANDLED;
+}
+
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
struct nullb *nullb = q->queuedata;
@@ -1369,6 +1374,16 @@ static int null_rq_prep_fn(struct request_queue *q, struct request *req)
return BLKPREP_DEFER;
}
+static bool should_timeout_request(struct request *rq)
+{
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+ if (g_timeout_str[0])
+ return should_fail(&null_timeout_attr, 1);
+#endif
+
+ return false;
+}
+
static void null_request_fn(struct request_queue *q)
{
struct request *rq;
@@ -1376,12 +1391,20 @@ static void null_request_fn(struct request_queue *q)
while ((rq = blk_fetch_request(q)) != NULL) {
struct nullb_cmd *cmd = rq->special;
- spin_unlock_irq(q->queue_lock);
- null_handle_cmd(cmd);
- spin_lock_irq(q->queue_lock);
+ if (!should_timeout_request(rq)) {
+ spin_unlock_irq(q->queue_lock);
+ null_handle_cmd(cmd);
+ spin_lock_irq(q->queue_lock);
+ }
}
}
+static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
+{
+ pr_info("null: rq %p timed out\n", rq);
+ return BLK_EH_HANDLED;
+}
+
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1399,12 +1422,16 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(bd->rq);
- return null_handle_cmd(cmd);
+ if (!should_timeout_request(bd->rq))
+ return null_handle_cmd(cmd);
+
+ return BLK_STS_OK;
}
static const struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.complete = null_softirq_done_fn,
+ .timeout = null_timeout_rq,
};
static void cleanup_queue(struct nullb_queue *nq)
@@ -1423,170 +1450,6 @@ static void cleanup_queues(struct nullb *nullb)
kfree(nullb->queues);
}
-#ifdef CONFIG_NVM
-
-static void null_lnvm_end_io(struct request *rq, blk_status_t status)
-{
- struct nvm_rq *rqd = rq->end_io_data;
-
- /* XXX: lighnvm core seems to expect NVM_RSP_* values here.. */
- rqd->error = status ? -EIO : 0;
- nvm_end_io(rqd);
-
- blk_put_request(rq);
-}
-
-static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
- struct request_queue *q = dev->q;
- struct request *rq;
- struct bio *bio = rqd->bio;
-
- rq = blk_mq_alloc_request(q,
- op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq))
- return -ENOMEM;
-
- blk_init_request_from_bio(rq, bio);
-
- rq->end_io_data = rqd;
-
- blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);
-
- return 0;
-}
-
-static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
-{
- struct nullb *nullb = dev->q->queuedata;
- sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
- sector_t blksize;
- struct nvm_id_group *grp;
-
- id->ver_id = 0x1;
- id->vmnt = 0;
- id->cap = 0x2;
- id->dom = 0x1;
-
- id->ppaf.blk_offset = 0;
- id->ppaf.blk_len = 16;
- id->ppaf.pg_offset = 16;
- id->ppaf.pg_len = 16;
- id->ppaf.sect_offset = 32;
- id->ppaf.sect_len = 8;
- id->ppaf.pln_offset = 40;
- id->ppaf.pln_len = 8;
- id->ppaf.lun_offset = 48;
- id->ppaf.lun_len = 8;
- id->ppaf.ch_offset = 56;
- id->ppaf.ch_len = 8;
-
- sector_div(size, nullb->dev->blocksize); /* convert size to pages */
- size >>= 8; /* concert size to pgs pr blk */
- grp = &id->grp;
- grp->mtype = 0;
- grp->fmtype = 0;
- grp->num_ch = 1;
- grp->num_pg = 256;
- blksize = size;
- size >>= 16;
- grp->num_lun = size + 1;
- sector_div(blksize, grp->num_lun);
- grp->num_blk = blksize;
- grp->num_pln = 1;
-
- grp->fpg_sz = nullb->dev->blocksize;
- grp->csecs = nullb->dev->blocksize;
- grp->trdt = 25000;
- grp->trdm = 25000;
- grp->tprt = 500000;
- grp->tprm = 500000;
- grp->tbet = 1500000;
- grp->tbem = 1500000;
- grp->mpos = 0x010101; /* single plane rwe */
- grp->cpar = nullb->dev->hw_queue_depth;
-
- return 0;
-}
-
-static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
-{
- mempool_t *virtmem_pool;
-
- virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
- if (!virtmem_pool) {
- pr_err("null_blk: Unable to create virtual memory pool\n");
- return NULL;
- }
-
- return virtmem_pool;
-}
-
-static void null_lnvm_destroy_dma_pool(void *pool)
-{
- mempool_destroy(pool);
-}
-
-static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
- gfp_t mem_flags, dma_addr_t *dma_handler)
-{
- return mempool_alloc(pool, mem_flags);
-}
-
-static void null_lnvm_dev_dma_free(void *pool, void *entry,
- dma_addr_t dma_handler)
-{
- mempool_free(entry, pool);
-}
-
-static struct nvm_dev_ops null_lnvm_dev_ops = {
- .identity = null_lnvm_id,
- .submit_io = null_lnvm_submit_io,
-
- .create_dma_pool = null_lnvm_create_dma_pool,
- .destroy_dma_pool = null_lnvm_destroy_dma_pool,
- .dev_dma_alloc = null_lnvm_dev_dma_alloc,
- .dev_dma_free = null_lnvm_dev_dma_free,
-
- /* Simulate nvme protocol restriction */
- .max_phys_sect = 64,
-};
-
-static int null_nvm_register(struct nullb *nullb)
-{
- struct nvm_dev *dev;
- int rv;
-
- dev = nvm_alloc_dev(0);
- if (!dev)
- return -ENOMEM;
-
- dev->q = nullb->q;
- memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
- dev->ops = &null_lnvm_dev_ops;
-
- rv = nvm_register(dev);
- if (rv) {
- kfree(dev);
- return rv;
- }
- nullb->ndev = dev;
- return 0;
-}
-
-static void null_nvm_unregister(struct nullb *nullb)
-{
- nvm_unregister(nullb->ndev);
-}
-#else
-static int null_nvm_register(struct nullb *nullb)
-{
- pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
- return -EINVAL;
-}
-static void null_nvm_unregister(struct nullb *nullb) {}
-#endif /* CONFIG_NVM */
-
static void null_del_dev(struct nullb *nullb)
{
struct nullb_device *dev = nullb->dev;
@@ -1595,10 +1458,7 @@ static void null_del_dev(struct nullb *nullb)
list_del_init(&nullb->list);
- if (dev->use_lightnvm)
- null_nvm_unregister(nullb);
- else
- del_gendisk(nullb->disk);
+ del_gendisk(nullb->disk);
if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
hrtimer_cancel(&nullb->bw_timer);
@@ -1610,8 +1470,7 @@ static void null_del_dev(struct nullb *nullb)
if (dev->queue_mode == NULL_Q_MQ &&
nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
- if (!dev->use_lightnvm)
- put_disk(nullb->disk);
+ put_disk(nullb->disk);
cleanup_queues(nullb);
if (null_cache_active(nullb))
null_free_device_storage(nullb->dev, true);
@@ -1775,11 +1634,6 @@ static void null_validate_conf(struct nullb_device *dev)
{
dev->blocksize = round_down(dev->blocksize, 512);
dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
- if (dev->use_lightnvm && dev->blocksize != 4096)
- dev->blocksize = 4096;
-
- if (dev->use_lightnvm && dev->queue_mode != NULL_Q_MQ)
- dev->queue_mode = NULL_Q_MQ;
if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
if (dev->submit_queues != nr_online_nodes)
@@ -1805,6 +1659,20 @@ static void null_validate_conf(struct nullb_device *dev)
dev->mbps = 0;
}
+static bool null_setup_fault(void)
+{
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+ if (!g_timeout_str[0])
+ return true;
+
+ if (!setup_fault_attr(&null_timeout_attr, g_timeout_str))
+ return false;
+
+ null_timeout_attr.verbose = 0;
+#endif
+ return true;
+}
+
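A note on the fault-injection hook just added: setup_fault_attr() parses the standard lib/fault-inject.c string format "<interval>,<probability>,<space>,<times>". The consumer of null_timeout_attr is not visible in the hunks quoted here; below is a minimal sketch of how such an attribute is typically checked per request (the function name is hypothetical, modeled on should_fail()). A request flagged this way would simply never be completed, so the 5 * HZ timeout configured below can fire.

static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	/* Only inject once a timeout string has been configured. */
	if (g_timeout_str[0])
		return should_fail(&null_timeout_attr, 1);
#endif
	return false;
}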
static int null_add_dev(struct nullb_device *dev)
{
struct nullb *nullb;
@@ -1838,6 +1706,10 @@ static int null_add_dev(struct nullb_device *dev)
if (rv)
goto out_cleanup_queues;
+ if (!null_setup_fault()) {
+         rv = -EINVAL;
+         goto out_cleanup_queues;
+ }
+
+ nullb->tag_set->timeout = 5 * HZ;
nullb->q = blk_mq_init_queue(nullb->tag_set);
if (IS_ERR(nullb->q)) {
rv = -ENOMEM;
@@ -1861,8 +1733,14 @@ static int null_add_dev(struct nullb_device *dev)
rv = -ENOMEM;
goto out_cleanup_queues;
}
+
+ if (!null_setup_fault()) {
+         rv = -EINVAL;
+         goto out_cleanup_blk_queue;
+ }
+
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+ blk_queue_rq_timed_out(nullb->q, null_rq_timed_out_fn);
+ nullb->q->rq_timeout = 5 * HZ;
rv = init_driver_queues(nullb);
if (rv)
goto out_cleanup_blk_queue;
@@ -1895,11 +1773,7 @@ static int null_add_dev(struct nullb_device *dev)
sprintf(nullb->disk_name, "nullb%d", nullb->index);
- if (dev->use_lightnvm)
- rv = null_nvm_register(nullb);
- else
- rv = null_gendisk_register(nullb);
-
+ rv = null_gendisk_register(nullb);
if (rv)
goto out_cleanup_blk_queue;
@@ -1938,18 +1812,6 @@ static int __init null_init(void)
g_bs = PAGE_SIZE;
}
- if (g_use_lightnvm && g_bs != 4096) {
- pr_warn("null_blk: LightNVM only supports 4k block size\n");
- pr_warn("null_blk: defaults block size to 4k\n");
- g_bs = 4096;
- }
-
- if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) {
- pr_warn("null_blk: LightNVM only supported for blk-mq\n");
- pr_warn("null_blk: defaults queue mode to blk-mq\n");
- g_queue_mode = NULL_Q_MQ;
- }
-
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.\n",
@@ -1982,16 +1844,6 @@ static int __init null_init(void)
goto err_conf;
}
- if (g_use_lightnvm) {
- ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
- 0, 0, NULL);
- if (!ppa_cache) {
- pr_err("null_blk: unable to create ppa cache\n");
- ret = -ENOMEM;
- goto err_ppa;
- }
- }
-
for (i = 0; i < nr_devices; i++) {
dev = null_alloc_dev();
if (!dev) {
@@ -2015,8 +1867,6 @@ err_dev:
null_del_dev(nullb);
null_free_dev(dev);
}
- kmem_cache_destroy(ppa_cache);
-err_ppa:
unregister_blkdev(null_major, "nullb");
err_conf:
configfs_unregister_subsystem(&nullb_subsys);
@@ -2047,8 +1897,6 @@ static void __exit null_exit(void)
if (g_queue_mode == NULL_Q_MQ && shared_tags)
blk_mq_free_tag_set(&tag_set);
-
- kmem_cache_destroy(ppa_cache);
}
module_init(null_init);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 67974796c350..531a0915066b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2579,14 +2579,14 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
bdev = bdget(dev);
if (!bdev)
return -ENOMEM;
+ ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
+ if (ret)
+ return ret;
if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
- bdput(bdev);
+ blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
return -EINVAL;
}
- ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
- if (ret)
- return ret;
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
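Net effect of the hunk above, shown as the resulting pkt_new_dev() open sequence: the device is now opened before its request queue is inspected, and the failure path unwinds with blkdev_put() instead of a bare bdput(). Reconstructed directly from the hunk:

	bdev = bdget(dev);
	if (!bdev)
		return -ENOMEM;
	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
	if (ret)
		return ret;
	if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
		blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
		return -EINVAL;
	}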
@@ -2745,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
pd->pkt_dev = MKDEV(pktdev_major, idx);
ret = pkt_new_dev(pd, dev);
if (ret)
- goto out_new_dev;
+ goto out_mem2;
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
@@ -2763,8 +2763,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
mutex_unlock(&ctl_mutex);
return 0;
-out_new_dev:
- blk_cleanup_queue(disk->queue);
out_mem2:
put_disk(disk);
out_mem:
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 38fc5f397fde..cc93522a6d41 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3047,13 +3047,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
mutex_unlock(&rbd_dev->watch_mutex);
}
+static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
+{
+ struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+
+ strcpy(rbd_dev->lock_cookie, cookie);
+ rbd_set_owner_cid(rbd_dev, &cid);
+ queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+}
+
/*
* lock_rwsem must be held for write
*/
static int rbd_lock(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- struct rbd_client_id cid = rbd_get_cid(rbd_dev);
char cookie[32];
int ret;
@@ -3068,9 +3076,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
return ret;
rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
- strcpy(rbd_dev->lock_cookie, cookie);
- rbd_set_owner_cid(rbd_dev, &cid);
- queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+ __rbd_lock(rbd_dev, cookie);
return 0;
}
@@ -3856,7 +3862,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
queue_delayed_work(rbd_dev->task_wq,
&rbd_dev->lock_dwork, 0);
} else {
- strcpy(rbd_dev->lock_cookie, cookie);
+ __rbd_lock(rbd_dev, cookie);
}
}
@@ -4381,7 +4387,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
segment_size = rbd_obj_bytes(&rbd_dev->header);
blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
q->limits.max_sectors = queue_max_hw_sectors(q);
- blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
+ blk_queue_max_segments(q, USHRT_MAX);
blk_queue_max_segment_size(q, segment_size);
blk_queue_io_min(q, segment_size);
blk_queue_io_opt(q, segment_size);
diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
deleted file mode 100644
index e5565fbaeb30..000000000000
--- a/drivers/block/smart1,2.h
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Disk Array driver for Compaq SMART2 Controllers
- * Copyright 1998 Compaq Computer Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- * If you want to make changes, improve or add functionality to this
- * driver, you'll probably need the Compaq Array Controller Interface
- * Specificiation (Document number ECG086/1198)
- */
-
-/*
- * This file contains the controller communication implementation for
- * Compaq SMART-1 and SMART-2 controllers. To the best of my knowledge,
- * this should support:
- *
- * PCI:
- * SMART-2/P, SMART-2DH, SMART-2SL, SMART-221, SMART-3100ES, SMART-3200
- * Integerated SMART Array Controller, SMART-4200, SMART-4250ES
- *
- * EISA:
- * SMART-2/E, SMART, IAES, IDA-2, IDA
- */
-
-/*
- * Memory mapped FIFO interface (SMART 42xx cards)
- */
-static void smart4_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- writel(c->busaddr, h->vaddr + S42XX_REQUEST_PORT_OFFSET);
-}
-
-/*
- * This card is the opposite of the other cards.
- * 0 turns interrupts on...
- * 0x08 turns them off...
- */
-static void smart4_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val)
- { /* Turn interrupts on */
- writel(0, h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
- } else /* Turn them off */
- {
- writel( S42XX_INTR_OFF,
- h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
- }
-}
-
-/*
- * For older cards FIFO Full = 0.
- * On this card 0 means there is room, anything else FIFO Full.
- *
- */
-static unsigned long smart4_fifo_full(ctlr_info_t *h)
-{
-
- return (!readl(h->vaddr + S42XX_REQUEST_PORT_OFFSET));
-}
-
-/* This type of controller returns -1 if the fifo is empty,
- * Not 0 like the others.
- * And we need to let it know we read a value out
- */
-static unsigned long smart4_completed(ctlr_info_t *h)
-{
- long register_value
- = readl(h->vaddr + S42XX_REPLY_PORT_OFFSET);
-
- /* Fifo is empty */
- if( register_value == 0xffffffff)
- return 0;
-
- /* Need to let it know we got the reply */
- /* We do this by writing a 0 to the port we just read from */
- writel(0, h->vaddr + S42XX_REPLY_PORT_OFFSET);
-
- return ((unsigned long) register_value);
-}
-
- /*
- * This hardware returns interrupt pending at a different place and
- * it does not tell us if the fifo is empty, we will have check
- * that by getting a 0 back from the command_completed call.
- */
-static unsigned long smart4_intr_pending(ctlr_info_t *h)
-{
- unsigned long register_value =
- readl(h->vaddr + S42XX_INTR_STATUS);
-
- if( register_value & S42XX_INTR_PENDING)
- return FIFO_NOT_EMPTY;
- return 0 ;
-}
-
-static struct access_method smart4_access = {
- smart4_submit_command,
- smart4_intr_mask,
- smart4_fifo_full,
- smart4_intr_pending,
- smart4_completed,
-};
-
-/*
- * Memory mapped FIFO interface (PCI SMART2 and SMART 3xxx cards)
- */
-static void smart2_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- writel(c->busaddr, h->vaddr + COMMAND_FIFO);
-}
-
-static void smart2_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- writel(val, h->vaddr + INTR_MASK);
-}
-
-static unsigned long smart2_fifo_full(ctlr_info_t *h)
-{
- return readl(h->vaddr + COMMAND_FIFO);
-}
-
-static unsigned long smart2_completed(ctlr_info_t *h)
-{
- return readl(h->vaddr + COMMAND_COMPLETE_FIFO);
-}
-
-static unsigned long smart2_intr_pending(ctlr_info_t *h)
-{
- return readl(h->vaddr + INTR_PENDING);
-}
-
-static struct access_method smart2_access = {
- smart2_submit_command,
- smart2_intr_mask,
- smart2_fifo_full,
- smart2_intr_pending,
- smart2_completed,
-};
-
-/*
- * IO access for SMART-2/E cards
- */
-static void smart2e_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- outl(c->busaddr, h->io_mem_addr + COMMAND_FIFO);
-}
-
-static void smart2e_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- outl(val, h->io_mem_addr + INTR_MASK);
-}
-
-static unsigned long smart2e_fifo_full(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + COMMAND_FIFO);
-}
-
-static unsigned long smart2e_completed(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + COMMAND_COMPLETE_FIFO);
-}
-
-static unsigned long smart2e_intr_pending(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + INTR_PENDING);
-}
-
-static struct access_method smart2e_access = {
- smart2e_submit_command,
- smart2e_intr_mask,
- smart2e_fifo_full,
- smart2e_intr_pending,
- smart2e_completed,
-};
-
-/*
- * IO access for older SMART-1 type cards
- */
-#define SMART1_SYSTEM_MASK 0xC8E
-#define SMART1_SYSTEM_DOORBELL 0xC8F
-#define SMART1_LOCAL_MASK 0xC8C
-#define SMART1_LOCAL_DOORBELL 0xC8D
-#define SMART1_INTR_MASK 0xC89
-#define SMART1_LISTADDR 0xC90
-#define SMART1_LISTLEN 0xC94
-#define SMART1_TAG 0xC97
-#define SMART1_COMPLETE_ADDR 0xC98
-#define SMART1_LISTSTATUS 0xC9E
-
-#define CHANNEL_BUSY 0x01
-#define CHANNEL_CLEAR 0x02
-
-static void smart1_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- /*
- * This __u16 is actually a bunch of control flags on SMART
- * and below. We want them all to be zero.
- */
- c->hdr.size = 0;
-
- outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
-
- outl(c->busaddr, h->io_mem_addr + SMART1_LISTADDR);
- outw(c->size, h->io_mem_addr + SMART1_LISTLEN);
-
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
-}
-
-static void smart1_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val == 1) {
- outb(0xFD, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
- outb(0x01, h->io_mem_addr + SMART1_INTR_MASK);
- outb(0x01, h->io_mem_addr + SMART1_SYSTEM_MASK);
- } else {
- outb(0, h->io_mem_addr + 0xC8E);
- }
-}
-
-static unsigned long smart1_fifo_full(ctlr_info_t *h)
-{
- unsigned char chan;
- chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_CLEAR;
- return chan;
-}
-
-static unsigned long smart1_completed(ctlr_info_t *h)
-{
- unsigned char status;
- unsigned long cmd;
-
- if (inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY) {
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
-
- cmd = inl(h->io_mem_addr + SMART1_COMPLETE_ADDR);
- status = inb(h->io_mem_addr + SMART1_LISTSTATUS);
-
- outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
-
- /*
- * this is x86 (actually compaq x86) only, so it's ok
- */
- if (cmd) ((cmdlist_t*)bus_to_virt(cmd))->req.hdr.rcode = status;
- } else {
- cmd = 0;
- }
- return cmd;
-}
-
-static unsigned long smart1_intr_pending(ctlr_info_t *h)
-{
- unsigned char chan;
- chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY;
- return chan;
-}
-
-static struct access_method smart1_access = {
- smart1_submit_command,
- smart1_intr_mask,
- smart1_fifo_full,
- smart1_intr_pending,
- smart1_completed,
-};
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d70eba30003a..0afa6c8c3857 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -430,7 +430,7 @@ static void put_entry_bdev(struct zram *zram, unsigned long entry)
static void zram_page_end_io(struct bio *bio)
{
- struct page *page = bio->bi_io_vec[0].bv_page;
+ struct page *page = bio_first_page_all(bio);
page_endio(page, op_is_write(bio_op(bio)),
blk_status_to_errno(bio->bi_status));
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 60e1c7d6986d..07e55cd8f8c8 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -31,6 +31,16 @@ config BT_HCIBTUSB
Say Y here to compile support for Bluetooth USB devices into the
kernel or say M to compile it as module (btusb).
+config BT_HCIBTUSB_AUTOSUSPEND
+ bool "Enable USB autosuspend for Bluetooth USB devices by default"
+ depends on BT_HCIBTUSB
+ help
+ Say Y here to enable USB autosuspend for Bluetooth USB devices by
+ default.
+
+ This can be overridden by passing btusb.enable_autosuspend=[y|n]
+ on the kernel command line.
+
config BT_HCIBTUSB_BCM
bool "Broadcom protocol support"
depends on BT_HCIBTUSB
@@ -67,6 +77,7 @@ config BT_HCIBTSDIO
config BT_HCIUART
tristate "HCI UART driver"
depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS
+ depends on NVMEM || !NVMEM
depends on TTY
help
Bluetooth HCI UART driver.
@@ -97,6 +108,7 @@ config BT_HCIUART_NOKIA
tristate "UART Nokia H4+ protocol support"
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
+ depends on GPIOLIB
depends on PM
select BT_HCIUART_H4
select BT_BCM
@@ -158,6 +170,7 @@ config BT_HCIUART_3WIRE
config BT_HCIUART_INTEL
bool "Intel protocol support"
depends on BT_HCIUART
+ depends on GPIOLIB
select BT_HCIUART_H4
select BT_INTEL
help
@@ -171,6 +184,7 @@ config BT_HCIUART_BCM
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
depends on (!ACPI || SERIAL_DEV_CTRL_TTYPORT)
+ depends on GPIOLIB
select BT_HCIUART_H4
select BT_BCM
help
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index d513ef4743dc..82437a69f99c 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -302,9 +302,7 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
}
/* Wait until the command reaches the baseband */
- prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
- schedule_timeout(HZ/10);
- finish_wait(&wq, &wait);
+ mdelay(100);
/* Set baud on baseband */
info->ctrl_reg &= ~0x03;
@@ -316,9 +314,7 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
outb(info->ctrl_reg, iobase + REG_CONTROL);
/* Wait before the next HCI packet can be send */
- prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
- finish_wait(&wq, &wait);
+ mdelay(1000);
}
if (len == skb->len) {
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 7971bfbd4321..801ea4ca65e4 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -259,7 +259,7 @@ static int bpa10x_flush(struct hci_dev *hdev)
static int bpa10x_setup(struct hci_dev *hdev)
{
- const u8 req[] = { 0x07 };
+ static const u8 req[] = { 0x07 };
struct sk_buff *skb;
BT_DBG("%s", hdev->name);
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index afa4cb3b16e3..6659f113042c 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -323,6 +323,7 @@ static const struct {
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
+ { 0x2122, "BCM4343A0" }, /* 001.001.034 */
{ 0x2209, "BCM43430A1" }, /* 001.002.009 */
{ 0x6119, "BCM4345C0" }, /* 003.001.025 */
{ 0x230f, "BCM4356A2" }, /* 001.003.015 */
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index d9e6b41658e5..cfe6ad4cc621 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -44,8 +44,8 @@ struct bcm_set_sleep_mode {
__u8 tristate_control;
__u8 usb_auto_sleep;
__u8 usb_resume_timeout;
- __u8 pulsed_host_wake;
__u8 break_to_host;
+ __u8 pulsed_host_wake;
} __packed;
struct bcm_set_pcm_int_params {
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 4459555c9d88..5270d5513201 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
+#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -75,7 +76,7 @@ EXPORT_SYMBOL_GPL(btintel_check_bdaddr);
int btintel_enter_mfg(struct hci_dev *hdev)
{
- const u8 param[] = { 0x01, 0x00 };
+ static const u8 param[] = { 0x01, 0x00 };
struct sk_buff *skb;
skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
@@ -569,6 +570,160 @@ struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
}
EXPORT_SYMBOL_GPL(btintel_regmap_init);
+int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param)
+{
+ struct intel_reset params = { 0x00, 0x01, 0x00, 0x01, 0x00000000 };
+ struct sk_buff *skb;
+
+ params.boot_param = cpu_to_le32(boot_param);
+
+ skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Failed to send Intel Reset command");
+ return PTR_ERR(skb);
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_send_intel_reset);
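For readability, the positional initializer above maps onto struct intel_reset (declared in btintel.h later in this patch) as follows; this is a restatement only, not a behavioral change:

	struct intel_reset params = {
		.reset_type   = 0x00,
		.patch_enable = 0x01,
		.ddc_reload   = 0x00,
		.boot_option  = 0x01,
		.boot_param   = 0x00000000,	/* replaced by cpu_to_le32(boot_param) */
	};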
+
+int btintel_read_boot_params(struct hci_dev *hdev,
+ struct intel_boot_params *params)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len != sizeof(*params)) {
+ bt_dev_err(hdev, "Intel boot parameters size mismatch");
+ kfree_skb(skb);
+ return -EILSEQ;
+ }
+
+ memcpy(params, skb->data, sizeof(*params));
+
+ kfree_skb(skb);
+
+ if (params->status) {
+ bt_dev_err(hdev, "Intel boot parameters command failed (%02x)",
+ params->status);
+ return -bt_to_errno(params->status);
+ }
+
+ bt_dev_info(hdev, "Device revision is %u",
+ le16_to_cpu(params->dev_revid));
+
+ bt_dev_info(hdev, "Secure boot is %s",
+ params->secure_boot ? "enabled" : "disabled");
+
+ bt_dev_info(hdev, "OTP lock is %s",
+ params->otp_lock ? "enabled" : "disabled");
+
+ bt_dev_info(hdev, "API lock is %s",
+ params->api_lock ? "enabled" : "disabled");
+
+ bt_dev_info(hdev, "Debug lock is %s",
+ params->debug_lock ? "enabled" : "disabled");
+
+ bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
+ params->min_fw_build_nn, params->min_fw_build_cw,
+ 2000 + params->min_fw_build_yy);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_read_boot_params);
+
+int btintel_download_firmware(struct hci_dev *hdev, const struct firmware *fw,
+ u32 *boot_param)
+{
+ int err;
+ const u8 *fw_ptr;
+ u32 frag_len;
+
+ /* Start the firmware download transaction with the Init fragment
+ * represented by the 128 bytes of CSS header.
+ */
+ err = btintel_secure_send(hdev, 0x00, 128, fw->data);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
+ goto done;
+ }
+
+ /* Send the 256 bytes of public key information from the firmware
+ * as the PKey fragment.
+ */
+ err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err);
+ goto done;
+ }
+
+ /* Send the 256 bytes of signature information from the firmware
+ * as the Sign fragment.
+ */
+ err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send firmware signature (%d)", err);
+ goto done;
+ }
+
+ fw_ptr = fw->data + 644;
+ frag_len = 0;
+
+ while (fw_ptr - fw->data < fw->size) {
+ struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
+
+ /* Each SKU has a different reset parameter to use in the
+ * HCI_Intel_Reset command and it is embedded in the firmware
+ * data. So, instead of using a static value per SKU, check
+ * the firmware data and save it for later use.
+ */
+ if (le16_to_cpu(cmd->opcode) == 0xfc0e) {
+ /* The boot parameter is the first 32-bit value;
+ * the remaining 3 octets are reserved.
+ */
+ *boot_param = get_unaligned_le32(fw_ptr + sizeof(*cmd));
+
+ bt_dev_dbg(hdev, "boot_param=0x%x", *boot_param);
+ }
+
+ frag_len += sizeof(*cmd) + cmd->plen;
+
+ /* The parameter length of the secure send command requires
+ * a 4 byte alignment. It happens so that the firmware file
+ * contains proper Intel_NOP commands to align the fragments
+ * as needed.
+ *
+ * Send the set of commands with 4 byte alignment from the
+ * firmware data buffer as a single Data fragment.
+ */
+ if (!(frag_len % 4)) {
+ err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
+ if (err < 0) {
+ bt_dev_err(hdev,
+ "Failed to send firmware data (%d)",
+ err);
+ goto done;
+ }
+
+ fw_ptr += frag_len;
+ frag_len = 0;
+ }
+ }
+
+done:
+ return err;
+}
+EXPORT_SYMBOL_GPL(btintel_download_firmware);
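The offsets hard-coded in btintel_download_firmware() imply the following .sfi image layout. This summary is derived purely from the constants used above (128, 256, 388, 644), not from an Intel specification:

/*
 *     0 .. 127   CSS header          -> Init fragment (type 0x00)
 *   128 .. 383   RSA public key      -> PKey fragment (type 0x03)
 *   384 .. 387   (not sent by the loader)
 *   388 .. 643   RSA signature       -> Sign fragment (type 0x02)
 *   644 .. EOF   HCI command stream  -> Data fragments (type 0x01),
 *                flushed whenever the accumulated length is 4-byte
 *                aligned; an embedded 0xfc0e command supplies boot_param.
 */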
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 1e8955aaafed..41c642cc523f 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -69,6 +69,14 @@ struct intel_secure_send_result {
__u8 status;
} __packed;
+struct intel_reset {
+ __u8 reset_type;
+ __u8 patch_enable;
+ __u8 ddc_reload;
+ __u8 boot_option;
+ __le32 boot_param;
+} __packed;
+
#if IS_ENABLED(CONFIG_BT_INTEL)
int btintel_check_bdaddr(struct hci_dev *hdev);
@@ -89,7 +97,11 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver);
struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
u16 opcode_write);
-
+int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param);
+int btintel_read_boot_params(struct hci_dev *hdev,
+ struct intel_boot_params *params);
+int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw,
+ u32 *boot_param);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -165,4 +177,23 @@ static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev,
{
return ERR_PTR(-EINVAL);
}
+
+static inline int btintel_send_intel_reset(struct hci_dev *hdev,
+ u32 reset_param)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btintel_read_boot_params(struct hci_dev *hdev,
+ struct intel_boot_params *params)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btintel_download_firmware(struct hci_dev *dev,
+ const struct firmware *fw,
+ u32 *boot_param)
+{
+ return -EOPNOTSUPP;
+}
#endif
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 663bed63b871..2c9a5fc9137d 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -88,7 +88,8 @@ static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb)
break;
}
- kfree_skb(skb);
+ if (!ret)
+ kfree_skb(skb);
return ret;
}
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index c8e945d19ffe..20142bc77554 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -31,6 +31,7 @@
#include <linux/errno.h>
#include <linux/skbuff.h>
+#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
@@ -292,6 +293,14 @@ static int btsdio_probe(struct sdio_func *func,
tuple = tuple->next;
}
+ /* BCM43341 devices soldered onto the PCB (non-removable) use a
+ * UART connection for Bluetooth; ignore the BT SDIO interface.
+ */
+ if (func->vendor == SDIO_VENDOR_ID_BROADCOM &&
+ func->device == SDIO_DEVICE_ID_BROADCOM_43341 &&
+ !mmc_card_is_removable(func->card->host))
+ return -ENODEV;
+
data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f7120c9eb9bd..2a55380ad730 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/usb.h>
+#include <linux/usb/quirks.h>
#include <linux/firmware.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -40,6 +41,7 @@
static bool disable_scofix;
static bool force_scofix;
+static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
static bool reset = true;
@@ -263,6 +265,7 @@ static const struct usb_device_id blacklist_table[] = {
/* QCA ROME chipset */
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
@@ -270,6 +273,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
/* Broadcom BCM2035 */
@@ -387,9 +391,8 @@ static const struct usb_device_id blacklist_table[] = {
#define BTUSB_FIRMWARE_LOADED 7
#define BTUSB_FIRMWARE_FAILED 8
#define BTUSB_BOOTING 9
-#define BTUSB_RESET_RESUME 10
-#define BTUSB_DIAG_RUNNING 11
-#define BTUSB_OOB_WAKE_ENABLED 12
+#define BTUSB_DIAG_RUNNING 10
+#define BTUSB_OOB_WAKE_ENABLED 11
struct btusb_data {
struct hci_dev *hdev;
@@ -2006,15 +2009,11 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
static int btusb_setup_intel_new(struct hci_dev *hdev)
{
- static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
- 0x00, 0x08, 0x04, 0x00 };
struct btusb_data *data = hci_get_drvdata(hdev);
- struct sk_buff *skb;
struct intel_version ver;
- struct intel_boot_params *params;
+ struct intel_boot_params params;
const struct firmware *fw;
- const u8 *fw_ptr;
- u32 frag_len;
+ u32 boot_param;
char fwname[64];
ktime_t calltime, delta, rettime;
unsigned long long duration;
@@ -2022,6 +2021,12 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ /* Set the default boot parameter to 0x0; it is updated to the
+ * SKU-specific boot parameter once the embedded Intel_Write_Boot_Params
+ * command is parsed while downloading the firmware.
+ */
+ boot_param = 0x00000000;
+
calltime = ktime_get();
/* Read the Intel version information to determine if the device
@@ -2092,55 +2097,24 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Read the secure boot parameters to identify the operating
* details of the bootloader.
*/
- skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
- hdev->name, PTR_ERR(skb));
- return PTR_ERR(skb);
- }
-
- if (skb->len != sizeof(*params)) {
- BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
- kfree_skb(skb);
- return -EILSEQ;
- }
-
- params = (struct intel_boot_params *)skb->data;
-
- bt_dev_info(hdev, "Device revision is %u",
- le16_to_cpu(params->dev_revid));
-
- bt_dev_info(hdev, "Secure boot is %s",
- params->secure_boot ? "enabled" : "disabled");
-
- bt_dev_info(hdev, "OTP lock is %s",
- params->otp_lock ? "enabled" : "disabled");
-
- bt_dev_info(hdev, "API lock is %s",
- params->api_lock ? "enabled" : "disabled");
-
- bt_dev_info(hdev, "Debug lock is %s",
- params->debug_lock ? "enabled" : "disabled");
-
- bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
- params->min_fw_build_nn, params->min_fw_build_cw,
- 2000 + params->min_fw_build_yy);
+ err = btintel_read_boot_params(hdev, &params);
+ if (err)
+ return err;
/* It is required that every single firmware fragment is acknowledged
* with a command complete event. If the boot parameters indicate
* that this bootloader does not send them, then abort the setup.
*/
- if (params->limited_cce != 0x00) {
+ if (params.limited_cce != 0x00) {
BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
- hdev->name, params->limited_cce);
- kfree_skb(skb);
+ hdev->name, params.limited_cce);
return -EINVAL;
}
/* If the OTP has no valid Bluetooth device address, then there will
* also be no valid address for the operational firmware.
*/
- if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+ if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
}
@@ -2171,7 +2145,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
case 0x0c: /* WsP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params->dev_revid));
+ le16_to_cpu(params.dev_revid));
break;
case 0x11: /* JfP */
case 0x12: /* ThP */
@@ -2189,7 +2163,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
if (err < 0) {
BT_ERR("%s: Failed to load Intel firmware file (%d)",
hdev->name, err);
- kfree_skb(skb);
return err;
}
@@ -2203,7 +2176,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
case 0x0c: /* WsP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params->dev_revid));
+ le16_to_cpu(params.dev_revid));
break;
case 0x11: /* JfP */
case 0x12: /* ThP */
@@ -2217,8 +2190,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
return -EINVAL;
}
- kfree_skb(skb);
-
if (fw->size < 644) {
BT_ERR("%s: Invalid size of firmware file (%zu)",
hdev->name, fw->size);
@@ -2228,64 +2199,10 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
set_bit(BTUSB_DOWNLOADING, &data->flags);
- /* Start the firmware download transaction with the Init fragment
- * represented by the 128 bytes of CSS header.
- */
- err = btintel_secure_send(hdev, 0x00, 128, fw->data);
- if (err < 0) {
- BT_ERR("%s: Failed to send firmware header (%d)",
- hdev->name, err);
- goto done;
- }
-
- /* Send the 256 bytes of public key information from the firmware
- * as the PKey fragment.
- */
- err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
- if (err < 0) {
- BT_ERR("%s: Failed to send firmware public key (%d)",
- hdev->name, err);
- goto done;
- }
-
- /* Send the 256 bytes of signature information from the firmware
- * as the Sign fragment.
- */
- err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
- if (err < 0) {
- BT_ERR("%s: Failed to send firmware signature (%d)",
- hdev->name, err);
+ /* Start firmware downloading and get boot parameter */
+ err = btintel_download_firmware(hdev, fw, &boot_param);
+ if (err < 0)
goto done;
- }
-
- fw_ptr = fw->data + 644;
- frag_len = 0;
-
- while (fw_ptr - fw->data < fw->size) {
- struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
-
- frag_len += sizeof(*cmd) + cmd->plen;
-
- /* The parameter length of the secure send command requires
- * a 4 byte alignment. It happens so that the firmware file
- * contains proper Intel_NOP commands to align the fragments
- * as needed.
- *
- * Send set of commands with 4 byte alignment from the
- * firmware data buffer as a single Data fragement.
- */
- if (!(frag_len % 4)) {
- err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
- if (err < 0) {
- BT_ERR("%s: Failed to send firmware data (%d)",
- hdev->name, err);
- goto done;
- }
-
- fw_ptr += frag_len;
- frag_len = 0;
- }
- }
set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
@@ -2338,12 +2255,9 @@ done:
set_bit(BTUSB_BOOTING, &data->flags);
- skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
- HCI_INIT_TIMEOUT);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- kfree_skb(skb);
+ err = btintel_send_intel_reset(hdev, boot_param);
+ if (err)
+ return err;
/* The bootloader will not indicate when the device is ready. This
* is done by the operational firmware sending bootup notification.
@@ -3120,9 +3034,9 @@ static int btusb_probe(struct usb_interface *intf,
/* QCA Rome devices lose their updated firmware over suspend,
* but the USB hub doesn't notice any status change.
- * Explicitly request a device reset on resume.
+ * Explicitly request a device reset on resume.
*/
- set_bit(BTUSB_RESET_RESUME, &data->flags);
+ interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
#ifdef CONFIG_BT_HCIBTUSB_RTL
@@ -3133,7 +3047,7 @@ static int btusb_probe(struct usb_interface *intf,
* but the USB hub doesn't notice any status change.
* Explicitly request a device reset on resume.
*/
- set_bit(BTUSB_RESET_RESUME, &data->flags);
+ interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
#endif
@@ -3213,6 +3127,9 @@ static int btusb_probe(struct usb_interface *intf,
}
#endif
+ if (enable_autosuspend)
+ usb_enable_autosuspend(data->udev);
+
err = hci_register_dev(hdev);
if (err < 0)
goto out_free_dev;
@@ -3299,14 +3216,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
enable_irq(data->oob_wake_irq);
}
- /* Optionally request a device reset on resume, but only when
- * wakeups are disabled. If wakeups are enabled we assume the
- * device will stay powered up throughout suspend.
- */
- if (test_bit(BTUSB_RESET_RESUME, &data->flags) &&
- !device_may_wakeup(&data->udev->dev))
- data->udev->reset_resume = 1;
-
return 0;
}
@@ -3425,6 +3334,9 @@ MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size");
module_param(force_scofix, bool, 0644);
MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers size");
+module_param(enable_autosuspend, bool, 0644);
+MODULE_PARM_DESC(enable_autosuspend, "Enable USB autosuspend by default");
+
module_param(reset, bool, 0644);
MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 707c2d1b84c7..0438a64b8185 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -29,6 +29,7 @@
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/property.h>
+#include <linux/platform_data/x86/apple.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
@@ -52,7 +53,37 @@
#define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */
-/* device driver resources */
+/**
+ * struct bcm_device - device driver resources
+ * @serdev_hu: HCI UART controller struct
+ * @list: bcm_device_list node
+ * @dev: physical UART slave
+ * @name: device name logged by bt_dev_*() functions
+ * @device_wakeup: BT_WAKE pin,
+ * assert = Bluetooth device must wake up or remain awake,
+ * deassert = Bluetooth device may sleep when sleep criteria are met
+ * @shutdown: BT_REG_ON pin,
+ * power up or power down Bluetooth device internal regulators
+ * @set_device_wakeup: callback to toggle BT_WAKE pin
+ * either by accessing @device_wakeup or by calling @btlp
+ * @set_shutdown: callback to toggle BT_REG_ON pin
+ * either by accessing @shutdown or by calling @btpu/@btpd
+ * @btlp: Apple ACPI method to toggle BT_WAKE pin ("Bluetooth Low Power")
+ * @btpu: Apple ACPI method to drive BT_REG_ON pin high ("Bluetooth Power Up")
+ * @btpd: Apple ACPI method to drive BT_REG_ON pin low ("Bluetooth Power Down")
+ * @clk: clock used by Bluetooth device
+ * @clk_enabled: whether @clk is prepared and enabled
+ * @init_speed: default baudrate of Bluetooth device;
+ * the host UART is initially set to this baudrate so that
+ * it can configure the Bluetooth device for @oper_speed
+ * @oper_speed: preferred baudrate of Bluetooth device;
+ * set to 0 if @init_speed is already the preferred baudrate
+ * @irq: interrupt triggered by HOST_WAKE_BT pin
+ * @irq_active_low: whether @irq is active low
+ * @hu: pointer to HCI UART controller struct,
+ * used to disable flow control during runtime suspend and system sleep
+ * @is_suspended: whether flow control is currently disabled
+ */
struct bcm_device {
/* Must be the first member, hci_serdev.c expects this. */
struct hci_uart serdev_hu;
@@ -63,6 +94,11 @@ struct bcm_device {
const char *name;
struct gpio_desc *device_wakeup;
struct gpio_desc *shutdown;
+ int (*set_device_wakeup)(struct bcm_device *, bool);
+ int (*set_shutdown)(struct bcm_device *, bool);
+#ifdef CONFIG_ACPI
+ acpi_handle btlp, btpu, btpd;
+#endif
struct clk *clk;
bool clk_enabled;
@@ -74,7 +110,7 @@ struct bcm_device {
#ifdef CONFIG_PM
struct hci_uart *hu;
- bool is_suspended; /* suspend/resume flag */
+ bool is_suspended;
#endif
};
@@ -170,11 +206,21 @@ static bool bcm_device_exists(struct bcm_device *device)
static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
{
- if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
- clk_prepare_enable(dev->clk);
+ int err;
- gpiod_set_value(dev->shutdown, powered);
- gpiod_set_value(dev->device_wakeup, powered);
+ if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled) {
+ err = clk_prepare_enable(dev->clk);
+ if (err)
+ return err;
+ }
+
+ err = dev->set_shutdown(dev, powered);
+ if (err)
+ goto err_clk_disable;
+
+ err = dev->set_device_wakeup(dev, powered);
+ if (err)
+ goto err_revert_shutdown;
if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled)
clk_disable_unprepare(dev->clk);
@@ -182,6 +228,13 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
dev->clk_enabled = powered;
return 0;
+
+err_revert_shutdown:
+ dev->set_shutdown(dev, !powered);
+err_clk_disable:
+ if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
+ clk_disable_unprepare(dev->clk);
+ return err;
}
#ifdef CONFIG_PM
@@ -191,9 +244,7 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
bt_dev_dbg(bdev, "Host wake IRQ");
- pm_runtime_get(bdev->dev);
- pm_runtime_mark_last_busy(bdev->dev);
- pm_runtime_put_autosuspend(bdev->dev);
+ pm_request_resume(bdev->dev);
return IRQ_HANDLED;
}
@@ -218,8 +269,10 @@ static int bcm_request_irq(struct bcm_data *bcm)
bdev->irq_active_low ? IRQF_TRIGGER_FALLING :
IRQF_TRIGGER_RISING,
"host_wake", bdev);
- if (err)
+ if (err) {
+ bdev->irq = err;
goto unlock;
+ }
device_init_wakeup(bdev->dev, true);
@@ -247,8 +300,8 @@ static const struct bcm_set_sleep_mode default_sleep_params = {
/* Irrelevant USB flags */
.usb_auto_sleep = 0,
.usb_resume_timeout = 0,
+ .break_to_host = 0,
.pulsed_host_wake = 0,
- .break_to_host = 0
};
static int bcm_setup_sleep(struct hci_uart *hu)
@@ -304,6 +357,7 @@ static int bcm_open(struct hci_uart *hu)
{
struct bcm_data *bcm;
struct list_head *p;
+ int err;
bt_dev_dbg(hu->hdev, "hu %p", hu);
@@ -318,7 +372,10 @@ static int bcm_open(struct hci_uart *hu)
mutex_lock(&bcm_device_lock);
if (hu->serdev) {
- serdev_device_open(hu->serdev);
+ err = serdev_device_open(hu->serdev);
+ if (err)
+ goto err_free;
+
bcm->dev = serdev_device_get_drvdata(hu->serdev);
goto out;
}
@@ -346,17 +403,33 @@ out:
if (bcm->dev) {
hu->init_speed = bcm->dev->init_speed;
hu->oper_speed = bcm->dev->oper_speed;
- bcm_gpio_set_power(bcm->dev, true);
+ err = bcm_gpio_set_power(bcm->dev, true);
+ if (err)
+ goto err_unset_hu;
}
mutex_unlock(&bcm_device_lock);
return 0;
+
+err_unset_hu:
+ if (hu->serdev)
+ serdev_device_close(hu->serdev);
+#ifdef CONFIG_PM
+ else
+ bcm->dev->hu = NULL;
+#endif
+err_free:
+ mutex_unlock(&bcm_device_lock);
+ hu->priv = NULL;
+ kfree(bcm);
+ return err;
}
static int bcm_close(struct hci_uart *hu)
{
struct bcm_data *bcm = hu->priv;
struct bcm_device *bdev = NULL;
+ int err;
bt_dev_dbg(hu->hdev, "hu %p", hu);
@@ -374,16 +447,17 @@ static int bcm_close(struct hci_uart *hu)
}
if (bdev) {
- bcm_gpio_set_power(bdev, false);
-#ifdef CONFIG_PM
- pm_runtime_disable(bdev->dev);
- pm_runtime_set_suspended(bdev->dev);
-
- if (device_can_wakeup(bdev->dev)) {
+ if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) {
devm_free_irq(bdev->dev, bdev->irq, bdev);
device_init_wakeup(bdev->dev, false);
+ pm_runtime_disable(bdev->dev);
}
-#endif
+
+ err = bcm_gpio_set_power(bdev, false);
+ if (err)
+ bt_dev_err(hu->hdev, "Failed to power down");
+ else
+ pm_runtime_set_suspended(bdev->dev);
}
mutex_unlock(&bcm_device_lock);
@@ -512,11 +586,8 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
} else if (!bcm->rx_skb) {
/* Delay auto-suspend when receiving completed packet */
mutex_lock(&bcm_device_lock);
- if (bcm->dev && bcm_device_exists(bcm->dev)) {
- pm_runtime_get(bcm->dev->dev);
- pm_runtime_mark_last_busy(bcm->dev->dev);
- pm_runtime_put_autosuspend(bcm->dev->dev);
- }
+ if (bcm->dev && bcm_device_exists(bcm->dev))
+ pm_request_resume(bcm->dev->dev);
mutex_unlock(&bcm_device_lock);
}
@@ -566,6 +637,7 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
static int bcm_suspend_device(struct device *dev)
{
struct bcm_device *bdev = dev_get_drvdata(dev);
+ int err;
bt_dev_dbg(bdev, "");
@@ -577,27 +649,37 @@ static int bcm_suspend_device(struct device *dev)
}
/* Suspend the device */
- if (bdev->device_wakeup) {
- gpiod_set_value(bdev->device_wakeup, false);
- bt_dev_dbg(bdev, "suspend, delaying 15 ms");
- mdelay(15);
+ err = bdev->set_device_wakeup(bdev, false);
+ if (err) {
+ if (bdev->is_suspended && bdev->hu) {
+ bdev->is_suspended = false;
+ hci_uart_set_flow_control(bdev->hu, false);
+ }
+ return -EBUSY;
}
+ bt_dev_dbg(bdev, "suspend, delaying 15 ms");
+ msleep(15);
+
return 0;
}
static int bcm_resume_device(struct device *dev)
{
struct bcm_device *bdev = dev_get_drvdata(dev);
+ int err;
bt_dev_dbg(bdev, "");
- if (bdev->device_wakeup) {
- gpiod_set_value(bdev->device_wakeup, true);
- bt_dev_dbg(bdev, "resume, delaying 15 ms");
- mdelay(15);
+ err = bdev->set_device_wakeup(bdev, true);
+ if (err) {
+ dev_err(dev, "Failed to power up\n");
+ return err;
}
+ bt_dev_dbg(bdev, "resume, delaying 15 ms");
+ msleep(15);
+
/* When this executes, the device has woken up already */
if (bdev->is_suspended && bdev->hu) {
bdev->is_suspended = false;
@@ -632,7 +714,7 @@ static int bcm_suspend(struct device *dev)
if (pm_runtime_active(dev))
bcm_suspend_device(dev);
- if (device_may_wakeup(dev)) {
+ if (device_may_wakeup(dev) && bdev->irq > 0) {
error = enable_irq_wake(bdev->irq);
if (!error)
bt_dev_dbg(bdev, "BCM irq: enabled");
@@ -648,6 +730,7 @@ unlock:
static int bcm_resume(struct device *dev)
{
struct bcm_device *bdev = dev_get_drvdata(dev);
+ int err = 0;
bt_dev_dbg(bdev, "resume: is_suspended %d", bdev->is_suspended);
@@ -662,19 +745,21 @@ static int bcm_resume(struct device *dev)
if (!bdev->hu)
goto unlock;
- if (device_may_wakeup(dev)) {
+ if (device_may_wakeup(dev) && bdev->irq > 0) {
disable_irq_wake(bdev->irq);
bt_dev_dbg(bdev, "BCM irq: disabled");
}
- bcm_resume_device(dev);
+ err = bcm_resume_device(dev);
unlock:
mutex_unlock(&bcm_device_lock);
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ if (!err) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
return 0;
}
@@ -771,25 +856,84 @@ static int bcm_resource(struct acpi_resource *ares, void *data)
return 0;
}
+
+static int bcm_apple_set_device_wakeup(struct bcm_device *dev, bool awake)
+{
+ if (ACPI_FAILURE(acpi_execute_simple_method(dev->btlp, NULL, !awake)))
+ return -EIO;
+
+ return 0;
+}
+
+static int bcm_apple_set_shutdown(struct bcm_device *dev, bool powered)
+{
+ if (ACPI_FAILURE(acpi_evaluate_object(powered ? dev->btpu : dev->btpd,
+ NULL, NULL, NULL)))
+ return -EIO;
+
+ return 0;
+}
+
+static int bcm_apple_get_resources(struct bcm_device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev->dev);
+ const union acpi_object *obj;
+
+ if (!adev ||
+ ACPI_FAILURE(acpi_get_handle(adev->handle, "BTLP", &dev->btlp)) ||
+ ACPI_FAILURE(acpi_get_handle(adev->handle, "BTPU", &dev->btpu)) ||
+ ACPI_FAILURE(acpi_get_handle(adev->handle, "BTPD", &dev->btpd)))
+ return -ENODEV;
+
+ if (!acpi_dev_get_property(adev, "baud", ACPI_TYPE_BUFFER, &obj) &&
+ obj->buffer.length == 8)
+ dev->init_speed = *(u64 *)obj->buffer.pointer;
+
+ dev->set_device_wakeup = bcm_apple_set_device_wakeup;
+ dev->set_shutdown = bcm_apple_set_shutdown;
+
+ return 0;
+}
+#else
+static inline int bcm_apple_get_resources(struct bcm_device *dev)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_ACPI */
+static int bcm_gpio_set_device_wakeup(struct bcm_device *dev, bool awake)
+{
+ gpiod_set_value(dev->device_wakeup, awake);
+ return 0;
+}
+
+static int bcm_gpio_set_shutdown(struct bcm_device *dev, bool powered)
+{
+ gpiod_set_value(dev->shutdown, powered);
+ return 0;
+}
+
static int bcm_get_resources(struct bcm_device *dev)
{
dev->name = dev_name(dev->dev);
+ if (x86_apple_machine && !bcm_apple_get_resources(dev))
+ return 0;
+
dev->clk = devm_clk_get(dev->dev, NULL);
- dev->device_wakeup = devm_gpiod_get_optional(dev->dev,
- "device-wakeup",
- GPIOD_OUT_LOW);
+ dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup",
+ GPIOD_OUT_LOW);
if (IS_ERR(dev->device_wakeup))
return PTR_ERR(dev->device_wakeup);
- dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown",
- GPIOD_OUT_LOW);
+ dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW);
if (IS_ERR(dev->shutdown))
return PTR_ERR(dev->shutdown);
+ dev->set_device_wakeup = bcm_gpio_set_device_wakeup;
+ dev->set_shutdown = bcm_gpio_set_shutdown;
+
/* IRQ can be declared in ACPI table as Interrupt or GpioInt */
if (dev->irq <= 0) {
struct gpio_desc *gpio;
@@ -802,7 +946,7 @@ static int bcm_get_resources(struct bcm_device *dev)
dev->irq = gpiod_to_irq(gpio);
}
- dev_info(dev->dev, "BCM irq: %d\n", dev->irq);
+ dev_dbg(dev->dev, "BCM irq: %d\n", dev->irq);
return 0;
}
@@ -892,7 +1036,9 @@ static int bcm_probe(struct platform_device *pdev)
list_add_tail(&dev->list, &bcm_device_list);
mutex_unlock(&bcm_device_lock);
- bcm_gpio_set_power(dev, false);
+ ret = bcm_gpio_set_power(dev, false);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to power down\n");
return 0;
}
@@ -939,6 +1085,7 @@ static const struct acpi_device_id bcm_acpi_match[] = {
{ "BCM2E65", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
{ "BCM2E67", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
{ "BCM2E71", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E72", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
{ "BCM2E7B", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
{ "BCM2E7C", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
{ "BCM2E7E", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
@@ -993,7 +1140,9 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
if (err)
return err;
- bcm_gpio_set_power(bcmdev, false);
+ err = bcm_gpio_set_power(bcmdev, false);
+ if (err)
+ dev_err(&serdev->dev, "Failed to power down\n");
return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto);
}
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index aad07e40ea4f..7c166e3b308b 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -540,18 +540,15 @@ static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
static int intel_setup(struct hci_uart *hu)
{
- static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
- 0x00, 0x08, 0x04, 0x00 };
struct intel_data *intel = hu->priv;
struct hci_dev *hdev = hu->hdev;
struct sk_buff *skb;
struct intel_version ver;
- struct intel_boot_params *params;
+ struct intel_boot_params params;
struct list_head *p;
const struct firmware *fw;
- const u8 *fw_ptr;
char fwname[64];
- u32 frag_len;
+ u32 boot_param;
ktime_t calltime, delta, rettime;
unsigned long long duration;
unsigned int init_speed, oper_speed;
@@ -563,6 +560,12 @@ static int intel_setup(struct hci_uart *hu)
hu->hdev->set_diag = btintel_set_diag;
hu->hdev->set_bdaddr = btintel_set_bdaddr;
+ /* Set the default boot parameter to 0x0; it is updated to the
+ * SKU-specific boot parameter once the embedded Intel_Write_Boot_Params
+ * command is parsed while downloading the firmware.
+ */
+ boot_param = 0x00000000;
+
calltime = ktime_get();
if (hu->init_speed)
@@ -656,85 +659,95 @@ static int intel_setup(struct hci_uart *hu)
/* Read the secure boot parameters to identify the operating
* details of the bootloader.
*/
- skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_CMD_TIMEOUT);
- if (IS_ERR(skb)) {
- bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)",
- PTR_ERR(skb));
- return PTR_ERR(skb);
- }
-
- if (skb->len != sizeof(*params)) {
- bt_dev_err(hdev, "Intel boot parameters size mismatch");
- kfree_skb(skb);
- return -EILSEQ;
- }
-
- params = (struct intel_boot_params *)skb->data;
- if (params->status) {
- bt_dev_err(hdev, "Intel boot parameters command failure (%02x)",
- params->status);
- err = -bt_to_errno(params->status);
- kfree_skb(skb);
+ err = btintel_read_boot_params(hdev, &params);
+ if (err)
return err;
- }
-
- bt_dev_info(hdev, "Device revision is %u",
- le16_to_cpu(params->dev_revid));
-
- bt_dev_info(hdev, "Secure boot is %s",
- params->secure_boot ? "enabled" : "disabled");
-
- bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
- params->min_fw_build_nn, params->min_fw_build_cw,
- 2000 + params->min_fw_build_yy);
/* It is required that every single firmware fragment is acknowledged
* with a command complete event. If the boot parameters indicate
* that this bootloader does not send them, then abort the setup.
*/
- if (params->limited_cce != 0x00) {
+ if (params.limited_cce != 0x00) {
bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)",
- params->limited_cce);
- kfree_skb(skb);
+ params.limited_cce);
return -EINVAL;
}
/* If the OTP has no valid Bluetooth device address, then there will
* also be no valid address for the operational firmware.
*/
- if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+ if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
}
/* With this Intel bootloader only the hardware variant and device
- * revision information are used to select the right firmware.
+ * revision information are used to select the right firmware for SfP
+ * and WsP.
*
* The firmware filename is ibt-<hw_variant>-<dev_revid>.sfi.
*
* Currently the supported hardware variants are:
* 11 (0x0b) for iBT 3.0 (LnP/SfP)
+ * 12 (0x0c) for iBT 3.5 (WsP)
+ *
+ * For ThP/JfP and for future SKUs, the FW name varies based on HW
+ * variant, HW revision and FW revision, as these are dependent on CNVi
+ * and RF Combination.
+ *
+ * 18 (0x12) for iBT 3.5 (ThP/JfP)
+ *
+ * The firmware file name for these will be
+ * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
+ */
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params->dev_revid));
+ switch (ver.hw_variant) {
+ case 0x0b: /* SfP */
+ case 0x0c: /* WsP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(params.dev_revid));
+ break;
+ case 0x12: /* ThP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(ver.hw_revision),
+ le16_to_cpu(ver.fw_revision));
+ break;
+ default:
+ bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
+ ver.hw_variant);
+ return -EINVAL;
+ }
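A worked example of the naming scheme above (all numeric values are illustrative, not taken from real hardware):

	/*
	 * hw_variant 0x12 (ThP, printed as 18), hw_revision 16, fw_revision 1
	 *   -> "intel/ibt-18-16-1.sfi" and "intel/ibt-18-16-1.ddc"
	 * hw_variant 0x0c (WsP, printed as 12), dev_revid 5
	 *   -> "intel/ibt-12-5.sfi" and "intel/ibt-12-5.ddc"
	 */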
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
bt_dev_err(hdev, "Failed to load Intel firmware file (%d)",
err);
- kfree_skb(skb);
return err;
}
bt_dev_info(hdev, "Found device firmware: %s", fwname);
/* Save the DDC file name for later */
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params->dev_revid));
-
- kfree_skb(skb);
+ switch (ver.hw_variant) {
+ case 0x0b: /* SfP */
+ case 0x0c: /* WsP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(params.dev_revid));
+ break;
+ case 0x12: /* ThP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(ver.hw_revision),
+ le16_to_cpu(ver.fw_revision));
+ break;
+ default:
+ bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
+ ver.hw_variant);
+ return -EINVAL;
+ }
if (fw->size < 644) {
bt_dev_err(hdev, "Invalid size of firmware file (%zu)",
@@ -745,70 +758,10 @@ static int intel_setup(struct hci_uart *hu)
set_bit(STATE_DOWNLOADING, &intel->flags);
- /* Start the firmware download transaction with the Init fragment
- * represented by the 128 bytes of CSS header.
- */
- err = btintel_secure_send(hdev, 0x00, 128, fw->data);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
- goto done;
- }
-
- /* Send the 256 bytes of public key information from the firmware
- * as the PKey fragment.
- */
- err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send firmware public key (%d)",
- err);
- goto done;
- }
-
- /* Send the 256 bytes of signature information from the firmware
- * as the Sign fragment.
- */
- err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send firmware signature (%d)",
- err);
+ /* Start firmware downloading and get boot parameter */
+ err = btintel_download_firmware(hdev, fw, &boot_param);
+ if (err < 0)
goto done;
- }
-
- fw_ptr = fw->data + 644;
- frag_len = 0;
-
- while (fw_ptr - fw->data < fw->size) {
- struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
-
- frag_len += sizeof(*cmd) + cmd->plen;
-
- bt_dev_dbg(hdev, "Patching %td/%zu", (fw_ptr - fw->data),
- fw->size);
-
- /* The parameter length of the secure send command requires
- * a 4 byte alignment. It happens so that the firmware file
- * contains proper Intel_NOP commands to align the fragments
- * as needed.
- *
- * Send set of commands with 4 byte alignment from the
- * firmware data buffer as a single Data fragement.
- */
- if (frag_len % 4)
- continue;
-
- /* Send each command from the firmware data buffer as
- * a single Data fragment.
- */
- err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send firmware data (%d)",
- err);
- goto done;
- }
-
- fw_ptr += frag_len;
- frag_len = 0;
- }
set_bit(STATE_FIRMWARE_LOADED, &intel->flags);
@@ -869,12 +822,9 @@ done:
set_bit(STATE_BOOTING, &intel->flags);
- skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
- HCI_CMD_TIMEOUT);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- kfree_skb(skb);
+ err = btintel_send_intel_reset(hdev, boot_param);
+ if (err)
+ return err;
/* The bootloader will not indicate when the device is ready. This
* is done by the operational firmware sending bootup notification.
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index c823914b3a80..b6a71705b7d6 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -794,7 +794,7 @@ static ssize_t hci_uart_tty_write(struct tty_struct *tty, struct file *file,
return 0;
}
-static unsigned int hci_uart_tty_poll(struct tty_struct *tty,
+static __poll_t hci_uart_tty_poll(struct tty_struct *tty,
struct file *filp, poll_table *wait)
{
return 0;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index e2c078d61730..1b4417a623a4 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -53,9 +53,14 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <linux/gpio/consumer.h>
+#include <linux/nvmem-consumer.h>
#include "hci_uart.h"
+/* Vendor-specific HCI commands */
+#define HCI_VS_WRITE_BD_ADDR 0xfc06
+#define HCI_VS_UPDATE_UART_HCI_BAUDRATE 0xff36
+
/* HCILL commands */
#define HCILL_GO_TO_SLEEP_IND 0x30
#define HCILL_GO_TO_SLEEP_ACK 0x31
@@ -86,6 +91,7 @@ struct ll_device {
struct serdev_device *serdev;
struct gpio_desc *enable_gpio;
struct clk *ext_clk;
+ bdaddr_t bdaddr;
};
struct ll_struct {
@@ -620,7 +626,7 @@ static int download_firmware(struct ll_device *lldev)
case ACTION_SEND_COMMAND: /* action send */
bt_dev_dbg(lldev->hu.hdev, "S");
cmd = (struct hci_command *)action_ptr;
- if (cmd->opcode == 0xff36) {
+ if (cmd->opcode == HCI_VS_UPDATE_UART_HCI_BAUDRATE) {
/* ignore remote change
* baud rate HCI VS command
*/
@@ -628,11 +634,11 @@ static int download_firmware(struct ll_device *lldev)
break;
}
if (cmd->prefix != 1)
- bt_dev_dbg(lldev->hu.hdev, "command type %d\n", cmd->prefix);
+ bt_dev_dbg(lldev->hu.hdev, "command type %d", cmd->prefix);
skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen, &cmd->speed, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- bt_dev_err(lldev->hu.hdev, "send command failed\n");
+ bt_dev_err(lldev->hu.hdev, "send command failed");
err = PTR_ERR(skb);
goto out_rel_fw;
}
@@ -659,6 +665,24 @@ out_rel_fw:
return err;
}
+static int ll_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ bdaddr_t bdaddr_swapped;
+ struct sk_buff *skb;
+
+ /* HCI_VS_WRITE_BD_ADDR (at least on a CC2560A chip) expects the BD
+ * address to be MSB first, but bdaddr_t has the convention of being
+ * LSB first.
+ */
+ baswap(&bdaddr_swapped, bdaddr);
+ skb = __hci_cmd_sync(hdev, HCI_VS_WRITE_BD_ADDR, sizeof(bdaddr_t),
+ &bdaddr_swapped, HCI_INIT_TIMEOUT);
+ if (!IS_ERR(skb))
+ kfree_skb(skb);
+
+ return PTR_ERR_OR_ZERO(skb);
+}
+
static int ll_setup(struct hci_uart *hu)
{
int err, retry = 3;
@@ -671,14 +695,20 @@ static int ll_setup(struct hci_uart *hu)
lldev = serdev_device_get_drvdata(serdev);
+ hu->hdev->set_bdaddr = ll_set_bdaddr;
+
serdev_device_set_flow_control(serdev, true);
do {
- /* Configure BT_EN to HIGH state */
+ /* Reset the Bluetooth device */
gpiod_set_value_cansleep(lldev->enable_gpio, 0);
msleep(5);
gpiod_set_value_cansleep(lldev->enable_gpio, 1);
- msleep(100);
+ err = serdev_device_wait_for_cts(serdev, true, 200);
+ if (err) {
+ bt_dev_err(hu->hdev, "Failed to get CTS");
+ return err;
+ }
err = download_firmware(lldev);
if (!err)
@@ -691,6 +721,18 @@ static int ll_setup(struct hci_uart *hu)
if (err)
return err;
+ /* Set BD address if one was specified at probe */
+ if (!bacmp(&lldev->bdaddr, BDADDR_NONE)) {
+ /* This means that there was an error getting the BD address
+ * during probe, so mark the device as having a bad address.
+ */
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ } else if (bacmp(&lldev->bdaddr, BDADDR_ANY)) {
+ err = ll_set_bdaddr(hu->hdev, &lldev->bdaddr);
+ if (err)
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ }
+
/* Operational speed if any */
if (hu->oper_speed)
speed = hu->oper_speed;
@@ -700,7 +742,12 @@ static int ll_setup(struct hci_uart *hu)
speed = 0;
if (speed) {
- struct sk_buff *skb = __hci_cmd_sync(hu->hdev, 0xff36, sizeof(speed), &speed, HCI_INIT_TIMEOUT);
+ __le32 speed_le = cpu_to_le32(speed);
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
+ sizeof(speed_le), &speed_le,
+ HCI_INIT_TIMEOUT);
if (!IS_ERR(skb)) {
kfree_skb(skb);
serdev_device_set_baudrate(serdev, speed);
@@ -716,6 +763,7 @@ static int hci_ti_probe(struct serdev_device *serdev)
{
struct hci_uart *hu;
struct ll_device *lldev;
+ struct nvmem_cell *bdaddr_cell;
u32 max_speed = 3000000;
lldev = devm_kzalloc(&serdev->dev, sizeof(struct ll_device), GFP_KERNEL);
@@ -737,6 +785,52 @@ static int hci_ti_probe(struct serdev_device *serdev)
of_property_read_u32(serdev->dev.of_node, "max-speed", &max_speed);
hci_uart_set_speeds(hu, 115200, max_speed);
+ /* optional BD address from nvram */
+ bdaddr_cell = nvmem_cell_get(&serdev->dev, "bd-address");
+ if (IS_ERR(bdaddr_cell)) {
+ int err = PTR_ERR(bdaddr_cell);
+
+ if (err == -EPROBE_DEFER)
+ return err;
+
+ /* ENOENT means there is no matching nvmem cell and ENOSYS
+ * means that nvmem is not enabled in the kernel configuration.
+ */
+ if (err != -ENOENT && err != -ENOSYS) {
+ /* If there was some other error, give userspace a
+ * chance to fix the problem instead of failing to load
+ * the driver. Using BDADDR_NONE as a flag that is
+ * tested later in the setup function.
+ */
+ dev_warn(&serdev->dev,
+ "Failed to get \"bd-address\" nvmem cell (%d)\n",
+ err);
+ bacpy(&lldev->bdaddr, BDADDR_NONE);
+ }
+ } else {
+ bdaddr_t *bdaddr;
+ size_t len;
+
+ bdaddr = nvmem_cell_read(bdaddr_cell, &len);
+ nvmem_cell_put(bdaddr_cell);
+ if (IS_ERR(bdaddr)) {
+ dev_err(&serdev->dev, "Failed to read nvmem bd-address\n");
+ return PTR_ERR(bdaddr);
+ }
+ if (len != sizeof(bdaddr_t)) {
+ dev_err(&serdev->dev, "Invalid nvmem bd-address length\n");
+ kfree(bdaddr);
+ return -EINVAL;
+ }
+
+ /* As per the device tree bindings, the value from nvmem is
+ * expected to be MSB first, but in the kernel it is expected
+ * that bdaddr_t is LSB first.
+ */
+ baswap(&lldev->bdaddr, bdaddr);
+ kfree(bdaddr);
+ }
+
return hci_uart_register_device(hu, &llp);
}
@@ -748,6 +842,7 @@ static void hci_ti_remove(struct serdev_device *serdev)
}
static const struct of_device_id hci_ti_of_match[] = {
+ { .compatible = "ti,cc2560" },
{ .compatible = "ti,wl1271-st" },
{ .compatible = "ti,wl1273-st" },
{ .compatible = "ti,wl1281-st" },
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index bbd7db7384e6..05ec530b8a3a 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -932,6 +932,9 @@ static int qca_setup(struct hci_uart *hu)
if (!ret) {
set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
qca_debugfs_init(hdev);
+ } else if (ret == -ENOENT) {
+ /* No patch/nvm-config found, run with original fw/config */
+ ret = 0;
}
/* Setup bdaddr */
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 71664b22ec9d..e0e6461b9200 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -303,6 +303,7 @@ int hci_uart_register_device(struct hci_uart *hu,
hci_set_drvdata(hdev, hu);
INIT_WORK(&hu->write_work, hci_uart_write_work);
+ percpu_init_rwsem(&hu->proto_lock);
/* Only when vendor specific setup callback is provided, consider
* the manufacturer information valid. This avoids filling in the
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index e6f6dbc04131..0521748a1972 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -299,7 +299,7 @@ static ssize_t vhci_write(struct kiocb *iocb, struct iov_iter *from)
return vhci_get_user(data, from);
}
-static unsigned int vhci_poll(struct file *file, poll_table *wait)
+static __poll_t vhci_poll(struct file *file, poll_table *wait)
{
struct vhci_data *data = file->private_data;
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index dc7b3c7b7d42..57e011d36a79 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -120,7 +120,7 @@ config QCOM_EBI2
SRAM, ethernet adapters, FPGAs and LCD displays.
config SIMPLE_PM_BUS
- bool "Simple Power-Managed Bus Driver"
+ tristate "Simple Power-Managed Bus Driver"
depends on OF && PM
help
Driver for transparent busses that don't need a real driver, but
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 328ca93781cf..1b76d9585902 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = {
.match = sunxi_rsb_device_match,
.probe = sunxi_rsb_device_probe,
.remove = sunxi_rsb_device_remove,
+ .uevent = of_device_uevent_modalias,
};
static void sunxi_rsb_dev_release(struct device *dev)
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 1dfb9f8de171..a2a1c1478cd0 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -236,7 +236,7 @@ static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t
return ret;
}
-static unsigned int apm_poll(struct file *fp, poll_table * wait)
+static __poll_t apm_poll(struct file *fp, poll_table * wait)
{
struct apm_user *as = fp->private_data;
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 0d7b577e0ff0..2f92cc46698b 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -406,7 +406,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd,
* Do I need this function at all???
*/
#if 0
-static unsigned int dsp56k_poll(struct file *file, poll_table *wait)
+static __poll_t dsp56k_poll(struct file *file, poll_table *wait)
{
int dev = iminor(file_inode(file)) & 0x0f;
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 839ee61d352a..2697c22e3be2 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -91,7 +91,7 @@ static ssize_t dtlk_read(struct file *, char __user *,
size_t nbytes, loff_t * ppos);
static ssize_t dtlk_write(struct file *, const char __user *,
size_t nbytes, loff_t * ppos);
-static unsigned int dtlk_poll(struct file *, poll_table *);
+static __poll_t dtlk_poll(struct file *, poll_table *);
static int dtlk_open(struct inode *, struct file *);
static int dtlk_release(struct inode *, struct file *);
static long dtlk_ioctl(struct file *file,
@@ -228,9 +228,9 @@ static ssize_t dtlk_write(struct file *file, const char __user *buf,
return -EAGAIN;
}
-static unsigned int dtlk_poll(struct file *file, poll_table * wait)
+static __poll_t dtlk_poll(struct file *file, poll_table * wait)
{
- int mask = 0;
+ __poll_t mask = 0;
unsigned long expires;
TRACE_TEXT(" dtlk_poll");
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index b941e6d59fd6..dbed4953f86c 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -342,7 +342,7 @@ out:
return retval;
}
-static unsigned int hpet_poll(struct file *file, poll_table * wait)
+static __poll_t hpet_poll(struct file *file, poll_table * wait)
{
unsigned long v;
struct hpet_dev *devp;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index f6e3e5abc117..4d0f571c15f9 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -73,26 +73,14 @@ config HW_RANDOM_ATMEL
If unsure, say Y.
-config HW_RANDOM_BCM63XX
- tristate "Broadcom BCM63xx Random Number Generator support"
- depends on BCM63XX || BMIPS_GENERIC
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on the Broadcom BCM63xx SoCs.
-
- To compile this driver as a module, choose M here: the
- module will be called bcm63xx-rng
-
- If unusure, say Y.
-
config HW_RANDOM_BCM2835
- tristate "Broadcom BCM2835 Random Number Generator support"
- depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X
+ tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
+ depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
+ ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
- Generator hardware found on the Broadcom BCM2835 SoCs.
+ Generator hardware found on the Broadcom BCM2835 and BCM63xx SoCs.
To compile this driver as a module, choose M here: the
module will be called bcm2835-rng
@@ -306,19 +294,6 @@ config HW_RANDOM_POWERNV
If unsure, say Y.
-config HW_RANDOM_TPM
- tristate "TPM HW Random Number Generator support"
- depends on TCG_TPM
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator in the Trusted Platform Module
-
- To compile this driver as a module, choose M here: the
- module will be called tpm-rng.
-
- If unsure, say Y.
-
config HW_RANDOM_HISI
tristate "Hisilicon Random Number Generator support"
depends on HW_RANDOM && ARCH_HISI
@@ -449,6 +424,18 @@ config HW_RANDOM_S390
If unsure, say Y.
+config HW_RANDOM_EXYNOS
+ tristate "Samsung Exynos True Random Number Generator support"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ default HW_RANDOM
+ ---help---
+ This driver provides support for the True Random Number
+ Generator available in Exynos SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called exynos-trng.
+
+ If unsure, say Y.
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index f3728d008fff..b780370bd4eb 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -9,11 +9,11 @@ obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
-obj-$(CONFIG_HW_RANDOM_BCM63XX) += bcm63xx-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
n2-rng-y := n2-drv.o n2-asm.o
obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
+obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o
obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
@@ -27,7 +27,6 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
-obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 574211a49549..7a84cec30c3a 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -15,6 +15,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
+#include <linux/clk.h>
#define RNG_CTRL 0x0
#define RNG_STATUS 0x4
@@ -29,116 +30,180 @@
#define RNG_INT_OFF 0x1
-static void __init nsp_rng_init(void __iomem *base)
+struct bcm2835_rng_priv {
+ struct hwrng rng;
+ void __iomem *base;
+ bool mask_interrupts;
+ struct clk *clk;
+};
+
+static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng)
{
- u32 val;
+ return container_of(rng, struct bcm2835_rng_priv, rng);
+}
+
+static inline u32 rng_readl(struct bcm2835_rng_priv *priv, u32 offset)
+{
+ /* MIPS chips strapped for BE will automagically configure the
+ * peripheral registers for CPU-native byte order.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return __raw_readl(priv->base + offset);
+ else
+ return readl(priv->base + offset);
+}
- /* mask the interrupt */
- val = readl(base + RNG_INT_MASK);
- val |= RNG_INT_OFF;
- writel(val, base + RNG_INT_MASK);
+static inline void rng_writel(struct bcm2835_rng_priv *priv, u32 val,
+ u32 offset)
+{
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ __raw_writel(val, priv->base + offset);
+ else
+ writel(val, priv->base + offset);
}
static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
- void __iomem *rng_base = (void __iomem *)rng->priv;
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
u32 max_words = max / sizeof(u32);
u32 num_words, count;
- while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) {
+ while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
if (!wait)
return 0;
cpu_relax();
}
- num_words = readl(rng_base + RNG_STATUS) >> 24;
+ num_words = rng_readl(priv, RNG_STATUS) >> 24;
if (num_words > max_words)
num_words = max_words;
for (count = 0; count < num_words; count++)
- ((u32 *)buf)[count] = readl(rng_base + RNG_DATA);
+ ((u32 *)buf)[count] = rng_readl(priv, RNG_DATA);
return num_words * sizeof(u32);
}
-static struct hwrng bcm2835_rng_ops = {
- .name = "bcm2835",
- .read = bcm2835_rng_read,
+static int bcm2835_rng_init(struct hwrng *rng)
+{
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+ int ret = 0;
+ u32 val;
+
+ if (!IS_ERR(priv->clk)) {
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+ }
+
+ if (priv->mask_interrupts) {
+ /* mask the interrupt */
+ val = rng_readl(priv, RNG_INT_MASK);
+ val |= RNG_INT_OFF;
+ rng_writel(priv, val, RNG_INT_MASK);
+ }
+
+ /* set warm-up count & enable */
+ rng_writel(priv, RNG_WARMUP_COUNT, RNG_STATUS);
+ rng_writel(priv, RNG_RBGEN, RNG_CTRL);
+
+ return ret;
+}
+
+static void bcm2835_rng_cleanup(struct hwrng *rng)
+{
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+
+ /* disable rng hardware */
+ rng_writel(priv, 0, RNG_CTRL);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+}
+
+struct bcm2835_rng_of_data {
+ bool mask_interrupts;
+};
+
+static const struct bcm2835_rng_of_data nsp_rng_of_data = {
+ .mask_interrupts = true,
};
static const struct of_device_id bcm2835_rng_of_match[] = {
{ .compatible = "brcm,bcm2835-rng"},
- { .compatible = "brcm,bcm-nsp-rng", .data = nsp_rng_init},
- { .compatible = "brcm,bcm5301x-rng", .data = nsp_rng_init},
+ { .compatible = "brcm,bcm-nsp-rng", .data = &nsp_rng_of_data },
+ { .compatible = "brcm,bcm5301x-rng", .data = &nsp_rng_of_data },
+ { .compatible = "brcm,bcm6368-rng"},
{},
};
static int bcm2835_rng_probe(struct platform_device *pdev)
{
+ const struct bcm2835_rng_of_data *of_data;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- void (*rng_setup)(void __iomem *base);
const struct of_device_id *rng_id;
- void __iomem *rng_base;
+ struct bcm2835_rng_priv *priv;
+ struct resource *r;
int err;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
/* map peripheral */
- rng_base = of_iomap(np, 0);
- if (!rng_base) {
- dev_err(dev, "failed to remap rng regs");
- return -ENODEV;
- }
- bcm2835_rng_ops.priv = (unsigned long)rng_base;
+ priv->base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ /* Clock is optional on most platforms */
+ priv->clk = devm_clk_get(dev, NULL);
+
+ priv->rng.name = pdev->name;
+ priv->rng.init = bcm2835_rng_init;
+ priv->rng.read = bcm2835_rng_read;
+ priv->rng.cleanup = bcm2835_rng_cleanup;
rng_id = of_match_node(bcm2835_rng_of_match, np);
- if (!rng_id) {
- iounmap(rng_base);
+ if (!rng_id)
return -EINVAL;
- }
- /* Check for rng init function, execute it */
- rng_setup = rng_id->data;
- if (rng_setup)
- rng_setup(rng_base);
- /* set warm-up count & enable */
- __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
- __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
+ /* Use the OF match data, if any, to configure the RNG */
+ of_data = rng_id->data;
+ if (of_data)
+ priv->mask_interrupts = of_data->mask_interrupts;
/* register driver */
- err = hwrng_register(&bcm2835_rng_ops);
- if (err) {
+ err = devm_hwrng_register(dev, &priv->rng);
+ if (err)
dev_err(dev, "hwrng registration failed\n");
- iounmap(rng_base);
- } else
+ else
dev_info(dev, "hwrng registered\n");
return err;
}
-static int bcm2835_rng_remove(struct platform_device *pdev)
-{
- void __iomem *rng_base = (void __iomem *)bcm2835_rng_ops.priv;
-
- /* disable rng hardware */
- __raw_writel(0, rng_base + RNG_CTRL);
-
- /* unregister driver */
- hwrng_unregister(&bcm2835_rng_ops);
- iounmap(rng_base);
-
- return 0;
-}
-
MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
+static struct platform_device_id bcm2835_rng_devtype[] = {
+ { .name = "bcm2835-rng" },
+ { .name = "bcm63xx-rng" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, bcm2835_rng_devtype);
+
static struct platform_driver bcm2835_rng_driver = {
.driver = {
.name = "bcm2835-rng",
.of_match_table = bcm2835_rng_of_match,
},
.probe = bcm2835_rng_probe,
- .remove = bcm2835_rng_remove,
+ .id_table = bcm2835_rng_devtype,
};
module_platform_driver(bcm2835_rng_driver);
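The rng_readl()/rng_writel() helpers introduced above lean on IS_ENABLED() evaluating to a compile-time constant, so only one branch survives compilation and the big-endian MIPS (BCM63xx) support costs nothing per access. A standalone sketch of the idiom:

static inline u32 reg_read(void __iomem *base, u32 offset)
{
        /* Both IS_ENABLED() checks fold at compile time; the dead
         * branch is discarded, leaving a single readl variant.
         */
        if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                return __raw_readl(base + offset);      /* CPU-native order */

        return readl(base + offset);                    /* little-endian */
}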
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
deleted file mode 100644
index 5132c9cde50d..000000000000
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Broadcom BCM63xx Random Number Generator support
- *
- * Copyright (C) 2011, Florian Fainelli <florian@openwrt.org>
- * Copyright (C) 2009, Broadcom Corporation
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/hw_random.h>
-#include <linux/of.h>
-
-#define RNG_CTRL 0x00
-#define RNG_EN (1 << 0)
-
-#define RNG_STAT 0x04
-#define RNG_AVAIL_MASK (0xff000000)
-
-#define RNG_DATA 0x08
-#define RNG_THRES 0x0c
-#define RNG_MASK 0x10
-
-struct bcm63xx_rng_priv {
- struct hwrng rng;
- struct clk *clk;
- void __iomem *regs;
-};
-
-#define to_rng_priv(rng) container_of(rng, struct bcm63xx_rng_priv, rng)
-
-static int bcm63xx_rng_init(struct hwrng *rng)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
- u32 val;
- int error;
-
- error = clk_prepare_enable(priv->clk);
- if (error)
- return error;
-
- val = __raw_readl(priv->regs + RNG_CTRL);
- val |= RNG_EN;
- __raw_writel(val, priv->regs + RNG_CTRL);
-
- return 0;
-}
-
-static void bcm63xx_rng_cleanup(struct hwrng *rng)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
- u32 val;
-
- val = __raw_readl(priv->regs + RNG_CTRL);
- val &= ~RNG_EN;
- __raw_writel(val, priv->regs + RNG_CTRL);
-
- clk_disable_unprepare(priv->clk);
-}
-
-static int bcm63xx_rng_data_present(struct hwrng *rng, int wait)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
-
- return __raw_readl(priv->regs + RNG_STAT) & RNG_AVAIL_MASK;
-}
-
-static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
-
- *data = __raw_readl(priv->regs + RNG_DATA);
-
- return 4;
-}
-
-static int bcm63xx_rng_probe(struct platform_device *pdev)
-{
- struct resource *r;
- int ret;
- struct bcm63xx_rng_priv *priv;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no iomem resource\n");
- return -ENXIO;
- }
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->rng.name = pdev->name;
- priv->rng.init = bcm63xx_rng_init;
- priv->rng.cleanup = bcm63xx_rng_cleanup;
- priv->rng.data_present = bcm63xx_rng_data_present;
- priv->rng.data_read = bcm63xx_rng_data_read;
-
- priv->clk = devm_clk_get(&pdev->dev, "ipsec");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "no clock for device: %d\n", ret);
- return ret;
- }
-
- if (!devm_request_mem_region(&pdev->dev, r->start,
- resource_size(r), pdev->name)) {
- dev_err(&pdev->dev, "request mem failed");
- return -EBUSY;
- }
-
- priv->regs = devm_ioremap_nocache(&pdev->dev, r->start,
- resource_size(r));
- if (!priv->regs) {
- dev_err(&pdev->dev, "ioremap failed");
- return -ENOMEM;
- }
-
- ret = devm_hwrng_register(&pdev->dev, &priv->rng);
- if (ret) {
- dev_err(&pdev->dev, "failed to register rng device: %d\n",
- ret);
- return ret;
- }
-
- dev_info(&pdev->dev, "registered RNG driver\n");
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id bcm63xx_rng_of_match[] = {
- { .compatible = "brcm,bcm6368-rng", },
- {},
-};
-MODULE_DEVICE_TABLE(of, bcm63xx_rng_of_match);
-#endif
-
-static struct platform_driver bcm63xx_rng_driver = {
- .probe = bcm63xx_rng_probe,
- .driver = {
- .name = "bcm63xx-rng",
- .of_match_table = of_match_ptr(bcm63xx_rng_of_match),
- },
-};
-
-module_platform_driver(bcm63xx_rng_driver);
-
-MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-MODULE_DESCRIPTION("Broadcom BCM63xx RNG driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 657b8770b6b9..91bb98c42a1c 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -306,6 +306,10 @@ static int enable_best_rng(void)
ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
if (!ret)
cur_rng_set_by_user = 0;
+ } else {
+ drop_current_rng();
+ cur_rng_set_by_user = 0;
+ ret = 0;
}
return ret;
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
new file mode 100644
index 000000000000..1947aed7c044
--- /dev/null
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RNG driver for Exynos TRNGs
+ *
+ * Author: Łukasz Stelmach <l.stelmach@samsung.com>
+ *
+ * Copyright 2017 (c) Samsung Electronics Software, Inc.
+ *
+ * Based on the Exynos PRNG driver drivers/crypto/exynos-rng by
+ * Krzysztof Kozłowski <krzk@kernel.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define EXYNOS_TRNG_CLKDIV (0x0)
+
+#define EXYNOS_TRNG_CTRL (0x20)
+#define EXYNOS_TRNG_CTRL_RNGEN BIT(31)
+
+#define EXYNOS_TRNG_POST_CTRL (0x30)
+#define EXYNOS_TRNG_ONLINE_CTRL (0x40)
+#define EXYNOS_TRNG_ONLINE_STAT (0x44)
+#define EXYNOS_TRNG_ONLINE_MAXCHI2 (0x48)
+#define EXYNOS_TRNG_FIFO_CTRL (0x50)
+#define EXYNOS_TRNG_FIFO_0 (0x80)
+#define EXYNOS_TRNG_FIFO_1 (0x84)
+#define EXYNOS_TRNG_FIFO_2 (0x88)
+#define EXYNOS_TRNG_FIFO_3 (0x8c)
+#define EXYNOS_TRNG_FIFO_4 (0x90)
+#define EXYNOS_TRNG_FIFO_5 (0x94)
+#define EXYNOS_TRNG_FIFO_6 (0x98)
+#define EXYNOS_TRNG_FIFO_7 (0x9c)
+#define EXYNOS_TRNG_FIFO_LEN (8)
+#define EXYNOS_TRNG_CLOCK_RATE (500000)
+
+
+struct exynos_trng_dev {
+ struct device *dev;
+ void __iomem *mem;
+ struct clk *clk;
+ struct hwrng rng;
+};
+
+static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max,
+ bool wait)
+{
+ struct exynos_trng_dev *trng;
+ int val;
+
+ max = min_t(size_t, max, (EXYNOS_TRNG_FIFO_LEN * 4));
+
+ trng = (struct exynos_trng_dev *)rng->priv;
+
+ writel_relaxed(max * 8, trng->mem + EXYNOS_TRNG_FIFO_CTRL);
+ val = readl_poll_timeout(trng->mem + EXYNOS_TRNG_FIFO_CTRL, val,
+ val == 0, 200, 1000000);
+ if (val < 0)
+ return val;
+
+ memcpy_fromio(data, trng->mem + EXYNOS_TRNG_FIFO_0, max);
+
+ return max;
+}
+
+static int exynos_trng_init(struct hwrng *rng)
+{
+ struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv;
+ unsigned long sss_rate;
+ u32 val;
+
+ sss_rate = clk_get_rate(trng->clk);
+
+ /*
+ * For most TRNG circuits a clock frequency under 500 kHz is safe.
+ */
+ val = sss_rate / (EXYNOS_TRNG_CLOCK_RATE * 2);
+ if (val > 0x7fff) {
+ dev_err(trng->dev, "clock divider too large: %d", val);
+ return -ERANGE;
+ }
+ val = val << 1;
+ writel_relaxed(val, trng->mem + EXYNOS_TRNG_CLKDIV);
+
+ /* Enable the generator. */
+ val = EXYNOS_TRNG_CTRL_RNGEN;
+ writel_relaxed(val, trng->mem + EXYNOS_TRNG_CTRL);
+
+ /*
+ * Disable post-processing. /dev/hwrng is supposed to deliver
+ * unprocessed data.
+ */
+ writel_relaxed(0, trng->mem + EXYNOS_TRNG_POST_CTRL);
+
+ return 0;
+}
+
+static int exynos_trng_probe(struct platform_device *pdev)
+{
+ struct exynos_trng_dev *trng;
+ struct resource *res;
+ int ret = -ENOMEM;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return ret;
+
+ trng->rng.name = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev),
+ GFP_KERNEL);
+ if (!trng->rng.name)
+ return ret;
+
+ trng->rng.init = exynos_trng_init;
+ trng->rng.read = exynos_trng_do_read;
+ trng->rng.priv = (unsigned long) trng;
+
+ platform_set_drvdata(pdev, trng);
+ trng->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ trng->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(trng->mem))
+ return PTR_ERR(trng->mem);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not get runtime PM.\n");
+ goto err_pm_get;
+ }
+
+ trng->clk = devm_clk_get(&pdev->dev, "secss");
+ if (IS_ERR(trng->clk)) {
+ ret = PTR_ERR(trng->clk);
+ dev_err(&pdev->dev, "Could not get clock.\n");
+ goto err_clock;
+ }
+
+ ret = clk_prepare_enable(trng->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not enable the clk.\n");
+ goto err_clock;
+ }
+
+ ret = hwrng_register(&trng->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register hwrng device.\n");
+ goto err_register;
+ }
+
+ dev_info(&pdev->dev, "Exynos True Random Number Generator.\n");
+
+ return 0;
+
+err_register:
+ clk_disable_unprepare(trng->clk);
+
+err_clock:
+ pm_runtime_put_sync(&pdev->dev);
+
+err_pm_get:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int exynos_trng_remove(struct platform_device *pdev)
+{
+ struct exynos_trng_dev *trng = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&trng->rng);
+ clk_disable_unprepare(trng->clk);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_trng_suspend(struct device *dev)
+{
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_trng_resume(struct device *dev)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not get runtime PM.\n");
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
+ exynos_trng_resume);
+
+static const struct of_device_id exynos_trng_dt_match[] = {
+ {
+ .compatible = "samsung,exynos5250-trng",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos_trng_dt_match);
+
+static struct platform_driver exynos_trng_driver = {
+ .driver = {
+ .name = "exynos-trng",
+ .pm = &exynos_trng_pm_ops,
+ .of_match_table = exynos_trng_dt_match,
+ },
+ .probe = exynos_trng_probe,
+ .remove = exynos_trng_remove,
+};
+
+module_platform_driver(exynos_trng_driver);
+MODULE_AUTHOR("Łukasz Stelmach");
+MODULE_DESCRIPTION("H/W TRNG driver for Exynos chips");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 88db42d30760..eca87249bcff 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -282,8 +282,7 @@ static int __exit imx_rngc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int imx_rngc_suspend(struct device *dev)
+static int __maybe_unused imx_rngc_suspend(struct device *dev)
{
struct imx_rngc *rngc = dev_get_drvdata(dev);
@@ -292,7 +291,7 @@ static int imx_rngc_suspend(struct device *dev)
return 0;
}
-static int imx_rngc_resume(struct device *dev)
+static int __maybe_unused imx_rngc_resume(struct device *dev)
{
struct imx_rngc *rngc = dev_get_drvdata(dev);
@@ -301,11 +300,7 @@ static int imx_rngc_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops imx_rngc_pm_ops = {
- .suspend = imx_rngc_suspend,
- .resume = imx_rngc_resume,
-};
-#endif
+static SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
static const struct of_device_id imx_rngc_dt_ids[] = {
{ .compatible = "fsl,imx25-rngb", .data = NULL, },
@@ -316,9 +311,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
.name = "imx_rngc",
-#ifdef CONFIG_PM
.pm = &imx_rngc_pm_ops,
-#endif
.of_match_table = imx_rngc_dt_ids,
},
.remove = __exit_p(imx_rngc_remove),
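The imx-rngc hunks above show the standard replacement for #ifdef CONFIG_PM guards: tag the callbacks __maybe_unused and let SIMPLE_DEV_PM_OPS() define the dev_pm_ops, whose sleep hooks are simply empty when CONFIG_PM_SLEEP is off. A minimal sketch with hypothetical names:

static int __maybe_unused foo_suspend(struct device *dev)
{
        return 0;       /* quiesce the hardware here */
}

static int __maybe_unused foo_resume(struct device *dev)
{
        return 0;       /* re-enable the hardware here */
}

/* Sets .suspend/.resume only under CONFIG_PM_SLEEP; __maybe_unused
 * avoids "defined but not used" warnings in !PM_SLEEP builds.
 */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);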
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index 8da7bcf54105..7f99cd52b40e 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -135,6 +135,7 @@ static int mtk_rng_probe(struct platform_device *pdev)
#endif
priv->rng.read = mtk_rng_read;
priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->rng.quality = 900;
priv->clk = devm_clk_get(&pdev->dev, "rng");
if (IS_ERR(priv->clk)) {
diff --git a/drivers/char/hw_random/tpm-rng.c b/drivers/char/hw_random/tpm-rng.c
deleted file mode 100644
index d6d448266f07..000000000000
--- a/drivers/char/hw_random/tpm-rng.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2012 Kent Yoder IBM Corporation
- *
- * HWRNG interfaces to pull RNG data from a TPM
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/hw_random.h>
-#include <linux/tpm.h>
-
-#define MODULE_NAME "tpm-rng"
-
-static int tpm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
-{
- return tpm_get_random(TPM_ANY_NUM, data, max);
-}
-
-static struct hwrng tpm_rng = {
- .name = MODULE_NAME,
- .read = tpm_rng_read,
-};
-
-static int __init rng_init(void)
-{
- return hwrng_register(&tpm_rng);
-}
-module_init(rng_init);
-
-static void __exit rng_exit(void)
-{
- hwrng_unregister(&tpm_rng);
-}
-module_exit(rng_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Kent Yoder <key@linux.vnet.ibm.com>");
-MODULE_DESCRIPTION("RNG driver for TPM devices");
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 6edfaa72b98b..7992c870b0a2 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -338,10 +338,10 @@ static int bt_bmc_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int bt_bmc_poll(struct file *file, poll_table *wait)
+static __poll_t bt_bmc_poll(struct file *file, poll_table *wait)
{
struct bt_bmc *bt_bmc = file_bt_bmc(file);
- unsigned int mask = 0;
+ __poll_t mask = 0;
u8 ctrl;
poll_wait(file, &bt_bmc->queue, wait);
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 2ffca4232686..a011a7739f5e 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -78,10 +78,10 @@ static void file_receive_handler(struct ipmi_recv_msg *msg,
spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}
-static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
struct ipmi_file_private *priv = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
poll_wait(file, &priv->wait, wait);
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
index ab78b3be7e33..c5112b17d7ea 100644
--- a/drivers/char/ipmi/ipmi_dmi.c
+++ b/drivers/char/ipmi/ipmi_dmi.c
@@ -106,7 +106,10 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
pr_err("ipmi:dmi: Error allocation IPMI platform device\n");
return;
}
- pdev->driver_override = override;
+ pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
+ override);
+ if (!pdev->driver_override)
+ goto err;
if (type == IPMI_DMI_TYPE_SSIF) {
set_prop_entry(p[pidx++], "i2c-addr", u16, base_addr);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index f45732a2cb3e..01fbffb3168e 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -84,7 +84,7 @@ static int panic_op_write_handler(const char *val,
char valcp[16];
char *s;
- strncpy(valcp, val, 16);
+ strncpy(valcp, val, 15);
valcp[15] = '\0';
s = strstrip(valcp);
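The strncpy() bound change here (and the identical one in ipmi_watchdog.c below) does not fix an overflow — the explicit valcp[15] = '\0' already guaranteed termination — but it keeps strncpy() from legally filling all 16 bytes with no terminator, which newer compilers warn about. The idiomatic form:

static void copy_param_example(const char *val)
{
        char valcp[16];

        /* At most 15 bytes are copied, so the terminator below always
         * has room; equivalent to the fixed calls in these drivers.
         */
        strncpy(valcp, val, sizeof(valcp) - 1);
        valcp[sizeof(valcp) - 1] = '\0';
}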
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index 07fddbefefe4..bcf493d8e238 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -250,8 +250,9 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
ipmi->irq = opal_event_request(prop);
}
- if (request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
- "opal-ipmi", ipmi)) {
+ rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
+ "opal-ipmi", ipmi);
+ if (rc) {
dev_warn(dev, "Unable to request irq\n");
goto err_dispose;
}
@@ -264,7 +265,6 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
goto err_unregister;
}
- /* todo: query actual ipmi_device_id */
rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi, dev, 0);
if (rc) {
dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 71fad747c0c7..6768cb2dd740 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1938,8 +1938,10 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
static inline void stop_timer_and_thread(struct smi_info *smi_info)
{
- if (smi_info->thread != NULL)
+ if (smi_info->thread != NULL) {
kthread_stop(smi_info->thread);
+ smi_info->thread = NULL;
+ }
smi_info->timer_can_start = false;
if (smi_info->timer_running)
@@ -2045,6 +2047,7 @@ static int try_smi_init(struct smi_info *new_smi)
int rv = 0;
int i;
char *init_name = NULL;
+ bool platform_device_registered = false;
pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
@@ -2173,6 +2176,7 @@ static int try_smi_init(struct smi_info *new_smi)
rv);
goto out_err;
}
+ platform_device_registered = true;
}
dev_set_drvdata(new_smi->io.dev, new_smi);
@@ -2279,10 +2283,11 @@ out_err:
}
if (new_smi->pdev) {
- platform_device_unregister(new_smi->pdev);
+ if (platform_device_registered)
+ platform_device_unregister(new_smi->pdev);
+ else
+ platform_device_put(new_smi->pdev);
new_smi->pdev = NULL;
- } else if (new_smi->pdev) {
- platform_device_put(new_smi->pdev);
}
kfree(init_name);
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 9573f1116450..f4214870d726 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -40,7 +40,7 @@ MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
#endif
#ifdef CONFIG_OF
module_param_named(tryopenfirmware, si_tryopenfirmware, bool, 0);
-MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+MODULE_PARM_DESC(tryopenfirmware, "Setting this to zero will disable the"
" default scan of the interfaces identified via OpenFirmware");
#endif
#ifdef CONFIG_DMI
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 3cfaec728604..f929e72bdac8 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -2071,8 +2071,7 @@ static int ssif_platform_remove(struct platform_device *dev)
return 0;
mutex_lock(&ssif_infos_mutex);
- if (addr_info->client)
- i2c_unregister_device(addr_info->client);
+ i2c_unregister_device(addr_info->client);
list_del(&addr_info->link);
kfree(addr_info);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 76b270678b50..34bc1f3ca414 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -232,7 +232,7 @@ static int set_param_str(const char *val, const struct kernel_param *kp)
char valcp[16];
char *s;
- strncpy(valcp, val, 16);
+ strncpy(valcp, val, 15);
valcp[15] = '\0';
s = strstrip(valcp);
@@ -298,7 +298,7 @@ module_param(pretimeout, timeout, 0644);
MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
module_param(panic_wdt_timeout, timeout, 0644);
-MODULE_PARM_DESC(timeout, "Timeout value on kernel panic in seconds.");
+MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds.");
module_param_cb(action, &param_ops_str, action_op, 0644);
MODULE_PARM_DESC(action, "Timeout action. One of: "
@@ -887,9 +887,9 @@ static int ipmi_open(struct inode *ino, struct file *filep)
}
}
-static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &read_q, wait);
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 9a1aaf538758..819fe37a3683 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -415,10 +415,10 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
return count;
}
-static unsigned int cm4040_poll(struct file *filp, poll_table *wait)
+static __poll_t cm4040_poll(struct file *filp, poll_table *wait)
{
struct reader_dev *dev = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &dev->poll_wait, wait);
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index d256110ba672..7a56d1a13ec3 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -769,10 +769,10 @@ static int pp_release(struct inode *inode, struct file *file)
}
/* No kernel lock held - fine */
-static unsigned int pp_poll(struct file *file, poll_table *wait)
+static __poll_t pp_poll(struct file *file, poll_table *wait)
{
struct pp_struct *pp = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &pp->irq_wait, wait);
if (atomic_read(&pp->irqc))
diff --git a/drivers/char/random.c b/drivers/char/random.c
index ec42c8bb9b0d..80f2c326db47 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -431,9 +431,9 @@ static int crng_init = 0;
static int crng_init_cnt = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA20_BLOCK_SIZE]);
+ __u32 out[CHACHA20_BLOCK_WORDS]);
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
+ __u32 tmp[CHACHA20_BLOCK_WORDS], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
@@ -817,7 +817,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
unsigned long flags;
int i, num;
union {
- __u8 block[CHACHA20_BLOCK_SIZE];
+ __u32 block[CHACHA20_BLOCK_WORDS];
__u32 key[8];
} buf;
@@ -851,7 +851,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA20_BLOCK_SIZE])
+ __u32 out[CHACHA20_BLOCK_WORDS])
{
unsigned long v, flags;
@@ -867,7 +867,7 @@ static void _extract_crng(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
+static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
{
struct crng_state *crng = NULL;
@@ -885,7 +885,7 @@ static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+ __u32 tmp[CHACHA20_BLOCK_WORDS], int used)
{
unsigned long flags;
__u32 *s, *d;
@@ -897,14 +897,14 @@ static void _crng_backtrack_protect(struct crng_state *crng,
used = 0;
}
spin_lock_irqsave(&crng->lock, flags);
- s = (__u32 *) &tmp[used];
+ s = &tmp[used / sizeof(__u32)];
d = &crng->state[4];
for (i=0; i < 8; i++)
*d++ ^= *s++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
{
struct crng_state *crng = NULL;
@@ -920,7 +920,7 @@ static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
- __u8 tmp[CHACHA20_BLOCK_SIZE];
+ __u32 tmp[CHACHA20_BLOCK_WORDS];
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1507,7 +1507,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/
static void _get_random_bytes(void *buf, int nbytes)
{
- __u8 tmp[CHACHA20_BLOCK_SIZE];
+ __u32 tmp[CHACHA20_BLOCK_WORDS];
trace_get_random_bytes(nbytes, _RET_IP_);
@@ -1784,10 +1784,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
return ret;
}
-static unsigned int
+static __poll_t
random_poll(struct file *file, poll_table * wait)
{
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &random_read_wait, wait);
poll_wait(file, &random_write_wait, wait);
@@ -2114,7 +2114,7 @@ u64 get_random_u64(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((u8 *)batch->entropy_u64);
+ extract_crng((__u32 *)batch->entropy_u64);
batch->position = 0;
}
ret = batch->entropy_u64[batch->position++];
@@ -2144,7 +2144,7 @@ u32 get_random_u32(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng((u8 *)batch->entropy_u32);
+ extract_crng(batch->entropy_u32);
batch->position = 0;
}
ret = batch->entropy_u32[batch->position++];
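The random.c hunks retype every ChaCha20 output buffer from __u8[CHACHA20_BLOCK_SIZE] to __u32[CHACHA20_BLOCK_WORDS] — presumably CHACHA20_BLOCK_SIZE / sizeof(__u32), i.e. 16 words for the 64-byte block — since the generator emits 32-bit words and a u32 array guarantees alignment without casts. The indexing change in _crng_backtrack_protect() then reduces to a word index:

static __u32 *backtrack_src_example(__u32 tmp[CHACHA20_BLOCK_WORDS],
                                    int used)
{
        /* 'used' is a byte count, a multiple of 4 on this path.
         * Previously: s = (__u32 *)&((__u8 *)tmp)[used]; with a word
         * array the byte offset becomes a plain index (e.g. 32/4 = 8).
         */
        return &tmp[used / sizeof(__u32)];
}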
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 5542a438bbd0..c6a317120a55 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -147,7 +147,7 @@ static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void rtc_get_rtc_time(struct rtc_time *rtc_tm);
#ifdef RTC_IRQ
-static unsigned int rtc_poll(struct file *file, poll_table *wait);
+static __poll_t rtc_poll(struct file *file, poll_table *wait);
#endif
static void get_rtc_alm_time(struct rtc_time *alm_tm);
@@ -790,7 +790,7 @@ no_irq:
}
#ifdef RTC_IRQ
-static unsigned int rtc_poll(struct file *file, poll_table *wait)
+static __poll_t rtc_poll(struct file *file, poll_table *wait)
{
unsigned long l;
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 6aa32679fd58..7f49fa0f41d7 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -321,10 +321,10 @@ scdrv_write(struct file *file, const char __user *buf,
return status;
}
-static unsigned int
+static __poll_t
scdrv_poll(struct file *file, struct poll_table_struct *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
int status = 0;
struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
unsigned long flags;
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index d3a979e25724..fc041c462aa4 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -940,7 +940,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
return ret;
}
-static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
+static __poll_t sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_device.fifo_proc_list, wait);
if (kfifo_len(&sonypi_device.fifo))
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index a30352202f1f..18c81cbe4704 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -26,6 +26,17 @@ menuconfig TCG_TPM
if TCG_TPM
+config HW_RANDOM_TPM
+ bool "TPM HW Random Number Generator support"
+ depends on TCG_TPM && HW_RANDOM && !(TCG_TPM=y && HW_RANDOM=m)
+ default y
+ ---help---
+ This setting exposes the TPM's Random Number Generator as a hwrng
+ device. This allows the kernel to collect randomness from the TPM at
+ boot, and provides the TPM randomness in /dev/hwrng.
+
+ If unsure, say Y.
+
config TCG_TIS_CORE
tristate
---help---
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 34b4bcf46f43..acd758381c58 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -6,8 +6,9 @@ obj-$(CONFIG_TCG_TPM) += tpm.o
tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
tpm-dev-common.o tpmrm-dev.o tpm1_eventlog.o tpm2_eventlog.o \
tpm2-space.o
-tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_acpi.o
-tpm-$(CONFIG_OF) += tpm_of.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_eventlog_acpi.o
+tpm-$(CONFIG_EFI) += tpm_eventlog_efi.o
+tpm-$(CONFIG_OF) += tpm_eventlog_of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 0eca20c5a80c..0a62c19937b6 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -26,8 +26,9 @@
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/major.h>
+#include <linux/tpm_eventlog.h>
+#include <linux/hw_random.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
DEFINE_IDR(dev_nums_idr);
static DEFINE_MUTEX(idr_lock);
@@ -80,21 +81,26 @@ void tpm_put_ops(struct tpm_chip *chip)
EXPORT_SYMBOL_GPL(tpm_put_ops);
/**
- * tpm_chip_find_get() - return tpm_chip for a given chip number
- * @chip_num: id to find
+ * tpm_chip_find_get() - find and reserve a TPM chip
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
*
- * The return'd chip has been tpm_try_get_ops'd and must be released via
- * tpm_put_ops
+ * Finds a TPM chip and reserves its class device and operations. The chip must
+ * be released with tpm_put_ops() after use.
+ *
+ * Return:
+ * A reserved &struct tpm_chip instance.
+ * %NULL if a chip is not found.
+ * %NULL if the chip is not available.
*/
-struct tpm_chip *tpm_chip_find_get(int chip_num)
+struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip)
{
- struct tpm_chip *chip, *res = NULL;
+ struct tpm_chip *res = NULL;
+ int chip_num = 0;
int chip_prev;
mutex_lock(&idr_lock);
- if (chip_num == TPM_ANY_NUM) {
- chip_num = 0;
+ if (!chip) {
do {
chip_prev = chip_num;
chip = idr_get_next(&dev_nums_idr, &chip_num);
@@ -104,8 +110,7 @@ struct tpm_chip *tpm_chip_find_get(int chip_num)
}
} while (chip_prev != chip_num);
} else {
- chip = idr_find(&dev_nums_idr, chip_num);
- if (chip && !tpm_try_get_ops(chip))
+ if (!tpm_try_get_ops(chip))
res = chip;
}
@@ -387,6 +392,26 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
return 0;
}
+
+static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
+
+ return tpm_get_random(chip, data, max);
+}
+
+static int tpm_add_hwrng(struct tpm_chip *chip)
+{
+ if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ return 0;
+
+ snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
+ "tpm-rng-%d", chip->dev_num);
+ chip->hwrng.name = chip->hwrng_name;
+ chip->hwrng.read = tpm_hwrng_read;
+ return hwrng_register(&chip->hwrng);
+}
+
/*
* tpm_chip_register() - create a character device for the TPM chip
* @chip: TPM chip to use.
@@ -419,11 +444,13 @@ int tpm_chip_register(struct tpm_chip *chip)
tpm_add_ppi(chip);
+ rc = tpm_add_hwrng(chip);
+ if (rc)
+ goto out_ppi;
+
rc = tpm_add_char_device(chip);
- if (rc) {
- tpm_bios_log_teardown(chip);
- return rc;
- }
+ if (rc)
+ goto out_hwrng;
rc = tpm_add_legacy_sysfs(chip);
if (rc) {
@@ -432,6 +459,14 @@ int tpm_chip_register(struct tpm_chip *chip)
}
return 0;
+
+out_hwrng:
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ hwrng_unregister(&chip->hwrng);
+out_ppi:
+ tpm_bios_log_teardown(chip);
+
+ return rc;
}
EXPORT_SYMBOL_GPL(tpm_chip_register);
@@ -451,6 +486,8 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
void tpm_chip_unregister(struct tpm_chip *chip)
{
tpm_del_legacy_sysfs(chip);
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ hwrng_unregister(&chip->hwrng);
tpm_bios_log_teardown(chip);
if (chip->flags & TPM_CHIP_FLAG_TPM2)
cdev_device_del(&chip->cdevs, &chip->devs);
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 1d6729be4cd6..76df4fbcf089 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -30,9 +30,9 @@
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/pm_runtime.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
#define TPM_MAX_ORDINAL 243
#define TSC_MAX_ORDINAL 12
@@ -328,7 +328,7 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
-static bool tpm_validate_command(struct tpm_chip *chip,
+static int tpm_validate_command(struct tpm_chip *chip,
struct tpm_space *space,
const u8 *cmd,
size_t len)
@@ -340,10 +340,10 @@ static bool tpm_validate_command(struct tpm_chip *chip,
unsigned int nr_handles;
if (len < TPM_HEADER_SIZE)
- return false;
+ return -EINVAL;
if (!space)
- return true;
+ return 0;
if (chip->flags & TPM_CHIP_FLAG_TPM2 && chip->nr_commands) {
cc = be32_to_cpu(header->ordinal);
@@ -352,7 +352,7 @@ static bool tpm_validate_command(struct tpm_chip *chip,
if (i < 0) {
dev_dbg(&chip->dev, "0x%04X is an invalid command\n",
cc);
- return false;
+ return -EOPNOTSUPP;
}
attrs = chip->cc_attrs_tbl[i];
@@ -362,11 +362,11 @@ static bool tpm_validate_command(struct tpm_chip *chip,
goto err_len;
}
- return true;
+ return 0;
err_len:
dev_dbg(&chip->dev,
"%s: insufficient command length %zu", __func__, len);
- return false;
+ return -EINVAL;
}
/**
@@ -391,8 +391,20 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
unsigned long stop;
bool need_locality;
- if (!tpm_validate_command(chip, space, buf, bufsiz))
- return -EINVAL;
+ rc = tpm_validate_command(chip, space, buf, bufsiz);
+ if (rc == -EINVAL)
+ return rc;
+ /*
+ * If the command is not implemented by the TPM, synthesize a
+ * response with a TPM2_RC_COMMAND_CODE return for user-space.
+ */
+ if (rc == -EOPNOTSUPP) {
+ header->length = cpu_to_be32(sizeof(*header));
+ header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
+ TSS2_RESMGR_TPM_RC_LAYER);
+ return bufsiz;
+ }
if (bufsiz > TPM_BUFSIZE)
bufsiz = TPM_BUFSIZE;
@@ -413,6 +425,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
if (chip->dev.parent)
pm_runtime_get_sync(chip->dev.parent);
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
/* Store the decision as chip->locality will be changed. */
need_locality = chip->locality == -1;
@@ -489,6 +504,9 @@ out:
chip->locality = -1;
}
out_no_locality:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
if (chip->dev.parent)
pm_runtime_put_sync(chip->dev.parent);
@@ -809,19 +827,20 @@ int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
}
/**
- * tpm_is_tpm2 - is the chip a TPM2 chip?
- * @chip_num: tpm idx # or ANY
+ * tpm_is_tpm2 - do we have a TPM2 chip?
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
*
- * Returns < 0 on error, and 1 or 0 on success depending whether the chip
- * is a TPM2 chip.
+ * Return:
+ * 1 if we have a TPM2 chip.
+ * 0 if we don't have a TPM2 chip.
+ * A negative number for system errors (errno).
*/
-int tpm_is_tpm2(u32 chip_num)
+int tpm_is_tpm2(struct tpm_chip *chip)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
@@ -833,23 +852,19 @@ int tpm_is_tpm2(u32 chip_num)
EXPORT_SYMBOL_GPL(tpm_is_tpm2);
/**
- * tpm_pcr_read - read a pcr value
- * @chip_num: tpm idx # or ANY
- * @pcr_idx: pcr idx to retrieve
- * @res_buf: TPM_PCR value
- * size of res_buf is 20 bytes (or NULL if you don't care)
+ * tpm_pcr_read - read a PCR value from SHA1 bank
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @pcr_idx: the PCR to be retrieved
+ * @res_buf: the value of the PCR
*
- * The TPM driver should be built-in, but for whatever reason it
- * isn't, protect against the chip disappearing, by incrementing
- * the module usage count.
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
+int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_pcr_read(chip, pcr_idx, res_buf);
@@ -889,25 +904,26 @@ static int tpm1_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash,
}
/**
- * tpm_pcr_extend - extend pcr value with hash
- * @chip_num: tpm idx # or AN&
- * @pcr_idx: pcr idx to extend
- * @hash: hash value used to extend pcr value
+ * tpm_pcr_extend - extend a PCR value in SHA1 bank.
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @pcr_idx: the PCR to be extended
+ * @hash: the hash value used to extend the PCR value
*
- * The TPM driver should be built-in, but for whatever reason it
- * isn't, protect against the chip disappearing, by incrementing
- * the module usage count.
+ * Note: with TPM 2.0 this also extends those banks with a known digest size to
+ * the crypto subsystem in order to prevent malicious use of those PCR banks. In
+ * the future we should dynamically determine digest sizes.
+ *
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
+int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash)
{
int rc;
- struct tpm_chip *chip;
struct tpm2_digest digest_list[ARRAY_SIZE(chip->active_banks)];
u32 count = 0;
int i;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
@@ -1019,82 +1035,29 @@ out:
return rc;
}
-int tpm_send(u32 chip_num, void *cmd, size_t buflen)
+/**
+ * tpm_send - send a TPM command
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @cmd: a TPM command buffer
+ * @buflen: the length of the TPM command buffer
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
rc = tpm_transmit_cmd(chip, NULL, cmd, buflen, 0, 0,
- "attempting tpm_cmd");
+ "attempting to a send a command");
tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_send);
-static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
- bool check_cancel, bool *canceled)
-{
- u8 status = chip->ops->status(chip);
-
- *canceled = false;
- if ((status & mask) == mask)
- return true;
- if (check_cancel && chip->ops->req_canceled(chip, status)) {
- *canceled = true;
- return true;
- }
- return false;
-}
-
-int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
- wait_queue_head_t *queue, bool check_cancel)
-{
- unsigned long stop;
- long rc;
- u8 status;
- bool canceled = false;
-
- /* check current status */
- status = chip->ops->status(chip);
- if ((status & mask) == mask)
- return 0;
-
- stop = jiffies + timeout;
-
- if (chip->flags & TPM_CHIP_FLAG_IRQ) {
-again:
- timeout = stop - jiffies;
- if ((long)timeout <= 0)
- return -ETIME;
- rc = wait_event_interruptible_timeout(*queue,
- wait_for_tpm_stat_cond(chip, mask, check_cancel,
- &canceled),
- timeout);
- if (rc > 0) {
- if (canceled)
- return -ECANCELED;
- return 0;
- }
- if (rc == -ERESTARTSYS && freezing(current)) {
- clear_thread_flag(TIF_SIGPENDING);
- goto again;
- }
- } else {
- do {
- tpm_msleep(TPM_TIMEOUT);
- status = chip->ops->status(chip);
- if ((status & mask) == mask)
- return 0;
- } while (time_before(jiffies, stop));
- }
- return -ETIME;
-}
-EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
-
#define TPM_ORD_SAVESTATE 152
#define SAVESTATE_RESULT_SIZE 10
@@ -1187,16 +1150,15 @@ static const struct tpm_input_header tpm_getrandom_header = {
};
/**
- * tpm_get_random() - Get random bytes from the tpm's RNG
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @out: destination buffer for the random bytes
- * @max: the max number of bytes to write to @out
+ * tpm_get_random() - get random bytes from the TPM's RNG
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @out: destination buffer for the random bytes
+ * @max: the max number of bytes to write to @out
*
- * Returns < 0 on error and the number of bytes read on success
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
{
- struct tpm_chip *chip;
struct tpm_cmd_t tpm_cmd;
u32 recd, num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA), rlength;
int err, total = 0, retries = 5;
@@ -1205,8 +1167,8 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
@@ -1248,22 +1210,23 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
EXPORT_SYMBOL_GPL(tpm_get_random);
/**
- * tpm_seal_trusted() - seal a trusted key
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @options: authentication values and other options
- * @payload: the key data in clear and encrypted form
+ * tpm_seal_trusted() - seal a trusted key payload
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @options: authentication values and other options
+ * @payload: the key data in clear and encrypted form
*
- * Returns < 0 on error and 0 on success. At the moment, only TPM 2.0 chips
- * are supported.
+ * Note: only TPM 2.0 chips are supported. The TPM 1.x implementation is
+ * located in the keyring subsystem.
+ *
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload,
+int tpm_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL || !(chip->flags & TPM_CHIP_FLAG_TPM2))
+ chip = tpm_chip_find_get(chip);
+ if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
rc = tpm2_seal_trusted(chip, payload, options);
@@ -1275,21 +1238,23 @@ EXPORT_SYMBOL_GPL(tpm_seal_trusted);
/**
* tpm_unseal_trusted() - unseal a trusted key
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @options: authentication values and other options
- * @payload: the key data in clear and encrypted form
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @options: authentication values and other options
+ * @payload: the key data in clear and encrypted form
+ *
+ * Note: only TPM 2.0 chips are supported. The TPM 1.x implementation is
+ * located in the keyring subsystem.
*
- * Returns < 0 on error and 0 on success. At the moment, only TPM 2.0 chips
- * are supported.
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload,
+int tpm_unseal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL || !(chip->flags & TPM_CHIP_FLAG_TPM2))
+ chip = tpm_chip_find_get(chip);
+ if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
rc = tpm2_unseal_trusted(chip, payload, options);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 528cffbd49d3..f895fba4e20d 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
+#include <linux/hw_random.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
@@ -34,6 +35,7 @@
#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/highmem.h>
+#include <linux/tpm_eventlog.h>
#include <crypto/hash_info.h>
#ifdef CONFIG_X86
@@ -93,12 +95,17 @@ enum tpm2_structures {
TPM2_ST_SESSIONS = 0x8002,
};
+/* Indicates which layer of the software stack the error comes from */
+#define TSS2_RC_LAYER_SHIFT 16
+#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
+
enum tpm2_return_codes {
TPM2_RC_SUCCESS = 0x0000,
TPM2_RC_HASH = 0x0083, /* RC_FMT1 */
TPM2_RC_HANDLE = 0x008B,
TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
TPM2_RC_DISABLED = 0x0120,
+ TPM2_RC_COMMAND_CODE = 0x0143,
TPM2_RC_TESTING = 0x090A, /* RC_WARN */
TPM2_RC_REFERENCE_H0 = 0x0910,
};
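For reference, the faked response in the tpm_transmit() hunk above ORs a TPM 2.0 return code with the TSS resource-manager layer indicator. A minimal standalone sketch of how that 32-bit value is composed (the constants are copied from this patch; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define TSS2_RC_LAYER_SHIFT	 16
#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
#define TPM2_RC_COMMAND_CODE	 0x0143

int main(void)
{
	/* The layer bits sit above the 16-bit TPM return code. */
	uint32_t rc = TPM2_RC_COMMAND_CODE | TSS2_RESMGR_TPM_RC_LAYER;

	printf("synthesized rc = 0x%08x\n", (unsigned int)rc); /* 0x000b0143 */
	return 0;
}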
@@ -210,6 +217,9 @@ struct tpm_chip {
int dev_num; /* /dev/tpm# */
unsigned long is_open; /* only one allowed */
+ char hwrng_name[64];
+ struct hwrng hwrng;
+
struct mutex tpm_mutex; /* tpm is processing */
unsigned long timeout_a; /* jiffies */
@@ -385,10 +395,6 @@ struct tpm_cmd_t {
tpm_cmd_params params;
} __packed;
-struct tpm2_digest {
- u16 alg_id;
- u8 digest[SHA512_DIGEST_SIZE];
-} __packed;
/* A string buffer type for constructing TPM commands. This is based on the
* ideas of string buffer code in security/keys/trusted.h but is heap based
@@ -512,16 +518,14 @@ int tpm_do_selftest(struct tpm_chip *chip);
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
int tpm_pm_suspend(struct device *dev);
int tpm_pm_resume(struct device *dev);
-int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
- wait_queue_head_t *queue, bool check_cancel);
static inline void tpm_msleep(unsigned int delay_msec)
{
- usleep_range(delay_msec * 1000,
- (delay_msec * 1000) + TPM_TIMEOUT_RANGE_US);
+ usleep_range((delay_msec * 1000) - TPM_TIMEOUT_RANGE_US,
+ delay_msec * 1000);
};
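This flips tpm_msleep() from sleeping at least delay_msec to sleeping at most delay_msec. A small userspace sketch of the resulting usleep_range() bounds, assuming the in-tree TPM_TIMEOUT_RANGE_US value of 300:

#include <stdio.h>

#define TPM_TIMEOUT_RANGE_US 300	/* assumed in-tree value */

int main(void)
{
	unsigned long us = 5 * 1000UL;	/* a 5 ms request */

	/* before: sleep at least the requested delay */
	printf("old: usleep_range(%lu, %lu)\n", us, us + TPM_TIMEOUT_RANGE_US);
	/* after: sleep at most the requested delay */
	printf("new: usleep_range(%lu, %lu)\n", us - TPM_TIMEOUT_RANGE_US, us);
	return 0;
}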
-struct tpm_chip *tpm_chip_find_get(int chip_num);
+struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip);
__must_check int tpm_try_get_ops(struct tpm_chip *chip);
void tpm_put_ops(struct tpm_chip *chip);
@@ -575,4 +579,34 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc,
u8 *cmd);
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
u32 cc, u8 *buf, size_t *bufsiz);
+
+extern const struct seq_operations tpm2_binary_b_measurements_seqops;
+
+#if defined(CONFIG_ACPI)
+int tpm_read_log_acpi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_acpi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_OF)
+int tpm_read_log_of(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_of(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_EFI)
+int tpm_read_log_efi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_efi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+
+int tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_teardown(struct tpm_chip *chip);
#endif
diff --git a/drivers/char/tpm/tpm1_eventlog.c b/drivers/char/tpm/tpm1_eventlog.c
index 9a8605e500b5..add798bd69d0 100644
--- a/drivers/char/tpm/tpm1_eventlog.c
+++ b/drivers/char/tpm/tpm1_eventlog.c
@@ -21,13 +21,14 @@
*/
#include <linux/seq_file.h>
+#include <linux/efi.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
static const char* tcpa_event_type_strings[] = {
@@ -371,6 +372,10 @@ static int tpm_read_log(struct tpm_chip *chip)
if (rc != -ENODEV)
return rc;
+ rc = tpm_read_log_efi(chip);
+ if (rc != -ENODEV)
+ return rc;
+
return tpm_read_log_of(chip);
}
@@ -388,11 +393,13 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
{
const char *name = dev_name(&chip->dev);
unsigned int cnt;
+ int log_version;
int rc = 0;
rc = tpm_read_log(chip);
- if (rc)
+ if (rc < 0)
return rc;
+ log_version = rc;
cnt = 0;
chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
@@ -404,7 +411,7 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
cnt++;
chip->bin_log_seqops.chip = chip;
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
chip->bin_log_seqops.seqops =
&tpm2_binary_b_measurements_seqops;
else
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index f40d20671a78..c17e75348a99 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -849,28 +849,26 @@ static const struct tpm_input_header tpm2_selftest_header = {
static int tpm2_do_selftest(struct tpm_chip *chip)
{
int rc;
- unsigned int delay_msec = 20;
+ unsigned int delay_msec = 10;
long duration;
struct tpm2_cmd cmd;
duration = jiffies_to_msecs(
tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST));
- while (duration > 0) {
+ while (1) {
cmd.header.in = tpm2_selftest_header;
cmd.params.selftest_in.full_test = 0;
rc = tpm_transmit_cmd(chip, NULL, &cmd, TPM2_SELF_TEST_IN_SIZE,
0, 0, "continue selftest");
- if (rc != TPM2_RC_TESTING)
+ if (rc != TPM2_RC_TESTING || delay_msec >= duration)
break;
- tpm_msleep(delay_msec);
- duration -= delay_msec;
-
- /* wait longer the next round */
+ /* wait longer than before */
delay_msec *= 2;
+ tpm_msleep(delay_msec);
}
return rc;
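The reworked loop is a plain exponential backoff: bail out as soon as the TPM stops answering TPM2_RC_TESTING or the delay grows past the allowed ordinal duration; otherwise double the delay and sleep. A hedged, self-contained sketch of the pattern, with do_probe() and sleep_msec() as hypothetical stand-ins for the transmit and sleep calls:

/* do_probe() returns 1 while the TPM is still testing, 0 when done. */
extern int do_probe(void);
extern void sleep_msec(unsigned int msec);

static int poll_with_backoff(long duration_msec)
{
	unsigned int delay_msec = 10;
	int rc;

	while (1) {
		rc = do_probe();
		if (rc != 1 || delay_msec >= duration_msec)
			break;
		/* wait longer than before */
		delay_msec *= 2;
		sleep_msec(delay_msec);
	}
	return rc;
}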
diff --git a/drivers/char/tpm/tpm2_eventlog.c b/drivers/char/tpm/tpm2_eventlog.c
index 34a8afa69138..1ce4411292ba 100644
--- a/drivers/char/tpm/tpm2_eventlog.c
+++ b/drivers/char/tpm/tpm2_eventlog.c
@@ -21,9 +21,9 @@
#include <linux/security.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
/*
* calc_tpm2_event_size() - calculate the event size, where event
diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h
deleted file mode 100644
index 204466cc4d05..000000000000
--- a/drivers/char/tpm/tpm_eventlog.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __TPM_EVENTLOG_H__
-#define __TPM_EVENTLOG_H__
-
-#include <crypto/hash_info.h>
-
-#define TCG_EVENT_NAME_LEN_MAX 255
-#define MAX_TEXT_EVENT 1000 /* Max event string length */
-#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */
-#define TPM2_ACTIVE_PCR_BANKS 3
-
-#ifdef CONFIG_PPC64
-#define do_endian_conversion(x) be32_to_cpu(x)
-#else
-#define do_endian_conversion(x) x
-#endif
-
-enum bios_platform_class {
- BIOS_CLIENT = 0x00,
- BIOS_SERVER = 0x01,
-};
-
-struct tcpa_event {
- u32 pcr_index;
- u32 event_type;
- u8 pcr_value[20]; /* SHA1 */
- u32 event_size;
- u8 event_data[0];
-};
-
-enum tcpa_event_types {
- PREBOOT = 0,
- POST_CODE,
- UNUSED,
- NO_ACTION,
- SEPARATOR,
- ACTION,
- EVENT_TAG,
- SCRTM_CONTENTS,
- SCRTM_VERSION,
- CPU_MICROCODE,
- PLATFORM_CONFIG_FLAGS,
- TABLE_OF_DEVICES,
- COMPACT_HASH,
- IPL,
- IPL_PARTITION_DATA,
- NONHOST_CODE,
- NONHOST_CONFIG,
- NONHOST_INFO,
-};
-
-struct tcpa_pc_event {
- u32 event_id;
- u32 event_size;
- u8 event_data[0];
-};
-
-enum tcpa_pc_event_ids {
- SMBIOS = 1,
- BIS_CERT,
- POST_BIOS_ROM,
- ESCD,
- CMOS,
- NVRAM,
- OPTION_ROM_EXEC,
- OPTION_ROM_CONFIG,
- OPTION_ROM_MICROCODE = 10,
- S_CRTM_VERSION,
- S_CRTM_CONTENTS,
- POST_CONTENTS,
- HOST_TABLE_OF_DEVICES,
-};
-
-/* http://www.trustedcomputinggroup.org/tcg-efi-protocol-specification/ */
-
-struct tcg_efi_specid_event_algs {
- u16 alg_id;
- u16 digest_size;
-} __packed;
-
-struct tcg_efi_specid_event {
- u8 signature[16];
- u32 platform_class;
- u8 spec_version_minor;
- u8 spec_version_major;
- u8 spec_errata;
- u8 uintnsize;
- u32 num_algs;
- struct tcg_efi_specid_event_algs digest_sizes[TPM2_ACTIVE_PCR_BANKS];
- u8 vendor_info_size;
- u8 vendor_info[0];
-} __packed;
-
-struct tcg_pcr_event {
- u32 pcr_idx;
- u32 event_type;
- u8 digest[20];
- u32 event_size;
- u8 event[0];
-} __packed;
-
-struct tcg_event_field {
- u32 event_size;
- u8 event[0];
-} __packed;
-
-struct tcg_pcr_event2 {
- u32 pcr_idx;
- u32 event_type;
- u32 count;
- struct tpm2_digest digests[TPM2_ACTIVE_PCR_BANKS];
- struct tcg_event_field event;
-} __packed;
-
-extern const struct seq_operations tpm2_binary_b_measurements_seqops;
-
-#if defined(CONFIG_ACPI)
-int tpm_read_log_acpi(struct tpm_chip *chip);
-#else
-static inline int tpm_read_log_acpi(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-#if defined(CONFIG_OF)
-int tpm_read_log_of(struct tpm_chip *chip);
-#else
-static inline int tpm_read_log_of(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-
-int tpm_bios_log_setup(struct tpm_chip *chip);
-void tpm_bios_log_teardown(struct tpm_chip *chip);
-
-#endif
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_eventlog_acpi.c
index 169edf3ce86d..66f19e93c216 100644
--- a/drivers/char/tpm/tpm_acpi.c
+++ b/drivers/char/tpm/tpm_eventlog_acpi.c
@@ -25,9 +25,9 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
struct acpi_tcpa {
struct acpi_table_header hdr;
@@ -102,7 +102,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
memcpy_fromio(log->bios_event_log, virt, len);
acpi_os_unmap_iomem(virt, len);
- return 0;
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
err:
kfree(log->bios_event_log);
diff --git a/drivers/char/tpm/tpm_eventlog_efi.c b/drivers/char/tpm/tpm_eventlog_efi.c
new file mode 100644
index 000000000000..e3f9ffd341d2
--- /dev/null
+++ b/drivers/char/tpm/tpm_eventlog_efi.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2017 Google
+ *
+ * Authors:
+ * Thiebaud Weksteen <tweek@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/tpm_eventlog.h>
+
+#include "tpm.h"
+
+/* read binary bios log from EFI configuration table */
+int tpm_read_log_efi(struct tpm_chip *chip)
+{
+
+ struct linux_efi_tpm_eventlog *log_tbl;
+ struct tpm_bios_log *log;
+ u32 log_size;
+ u8 tpm_log_version;
+
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ return -ENODEV;
+
+ if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ log = &chip->log;
+
+ log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl), MEMREMAP_WB);
+ if (!log_tbl) {
+ pr_err("Could not map UEFI TPM log table !\n");
+ return -ENOMEM;
+ }
+
+ log_size = log_tbl->size;
+ memunmap(log_tbl);
+
+ log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
+ MEMREMAP_WB);
+ if (!log_tbl) {
+ pr_err("Could not map UEFI TPM log table payload!\n");
+ return -ENOMEM;
+ }
+
+ /* allocate space for the event log */
+ log->bios_event_log = kmalloc(log_size, GFP_KERNEL);
+ if (!log->bios_event_log)
+ goto err_memunmap;
+ memcpy(log->bios_event_log, log_tbl->log, log_size);
+ log->bios_event_log_end = log->bios_event_log + log_size;
+
+ tpm_log_version = log_tbl->version;
+ memunmap(log_tbl);
+ return tpm_log_version;
+
+err_memunmap:
+ memunmap(log_tbl);
+ return -ENOMEM;
+}
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_eventlog_of.c
index aadb7f464076..96fd5646f866 100644
--- a/drivers/char/tpm/tpm_of.c
+++ b/drivers/char/tpm/tpm_eventlog_of.c
@@ -17,9 +17,9 @@
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
int tpm_read_log_of(struct tpm_chip *chip)
{
@@ -76,5 +76,7 @@ int tpm_read_log_of(struct tpm_chip *chip)
memcpy(log->bios_event_log, __va(base), size);
- return 0;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
}
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 79d6bbb58e39..c1dd39eaaeeb 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -665,9 +665,9 @@ out_err:
}
static const struct i2c_device_id tpm_tis_i2c_table[] = {
- {"tpm_i2c_infineon", 0},
- {"slb9635tt", 0},
- {"slb9645tt", 1},
+ {"tpm_i2c_infineon"},
+ {"slb9635tt"},
+ {"slb9645tt"},
{},
};
@@ -675,24 +675,9 @@ MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_table);
#ifdef CONFIG_OF
static const struct of_device_id tpm_tis_i2c_of_match[] = {
- {
- .name = "tpm_i2c_infineon",
- .type = "tpm",
- .compatible = "infineon,tpm_i2c_infineon",
- .data = (void *)0
- },
- {
- .name = "slb9635tt",
- .type = "tpm",
- .compatible = "infineon,slb9635tt",
- .data = (void *)0
- },
- {
- .name = "slb9645tt",
- .type = "tpm",
- .compatible = "infineon,slb9645tt",
- .data = (void *)1
- },
+ {.compatible = "infineon,tpm_i2c_infineon"},
+ {.compatible = "infineon,slb9635tt"},
+ {.compatible = "infineon,slb9645tt"},
{},
};
MODULE_DEVICE_TABLE(of, tpm_tis_i2c_of_match);
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index e2d1055fb814..f08949a5f678 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -133,93 +133,14 @@ static int check_acpi_tpm2(struct device *dev)
}
#endif
-#ifdef CONFIG_X86
-#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
-#define ILB_REMAP_SIZE 0x100
-#define LPC_CNTRL_REG_OFFSET 0x84
-#define LPC_CLKRUN_EN (1 << 2)
-
-static void __iomem *ilb_base_addr;
-
-static inline bool is_bsw(void)
-{
- return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
-}
-
-/**
- * tpm_platform_begin_xfer() - clear LPC CLKRUN_EN i.e. clocks will be running
- */
-static void tpm_platform_begin_xfer(void)
-{
- u32 clkrun_val;
-
- if (!is_bsw())
- return;
-
- clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /* Disable LPC CLKRUN# */
- clkrun_val &= ~LPC_CLKRUN_EN;
- iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /*
- * Write any random value on port 0x80 which is on LPC, to make
- * sure LPC clock is running before sending any TPM command.
- */
- outb(0xCC, 0x80);
-
-}
-
-/**
- * tpm_platform_end_xfer() - set LPC CLKRUN_EN i.e. clocks can be turned off
- */
-static void tpm_platform_end_xfer(void)
-{
- u32 clkrun_val;
-
- if (!is_bsw())
- return;
-
- clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /* Enable LPC CLKRUN# */
- clkrun_val |= LPC_CLKRUN_EN;
- iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /*
- * Write any random value on port 0x80 which is on LPC, to make
- * sure LPC clock is running before sending any TPM command.
- */
- outb(0xCC, 0x80);
-
-}
-#else
-static inline bool is_bsw(void)
-{
- return false;
-}
-
-static void tpm_platform_begin_xfer(void)
-{
-}
-
-static void tpm_platform_end_xfer(void)
-{
-}
-#endif
-
static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
while (len--)
*result++ = ioread8(phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -228,13 +149,9 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
while (len--)
iowrite8(*value++, phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -242,12 +159,8 @@ static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
*result = ioread16(phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -255,12 +168,8 @@ static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
*result = ioread32(phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -268,12 +177,8 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
iowrite32(value, phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -461,11 +366,6 @@ static int __init init_tis(void)
if (rc)
goto err_force;
-#ifdef CONFIG_X86
- if (is_bsw())
- ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
- ILB_REMAP_SIZE);
-#endif
rc = platform_driver_register(&tis_drv);
if (rc)
goto err_platform;
@@ -484,10 +384,6 @@ err_pnp:
err_platform:
if (force_pdev)
platform_device_unregister(force_pdev);
-#ifdef CONFIG_X86
- if (is_bsw())
- iounmap(ilb_base_addr);
-#endif
err_force:
return rc;
}
@@ -497,10 +393,6 @@ static void __exit cleanup_tis(void)
pnp_unregister_driver(&tis_pnp_driver);
platform_driver_unregister(&tis_drv);
-#ifdef CONFIG_X86
- if (is_bsw())
- iounmap(ilb_base_addr);
-#endif
if (force_pdev)
platform_device_unregister(force_pdev);
}
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index fdde971bc810..183a5f54d875 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -31,6 +31,74 @@
#include "tpm.h"
#include "tpm_tis_core.h"
+/* This is a polling delay to check for status and burstcount.
+ * As per DDWG input, the expectation is that the status and burstcount
+ * checks should return within a few usecs.
+ */
+#define TPM_POLL_SLEEP 1 /* msec */
+
+static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value);
+
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+ unsigned long timeout, wait_queue_head_t *queue,
+ bool check_cancel)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+ bool canceled = false;
+
+ /* check current status */
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
+ rc = wait_event_interruptible_timeout(*queue,
+ wait_for_tpm_stat_cond(chip, mask, check_cancel,
+ &canceled),
+ timeout);
+ if (rc > 0) {
+ if (canceled)
+ return -ECANCELED;
+ return 0;
+ }
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ do {
+ tpm_msleep(TPM_POLL_SLEEP);
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+}
+
/* Before we attempt to access the TPM we must see that the valid bit is set.
* The specification says that this bit is 0 at reset and remains 0 until the
* 'TPM has gone through its self test and initialization and has established
@@ -164,7 +232,7 @@ static int get_burstcount(struct tpm_chip *chip)
burstcnt = (value >> 8) & 0xFFFF;
if (burstcnt)
return burstcnt;
- tpm_msleep(TPM_TIMEOUT);
+ tpm_msleep(TPM_POLL_SLEEP);
} while (time_before(jiffies, stop));
return -EBUSY;
}
@@ -421,19 +489,28 @@ static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
int i, rc;
u32 did_vid;
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
if (rc < 0)
- return rc;
+ goto out;
for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
if (vendor_timeout_overrides[i].did_vid != did_vid)
continue;
memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
sizeof(vendor_timeout_overrides[i].timeout_us));
- return true;
+		rc = true;
+		goto out;
}
- return false;
+ rc = false;
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return rc;
}
/*
@@ -653,14 +730,73 @@ void tpm_tis_remove(struct tpm_chip *chip)
u32 interrupt;
int rc;
+ tpm_tis_clkrun_enable(chip, true);
+
rc = tpm_tis_read32(priv, reg, &interrupt);
if (rc < 0)
interrupt = 0;
tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
+
+ tpm_tis_clkrun_enable(chip, false);
+
+ if (priv->ilb_base_addr)
+ iounmap(priv->ilb_base_addr);
}
EXPORT_SYMBOL_GPL(tpm_tis_remove);
+/**
+ * tpm_tis_clkrun_enable() - Keep the CLKRUN protocol disabled for the entire
+ * duration of a single TPM command
+ * @chip: TPM chip to use
+ * @value: 1 - Disable CLKRUN protocol, so that clocks are free running
+ *         0 - Enable CLKRUN protocol
+ *
+ * Call this function directly from tpm_tis_remove() on the error and driver
+ * removal paths, since chip->ops is set to NULL in tpm_chip_unregister().
+ */
+static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value)
+{
+ struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
+ u32 clkrun_val;
+
+ if (!IS_ENABLED(CONFIG_X86) || !is_bsw() ||
+ !data->ilb_base_addr)
+ return;
+
+ if (value) {
+ data->clkrun_enabled++;
+ if (data->clkrun_enabled > 1)
+ return;
+ clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /* Disable LPC CLKRUN# */
+ clkrun_val &= ~LPC_CLKRUN_EN;
+ iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /*
+ * Write any random value on port 0x80 which is on LPC, to make
+ * sure LPC clock is running before sending any TPM command.
+ */
+ outb(0xCC, 0x80);
+ } else {
+ data->clkrun_enabled--;
+ if (data->clkrun_enabled)
+ return;
+
+ clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /* Enable LPC CLKRUN# */
+ clkrun_val |= LPC_CLKRUN_EN;
+ iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /*
+ * Write any random value on port 0x80 which is on LPC, to make
+ * sure LPC clock is running before sending any TPM command.
+ */
+ outb(0xCC, 0x80);
+ }
+}
+
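The clkrun_enabled refcount lets these calls nest, so every path that touches TPM registers can bracket its accesses symmetrically. A sketch of the intended pairing, assuming the tpm_chip and tpm_class_ops definitions from this tree (not verbatim driver code):

static int example_xfer(struct tpm_chip *chip)
{
	int rc = 0;

	if (chip->ops->clk_enable != NULL)
		chip->ops->clk_enable(chip, true);  /* CLKRUN# off, LPC clock runs */

	/* ... TPM register accesses go here ... */

	if (chip->ops->clk_enable != NULL)
		chip->ops->clk_enable(chip, false); /* CLKRUN# may be re-enabled */

	return rc;
}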
static const struct tpm_class_ops tpm_tis = {
.flags = TPM_OPS_AUTO_STARTUP,
.status = tpm_tis_status,
@@ -673,13 +809,17 @@ static const struct tpm_class_ops tpm_tis = {
.req_canceled = tpm_tis_req_canceled,
.request_locality = request_locality,
.relinquish_locality = release_locality,
+ .clk_enable = tpm_tis_clkrun_enable,
};
int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
const struct tpm_tis_phy_ops *phy_ops,
acpi_handle acpi_dev_handle)
{
- u32 vendor, intfcaps, intmask;
+ u32 vendor;
+ u32 intfcaps;
+ u32 intmask;
+ u32 clkrun_val;
u8 rid;
int rc, probe;
struct tpm_chip *chip;
@@ -700,6 +840,23 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->phy_ops = phy_ops;
dev_set_drvdata(&chip->dev, priv);
+ if (is_bsw()) {
+ priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
+ ILB_REMAP_SIZE);
+ if (!priv->ilb_base_addr)
+ return -ENOMEM;
+
+ clkrun_val = ioread32(priv->ilb_base_addr + LPC_CNTRL_OFFSET);
+ /* If CLKRUN# is not enabled on the LPC bus, there is nothing to toggle */
+ if (!(clkrun_val & LPC_CLKRUN_EN)) {
+ iounmap(priv->ilb_base_addr);
+ priv->ilb_base_addr = NULL;
+ }
+ }
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
if (wait_startup(chip, 0) != 0) {
rc = -ENODEV;
goto out_err;
@@ -790,9 +947,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
}
}
- return tpm_chip_register(chip);
+ rc = tpm_chip_register(chip);
+ if (rc)
+ goto out_err;
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return 0;
out_err:
+ if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
+ chip->ops->clk_enable(chip, false);
+
tpm_tis_remove(chip);
+
return rc;
}
EXPORT_SYMBOL_GPL(tpm_tis_core_init);
@@ -804,22 +972,31 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
u32 intmask;
int rc;
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
/* reenable interrupts that device may have lost or
* BIOS/firmware may have disabled
*/
rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
if (rc < 0)
- return;
+ goto out;
rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
if (rc < 0)
- return;
+ goto out;
intmask |= TPM_INTF_CMD_READY_INT
| TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
| TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return;
}
int tpm_tis_resume(struct device *dev)
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 6bbac319ff3b..d5c6a2e952b3 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -79,6 +79,11 @@ enum tis_defaults {
#define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
#define TPM_RID(l) (0x0F04 | ((l) << 12))
+#define LPC_CNTRL_OFFSET 0x84
+#define LPC_CLKRUN_EN (1 << 2)
+#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
+#define ILB_REMAP_SIZE 0x100
+
enum tpm_tis_flags {
TPM_TIS_ITPM_WORKAROUND = BIT(0),
};
@@ -89,6 +94,8 @@ struct tpm_tis_data {
int irq;
bool irq_tested;
unsigned int flags;
+ void __iomem *ilb_base_addr;
+ u16 clkrun_enabled;
wait_queue_head_t int_queue;
wait_queue_head_t read_queue;
const struct tpm_tis_phy_ops *phy_ops;
@@ -144,6 +151,15 @@ static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
return data->phy_ops->write32(data, addr, value);
}
+static inline bool is_bsw(void)
+{
+#ifdef CONFIG_X86
+	return boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT;
+#else
+ return false;
+#endif
+}
+
void tpm_tis_remove(struct tpm_chip *chip);
int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
const struct tpm_tis_phy_ops *phy_ops,
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 1d877cc9af97..674218b50b13 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -173,10 +173,10 @@ static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf,
*
* Return: Poll flags
*/
-static unsigned int vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
+static __poll_t vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
{
struct proxy_dev *proxy_dev = filp->private_data;
- unsigned ret;
+ __poll_t ret;
poll_wait(filp, &proxy_dev->wq, wait);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 656e8af95d52..911475d36800 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/freezer.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/io/tpmif.h>
@@ -39,6 +40,66 @@ enum status_bits {
VTPM_STATUS_CANCELED = 0x8,
};
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+ unsigned long timeout, wait_queue_head_t *queue,
+ bool check_cancel)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+ bool canceled = false;
+
+ /* check current status */
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
+ rc = wait_event_interruptible_timeout(*queue,
+ wait_for_tpm_stat_cond(chip, mask, check_cancel,
+ &canceled),
+ timeout);
+ if (rc > 0) {
+ if (canceled)
+ return -ECANCELED;
+ return 0;
+ }
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ do {
+ tpm_msleep(TPM_TIMEOUT);
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+}
+
static u8 vtpm_status(struct tpm_chip *chip)
{
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d1aed2513bd9..813a2e46824d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -982,10 +982,10 @@ error_out:
return ret;
}
-static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
+static __poll_t port_fops_poll(struct file *filp, poll_table *wait)
{
struct port *port;
- unsigned int ret;
+ __poll_t ret;
port = filp->private_data;
poll_wait(filp, &port->waitqueue, wait);
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index b6c9cdead7f3..88e1cf475d3f 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -1736,10 +1736,10 @@ end:
return pos;
}
-static unsigned int xillybus_poll(struct file *filp, poll_table *wait)
+static __poll_t xillybus_poll(struct file *filp, poll_table *wait)
{
struct xilly_channel *channel = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
poll_wait(filp, &channel->endpoint->ep_wait, wait);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 647d056df88c..b56c11f51baf 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -220,7 +220,8 @@ static bool clk_core_is_enabled(struct clk_core *core)
ret = core->ops->is_enabled(core->hw);
done:
- clk_pm_runtime_put(core);
+ if (core->dev)
+ pm_runtime_put(core->dev);
return ret;
}
@@ -1564,6 +1565,9 @@ static void clk_change_rate(struct clk_core *core)
best_parent_rate = core->parent->rate;
}
+ if (clk_pm_runtime_get(core))
+ return;
+
if (core->flags & CLK_SET_RATE_UNGATE) {
unsigned long flags;
@@ -1634,6 +1638,8 @@ static void clk_change_rate(struct clk_core *core)
/* handle the new child who might not be in core->children yet */
if (core->new_child)
clk_change_rate(core->new_child);
+
+ clk_pm_runtime_put(core);
}
static int clk_core_set_rate_nolock(struct clk_core *core,
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index a1a634253d6f..f00d8758ba24 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -83,9 +84,20 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
return 0;
}
+static int sun9i_mmc_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ sun9i_mmc_reset_assert(rcdev, id);
+ udelay(10);
+ sun9i_mmc_reset_deassert(rcdev, id);
+
+ return 0;
+}
+
static const struct reset_control_ops sun9i_mmc_reset_ops = {
.assert = sun9i_mmc_reset_assert,
.deassert = sun9i_mmc_reset_deassert,
+ .reset = sun9i_mmc_reset_reset,
};
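With a .reset callback wired up, consumers no longer need to open-code the assert/delay/deassert dance; reset_control_reset() now works against this controller. A hedged consumer-side sketch (device setup and error handling trimmed to the minimum):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_consumer_reset(struct device *dev)
{
	struct reset_control *rstc = devm_reset_control_get_exclusive(dev, NULL);

	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/* assert, ~10us delay, deassert -- backed by sun9i_mmc_reset_reset() */
	return reset_control_reset(rstc);
}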
static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index c729a88007d0..b3b4ed9b6874 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -269,6 +269,7 @@ config CLKSRC_STM32
bool "Clocksource for STM32 SoCs" if !ARCH_STM32
depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
select CLKSRC_MMIO
+ select TIMER_OF
config CLKSRC_MPS2
bool "Clocksource for MPS2 SoCs" if COMPILE_TEST
@@ -441,6 +442,13 @@ config MTK_TIMER
help
Support for Mediatek timer driver.
+config SPRD_TIMER
+ bool "Spreadtrum timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select TIMER_OF
+ help
+ Enables support for the Spreadtrum timer driver.
+
config SYS_SUPPORTS_SH_MTU2
bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 72711f1491e3..d6dec4489d66 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += owl-timer.o
+obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
diff --git a/drivers/clocksource/owl-timer.c b/drivers/clocksource/owl-timer.c
index c68630565079..ea00a5e8f95d 100644
--- a/drivers/clocksource/owl-timer.c
+++ b/drivers/clocksource/owl-timer.c
@@ -168,5 +168,6 @@ static int __init owl_timer_init(struct device_node *node)
return 0;
}
-CLOCKSOURCE_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
-CLOCKSOURCE_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s700, "actions,s700-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 9de47d4d2d9e..43f4d5c4d6fa 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -384,7 +384,7 @@ static int __init tcb_clksrc_init(void)
printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
divided_rate / 1000000,
- ((divided_rate + 500000) % 1000000) / 1000);
+ ((divided_rate % 1000000) + 500) / 1000);
if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
/* use apropriate function to read 32 bit counter */
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index a31990408153..06ed88a2a8a0 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -24,7 +24,13 @@
#include "timer-of.h"
-static __init void timer_irq_exit(struct of_timer_irq *of_irq)
+/**
+ * timer_of_irq_exit - Release the interrupt
+ * @of_irq: an of_timer_irq structure pointer
+ *
+ * Free the irq resource
+ */
+static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
{
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
@@ -34,8 +40,24 @@ static __init void timer_irq_exit(struct of_timer_irq *of_irq)
free_irq(of_irq->irq, clkevt);
}
-static __init int timer_irq_init(struct device_node *np,
- struct of_timer_irq *of_irq)
+/**
+ * timer_of_irq_init - Request the interrupt
+ * @np: a device tree node pointer
+ * @of_irq: an of_timer_irq structure pointer
+ *
+ * Get the interrupt number from its definition in the DT and request
+ * it. The interrupt is resolved with the following fallback order:
+ *
+ * - Get interrupt number by name
+ * - Get interrupt number by index
+ *
+ * When the interrupt is per CPU, 'request_percpu_irq()' is called,
+ * otherwise 'request_irq()' is used.
+ *
+ * Returns 0 on success, < 0 otherwise
+ */
+static __init int timer_of_irq_init(struct device_node *np,
+ struct of_timer_irq *of_irq)
{
int ret;
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
@@ -72,15 +94,30 @@ static __init int timer_irq_init(struct device_node *np,
return 0;
}
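A hedged sketch of the name-then-index fallback described in the kernel-doc above, built on the standard OF IRQ helpers (the wrapper itself is hypothetical):

#include <linux/of.h>
#include <linux/of_irq.h>

static int example_get_timer_irq(struct device_node *np, const char *name)
{
	int irq = name ? of_irq_get_byname(np, name) : 0;

	if (irq <= 0)	/* no name given, or lookup by name failed */
		irq = irq_of_parse_and_map(np, 0);

	return irq > 0 ? irq : -EINVAL;
}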
-static __init void timer_clk_exit(struct of_timer_clk *of_clk)
+/**
+ * timer_of_clk_exit - Release the clock resources
+ * @of_clk: a of_timer_clk structure pointer
+ *
+ * Disables and releases the refcount on the clk
+ */
+static __init void timer_of_clk_exit(struct of_timer_clk *of_clk)
{
of_clk->rate = 0;
clk_disable_unprepare(of_clk->clk);
clk_put(of_clk->clk);
}
-static __init int timer_clk_init(struct device_node *np,
- struct of_timer_clk *of_clk)
+/**
+ * timer_of_clk_init - Initialize the clock resources
+ * @np: a device tree node pointer
+ * @of_clk: a of_timer_clk structure pointer
+ *
+ * Get the clock by name or by index, enable it and get the rate
+ *
+ * Returns 0 on success, < 0 otherwise
+ */
+static __init int timer_of_clk_init(struct device_node *np,
+ struct of_timer_clk *of_clk)
{
int ret;
@@ -116,19 +153,19 @@ out_clk_put:
goto out;
}
-static __init void timer_base_exit(struct of_timer_base *of_base)
+static __init void timer_of_base_exit(struct of_timer_base *of_base)
{
iounmap(of_base->base);
}
-static __init int timer_base_init(struct device_node *np,
- struct of_timer_base *of_base)
+static __init int timer_of_base_init(struct device_node *np,
+ struct of_timer_base *of_base)
{
- const char *name = of_base->name ? of_base->name : np->full_name;
-
- of_base->base = of_io_request_and_map(np, of_base->index, name);
+ of_base->base = of_base->name ?
+ of_io_request_and_map(np, of_base->index, of_base->name) :
+ of_iomap(np, of_base->index);
-	if (IS_ERR(of_base->base)) {
-		pr_err("Failed to iomap (%s)\n", name);
-		return PTR_ERR(of_base->base);
+	if (IS_ERR_OR_NULL(of_base->base)) {
+		pr_err("Failed to iomap (%s)\n", of_base->name);
+		return of_base->base ? PTR_ERR(of_base->base) : -ENOMEM;
}
@@ -141,21 +178,21 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
int flags = 0;
if (to->flags & TIMER_OF_BASE) {
- ret = timer_base_init(np, &to->of_base);
+ ret = timer_of_base_init(np, &to->of_base);
if (ret)
goto out_fail;
flags |= TIMER_OF_BASE;
}
if (to->flags & TIMER_OF_CLOCK) {
- ret = timer_clk_init(np, &to->of_clk);
+ ret = timer_of_clk_init(np, &to->of_clk);
if (ret)
goto out_fail;
flags |= TIMER_OF_CLOCK;
}
if (to->flags & TIMER_OF_IRQ) {
- ret = timer_irq_init(np, &to->of_irq);
+ ret = timer_of_irq_init(np, &to->of_irq);
if (ret)
goto out_fail;
flags |= TIMER_OF_IRQ;
@@ -163,17 +200,20 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
if (!to->clkevt.name)
to->clkevt.name = np->name;
+
+ to->np = np;
+
return ret;
out_fail:
if (flags & TIMER_OF_IRQ)
- timer_irq_exit(&to->of_irq);
+ timer_of_irq_exit(&to->of_irq);
if (flags & TIMER_OF_CLOCK)
- timer_clk_exit(&to->of_clk);
+ timer_of_clk_exit(&to->of_clk);
if (flags & TIMER_OF_BASE)
- timer_base_exit(&to->of_base);
+ timer_of_base_exit(&to->of_base);
return ret;
}
@@ -187,11 +227,11 @@ out_fail:
void __init timer_of_cleanup(struct timer_of *to)
{
if (to->flags & TIMER_OF_IRQ)
- timer_irq_exit(&to->of_irq);
+ timer_of_irq_exit(&to->of_irq);
if (to->flags & TIMER_OF_CLOCK)
- timer_clk_exit(&to->of_clk);
+ timer_of_clk_exit(&to->of_clk);
if (to->flags & TIMER_OF_BASE)
- timer_base_exit(&to->of_base);
+ timer_of_base_exit(&to->of_base);
}
diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
index 3f708f1be43d..a5478f3e8589 100644
--- a/drivers/clocksource/timer-of.h
+++ b/drivers/clocksource/timer-of.h
@@ -33,6 +33,7 @@ struct of_timer_clk {
struct timer_of {
unsigned int flags;
+ struct device_node *np;
struct clock_event_device clkevt;
struct of_timer_base of_base;
struct of_timer_irq of_irq;
diff --git a/drivers/clocksource/timer-sprd.c b/drivers/clocksource/timer-sprd.c
new file mode 100644
index 000000000000..ef9ebeafb3ed
--- /dev/null
+++ b/drivers/clocksource/timer-sprd.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include "timer-of.h"
+
+#define TIMER_NAME "sprd_timer"
+
+#define TIMER_LOAD_LO 0x0
+#define TIMER_LOAD_HI 0x4
+#define TIMER_VALUE_LO 0x8
+#define TIMER_VALUE_HI 0xc
+
+#define TIMER_CTL 0x10
+#define TIMER_CTL_PERIOD_MODE BIT(0)
+#define TIMER_CTL_ENABLE BIT(1)
+#define TIMER_CTL_64BIT_WIDTH BIT(16)
+
+#define TIMER_INT 0x14
+#define TIMER_INT_EN BIT(0)
+#define TIMER_INT_RAW_STS BIT(1)
+#define TIMER_INT_MASK_STS BIT(2)
+#define TIMER_INT_CLR BIT(3)
+
+#define TIMER_VALUE_SHDW_LO 0x18
+#define TIMER_VALUE_SHDW_HI 0x1c
+
+#define TIMER_VALUE_LO_MASK GENMASK(31, 0)
+
+static void sprd_timer_enable(void __iomem *base, u32 flag)
+{
+ u32 val = readl_relaxed(base + TIMER_CTL);
+
+ val |= TIMER_CTL_ENABLE;
+ if (flag & TIMER_CTL_64BIT_WIDTH)
+ val |= TIMER_CTL_64BIT_WIDTH;
+ else
+ val &= ~TIMER_CTL_64BIT_WIDTH;
+
+ if (flag & TIMER_CTL_PERIOD_MODE)
+ val |= TIMER_CTL_PERIOD_MODE;
+ else
+ val &= ~TIMER_CTL_PERIOD_MODE;
+
+ writel_relaxed(val, base + TIMER_CTL);
+}
+
+static void sprd_timer_disable(void __iomem *base)
+{
+ u32 val = readl_relaxed(base + TIMER_CTL);
+
+ val &= ~TIMER_CTL_ENABLE;
+ writel_relaxed(val, base + TIMER_CTL);
+}
+
+static void sprd_timer_update_counter(void __iomem *base, unsigned long cycles)
+{
+ writel_relaxed(cycles & TIMER_VALUE_LO_MASK, base + TIMER_LOAD_LO);
+ writel_relaxed(0, base + TIMER_LOAD_HI);
+}
+
+static void sprd_timer_enable_interrupt(void __iomem *base)
+{
+ writel_relaxed(TIMER_INT_EN, base + TIMER_INT);
+}
+
+static void sprd_timer_clear_interrupt(void __iomem *base)
+{
+ u32 val = readl_relaxed(base + TIMER_INT);
+
+ val |= TIMER_INT_CLR;
+ writel_relaxed(val, base + TIMER_INT);
+}
+
+static int sprd_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ sprd_timer_update_counter(timer_of_base(to), cycles);
+ sprd_timer_enable(timer_of_base(to), 0);
+
+ return 0;
+}
+
+static int sprd_timer_set_periodic(struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ sprd_timer_update_counter(timer_of_base(to), timer_of_period(to));
+ sprd_timer_enable(timer_of_base(to), TIMER_CTL_PERIOD_MODE);
+
+ return 0;
+}
+
+static int sprd_timer_shutdown(struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ return 0;
+}
+
+static irqreturn_t sprd_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *ce = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_clear_interrupt(timer_of_base(to));
+
+ if (clockevent_state_oneshot(ce))
+ sprd_timer_disable(timer_of_base(to));
+
+ ce->event_handler(ce);
+ return IRQ_HANDLED;
+}
+
+static struct timer_of to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+ .clkevt = {
+ .name = TIMER_NAME,
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = sprd_timer_shutdown,
+ .set_state_periodic = sprd_timer_set_periodic,
+ .set_next_event = sprd_timer_set_next_event,
+ .cpumask = cpu_possible_mask,
+ },
+
+ .of_irq = {
+ .handler = sprd_timer_interrupt,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+};
+
+static int __init sprd_timer_init(struct device_node *np)
+{
+ int ret;
+
+ ret = timer_of_init(np, &to);
+ if (ret)
+ return ret;
+
+ sprd_timer_enable_interrupt(timer_of_base(&to));
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ 1, UINT_MAX);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(sc9860_timer, "sprd,sc9860-timer", sprd_timer_init);
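The handlers above lean entirely on the timer_of accessors, which are thin wrappers over the embedded structures. A sketch consistent with this series' timer-of.h (the example_ prefix marks these as illustrations, not the real inlines):

#include "timer-of.h"

static inline void __iomem *example_timer_of_base(struct timer_of *to)
{
	return to->of_base.base;
}

static inline unsigned long example_timer_of_rate(struct timer_of *to)
{
	return to->of_clk.rate;
}

static inline unsigned long example_timer_of_period(struct timer_of *to)
{
	return to->of_clk.period;
}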
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index 8f2423789ba9..e5cdc3af684c 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/of.h>
@@ -16,175 +17,318 @@
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/reset.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#include "timer-of.h"
#define TIM_CR1 0x00
#define TIM_DIER 0x0c
#define TIM_SR 0x10
#define TIM_EGR 0x14
+#define TIM_CNT 0x24
#define TIM_PSC 0x28
#define TIM_ARR 0x2c
+#define TIM_CCR1 0x34
#define TIM_CR1_CEN BIT(0)
+#define TIM_CR1_UDIS BIT(1)
#define TIM_CR1_OPM BIT(3)
#define TIM_CR1_ARPE BIT(7)
#define TIM_DIER_UIE BIT(0)
+#define TIM_DIER_CC1IE BIT(1)
#define TIM_SR_UIF BIT(0)
#define TIM_EGR_UG BIT(0)
-struct stm32_clock_event_ddata {
- struct clock_event_device evtdev;
- unsigned periodic_top;
- void __iomem *base;
+#define TIM_PSC_MAX USHRT_MAX
+#define TIM_PSC_CLKRATE 10000
+
+struct stm32_timer_private {
+ int bits;
};
-static int stm32_clock_event_shutdown(struct clock_event_device *evtdev)
+/**
+ * stm32_timer_of_bits_set - set accessor helper
+ * @to: a timer_of structure pointer
+ * @bits: the number of bits (16 or 32)
+ *
+ * Accessor helper to set the number of bits in the timer-of private
+ * structure.
+ *
+ */
+static void stm32_timer_of_bits_set(struct timer_of *to, int bits)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
- void *base = data->base;
+ struct stm32_timer_private *pd = to->private_data;
- writel_relaxed(0, base + TIM_CR1);
- return 0;
+ pd->bits = bits;
+}
+
+/**
+ * stm32_timer_of_bits_get - get accessor helper
+ * @to: a timer_of structure pointer
+ *
+ * Accessor helper to get the number of bits in the timer-of private
+ * structure.
+ *
+ * Returns an integer corresponding to the number of bits.
+ */
+static int stm32_timer_of_bits_get(struct timer_of *to)
+{
+ struct stm32_timer_private *pd = to->private_data;
+
+ return pd->bits;
+}
+
+static void __iomem *stm32_timer_cnt __read_mostly;
+
+static u64 notrace stm32_read_sched_clock(void)
+{
+ return readl_relaxed(stm32_timer_cnt);
+}
+
+static struct delay_timer stm32_timer_delay;
+
+static unsigned long stm32_read_delay(void)
+{
+ return readl_relaxed(stm32_timer_cnt);
}
-static int stm32_clock_event_set_periodic(struct clock_event_device *evtdev)
+static void stm32_clock_event_disable(struct timer_of *to)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
- void *base = data->base;
+ writel_relaxed(0, timer_of_base(to) + TIM_DIER);
+}
+
+/**
+ * stm32_timer_start - Start the counter without event
+ * @to: a timer_of structure pointer
+ *
+ * Start the timer so that the counter resets and starts incrementing,
+ * but disable the update interrupt event on counter overflow. By
+ * default, the counter counts up.
+ */
+static void stm32_timer_start(struct timer_of *to)
+{
+ writel_relaxed(TIM_CR1_UDIS | TIM_CR1_CEN, timer_of_base(to) + TIM_CR1);
+}
+
+static int stm32_clock_event_shutdown(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_clock_event_disable(to);
- writel_relaxed(data->periodic_top, base + TIM_ARR);
- writel_relaxed(TIM_CR1_ARPE | TIM_CR1_CEN, base + TIM_CR1);
return 0;
}
static int stm32_clock_event_set_next_event(unsigned long evt,
- struct clock_event_device *evtdev)
+ struct clock_event_device *clkevt)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
+ struct timer_of *to = to_timer_of(clkevt);
+ unsigned long now, next;
+
+ next = readl_relaxed(timer_of_base(to) + TIM_CNT) + evt;
+ writel_relaxed(next, timer_of_base(to) + TIM_CCR1);
+ now = readl_relaxed(timer_of_base(to) + TIM_CNT);
+
+ if ((next - now) > evt)
+ return -ETIME;
- writel_relaxed(evt, data->base + TIM_ARR);
- writel_relaxed(TIM_CR1_ARPE | TIM_CR1_OPM | TIM_CR1_CEN,
- data->base + TIM_CR1);
+ writel_relaxed(TIM_DIER_CC1IE, timer_of_base(to) + TIM_DIER);
+
+ return 0;
+}
+
+static int stm32_clock_event_set_periodic(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_timer_start(to);
+
+ return stm32_clock_event_set_next_event(timer_of_period(to), clkevt);
+}
+
+static int stm32_clock_event_set_oneshot(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_timer_start(to);
return 0;
}
static irqreturn_t stm32_clock_event_handler(int irq, void *dev_id)
{
- struct stm32_clock_event_ddata *data = dev_id;
+ struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ writel_relaxed(0, timer_of_base(to) + TIM_SR);
- writel_relaxed(0, data->base + TIM_SR);
+ if (clockevent_state_periodic(clkevt))
+ stm32_clock_event_set_periodic(clkevt);
+ else
+ stm32_clock_event_shutdown(clkevt);
- data->evtdev.event_handler(&data->evtdev);
+ clkevt->event_handler(clkevt);
return IRQ_HANDLED;
}
-static struct stm32_clock_event_ddata clock_event_ddata = {
- .evtdev = {
- .name = "stm32 clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .set_state_shutdown = stm32_clock_event_shutdown,
- .set_state_periodic = stm32_clock_event_set_periodic,
- .set_state_oneshot = stm32_clock_event_shutdown,
- .tick_resume = stm32_clock_event_shutdown,
- .set_next_event = stm32_clock_event_set_next_event,
- .rating = 200,
- },
-};
+/**
+ * stm32_timer_set_width - Sort out the timer width (32/16)
+ * @to: a pointer to a timer-of structure
+ *
+ * Write the 32-bit max value and read/return the result. If the timer
+ * is 32 bits wide, the result will be UINT_MAX, otherwise it will
+ * be truncated by the 16-bit register to USHRT_MAX.
+ *
+ */
+static void __init stm32_timer_set_width(struct timer_of *to)
+{
+ u32 width;
+
+ writel_relaxed(UINT_MAX, timer_of_base(to) + TIM_ARR);
+
+ width = readl_relaxed(timer_of_base(to) + TIM_ARR);
+
+ stm32_timer_of_bits_set(to, width == UINT_MAX ? 32 : 16);
+}
-static int __init stm32_clockevent_init(struct device_node *np)
+/**
+ * stm32_timer_set_prescaler - Compute and set the prescaler register
+ * @to: a pointer to a timer-of structure
+ *
+ * Depending on the timer width, compute the prescaler to always
+ * target a 10 kHz timer rate (TIM_PSC_CLKRATE) for 16-bit timers.
+ * 32-bit timers are considered precise and long enough not to need
+ * the prescaler.
+ */
+static void __init stm32_timer_set_prescaler(struct timer_of *to)
{
- struct stm32_clock_event_ddata *data = &clock_event_ddata;
- struct clk *clk;
- struct reset_control *rstc;
- unsigned long rate, max_delta;
- int irq, ret, bits, prescaler = 1;
-
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- pr_err("failed to get clock for clockevent (%d)\n", ret);
- goto err_clk_get;
+ int prescaler = 1;
+
+ if (stm32_timer_of_bits_get(to) != 32) {
+ prescaler = DIV_ROUND_CLOSEST(timer_of_rate(to),
+ TIM_PSC_CLKRATE);
+ /*
+ * The prescaler register is an u16, the variable
+ * can't be greater than TIM_PSC_MAX, let's cap it in
+ * this case.
+ */
+ prescaler = prescaler < TIM_PSC_MAX ? prescaler : TIM_PSC_MAX;
}
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("failed to enable timer clock for clockevent (%d)\n",
- ret);
- goto err_clk_enable;
- }
+ writel_relaxed(prescaler - 1, timer_of_base(to) + TIM_PSC);
+ writel_relaxed(TIM_EGR_UG, timer_of_base(to) + TIM_EGR);
+ writel_relaxed(0, timer_of_base(to) + TIM_SR);
- rate = clk_get_rate(clk);
+ /* Adjust rate and period given the prescaler value */
+ to->of_clk.rate = DIV_ROUND_CLOSEST(to->of_clk.rate, prescaler);
+ to->of_clk.period = DIV_ROUND_UP(to->of_clk.rate, HZ);
+}
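A worked example of the computation, assuming a hypothetical 90 MHz input clock on a 16-bit timer and HZ == 100 (DIV_ROUND_CLOSEST is simplified here for non-negative values):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define TIM_PSC_CLKRATE		10000
#define HZ			100

int main(void)
{
	unsigned long rate = 90000000UL;	/* hypothetical input clock */
	unsigned long prescaler = DIV_ROUND_CLOSEST(rate, TIM_PSC_CLKRATE);

	rate = DIV_ROUND_CLOSEST(rate, prescaler);

	/* prescaler = 9000 (below TIM_PSC_MAX), rate = 10000 Hz, period = 100 */
	printf("prescaler=%lu rate=%lu period=%lu\n",
	       prescaler, rate, DIV_ROUND_UP(rate, (unsigned long)HZ));
	return 0;
}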
- rstc = of_reset_control_get(np, NULL);
- if (!IS_ERR(rstc)) {
- reset_control_assert(rstc);
- reset_control_deassert(rstc);
+static int __init stm32_clocksource_init(struct timer_of *to)
+{
+ u32 bits = stm32_timer_of_bits_get(to);
+ const char *name = to->np->full_name;
+
+ /*
+	 * This driver allows several timers to be registered and relies
+	 * on the generic time framework to select the right one.
+	 * However, nothing equivalent exists for the sched_clock. We are
+	 * not interested in a sched_clock for the 16-bit timers, only
+	 * for the 32-bit one, so if no 32-bit timer has been registered
+	 * yet, select this 32-bit timer as the sched_clock.
+ */
+ if (bits == 32 && !stm32_timer_cnt) {
+
+ /*
+		 * Start the counter immediately as we will be using
+		 * it right away.
+ */
+ stm32_timer_start(to);
+
+ stm32_timer_cnt = timer_of_base(to) + TIM_CNT;
+ sched_clock_register(stm32_read_sched_clock, bits, timer_of_rate(to));
+ pr_info("%s: STM32 sched_clock registered\n", name);
+
+ stm32_timer_delay.read_current_timer = stm32_read_delay;
+ stm32_timer_delay.freq = timer_of_rate(to);
+ register_current_timer_delay(&stm32_timer_delay);
+ pr_info("%s: STM32 delay timer registered\n", name);
}
- data->base = of_iomap(np, 0);
- if (!data->base) {
- ret = -ENXIO;
- pr_err("failed to map registers for clockevent\n");
- goto err_iomap;
- }
+ return clocksource_mmio_init(timer_of_base(to) + TIM_CNT, name,
+ timer_of_rate(to), bits == 32 ? 250 : 100,
+ bits, clocksource_mmio_readl_up);
+}
- irq = irq_of_parse_and_map(np, 0);
- if (!irq) {
- ret = -EINVAL;
- pr_err("%pOF: failed to get irq.\n", np);
- goto err_get_irq;
- }
+static void __init stm32_clockevent_init(struct timer_of *to)
+{
+ u32 bits = stm32_timer_of_bits_get(to);
- /* Detect whether the timer is 16 or 32 bits */
- writel_relaxed(~0U, data->base + TIM_ARR);
- max_delta = readl_relaxed(data->base + TIM_ARR);
- if (max_delta == ~0U) {
- prescaler = 1;
- bits = 32;
- } else {
- prescaler = 1024;
- bits = 16;
- }
- writel_relaxed(0, data->base + TIM_ARR);
+ to->clkevt.name = to->np->full_name;
+ to->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ to->clkevt.set_state_shutdown = stm32_clock_event_shutdown;
+ to->clkevt.set_state_periodic = stm32_clock_event_set_periodic;
+ to->clkevt.set_state_oneshot = stm32_clock_event_set_oneshot;
+ to->clkevt.tick_resume = stm32_clock_event_shutdown;
+ to->clkevt.set_next_event = stm32_clock_event_set_next_event;
+ to->clkevt.rating = bits == 32 ? 250 : 100;
+
+ clockevents_config_and_register(&to->clkevt, timer_of_rate(to), 0x1,
+ (1 << bits) - 1);
+
+ pr_info("%pOF: STM32 clockevent driver initialized (%d bits)\n",
+ to->np, bits);
+}
+
+static int __init stm32_timer_init(struct device_node *node)
+{
+ struct reset_control *rstc;
+ struct timer_of *to;
+ int ret;
+
+ to = kzalloc(sizeof(*to), GFP_KERNEL);
+ if (!to)
+ return -ENOMEM;
- writel_relaxed(prescaler - 1, data->base + TIM_PSC);
- writel_relaxed(TIM_EGR_UG, data->base + TIM_EGR);
- writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER);
- writel_relaxed(0, data->base + TIM_SR);
+ to->flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE;
+ to->of_irq.handler = stm32_clock_event_handler;
- data->periodic_top = DIV_ROUND_CLOSEST(rate, prescaler * HZ);
+ ret = timer_of_init(node, to);
+ if (ret)
+ goto err;
- clockevents_config_and_register(&data->evtdev,
- DIV_ROUND_CLOSEST(rate, prescaler),
- 0x1, max_delta);
+ to->private_data = kzalloc(sizeof(struct stm32_timer_private),
+ GFP_KERNEL);
+	if (!to->private_data) {
+		ret = -ENOMEM;
+		goto deinit;
+	}
- ret = request_irq(irq, stm32_clock_event_handler, IRQF_TIMER,
- "stm32 clockevent", data);
- if (ret) {
- pr_err("%pOF: failed to request irq.\n", np);
- goto err_get_irq;
+ rstc = of_reset_control_get(node, NULL);
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
}
- pr_info("%pOF: STM32 clockevent driver initialized (%d bits)\n",
- np, bits);
+ stm32_timer_set_width(to);
- return ret;
+ stm32_timer_set_prescaler(to);
+
+ ret = stm32_clocksource_init(to);
+ if (ret)
+ goto deinit;
+
+ stm32_clockevent_init(to);
+ return 0;
-err_get_irq:
- iounmap(data->base);
-err_iomap:
- clk_disable_unprepare(clk);
-err_clk_enable:
- clk_put(clk);
-err_clk_get:
+deinit:
+ timer_of_cleanup(to);
+err:
+ kfree(to);
return ret;
}
-TIMER_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
+TIMER_OF_DECLARE(stm32, "st,stm32-timer", stm32_timer_init);
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index bdce4488ded1..3a88e33b0cfe 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -2,6 +2,29 @@
# ARM CPU Frequency scaling drivers
#
+config ACPI_CPPC_CPUFREQ
+ tristate "CPUFreq driver based on the ACPI CPPC spec"
+ depends on ACPI_PROCESSOR
+ select ACPI_CPPC_LIB
+ help
+ This adds a CPUFreq driver which uses CPPC methods
+ as described in the ACPIv5.1 spec. CPPC stands for
+ Collaborative Processor Performance Controls. It
+ is based on an abstract continuous scale of CPU
+ performance values which allows the remote power
+ processor to flexibly optimize for power and
+ performance. CPPC relies on power management firmware
+ support for its operation.
+
+ If in doubt, say N.
+
+config ARM_ARMADA_37XX_CPUFREQ
+ tristate "Armada 37xx CPUFreq support"
+ depends on ARCH_MVEBU
+ help
+ This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
+ The Armada 37xx PMU supports 4 frequency and VDD levels.
+
# big LITTLE core layer and glue drivers
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
@@ -12,6 +35,30 @@ config ARM_BIG_LITTLE_CPUFREQ
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
+config ARM_DT_BL_CPUFREQ
+ tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && OF
+ help
+	  This enables probing via DT for the Generic CPUfreq driver for
+	  ARM big.LITTLE platforms. This gets frequency tables from DT.
+
+config ARM_SCPI_CPUFREQ
+ tristate "SCPI based CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
+ help
+ This adds the CPUfreq driver support for ARM big.LITTLE platforms
+ using SCPI protocol for CPU power management.
+
+ This driver uses SCPI Message Protocol driver to interact with the
+ firmware providing the CPU DVFS functionality.
+
+config ARM_VEXPRESS_SPC_CPUFREQ
+ tristate "Versatile Express SPC based CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+ help
+	  This adds the CPUfreq driver support for Versatile Express
+ big.LITTLE platforms using SPC for power management.
+
config ARM_BRCMSTB_AVS_CPUFREQ
tristate "Broadcom STB AVS CPUfreq driver"
depends on ARCH_BRCMSTB || COMPILE_TEST
@@ -33,20 +80,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
If in doubt, say N.
-config ARM_DT_BL_CPUFREQ
- tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && OF
- help
- This enables probing via DT for Generic CPUfreq driver for ARM
- big.LITTLE platform. This gets frequency tables from DT.
-
-config ARM_VEXPRESS_SPC_CPUFREQ
- tristate "Versatile Express SPC based CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
- help
- This add the CPUfreq driver support for Versatile Express
- big.LITTLE platforms using SPC for power management.
-
config ARM_EXYNOS5440_CPUFREQ
tristate "SAMSUNG EXYNOS5440"
depends on SOC_EXYNOS5440
@@ -205,16 +238,6 @@ config ARM_SA1100_CPUFREQ
config ARM_SA1110_CPUFREQ
bool
-config ARM_SCPI_CPUFREQ
- tristate "SCPI based CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
- help
- This adds the CPUfreq driver support for ARM big.LITTLE platforms
- using SCPI protocol for CPU power management.
-
- This driver uses SCPI Message Protocol driver to interact with the
- firmware providing the CPU DVFS functionality.
-
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR
@@ -275,20 +298,3 @@ config ARM_PXA2xx_CPUFREQ
This add the CPUFreq driver support for Intel PXA2xx SOCs.
If in doubt, say N.
-
-config ACPI_CPPC_CPUFREQ
- tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI_PROCESSOR
- select ACPI_CPPC_LIB
- default n
- help
- This adds a CPUFreq driver which uses CPPC methods
- as described in the ACPIv5.1 spec. CPPC stands for
- Collaborative Processor Performance Controls. It
- is based on an abstract continuous scale of CPU
- performance values which allows the remote power
- processor to flexibly optimize for power and
- performance. CPPC relies on power management firmware
- support for its operation.
-
- If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 812f9e0d01a3..e07715ce8844 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -52,23 +52,26 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
# LITTLE drivers, so that it is probed last.
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
+obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
+obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o
+obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
obj-$(CONFIG_ARM_S3C2440_CPUFREQ) += s3c2440-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
@@ -81,8 +84,6 @@ obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
-obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
-obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
##################################################################################
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 65ec5f01aa8d..c56b57dcfda5 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -526,34 +526,13 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
static void bL_cpufreq_ready(struct cpufreq_policy *policy)
{
- struct device *cpu_dev = get_cpu_device(policy->cpu);
int cur_cluster = cpu_to_cluster(policy->cpu);
- struct device_node *np;
/* Do not register a cpu_cooling device if we are in IKS mode */
if (cur_cluster >= MAX_CLUSTERS)
return;
- np = of_node_get(cpu_dev->of_node);
- if (WARN_ON(!np))
- return;
-
- if (of_find_property(np, "#cooling-cells", NULL)) {
- u32 power_coefficient = 0;
-
- of_property_read_u32(np, "dynamic-power-coefficient",
- &power_coefficient);
-
- cdev[cur_cluster] = of_cpufreq_power_cooling_register(np,
- policy, power_coefficient, NULL);
- if (IS_ERR(cdev[cur_cluster])) {
- dev_err(cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(cdev[cur_cluster]));
- cdev[cur_cluster] = NULL;
- }
- }
- of_node_put(np);
+ cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver bL_cpufreq_driver = {
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
new file mode 100644
index 000000000000..c6ebc88a7d8d
--- /dev/null
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPU frequency scaling support for Armada 37xx platform.
+ *
+ * Copyright (C) 2017 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* Power management in North Bridge register set */
+#define ARMADA_37XX_NB_L0L1 0x18
+#define ARMADA_37XX_NB_L2L3 0x1C
+#define ARMADA_37XX_NB_TBG_DIV_OFF 13
+#define ARMADA_37XX_NB_TBG_DIV_MASK 0x7
+#define ARMADA_37XX_NB_CLK_SEL_OFF 11
+#define ARMADA_37XX_NB_CLK_SEL_MASK 0x1
+#define ARMADA_37XX_NB_CLK_SEL_TBG 0x1
+#define ARMADA_37XX_NB_TBG_SEL_OFF 9
+#define ARMADA_37XX_NB_TBG_SEL_MASK 0x3
+#define ARMADA_37XX_NB_VDD_SEL_OFF 6
+#define ARMADA_37XX_NB_VDD_SEL_MASK 0x3
+#define ARMADA_37XX_NB_CONFIG_SHIFT 16
+#define ARMADA_37XX_NB_DYN_MOD 0x24
+#define ARMADA_37XX_NB_CLK_SEL_EN BIT(26)
+#define ARMADA_37XX_NB_TBG_EN BIT(28)
+#define ARMADA_37XX_NB_DIV_EN BIT(29)
+#define ARMADA_37XX_NB_VDD_EN BIT(30)
+#define ARMADA_37XX_NB_DFS_EN BIT(31)
+#define ARMADA_37XX_NB_CPU_LOAD 0x30
+#define ARMADA_37XX_NB_CPU_LOAD_MASK 0x3
+#define ARMADA_37XX_DVFS_LOAD_0 0
+#define ARMADA_37XX_DVFS_LOAD_1 1
+#define ARMADA_37XX_DVFS_LOAD_2 2
+#define ARMADA_37XX_DVFS_LOAD_3 3
+
+/*
+ * On Armada 37xx the power management unit manages 4 levels of CPU
+ * load; each level can be associated with a CPU clock source, a CPU
+ * divider, a VDD level, etc.
+ */
+#define LOAD_LEVEL_NR 4
+
+struct armada_37xx_dvfs {
+ u32 cpu_freq_max;
+ u8 divider[LOAD_LEVEL_NR];
+};
+
+static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
+ {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+ {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
+ {.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
+ {.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
+};
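A quick standalone sketch shows what the table means in practice; the 1.2GHz row is copied from above, the rest is illustrative:

#include <stdio.h>

/* Divider row for the 1.2GHz part, copied from the table above. */
static const unsigned int divider[4] = {1, 2, 4, 6};

int main(void)
{
	unsigned long cpu_freq_max = 1200UL * 1000 * 1000;
	int load_lvl;

	/* Each load level runs the CPU at cpu_freq_max / divider[level]. */
	for (load_lvl = 0; load_lvl < 4; load_lvl++)
		printf("load level %d: %lu MHz\n", load_lvl,
		       cpu_freq_max / divider[load_lvl] / 1000000);
	return 0;
}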
+
+static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(armada_37xx_dvfs); i++) {
+ if (freq == armada_37xx_dvfs[i].cpu_freq_max)
+ return &armada_37xx_dvfs[i];
+ }
+
+ pr_err("Unsupported CPU frequency %d MHz\n", freq/1000000);
+ return NULL;
+}
+
+/*
+ * Set up the four load levels managed by the hardware. Once the four
+ * levels are configured, the DVFS can be enabled.
+ */
+static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
+ struct clk *clk, u8 *divider)
+{
+ int load_lvl;
+ struct clk *parent;
+
+ for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
+ unsigned int reg, mask, val, offset = 0;
+
+ if (load_lvl <= ARMADA_37XX_DVFS_LOAD_1)
+ reg = ARMADA_37XX_NB_L0L1;
+ else
+ reg = ARMADA_37XX_NB_L2L3;
+
+ if (load_lvl == ARMADA_37XX_DVFS_LOAD_0 ||
+ load_lvl == ARMADA_37XX_DVFS_LOAD_2)
+ offset += ARMADA_37XX_NB_CONFIG_SHIFT;
+
+		/* Set the CPU clock source; for all the levels we use TBG */
+ val = ARMADA_37XX_NB_CLK_SEL_TBG << ARMADA_37XX_NB_CLK_SEL_OFF;
+ mask = (ARMADA_37XX_NB_CLK_SEL_MASK
+ << ARMADA_37XX_NB_CLK_SEL_OFF);
+
+ /*
+		 * Set the CPU divider based on the pre-computed array
+		 * in order to have balanced steps.
+ */
+ val |= divider[load_lvl] << ARMADA_37XX_NB_TBG_DIV_OFF;
+ mask |= (ARMADA_37XX_NB_TBG_DIV_MASK
+ << ARMADA_37XX_NB_TBG_DIV_OFF);
+
+ /* Set VDD divider which is actually the load level. */
+ val |= load_lvl << ARMADA_37XX_NB_VDD_SEL_OFF;
+ mask |= (ARMADA_37XX_NB_VDD_SEL_MASK
+ << ARMADA_37XX_NB_VDD_SEL_OFF);
+
+ val <<= offset;
+ mask <<= offset;
+
+ regmap_update_bits(base, reg, mask, val);
+ }
+
+ /*
+	 * Set the CPU clock source; for all the levels we keep the same
+	 * clock source as the one already configured. For this we need
+	 * to use the clock framework.
+ */
+ parent = clk_get_parent(clk);
+ clk_set_parent(clk, parent);
+}
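The L0/L1 and L2/L3 pairs each share a 32-bit register, with levels 0 and 2 shifted into the upper half-word by ARMADA_37XX_NB_CONFIG_SHIFT. A hedged sketch of that packing (field offsets copied from the defines above; the sample divider/VDD values are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define NB_TBG_DIV_OFF	13
#define NB_CLK_SEL_OFF	11
#define NB_VDD_SEL_OFF	6
#define NB_CONFIG_SHIFT	16	/* even levels use the upper half-word */

static uint32_t pack_level(uint32_t div, uint32_t vdd, int upper)
{
	uint32_t val = (1u << NB_CLK_SEL_OFF) |	/* clock source = TBG */
		       (div << NB_TBG_DIV_OFF) |
		       (vdd << NB_VDD_SEL_OFF);

	return upper ? val << NB_CONFIG_SHIFT : val;
}

int main(void)
{
	/* L0 lands in the upper half of NB_L0L1, L1 in the lower half. */
	printf("L0=0x%08x L1=0x%08x\n",
	       pack_level(1, 0, 1), pack_level(2, 1, 0));
	return 0;
}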
+
+static void __init armada37xx_cpufreq_disable_dvfs(struct regmap *base)
+{
+ unsigned int reg = ARMADA_37XX_NB_DYN_MOD,
+ mask = ARMADA_37XX_NB_DFS_EN;
+
+ regmap_update_bits(base, reg, mask, 0);
+}
+
+static void __init armada37xx_cpufreq_enable_dvfs(struct regmap *base)
+{
+ unsigned int val, reg = ARMADA_37XX_NB_CPU_LOAD,
+ mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+ /* Start with the highest load (0) */
+ val = ARMADA_37XX_DVFS_LOAD_0;
+ regmap_update_bits(base, reg, mask, val);
+
+ /* Now enable DVFS for the CPUs */
+ reg = ARMADA_37XX_NB_DYN_MOD;
+ mask = ARMADA_37XX_NB_CLK_SEL_EN | ARMADA_37XX_NB_TBG_EN |
+ ARMADA_37XX_NB_DIV_EN | ARMADA_37XX_NB_VDD_EN |
+ ARMADA_37XX_NB_DFS_EN;
+
+ regmap_update_bits(base, reg, mask, mask);
+}
+
+static int __init armada37xx_cpufreq_driver_init(void)
+{
+ struct armada_37xx_dvfs *dvfs;
+ struct platform_device *pdev;
+ unsigned int cur_frequency;
+ struct regmap *nb_pm_base;
+ struct device *cpu_dev;
+ int load_lvl, ret;
+ struct clk *clk;
+
+ nb_pm_base =
+ syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
+
+ if (IS_ERR(nb_pm_base))
+ return -ENODEV;
+
+	/* Before doing any configuration of the DVFS, first disable it */
+ armada37xx_cpufreq_disable_dvfs(nb_pm_base);
+
+ /*
+ * On CPU 0 register the operating points supported (which are
+ * the nominal CPU frequency and full integer divisions of
+ * it).
+ */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+		pr_err("Cannot get CPU 0 device\n");
+ return -ENODEV;
+ }
+
+ clk = clk_get(cpu_dev, 0);
+ if (IS_ERR(clk)) {
+ dev_err(cpu_dev, "Cannot get clock for CPU0\n");
+ return PTR_ERR(clk);
+ }
+
+ /* Get nominal (current) CPU frequency */
+ cur_frequency = clk_get_rate(clk);
+ if (!cur_frequency) {
+ dev_err(cpu_dev, "Failed to get clock rate for CPU\n");
+		clk_put(clk);
+		return -EINVAL;
+ }
+
+ dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
+	if (!dvfs) {
+		clk_put(clk);
+		return -EINVAL;
+	}
+
+ armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
+
+ for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
+ load_lvl++) {
+ unsigned long freq = cur_frequency / dvfs->divider[load_lvl];
+
+ ret = dev_pm_opp_add(cpu_dev, freq, 0);
+ if (ret) {
+			/* clean up the already added OPPs before leaving */
+ while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
+ freq = cur_frequency / dvfs->divider[load_lvl];
+ dev_pm_opp_remove(cpu_dev, freq);
+ }
+			clk_put(clk);
+			return ret;
+ }
+ }
+
+ /* Now that everything is setup, enable the DVFS at hardware level */
+ armada37xx_cpufreq_enable_dvfs(nb_pm_base);
+
+ pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+/* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
+late_initcall(armada37xx_cpufreq_driver_init);
+
+MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
+MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index ecc56e26f8f6..3b585e4bfac5 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -108,6 +108,14 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "marvell,armadaxp", },
+ { .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt2712", },
+ { .compatible = "mediatek,mt7622", },
+ { .compatible = "mediatek,mt7623", },
+ { .compatible = "mediatek,mt817x", },
+ { .compatible = "mediatek,mt8173", },
+ { .compatible = "mediatek,mt8176", },
+
{ .compatible = "nvidia,tegra124", },
{ .compatible = "st,stih407", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 545946ad0752..de3d104c25d7 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -319,33 +319,8 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
static void cpufreq_ready(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
- struct device_node *np = of_node_get(priv->cpu_dev->of_node);
- if (WARN_ON(!np))
- return;
-
- /*
- * For now, just loading the cooling device;
- * thermal DT code takes care of matching them.
- */
- if (of_find_property(np, "#cooling-cells", NULL)) {
- u32 power_coefficient = 0;
-
- of_property_read_u32(np, "dynamic-power-coefficient",
- &power_coefficient);
-
- priv->cdev = of_cpufreq_power_cooling_register(np,
- policy, power_coefficient, NULL);
- if (IS_ERR(priv->cdev)) {
- dev_err(priv->cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(priv->cdev));
-
- priv->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ priv->cdev = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver dt_cpufreq_driver = {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 41d148af7748..421f318c0e66 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -601,19 +601,18 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
/**
* cpufreq_parse_governor - parse a governor string
*/
-static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
- struct cpufreq_governor **governor)
+static int cpufreq_parse_governor(char *str_governor,
+ struct cpufreq_policy *policy)
{
- int err = -EINVAL;
-
if (cpufreq_driver->setpolicy) {
if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
- *policy = CPUFREQ_POLICY_PERFORMANCE;
- err = 0;
- } else if (!strncasecmp(str_governor, "powersave",
- CPUFREQ_NAME_LEN)) {
- *policy = CPUFREQ_POLICY_POWERSAVE;
- err = 0;
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ return 0;
+ }
+
+ if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ return 0;
}
} else {
struct cpufreq_governor *t;
@@ -621,26 +620,31 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
mutex_lock(&cpufreq_governor_mutex);
t = find_governor(str_governor);
-
- if (t == NULL) {
+ if (!t) {
int ret;
mutex_unlock(&cpufreq_governor_mutex);
+
ret = request_module("cpufreq_%s", str_governor);
- mutex_lock(&cpufreq_governor_mutex);
+ if (ret)
+ return -EINVAL;
- if (ret == 0)
- t = find_governor(str_governor);
- }
+ mutex_lock(&cpufreq_governor_mutex);
- if (t != NULL) {
- *governor = t;
- err = 0;
+ t = find_governor(str_governor);
}
+ if (t && !try_module_get(t->owner))
+ t = NULL;
mutex_unlock(&cpufreq_governor_mutex);
+
+ if (t) {
+ policy->governor = t;
+ return 0;
+ }
}
- return err;
+
+ return -EINVAL;
}
/**
@@ -760,11 +764,14 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
if (ret != 1)
return -EINVAL;
- if (cpufreq_parse_governor(str_governor, &new_policy.policy,
- &new_policy.governor))
+ if (cpufreq_parse_governor(str_governor, &new_policy))
return -EINVAL;
ret = cpufreq_set_policy(policy, &new_policy);
+
+ if (new_policy.governor)
+ module_put(new_policy.governor->owner);
+
return ret ? ret : count;
}
@@ -1044,8 +1051,7 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
if (policy->last_policy)
new_policy.policy = policy->last_policy;
else
- cpufreq_parse_governor(gov->name, &new_policy.policy,
- NULL);
+ cpufreq_parse_governor(gov->name, &new_policy);
}
/* set default policy */
return cpufreq_set_policy(policy, &new_policy);
@@ -2160,7 +2166,6 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
mutex_lock(&cpufreq_governor_mutex);
list_del(&governor->governor_list);
mutex_unlock(&cpufreq_governor_mutex);
- return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 58d4f4e1ad6a..ca38229b045a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -22,6 +22,8 @@
#include "cpufreq_governor.h"
+#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC)
+
static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
static DEFINE_MUTEX(gov_dbs_data_mutex);
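For reference, the new floor is two scheduler ticks expressed in microseconds. A quick standalone check (HZ=250 is only an example; TICK_NSEC is simplified from its kernel definition):

#include <stdio.h>

#define HZ		250			/* assumed config */
#define NSEC_PER_SEC	1000000000L
#define NSEC_PER_USEC	1000L
#define TICK_NSEC	(NSEC_PER_SEC / HZ)	/* simplified expansion */

int main(void)
{
	/* With HZ=250 a tick is 4ms, so the floor is 8000us. */
	printf("min sampling interval: %ld us\n",
	       2 * TICK_NSEC / NSEC_PER_USEC);
	return 0;
}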
@@ -47,11 +49,15 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct policy_dbs_info *policy_dbs;
+ unsigned int sampling_interval;
int ret;
- ret = sscanf(buf, "%u", &dbs_data->sampling_rate);
- if (ret != 1)
+
+ ret = sscanf(buf, "%u", &sampling_interval);
+ if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
return -EINVAL;
+ dbs_data->sampling_rate = sampling_interval;
+
/*
* We are operating under dbs_data->mutex and so the list and its
* entries can't be freed concurrently.
@@ -430,7 +436,14 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
if (ret)
goto free_policy_dbs_info;
- dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy);
+ /*
+ * The sampling interval should not be less than the transition latency
+ * of the CPU and it also cannot be too small for dbs_update() to work
+ * correctly.
+ */
+ dbs_data->sampling_rate = max_t(unsigned int,
+ CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
+ cpufreq_policy_transition_delay_us(policy));
if (!have_governor_per_policy())
gov->gdbs_data = dbs_data;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1e55b5790853..1572129844a5 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -27,7 +27,7 @@ struct cpufreq_stats {
unsigned int *trans_table;
};
-static int cpufreq_stats_update(struct cpufreq_stats *stats)
+static void cpufreq_stats_update(struct cpufreq_stats *stats)
{
unsigned long long cur_time = get_jiffies_64();
@@ -35,7 +35,6 @@ static int cpufreq_stats_update(struct cpufreq_stats *stats)
stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
stats->last_time = cur_time;
spin_unlock(&cpufreq_stats_lock);
- return 0;
}
static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 628fe899cb48..741f22e5cee3 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -25,15 +25,29 @@ static struct regulator *arm_reg;
static struct regulator *pu_reg;
static struct regulator *soc_reg;
-static struct clk *arm_clk;
-static struct clk *pll1_sys_clk;
-static struct clk *pll1_sw_clk;
-static struct clk *step_clk;
-static struct clk *pll2_pfd2_396m_clk;
-
-/* clk used by i.MX6UL */
-static struct clk *pll2_bus_clk;
-static struct clk *secondary_sel_clk;
+enum IMX6_CPUFREQ_CLKS {
+ ARM,
+ PLL1_SYS,
+ STEP,
+ PLL1_SW,
+ PLL2_PFD2_396M,
+ /* MX6UL requires two more clks */
+ PLL2_BUS,
+ SECONDARY_SEL,
+};
+#define IMX6Q_CPUFREQ_CLK_NUM 5
+#define IMX6UL_CPUFREQ_CLK_NUM 7
+
+static int num_clks;
+static struct clk_bulk_data clks[] = {
+ { .id = "arm" },
+ { .id = "pll1_sys" },
+ { .id = "step" },
+ { .id = "pll1_sw" },
+ { .id = "pll2_pfd2_396m" },
+ { .id = "pll2_bus" },
+ { .id = "secondary_sel" },
+};
static struct device *cpu_dev;
static bool free_opp;
@@ -53,7 +67,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
new_freq = freq_table[index].frequency;
freq_hz = new_freq * 1000;
- old_freq = clk_get_rate(arm_clk) / 1000;
+ old_freq = clk_get_rate(clks[ARM].clk) / 1000;
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
@@ -112,29 +126,35 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
* voltage of 528MHz, so lower the CPU frequency to one
* half before changing CPU frequency.
*/
- clk_set_rate(arm_clk, (old_freq >> 1) * 1000);
- clk_set_parent(pll1_sw_clk, pll1_sys_clk);
- if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk))
- clk_set_parent(secondary_sel_clk, pll2_bus_clk);
+ clk_set_rate(clks[ARM].clk, (old_freq >> 1) * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk))
+ clk_set_parent(clks[SECONDARY_SEL].clk,
+ clks[PLL2_BUS].clk);
else
- clk_set_parent(secondary_sel_clk, pll2_pfd2_396m_clk);
- clk_set_parent(step_clk, secondary_sel_clk);
- clk_set_parent(pll1_sw_clk, step_clk);
+ clk_set_parent(clks[SECONDARY_SEL].clk,
+ clks[PLL2_PFD2_396M].clk);
+ clk_set_parent(clks[STEP].clk, clks[SECONDARY_SEL].clk);
+ clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_BUS].clk)) {
+ clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+ }
} else {
- clk_set_parent(step_clk, pll2_pfd2_396m_clk);
- clk_set_parent(pll1_sw_clk, step_clk);
- if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
- clk_set_rate(pll1_sys_clk, new_freq * 1000);
- clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+ clk_set_parent(clks[STEP].clk, clks[PLL2_PFD2_396M].clk);
+ clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk)) {
+ clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
} else {
/* pll1_sys needs to be enabled for divider rate change to work. */
pll1_sys_temp_enabled = true;
- clk_prepare_enable(pll1_sys_clk);
+ clk_prepare_enable(clks[PLL1_SYS].clk);
}
}
/* Ensure the arm clock divider is what we expect */
- ret = clk_set_rate(arm_clk, new_freq * 1000);
+ ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
if (ret) {
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
regulator_set_voltage_tol(arm_reg, volt_old, 0);
@@ -143,7 +163,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
/* PLL1 is only needed until after ARM-PODF is set. */
if (pll1_sys_temp_enabled)
- clk_disable_unprepare(pll1_sys_clk);
+ clk_disable_unprepare(clks[PLL1_SYS].clk);
/* scaling down? scale voltage after frequency */
if (new_freq < old_freq) {
@@ -174,7 +194,7 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
- policy->clk = arm_clk;
+ policy->clk = clks[ARM].clk;
ret = cpufreq_generic_init(policy, freq_table, transition_latency);
policy->suspend_freq = policy->max;
@@ -226,23 +246,61 @@ static void imx6q_opp_check_speed_grading(struct device *dev)
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
- if ((val != OCOTP_CFG3_SPEED_1P2GHZ) &&
- of_machine_is_compatible("fsl,imx6q"))
- if (dev_pm_opp_disable(dev, 1200000000))
- dev_warn(dev, "failed to disable 1.2GHz OPP\n");
if (val < OCOTP_CFG3_SPEED_996MHZ)
if (dev_pm_opp_disable(dev, 996000000))
dev_warn(dev, "failed to disable 996MHz OPP\n");
- if (of_machine_is_compatible("fsl,imx6q")) {
+
+ if (of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6qp")) {
if (val != OCOTP_CFG3_SPEED_852MHZ)
if (dev_pm_opp_disable(dev, 852000000))
dev_warn(dev, "failed to disable 852MHz OPP\n");
+ if (val != OCOTP_CFG3_SPEED_1P2GHZ)
+ if (dev_pm_opp_disable(dev, 1200000000))
+ dev_warn(dev, "failed to disable 1.2GHz OPP\n");
}
iounmap(base);
put_node:
of_node_put(np);
}
+#define OCOTP_CFG3_6UL_SPEED_696MHZ 0x2
+
+static void imx6ul_opp_check_speed_grading(struct device *dev)
+{
+ struct device_node *np;
+ void __iomem *base;
+ u32 val;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
+ if (!np)
+ return;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ dev_err(dev, "failed to map ocotp\n");
+ goto put_node;
+ }
+
+ /*
+ * Speed GRADING[1:0] defines the max speed of ARM:
+ * 2b'00: Reserved;
+ * 2b'01: 528000000Hz;
+ * 2b'10: 696000000Hz;
+ * 2b'11: Reserved;
+	 * We need to set the max speed of ARM according to the fuse map.
+ */
+ val = readl_relaxed(base + OCOTP_CFG3);
+ val >>= OCOTP_CFG3_SPEED_SHIFT;
+ val &= 0x3;
+ if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
+ if (dev_pm_opp_disable(dev, 696000000))
+ dev_warn(dev, "failed to disable 696MHz OPP\n");
+ iounmap(base);
+put_node:
+ of_node_put(np);
+}
+
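The fuse decoding in the helper above can be exercised in isolation; a hedged sketch (OCOTP_CFG3_SPEED_SHIFT is defined earlier in the file and is assumed to be 16 here, and the sample fuse word is made up):

#include <stdint.h>
#include <stdio.h>

#define OCOTP_CFG3_SPEED_SHIFT		16	/* assumed value */
#define OCOTP_CFG3_6UL_SPEED_696MHZ	0x2

int main(void)
{
	uint32_t cfg3 = 0x00020000;	/* sample fuse word, grade 2b'10 */
	uint32_t val = (cfg3 >> OCOTP_CFG3_SPEED_SHIFT) & 0x3;

	/* Grade 2b'10 allows 696MHz; any other grade disables that OPP. */
	printf("speed grade %u -> 696MHz OPP %s\n", val,
	       val == OCOTP_CFG3_6UL_SPEED_696MHZ ? "kept" : "disabled");
	return 0;
}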
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -265,28 +323,15 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
return -ENOENT;
}
- arm_clk = clk_get(cpu_dev, "arm");
- pll1_sys_clk = clk_get(cpu_dev, "pll1_sys");
- pll1_sw_clk = clk_get(cpu_dev, "pll1_sw");
- step_clk = clk_get(cpu_dev, "step");
- pll2_pfd2_396m_clk = clk_get(cpu_dev, "pll2_pfd2_396m");
- if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
- IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
- dev_err(cpu_dev, "failed to get clocks\n");
- ret = -ENOENT;
- goto put_clk;
- }
-
if (of_machine_is_compatible("fsl,imx6ul") ||
- of_machine_is_compatible("fsl,imx6ull")) {
- pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
- secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
- if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {
- dev_err(cpu_dev, "failed to get clocks specific to imx6ul\n");
- ret = -ENOENT;
- goto put_clk;
- }
- }
+ of_machine_is_compatible("fsl,imx6ull"))
+ num_clks = IMX6UL_CPUFREQ_CLK_NUM;
+ else
+ num_clks = IMX6Q_CPUFREQ_CLK_NUM;
+
+ ret = clk_bulk_get(cpu_dev, num_clks, clks);
+ if (ret)
+ goto put_node;
arm_reg = regulator_get(cpu_dev, "arm");
pu_reg = regulator_get_optional(cpu_dev, "pu");
@@ -310,7 +355,10 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_reg;
}
- imx6q_opp_check_speed_grading(cpu_dev);
+ if (of_machine_is_compatible("fsl,imx6ul"))
+ imx6ul_opp_check_speed_grading(cpu_dev);
+ else
+ imx6q_opp_check_speed_grading(cpu_dev);
/* Because we have added the OPPs here, we must free them */
free_opp = true;
@@ -423,22 +471,11 @@ put_reg:
regulator_put(pu_reg);
if (!IS_ERR(soc_reg))
regulator_put(soc_reg);
-put_clk:
- if (!IS_ERR(arm_clk))
- clk_put(arm_clk);
- if (!IS_ERR(pll1_sys_clk))
- clk_put(pll1_sys_clk);
- if (!IS_ERR(pll1_sw_clk))
- clk_put(pll1_sw_clk);
- if (!IS_ERR(step_clk))
- clk_put(step_clk);
- if (!IS_ERR(pll2_pfd2_396m_clk))
- clk_put(pll2_pfd2_396m_clk);
- if (!IS_ERR(pll2_bus_clk))
- clk_put(pll2_bus_clk);
- if (!IS_ERR(secondary_sel_clk))
- clk_put(secondary_sel_clk);
+
+ clk_bulk_put(num_clks, clks);
+put_node:
of_node_put(np);
+
return ret;
}
@@ -452,13 +489,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
if (!IS_ERR(pu_reg))
regulator_put(pu_reg);
regulator_put(soc_reg);
- clk_put(arm_clk);
- clk_put(pll1_sys_clk);
- clk_put(pll1_sw_clk);
- clk_put(step_clk);
- clk_put(pll2_pfd2_396m_clk);
- clk_put(pll2_bus_clk);
- clk_put(secondary_sel_clk);
+
+ clk_bulk_put(num_clks, clks);
return 0;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 93a0e88bef76..7edf7a0e5a96 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1595,15 +1595,6 @@ static const struct pstate_funcs knl_funcs = {
.get_val = core_get_val,
};
-static const struct pstate_funcs bxt_funcs = {
- .get_max = core_get_max_pstate,
- .get_max_physical = core_get_max_pstate_physical,
- .get_min = core_get_min_pstate,
- .get_turbo = core_get_turbo_pstate,
- .get_scaling = core_get_scaling,
- .get_val = core_get_val,
-};
-
#define ICPU(model, policy) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
(unsigned long)&policy }
@@ -1627,8 +1618,9 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs),
- ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_funcs),
- ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, bxt_funcs),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs),
+ ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, core_funcs),
+ ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index c46a12df40dd..5faa37c5b091 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -894,7 +894,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
longhaul_setup_voltagescaling();
- policy->cpuinfo.transition_latency = 200000; /* nsec */
+ policy->transition_delay_us = 200000; /* usec */
return cpufreq_table_validate_and_show(policy, longhaul_table);
}
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index e0d5090b303d..8c04dddd3c28 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -310,28 +310,8 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info = policy->driver_data;
- struct device_node *np = of_node_get(info->cpu_dev->of_node);
- u32 capacitance = 0;
- if (WARN_ON(!np))
- return;
-
- if (of_find_property(np, "#cooling-cells", NULL)) {
- of_property_read_u32(np, DYNAMIC_POWER, &capacitance);
-
- info->cdev = of_cpufreq_power_cooling_register(np,
- policy, capacitance, NULL);
-
- if (IS_ERR(info->cdev)) {
- dev_err(info->cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(info->cdev));
-
- info->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ info->cdev = of_cpufreq_cooling_register(policy);
}
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
@@ -574,6 +554,7 @@ static struct platform_driver mtk_cpufreq_platdrv = {
/* List of machines supported by this driver */
static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt2712", },
{ .compatible = "mediatek,mt7622", },
{ .compatible = "mediatek,mt7623", },
{ .compatible = "mediatek,mt817x", },
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
index ed915ee85dd9..31513bd42705 100644
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -76,12 +76,6 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
return PTR_ERR(clk);
}
- /*
- * In case of a failure of dev_pm_opp_add(), we don't
- * bother with cleaning up the registered OPP (there's
- * no function to do so), and simply cancel the
- * registration of the cpufreq device.
- */
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
if (ret) {
clk_put(clk);
@@ -91,7 +85,8 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
if (ret) {
clk_put(clk);
- return ret;
+ dev_err(cpu_dev, "Failed to register OPPs\n");
+ goto opp_register_failed;
}
ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
@@ -99,9 +94,16 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
if (ret)
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
+ clk_put(clk);
}
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
return 0;
+
+opp_register_failed:
+	/* As registering has failed, remove all the OPPs for all CPUs */
+ dev_pm_opp_cpumask_remove_table(cpu_possible_mask);
+
+ return ret;
}
device_initcall(armada_xp_pmsu_cpufreq_init);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index b6d7c4c98d0a..29cdec198657 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -29,6 +29,7 @@
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/hashtable.h>
#include <trace/events/power.h>
#include <asm/cputhreads.h>
@@ -38,14 +39,13 @@
#include <asm/opal.h>
#include <linux/timer.h>
-#define POWERNV_MAX_PSTATES 256
+#define POWERNV_MAX_PSTATES_ORDER 8
+#define POWERNV_MAX_PSTATES (1UL << (POWERNV_MAX_PSTATES_ORDER))
#define PMSR_PSAFE_ENABLE (1UL << 30)
#define PMSR_SPR_EM_DISABLE (1UL << 31)
-#define PMSR_MAX(x) ((x >> 32) & 0xFF)
+#define MAX_PSTATE_SHIFT 32
#define LPSTATE_SHIFT 48
#define GPSTATE_SHIFT 56
-#define GET_LPSTATE(x) (((x) >> LPSTATE_SHIFT) & 0xFF)
-#define GET_GPSTATE(x) (((x) >> GPSTATE_SHIFT) & 0xFF)
#define MAX_RAMP_DOWN_TIME 5120
/*
@@ -94,6 +94,27 @@ struct global_pstate_info {
};
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
+
+DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
+/**
+ * struct pstate_idx_revmap_data - Entry in the pstate_revmap hashtable,
+ *				   indexed by a function of the pstate id.
+ *
+ * @pstate_id: pstate id for this entry.
+ * @cpufreq_table_idx: Index into the powernv_freqs
+ *		       cpufreq_frequency_table for the frequency
+ *		       corresponding to @pstate_id.
+ * @hentry: hlist_node that hooks this entry into the pstate_revmap
+ *	    hashtable.
+ */
+struct pstate_idx_revmap_data {
+ u8 pstate_id;
+ unsigned int cpufreq_table_idx;
+ struct hlist_node hentry;
+};
+
static bool rebooting, throttled, occ_reset;
static const char * const throttle_reason[] = {
@@ -148,39 +169,56 @@ static struct powernv_pstate_info {
bool wof_enabled;
} powernv_pstate_info;
-/* Use following macros for conversions between pstate_id and index */
-static inline int idx_to_pstate(unsigned int i)
+static inline u8 extract_pstate(u64 pmsr_val, unsigned int shift)
+{
+ return ((pmsr_val >> shift) & 0xFF);
+}
+
+#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT)
+#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT)
+#define extract_max_pstate(x) extract_pstate(x, MAX_PSTATE_SHIFT)
+
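These helpers just slice one byte out of the 64-bit PMSR image. A standalone sketch with a made-up register value (the shift values are the ones defined above):

#include <stdint.h>
#include <stdio.h>

#define MAX_PSTATE_SHIFT	32
#define LPSTATE_SHIFT		48
#define GPSTATE_SHIFT		56

static uint8_t extract_pstate(uint64_t pmsr_val, unsigned int shift)
{
	return (pmsr_val >> shift) & 0xFF;
}

int main(void)
{
	uint64_t pmsr = 0x20C0000035000000ULL;	/* made-up sample value */

	/* Prints: local=0xc0 global=0x20 max=0x35 */
	printf("local=0x%x global=0x%x max=0x%x\n",
	       extract_pstate(pmsr, LPSTATE_SHIFT),
	       extract_pstate(pmsr, GPSTATE_SHIFT),
	       extract_pstate(pmsr, MAX_PSTATE_SHIFT));
	return 0;
}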
+/* Use following functions for conversions between pstate_id and index */
+
+/**
+ * idx_to_pstate - Returns the pstate id corresponding to the
+ *		   frequency in the cpufreq frequency table
+ *		   powernv_freqs indexed by @i.
+ *
+ *		   If @i is out of bounds, this will return the pstate
+ *		   corresponding to the nominal frequency.
+ */
+static inline u8 idx_to_pstate(unsigned int i)
{
if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
- pr_warn_once("index %u is out of bound\n", i);
+		pr_warn_once("idx_to_pstate: index %u is out of bounds\n", i);
return powernv_freqs[powernv_pstate_info.nominal].driver_data;
}
return powernv_freqs[i].driver_data;
}
-static inline unsigned int pstate_to_idx(int pstate)
+/**
+ * pstate_to_idx - Returns the index in the cpufreq frequency table
+ * powernv_freqs for the frequency whose corresponding
+ * pstate id is @pstate.
+ *
+ * If no frequency corresponding to @pstate is found,
+ * this will return the index of the nominal
+ * frequency.
+ */
+static unsigned int pstate_to_idx(u8 pstate)
{
- int min = powernv_freqs[powernv_pstate_info.min].driver_data;
- int max = powernv_freqs[powernv_pstate_info.max].driver_data;
+ unsigned int key = pstate % POWERNV_MAX_PSTATES;
+ struct pstate_idx_revmap_data *revmap_data;
- if (min > 0) {
- if (unlikely((pstate < max) || (pstate > min))) {
- pr_warn_once("pstate %d is out of bound\n", pstate);
- return powernv_pstate_info.nominal;
- }
- } else {
- if (unlikely((pstate > max) || (pstate < min))) {
- pr_warn_once("pstate %d is out of bound\n", pstate);
- return powernv_pstate_info.nominal;
- }
+ hash_for_each_possible(pstate_revmap, revmap_data, hentry, key) {
+ if (revmap_data->pstate_id == pstate)
+ return revmap_data->cpufreq_table_idx;
}
- /*
- * abs() is deliberately used so that is works with
- * both monotonically increasing and decreasing
- * pstate values
- */
- return abs(pstate - idx_to_pstate(powernv_pstate_info.max));
+
+ pr_warn_once("pstate_to_idx: pstate 0x%x not found\n", pstate);
+ return powernv_pstate_info.nominal;
}
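The reverse map replaces the old min/max arithmetic with a plain hash lookup, which also copes with sparse or non-monotonic pstate ids. A minimal userspace analogue using fixed-size buckets with chaining (256 buckets mirroring POWERNV_MAX_PSTATES; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 256	/* mirrors POWERNV_MAX_PSTATES */

struct revmap {
	uint8_t pstate_id;
	unsigned int table_idx;
	struct revmap *next;	/* chain for colliding keys */
};

static struct revmap *bucket[NBUCKETS];

static void revmap_add(uint8_t pstate, unsigned int idx)
{
	struct revmap *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->pstate_id = pstate;
	e->table_idx = idx;
	e->next = bucket[pstate % NBUCKETS];
	bucket[pstate % NBUCKETS] = e;
}

static int pstate_to_idx(uint8_t pstate)
{
	struct revmap *e;

	for (e = bucket[pstate % NBUCKETS]; e; e = e->next)
		if (e->pstate_id == pstate)
			return e->table_idx;
	return -1;	/* the driver falls back to the nominal index */
}

int main(void)
{
	/* Pstate ids need not be contiguous, e.g. 0xFF, 0xFD, 0xFB. */
	revmap_add(0xFF, 0);
	revmap_add(0xFD, 1);
	revmap_add(0xFB, 2);

	printf("0xFD -> %d, 0xF0 -> %d\n",
	       pstate_to_idx(0xFD), pstate_to_idx(0xF0));
	return 0;
}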
static inline void reset_gpstates(struct cpufreq_policy *policy)
@@ -247,7 +285,7 @@ static int init_powernv_pstates(void)
powernv_pstate_info.wof_enabled = true;
next:
- pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
+ pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
pstate_nominal, pstate_max);
pr_info("Workload Optimized Frequency is %s in the platform\n",
(powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
@@ -278,19 +316,30 @@ next:
powernv_pstate_info.nr_pstates = nr_pstates;
pr_debug("NR PStates %d\n", nr_pstates);
+
for (i = 0; i < nr_pstates; i++) {
u32 id = be32_to_cpu(pstate_ids[i]);
u32 freq = be32_to_cpu(pstate_freqs[i]);
+ struct pstate_idx_revmap_data *revmap_data;
+ unsigned int key;
pr_debug("PState id %d freq %d MHz\n", id, freq);
powernv_freqs[i].frequency = freq * 1000; /* kHz */
- powernv_freqs[i].driver_data = id;
+ powernv_freqs[i].driver_data = id & 0xFF;
+
+		revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+		if (revmap_data) {
+			revmap_data->pstate_id = id & 0xFF;
+			revmap_data->cpufreq_table_idx = i;
+			key = revmap_data->pstate_id % POWERNV_MAX_PSTATES;
+			hash_add(pstate_revmap, &revmap_data->hentry, key);
+		}
if (id == pstate_max)
powernv_pstate_info.max = i;
- else if (id == pstate_nominal)
+ if (id == pstate_nominal)
powernv_pstate_info.nominal = i;
- else if (id == pstate_min)
+ if (id == pstate_min)
powernv_pstate_info.min = i;
if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
@@ -307,14 +356,13 @@ next:
}
/* Returns the CPU frequency corresponding to the pstate_id. */
-static unsigned int pstate_id_to_freq(int pstate_id)
+static unsigned int pstate_id_to_freq(u8 pstate_id)
{
int i;
i = pstate_to_idx(pstate_id);
if (i >= powernv_pstate_info.nr_pstates || i < 0) {
- pr_warn("PState id %d outside of PState table, "
- "reporting nominal id %d instead\n",
+ pr_warn("PState id 0x%x outside of PState table, reporting nominal id 0x%x instead\n",
pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
i = powernv_pstate_info.nominal;
}
@@ -420,8 +468,8 @@ static inline void set_pmspr(unsigned long sprn, unsigned long val)
*/
struct powernv_smp_call_data {
unsigned int freq;
- int pstate_id;
- int gpstate_id;
+ u8 pstate_id;
+ u8 gpstate_id;
};
/*
@@ -438,22 +486,15 @@ struct powernv_smp_call_data {
static void powernv_read_cpu_freq(void *arg)
{
unsigned long pmspr_val;
- s8 local_pstate_id;
struct powernv_smp_call_data *freq_data = arg;
pmspr_val = get_pmspr(SPRN_PMSR);
-
- /*
- * The local pstate id corresponds bits 48..55 in the PMSR.
- * Note: Watch out for the sign!
- */
- local_pstate_id = (pmspr_val >> 48) & 0xFF;
- freq_data->pstate_id = local_pstate_id;
+ freq_data->pstate_id = extract_local_pstate(pmspr_val);
freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
- pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
- raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
- freq_data->freq);
+ pr_debug("cpu %d pmsr %016lX pstate_id 0x%x frequency %d kHz\n",
+ raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
+ freq_data->freq);
}
/*
@@ -515,21 +556,21 @@ static void powernv_cpufreq_throttle_check(void *data)
struct chip *chip;
unsigned int cpu = smp_processor_id();
unsigned long pmsr;
- int pmsr_pmax;
+ u8 pmsr_pmax;
unsigned int pmsr_pmax_idx;
pmsr = get_pmspr(SPRN_PMSR);
chip = this_cpu_read(chip_info);
/* Check for Pmax Capping */
- pmsr_pmax = (s8)PMSR_MAX(pmsr);
+ pmsr_pmax = extract_max_pstate(pmsr);
pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
if (pmsr_pmax_idx != powernv_pstate_info.max) {
if (chip->throttled)
goto next;
chip->throttled = true;
if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
- pr_warn_once("CPU %d on Chip %u has Pmax(%d) reduced below nominal frequency(%d)\n",
+ pr_warn_once("CPU %d on Chip %u has Pmax(0x%x) reduced below that of nominal frequency(0x%x)\n",
cpu, chip->id, pmsr_pmax,
idx_to_pstate(powernv_pstate_info.nominal));
chip->throttle_sub_turbo++;
@@ -645,8 +686,8 @@ void gpstate_timer_handler(struct timer_list *t)
* value. Hence, read from PMCR to get correct data.
*/
val = get_pmspr(SPRN_PMCR);
- freq_data.gpstate_id = (s8)GET_GPSTATE(val);
- freq_data.pstate_id = (s8)GET_LPSTATE(val);
+ freq_data.gpstate_id = extract_global_pstate(val);
+ freq_data.pstate_id = extract_local_pstate(val);
if (freq_data.gpstate_id == freq_data.pstate_id) {
reset_gpstates(policy);
spin_unlock(&gpstates->gpstate_lock);
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 4ada55b8856e..0562761a3dec 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -275,20 +275,8 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
{
struct cpu_data *cpud = policy->driver_data;
- struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
- if (of_find_property(np, "#cooling-cells", NULL)) {
- cpud->cdev = of_cpufreq_cooling_register(np, policy);
-
- if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) {
- pr_err("cpu%d is not running as cooling device: %ld\n",
- policy->cpu, PTR_ERR(cpud->cdev));
-
- cpud->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ cpud->cdev = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver qoriq_cpufreq_driver = {
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 05d299052c5c..247fcbfa4cb5 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -18,27 +18,89 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_cooling.h>
+#include <linux/export.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/scpi_protocol.h>
+#include <linux/slab.h>
#include <linux/types.h>
-#include "arm_big_little.h"
+struct scpi_data {
+ struct clk *clk;
+ struct device *cpu_dev;
+ struct thermal_cooling_device *cdev;
+};
static struct scpi_ops *scpi_ops;
-static int scpi_get_transition_latency(struct device *cpu_dev)
+static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+ struct scpi_data *priv = policy->driver_data;
+ unsigned long rate = clk_get_rate(priv->clk);
+
+ return rate / 1000;
+}
+
+static int
+scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct scpi_data *priv = policy->driver_data;
+ u64 rate = policy->freq_table[index].frequency * 1000;
+ int ret;
+
+ ret = clk_set_rate(priv->clk, rate);
+ if (!ret && (clk_get_rate(priv->clk) != rate))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int
+scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
- return scpi_ops->get_transition_latency(cpu_dev);
+ int cpu, domain, tdomain;
+ struct device *tcpu_dev;
+
+ domain = scpi_ops->device_domain_id(cpu_dev);
+ if (domain < 0)
+ return domain;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ tcpu_dev = get_cpu_device(cpu);
+ if (!tcpu_dev)
+ continue;
+
+ tdomain = scpi_ops->device_domain_id(tcpu_dev);
+ if (tdomain == domain)
+ cpumask_set_cpu(cpu, cpumask);
+ }
+
+ return 0;
}
-static int scpi_init_opp_table(const struct cpumask *cpumask)
+static int scpi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
- struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
+ unsigned int latency;
+ struct device *cpu_dev;
+ struct scpi_data *priv;
+ struct cpufreq_frequency_table *freq_table;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", policy->cpu);
+ return -ENODEV;
+ }
ret = scpi_ops->add_opps_to_device(cpu_dev);
if (ret) {
@@ -46,32 +108,133 @@ static int scpi_init_opp_table(const struct cpumask *cpumask)
return ret;
}
- ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask);
- if (ret)
+ ret = scpi_get_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to get sharing cpumask\n");
+ return ret;
+ }
+
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
+ return ret;
+ }
+
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free_opp;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto out_free_priv;
+ }
+
+ priv->cpu_dev = cpu_dev;
+ priv->clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d\n",
+ __func__, cpu_dev->id);
+		ret = PTR_ERR(priv->clk);
+		goto out_free_cpufreq_table;
+ }
+
+ policy->driver_data = priv;
+
+ ret = cpufreq_table_validate_and_show(policy, freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
+ ret);
+ goto out_put_clk;
+ }
+
+ /* scpi allows DVFS request for any domain from any CPU */
+ policy->dvfs_possible_from_any_cpu = true;
+
+ latency = scpi_ops->get_transition_latency(cpu_dev);
+ if (!latency)
+ latency = CPUFREQ_ETERNAL;
+
+ policy->cpuinfo.transition_latency = latency;
+
+ policy->fast_switch_possible = false;
+ return 0;
+
+out_put_clk:
+ clk_put(priv->clk);
+out_free_cpufreq_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_priv:
+ kfree(priv);
+out_free_opp:
+ dev_pm_opp_cpumask_remove_table(policy->cpus);
+
return ret;
}
-static const struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
- .name = "scpi",
- .get_transition_latency = scpi_get_transition_latency,
- .init_opp_table = scpi_init_opp_table,
- .free_opp_table = dev_pm_opp_cpumask_remove_table,
+static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct scpi_data *priv = policy->driver_data;
+
+ cpufreq_cooling_unregister(priv->cdev);
+ clk_put(priv->clk);
+ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ kfree(priv);
+ dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+
+ return 0;
+}
+
+static void scpi_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ struct scpi_data *priv = policy->driver_data;
+ struct thermal_cooling_device *cdev;
+
+ cdev = of_cpufreq_cooling_register(policy);
+ if (!IS_ERR(cdev))
+ priv->cdev = cdev;
+}
+
+static struct cpufreq_driver scpi_cpufreq_driver = {
+ .name = "scpi-cpufreq",
+ .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .attr = cpufreq_generic_attr,
+ .get = scpi_cpufreq_get_rate,
+ .init = scpi_cpufreq_init,
+ .exit = scpi_cpufreq_exit,
+ .ready = scpi_cpufreq_ready,
+ .target_index = scpi_cpufreq_set_target,
};
static int scpi_cpufreq_probe(struct platform_device *pdev)
{
+ int ret;
+
scpi_ops = get_scpi_ops();
if (!scpi_ops)
return -EIO;
- return bL_cpufreq_register(&scpi_cpufreq_ops);
+ ret = cpufreq_register_driver(&scpi_cpufreq_driver);
+ if (ret)
+ dev_err(&pdev->dev, "%s: registering cpufreq failed, err: %d\n",
+ __func__, ret);
+ return ret;
}
static int scpi_cpufreq_remove(struct platform_device *pdev)
{
- bL_cpufreq_unregister(&scpi_cpufreq_ops);
+ cpufreq_unregister_driver(&scpi_cpufreq_driver);
scpi_ops = NULL;
return 0;
}
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 923317f03b4b..a099b7bf74cd 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -50,6 +51,7 @@ struct ti_cpufreq_soc_data {
unsigned long efuse_mask;
unsigned long efuse_shift;
unsigned long rev_offset;
+ bool multi_regulator;
};
struct ti_cpufreq_data {
@@ -57,6 +59,7 @@ struct ti_cpufreq_data {
struct device_node *opp_node;
struct regmap *syscon;
const struct ti_cpufreq_soc_data *soc_data;
+ struct opp_table *opp_table;
};
static unsigned long amx3_efuse_xlate(struct ti_cpufreq_data *opp_data,
@@ -95,6 +98,7 @@ static struct ti_cpufreq_soc_data am3x_soc_data = {
.efuse_offset = 0x07fc,
.efuse_mask = 0x1fff,
.rev_offset = 0x600,
+ .multi_regulator = false,
};
static struct ti_cpufreq_soc_data am4x_soc_data = {
@@ -103,6 +107,7 @@ static struct ti_cpufreq_soc_data am4x_soc_data = {
.efuse_offset = 0x0610,
.efuse_mask = 0x3f,
.rev_offset = 0x600,
+ .multi_regulator = false,
};
static struct ti_cpufreq_soc_data dra7_soc_data = {
@@ -111,6 +116,7 @@ static struct ti_cpufreq_soc_data dra7_soc_data = {
.efuse_mask = 0xf80000,
.efuse_shift = 19,
.rev_offset = 0x204,
+ .multi_regulator = true,
};
/**
@@ -195,12 +201,14 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{},
};
-static int ti_cpufreq_init(void)
+static int ti_cpufreq_probe(struct platform_device *pdev)
{
u32 version[VERSION_COUNT];
struct device_node *np;
const struct of_device_id *match;
+ struct opp_table *ti_opp_table;
struct ti_cpufreq_data *opp_data;
+ const char * const reg_names[] = {"vdd", "vbb"};
int ret;
np = of_find_node_by_path("/");
@@ -247,16 +255,29 @@ static int ti_cpufreq_init(void)
if (ret)
goto fail_put_node;
- ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
- version, VERSION_COUNT));
- if (ret) {
+ ti_opp_table = dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
+ version, VERSION_COUNT);
+ if (IS_ERR(ti_opp_table)) {
dev_err(opp_data->cpu_dev,
"Failed to set supported hardware\n");
+ ret = PTR_ERR(ti_opp_table);
goto fail_put_node;
}
- of_node_put(opp_data->opp_node);
+ opp_data->opp_table = ti_opp_table;
+
+ if (opp_data->soc_data->multi_regulator) {
+ ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev,
+ reg_names,
+ ARRAY_SIZE(reg_names));
+ if (IS_ERR(ti_opp_table)) {
+ dev_pm_opp_put_supported_hw(opp_data->opp_table);
+ ret = PTR_ERR(ti_opp_table);
+ goto fail_put_node;
+ }
+ }
+ of_node_put(opp_data->opp_node);
register_cpufreq_dt:
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
@@ -269,4 +290,22 @@ free_opp_data:
return ret;
}
-device_initcall(ti_cpufreq_init);
+
+static int ti_cpufreq_init(void)
+{
+ platform_device_register_simple("ti-cpufreq", -1, NULL, 0);
+ return 0;
+}
+module_init(ti_cpufreq_init);
+
+static struct platform_driver ti_cpufreq_driver = {
+ .probe = ti_cpufreq_probe,
+ .driver = {
+ .name = "ti-cpufreq",
+ },
+};
+module_platform_driver(ti_cpufreq_driver);
+
+MODULE_DESCRIPTION("TI CPUFreq/OPP hw-supported driver");
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
+MODULE_LICENSE("GPL v2");
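
The regulator hunk above follows the reference-counted OPP helper convention: each successful dev_pm_opp_set_*() call returns an opp_table that the error path must release with the matching put. A condensed sketch, assuming the OPP API exactly as used in this diff; the example_opp_setup() name is hypothetical.

static int example_opp_setup(struct device *cpu_dev,
			     const u32 *version, int count,
			     bool multi_regulator)
{
	static const char * const reg_names[] = { "vdd", "vbb" };
	struct opp_table *hw_table, *reg_table;

	hw_table = dev_pm_opp_set_supported_hw(cpu_dev, version, count);
	if (IS_ERR(hw_table))
		return PTR_ERR(hw_table);

	if (multi_regulator) {
		reg_table = dev_pm_opp_set_regulators(cpu_dev, reg_names,
						      ARRAY_SIZE(reg_names));
		if (IS_ERR(reg_table)) {
			/* unwind in reverse order of acquisition */
			dev_pm_opp_put_supported_hw(hw_table);
			return PTR_ERR(reg_table);
		}
	}
	return 0;
}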
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 4e78263e34a4..5d359aff3cc5 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -36,14 +36,15 @@ static struct cpuidle_governor * __cpuidle_find_governor(const char *str)
/**
* cpuidle_switch_governor - changes the governor
* @gov: the new target governor
- *
- * NOTE: "gov" can be NULL to specify disabled
* Must be called with cpuidle_lock acquired.
*/
int cpuidle_switch_governor(struct cpuidle_governor *gov)
{
struct cpuidle_device *dev;
+ if (!gov)
+ return -EINVAL;
+
if (gov == cpuidle_curr_governor)
return 0;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 47ec920d5b71..4b741b83e23f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -723,7 +723,6 @@ config CRYPTO_DEV_ARTPEC6
select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
- select CRYPTO_SHA384
select CRYPTO_SHA512
help
Enables the driver for the on-chip crypto accelerator
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index eeaf27859d80..ea83d0bff0e9 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -256,10 +256,6 @@ static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
return true;
- /* CCM - fix CBC MAC mismatch in special case */
- if (is_ccm && decrypt && !req->assoclen)
- return true;
-
return false;
}
@@ -330,7 +326,7 @@ int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
- set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
SA_CIPHER_ALG_AES,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index c44954e274bc..76f459ad2821 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -128,7 +128,14 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
- writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ if (dev->is_revb) {
+ writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
+ dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
+ writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
+ dev->ce_base + CRYPTO4XX_INT_EN);
+ } else {
+ writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ }
}
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
@@ -275,14 +282,12 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
*/
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
- dev->gdr = dma_alloc_coherent(dev->core_dev->device,
- sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ &dev->gdr_pa, GFP_ATOMIC);
if (!dev->gdr)
return -ENOMEM;
- memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
-
return 0;
}
@@ -570,15 +575,14 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
{
- struct aead_request *aead_req;
- struct crypto4xx_ctx *ctx;
+ struct aead_request *aead_req = container_of(pd_uinfo->async_req,
+ struct aead_request, base);
struct scatterlist *dst = pd_uinfo->dest_va;
+ size_t cp_len = crypto_aead_authsize(
+ crypto_aead_reqtfm(aead_req));
+ u32 icv[cp_len];
int err = 0;
- aead_req = container_of(pd_uinfo->async_req, struct aead_request,
- base);
- ctx = crypto_tfm_ctx(aead_req->base.tfm);
-
if (pd_uinfo->using_sd) {
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
pd->pd_ctl_len.bf.pkt_len,
@@ -590,38 +594,39 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
/* append icv at the end */
- size_t cp_len = crypto_aead_authsize(
- crypto_aead_reqtfm(aead_req));
- u32 icv[cp_len];
-
crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
cp_len);
scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
cp_len, 1);
+ } else {
+ /* check icv at the end */
+ scatterwalk_map_and_copy(icv, aead_req->src,
+ aead_req->assoclen + aead_req->cryptlen -
+ cp_len, cp_len, 0);
+
+ crypto4xx_memcpy_from_le32(icv, icv, cp_len);
+
+ if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
+ err = -EBADMSG;
}
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd->pd_ctl.bf.status & 0xff) {
- if (pd->pd_ctl.bf.status & 0x1) {
- /* authentication error */
- err = -EBADMSG;
- } else {
- if (!__ratelimit(&dev->aead_ratelimit)) {
- if (pd->pd_ctl.bf.status & 2)
- pr_err("pad fail error\n");
- if (pd->pd_ctl.bf.status & 4)
- pr_err("seqnum fail\n");
- if (pd->pd_ctl.bf.status & 8)
- pr_err("error _notify\n");
- pr_err("aead return err status = 0x%02x\n",
- pd->pd_ctl.bf.status & 0xff);
- pr_err("pd pad_ctl = 0x%08x\n",
- pd->pd_ctl.bf.pd_pad_ctl);
- }
- err = -EINVAL;
+ if (!__ratelimit(&dev->aead_ratelimit)) {
+ if (pd->pd_ctl.bf.status & 2)
+ pr_err("pad fail error\n");
+ if (pd->pd_ctl.bf.status & 4)
+ pr_err("seqnum fail\n");
+ if (pd->pd_ctl.bf.status & 8)
+ pr_err("error _notify\n");
+ pr_err("aead return err status = 0x%02x\n",
+ pd->pd_ctl.bf.status & 0xff);
+ pr_err("pd pad_ctl = 0x%08x\n",
+ pd->pd_ctl.bf.pd_pad_ctl);
}
+ err = -EINVAL;
}
if (pd_uinfo->state & PD_ENTRY_BUSY)
@@ -1070,21 +1075,29 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
/**
* Top Half of isr.
*/
-static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
+ u32 clr_val)
{
struct device *dev = (struct device *)data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
- if (!core_dev->dev->ce_base)
- return 0;
-
- writel(PPC4XX_INTERRUPT_CLR,
- core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
tasklet_schedule(&core_dev->tasklet);
return IRQ_HANDLED;
}
+static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
+}
+
+static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
+ PPC4XX_TMO_ERR_INT);
+}
+
/**
* Supported Crypto Algorithms
*/
@@ -1266,6 +1279,8 @@ static int crypto4xx_probe(struct platform_device *ofdev)
struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
+ u32 pvr;
+ bool is_revb = true;
rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (rc)
@@ -1282,6 +1297,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
mtdcri(SDR0, PPC405EX_SDR0_SRST,
mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
+ is_revb = false;
} else if (of_find_compatible_node(NULL, NULL,
"amcc,ppc460sx-crypto")) {
mtdcri(SDR0, PPC460SX_SDR0_SRST,
@@ -1304,7 +1320,22 @@ static int crypto4xx_probe(struct platform_device *ofdev)
if (!core_dev->dev)
goto err_alloc_dev;
+ /*
+ * Older versions of the 460EX/GT have a hardware bug.
+ * Hence they do not support H/W-based security interrupt coalescing.
+ */
+ pvr = mfspr(SPRN_PVR);
+ if (is_revb && ((pvr >> 4) == 0x130218A)) {
+ u32 min = PVR_MIN(pvr);
+
+ if (min < 4) {
+ dev_info(dev, "RevA detected - disable interrupt coalescing\n");
+ is_revb = false;
+ }
+ }
+
core_dev->dev->core_dev = core_dev;
+ core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
@@ -1325,13 +1356,6 @@ static int crypto4xx_probe(struct platform_device *ofdev)
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
(unsigned long) dev);
- /* Register for Crypto isr, Crypto Engine IRQ */
- core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
- core_dev->dev->name, dev);
- if (rc)
- goto err_request_irq;
-
core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
if (!core_dev->dev->ce_base) {
dev_err(dev, "failed to of_iomap\n");
@@ -1339,6 +1363,15 @@ static int crypto4xx_probe(struct platform_device *ofdev)
goto err_iomap;
}
+ /* Register for Crypto isr, Crypto Engine IRQ */
+ core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ rc = request_irq(core_dev->irq, is_revb ?
+ crypto4xx_ce_interrupt_handler_revb :
+ crypto4xx_ce_interrupt_handler, 0,
+ KBUILD_MODNAME, dev);
+ if (rc)
+ goto err_request_irq;
+
/* need to setup pdr, rdr, gdr and sdr before this */
crypto4xx_hw_init(core_dev->dev);
@@ -1352,11 +1385,11 @@ static int crypto4xx_probe(struct platform_device *ofdev)
return 0;
err_start_dev:
- iounmap(core_dev->dev->ce_base);
-err_iomap:
free_irq(core_dev->irq, dev);
err_request_irq:
irq_dispose_mapping(core_dev->irq);
+ iounmap(core_dev->dev->ce_base);
+err_iomap:
tasklet_kill(&core_dev->tasklet);
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
@@ -1397,7 +1430,7 @@ MODULE_DEVICE_TABLE(of, crypto4xx_match);
static struct platform_driver crypto4xx_driver = {
.driver = {
- .name = MODULE_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
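
The decrypt path above now verifies the ICV in the driver instead of trusting the engine's per-packet status (any non-zero status is collapsed to -EINVAL). A standalone sketch of that check, assuming the ICV trails the ciphertext; the kernel code uses crypto_memneq(), for which memneq() below is a simplified stand-in.

#include <stddef.h>
#include <stdint.h>

/* constant-time inequality test over n bytes */
static int memneq(const void *a, const void *b, size_t n)
{
	const uint8_t *pa = a, *pb = b;
	uint8_t diff = 0;

	while (n--)
		diff |= *pa++ ^ *pb++;
	return diff;		/* non-zero iff the buffers differ */
}

static int check_icv(const uint8_t *pkt, size_t pkt_len,
		     const uint8_t *engine_digest, size_t icv_len)
{
	if (pkt_len < icv_len)
		return -1;
	/* the ICV occupies the last icv_len bytes of assoclen + cryptlen */
	return memneq(pkt + pkt_len - icv_len, engine_digest, icv_len) ?
	       -74 /* -EBADMSG */ : 0;
}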
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 8ac3bd37203b..23b726da6534 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -28,8 +28,6 @@
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
-#define MODULE_NAME "crypto4xx"
-
#define PPC460SX_SDR0_SRST 0x201
#define PPC405EX_SDR0_SRST 0x200
#define PPC460EX_SDR0_SRST 0x201
@@ -82,7 +80,6 @@ struct pd_uinfo {
struct crypto4xx_device {
struct crypto4xx_core_device *core_dev;
- char *name;
void __iomem *ce_base;
void __iomem *trng_base;
@@ -109,6 +106,7 @@ struct crypto4xx_device {
struct list_head alg_list; /* List of algorithm supported
by this device */
struct ratelimit_state aead_ratelimit;
+ bool is_revb;
};
struct crypto4xx_core_device {
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 0a22ec5d1a96..472331787e04 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -121,13 +121,15 @@
#define PPC4XX_PD_SIZE 6
#define PPC4XX_CTX_DONE_INT 0x2000
#define PPC4XX_PD_DONE_INT 0x8000
+#define PPC4XX_TMO_ERR_INT 0x40000
#define PPC4XX_BYTE_ORDER 0x22222
#define PPC4XX_INTERRUPT_CLR 0x3ffff
#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
#define PPC4XX_DC_3DES_EN 1
#define PPC4XX_TRNG_EN 0x00020000
-#define PPC4XX_INT_DESCR_CNT 4
+#define PPC4XX_INT_DESCR_CNT 7
#define PPC4XX_INT_TIMEOUT_CNT 0
+#define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF
#define PPC4XX_INT_CFG 1
/**
* all follow define are ad hoc
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
index 677ca17fd223..5e63742b0d22 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.c
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -92,7 +92,7 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
if (!rng)
goto err_out;
- rng->name = MODULE_NAME;
+ rng->name = KBUILD_MODNAME;
rng->data_present = ppc4xx_trng_data_present;
rng->data_read = ppc4xx_trng_data_read;
rng->priv = (unsigned long) dev;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 456278440863..0fb8bbf41a8d 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
@@ -1934,7 +1935,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
// The HW omits the initial increment of the counter field.
- crypto_inc(req_ctx->hw_ctx.J0+12, 4);
+ memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
@@ -2956,7 +2957,7 @@ static struct aead_alg aead_algos[] = {
.setkey = artpec6_crypto_aead_set_key,
.encrypt = artpec6_crypto_aead_encrypt,
.decrypt = artpec6_crypto_aead_decrypt,
- .ivsize = AES_BLOCK_SIZE,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
@@ -3041,9 +3042,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
variant = (enum artpec6_crypto_variant)match->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
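
The J0 change above is worth spelling out: for the 96-bit IVs implied by GCM_AES_IV_SIZE, NIST SP 800-38D defines the pre-counter block as J0 = IV || 0^31 || 1, i.e. a big-endian 32-bit counter preset to 1. Storing the constant directly makes that construction explicit rather than relying on crypto_inc() bumping a zeroed counter field. A standalone sketch:

#include <stdint.h>
#include <string.h>

#define GCM_IV_BYTES 12	/* 96-bit IV */

static void gcm_build_j0(uint8_t j0[16], const uint8_t iv[GCM_IV_BYTES])
{
	memcpy(j0, iv, GCM_IV_BYTES);
	/* big-endian 32-bit counter, preset to 1 */
	memcpy(j0 + GCM_IV_BYTES, "\x00\x00\x00\x01", 4);
}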
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index ce70b44d0fb6..2b75f95bbe1b 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -42,7 +42,6 @@
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
-#include <crypto/aes.h>
#include <crypto/sha3.h>
#include "util.h"
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index a118b9bed669..bfbf8bf77f03 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -494,7 +494,8 @@ static struct ahash_alg algs = {
.cra_driver_name = DRIVER_NAME,
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
.cra_alignmask = 3,
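
CRYPTO_ALG_OPTIONAL_KEY marks a hash whose ->setkey() need not be called before the first digest; CRC32, which falls back to a default seed, is in that class. Without the flag, the crypto core's keyed-hash enforcement would reject unkeyed use of the transform. A minimal sketch of the declaration (fields abbreviated, names hypothetical):

static struct ahash_alg example_crc_alg = {
	/* ... ops and sizes elided ... */
	.halg.base = {
		.cra_name	= "crc32",
		.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
				  CRYPTO_ALG_ASYNC |
				  /* ->setkey() is optional: a default
				   * seed is used when no key is set */
				  CRYPTO_ALG_OPTIONAL_KEY,
	},
};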
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index baa8dd52472d..2188235be02d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -108,6 +108,7 @@ struct caam_ctx {
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
+ enum dma_data_direction dir;
struct device *jrdev;
struct alginfo adata;
struct alginfo cdata;
@@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
ctx->adata.keylen_pad;
@@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 ctx1_iv_off = 0;
u32 *desc, *nonce = NULL;
u32 inl_mask;
@@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
- false);
+ false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
skip_enc:
/*
@@ -266,9 +271,9 @@ skip_enc:
desc = ctx->sh_desc_dec;
cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, alg->caam.geniv, is_rfc3686,
- nonce, ctx1_iv_off, false);
+ nonce, ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
if (!alg->caam.geniv)
goto skip_givenc;
@@ -300,9 +305,9 @@ skip_enc:
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, false);
+ ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
skip_givenc:
return 0;
@@ -346,7 +351,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -363,7 +368,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -405,7 +410,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -422,7 +427,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -465,7 +470,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -482,7 +487,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -503,6 +508,7 @@ static int aead_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
int ret = 0;
@@ -517,6 +523,27 @@ static int aead_setkey(struct crypto_aead *aead,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
@@ -527,12 +554,14 @@ static int aead_setkey(struct crypto_aead *aead,
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
+
+skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
return aead_set_sh_desc(aead);
badkey:
@@ -552,7 +581,7 @@ static int gcm_setkey(struct crypto_aead *aead,
#endif
memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
ctx->cdata.keylen = keylen;
return gcm_set_sh_desc(aead);
@@ -580,7 +609,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- DMA_TO_DEVICE);
+ ctx->dir);
return rfc4106_set_sh_desc(aead);
}
@@ -606,7 +635,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- DMA_TO_DEVICE);
+ ctx->dir);
return rfc4543_set_sh_desc(aead);
}
@@ -625,7 +654,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const bool is_rfc3686 = (ctr_mode &&
(strstr(alg_name, "rfc3686") != NULL));
- memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -648,9 +676,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* ablkcipher_encrypt shared descriptor */
@@ -658,21 +685,21 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* ablkcipher_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -691,23 +718,21 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -EINVAL;
}
- memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* xts_ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -979,9 +1004,6 @@ static void init_aead_job(struct aead_request *req,
append_seq_out_ptr(desc, dst_dma,
req->assoclen + req->cryptlen - authsize,
out_options);
-
- /* REG3 = assoclen */
- append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}
static void init_gcm_job(struct aead_request *req,
@@ -996,6 +1018,7 @@ static void init_gcm_job(struct aead_request *req,
unsigned int last;
init_aead_job(req, edesc, all_contig, encrypt);
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
/* BUG This should not be specific to generic GCM. */
last = 0;
@@ -1022,6 +1045,7 @@ static void init_authenc_job(struct aead_request *req,
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
@@ -1045,6 +1069,15 @@ static void init_authenc_job(struct aead_request *req,
init_aead_job(req, edesc, all_contig, encrypt);
+ /*
+ * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
+ * having DPOVRD as destination.
+ */
+ if (ctrlpriv->era < 3)
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+ else
+ append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
+
if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
append_load_as_imm(desc, req->iv, ivsize,
LDST_CLASS_1_CCB |
@@ -3228,9 +3261,11 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
{
dma_addr_t dma_addr;
+ struct caam_drv_private *priv;
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
@@ -3238,10 +3273,16 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ if (priv->era >= 6 && uses_dkp)
+ ctx->dir = DMA_BIDIRECTIONAL;
+ else
+ ctx->dir = DMA_TO_DEVICE;
+
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
offsetof(struct caam_ctx,
sh_desc_enc_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
caam_jr_free(ctx->jrdev);
@@ -3269,7 +3310,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam, false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -3279,14 +3320,15 @@ static int caam_aead_init(struct crypto_aead *tfm)
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam,
+ alg->setkey == aead_setkey);
}
static void caam_exit_common(struct caam_ctx *ctx)
{
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
offsetof(struct caam_ctx, sh_desc_enc_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
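
Two DKP-related conventions recur in the hunk above; the sketch below condenses them and is an inference from this diff, not a full CAAM reference. First, the context buffer keeps authkey | padding | enckey, so the encryption key always lands at adata.keylen_pad. Second, when the device derives the split key itself (Era >= 6 with DKP), it writes the result back into that buffer, so the mapping must be DMA_BIDIRECTIONAL rather than DMA_TO_DEVICE.

static enum dma_data_direction caam_ctx_dir(int era, bool uses_dkp)
{
	return (era >= 6 && uses_dkp) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
}

static void caam_lay_out_keys(u8 *ctx_key, size_t keylen_pad,
			      const u8 *authkey, size_t authkeylen,
			      const u8 *enckey, size_t enckeylen)
{
	/* split (auth) key first, padded out to keylen_pad ... */
	memcpy(ctx_key, authkey, authkeylen);
	/* ... then the encryption key immediately after the padding */
	memcpy(ctx_key + keylen_pad, enckey, enckeylen);
}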
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 530c14ee32de..ceb93fbb76e6 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *desc, u32 type)
* cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
* (non-protocol) with no (null) encryption.
* @desc: pointer to buffer used for descriptor construction
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @icvsize: integrity check value (ICV) size (truncated or full)
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize)
+ unsigned int icvsize, int era)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
@@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
- KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
set_jump_tgt_here(desc, key_jump_cmd);
/* assoclen + cryptlen = seqinlen */
@@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
* cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
* (non-protocol) with no (null) decryption.
* @desc: pointer to buffer used for descriptor construction
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @icvsize: integrity check value (ICV) size (truncated or full)
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize)
+ unsigned int icvsize, int era)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
@@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
set_jump_tgt_here(desc, key_jump_cmd);
/* Class 2 operation */
@@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
static void init_sh_desc_key_aead(u32 * const desc,
struct alginfo * const cdata,
struct alginfo * const adata,
- const bool is_rfc3686, u32 *nonce)
+ const bool is_rfc3686, u32 *nonce, int era)
{
u32 *key_jump_cmd;
unsigned int enckeylen = cdata->keylen;
@@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 * const desc,
if (is_rfc3686)
enckeylen -= CTR_RFC3686_NONCE_SIZE;
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
if (cdata->key_inline)
append_key_as_imm(desc, cdata->key_virt, enckeylen,
@@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 * const desc,
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
- u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
+ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
+ int era)
{
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
/* Class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
@@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
}
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off, const bool is_qi)
+ const u32 ctx1_iv_off, const bool is_qi, int era)
{
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
/* Class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
@@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
}
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- if (geniv)
- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
- else
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
+ ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
+ CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
+ ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
+ CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi)
+ const bool is_qi, int era)
{
u32 geniv, moveiv;
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
if (is_qi) {
u32 *wait_load_cmd;
@@ -528,8 +561,13 @@ copy_iv:
OP_ALG_ENCRYPT);
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -1075,7 +1113,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
@@ -1140,7 +1178,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
@@ -1209,7 +1247,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
/* Load Nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
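
The REG3/DPOVRD split repeated throughout these descriptors reduces to one predicate: on SEC Era >= 3, the MATH command can address DPOVRD, so job-ring descriptors keep assoclen there and leave REG3 free, while the QI path (and older Eras) stays on REG3. A one-line kernel-style sketch of that decision, condensed from this diff:

static inline bool assoclen_in_dpovrd(bool is_qi, int era)
{
	return !is_qi && era >= 3;
}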
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index e412ec8f7005..5f9445ae2114 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -43,28 +43,28 @@
15 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize);
+ unsigned int icvsize, int era);
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize);
+ unsigned int icvsize, int era);
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi);
+ const bool is_qi, int era);
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off, const bool is_qi);
+ const u32 ctx1_iv_off, const bool is_qi, int era);
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi);
+ const bool is_qi, int era);
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index f9f08fce4356..4aecc9435f69 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -53,6 +53,7 @@ struct caam_ctx {
u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
+ enum dma_data_direction dir;
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
@@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
@@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, true);
+ ctx1_iv_off, true, ctrlpriv->era);
skip_enc:
/* aead_decrypt shared descriptor */
@@ -149,7 +151,8 @@ skip_enc:
cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, alg->caam.geniv,
- is_rfc3686, nonce, ctx1_iv_off, true);
+ is_rfc3686, nonce, ctx1_iv_off, true,
+ ctrlpriv->era);
if (!alg->caam.geniv)
goto skip_givenc;
@@ -176,7 +179,7 @@ skip_enc:
cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, true);
+ ctx1_iv_off, true, ctrlpriv->era);
skip_givenc:
return 0;
@@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
int ret = 0;
@@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
@@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
+skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
@@ -272,7 +298,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
int ret = 0;
- memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -295,9 +320,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
@@ -356,10 +380,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -EINVAL;
}
- memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* xts ablkcipher encrypt, decrypt shared descriptors */
@@ -668,7 +690,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
(mapped_dst_nents > 1 ? mapped_dst_nents : 0);
if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, op_type, 0, 0);
@@ -905,7 +927,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, op_type, 0, 0);
@@ -1058,7 +1080,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
}
if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, GIVENCRYPT, 0, 0);
@@ -2123,7 +2145,8 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
{
struct caam_drv_private *priv;
@@ -2137,8 +2160,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ if (priv->era >= 6 && uses_dkp)
+ ctx->dir = DMA_BIDIRECTIONAL;
+ else
+ ctx->dir = DMA_TO_DEVICE;
+
ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
- DMA_TO_DEVICE);
+ ctx->dir);
if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
dev_err(ctx->jrdev, "unable to map key\n");
caam_jr_free(ctx->jrdev);
@@ -2149,7 +2178,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- priv = dev_get_drvdata(ctx->jrdev->parent);
ctx->qidev = priv->qidev;
spin_lock_init(&ctx->lock);
@@ -2167,7 +2195,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam, false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -2177,7 +2205,8 @@ static int caam_aead_init(struct crypto_aead *tfm)
aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam,
+ alg->setkey == aead_setkey);
}
static void caam_exit_common(struct caam_ctx *ctx)
@@ -2186,8 +2215,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
- dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
- DMA_TO_DEVICE);
+ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
caam_jr_free(ctx->jrdev);
}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 616720a04e7a..0beb28196e20 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -107,6 +107,7 @@ struct caam_hash_ctx {
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
+ enum dma_data_direction dir;
struct device *jrdev;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
int ctx_len;
@@ -241,7 +242,8 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
* read and write to seqout
*/
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
- struct caam_hash_ctx *ctx, bool import_ctx)
+ struct caam_hash_ctx *ctx, bool import_ctx,
+ int era)
{
u32 op = ctx->adata.algtype;
u32 *skip_key_load;
@@ -254,9 +256,12 @@ static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
- ctx->adata.keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6)
+ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+ ctx->adata.keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_proto_dkp(desc, &ctx->adata);
set_jump_tgt_here(desc, skip_key_load);
@@ -289,13 +294,17 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
+ ctx->adata.key_virt = ctx->key;
+
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
- ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ",
@@ -304,9 +313,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ",
@@ -315,9 +325,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
- ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -326,9 +337,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ",
@@ -421,6 +433,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
u8 *hashed_key = NULL;
@@ -441,16 +454,26 @@ static int ahash_setkey(struct crypto_ahash *ahash,
key = hashed_key;
}
- ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
- CAAM_MAX_HASH_KEY_SIZE);
- if (ret)
- goto bad_free_key;
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.key_inline = true;
+ ctx->adata.keylen = keylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad, 1);
-#endif
+ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
+ goto bad_free_key;
+
+ memcpy(ctx->key, key, keylen);
+ } else {
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
+ keylen, CAAM_MAX_HASH_KEY_SIZE);
+ if (ret)
+ goto bad_free_key;
+ }
kfree(hashed_key);
return ahash_set_sh_desc(ahash);
@@ -1715,6 +1738,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
dma_addr_t dma_addr;
+ struct caam_drv_private *priv;
/*
* Get a Job ring from Job Ring driver to ensure in-order
@@ -1726,10 +1750,13 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
offsetof(struct caam_hash_ctx,
sh_desc_update_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
caam_jr_free(ctx->jrdev);
@@ -1764,7 +1791,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
offsetof(struct caam_hash_ctx,
sh_desc_update_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 027e121c6f70..75d280cb2dc0 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -611,6 +611,8 @@ static int caam_probe(struct platform_device *pdev)
goto iounmap_ctrl;
}
+ ctrlpriv->era = caam_get_era();
+
ret = of_platform_populate(nprop, caam_match, NULL, dev);
if (ret) {
dev_err(dev, "JR platform devices creation error\n");
@@ -742,7 +744,7 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
- caam_get_era());
+ ctrlpriv->era);
dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
ctrlpriv->total_jobrs, ctrlpriv->qi_present,
caam_dpaa2 ? "yes" : "no");
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 8142de7ba050..f76ff160a02c 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -444,6 +444,18 @@
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
@@ -1093,6 +1105,22 @@
/* MacSec protinfos */
#define OP_PCL_MACSEC 0x0001
+/* Derived Key Protocol (DKP) Protinfo */
+#define OP_PCL_DKP_SRC_SHIFT 14
+#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_DST_SHIFT 12
+#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_KEY_SHIFT 0
+#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
+
/* PKI unidirectional protocol protinfo bits */
#define OP_PCL_PKPROT_TEST 0x0008
#define OP_PCL_PKPROT_DECRYPT 0x0004
@@ -1452,6 +1480,7 @@
#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
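
The DKP protinfo fields added above pack the key source, destination, and key length into the low half of the OPERATION command word. A standalone sketch of that packing, reusing the shift values from this hunk; OP_PCLID_SHIFT is assumed to be 16 here, and the real word additionally carries OP_TYPE_UNI_PROTOCOL (see desc.h for the authoritative encodings):

#include <stdint.h>
#include <stdio.h>

/* Field encodings taken from the hunk above. */
#define OP_PCL_DKP_SRC_SHIFT	14
#define OP_PCL_DKP_SRC_IMM	(0 << OP_PCL_DKP_SRC_SHIFT)
#define OP_PCL_DKP_DST_SHIFT	12
#define OP_PCL_DKP_DST_IMM	(0 << OP_PCL_DKP_DST_SHIFT)
#define OP_PCL_DKP_KEY_MASK	0xfff

int main(void)
{
	/* Assumes OP_PCLID_SHIFT == 16; 0x23 is OP_PCLID_DKP_SHA256 above. */
	uint32_t op_pclid_dkp_sha256 = 0x23u << 16;
	uint32_t keylen = 20;	/* raw key length in bytes */

	uint32_t op = op_pclid_dkp_sha256 | OP_PCL_DKP_SRC_IMM |
		      OP_PCL_DKP_DST_IMM | (keylen & OP_PCL_DKP_KEY_MASK);

	printf("DKP OPERATION protinfo: 0x%08x\n", op);
	return 0;
}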
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index ba1ca0806f0a..d4256fa4a1d6 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -109,7 +109,7 @@ static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
append_ptr(desc, ptr);
}
-static inline void append_data(u32 * const desc, void *data, int len)
+static inline void append_data(u32 * const desc, const void *data, int len)
{
u32 *offset = desc_end(desc);
@@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
append_cmd(desc, len);
}
-static inline void append_cmd_data(u32 * const desc, void *data, int len,
+static inline void append_cmd_data(u32 * const desc, const void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
@@ -271,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -312,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
@@ -452,7 +452,7 @@ struct alginfo {
unsigned int keylen_pad;
union {
dma_addr_t key_dma;
- void *key_virt;
+ const void *key_virt;
};
bool key_inline;
};
@@ -496,4 +496,45 @@ static inline int desc_inline_query(unsigned int sd_base_len,
return (rem_bytes >= 0) ? 0 : -1;
}
+/**
+ * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ * keylen should be the length of the initial key, while keylen_pad
+ * is the length of the derived (split) key.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
+ * SHA256, SHA384, SHA512}.
+ */
+static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
+{
+ u32 protid;
+
+ /*
+ * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
+ * to OP_PCLID_DKP_{MD5, SHA*}
+ */
+ protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
+ (0x20 << OP_ALG_ALGSEL_SHIFT);
+
+ if (adata->key_inline) {
+ int words;
+
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
+ adata->keylen);
+ append_data(desc, adata->key_virt, adata->keylen);
+
+ /* Reserve space in descriptor buffer for the derived key */
+ words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
+ ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
+ if (words)
+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
+ } else {
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
+ adata->keylen);
+ append_ptr(desc, adata->key_dma);
+ }
+}
+
#endif /* DESC_CONSTR_H */
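
The immediate-key path of append_proto_dkp has a subtle step: after the raw key is inlined, the descriptor header length is bumped so the buffer also reserves room for the longer derived key that the engine writes back over it. A worked sketch of that arithmetic, assuming CAAM_CMD_SZ is 4 bytes (one 32-bit descriptor word):

#include <stdio.h>

#define CAAM_CMD_SZ 4	/* assumed: one 32-bit descriptor word */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int keylen = 20;	/* raw HMAC-SHA256 key, 20 bytes  */
	unsigned int keylen_pad = 64;	/* split key: 2 * 32-byte MD pad  */

	unsigned int words = (ALIGN(keylen_pad, CAAM_CMD_SZ) -
			      ALIGN(keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;

	/* ALIGN(64,4)=64, ALIGN(20,4)=20: (64 - 20) / 4 = 11 extra words */
	printf("reserve %u descriptor words for the derived key\n", words);
	return 0;
}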
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 91f1107276e5..7696a774a362 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -84,6 +84,7 @@ struct caam_drv_private {
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
+ int era; /* CAAM Era (internal HW revision) */
#define RNG4_MAX_HANDLES 2
/* RNG4 block */
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 8c79c3a153dc..312b5f042f31 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -11,36 +11,6 @@
#include "desc_constr.h"
#include "key_gen.h"
-/**
- * split_key_len - Compute MDHA split key length for a given algorithm
- * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- * SHA224, SHA384, SHA512.
- *
- * Return: MDHA split key length
- */
-static inline u32 split_key_len(u32 hash)
-{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
- u32 idx;
-
- idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
-
- return (u32)(mdpadlen[idx] * 2);
-}
-
-/**
- * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
- * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- * SHA224, SHA384, SHA512.
- *
- * Return: MDHA split key pad length
- */
-static inline u32 split_key_pad_len(u32 hash)
-{
- return ALIGN(split_key_len(hash), 16);
-}
-
void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index 5db055c25bd2..818f78f6fc1a 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -6,6 +6,36 @@
*
*/
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline u32 split_key_len(u32 hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ u32 idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
+
struct split_key_result {
struct completion completion;
int err;
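
For reference, the table that split_key_len() and split_key_pad_len() encode works out as follows: the split key is two MD-pad-sized halves, padded up to a 16-byte boundary. A standalone check, with the index derivation simplified to a plain table walk:

#include <stdio.h>

#define ALIGN16(x) (((x) + 15u) & ~15u)

int main(void)
{
	/* MDHA pad sizes: MD5, SHA1, SHA224, SHA256, SHA384, SHA512 */
	static const unsigned int mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	static const char *names[] = { "MD5", "SHA1", "SHA224",
				       "SHA256", "SHA384", "SHA512" };
	unsigned int i;

	/* Prints: MD5 32/32, SHA1 40/48, SHA224 64/64, SHA256 64/64,
	 * SHA384 128/128, SHA512 128/128. */
	for (i = 0; i < 6; i++) {
		unsigned int len = mdpadlen[i] * 2;
		printf("%-6s split key %3u bytes, padded %3u\n",
		       names[i], len, ALIGN16(len));
	}
	return 0;
}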
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index 169e66231bcf..b0ba4331944b 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -459,7 +459,8 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
if (unlikely(!info->completion_addr)) {
dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto request_cleanup;
}
result = (union cpt_res_s *)info->completion_addr;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4addc238a6ef..deaefd532aaa 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -6,7 +6,6 @@
#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"
-#include "nitrox_req.h"
/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index ff02b713c6f6..ca1f0d780b61 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -21,7 +21,6 @@
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
-#include <linux/delay.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 3e104f5aa0c2..5ae9f8706f17 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_AUTHENC
+ select CRYPTO_GF128MUL
---help---
The Chelsio Crypto Co-processor driver for T6 adapters.
@@ -18,3 +19,13 @@ config CRYPTO_DEV_CHELSIO
To compile this driver as a module, choose M here: the module
will be called chcr.
+
+config CHELSIO_IPSEC_INLINE
+ bool "Chelsio IPSec XFRM Tx crypto offload"
+ depends on CHELSIO_T4
+ depends on CRYPTO_DEV_CHELSIO
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ default n
+ ---help---
+ Enable support for inline IPsec crypto offload on the Tx path.
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
index bebdf06687ad..eaecaf1ebcf3 100644
--- a/drivers/crypto/chelsio/Makefile
+++ b/drivers/crypto/chelsio/Makefile
@@ -2,3 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs := chcr_core.o chcr_algo.o
+chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 4eed7171e2ae..34a02d690548 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -73,6 +73,29 @@
#define IV AES_BLOCK_SIZE
+static unsigned int sgl_ent_len[] = {
+ 0, 0, 16, 24, 40, 48, 64, 72, 88,
+ 96, 112, 120, 136, 144, 160, 168, 184,
+ 192, 208, 216, 232, 240, 256, 264, 280,
+ 288, 304, 312, 328, 336, 352, 360, 376
+};
+
+static unsigned int dsgl_ent_len[] = {
+ 0, 32, 32, 48, 48, 64, 64, 80, 80,
+ 112, 112, 128, 128, 144, 144, 160, 160,
+ 192, 192, 208, 208, 224, 224, 240, 240,
+ 272, 272, 288, 288, 304, 304, 320, 320
+};
+
+static u32 round_constant[11] = {
+ 0x01000000, 0x02000000, 0x04000000, 0x08000000,
+ 0x10000000, 0x20000000, 0x40000000, 0x80000000,
+ 0x1B000000, 0x36000000, 0x6C000000
+};
+
+static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+ unsigned char *input, int err);
+
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->aeadctx;
@@ -108,18 +131,6 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
return (skb->len <= SGE_MAX_WR_LEN);
}
-/*
- * sgl_len - calculates the size of an SGL of the given capacity
- * @n: the number of SGL entries
- * Calculates the number of flits needed for a scatter/gather list that
- * can hold the given number of entries.
- */
-static inline unsigned int sgl_len(unsigned int n)
-{
- n--;
- return (3 * n) / 2 + (n & 1) + 2;
-}
-
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
unsigned int entlen,
unsigned int skip)
@@ -160,7 +171,6 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
if (input == NULL)
goto out;
- reqctx = ahash_request_ctx(req);
digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
if (reqctx->is_sg_map)
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
@@ -183,30 +193,17 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
}
out:
req->base.complete(&req->base, err);
+}
- }
-
-static inline void chcr_handle_aead_resp(struct aead_request *req,
- unsigned char *input,
- int err)
+static inline int get_aead_subtype(struct crypto_aead *aead)
{
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
-
-
- chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
- if (reqctx->b0_dma)
- dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
- reqctx->b0_len, DMA_BIDIRECTIONAL);
- if (reqctx->verify == VERIFY_SW) {
- chcr_verify_tag(req, input, &err);
- reqctx->verify = VERIFY_HW;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.aead);
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
- req->base.complete(&req->base, err);
-}
-static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
u8 temp[SHA512_DIGEST_SIZE];
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -231,6 +228,25 @@ static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
*err = 0;
}
+static inline void chcr_handle_aead_resp(struct aead_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
+
+ chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
+ if (reqctx->b0_dma)
+ dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
+ reqctx->b0_len, DMA_BIDIRECTIONAL);
+ if (reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(req, input, &err);
+ reqctx->verify = VERIFY_HW;
+ }
+ req->base.complete(&req->base, err);
+}
+
/*
* chcr_handle_resp - Unmap the DMA buffers associated with the request
* @req: crypto request
@@ -558,7 +574,8 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
skip = 0;
}
}
- if (walk->nents == 0) {
+ WARN(!sg, "SG should not be null here\n");
+ if (sg && (walk->nents == 0)) {
small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
walk->sgl->len0 = cpu_to_be32(sgmin);
@@ -595,14 +612,6 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
}
}
-static inline int get_aead_subtype(struct crypto_aead *aead)
-{
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct chcr_alg_template *chcr_crypto_alg =
- container_of(alg, struct chcr_alg_template, alg.aead);
- return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
-}
-
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
@@ -675,7 +684,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
if (srclen <= dstlen)
break;
less = min_t(unsigned int, sg_dma_len(dst) - offset -
- dstskip, CHCR_DST_SG_SIZE);
+ dstskip, CHCR_DST_SG_SIZE);
dstlen += less;
offset += less;
if (offset == sg_dma_len(dst)) {
@@ -686,7 +695,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
dstskip = 0;
}
src = sg_next(src);
- srcskip = 0;
+ srcskip = 0;
}
return min(srclen, dstlen);
}
@@ -1008,7 +1017,8 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
return bytes;
}
-static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
+static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
+ u32 isfinal)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
@@ -1035,7 +1045,8 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
for (i = 0; i < (round % 8); i++)
gf128mul_x_ble((le128 *)iv, (le128 *)iv);
- crypto_cipher_decrypt_one(cipher, iv, iv);
+ if (!isfinal)
+ crypto_cipher_decrypt_one(cipher, iv, iv);
out:
return ret;
}
@@ -1056,7 +1067,7 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
AES_BLOCK_SIZE) + 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv);
+ ret = chcr_update_tweak(req, iv, 0);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
@@ -1087,7 +1098,7 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
ctr_add_iv(iv, req->info, (reqctx->processed /
AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv);
+ ret = chcr_update_tweak(req, iv, 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
@@ -1101,7 +1112,6 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
}
-
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unsigned char *input, int err)
{
@@ -1135,10 +1145,10 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
SPACE_LEFT(ablkctx->enckey_len),
reqctx->src_ofst, reqctx->dst_ofst);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
- else
- bytes = ROUND_16(bytes);
+ if ((bytes + reqctx->processed) >= req->nbytes)
+ bytes = req->nbytes - reqctx->processed;
+ else
+ bytes = ROUND_16(bytes);
} else {
/* CTR mode counter overflow */
bytes = req->nbytes - reqctx->processed;
@@ -1239,15 +1249,15 @@ static int process_cipher(struct ablkcipher_request *req,
MIN_CIPHER_SG,
SPACE_LEFT(ablkctx->enckey_len),
0, 0);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
- else
- bytes = ROUND_16(bytes);
+ if ((bytes + reqctx->processed) >= req->nbytes)
+ bytes = req->nbytes - reqctx->processed;
+ else
+ bytes = ROUND_16(bytes);
} else {
bytes = req->nbytes;
}
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
- CRYPTO_ALG_SUB_TYPE_CTR) {
+ CRYPTO_ALG_SUB_TYPE_CTR) {
bytes = adjust_ctr_overflow(req->info, bytes);
}
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
@@ -2014,11 +2024,8 @@ static int chcr_aead_common_init(struct aead_request *req,
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
int error = -EINVAL;
- unsigned int dst_size;
unsigned int authsize = crypto_aead_authsize(tfm);
- dst_size = req->assoclen + req->cryptlen + (op_type ?
- -authsize : authsize);
/* validate key size */
if (aeadctx->enckey_len == 0)
goto err;
@@ -2083,7 +2090,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
unsigned int transhdr_len;
- unsigned int dst_size = 0, temp;
+ unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
unsigned int kctx_len = 0, dnents;
unsigned int assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
@@ -2097,24 +2104,19 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
return NULL;
reqctx->b0_dma = 0;
- if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
null = 1;
assoclen = 0;
}
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
error = chcr_aead_common_init(req, op_type);
if (error)
return ERR_PTR(error);
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
- (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
- req->assoclen);
- dnents += MIN_AUTH_SG; // For IV
- } else {
- dnents = 0;
- }
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
+ req->assoclen);
+ dnents += MIN_AUTH_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
@@ -2162,16 +2164,23 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
temp & 0xF,
null ? 0 : assoclen + IV + 1,
temp, temp);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
+ temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
+ else
+ temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
- CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ temp,
actx->auth_mode, aeadctx->hmac_ctrl,
IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
- if (op_type == CHCR_ENCRYPT_OP)
+ if (op_type == CHCR_ENCRYPT_OP ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
memcpy(chcr_req->key_ctx.key, aeadctx->key,
aeadctx->enckey_len);
else
@@ -2181,7 +2190,16 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
4), actx->h_iopad, kctx_len -
(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
- memcpy(reqctx->iv, req->iv, IV);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+ CTR_RFC3686_IV_SIZE);
+ *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+ CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+ } else {
+ memcpy(reqctx->iv, req->iv, IV);
+ }
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
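
The CTR_SHA/CTR_NULL branch above builds the RFC 3686 counter block rather than copying the caller's IV verbatim: 4 bytes of per-key nonce, the caller's 8-byte IV, then a 32-bit big-endian block counter starting at 1. A minimal userspace sketch of that 16-byte layout:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl */

#define CTR_RFC3686_NONCE_SIZE 4
#define CTR_RFC3686_IV_SIZE    8

/* Assemble the AES-CTR counter block per RFC 3686:
 * nonce (4) || IV (8) || counter (4, big endian, starts at 1). */
static void rfc3686_iv(uint8_t out[16], const uint8_t *nonce,
		       const uint8_t *iv)
{
	uint32_t one = htonl(1);

	memcpy(out, nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(out + CTR_RFC3686_NONCE_SIZE, iv, CTR_RFC3686_IV_SIZE);
	memcpy(out + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE,
	       &one, sizeof(one));
}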
@@ -2202,9 +2220,9 @@ err:
return ERR_PTR(error);
}
-static int chcr_aead_dma_map(struct device *dev,
- struct aead_request *req,
- unsigned short op_type)
+int chcr_aead_dma_map(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
{
int error;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2246,9 +2264,9 @@ err:
return -ENOMEM;
}
-static void chcr_aead_dma_unmap(struct device *dev,
- struct aead_request *req,
- unsigned short op_type)
+void chcr_aead_dma_unmap(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -2273,10 +2291,10 @@ static void chcr_aead_dma_unmap(struct device *dev,
}
}
-static inline void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen,
- unsigned short op_type)
+void chcr_add_aead_src_ent(struct aead_request *req,
+ struct ulptx_sgl *ulptx,
+ unsigned int assoclen,
+ unsigned short op_type)
{
struct ulptx_walk ulp_walk;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2308,11 +2326,11 @@ static inline void chcr_add_aead_src_ent(struct aead_request *req,
}
}
-static inline void chcr_add_aead_dst_ent(struct aead_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
- unsigned short op_type,
- unsigned short qid)
+void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen,
+ unsigned short op_type,
+ unsigned short qid)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -2330,9 +2348,9 @@ static inline void chcr_add_aead_dst_ent(struct aead_request *req,
dsgl_walk_end(&dsgl_walk, qid);
}
-static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
- struct ulptx_sgl *ulptx,
- struct cipher_wr_param *wrparam)
+void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam)
{
struct ulptx_walk ulp_walk;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2355,10 +2373,10 @@ static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
}
}
-static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- struct cipher_wr_param *wrparam,
- unsigned short qid)
+void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct dsgl_walk dsgl_walk;
@@ -2373,9 +2391,9 @@ static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
dsgl_walk_end(&dsgl_walk, qid);
}
-static inline void chcr_add_hash_src_ent(struct ahash_request *req,
- struct ulptx_sgl *ulptx,
- struct hash_wr_param *param)
+void chcr_add_hash_src_ent(struct ahash_request *req,
+ struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param)
{
struct ulptx_walk ulp_walk;
struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
@@ -2395,16 +2413,13 @@ static inline void chcr_add_hash_src_ent(struct ahash_request *req,
ulptx_walk_add_page(&ulp_walk, param->bfr_len,
&reqctx->dma_addr);
ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
- 0);
-// reqctx->srcsg = ulp_walk.last_sg;
-// reqctx->src_ofst = ulp_walk.last_sg_len;
- ulptx_walk_end(&ulp_walk);
+ 0);
+ ulptx_walk_end(&ulp_walk);
}
}
-
-static inline int chcr_hash_dma_map(struct device *dev,
- struct ahash_request *req)
+int chcr_hash_dma_map(struct device *dev,
+ struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
int error = 0;
@@ -2414,13 +2429,13 @@ static inline int chcr_hash_dma_map(struct device *dev,
error = dma_map_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
if (!error)
- return error;
+ return -ENOMEM;
req_ctx->is_sg_map = 1;
return 0;
}
-static inline void chcr_hash_dma_unmap(struct device *dev,
- struct ahash_request *req)
+void chcr_hash_dma_unmap(struct device *dev,
+ struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
@@ -2433,9 +2448,8 @@ static inline void chcr_hash_dma_unmap(struct device *dev,
}
-
-static int chcr_cipher_dma_map(struct device *dev,
- struct ablkcipher_request *req)
+int chcr_cipher_dma_map(struct device *dev,
+ struct ablkcipher_request *req)
{
int error;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2469,8 +2483,9 @@ err:
dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
return -ENOMEM;
}
-static void chcr_cipher_dma_unmap(struct device *dev,
- struct ablkcipher_request *req)
+
+void chcr_cipher_dma_unmap(struct device *dev,
+ struct ablkcipher_request *req)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2666,8 +2681,6 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
sub_type = get_aead_subtype(tfm);
if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
assoclen -= 8;
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
error = chcr_aead_common_init(req, op_type);
if (error)
return ERR_PTR(error);
@@ -2677,15 +2690,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
if (error)
goto err;
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen
- + (op_type ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
- dnents += MIN_CCM_SG; // For IV and B0
- } else {
- dnents = 0;
- }
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen
+ + (op_type ? -authsize : authsize),
+ CHCR_DST_SG_SIZE, req->assoclen);
+ dnents += MIN_CCM_SG; // For IV and B0
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
@@ -2780,19 +2789,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
assoclen = req->assoclen - 8;
reqctx->b0_dma = 0;
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize : authsize);
error = chcr_aead_common_init(req, op_type);
- if (error)
- return ERR_PTR(error);
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst,
- req->cryptlen + (op_type ? -authsize : authsize),
+ if (error)
+ return ERR_PTR(error);
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ (op_type ? -authsize : authsize),
CHCR_DST_SG_SIZE, req->assoclen);
- dnents += MIN_GCM_SG; // For IV
- } else {
- dnents = 0;
- }
+ dnents += MIN_GCM_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
AEAD_H_SIZE;
@@ -2829,10 +2833,10 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
assoclen ? 1 : 0, assoclen,
assoclen + IV + 1, 0);
- chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
temp, temp);
- chcr_req->sec_cpl.seqno_numivs =
+ chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
@@ -3212,7 +3216,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
/* it contains auth and cipher key both*/
struct crypto_authenc_keys keys;
- unsigned int bs;
+ unsigned int bs, subtype;
unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
int err = 0, i, key_ctx_len = 0;
unsigned char ck_size = 0;
@@ -3241,6 +3245,15 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
pr_err("chcr : Unsupported digest size\n");
goto out;
}
+ subtype = get_aead_subtype(authenc);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ goto out;
+ memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+ - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
if (keys.enckeylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keys.enckeylen == AES_KEYSIZE_192) {
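
For the rfc3686 authenc modes, the crypto API hands setkey the nonce appended to the end of the AES key, which is why the hunk above peels the last CTR_RFC3686_NONCE_SIZE bytes off before classifying the key size. A small standalone sketch of that split, using an example 16+4-byte key:

#include <stdio.h>
#include <string.h>

#define CTR_RFC3686_NONCE_SIZE 4

int main(void)
{
	/* 20 bytes from the caller: 16-byte AES key + 4-byte nonce */
	unsigned char enckey[20] = {0};	/* placeholder key material */
	unsigned int enckeylen = sizeof(enckey);
	unsigned char nonce[CTR_RFC3686_NONCE_SIZE];

	memcpy(nonce, enckey + enckeylen - CTR_RFC3686_NONCE_SIZE,
	       CTR_RFC3686_NONCE_SIZE);
	enckeylen -= CTR_RFC3686_NONCE_SIZE;

	printf("AES key: %u bytes, nonce: %u bytes\n",
	       enckeylen, CTR_RFC3686_NONCE_SIZE);
	return 0;
}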
@@ -3258,9 +3271,12 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
*/
memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
aeadctx->enckey_len = keys.enckeylen;
- get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
- aeadctx->enckey_len << 3);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ }
base_hash = chcr_alloc_shash(max_authsize);
if (IS_ERR(base_hash)) {
pr_err("chcr : Base driver cannot be loaded\n");
@@ -3333,6 +3349,7 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
struct crypto_authenc_keys keys;
int err;
/* it contains auth and cipher key both*/
+ unsigned int subtype;
int key_ctx_len = 0;
unsigned char ck_size = 0;
@@ -3350,6 +3367,15 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
goto out;
}
+ subtype = get_aead_subtype(authenc);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ goto out;
+ memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+ - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
if (keys.enckeylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keys.enckeylen == AES_KEYSIZE_192) {
@@ -3357,13 +3383,16 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
} else if (keys.enckeylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- pr_err("chcr : Unsupported cipher key\n");
+ pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
goto out;
}
memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
aeadctx->enckey_len = keys.enckeylen;
- get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
- aeadctx->enckey_len << 3);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ }
key_ctx_len = sizeof(struct _key_ctx)
+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
@@ -3375,6 +3404,40 @@ out:
aeadctx->enckey_len = 0;
return -EINVAL;
}
+
+static int chcr_aead_op(struct aead_request *req,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx;
+ struct sk_buff *skb;
+
+ if (!a_ctx(tfm)->dev) {
+ pr_err("chcr : %s : No crypto device.\n", __func__);
+ return -ENXIO;
+ }
+ u_ctx = ULD_CTX(a_ctx(tfm));
+ if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ a_ctx(tfm)->tx_qidx)) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
+ op_type);
+
+ if (IS_ERR(skb) || !skb)
+ return PTR_ERR(skb);
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
+
static int chcr_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
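
chcr_aead_op, moved above its callers so the forward declaration could be dropped, follows the usual async crypto convention: -EBUSY when the queue is full and the request may not backlog, otherwise the work request is sent and -EINPROGRESS returned, with the final status delivered through req->base.complete(). A hedged sketch of how a caller typically waits on that convention, using the kernel's crypto_wait_req helpers:

#include <crypto/aead.h>
#include <linux/crypto.h>

/* Sketch: synchronously drive one AEAD encryption through an async
 * implementation such as chcr. crypto_wait_req() turns -EINPROGRESS /
 * -EBUSY into a sleep until the completion callback fires. */
static int encrypt_sync(struct aead_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	return crypto_wait_req(crypto_aead_encrypt(req), &wait);
}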
@@ -3383,8 +3446,10 @@ static int chcr_aead_encrypt(struct aead_request *req)
reqctx->verify = VERIFY_HW;
switch (get_aead_subtype(tfm)) {
- case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
- case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
create_authenc_wr);
case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
@@ -3413,8 +3478,10 @@ static int chcr_aead_decrypt(struct aead_request *req)
}
switch (get_aead_subtype(tfm)) {
- case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
- case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
create_authenc_wr);
case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
@@ -3427,38 +3494,6 @@ static int chcr_aead_decrypt(struct aead_request *req)
}
}
-static int chcr_aead_op(struct aead_request *req,
- unsigned short op_type,
- int size,
- create_wr_t create_wr_fn)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct uld_ctx *u_ctx;
- struct sk_buff *skb;
-
- if (!a_ctx(tfm)->dev) {
- pr_err("chcr : %s : No crypto device.\n", __func__);
- return -ENXIO;
- }
- u_ctx = ULD_CTX(a_ctx(tfm));
- if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- a_ctx(tfm)->tx_qidx)) {
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
- }
-
- /* Form a WR from req */
- skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
- op_type);
-
- if (IS_ERR(skb) || !skb)
- return PTR_ERR(skb);
-
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
- chcr_send_wr(skb);
- return -EINPROGRESS;
-}
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
@@ -3742,7 +3777,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3763,7 +3798,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3785,7 +3820,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3805,7 +3840,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3826,7 +3861,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3847,7 +3882,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3867,6 +3902,133 @@ static struct chcr_alg_template driver_algs[] = {
.setauthsize = chcr_authenc_null_setauthsize,
}
},
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+
+ .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-digest_null-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = 0,
+ .setkey = chcr_aead_digest_null_setkey,
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
+
};
/*
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 96c9335ee728..d1673a5d4bf5 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -226,15 +226,6 @@
#define SPACE_LEFT(len) \
((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))
-unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88,
- 96, 112, 120, 136, 144, 160, 168, 184,
- 192, 208, 216, 232, 240, 256, 264, 280,
- 288, 304, 312, 328, 336, 352, 360, 376};
-unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80,
- 112, 112, 128, 128, 144, 144, 160, 160,
- 192, 192, 208, 208, 224, 224, 240, 240,
- 272, 272, 288, 288, 304, 304, 320, 320};
-
struct algo_param {
unsigned int auth_mode;
unsigned int mk_size;
@@ -404,10 +395,4 @@ static inline u32 aes_ks_subword(const u32 w)
return *(u32 *)(&bytes[0]);
}
-static u32 round_constant[11] = {
- 0x01000000, 0x02000000, 0x04000000, 0x08000000,
- 0x10000000, 0x20000000, 0x40000000, 0x80000000,
- 0x1B000000, 0x36000000, 0x6C000000
-};
-
#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index f5a2624081dc..04f277cade7c 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -48,6 +48,9 @@ static struct cxgb4_uld_info chcr_uld_info = {
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
.rx_handler = chcr_uld_rx_handler,
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ .tx_handler = chcr_uld_tx_handler,
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
};
struct uld_ctx *assign_chcr_device(void)
@@ -164,6 +167,10 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
goto out;
}
u_ctx->lldi = *lld;
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
+ chcr_add_xfrmops(lld);
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
out:
return u_ctx;
}
@@ -187,6 +194,13 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
return 0;
}
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
+{
+ return chcr_ipsec_xmit(skb, dev);
+}
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
struct uld_ctx *u_ctx = handle;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 94e7412f6164..3c29ee09b8b5 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -39,6 +39,7 @@
#include <crypto/algapi.h>
#include "t4_hw.h"
#include "cxgb4.h"
+#include "t4_msg.h"
#include "cxgb4_uld.h"
#define DRV_MODULE_NAME "chcr"
@@ -89,12 +90,49 @@ struct uld_ctx {
struct chcr_dev *dev;
};
+struct chcr_ipsec_req {
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
+struct chcr_ipsec_wr {
+ struct fw_ulptx_wr wreq;
+ struct chcr_ipsec_req req;
+};
+
+struct ipsec_sa_entry {
+ int hmac_ctrl;
+ unsigned int enckey_len;
+ unsigned int kctx_len;
+ unsigned int authsize;
+ __be32 key_ctx_hdr;
+ char salt[MAX_SALT];
+ char key[2 * AES_MAX_KEY_SIZE];
+};
+
+/*
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
+
struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
int stop_crypto(void);
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *pgl);
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
#endif /* __CHCR_CORE_H__ */
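
sgl_len() was moved into chcr_core.h because the new IPsec path needs it too. The formula counts hardware flits (16-byte units): the leading entry is folded into the two-flit header, each subsequent pair of entries adds three flits, and an odd leftover entry adds two. A quick check of the arithmetic:

#include <stdio.h>

/* Flits (16-byte units) needed for an SGL holding n entries; the same
 * arithmetic as sgl_len() above. */
static unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

int main(void)
{
	unsigned int n;

	/* Prints: 1 -> 2, 2 -> 4, 3 -> 5, 4 -> 7, 5 -> 8 */
	for (n = 1; n <= 5; n++)
		printf("%u entries: %u flits\n", n, sgl_len(n));
	return 0;
}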
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 94a87e3ad9bc..7daf0a17a7d2 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -134,14 +134,16 @@
#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000
-#define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_SHA 0x04000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000
-#define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_NULL 0x07000000
#define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000
#define CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 0x09000000
#define CRYPTO_ALG_SUB_TYPE_XTS 0x0a000000
#define CRYPTO_ALG_SUB_TYPE_CBC 0x0b000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_SHA 0x0c000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_NULL 0x0d000000
#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
@@ -210,8 +212,6 @@ struct dsgl_walk {
struct phys_sge_pairs *to;
};
-
-
struct chcr_gcm_ctx {
u8 ghash_h[AEAD_H_SIZE];
};
@@ -227,21 +227,18 @@ struct __aead_ctx {
struct chcr_authenc_ctx authenc[0];
};
-
-
struct chcr_aead_ctx {
__be32 key_ctx_hdr;
unsigned int enckey_len;
struct crypto_aead *sw_cipher;
u8 salt[MAX_SALT];
u8 key[CHCR_AES_MAX_KEY_LEN];
+ u8 nonce[4];
u16 hmac_ctrl;
u16 mayverify;
struct __aead_ctx ctx[0];
};
-
-
struct hmac_ctx {
struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
@@ -307,44 +304,29 @@ typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
int size,
unsigned short op_type);
-static int chcr_aead_op(struct aead_request *req_base,
- unsigned short op_type,
- int size,
- create_wr_t create_wr_fn);
-static inline int get_aead_subtype(struct crypto_aead *aead);
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
- unsigned char *input, int err);
-static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
-static int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
- unsigned short op_type);
-static void chcr_aead_dma_unmap(struct device *dev, struct aead_request
- *req, unsigned short op_type);
-static inline void chcr_add_aead_dst_ent(struct aead_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
- unsigned short op_type,
- unsigned short qid);
-static inline void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen,
- unsigned short op_type);
-static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
- struct ulptx_sgl *ulptx,
- struct cipher_wr_param *wrparam);
-static int chcr_cipher_dma_map(struct device *dev,
- struct ablkcipher_request *req);
-static void chcr_cipher_dma_unmap(struct device *dev,
- struct ablkcipher_request *req);
-static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- struct cipher_wr_param *wrparam,
- unsigned short qid);
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
+int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen, unsigned short op_type,
+ unsigned short qid);
+void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx,
+ unsigned int assoclen, unsigned short op_type);
+void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam);
+int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req);
+void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req);
+void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid);
int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
-static inline void chcr_add_hash_src_ent(struct ahash_request *req,
- struct ulptx_sgl *ulptx,
- struct hash_wr_param *param);
-static inline int chcr_hash_dma_map(struct device *dev,
- struct ahash_request *req);
-static inline void chcr_hash_dma_unmap(struct device *dev,
- struct ahash_request *req);
+void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param);
+int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
+void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req);
#endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
new file mode 100644
index 000000000000..db1e241104ed
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -0,0 +1,654 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written and Maintained by:
+ * Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#define pr_fmt(fmt) "chcr:" fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/highmem.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <net/esp.h>
+#include <net/xfrm.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include "chcr_core.h"
+#include "chcr_algo.h"
+#include "chcr_crypto.h"
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 256
+#define GCM_ESP_IV_SIZE 8
+
+static int chcr_xfrm_add_state(struct xfrm_state *x);
+static void chcr_xfrm_del_state(struct xfrm_state *x);
+static void chcr_xfrm_free_state(struct xfrm_state *x);
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+
+static const struct xfrmdev_ops chcr_xfrmdev_ops = {
+ .xdo_dev_state_add = chcr_xfrm_add_state,
+ .xdo_dev_state_delete = chcr_xfrm_del_state,
+ .xdo_dev_state_free = chcr_xfrm_free_state,
+ .xdo_dev_offload_ok = chcr_ipsec_offload_ok,
+};
+
+/* Add offload xfrms to Chelsio Interface */
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
+{
+ struct net_device *netdev = NULL;
+ int i;
+
+ for (i = 0; i < lld->nports; i++) {
+ netdev = lld->ports[i];
+ if (!netdev)
+ continue;
+ netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+ netdev->features |= NETIF_F_HW_ESP;
+ rtnl_lock();
+ netdev_change_features(netdev);
+ rtnl_unlock();
+ }
+}
+
+static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
+{
+ int hmac_ctrl;
+ int authsize = x->aead->alg_icv_len / 8;
+
+ sa_entry->authsize = authsize;
+
+ switch (authsize) {
+ case ICV_8:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ break;
+ case ICV_12:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ break;
+ case ICV_16:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return hmac_ctrl;
+}
+
+static inline int chcr_ipsec_setkey(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct crypto_cipher *cipher;
+ int keylen = (x->aead->alg_key_len + 7) / 8;
+ unsigned char *key = x->aead->alg_key;
+ int ck_size, key_ctx_size = 0;
+ unsigned char ghash_h[AEAD_H_SIZE];
+ int ret = 0;
+
+ if (keylen > 3) {
+ keylen -= 4; /* nonce/salt is present in the last 4 bytes */
+ memcpy(sa_entry->salt, key + keylen, 4);
+ }
+
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("GCM: Invalid key length %d\n", keylen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(sa_entry->key, key, keylen);
+ sa_entry->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ AEAD_H_SIZE;
+
+ sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ 0, 0,
+ key_ctx_size >> 4);
+
+ /* Calculate the H = CIPH(K, 0 repeated 16 times).
+ * It goes into the key context.
+ */
+ cipher = crypto_alloc_cipher("aes-generic", 0, 0);
+ if (IS_ERR(cipher)) {
+ sa_entry->enckey_len = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = crypto_cipher_setkey(cipher, key, keylen);
+ if (ret) {
+ sa_entry->enckey_len = 0;
+ goto out1;
+ }
+ memset(ghash_h, 0, AEAD_H_SIZE);
+ crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+ memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
+ 16), ghash_h, AEAD_H_SIZE);
+ sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
+ AEAD_H_SIZE;
+out1:
+ crypto_free_cipher(cipher);
+out:
+ return ret;
+}
+
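
The "H" computed in chcr_ipsec_setkey is GCM's GHASH hash key: the all-zero block encrypted under the raw AES key, stored after the 16-byte-aligned cipher key in the key context. A userspace sketch of the same derivation, assuming OpenSSL's legacy AES primitives are available:

#include <stdio.h>
#include <openssl/aes.h>

int main(void)
{
	unsigned char key[16] = {0};	/* example 128-bit AES key */
	unsigned char h[16] = {0};	/* H = E_K(0^128)          */
	AES_KEY enc;
	int i;

	AES_set_encrypt_key(key, 128, &enc);
	AES_encrypt(h, h, &enc);	/* encrypt the zero block in place */

	for (i = 0; i < 16; i++)
		printf("%02x", h[i]);
	printf("\n");
	return 0;
}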
+/*
+ * chcr_xfrm_add_state
+ * returns 0 on success, a negative error if sending the message to the
+ * FPGA failed, or a positive error if the FPGA returned a bad response
+ */
+static int chcr_xfrm_add_state(struct xfrm_state *x)
+{
+ struct ipsec_sa_entry *sa_entry;
+ int res = 0;
+
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ pr_debug("CHCR: Cannot offload compressed xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ pr_debug("CHCR: Cannot offload ESN xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET &&
+ x->props.family != AF_INET6) {
+ pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ pr_debug("CHCR: Only ESP xfrm state offloaded\n");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ pr_debug("CHCR: Cannot offload xfrm states without aead\n");
+ return -EINVAL;
+ }
+ if (x->aead->alg_icv_len != 128 &&
+ x->aead->alg_icv_len != 96) {
+ pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
+ return -EINVAL;
+ }
+ if ((x->aead->alg_key_len != 128 + 32) &&
+ (x->aead->alg_key_len != 256 + 32)) {
+ pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
+ return -EINVAL;
+ }
+
+ sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+ if (!sa_entry) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
+ chcr_ipsec_setkey(x, sa_entry);
+ x->xso.offload_handle = (unsigned long)sa_entry;
+ try_module_get(THIS_MODULE);
+out:
+ return res;
+}
+
+static void chcr_xfrm_del_state(struct xfrm_state *x)
+{
+ /* do nothing */
+ if (!x->xso.offload_handle)
+ return;
+}
+
+static void chcr_xfrm_free_state(struct xfrm_state *x)
+{
+ struct ipsec_sa_entry *sa_entry;
+
+ if (!x->xso.offload_handle)
+ return;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+ kfree(sa_entry);
+ module_put(THIS_MODULE);
+}
+
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ /* Offload with IP options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+
+ return true;
+}
+
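+/* Decide whether the packet can be sent as immediate data inside the
+ * work request: returns the total header length if the skb plus headers
+ * fit in MAX_IMM_TX_PKT_LEN, or 0 if a scatter-gather list is needed.
+ */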
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
+{
+ int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+ return 0;
+}
+
+static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
+ unsigned int kctx_len)
+{
+ unsigned int flits;
+ int hdrlen = is_eth_imm(skb, kctx_len);
+
+ /* If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we only need the TX Packet
+ * header plus the skb data in the Work Request.
+ */
+
+ if (hdrlen)
+ return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
+
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+
+ /* Otherwise we must construct a scatter-gather list of the skb body
+ * and fragments. We also include the flits necessary for the TX
+ * Packet Work Request and CPL. We always have a firmware Write
+ * Header (incorporated as part of the cpl_tx_pkt_lso and cpl_tx_pkt
+ * structures), followed by either a TX Packet Write CPL message or,
+ * if we're doing a Large Send Offload, an LSO CPL message with an
+ * embedded TX Packet Write CPL message.
+ */
+ flits += (sizeof(struct fw_ulptx_wr) +
+ sizeof(struct chcr_ipsec_req) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
+
+inline void *copy_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos)
+{
+ struct adapter *adap;
+ struct port_info *pi;
+ struct sge_eth_txq *q;
+ struct cpl_tx_pkt_core *cpl;
+ u64 cntrl = 0;
+ u32 ctrl0, qidx;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ cpl = (struct cpl_tx_pkt_core *)pos;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+ ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf);
+ if (skb_vlan_tag_present(skb)) {
+ q->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+ }
+
+ cpl->ctrl0 = htonl(ctrl0);
+ cpl->pack = htons(0);
+ cpl->len = htons(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ pos += sizeof(struct cpl_tx_pkt_core);
+ return pos;
+}
+
+inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct adapter *adap;
+ struct port_info *pi;
+ struct sge_eth_txq *q;
+ unsigned int len, qidx;
+ struct _key_ctx *key_ctx;
+ int left, eoq, key_len;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core);
+ key_len = sa_entry->kctx_len;
+
+ /* If pos has reached the end of the queue, wrap to its start */
+ eoq = (void *)q->q.stat - pos;
+ left = eoq;
+ if (!eoq) {
+ pos = q->q.desc;
+ left = 64 * q->q.size;
+ }
+
+ /* Copy the Key context header */
+ key_ctx = (struct _key_ctx *)pos;
+ key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
+ memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
+ pos += sizeof(struct _key_ctx);
+ left -= sizeof(struct _key_ctx);
+
+ if (likely(len <= left)) {
+ memcpy(key_ctx->key, sa_entry->key, key_len);
+ pos += key_len;
+ } else {
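+ /* Key context and CPL do not fit contiguously before the
+ * status page: copy the key, wrapping to the start of the
+ * ring if it straddles the end.
+ */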
+ if (key_len <= left) {
+ memcpy(pos, sa_entry->key, key_len);
+ pos += key_len;
+ } else {
+ memcpy(pos, sa_entry->key, left);
+ memcpy(q->q.desc, sa_entry->key + left,
+ key_len - left);
+ pos = (u8 *)q->q.desc + (key_len - left);
+ }
+ }
+ /* Copy CPL TX PKT XT */
+ pos = copy_cpltx_pktxt(skb, dev, pos);
+
+ return pos;
+}
+
+inline void *chcr_crypto_wreq(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ int credits,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ unsigned int immdatalen = 0;
+ unsigned int ivsize = GCM_ESP_IV_SIZE;
+ struct chcr_ipsec_wr *wr;
+ unsigned int flits;
+ u32 wr_mid;
+ int qidx = skb_get_queue_mapping(skb);
+ struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ unsigned int kctx_len = sa_entry->kctx_len;
+ int qid = q->q.cntxt_id;
+
+ atomic_inc(&adap->chcr_stats.ipsec_cnt);
+
+ flits = calc_tx_sec_flits(skb, kctx_len);
+
+ if (is_eth_imm(skb, kctx_len))
+ immdatalen = skb->len;
+
+ /* WR Header */
+ wr = (struct chcr_ipsec_wr *)pos;
+ wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
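+ /* LEN16 is in 16-byte units; a flit is 8 bytes, hence flits / 2. */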
+ wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+ wr_mid |= FW_ULPTX_WR_DATA_F;
+ wr->wreq.flowid_len16 = htonl(wr_mid);
+
+ /* ULPTX */
+ wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
+ wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);
+
+ /* Sub-command */
+ wr->req.sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(wr->req.key_ctx) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core) +
+ immdatalen);
+
+ /* CPL_SEC_PDU */
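+ /* The IV is inserted right after the ESP header; the +1 in the
+ * offsets below reflects the CPL's 1-based offset convention.
+ */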
+ wr->req.sec_cpl.op_ivinsrtofst = htonl(
+ CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ CPL_TX_SEC_PDU_CPLLEN_V(2) |
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
+ CPL_TX_SEC_PDU_IVINSRTOFST_V(
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) + 1)));
+
+ wr->req.sec_cpl.pldlen = htonl(skb->len);
+
+ wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ (skb_transport_offset(skb) + 1),
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr)),
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1), 0);
+
+ wr->req.sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1,
+ sa_entry->authsize,
+ sa_entry->authsize);
+ wr->req.sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+ CHCR_SCMD_AUTH_MODE_GHASH,
+ sa_entry->hmac_ctrl,
+ ivsize >> 1);
+ wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 0, 0);
+
+ pos += sizeof(struct fw_ulptx_wr) +
+ sizeof(struct ulp_txpkt) +
+ sizeof(struct ulptx_idata) +
+ sizeof(struct cpl_tx_sec_pdu);
+
+ pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);
+
+ return pos;
+}
+
+/**
+ * flits_to_desc - returns the num of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Returns the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ WARN_ON(n > SGE_MAX_WR_LEN / 8);
+ return DIV_ROUND_UP(n, 8);
+}
+
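+/* Number of free descriptors in the ring; one slot is always kept
+ * unused so a completely full ring is never mistaken for an empty one.
+ */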
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+static void eth_txq_stop(struct sge_eth_txq *q)
+{
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+}
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+/*
+ * chcr_ipsec_xmit - called from the cxgb4 ULD Tx handler
+ */
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+ struct ipsec_sa_entry *sa_entry;
+ u64 *pos, *end, *before, *sgl;
+ int qidx, left, credits;
+ unsigned int flits = 0, ndesc, kctx_len;
+ struct adapter *adap;
+ struct sge_eth_txq *q;
+ struct port_info *pi;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ bool immediate = false;
+
+ if (!x->xso.offload_handle)
+ return NETDEV_TX_BUSY;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+ kctx_len = sa_entry->kctx_len;
+
+ if (skb->sp->len != 1) {
+out_free: dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+
+ flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ eth_txq_stop(q);
+ dev_err(adap->pdev_dev,
+ "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
+ dev->name, qidx, credits, ndesc, txq_avail(&q->q),
+ flits);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (is_eth_imm(skb, kctx_len))
+ immediate = true;
+
+ if (!immediate &&
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ q->mapping_err++;
+ goto out_free;
+ }
+
+ pos = (u64 *)&q->q.desc[q->q.pidx];
+ before = (u64 *)pos;
+ end = (u64 *)pos + flits;
+ /* Setup IPSec CPL */
+ pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
+ credits, sa_entry);
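+ /* The WR may have wrapped past the end of the ring inside
+ * chcr_crypto_wreq(); if so, recompute 'end' relative to the
+ * start of the descriptor ring.
+ */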
+ if (before > (u64 *)pos) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ }
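+ /* 'pos' landed exactly on the status page: wrap to the ring start. */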
+ if (pos == (u64 *)q->q.stat) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ pos = (void *)q->q.desc;
+ }
+
+ sgl = (void *)pos;
+ if (immediate) {
+ cxgb4_inline_tx_skb(skb, &q->q, sgl);
+ dev_consume_skb_any(skb);
+ } else {
+ int last_desc;
+
+ cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
+ 0, addr);
+ skb_orphan(skb);
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ q->q.sdesc[last_desc].skb = skb;
+ q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+ }
+ txq_advance(&q->q, ndesc);
+
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
+ return NETDEV_TX_OK;
+}
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index 451620b475a0..86f5f459762e 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* exynos-rng.c - Random Number Generator driver for the Exynos
*
@@ -6,15 +7,6 @@
* Loosely based on old driver from drivers/char/hw_random/exynos-rng.c:
* Copyright (C) 2012 Samsung Electronics
* Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation;
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -22,12 +14,18 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/internal/rng.h>
#define EXYNOS_RNG_CONTROL 0x0
#define EXYNOS_RNG_STATUS 0x10
+
+#define EXYNOS_RNG_SEED_CONF 0x14
+#define EXYNOS_RNG_GEN_PRNG BIT(1)
+
#define EXYNOS_RNG_SEED_BASE 0x140
#define EXYNOS_RNG_SEED(n) (EXYNOS_RNG_SEED_BASE + (n * 0x4))
#define EXYNOS_RNG_OUT_BASE 0x160
@@ -43,13 +41,21 @@
#define EXYNOS_RNG_SEED_REGS 5
#define EXYNOS_RNG_SEED_SIZE (EXYNOS_RNG_SEED_REGS * 4)
+enum exynos_prng_type {
+ EXYNOS_PRNG_UNKNOWN = 0,
+ EXYNOS_PRNG_EXYNOS4,
+ EXYNOS_PRNG_EXYNOS5,
+};
+
/*
- * Driver re-seeds itself with generated random numbers to increase
- * the randomness.
+ * Driver re-seeds itself with generated random numbers to hinder
+ * backtracking of the original seed.
*
* Time for next re-seed in ms.
*/
-#define EXYNOS_RNG_RESEED_TIME 100
+#define EXYNOS_RNG_RESEED_TIME 1000
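+/* Also re-seed after this many bytes of output, whichever comes first. */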
+#define EXYNOS_RNG_RESEED_BYTES 65536
+
/*
* In polling mode, do not wait infinitely for the engine to finish the work.
*/
@@ -63,13 +69,17 @@ struct exynos_rng_ctx {
/* Device associated memory */
struct exynos_rng_dev {
struct device *dev;
+ enum exynos_prng_type type;
void __iomem *mem;
struct clk *clk;
+ struct mutex lock;
/* Generated numbers stored for seeding during resume */
u8 seed_save[EXYNOS_RNG_SEED_SIZE];
unsigned int seed_save_len;
/* Time of last seeding in jiffies */
unsigned long last_seeding;
+ /* Bytes generated since last seeding */
+ unsigned long bytes_seeding;
};
static struct exynos_rng_dev *exynos_rng_dev;
@@ -114,39 +124,12 @@ static int exynos_rng_set_seed(struct exynos_rng_dev *rng,
}
rng->last_seeding = jiffies;
+ rng->bytes_seeding = 0;
return 0;
}
/*
- * Read from output registers and put the data under 'dst' array,
- * up to dlen bytes.
- *
- * Returns number of bytes actually stored in 'dst' (dlen
- * or EXYNOS_RNG_SEED_SIZE).
- */
-static unsigned int exynos_rng_copy_random(struct exynos_rng_dev *rng,
- u8 *dst, unsigned int dlen)
-{
- unsigned int cnt = 0;
- int i, j;
- u32 val;
-
- for (j = 0; j < EXYNOS_RNG_SEED_REGS; j++) {
- val = exynos_rng_readl(rng, EXYNOS_RNG_OUT(j));
-
- for (i = 0; i < 4; i++) {
- dst[cnt] = val & 0xff;
- val >>= 8;
- if (++cnt >= dlen)
- return cnt;
- }
- }
-
- return cnt;
-}
-
-/*
* Start the engine and poll for finish. Then read from output registers
* filling the 'dst' buffer up to 'dlen' bytes or up to size of generated
* random data (EXYNOS_RNG_SEED_SIZE).
@@ -160,8 +143,13 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
{
int retry = EXYNOS_RNG_WAIT_RETRIES;
- exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
- EXYNOS_RNG_CONTROL);
+ if (rng->type == EXYNOS_PRNG_EXYNOS4) {
+ exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
+ EXYNOS_RNG_CONTROL);
+ } else if (rng->type == EXYNOS_PRNG_EXYNOS5) {
+ exynos_rng_writel(rng, EXYNOS_RNG_GEN_PRNG,
+ EXYNOS_RNG_SEED_CONF);
+ }
while (!(exynos_rng_readl(rng,
EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry)
@@ -173,7 +161,9 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
/* Clear status bit */
exynos_rng_writel(rng, EXYNOS_RNG_STATUS_RNG_DONE,
EXYNOS_RNG_STATUS);
- *read = exynos_rng_copy_random(rng, dst, dlen);
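+ /* The engine yields at most EXYNOS_RNG_SEED_SIZE bytes per run;
+ * read them straight from the OUT registers.
+ */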
+ *read = min_t(size_t, dlen, EXYNOS_RNG_SEED_SIZE);
+ memcpy_fromio(dst, rng->mem + EXYNOS_RNG_OUT_BASE, *read);
+ rng->bytes_seeding += *read;
return 0;
}
@@ -187,13 +177,18 @@ static void exynos_rng_reseed(struct exynos_rng_dev *rng)
unsigned int read = 0;
u8 seed[EXYNOS_RNG_SEED_SIZE];
- if (time_before(now, next_seeding))
+ if (time_before(now, next_seeding) &&
+ rng->bytes_seeding < EXYNOS_RNG_RESEED_BYTES)
return;
if (exynos_rng_get_random(rng, seed, sizeof(seed), &read))
return;
exynos_rng_set_seed(rng, seed, read);
+
+ /* Briefly drop the lock so other users of the RNG can make progress. */
+ mutex_unlock(&rng->lock);
+ mutex_lock(&rng->lock);
}
static int exynos_rng_generate(struct crypto_rng *tfm,
@@ -209,6 +204,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
if (ret)
return ret;
+ mutex_lock(&rng->lock);
do {
ret = exynos_rng_get_random(rng, dst, dlen, &read);
if (ret)
@@ -219,6 +215,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
exynos_rng_reseed(rng);
} while (dlen > 0);
+ mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
@@ -236,7 +233,9 @@ static int exynos_rng_seed(struct crypto_rng *tfm, const u8 *seed,
if (ret)
return ret;
+ mutex_lock(&rng->lock);
ret = exynos_rng_set_seed(ctx->rng, seed, slen);
+ mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
@@ -259,7 +258,7 @@ static struct rng_alg exynos_rng_alg = {
.base = {
.cra_name = "stdrng",
.cra_driver_name = "exynos_rng",
- .cra_priority = 100,
+ .cra_priority = 300,
.cra_ctxsize = sizeof(struct exynos_rng_ctx),
.cra_module = THIS_MODULE,
.cra_init = exynos_rng_kcapi_init,
@@ -279,6 +278,10 @@ static int exynos_rng_probe(struct platform_device *pdev)
if (!rng)
return -ENOMEM;
+ rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev);
+
+ mutex_init(&rng->lock);
+
rng->dev = &pdev->dev;
rng->clk = devm_clk_get(&pdev->dev, "secss");
if (IS_ERR(rng->clk)) {
@@ -329,9 +332,14 @@ static int __maybe_unused exynos_rng_suspend(struct device *dev)
if (ret)
return ret;
+ mutex_lock(&rng->lock);
+
/* Get new random numbers and store them for seeding on resume. */
exynos_rng_get_random(rng, rng->seed_save, sizeof(rng->seed_save),
&(rng->seed_save_len));
+
+ mutex_unlock(&rng->lock);
+
dev_dbg(rng->dev, "Stored %u bytes for seeding on system resume\n",
rng->seed_save_len);
@@ -354,8 +362,12 @@ static int __maybe_unused exynos_rng_resume(struct device *dev)
if (ret)
return ret;
+ mutex_lock(&rng->lock);
+
ret = exynos_rng_set_seed(rng, rng->seed_save, rng->seed_save_len);
+ mutex_unlock(&rng->lock);
+
clk_disable_unprepare(rng->clk);
return ret;
@@ -367,6 +379,10 @@ static SIMPLE_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_suspend,
static const struct of_device_id exynos_rng_dt_match[] = {
{
.compatible = "samsung,exynos4-rng",
+ .data = (const void *)EXYNOS_PRNG_EXYNOS4,
+ }, {
+ .compatible = "samsung,exynos5250-prng",
+ .data = (const void *)EXYNOS_PRNG_EXYNOS5,
},
{ },
};
@@ -386,4 +402,4 @@ module_platform_driver(exynos_rng_driver);
MODULE_DESCRIPTION("Exynos H/W Random Number Generator driver");
MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index e09d4055b19e..a5a36fe7bf2c 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2579,6 +2579,7 @@ err_out_unmap_bars:
for (i = 0; i < 3; ++i)
if (dev->bar[i])
iounmap(dev->bar[i]);
+ kfree(dev);
err_out_free_regions:
pci_release_regions(pdev);
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 89ba9e85c0f3..225e74a7f724 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -108,10 +108,10 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
- priv->base + ctrl);
+ EIP197_PE(priv) + ctrl);
/* Enable access to the program memory */
- writel(prog_en, priv->base + EIP197_PE_ICE_RAM_CTRL);
+ writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
/* Write the firmware */
for (i = 0; i < fw->size / sizeof(u32); i++)
@@ -119,12 +119,12 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
/* Disable access to the program memory */
- writel(0, priv->base + EIP197_PE_ICE_RAM_CTRL);
+ writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
/* Release engine from reset */
- val = readl(priv->base + ctrl);
+ val = readl(EIP197_PE(priv) + ctrl);
val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
- writel(val, priv->base + ctrl);
+ writel(val, EIP197_PE(priv) + ctrl);
}
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
@@ -145,14 +145,14 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
}
/* Clear the scratchpad memory */
- val = readl(priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+ val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
- writel(val, priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+ writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
- memset(priv->base + EIP197_PE_ICE_SCRATCH_RAM, 0,
+ memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
@@ -173,7 +173,7 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
u32 hdw, cd_size_rnd, val;
int i;
- hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+ hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
hdw &= GENMASK(27, 25);
hdw >>= 25;
@@ -182,26 +182,25 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
writel(lower_32_bits(priv->ring[i].cdr.base_dma),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
priv->config.cd_size,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
(EIP197_FETCH_COUNT * priv->config.cd_offset),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
- writel(val,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DMA_CFG);
+ writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
/* clear any pending interrupt */
writel(GENMASK(5, 0),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
}
return 0;
@@ -212,7 +211,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
u32 hdw, rd_size_rnd, val;
int i;
- hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+ hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
hdw &= GENMASK(27, 25);
hdw >>= 25;
@@ -221,33 +220,33 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
writel(lower_32_bits(priv->ring[i].rdr.base_dma),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(upper_32_bits(priv->ring[i].rdr.base_dma),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
priv->config.rd_size,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
(EIP197_FETCH_COUNT * priv->config.rd_offset),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG;
writel(val,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DMA_CFG);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
/* clear any pending interrupt */
writel(GENMASK(7, 0),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
/* enable ring interrupt */
- val = readl(priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+ val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
val |= EIP197_RDR_IRQ(i);
- writel(val, priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+ writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
}
return 0;
@@ -259,39 +258,40 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
int i, ret;
/* Determine endianness and configure byte swap */
- version = readl(priv->base + EIP197_HIA_VERSION);
- val = readl(priv->base + EIP197_HIA_MST_CTRL);
+ version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
+ val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
val |= EIP197_MST_CTRL_BYTE_SWAP;
else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
- writel(val, priv->base + EIP197_HIA_MST_CTRL);
-
+ writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
/* Configure wr/rd cache values */
writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
- priv->base + EIP197_MST_CTRL);
+ EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
/* Interrupts reset */
/* Disable all global interrupts */
- writel(0, priv->base + EIP197_HIA_AIC_G_ENABLE_CTRL);
+ writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
/* Clear any pending interrupt */
- writel(GENMASK(31, 0), priv->base + EIP197_HIA_AIC_G_ACK);
+ writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
/* Data Fetch Engine configuration */
/* Reset all DFE threads */
writel(EIP197_DxE_THR_CTRL_RESET_PE,
- priv->base + EIP197_HIA_DFE_THR_CTRL);
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
- /* Reset HIA input interface arbiter */
- writel(EIP197_HIA_RA_PE_CTRL_RESET,
- priv->base + EIP197_HIA_RA_PE_CTRL);
+ if (priv->version == EIP197) {
+ /* Reset HIA input interface arbiter */
+ writel(EIP197_HIA_RA_PE_CTRL_RESET,
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
+ }
/* DMA transfer size to use */
val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
@@ -299,29 +299,32 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
- writel(val, priv->base + EIP197_HIA_DFE_CFG);
+ writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);
/* Leave the DFE threads reset state */
- writel(0, priv->base + EIP197_HIA_DFE_THR_CTRL);
+ writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
/* Configure the processing engine thresholds */
writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
- priv->base + EIP197_PE_IN_DBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
- priv->base + EIP197_PE_IN_TBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);
- /* enable HIA input interface arbiter and rings */
- writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_RA_PE_CTRL);
+ if (priv->version == EIP197) {
+ /* enable HIA input interface arbiter and rings */
+ writel(EIP197_HIA_RA_PE_CTRL_EN |
+ GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
+ }
/* Data Store Engine configuration */
/* Reset all DSE threads */
writel(EIP197_DxE_THR_CTRL_RESET_PE,
- priv->base + EIP197_HIA_DSE_THR_CTRL);
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Wait for all DSE threads to complete */
- while ((readl(priv->base + EIP197_HIA_DSE_THR_STAT) &
+ while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
GENMASK(15, 12)) != GENMASK(15, 12))
;
@@ -330,15 +333,19 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE;
- val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
- writel(val, priv->base + EIP197_HIA_DSE_CFG);
+ /* FIXME: instability issues can occur on EIP97, but disabling this
+ * impacts performance.
+ */
+ if (priv->version == EIP197)
+ val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+ writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);
/* Leave the DSE threads reset state */
- writel(0, priv->base + EIP197_HIA_DSE_THR_CTRL);
+ writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Configure the processing engine thresholds */
writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
- priv->base + EIP197_PE_OUT_DBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);
/* Processing Engine configuration */
@@ -348,73 +355,75 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
val |= EIP197_ALG_SHA2;
- writel(val, priv->base + EIP197_PE_EIP96_FUNCTION_EN);
+ writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
/* Command Descriptor Rings prepare */
for (i = 0; i < priv->config.rings; i++) {
/* Clear interrupts for this ring */
writel(GENMASK(31, 0),
- priv->base + EIP197_HIA_AIC_R_ENABLE_CLR(i));
+ EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
/* Disable external triggering */
- writel(0, priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Clear the pending prepared counter */
writel(EIP197_xDR_PREP_CLR_COUNT,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
/* Clear the pending processed counter */
writel(EIP197_xDR_PROC_CLR_COUNT,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
writel(0,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
writel(0,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_SIZE);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
/* Result Descriptor Ring prepare */
for (i = 0; i < priv->config.rings; i++) {
/* Disable external triggering */
- writel(0, priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Clear the pending prepared counter */
writel(EIP197_xDR_PREP_CLR_COUNT,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
/* Clear the pending processed counter */
writel(EIP197_xDR_PROC_CLR_COUNT,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
writel(0,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
writel(0,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
/* Ring size */
writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_SIZE);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
/* Enable command descriptor rings */
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_DFE_THR_CTRL);
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
/* Enable result descriptor rings */
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_DSE_THR_CTRL);
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Clear any HIA interrupt */
- writel(GENMASK(30, 20), priv->base + EIP197_HIA_AIC_G_ACK);
+ writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- eip197_trc_cache_init(priv);
+ if (priv->version == EIP197) {
+ eip197_trc_cache_init(priv);
- ret = eip197_load_firmwares(priv);
- if (ret)
- return ret;
+ ret = eip197_load_firmwares(priv);
+ if (ret)
+ return ret;
+ }
safexcel_hw_setup_cdesc_rings(priv);
safexcel_hw_setup_rdesc_rings(priv);
@@ -422,6 +431,23 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
return 0;
}
+/* Called with the ring's lock held */
+static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
+ int ring, int reqs)
+{
+ int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ);
+
+ if (!coal)
+ return 0;
+
+ /* Configure when we want an interrupt */
+ writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+ EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
+
+ return coal;
+}
+
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
struct crypto_async_request *req, *backlog;
@@ -429,34 +455,36 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
struct safexcel_request *request;
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
- priv->ring[ring].need_dequeue = false;
+ /* If a request wasn't properly dequeued because of a lack of
+ * resources, process it first.
+ */
+ req = priv->ring[ring].req;
+ backlog = priv->ring[ring].backlog;
+ if (req)
+ goto handle_req;
- do {
+ while (true) {
spin_lock_bh(&priv->ring[ring].queue_lock);
backlog = crypto_get_backlog(&priv->ring[ring].queue);
req = crypto_dequeue_request(&priv->ring[ring].queue);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!req)
+ if (!req) {
+ priv->ring[ring].req = NULL;
+ priv->ring[ring].backlog = NULL;
goto finalize;
+ }
+handle_req:
request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
- if (!request) {
- spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, req);
- spin_unlock_bh(&priv->ring[ring].queue_lock);
-
- priv->ring[ring].need_dequeue = true;
- goto finalize;
- }
+ if (!request)
+ goto request_failed;
ctx = crypto_tfm_ctx(req->tfm);
ret = ctx->send(req, ring, request, &commands, &results);
if (ret) {
kfree(request);
- req->complete(req, ret);
- priv->ring[ring].need_dequeue = true;
- goto finalize;
+ goto request_failed;
}
if (backlog)
@@ -468,30 +496,39 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
cdesc += commands;
rdesc += results;
- } while (nreq++ < EIP197_MAX_BATCH_SZ);
+ nreq++;
+ }
+
+request_failed:
+ /* Not enough resources to handle all the requests. Bail out and save
+ * the request and the backlog for the next dequeue call (per-ring).
+ */
+ priv->ring[ring].req = req;
+ priv->ring[ring].backlog = backlog;
finalize:
- if (nreq == EIP197_MAX_BATCH_SZ)
- priv->ring[ring].need_dequeue = true;
- else if (!nreq)
+ if (!nreq)
return;
- spin_lock_bh(&priv->ring[ring].lock);
+ spin_lock_bh(&priv->ring[ring].egress_lock);
- /* Configure when we want an interrupt */
- writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
- EIP197_HIA_RDR_THRESH_PROC_PKT(nreq),
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);
+ if (!priv->ring[ring].busy) {
+ nreq -= safexcel_try_push_requests(priv, ring, nreq);
+ if (nreq)
+ priv->ring[ring].busy = true;
+ }
+
+ priv->ring[ring].requests_left += nreq;
+
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
/* let the RDR know we have pending descriptors */
writel((rdesc * priv->config.rd_offset) << 2,
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
/* let the CDR know we have pending descriptors */
writel((cdesc * priv->config.cd_offset) << 2,
- priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
-
- spin_unlock_bh(&priv->ring[ring].lock);
+ EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
void safexcel_free_context(struct safexcel_crypto_priv *priv,
@@ -540,7 +577,6 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error)
}
int safexcel_invalidate_cache(struct crypto_async_request *async,
- struct safexcel_context *ctx,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring,
struct safexcel_request *request)
@@ -587,14 +623,17 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
{
struct safexcel_request *sreq;
struct safexcel_context *ctx;
- int ret, i, nreq, ndesc = 0;
+ int ret, i, nreq, ndesc, tot_descs, done;
bool should_complete;
- nreq = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
- nreq >>= 24;
- nreq &= GENMASK(6, 0);
+handle_results:
+ tot_descs = 0;
+
+ nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+ nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
+ nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
if (!nreq)
- return;
+ goto requests_left;
for (i = 0; i < nreq; i++) {
spin_lock_bh(&priv->ring[ring].egress_lock);
@@ -607,14 +646,11 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
ndesc = ctx->handle_result(priv, ring, sreq->req,
&should_complete, &ret);
if (ndesc < 0) {
+ kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
- return;
+ goto acknowledge;
}
- writel(EIP197_xDR_PROC_xD_PKT(1) |
- EIP197_xDR_PROC_xD_COUNT(ndesc * priv->config.rd_offset),
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
-
if (should_complete) {
local_bh_disable();
sreq->req->complete(sreq->req, ret);
@@ -622,19 +658,41 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
}
kfree(sreq);
+ tot_descs += ndesc;
}
+
+acknowledge:
+ if (i) {
+ writel(EIP197_xDR_PROC_xD_PKT(i) |
+ EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+ }
+
+ /* If the number of requests overflowed the counter, go around
+ * again to process the remaining ones.
+ */
+ if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
+ goto handle_results;
+
+requests_left:
+ spin_lock_bh(&priv->ring[ring].egress_lock);
+
+ done = safexcel_try_push_requests(priv, ring,
+ priv->ring[ring].requests_left);
+
+ priv->ring[ring].requests_left -= done;
+ if (!done && !priv->ring[ring].requests_left)
+ priv->ring[ring].busy = false;
+
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
}
-static void safexcel_handle_result_work(struct work_struct *work)
+static void safexcel_dequeue_work(struct work_struct *work)
{
struct safexcel_work_data *data =
container_of(work, struct safexcel_work_data, work);
- struct safexcel_crypto_priv *priv = data->priv;
-
- safexcel_handle_result_descriptor(priv, data->ring);
- if (priv->ring[data->ring].need_dequeue)
- safexcel_dequeue(data->priv, data->ring);
+ safexcel_dequeue(data->priv, data->ring);
}
struct safexcel_ring_irq_data {
@@ -646,16 +704,16 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
struct safexcel_ring_irq_data *irq_data = data;
struct safexcel_crypto_priv *priv = irq_data->priv;
- int ring = irq_data->ring;
+ int ring = irq_data->ring, rc = IRQ_NONE;
u32 status, stat;
- status = readl(priv->base + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
+ status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
if (!status)
- return IRQ_NONE;
+ return rc;
/* RDR interrupts */
if (status & EIP197_RDR_IRQ(ring)) {
- stat = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+ stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
if (unlikely(stat & EIP197_xDR_ERR)) {
/*
@@ -665,22 +723,37 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
*/
dev_err(priv->dev, "RDR: fatal error.");
} else if (likely(stat & EIP197_xDR_THRESH)) {
- queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work);
+ rc = IRQ_WAKE_THREAD;
}
/* ACK the interrupts */
writel(stat & 0xff,
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
}
/* ACK the interrupts */
- writel(status, priv->base + EIP197_HIA_AIC_R_ACK(ring));
+ writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
+
+ return rc;
+}
+
+static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
+{
+ struct safexcel_ring_irq_data *irq_data = data;
+ struct safexcel_crypto_priv *priv = irq_data->priv;
+ int ring = irq_data->ring;
+
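+ /* Handle completed results in thread context, then kick the dequeue
+ * worker so more queued requests are pushed to the engine.
+ */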
+ safexcel_handle_result_descriptor(priv, ring);
+
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return IRQ_HANDLED;
}
static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
irq_handler_t handler,
+ irq_handler_t threaded_handler,
struct safexcel_ring_irq_data *ring_irq_priv)
{
int ret, irq = platform_get_irq_byname(pdev, name);
@@ -690,8 +763,9 @@ static int safexcel_request_ring_irq(struct platform_device *pdev, const char *n
return irq;
}
- ret = devm_request_irq(&pdev->dev, irq, handler, 0,
- dev_name(&pdev->dev), ring_irq_priv);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
+ threaded_handler, IRQF_ONESHOT,
+ dev_name(&pdev->dev), ring_irq_priv);
if (ret) {
dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
return ret;
@@ -754,11 +828,11 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
u32 val, mask;
- val = readl(priv->base + EIP197_HIA_OPTIONS);
+ val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
val = (val & GENMASK(27, 25)) >> 25;
mask = BIT(val) - 1;
- val = readl(priv->base + EIP197_HIA_OPTIONS);
+ val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
@@ -768,6 +842,35 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}
+static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
+{
+ struct safexcel_register_offsets *offsets = &priv->offsets;
+
+ if (priv->version == EIP197) {
+ offsets->hia_aic = EIP197_HIA_AIC_BASE;
+ offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
+ offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
+ offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
+ offsets->hia_dfe = EIP197_HIA_DFE_BASE;
+ offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
+ offsets->hia_dse = EIP197_HIA_DSE_BASE;
+ offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
+ offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
+ offsets->pe = EIP197_PE_BASE;
+ } else {
+ offsets->hia_aic = EIP97_HIA_AIC_BASE;
+ offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
+ offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
+ offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
+ offsets->hia_dfe = EIP97_HIA_DFE_BASE;
+ offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
+ offsets->hia_dse = EIP97_HIA_DSE_BASE;
+ offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
+ offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
+ offsets->pe = EIP97_PE_BASE;
+ }
+}
+
static int safexcel_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -780,6 +883,9 @@ static int safexcel_probe(struct platform_device *pdev)
return -ENOMEM;
priv->dev = dev;
+ priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+
+ safexcel_init_register_offsets(priv);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(dev, res);
@@ -838,6 +944,7 @@ static int safexcel_probe(struct platform_device *pdev)
snprintf(irq_name, 6, "ring%d", i);
irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
+ safexcel_irq_ring_thread,
ring_irq);
if (irq < 0) {
ret = irq;
@@ -846,7 +953,7 @@ static int safexcel_probe(struct platform_device *pdev)
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
- INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work);
+ INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
snprintf(wq_name, 9, "wq_ring%d", i);
priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
@@ -855,6 +962,9 @@ static int safexcel_probe(struct platform_device *pdev)
goto err_clk;
}
+ priv->ring[i].requests_left = 0;
+ priv->ring[i].busy = false;
+
crypto_init_queue(&priv->ring[i].queue,
EIP197_DEFAULT_RING_SIZE);
@@ -902,7 +1012,14 @@ static int safexcel_remove(struct platform_device *pdev)
}
static const struct of_device_id safexcel_of_match_table[] = {
- { .compatible = "inside-secure,safexcel-eip197" },
+ {
+ .compatible = "inside-secure,safexcel-eip97",
+ .data = (void *)EIP97,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197",
+ .data = (void *)EIP197,
+ },
{},
};
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 304c5838c11a..4e219c21608b 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -19,64 +19,103 @@
#define EIP197_HIA_VERSION_BE 0x35ca
/* Static configuration */
-#define EIP197_DEFAULT_RING_SIZE 64
+#define EIP197_DEFAULT_RING_SIZE 400
#define EIP197_MAX_TOKENS 5
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_COUNT 1
-#define EIP197_MAX_BATCH_SZ EIP197_DEFAULT_RING_SIZE
+#define EIP197_MAX_BATCH_SZ 64
#define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
GFP_KERNEL : GFP_ATOMIC)
+/* Register base offsets */
+#define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic)
+#define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g)
+#define EIP197_HIA_AIC_R(priv) ((priv)->base + (priv)->offsets.hia_aic_r)
+#define EIP197_HIA_AIC_xDR(priv) ((priv)->base + (priv)->offsets.hia_aic_xdr)
+#define EIP197_HIA_DFE(priv) ((priv)->base + (priv)->offsets.hia_dfe)
+#define EIP197_HIA_DFE_THR(priv) ((priv)->base + (priv)->offsets.hia_dfe_thr)
+#define EIP197_HIA_DSE(priv) ((priv)->base + (priv)->offsets.hia_dse)
+#define EIP197_HIA_DSE_THR(priv) ((priv)->base + (priv)->offsets.hia_dse_thr)
+#define EIP197_HIA_GEN_CFG(priv) ((priv)->base + (priv)->offsets.hia_gen_cfg)
+#define EIP197_PE(priv) ((priv)->base + (priv)->offsets.pe)
+
+/* EIP197 base offsets */
+#define EIP197_HIA_AIC_BASE 0x90000
+#define EIP197_HIA_AIC_G_BASE 0x90000
+#define EIP197_HIA_AIC_R_BASE 0x90800
+#define EIP197_HIA_AIC_xDR_BASE 0x80000
+#define EIP197_HIA_DFE_BASE 0x8c000
+#define EIP197_HIA_DFE_THR_BASE 0x8c040
+#define EIP197_HIA_DSE_BASE 0x8d000
+#define EIP197_HIA_DSE_THR_BASE 0x8d040
+#define EIP197_HIA_GEN_CFG_BASE 0xf0000
+#define EIP197_PE_BASE 0xa0000
+
+/* EIP97 base offsets */
+#define EIP97_HIA_AIC_BASE 0x0
+#define EIP97_HIA_AIC_G_BASE 0x0
+#define EIP97_HIA_AIC_R_BASE 0x0
+#define EIP97_HIA_AIC_xDR_BASE 0x0
+#define EIP97_HIA_DFE_BASE 0xf000
+#define EIP97_HIA_DFE_THR_BASE 0xf200
+#define EIP97_HIA_DSE_BASE 0xf400
+#define EIP97_HIA_DSE_THR_BASE 0xf600
+#define EIP97_HIA_GEN_CFG_BASE 0x10000
+#define EIP97_PE_BASE 0x10000
+
/* CDR/RDR register offsets */
-#define EIP197_HIA_xDR_OFF(r) (0x80000 + (r) * 0x1000)
-#define EIP197_HIA_CDR(r) (EIP197_HIA_xDR_OFF(r))
-#define EIP197_HIA_RDR(r) (EIP197_HIA_xDR_OFF(r) + 0x800)
-#define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0
-#define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x4
-#define EIP197_HIA_xDR_RING_SIZE 0x18
-#define EIP197_HIA_xDR_DESC_SIZE 0x1c
-#define EIP197_HIA_xDR_CFG 0x20
-#define EIP197_HIA_xDR_DMA_CFG 0x24
-#define EIP197_HIA_xDR_THRESH 0x28
-#define EIP197_HIA_xDR_PREP_COUNT 0x2c
-#define EIP197_HIA_xDR_PROC_COUNT 0x30
-#define EIP197_HIA_xDR_PREP_PNTR 0x34
-#define EIP197_HIA_xDR_PROC_PNTR 0x38
-#define EIP197_HIA_xDR_STAT 0x3c
+#define EIP197_HIA_xDR_OFF(priv, r) (EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000)
+#define EIP197_HIA_CDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r))
+#define EIP197_HIA_RDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r) + 0x800)
+#define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0000
+#define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x0004
+#define EIP197_HIA_xDR_RING_SIZE 0x0018
+#define EIP197_HIA_xDR_DESC_SIZE 0x001c
+#define EIP197_HIA_xDR_CFG 0x0020
+#define EIP197_HIA_xDR_DMA_CFG 0x0024
+#define EIP197_HIA_xDR_THRESH 0x0028
+#define EIP197_HIA_xDR_PREP_COUNT 0x002c
+#define EIP197_HIA_xDR_PROC_COUNT 0x0030
+#define EIP197_HIA_xDR_PREP_PNTR 0x0034
+#define EIP197_HIA_xDR_PROC_PNTR 0x0038
+#define EIP197_HIA_xDR_STAT 0x003c
/* register offsets */
-#define EIP197_HIA_DFE_CFG 0x8c000
-#define EIP197_HIA_DFE_THR_CTRL 0x8c040
-#define EIP197_HIA_DFE_THR_STAT 0x8c044
-#define EIP197_HIA_DSE_CFG 0x8d000
-#define EIP197_HIA_DSE_THR_CTRL 0x8d040
-#define EIP197_HIA_DSE_THR_STAT 0x8d044
-#define EIP197_HIA_RA_PE_CTRL 0x90010
-#define EIP197_HIA_RA_PE_STAT 0x90014
+#define EIP197_HIA_DFE_CFG 0x0000
+#define EIP197_HIA_DFE_THR_CTRL 0x0000
+#define EIP197_HIA_DFE_THR_STAT 0x0004
+#define EIP197_HIA_DSE_CFG 0x0000
+#define EIP197_HIA_DSE_THR_CTRL 0x0000
+#define EIP197_HIA_DSE_THR_STAT 0x0004
+#define EIP197_HIA_RA_PE_CTRL 0x0010
+#define EIP197_HIA_RA_PE_STAT 0x0014
#define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000)
-#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0x9e808 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ACK(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0x9e814 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_G_ENABLE_CTRL 0x9f808
-#define EIP197_HIA_AIC_G_ENABLED_STAT 0x9f810
-#define EIP197_HIA_AIC_G_ACK 0x9f810
-#define EIP197_HIA_MST_CTRL 0x9fff4
-#define EIP197_HIA_OPTIONS 0x9fff8
-#define EIP197_HIA_VERSION 0x9fffc
-#define EIP197_PE_IN_DBUF_THRES 0xa0000
-#define EIP197_PE_IN_TBUF_THRES 0xa0100
-#define EIP197_PE_ICE_SCRATCH_RAM 0xa0800
-#define EIP197_PE_ICE_PUE_CTRL 0xa0c80
-#define EIP197_PE_ICE_SCRATCH_CTRL 0xa0d04
-#define EIP197_PE_ICE_FPP_CTRL 0xa0d80
-#define EIP197_PE_ICE_RAM_CTRL 0xa0ff0
-#define EIP197_PE_EIP96_FUNCTION_EN 0xa1004
-#define EIP197_PE_EIP96_CONTEXT_CTRL 0xa1008
-#define EIP197_PE_EIP96_CONTEXT_STAT 0xa100c
-#define EIP197_PE_OUT_DBUF_THRES 0xa1c00
-#define EIP197_PE_OUT_TBUF_THRES 0xa1d00
+#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0xe008 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ACK(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0xe014 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_G_ENABLE_CTRL 0xf808
+#define EIP197_HIA_AIC_G_ENABLED_STAT 0xf810
+#define EIP197_HIA_AIC_G_ACK 0xf810
+#define EIP197_HIA_MST_CTRL 0xfff4
+#define EIP197_HIA_OPTIONS 0xfff8
+#define EIP197_HIA_VERSION 0xfffc
+#define EIP197_PE_IN_DBUF_THRES 0x0000
+#define EIP197_PE_IN_TBUF_THRES 0x0100
+#define EIP197_PE_ICE_SCRATCH_RAM 0x0800
+#define EIP197_PE_ICE_PUE_CTRL 0x0c80
+#define EIP197_PE_ICE_SCRATCH_CTRL 0x0d04
+#define EIP197_PE_ICE_FPP_CTRL 0x0d80
+#define EIP197_PE_ICE_RAM_CTRL 0x0ff0
+#define EIP197_PE_EIP96_FUNCTION_EN 0x1004
+#define EIP197_PE_EIP96_CONTEXT_CTRL 0x1008
+#define EIP197_PE_EIP96_CONTEXT_STAT 0x100c
+#define EIP197_PE_OUT_DBUF_THRES 0x1c00
+#define EIP197_PE_OUT_TBUF_THRES 0x1d00
+#define EIP197_MST_CTRL 0xfff4
+
+/* EIP197-specific registers, no indirection */
#define EIP197_CLASSIFICATION_RAMS 0xe0000
#define EIP197_TRC_CTRL 0xf0800
#define EIP197_TRC_LASTRES 0xf0804
@@ -90,7 +129,6 @@
#define EIP197_TRC_ECCDATASTAT 0xf083c
#define EIP197_TRC_ECCDATA 0xf0840
#define EIP197_CS_RAM_CTRL 0xf7ff0
-#define EIP197_MST_CTRL 0xffff4
/* EIP197_HIA_xDR_DESC_SIZE */
#define EIP197_xDR_DESC_MODE_64BIT BIT(31)
@@ -117,6 +155,8 @@
#define EIP197_xDR_PREP_CLR_COUNT BIT(31)
/* EIP197_HIA_xDR_PROC_COUNT */
+#define EIP197_xDR_PROC_xD_PKT_OFFSET 24
+#define EIP197_xDR_PROC_xD_PKT_MASK GENMASK(6, 0)
#define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2)
#define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24)
#define EIP197_xDR_PROC_CLR_COUNT BIT(31)
@@ -463,12 +503,33 @@ struct safexcel_work_data {
int ring;
};
+enum safexcel_eip_version {
+ EIP97,
+ EIP197,
+};
+
+struct safexcel_register_offsets {
+ u32 hia_aic;
+ u32 hia_aic_g;
+ u32 hia_aic_r;
+ u32 hia_aic_xdr;
+ u32 hia_dfe;
+ u32 hia_dfe_thr;
+ u32 hia_dse;
+ u32 hia_dse_thr;
+ u32 hia_gen_cfg;
+ u32 pe;
+};
+
struct safexcel_crypto_priv {
void __iomem *base;
struct device *dev;
struct clk *clk;
struct safexcel_config config;
+ enum safexcel_eip_version version;
+ struct safexcel_register_offsets offsets;
+
/* context DMA pool */
struct dma_pool *context_pool;
@@ -489,7 +550,20 @@ struct safexcel_crypto_priv {
/* queue */
struct crypto_queue queue;
spinlock_t queue_lock;
- bool need_dequeue;
+
+ /* Number of requests in the engine that need the threshold
+ * interrupt to be set up.
+ */
+ int requests_left;
+
+ /* The ring is currently handling at least one request */
+ bool busy;
+
+ /* Store for the current request and backlog when bailing out of
+ * the dequeueing function because not enough resources are
+ * available.
+ */
+ struct crypto_async_request *req;
+ struct crypto_async_request *backlog;
} ring[EIP197_MAX_RINGS];
};
@@ -539,7 +613,6 @@ void safexcel_free_context(struct safexcel_crypto_priv *priv,
struct crypto_async_request *req,
int result_sz);
int safexcel_invalidate_cache(struct crypto_async_request *async,
- struct safexcel_context *ctx,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring,
struct safexcel_request *request);
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 5438552bc6d7..63a8768ed2ae 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -14,6 +14,7 @@
#include <crypto/aes.h>
#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
#include "safexcel.h"
@@ -26,13 +27,17 @@ struct safexcel_cipher_ctx {
struct safexcel_context base;
struct safexcel_crypto_priv *priv;
- enum safexcel_cipher_direction direction;
u32 mode;
__le32 key[8];
unsigned int key_len;
};
+struct safexcel_cipher_req {
+ enum safexcel_cipher_direction direction;
+ bool needs_inv;
+};
+
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
struct crypto_async_request *async,
struct safexcel_command_desc *cdesc,
@@ -64,6 +69,7 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
struct crypto_aes_ctx aes;
int ret, i;
@@ -73,10 +79,12 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
return ret;
}
- for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
- ctx->base.needs_inv = true;
- break;
+ if (priv->version == EIP197 && ctx->base.ctxr_dma) {
+ for (i = 0; i < len / sizeof(u32); i++) {
+ if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ ctx->base.needs_inv = true;
+ break;
+ }
}
}
@@ -90,12 +98,15 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
}
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
+ struct crypto_async_request *async,
struct safexcel_command_desc *cdesc)
{
struct safexcel_crypto_priv *priv = ctx->priv;
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
int ctrl_size;
- if (ctx->direction == SAFEXCEL_ENCRYPT)
+ if (sreq->direction == SAFEXCEL_ENCRYPT)
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
else
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;
@@ -126,9 +137,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
return 0;
}
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
{
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_result_desc *rdesc;
@@ -238,7 +249,7 @@ static int safexcel_aes_send(struct crypto_async_request *async,
n_cdesc++;
if (n_cdesc == 1) {
- safexcel_context_control(ctx, cdesc);
+ safexcel_context_control(ctx, async, cdesc);
safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
}
@@ -265,7 +276,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
spin_unlock_bh(&priv->ring[ring].egress_lock);
request->req = &req->base;
- ctx->base.handle_result = safexcel_handle_result;
*commands = n_cdesc;
*results = n_rdesc;
@@ -341,8 +351,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
- ctx->base.needs_inv = false;
- ctx->base.send = safexcel_aes_send;
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -351,14 +359,34 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
*should_complete = false;
return ndesc;
}
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
+{
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ int err;
+
+ if (sreq->needs_inv) {
+ sreq->needs_inv = false;
+ err = safexcel_handle_inv_result(priv, ring, async,
+ should_complete, ret);
+ } else {
+ err = safexcel_handle_req_result(priv, ring, async,
+ should_complete, ret);
+ }
+
+ return err;
+}
+
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
int ring, struct safexcel_request *request,
int *commands, int *results)
@@ -368,9 +396,7 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- ctx->base.handle_result = safexcel_handle_inv_result;
-
- ret = safexcel_invalidate_cache(async, &ctx->base, priv,
+ ret = safexcel_invalidate_cache(async, priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
return ret;
@@ -381,32 +407,54 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
return 0;
}
+static int safexcel_send(struct crypto_async_request *async,
+ int ring, struct safexcel_request *request,
+ int *commands, int *results)
+{
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ struct safexcel_crypto_priv *priv = ctx->priv;
+ int ret;
+
+ BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+
+ if (sreq->needs_inv)
+ ret = safexcel_cipher_send_inv(async, ring, request,
+ commands, results);
+ else
+ ret = safexcel_aes_send(async, ring, request,
+ commands, results);
+ return ret;
+}
+
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- struct skcipher_request req;
+ SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
- memset(&req, 0, sizeof(struct skcipher_request));
+ memset(req, 0, sizeof(struct skcipher_request));
/* create invalidation request */
init_completion(&result.completion);
- skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_inv_complete, &result);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ safexcel_inv_complete, &result);
- skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
- ctx = crypto_tfm_ctx(req.base.tfm);
+ skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+ ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
- ctx->base.send = safexcel_cipher_send_inv;
+ sreq->needs_inv = true;
spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+ crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
wait_for_completion_interruptible(&result.completion);
@@ -424,19 +472,21 @@ static int safexcel_aes(struct skcipher_request *req,
enum safexcel_cipher_direction dir, u32 mode)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_crypto_priv *priv = ctx->priv;
int ret, ring;
- ctx->direction = dir;
+ sreq->needs_inv = false;
+ sreq->direction = dir;
ctx->mode = mode;
if (ctx->base.ctxr) {
- if (ctx->base.needs_inv)
- ctx->base.send = safexcel_cipher_send_inv;
+ if (priv->version == EIP197 && ctx->base.needs_inv) {
+ sreq->needs_inv = true;
+ ctx->base.needs_inv = false;
+ }
} else {
ctx->base.ring = safexcel_select_ring(priv);
- ctx->base.send = safexcel_aes_send;
-
ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
EIP197_GFP_FLAGS(req->base),
&ctx->base.ctxr_dma);
@@ -450,8 +500,8 @@ static int safexcel_aes(struct skcipher_request *req,
ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return ret;
}
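
Every submission path in this patch replaces the open-coded "dequeue unless
someone else already is" logic with an unconditional queue_work() on a
per-ring workqueue; queue_work() is a no-op while the work item is already
pending, so concurrent submitters cannot lose a kick. A minimal sketch of
the pattern (my_ring, my_submit and the work field names are illustrative):

    #include <crypto/algapi.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct my_ring {
            spinlock_t queue_lock;
            struct crypto_queue queue;
            struct workqueue_struct *workqueue;
            struct work_struct work;
    };

    /* Sketch: enqueue a request and kick the ring's worker. */
    static int my_submit(struct my_ring *ring,
                         struct crypto_async_request *req)
    {
            int ret;

            spin_lock_bh(&ring->queue_lock);
            ret = crypto_enqueue_request(&ring->queue, req);
            spin_unlock_bh(&ring->queue_lock);

            /* Safe to call even if the work is already queued. */
            queue_work(ring->workqueue, &ring->work);

            return ret;
    }
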
@@ -476,6 +526,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
alg.skcipher.base);
ctx->priv = tmpl->priv;
+ ctx->base.send = safexcel_send;
+ ctx->base.handle_result = safexcel_handle_result;
+
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct safexcel_cipher_req));
return 0;
}
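
With the invalidation state moved from the tfm context into a per-request
safexcel_cipher_req, cra_init must reserve room for it up front via
crypto_skcipher_set_reqsize(); the crypto API then hands every request a
scratch area retrievable with skcipher_request_ctx(). A sketch of that
pairing (my_req_ctx and the my_* functions are hypothetical):

    #include <crypto/internal/skcipher.h>

    struct my_req_ctx {
            bool needs_inv; /* per-request, not per-tfm */
    };

    static int my_skcipher_init(struct crypto_skcipher *tfm)
    {
            /* Reserve per-request scratch space once, at init ... */
            crypto_skcipher_set_reqsize(tfm, sizeof(struct my_req_ctx));
            return 0;
    }

    static int my_encrypt(struct skcipher_request *req)
    {
            /* ... and retrieve it for each individual request. */
            struct my_req_ctx *rctx = skcipher_request_ctx(req);

            rctx->needs_inv = false;
            return 0;
    }
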
@@ -494,9 +549,14 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));
- ret = safexcel_cipher_exit_inv(tfm);
- if (ret)
- dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+ if (priv->version == EIP197) {
+ ret = safexcel_cipher_exit_inv(tfm);
+ if (ret)
+ dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+ } else {
+ dma_pool_free(priv->context_pool, ctx->base.ctxr,
+ ctx->base.ctxr_dma);
+ }
}
struct safexcel_alg_template safexcel_alg_ecb_aes = {
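
The cra_exit split above captures the core EIP97/EIP197 difference in this
series: only the EIP197 caches context records internally and therefore
needs an invalidation round-trip before its record may be freed, while the
EIP97 can return the record to the DMA pool immediately. Both paths end in
the same dma_pool pairing; a minimal sketch of it (the example_* names are
illustrative):

    #include <linux/dmapool.h>

    /* Sketch: lifetime of a per-tfm context record in a dma_pool. */
    static void *example_ctxr_alloc(struct dma_pool *pool, gfp_t gfp,
                                    dma_addr_t *dma)
    {
            /* Zeroed allocation, done at first use of the tfm. */
            return dma_pool_zalloc(pool, gfp, dma);
    }

    static void example_ctxr_free(struct dma_pool *pool, void *ctxr,
                                  dma_addr_t dma)
    {
            /* Only valid once the engine holds no reference (on the
             * EIP197 this means after cache invalidation completed). */
            dma_pool_free(pool, ctxr, dma);
    }
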
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 74feb6227101..122a2a58e98f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -14,7 +14,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
-
#include "safexcel.h"
struct safexcel_ahash_ctx {
@@ -32,9 +31,12 @@ struct safexcel_ahash_req {
bool last_req;
bool finish;
bool hmac;
+ bool needs_inv;
+
+ int nents;
u8 state_sz; /* expected state size, only set once */
- u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
u64 len;
u64 processed;
@@ -119,15 +121,15 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
}
}
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
{
struct safexcel_result_desc *rdesc;
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
- int cache_len, result_sz = sreq->state_sz;
+ int cache_len;
*ret = 0;
@@ -148,11 +150,13 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
spin_unlock_bh(&priv->ring[ring].egress_lock);
if (sreq->finish)
- result_sz = crypto_ahash_digestsize(ahash);
- memcpy(sreq->state, areq->result, result_sz);
+ memcpy(areq->result, sreq->state,
+ crypto_ahash_digestsize(ahash));
- dma_unmap_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
+ if (sreq->nents) {
+ dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+ sreq->nents = 0;
+ }
safexcel_free_context(priv, async, sreq->state_sz);
@@ -165,9 +169,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
return 1;
}
-static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request, int *commands,
- int *results)
+static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+ struct safexcel_request *request,
+ int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
@@ -177,7 +181,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+ int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
queued = len = req->len - req->processed;
if (queued < crypto_ahash_blocksize(ahash))
@@ -185,17 +189,31 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
else
cache_len = queued - areq->nbytes;
- /*
- * If this is not the last request and the queued data does not fit
- * into full blocks, cache it for the next send() call.
- */
- extra = queued & (crypto_ahash_blocksize(ahash) - 1);
- if (!req->last_req && extra) {
- sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
- req->cache_next, extra, areq->nbytes - extra);
-
- queued -= extra;
- len -= extra;
+ if (!req->last_req) {
+ /* If this is not the last request and the queued data does not
+ * fit into full blocks, cache it for the next send() call.
+ */
+ extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+ if (!extra)
+ /* If this is not the last request and the queued data
+ * is a multiple of a block, cache the last one for now.
+ */
+ extra = queued - crypto_ahash_blocksize(ahash);
+
+ if (extra) {
+ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+ req->cache_next, extra,
+ areq->nbytes - extra);
+
+ queued -= extra;
+ len -= extra;
+
+ if (!queued) {
+ *commands = 0;
+ *results = 0;
+ return 0;
+ }
+ }
}
spin_lock_bh(&priv->ring[ring].egress_lock);
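
The reworked caching above keeps the engine from ever being handed an empty
final block: partial tail bytes are cached as before, and when the queued
data is an exact multiple of the block size, everything past one block is
held back under the logic shown. A small standalone sketch of that extra
computation (plain C, mirroring the hunk above; assumes blocksize is a
power of two, as ahash blocksizes here are):

    #include <stdio.h>

    /* Sketch of the cache sizing above: for a non-final request,
     * return how many trailing bytes to hold back for later. */
    static unsigned int bytes_to_cache(unsigned int queued,
                                       unsigned int blocksize)
    {
            unsigned int extra = queued & (blocksize - 1);

            if (!extra)                     /* exact multiple of a block: */
                    extra = queued - blocksize; /* keep all but one */
            return extra;
    }

    int main(void)
    {
            /* 100 queued, 64-byte blocks -> 36 cached, 64 sent */
            printf("%u\n", bytes_to_cache(100, 64));
            /* 128 queued -> 64 cached, 64 sent */
            printf("%u\n", bytes_to_cache(128, 64));
            return 0;
    }
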
@@ -233,15 +251,15 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
}
/* Now handle the current ahash request buffer(s) */
- nents = dma_map_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes),
- DMA_TO_DEVICE);
- if (!nents) {
+ req->nents = dma_map_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
+ if (!req->nents) {
ret = -ENOMEM;
goto cdesc_rollback;
}
- for_each_sg(areq->src, sg, nents, i) {
+ for_each_sg(areq->src, sg, req->nents, i) {
int sglen = sg_dma_len(sg);
/* Do not overflow the request */
@@ -273,7 +291,7 @@ send_command:
/* Add the token */
safexcel_hash_token(first_cdesc, len, req->state_sz);
- ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
+ ctx->base.result_dma = dma_map_single(priv->dev, req->state,
req->state_sz, DMA_FROM_DEVICE);
if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
ret = -EINVAL;
@@ -292,7 +310,6 @@ send_command:
req->processed += len;
request->req = &areq->base;
- ctx->base.handle_result = safexcel_handle_result;
*commands = n_cdesc;
*results = 1;
@@ -374,8 +391,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
- ctx->base.needs_inv = false;
- ctx->base.send = safexcel_ahash_send;
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -384,14 +399,36 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
*should_complete = false;
return 1;
}
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
+{
+ struct ahash_request *areq = ahash_request_cast(async);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ int err;
+
+ BUG_ON(priv->version == EIP97 && req->needs_inv);
+
+ if (req->needs_inv) {
+ req->needs_inv = false;
+ err = safexcel_handle_inv_result(priv, ring, async,
+ should_complete, ret);
+ } else {
+ err = safexcel_handle_req_result(priv, ring, async,
+ should_complete, ret);
+ }
+
+ return err;
+}
+
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
int ring, struct safexcel_request *request,
int *commands, int *results)
@@ -400,8 +437,7 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
- ctx->base.handle_result = safexcel_handle_inv_result;
- ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
+ ret = safexcel_invalidate_cache(async, ctx->priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
return ret;
@@ -412,32 +448,50 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
return 0;
}
+static int safexcel_ahash_send(struct crypto_async_request *async,
+ int ring, struct safexcel_request *request,
+ int *commands, int *results)
+{
+ struct ahash_request *areq = ahash_request_cast(async);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ int ret;
+
+ if (req->needs_inv)
+ ret = safexcel_ahash_send_inv(async, ring, request,
+ commands, results);
+ else
+ ret = safexcel_ahash_send_req(async, ring, request,
+ commands, results);
+ return ret;
+}
+
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- struct ahash_request req;
+ AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+ struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
- memset(&req, 0, sizeof(struct ahash_request));
+ memset(req, 0, sizeof(struct ahash_request));
/* create invalidation request */
init_completion(&result.completion);
- ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
safexcel_inv_complete, &result);
- ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
- ctx = crypto_tfm_ctx(req.base.tfm);
+ ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+ ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
- ctx->base.send = safexcel_ahash_send_inv;
+ rctx->needs_inv = true;
spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+ crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
wait_for_completion_interruptible(&result.completion);
@@ -450,13 +504,23 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
return 0;
}
+/* safexcel_ahash_cache: cache data until at least one request can be sent to
+ * the engine, i.e. when there is at least one block size in the pipe.
+ */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
int queued, cache_len;
+ /* cache_len: everything accepted by the driver but not sent yet,
+ * tot sz handled by update() - last req sz - tot sz handled by send()
+ */
cache_len = req->len - areq->nbytes - req->processed;
+ /* queued: everything accepted by the driver which will be handled by
+ * the next send() calls.
+ * tot sz handled by update() - tot sz handled by send()
+ */
queued = req->len - req->processed;
/*
@@ -470,7 +534,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
return areq->nbytes;
}
- /* We could'nt cache all the data */
+ /* We couldn't cache all the data */
return -E2BIG;
}
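
The two derived quantities documented above are pure bookkeeping over three
counters: req->len (total bytes accepted by update()), areq->nbytes (bytes
carried by the current request) and req->processed (bytes already sent to
the engine). A worked sketch of the arithmetic (plain C, illustrative
values):

    #include <stdio.h>

    int main(void)
    {
            unsigned long len = 200;        /* accepted by update() */
            unsigned long nbytes = 80;      /* current request size */
            unsigned long processed = 64;   /* already sent to engine */

            /* Bytes accepted earlier but not yet sent: */
            unsigned long cache_len = len - nbytes - processed; /* 56 */
            /* Bytes the next send() calls will have to handle: */
            unsigned long queued = len - processed;             /* 136 */

            printf("cache_len=%lu queued=%lu\n", cache_len, queued);
            return 0;
    }
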
@@ -481,14 +545,23 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
struct safexcel_crypto_priv *priv = ctx->priv;
int ret, ring;
- ctx->base.send = safexcel_ahash_send;
-
- if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
- ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+ req->needs_inv = false;
if (ctx->base.ctxr) {
- if (ctx->base.needs_inv)
- ctx->base.send = safexcel_ahash_send_inv;
+ if (priv->version == EIP197 &&
+ !ctx->base.needs_inv && req->processed &&
+ ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
+ /* We're still setting needs_inv here, even though it is
+ * cleared right away, because the needs_inv flag can be
+ * set in other functions and we want to keep the same
+ * logic.
+ */
+ ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+
+ if (ctx->base.needs_inv) {
+ ctx->base.needs_inv = false;
+ req->needs_inv = true;
+ }
} else {
ctx->base.ring = safexcel_select_ring(priv);
ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
@@ -504,8 +577,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return ret;
}
@@ -588,7 +661,6 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
export->processed = req->processed;
memcpy(export->state, req->state, req->state_sz);
- memset(export->cache, 0, crypto_ahash_blocksize(ahash));
memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
return 0;
@@ -622,6 +694,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
struct safexcel_alg_template, alg.ahash);
ctx->priv = tmpl->priv;
+ ctx->base.send = safexcel_ahash_send;
+ ctx->base.handle_result = safexcel_handle_result;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct safexcel_ahash_req));
@@ -668,9 +742,14 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
if (!ctx->base.ctxr)
return;
- ret = safexcel_ahash_exit_inv(tfm);
- if (ret)
- dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+ if (priv->version == EIP197) {
+ ret = safexcel_ahash_exit_inv(tfm);
+ if (ret)
+ dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+ } else {
+ dma_pool_free(priv->context_pool, ctx->base.ctxr,
+ ctx->base.ctxr_dma);
+ }
}
struct safexcel_alg_template safexcel_alg_sha1 = {
@@ -809,7 +888,7 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq,
req->last_req = true;
ret = crypto_ahash_update(areq);
- if (ret && ret != -EINPROGRESS)
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
return ret;
wait_for_completion_interruptible(&result.completion);
@@ -874,6 +953,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct safexcel_crypto_priv *priv = ctx->priv;
struct safexcel_ahash_export_state istate, ostate;
int ret, i;
@@ -881,11 +961,13 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
- if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
- ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
- ctx->base.needs_inv = true;
- break;
+ if (priv->version == EIP197 && ctx->base.ctxr) {
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+ if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
+ ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
+ ctx->base.needs_inv = true;
+ break;
+ }
}
}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 8705b28eb02c..717a26607bdb 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -260,12 +260,11 @@ static int setup_crypt_desc(void)
{
struct device *dev = &pdev->dev;
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
- crypt_virt = dma_alloc_coherent(dev,
- NPE_QLEN * sizeof(struct crypt_ctl),
- &crypt_phys, GFP_ATOMIC);
+ crypt_virt = dma_zalloc_coherent(dev,
+ NPE_QLEN * sizeof(struct crypt_ctl),
+ &crypt_phys, GFP_ATOMIC);
if (!crypt_virt)
return -ENOMEM;
- memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl));
return 0;
}
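
The ixp4xx change is a straight substitution: dma_zalloc_coherent() returns
already-zeroed coherent memory, so the explicit memset() that followed
dma_alloc_coherent() becomes redundant. A minimal sketch of the call
(example_alloc is an illustrative name; dev and handle are whatever the
caller already has):

    #include <linux/dma-mapping.h>

    /* Sketch: allocate a zeroed coherent DMA buffer in one call. */
    static void *example_alloc(struct device *dev, size_t size,
                               dma_addr_t *handle)
    {
            /* Equivalent to dma_alloc_coherent() + memset(.., 0, ..). */
            return dma_zalloc_coherent(dev, size, handle, GFP_ATOMIC);
    }
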
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 293832488cc9..aca2373fa1de 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -15,6 +15,7 @@
*/
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -24,6 +25,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -409,8 +411,11 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
if (IS_ERR(engine->sram))
return PTR_ERR(engine->sram);
- engine->sram_dma = phys_to_dma(cesa->dev,
- (phys_addr_t)res->start);
+ engine->sram_dma = dma_map_resource(cesa->dev, res->start,
+ cesa->sram_size,
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(cesa->dev, engine->sram_dma))
+ return -ENOMEM;
return 0;
}
@@ -420,11 +425,12 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
struct mv_cesa_engine *engine = &cesa->engines[idx];
- if (!engine->pool)
- return;
-
- gen_pool_free(engine->pool, (unsigned long)engine->sram,
- cesa->sram_size);
+ if (engine->pool)
+ gen_pool_free(engine->pool, (unsigned long)engine->sram,
+ cesa->sram_size);
+ else
+ dma_unmap_resource(cesa->dev, engine->sram_dma,
+ cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}
static int mv_cesa_probe(struct platform_device *pdev)
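
Unlike phys_to_dma(), which bypasses the DMA API entirely (hence the XXX
comment the patch attaches to the dma-direct include), dma_map_resource()
goes through the architecture's DMA ops and can fail, so its result must be
checked with dma_mapping_error() and balanced with dma_unmap_resource() on
teardown. A minimal sketch of the pairing (the example_* names are
illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Sketch: map MMIO/SRAM physical space for device DMA. */
    static int example_map_sram(struct device *dev, phys_addr_t phys,
                                size_t size, dma_addr_t *out)
    {
            dma_addr_t dma;

            dma = dma_map_resource(dev, phys, size,
                                   DMA_BIDIRECTIONAL, 0);
            if (dma_mapping_error(dev, dma))
                    return -ENOMEM;

            *out = dma;
            return 0;
    }

    static void example_unmap_sram(struct device *dev, dma_addr_t dma,
                                   size_t size)
    {
            /* Must mirror the map call exactly. */
            dma_unmap_resource(dev, dma, size, DMA_BIDIRECTIONAL, 0);
    }
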
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 48de52cf2ecc..662e709812cc 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1625,6 +1625,7 @@ static int queue_cache_init(void)
CWQ_ENTRY_SIZE, 0, NULL);
if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+ queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
return -ENOMEM;
}
return 0;
@@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void)
{
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+ queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+ queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
}
static long spu_queue_register_workfn(void *arg)
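
Nulling the queue_cache slots after kmem_cache_destroy() makes both paths
idempotent: kmem_cache_destroy() is a no-op on NULL, so a failed init
followed by the module's cleanup (or any repeated cleanup) can no longer
destroy the same cache twice. A generic sketch of the destroy-and-NULL
idiom (plain C, with free() standing in for the destructor):

    #include <stdlib.h>

    struct cache_slot {
            void *cache;
    };

    /* Sketch: destroy-and-NULL so cleanup is safe to repeat. */
    static void slot_destroy(struct cache_slot *slot)
    {
            free(slot->cache);      /* free(NULL) is a no-op, like  */
            slot->cache = NULL;     /* kmem_cache_destroy(NULL)     */
    }
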
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index f2246a5abcf6..1e87637c412d 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -743,8 +743,8 @@ static int nx842_open_percpu_txwins(void)
}
if (!per_cpu(cpu_txwin, i)) {
- /* shoudn't happen, Each chip will have NX engine */
- pr_err("NX engine is not availavle for CPU %d\n", i);
+ /* shouldn't happen, each chip will have an NX engine */
+ pr_err("NX engine is not available for CPU %d\n", i);
return -EINVAL;
}
}
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 5a6dc53b2b9d..4ef52c9d72fc 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1618,7 +1618,7 @@ MODULE_DEVICE_TABLE(of, spacc_of_id_table);
static int spacc_probe(struct platform_device *pdev)
{
- int i, err, ret = -EINVAL;
+ int i, err, ret;
struct resource *mem, *irq;
struct device_node *np = pdev->dev.of_node;
struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
@@ -1679,22 +1679,18 @@ static int spacc_probe(struct platform_device *pdev)
engine->clk = clk_get(&pdev->dev, "ref");
if (IS_ERR(engine->clk)) {
dev_info(&pdev->dev, "clk unavailable\n");
- device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
return PTR_ERR(engine->clk);
}
if (clk_prepare_enable(engine->clk)) {
dev_info(&pdev->dev, "unable to prepare/enable clk\n");
- clk_put(engine->clk);
- return -EIO;
+ ret = -EIO;
+ goto err_clk_put;
}
- err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
- if (err) {
- clk_disable_unprepare(engine->clk);
- clk_put(engine->clk);
- return err;
- }
+ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+ if (ret)
+ goto err_clk_disable;
/*
@@ -1725,6 +1721,7 @@ static int spacc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, engine);
+ ret = -EINVAL;
INIT_LIST_HEAD(&engine->registered_algs);
for (i = 0; i < engine->num_algs; ++i) {
engine->algs[i].engine = engine;
@@ -1759,6 +1756,16 @@ static int spacc_probe(struct platform_device *pdev)
engine->aeads[i].alg.base.cra_name);
}
+ if (!ret)
+ return 0;
+
+ del_timer_sync(&engine->packet_timeout);
+ device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+err_clk_disable:
+ clk_disable_unprepare(engine->clk);
+err_clk_put:
+ clk_put(engine->clk);
+
return ret;
}
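
The picoxcell rework converts ad-hoc cleanup into the conventional kernel
error-unwind ladder: acquire resources in order, and on failure jump to a
label that releases everything acquired so far, in reverse order. A
condensed, self-contained sketch of the shape (the acquire_*/release_*
functions are hypothetical stand-ins):

    #include <errno.h>
    #include <stdio.h>

    static int acquire_a(void) { return 0; }        /* hypothetical */
    static int acquire_b(void) { return -EIO; }     /* fails here   */
    static void release_a(void) { puts("release a"); }

    /* Sketch of the goto error-unwind ladder used above. */
    static int example_probe(void)
    {
            int ret;

            ret = acquire_a();
            if (ret)
                    return ret;

            ret = acquire_b();
            if (ret)
                    goto err_release_a;

            return 0;

    err_release_a:
            release_a();            /* undo in reverse order */
            return ret;
    }

    int main(void)
    {
            return example_probe() ? 1 : 0;
    }
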
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 8c4fd255a601..ff149e176f64 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -117,19 +117,19 @@ void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
#define CSR_RETRY_TIMES 500
static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
- unsigned char ae, unsigned int csr,
- unsigned int *value)
+ unsigned char ae, unsigned int csr)
{
unsigned int iterations = CSR_RETRY_TIMES;
+ int value;
do {
- *value = GET_AE_CSR(handle, ae, csr);
+ value = GET_AE_CSR(handle, ae, csr);
if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
- return 0;
+ return value;
} while (iterations--);
pr_err("QAT: Read CSR timeout\n");
- return -EFAULT;
+ return 0;
}
static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
@@ -154,9 +154,9 @@ static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
+ *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
@@ -169,13 +169,13 @@ static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
int times = MAX_RETRY_TIMES;
int elapsed_cycles = 0;
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+ base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0xffff;
while ((int)cycles > elapsed_cycles && times--) {
if (chk_inactive)
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+ cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0xffff;
elapsed_cycles = cur_cnt - base_cnt;
@@ -207,7 +207,7 @@ int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
}
/* Sets the acceleration engine context mode to either four or eight */
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr = IGNORE_W1C_MASK & csr;
new_csr = (mode == 4) ?
SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
@@ -221,7 +221,7 @@ int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
{
unsigned int csr, new_csr;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
new_csr = (mode) ?
@@ -240,7 +240,7 @@ int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
{
unsigned int csr, new_csr;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
switch (lm_type) {
case ICP_LMEM0:
@@ -328,7 +328,7 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
@@ -340,16 +340,18 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
-static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
- unsigned int ae_csr, unsigned int *csr_val)
+ unsigned int ae_csr)
{
- unsigned int cur_ctx;
+ unsigned int cur_ctx, csr_val;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
- qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+
+ return csr_val;
}
static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
@@ -358,7 +360,7 @@ static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
@@ -374,7 +376,7 @@ static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
@@ -392,13 +394,11 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
int times = MAX_RETRY_TIMES;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
- (unsigned int *)&base_cnt);
+ base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0xffff;
do {
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
- (unsigned int *)&cur_cnt);
+ cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0xffff;
} while (times-- && (cur_cnt == base_cnt));
@@ -416,8 +416,8 @@ int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
{
unsigned int enable = 0, active = 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active);
+ enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
(active & (1 << ACS_ABO_BITPOS)))
return 1;
@@ -540,7 +540,7 @@ static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+ ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx &= IGNORE_W1C_MASK &
(~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
@@ -583,7 +583,7 @@ void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned int ustore_addr;
unsigned int i;
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
@@ -604,7 +604,7 @@ static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+ ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx &= IGNORE_W1C_MASK;
ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
ctx |= (ctx_mask << CE_ENABLE_BITPOS);
@@ -636,10 +636,10 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
int ret = 0;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr_val &= IGNORE_W1C_MASK;
csr_val |= CE_NN_MODE;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
@@ -648,7 +648,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
handle->hal_handle->upc_mask &
INIT_PC_VALUE);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
qat_hal_wr_indr_csr(handle, ae, ctx_mask,
@@ -760,7 +760,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
unsigned int csr_val = 0;
- qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
csr_val |= 0x1;
qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
}
@@ -826,16 +826,16 @@ static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned int i, uwrd_lo, uwrd_hi;
unsigned int ustore_addr, misc_control;
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
+ misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
misc_control & 0xfffffffb);
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
for (i = 0; i < words_num; i++) {
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
uaddr++;
- qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
- qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
+ uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
+ uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
uword[i] = uwrd_hi;
uword[i] = (uword[i] << 0x20) | uwrd_lo;
}
@@ -849,7 +849,7 @@ void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
{
unsigned int i, ustore_addr;
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
@@ -890,26 +890,27 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
return -EINVAL;
}
/* save current context */
- qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
- qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
- qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
- &ind_lm_addr_byte0);
- qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
- &ind_lm_addr_byte1);
+ ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
+ ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
+ ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_0_BYTE_INDEX);
+ ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_1_BYTE_INDEX);
if (inst_num <= MAX_EXEC_INST)
qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
+ savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
- qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
- qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
- &ind_cnt_sig);
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
- qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
+ savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+ ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+ FUTURE_COUNT_SIGNAL_INDIRECT);
+ ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+ CTX_SIG_EVENTS_INDIRECT);
+ act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
/* execute micro codes */
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
@@ -927,8 +928,8 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
if (endpc) {
unsigned int ctx_status;
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
- &ctx_status);
+ ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
+ CTX_STS_INDIRECT);
*endpc = ctx_status & handle->hal_handle->upc_mask;
}
/* restore to saved context */
@@ -938,7 +939,7 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
handle->hal_handle->upc_mask & savpc);
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
@@ -986,16 +987,16 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
break;
}
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
if (ctx != (savctx & ACS_ACNO))
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
ctx & ACS_ACNO);
qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr = UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
insts = qat_hal_set_uword_ecc(insts);
@@ -1011,7 +1012,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
* the instruction should have been executed
* prior to clearing the ECS in putUwords
*/
- qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
+ *data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
if (ctx != (savctx & ACS_ACNO))
@@ -1188,7 +1189,7 @@ static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
unsigned short mask;
unsigned short dr_offset = 0x10;
- status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ status = ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
@@ -1238,7 +1239,7 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
const unsigned short gprnum = 0, dly = num_inst * 0x5;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
@@ -1282,7 +1283,7 @@ static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
unsigned int ctx_enables;
int stat = 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
@@ -1299,7 +1300,7 @@ static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
{
unsigned int ctx_enables;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (ctx_enables & CE_INUSE_CONTEXTS) {
/* 4-ctx mode */
*relreg = absreg_num & 0x1F;
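
The qat_hal refactor above trades an out-parameter (plus an error code that
almost no caller checked) for a direct return value, at the cost of folding
the timeout case into a returned 0. The shape of the transformation, in a
generic sketch (plain C; read_reg_old/read_reg_new are illustrative):

    #include <stdio.h>

    /* Before: value through an out-parameter, status returned. */
    static int read_reg_old(unsigned int reg, unsigned int *value)
    {
            *value = reg * 2;       /* stand-in for a CSR read */
            return 0;
    }

    /* After: value returned directly; call sites read naturally. */
    static unsigned int read_reg_new(unsigned int reg)
    {
            return reg * 2;         /* stand-in for a CSR read */
    }

    int main(void)
    {
            unsigned int v;

            read_reg_old(3, &v);
            printf("%u %u\n", v, read_reg_new(3));
            return 0;
    }
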
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 142c6020cec7..188f44b7eb27 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1,17 +1,13 @@
-/*
- * Cryptographic API.
- *
- * Support for Samsung S5PV210 and Exynos HW acceleration.
- *
- * Copyright (C) 2011 NetUP Inc. All rights reserved.
- * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Hash part based on omap-sham.c driver.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Cryptographic API.
+//
+// Support for Samsung S5PV210 and Exynos HW acceleration.
+//
+// Copyright (C) 2011 NetUP Inc. All rights reserved.
+// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
+//
+// Hash part based on omap-sham.c driver.
#include <linux/clk.h>
#include <linux/crypto.h>
@@ -1461,7 +1457,7 @@ static void s5p_hash_tasklet_cb(unsigned long data)
&dd->hash_flags)) {
/* hash or semi-hash ready */
clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
- goto finish;
+ goto finish;
}
}
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 602332e02729..63aa78c0b12b 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -1,4 +1,4 @@
-config CRC_DEV_STM32
+config CRYPTO_DEV_STM32_CRC
tristate "Support for STM32 crc accelerators"
depends on ARCH_STM32
select CRYPTO_HASH
@@ -6,7 +6,7 @@ config CRC_DEV_STM32
This enables support for the CRC32 hw accelerator which can be found
on STMicroelectronics STM32 SOC.
-config HASH_DEV_STM32
+config CRYPTO_DEV_STM32_HASH
tristate "Support for STM32 hash accelerators"
depends on ARCH_STM32
depends on HAS_DMA
@@ -18,3 +18,12 @@ config HASH_DEV_STM32
help
This enables support for the HASH hw accelerator which can be found
on STMicroelectronics STM32 SOC.
+
+config CRYPTO_DEV_STM32_CRYP
+ tristate "Support for STM32 cryp accelerators"
+ depends on ARCH_STM32
+ select CRYPTO_HASH
+ select CRYPTO_ENGINE
+ help
+ This enables support for the CRYP (AES/DES/TDES) hw accelerator which
+ can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index 73cd56cad0cc..53d1bb94b221 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_CRC_DEV_STM32) += stm32_crc32.o
-obj-$(CONFIG_HASH_DEV_STM32) += stm32-hash.o
\ No newline at end of file
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
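
The new stm32-cryp driver that follows is built on the crypto_engine
framework: requests are handed off with
crypto_transfer_cipher_request_to_engine(), the engine calls back into the
driver's prepare and one-request hooks, and completion is signalled with
crypto_finalize_cipher_request(). A minimal sketch of the submit and
finalize halves as used by this era's API (my_dev and the my_* functions
are hypothetical):

    #include <crypto/engine.h>

    struct my_dev {
            struct crypto_engine *engine;
            struct ablkcipher_request *req;
    };

    /* Sketch: queue a request onto the engine's kthread. */
    static int my_crypt(struct my_dev *dev,
                        struct ablkcipher_request *req)
    {
            return crypto_transfer_cipher_request_to_engine(dev->engine,
                                                            req);
    }

    /* Sketch: called from the IRQ/completion path when done. */
    static void my_done(struct my_dev *dev, int err)
    {
            crypto_finalize_cipher_request(dev->engine, dev->req, err);
            dev->req = NULL;
    }
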
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
new file mode 100644
index 000000000000..4a06a7a665ee
--- /dev/null
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -0,0 +1,1170 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Fabien Dessenne <fabien.dessenne@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/scatterwalk.h>
+
+#define DRIVER_NAME "stm32-cryp"
+
+/* Bit [0] encrypt / decrypt */
+#define FLG_ENCRYPT BIT(0)
+/* Bit [8..1] algo & operation mode */
+#define FLG_AES BIT(1)
+#define FLG_DES BIT(2)
+#define FLG_TDES BIT(3)
+#define FLG_ECB BIT(4)
+#define FLG_CBC BIT(5)
+#define FLG_CTR BIT(6)
+/* Mode mask = bits [15..0] */
+#define FLG_MODE_MASK GENMASK(15, 0)
+
+/* Registers */
+#define CRYP_CR 0x00000000
+#define CRYP_SR 0x00000004
+#define CRYP_DIN 0x00000008
+#define CRYP_DOUT 0x0000000C
+#define CRYP_DMACR 0x00000010
+#define CRYP_IMSCR 0x00000014
+#define CRYP_RISR 0x00000018
+#define CRYP_MISR 0x0000001C
+#define CRYP_K0LR 0x00000020
+#define CRYP_K0RR 0x00000024
+#define CRYP_K1LR 0x00000028
+#define CRYP_K1RR 0x0000002C
+#define CRYP_K2LR 0x00000030
+#define CRYP_K2RR 0x00000034
+#define CRYP_K3LR 0x00000038
+#define CRYP_K3RR 0x0000003C
+#define CRYP_IV0LR 0x00000040
+#define CRYP_IV0RR 0x00000044
+#define CRYP_IV1LR 0x00000048
+#define CRYP_IV1RR 0x0000004C
+
+/* Registers values */
+#define CR_DEC_NOT_ENC 0x00000004
+#define CR_TDES_ECB 0x00000000
+#define CR_TDES_CBC 0x00000008
+#define CR_DES_ECB 0x00000010
+#define CR_DES_CBC 0x00000018
+#define CR_AES_ECB 0x00000020
+#define CR_AES_CBC 0x00000028
+#define CR_AES_CTR 0x00000030
+#define CR_AES_KP 0x00000038
+#define CR_AES_UNKNOWN 0xFFFFFFFF
+#define CR_ALGO_MASK 0x00080038
+#define CR_DATA32 0x00000000
+#define CR_DATA16 0x00000040
+#define CR_DATA8 0x00000080
+#define CR_DATA1 0x000000C0
+#define CR_KEY128 0x00000000
+#define CR_KEY192 0x00000100
+#define CR_KEY256 0x00000200
+#define CR_FFLUSH 0x00004000
+#define CR_CRYPEN 0x00008000
+
+#define SR_BUSY 0x00000010
+#define SR_OFNE 0x00000004
+
+#define IMSCR_IN BIT(0)
+#define IMSCR_OUT BIT(1)
+
+#define MISR_IN BIT(0)
+#define MISR_OUT BIT(1)
+
+/* Misc */
+#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
+#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
+#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
+
+struct stm32_cryp_ctx {
+ struct stm32_cryp *cryp;
+ int keylen;
+ u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ unsigned long flags;
+};
+
+struct stm32_cryp_reqctx {
+ unsigned long mode;
+};
+
+struct stm32_cryp {
+ struct list_head list;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long flags;
+ u32 irq_status;
+ struct stm32_cryp_ctx *ctx;
+
+ struct crypto_engine *engine;
+
+ struct mutex lock; /* protects req */
+ struct ablkcipher_request *req;
+
+ size_t hw_blocksize;
+
+ size_t total_in;
+ size_t total_in_save;
+ size_t total_out;
+ size_t total_out_save;
+
+ struct scatterlist *in_sg;
+ struct scatterlist *out_sg;
+ struct scatterlist *out_sg_save;
+
+ struct scatterlist in_sgl;
+ struct scatterlist out_sgl;
+ bool sgs_copied;
+
+ int in_sg_len;
+ int out_sg_len;
+
+ struct scatter_walk in_walk;
+ struct scatter_walk out_walk;
+
+ u32 last_ctr[4];
+};
+
+struct stm32_cryp_list {
+ struct list_head dev_list;
+ spinlock_t lock; /* protect dev_list */
+};
+
+static struct stm32_cryp_list cryp_list = {
+ .dev_list = LIST_HEAD_INIT(cryp_list.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(cryp_list.lock),
+};
+
+static inline bool is_aes(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_AES;
+}
+
+static inline bool is_des(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_DES;
+}
+
+static inline bool is_tdes(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_TDES;
+}
+
+static inline bool is_ecb(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_ECB;
+}
+
+static inline bool is_cbc(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_CBC;
+}
+
+static inline bool is_ctr(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_CTR;
+}
+
+static inline bool is_encrypt(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_ENCRYPT;
+}
+
+static inline bool is_decrypt(struct stm32_cryp *cryp)
+{
+ return !is_encrypt(cryp);
+}
+
+static inline u32 stm32_cryp_read(struct stm32_cryp *cryp, u32 ofst)
+{
+ return readl_relaxed(cryp->regs + ofst);
+}
+
+static inline void stm32_cryp_write(struct stm32_cryp *cryp, u32 ofst, u32 val)
+{
+ writel_relaxed(val, cryp->regs + ofst);
+}
+
+static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status,
+ !(status & SR_BUSY), 10, 100000);
+}
+
+static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
+{
+ struct stm32_cryp *tmp, *cryp = NULL;
+
+ spin_lock_bh(&cryp_list.lock);
+ if (!ctx->cryp) {
+ list_for_each_entry(tmp, &cryp_list.dev_list, list) {
+ cryp = tmp;
+ break;
+ }
+ ctx->cryp = cryp;
+ } else {
+ cryp = ctx->cryp;
+ }
+
+ spin_unlock_bh(&cryp_list.lock);
+
+ return cryp;
+}
+
+static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total,
+ size_t align)
+{
+ int len = 0;
+
+ if (!total)
+ return 0;
+
+ if (!IS_ALIGNED(total, align))
+ return -EINVAL;
+
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+ return -EINVAL;
+
+ if (!IS_ALIGNED(sg->length, align))
+ return -EINVAL;
+
+ len += sg->length;
+ sg = sg_next(sg);
+ }
+
+ if (len != total)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp)
+{
+ int ret;
+
+ ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in,
+ cryp->hw_blocksize);
+ if (ret)
+ return ret;
+
+ ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out,
+ cryp->hw_blocksize);
+
+ return ret;
+}
+
+static void sg_copy_buf(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes, int out)
+{
+ struct scatter_walk walk;
+
+ if (!nbytes)
+ return;
+
+ scatterwalk_start(&walk, sg);
+ scatterwalk_advance(&walk, start);
+ scatterwalk_copychunks(buf, &walk, nbytes, out);
+ scatterwalk_done(&walk, out, 0);
+}
+
+static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp)
+{
+ void *buf_in, *buf_out;
+ int pages, total_in, total_out;
+
+ if (!stm32_cryp_check_io_aligned(cryp)) {
+ cryp->sgs_copied = 0;
+ return 0;
+ }
+
+ total_in = ALIGN(cryp->total_in, cryp->hw_blocksize);
+ pages = total_in ? get_order(total_in) : 1;
+ buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+ total_out = ALIGN(cryp->total_out, cryp->hw_blocksize);
+ pages = total_out ? get_order(total_out) : 1;
+ buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+ if (!buf_in || !buf_out) {
+ dev_err(cryp->dev, "Can't allocate pages when unaligned\n");
+ cryp->sgs_copied = 0;
+ return -EFAULT;
+ }
+
+ sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0);
+
+ sg_init_one(&cryp->in_sgl, buf_in, total_in);
+ cryp->in_sg = &cryp->in_sgl;
+ cryp->in_sg_len = 1;
+
+ sg_init_one(&cryp->out_sgl, buf_out, total_out);
+ cryp->out_sg_save = cryp->out_sg;
+ cryp->out_sg = &cryp->out_sgl;
+ cryp->out_sg_len = 1;
+
+ cryp->sgs_copied = 1;
+
+ return 0;
+}
+
+static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
+{
+ if (!iv)
+ return;
+
+ stm32_cryp_write(cryp, CRYP_IV0LR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV0RR, cpu_to_be32(*iv++));
+
+ if (is_aes(cryp)) {
+ stm32_cryp_write(cryp, CRYP_IV1LR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV1RR, cpu_to_be32(*iv++));
+ }
+}
+
+static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
+{
+ unsigned int i;
+ int r_id;
+
+ if (is_des(c)) {
+ stm32_cryp_write(c, CRYP_K1LR, cpu_to_be32(c->ctx->key[0]));
+ stm32_cryp_write(c, CRYP_K1RR, cpu_to_be32(c->ctx->key[1]));
+ } else {
+ r_id = CRYP_K3RR;
+ for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4)
+ stm32_cryp_write(c, r_id,
+ cpu_to_be32(c->ctx->key[i - 1]));
+ }
+}
+
+static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp)
+{
+ if (is_aes(cryp) && is_ecb(cryp))
+ return CR_AES_ECB;
+
+ if (is_aes(cryp) && is_cbc(cryp))
+ return CR_AES_CBC;
+
+ if (is_aes(cryp) && is_ctr(cryp))
+ return CR_AES_CTR;
+
+ if (is_des(cryp) && is_ecb(cryp))
+ return CR_DES_ECB;
+
+ if (is_des(cryp) && is_cbc(cryp))
+ return CR_DES_CBC;
+
+ if (is_tdes(cryp) && is_ecb(cryp))
+ return CR_TDES_ECB;
+
+ if (is_tdes(cryp) && is_cbc(cryp))
+ return CR_TDES_CBC;
+
+ dev_err(cryp->dev, "Unknown mode\n");
+ return CR_AES_UNKNOWN;
+}
+
+static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
+{
+ int ret;
+ u32 cfg, hw_mode;
+
+ /* Disable interrupt */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+
+ /* Set key */
+ stm32_cryp_hw_write_key(cryp);
+
+ /* Set configuration */
+ cfg = CR_DATA8 | CR_FFLUSH;
+
+ switch (cryp->ctx->keylen) {
+ case AES_KEYSIZE_128:
+ cfg |= CR_KEY128;
+ break;
+
+ case AES_KEYSIZE_192:
+ cfg |= CR_KEY192;
+ break;
+
+ default:
+ case AES_KEYSIZE_256:
+ cfg |= CR_KEY256;
+ break;
+ }
+
+ hw_mode = stm32_cryp_get_hw_mode(cryp);
+ if (hw_mode == CR_AES_UNKNOWN)
+ return -EINVAL;
+
+ /* AES ECB/CBC decrypt: run key preparation first */
+ if (is_decrypt(cryp) &&
+ ((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) {
+ stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP | CR_CRYPEN);
+
+ /* Wait for end of processing */
+ ret = stm32_cryp_wait_busy(cryp);
+ if (ret) {
+ dev_err(cryp->dev, "Timeout (key preparation)\n");
+ return ret;
+ }
+ }
+
+ cfg |= hw_mode;
+
+ if (is_decrypt(cryp))
+ cfg |= CR_DEC_NOT_ENC;
+
+ /* Apply config and flush (valid when CRYPEN = 0) */
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ switch (hw_mode) {
+ case CR_DES_CBC:
+ case CR_TDES_CBC:
+ case CR_AES_CBC:
+ case CR_AES_CTR:
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->info);
+ break;
+
+ default:
+ break;
+ }
+
+ /* Enable now */
+ cfg |= CR_CRYPEN;
+
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ return 0;
+}
+
+static void stm32_cryp_finish_req(struct stm32_cryp *cryp)
+{
+ int err = 0;
+
+ if (cryp->sgs_copied) {
+ void *buf_in, *buf_out;
+ int pages, len;
+
+ buf_in = sg_virt(&cryp->in_sgl);
+ buf_out = sg_virt(&cryp->out_sgl);
+
+ sg_copy_buf(buf_out, cryp->out_sg_save, 0,
+ cryp->total_out_save, 1);
+
+ len = ALIGN(cryp->total_in_save, cryp->hw_blocksize);
+ pages = len ? get_order(len) : 1;
+ free_pages((unsigned long)buf_in, pages);
+
+ len = ALIGN(cryp->total_out_save, cryp->hw_blocksize);
+ pages = len ? get_order(len) : 1;
+ free_pages((unsigned long)buf_out, pages);
+ }
+
+ crypto_finalize_cipher_request(cryp->engine, cryp->req, err);
+ cryp->req = NULL;
+
+ memset(cryp->ctx->key, 0, cryp->ctx->keylen);
+
+ mutex_unlock(&cryp->lock);
+}
+
+static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
+{
+ /* Enable interrupt and let the IRQ handler do everything */
+ stm32_cryp_write(cryp, CRYP_IMSCR, IMSCR_IN | IMSCR_OUT);
+
+ return 0;
+}
+
+static int stm32_cryp_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx);
+
+ return 0;
+}
+
+static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx);
+
+ if (!cryp)
+ return -ENODEV;
+
+ rctx->mode = mode;
+
+ return crypto_transfer_cipher_request_to_engine(cryp->engine, req);
+}
+
+static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != (3 * DES_KEY_SIZE))
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_ECB);
+}
+
+static int stm32_cryp_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CBC);
+}
+
+static int stm32_cryp_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
+}
+
+static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_ECB);
+}
+
+static int stm32_cryp_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_CBC);
+}
+
+static int stm32_cryp_tdes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_tdes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB);
+}
+
+static int stm32_cryp_tdes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
+}
+
+static int stm32_cryp_prepare_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct stm32_cryp_ctx *ctx;
+ struct stm32_cryp *cryp;
+ struct stm32_cryp_reqctx *rctx;
+ int ret;
+
+ if (!req)
+ return -EINVAL;
+
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ mutex_lock(&cryp->lock);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->mode &= FLG_MODE_MASK;
+
+ ctx->cryp = cryp;
+
+ cryp->flags = (cryp->flags & ~FLG_MODE_MASK) | rctx->mode;
+ cryp->hw_blocksize = is_aes(cryp) ? AES_BLOCK_SIZE : DES_BLOCK_SIZE;
+ cryp->ctx = ctx;
+
+ cryp->req = req;
+ cryp->total_in = req->nbytes;
+ cryp->total_out = cryp->total_in;
+
+ cryp->total_in_save = cryp->total_in;
+ cryp->total_out_save = cryp->total_out;
+
+ cryp->in_sg = req->src;
+ cryp->out_sg = req->dst;
+ cryp->out_sg_save = cryp->out_sg;
+
+ cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in);
+ if (cryp->in_sg_len < 0) {
+ dev_err(cryp->dev, "Cannot get in_sg_len\n");
+ ret = cryp->in_sg_len;
+ goto out;
+ }
+
+ cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
+ if (cryp->out_sg_len < 0) {
+ dev_err(cryp->dev, "Cannot get out_sg_len\n");
+ ret = cryp->out_sg_len;
+ goto out;
+ }
+
+ ret = stm32_cryp_copy_sgs(cryp);
+ if (ret)
+ goto out;
+
+ scatterwalk_start(&cryp->in_walk, cryp->in_sg);
+ scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+
+ ret = stm32_cryp_hw_init(cryp);
+out:
+ if (ret)
+ mutex_unlock(&cryp->lock);
+
+ return ret;
+}
+
+static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ return stm32_cryp_prepare_req(engine, req);
+}
+
+static int stm32_cryp_cipher_one_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp *cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ return stm32_cryp_cpu_start(cryp);
+}
+
+static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst,
+ unsigned int n)
+{
+ scatterwalk_advance(&cryp->out_walk, n);
+
+ if (unlikely(cryp->out_sg->length == _walked_out)) {
+ cryp->out_sg = sg_next(cryp->out_sg);
+ if (cryp->out_sg) {
+ scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+ return (sg_virt(cryp->out_sg) + _walked_out);
+ }
+ }
+
+ return (u32 *)((u8 *)dst + n);
+}
+
+static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src,
+ unsigned int n)
+{
+ scatterwalk_advance(&cryp->in_walk, n);
+
+ if (unlikely(cryp->in_sg->length == _walked_in)) {
+ cryp->in_sg = sg_next(cryp->in_sg);
+ if (cryp->in_sg) {
+ scatterwalk_start(&cryp->in_walk, cryp->in_sg);
+ return (sg_virt(cryp->in_sg) + _walked_in);
+ }
+ }
+
+ return (u32 *)((u8 *)src + n);
+}
+
+static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
+{
+ u32 cr;
+
+ if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) {
+ cryp->last_ctr[3] = 0;
+ cryp->last_ctr[2]++;
+ if (!cryp->last_ctr[2]) {
+ cryp->last_ctr[1]++;
+ if (!cryp->last_ctr[1])
+ cryp->last_ctr[0]++;
+ }
+
+ cr = stm32_cryp_read(cryp, CRYP_CR);
+ stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN);
+
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->last_ctr);
+
+ stm32_cryp_write(cryp, CRYP_CR, cr);
+ }
+
+ cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR);
+ cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR);
+ cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR);
+ cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR);
+}
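
The wrap handling above exists presumably because the hardware counter only steps the low IV word: when last_ctr[3] is about to wrap, the driver carries into the upper words and reprograms the whole IV by hand. For comparison, a generic 128-bit big-endian increment over the same four-word layout would look like this (illustrative only, not used by the driver):

static void ctr128_increment(u32 ctr[4])
{
	int i;

	/* ctr[3] is the least-significant word; propagate the carry up. */
	for (i = 3; i >= 0; i--)
		if (++ctr[i] != 0)
			break;
}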
+
+static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
+{
+ unsigned int i, j;
+ u32 d32, *dst;
+ u8 *d8;
+
+ dst = sg_virt(cryp->out_sg) + _walked_out;
+
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
+ if (likely(cryp->total_out >= sizeof(u32))) {
+ /* Read a full u32 */
+ *dst = stm32_cryp_read(cryp, CRYP_DOUT);
+
+ dst = stm32_cryp_next_out(cryp, dst, sizeof(u32));
+ cryp->total_out -= sizeof(u32);
+ } else if (!cryp->total_out) {
+			/* Drain the output FIFO (data produced by input padding) */
+ d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+ } else {
+			/* Read less than a u32 */
+ d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+ d8 = (u8 *)&d32;
+
+ for (j = 0; j < cryp->total_out; j++) {
+ *((u8 *)dst) = *(d8++);
+ dst = stm32_cryp_next_out(cryp, dst, 1);
+ }
+ cryp->total_out = 0;
+ }
+ }
+
+ return !cryp->total_out || !cryp->total_in;
+}
+
+static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
+{
+ unsigned int i, j;
+ u32 *src;
+ u8 d8[4];
+
+ src = sg_virt(cryp->in_sg) + _walked_in;
+
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
+ if (likely(cryp->total_in >= sizeof(u32))) {
+ /* Write a full u32 */
+ stm32_cryp_write(cryp, CRYP_DIN, *src);
+
+ src = stm32_cryp_next_in(cryp, src, sizeof(u32));
+ cryp->total_in -= sizeof(u32);
+ } else if (!cryp->total_in) {
+ /* Write padding data */
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+ } else {
+			/* Write less than a u32 */
+ memset(d8, 0, sizeof(u32));
+ for (j = 0; j < cryp->total_in; j++) {
+ d8[j] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+ }
+
+ stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+ cryp->total_in = 0;
+ }
+ }
+}
+
+static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
+{
+ if (unlikely(!cryp->total_in)) {
+ dev_warn(cryp->dev, "No more data to process\n");
+ return;
+ }
+
+ if (is_aes(cryp) && is_ctr(cryp))
+ stm32_cryp_check_ctr_counter(cryp);
+
+ stm32_cryp_irq_write_block(cryp);
+}
+
+static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
+{
+ struct stm32_cryp *cryp = arg;
+
+ if (cryp->irq_status & MISR_OUT)
+ /* Output FIFO IRQ: read data */
+ if (unlikely(stm32_cryp_irq_read_data(cryp))) {
+ /* All bytes processed, finish */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_finish_req(cryp);
+ return IRQ_HANDLED;
+ }
+
+ if (cryp->irq_status & MISR_IN) {
+ /* Input FIFO IRQ: write data */
+ stm32_cryp_irq_write_data(cryp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32_cryp_irq(int irq, void *arg)
+{
+ struct stm32_cryp *cryp = arg;
+
+ cryp->irq_status = stm32_cryp_read(cryp, CRYP_MISR);
+
+ return IRQ_WAKE_THREAD;
+}
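
A note on the interrupt split above (illustrative remark, not part of the patch):

/*
 * The hard handler only snapshots CRYP_MISR and returns
 * IRQ_WAKE_THREAD; all FIFO draining and filling happens in
 * stm32_cryp_irq_thread(). Because the probe code registers the pair
 * with devm_request_threaded_irq(..., IRQF_ONESHOT, ...), the line
 * stays masked until the thread returns, so irq_status cannot be
 * overwritten by a re-fired interrupt in between.
 */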
+
+static struct crypto_alg crypto_algs[] = {
+{
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "stm32-ecb-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ecb_encrypt,
+ .decrypt = stm32_cryp_aes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "stm32-cbc-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_cbc_encrypt,
+ .decrypt = stm32_cryp_aes_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "stm32-ctr-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ctr_encrypt,
+ .decrypt = stm32_cryp_aes_ctr_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "stm32-ecb-des",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_ecb_encrypt,
+ .decrypt = stm32_cryp_des_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "stm32-cbc-des",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_cbc_encrypt,
+ .decrypt = stm32_cryp_des_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "stm32-ecb-des3",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_ecb_encrypt,
+ .decrypt = stm32_cryp_tdes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "stm32-cbc-des3",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_cbc_encrypt,
+ .decrypt = stm32_cryp_tdes_cbc_decrypt,
+ }
+},
+};
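
One remark on the priorities used throughout this table (illustrative, not part of the patch):

/*
 * cra_priority = 200 lets these hardware implementations win over the
 * generic software ciphers (which typically register with priority
 * 100) when a caller requests a plain "cbc(aes)" etc. rather than the
 * explicit "stm32-cbc-aes" driver name.
 */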
+
+static const struct of_device_id stm32_dt_ids[] = {
+ { .compatible = "st,stm32f756-cryp", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_dt_ids);
+
+static int stm32_cryp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_cryp *cryp;
+ struct resource *res;
+ struct reset_control *rst;
+ int irq, ret;
+
+ cryp = devm_kzalloc(dev, sizeof(*cryp), GFP_KERNEL);
+ if (!cryp)
+ return -ENOMEM;
+
+ cryp->dev = dev;
+
+ mutex_init(&cryp->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cryp->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cryp->regs))
+ return PTR_ERR(cryp->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Cannot get IRQ resource\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq,
+ stm32_cryp_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), cryp);
+ if (ret) {
+ dev_err(dev, "Cannot grab IRQ\n");
+ return ret;
+ }
+
+ cryp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cryp->clk)) {
+ dev_err(dev, "Could not get clock\n");
+ return PTR_ERR(cryp->clk);
+ }
+
+ ret = clk_prepare_enable(cryp->clk);
+ if (ret) {
+ dev_err(cryp->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ rst = devm_reset_control_get(dev, NULL);
+ if (!IS_ERR(rst)) {
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+ }
+
+ platform_set_drvdata(pdev, cryp);
+
+ spin_lock(&cryp_list.lock);
+ list_add(&cryp->list, &cryp_list.dev_list);
+ spin_unlock(&cryp_list.lock);
+
+ /* Initialize crypto engine */
+ cryp->engine = crypto_engine_alloc_init(dev, 1);
+ if (!cryp->engine) {
+ dev_err(dev, "Could not init crypto engine\n");
+ ret = -ENOMEM;
+ goto err_engine1;
+ }
+
+ cryp->engine->prepare_cipher_request = stm32_cryp_prepare_cipher_req;
+ cryp->engine->cipher_one_request = stm32_cryp_cipher_one_req;
+
+ ret = crypto_engine_start(cryp->engine);
+ if (ret) {
+ dev_err(dev, "Could not start crypto engine\n");
+ goto err_engine2;
+ }
+
+ ret = crypto_register_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ if (ret) {
+ dev_err(dev, "Could not register algs\n");
+ goto err_algs;
+ }
+
+ dev_info(dev, "Initialized\n");
+
+ return 0;
+
+err_algs:
+err_engine2:
+ crypto_engine_exit(cryp->engine);
+err_engine1:
+ spin_lock(&cryp_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&cryp_list.lock);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return ret;
+}
+
+static int stm32_cryp_remove(struct platform_device *pdev)
+{
+ struct stm32_cryp *cryp = platform_get_drvdata(pdev);
+
+ if (!cryp)
+ return -ENODEV;
+
+ crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+
+ crypto_engine_exit(cryp->engine);
+
+ spin_lock(&cryp_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&cryp_list.lock);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return 0;
+}
+
+static struct platform_driver stm32_cryp_driver = {
+ .probe = stm32_cryp_probe,
+ .remove = stm32_cryp_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = stm32_dt_ids,
+ },
+};
+
+module_platform_driver(stm32_cryp_driver);
+
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_DESCRIPTION("STMicrolectronics STM32 CRYP hardware driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index 090582baecfe..8f09b8430893 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -208,6 +208,7 @@ static struct shash_alg algs[] = {
.cra_name = "crc32",
.cra_driver_name = DRIVER_NAME,
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct stm32_crc_ctx),
@@ -229,6 +230,7 @@ static struct shash_alg algs[] = {
.cra_name = "crc32c",
.cra_driver_name = DRIVER_NAME,
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct stm32_crc_ctx),
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 78fb496ecb4e..fe2af6aa88fc 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -737,7 +737,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
devfreq = devfreq_add_device(dev, profile, governor_name, data);
if (IS_ERR(devfreq)) {
devres_free(ptr);
- return ERR_PTR(-ENOMEM);
+ return devfreq;
}
*ptr = devfreq;
@@ -996,7 +996,8 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
if (df->governor == governor) {
ret = 0;
goto out;
- } else if (df->governor->immutable || governor->immutable) {
+ } else if ((df->governor && df->governor->immutable) ||
+ governor->immutable) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index bc1cb284111c..12b62d0aac27 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -157,13 +157,13 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
spin_unlock_irqrestore(&dcb->poll->lock, flags);
}
-static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
+static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
struct dma_buf *dmabuf;
struct reservation_object *resv;
struct reservation_object_list *fobj;
struct dma_fence *fence_excl;
- unsigned long events;
+ __poll_t events;
unsigned shared_count, seq;
dmabuf = file->private_data;
@@ -195,7 +195,7 @@ retry:
if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
- unsigned long pevents = POLLIN;
+ __poll_t pevents = POLLIN;
if (shared_count == 0)
pevents |= POLLOUT;
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 03830634e141..8e8c4a12a0bc 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -312,7 +312,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int sync_file_poll(struct file *file, poll_table *wait)
+static __poll_t sync_file_poll(struct file *file, poll_table *wait)
{
struct sync_file *sync_file = file->private_data;
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b52b0d55247e..97483df1f82e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2182,7 +2182,7 @@ static int pl08x_terminate_all(struct dma_chan *chan)
}
/* Dequeue jobs and free LLIs */
if (plchan->at) {
- pl08x_desc_free(&plchan->at->vd);
+ vchan_terminate_vdesc(&plchan->at->vd);
plchan->at = NULL;
}
/* Dequeue jobs not yet fired as well */
@@ -2193,6 +2193,13 @@ static int pl08x_terminate_all(struct dma_chan *chan)
return 0;
}
+static void pl08x_synchronize(struct dma_chan *chan)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+
+ vchan_synchronize(&plchan->vc);
+}
+
static int pl08x_pause(struct dma_chan *chan)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -2773,6 +2780,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->memcpy.device_pause = pl08x_pause;
pl08x->memcpy.device_resume = pl08x_resume;
pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
+ pl08x->memcpy.device_synchronize = pl08x_synchronize;
pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
@@ -2802,6 +2810,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->slave.device_pause = pl08x_pause;
pl08x->slave.device_resume = pl08x_resume;
pl08x->slave.device_terminate_all = pl08x_terminate_all;
+ pl08x->slave.device_synchronize = pl08x_synchronize;
pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->slave.directions =
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fbab271b3bf9..a861b5b4d443 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -708,7 +708,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
unsigned long flags)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
- struct data_chunk *first = xt->sgl;
+ struct data_chunk *first;
struct at_desc *desc = NULL;
size_t xfer_count;
unsigned int dwidth;
@@ -720,6 +720,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
return NULL;
+ first = xt->sgl;
+
dev_info(chan2dev(chan),
"%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
__func__, &xt->src_start, &xt->dst_start, xt->numf,
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 6204cc32d09c..847f84a41a69 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -812,7 +812,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
* c->desc is NULL and exit.)
*/
if (c->desc) {
- bcm2835_dma_desc_free(&c->desc->vd);
+ vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
bcm2835_dma_abort(c->chan_base);
@@ -836,6 +836,13 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void bcm2835_dma_synchronize(struct dma_chan *chan)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+ vchan_synchronize(&c->vc);
+}
+
static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
int irq, unsigned int irq_flags)
{
@@ -942,6 +949,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
od->ddev.device_config = bcm2835_dma_slave_config;
od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
+ od->ddev.device_synchronize = bcm2835_dma_synchronize;
od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index f7e965f63274..d9bee65a18a4 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -934,7 +934,7 @@ static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) !=
ARRAY_SIZE(am335x_usb_queues_tx));
- if (WARN_ON(cchan->port_num > ARRAY_SIZE(am335x_usb_queues_rx)))
+ if (WARN_ON(cchan->port_num >= ARRAY_SIZE(am335x_usb_queues_rx)))
return false;
cchan->q_num = queues[cchan->port_num].submit;
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index d50273fed715..afd5e10f8927 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -555,7 +555,7 @@ static int jz4740_dma_probe(struct platform_device *pdev)
ret = dma_async_device_register(dd);
if (ret)
- return ret;
+ goto err_clk;
irq = platform_get_irq(pdev, 0);
ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
@@ -568,6 +568,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
err_unregister:
dma_async_device_unregister(dd);
+err_clk:
+ clk_disable_unprepare(dmadev->clk);
return ret;
}
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 7373b7a555ec..85820a2d69d4 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -511,7 +511,7 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
/* Clear the DMA status and stop the transfer. */
jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
if (jzchan->desc) {
- jz4780_dma_desc_free(&jzchan->desc->vdesc);
+ vchan_terminate_vdesc(&jzchan->desc->vdesc);
jzchan->desc = NULL;
}
@@ -523,6 +523,13 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void jz4780_dma_synchronize(struct dma_chan *chan)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+
+ vchan_synchronize(&jzchan->vchan);
+}
+
static int jz4780_dma_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
@@ -813,6 +820,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
dd->device_config = jz4780_dma_config;
dd->device_terminate_all = jz4780_dma_terminate_all;
+ dd->device_synchronize = jz4780_dma_synchronize;
dd->device_tx_status = jz4780_dma_tx_status;
dd->device_issue_pending = jz4780_dma_issue_pending;
dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 47edc7fbf91f..80cc2be6483c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -155,6 +155,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
#define PATTERN_COUNT_MASK 0x1f
#define PATTERN_MEMSET_IDX 0x01
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+ bool done;
+ wait_queue_head_t *wait;
+};
+
struct dmatest_thread {
struct list_head node;
struct dmatest_info *info;
@@ -165,6 +171,8 @@ struct dmatest_thread {
u8 **dsts;
u8 **udsts;
enum dma_transaction_type type;
+ wait_queue_head_t done_wait;
+ struct dmatest_done test_done;
bool done;
};
@@ -342,18 +350,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
return error_count;
}
-/* poor man's completion - we want to use wait_event_freezable() on it */
-struct dmatest_done {
- bool done;
- wait_queue_head_t *wait;
-};
static void dmatest_callback(void *arg)
{
struct dmatest_done *done = arg;
-
- done->done = true;
- wake_up_all(done->wait);
+ struct dmatest_thread *thread =
+ container_of(done, struct dmatest_thread, test_done);
+ if (!thread->done) {
+ done->done = true;
+ wake_up_all(done->wait);
+ } else {
+ /*
+	 * If thread->done is set, this callback fired after the
+	 * parent thread has already cleaned up. This can happen
+	 * when the driver doesn't implement terminate_all() and a
+	 * DMA operation did not complete within the timeout period.
+ */
+ WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
+ }
}
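
The move of dmatest_done into dmatest_thread is the heart of this fix: the waitqueue used to live on dmatest_func()'s stack, so a callback arriving after a timeout could write through a dangling pointer. A minimal sketch of the resulting pattern, with hypothetical names (not part of the patch):

#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct example_done {
	bool done;
	wait_queue_head_t wait;
};

static void example_init(struct example_done *d)
{
	d->done = false;
	init_waitqueue_head(&d->wait);
}

/* Completion side, e.g. called from a DMA callback. */
static void example_complete(struct example_done *d)
{
	d->done = true;
	wake_up_all(&d->wait);
}

/* Waiter side: freezable, with a timeout; returns 0 on timeout. */
static long example_wait(struct example_done *d, unsigned long ms)
{
	return wait_event_freezable_timeout(d->wait, d->done,
					    msecs_to_jiffies(ms));
}

Because example_done lives in a long-lived object rather than on the waiter's stack, a late completion can no longer corrupt freed stack memory; it is instead detected via the thread->done check above.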
static unsigned int min_odd(unsigned int x, unsigned int y)
@@ -424,9 +439,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
*/
static int dmatest_func(void *data)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
struct dmatest_thread *thread = data;
- struct dmatest_done done = { .wait = &done_wait };
+ struct dmatest_done *done = &thread->test_done;
struct dmatest_info *info;
struct dmatest_params *params;
struct dma_chan *chan;
@@ -673,9 +687,9 @@ static int dmatest_func(void *data)
continue;
}
- done.done = false;
+ done->done = false;
tx->callback = dmatest_callback;
- tx->callback_param = &done;
+ tx->callback_param = done;
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
@@ -688,21 +702,12 @@ static int dmatest_func(void *data)
}
dma_async_issue_pending(chan);
- wait_event_freezable_timeout(done_wait, done.done,
+ wait_event_freezable_timeout(thread->done_wait, done->done,
msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
- if (!done.done) {
- /*
- * We're leaving the timed out dma operation with
- * dangling pointer to done_wait. To make this
- * correct, we'll need to allocate wait_done for
- * each test iteration and perform "who's gonna
- * free it this time?" dancing. For now, just
- * leave it dangling.
- */
- WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
+ if (!done->done) {
dmaengine_unmap_put(um);
result("test timed out", total_tests, src_off, dst_off,
len, 0);
@@ -789,7 +794,7 @@ err_thread_type:
dmatest_KBs(runtime, total_len), ret);
/* terminate all transfers on specified channels */
- if (ret)
+ if (ret || failed_tests)
dmaengine_terminate_all(chan);
thread->done = true;
@@ -849,6 +854,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
thread->info = info;
thread->chan = dtc->chan;
thread->type = type;
+ thread->test_done.wait = &thread->done_wait;
+ init_waitqueue_head(&thread->done_wait);
smp_wmb();
thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
dma_chan_name(chan), op, i);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 9364a3ed345a..948df1ab5f1a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -860,11 +860,8 @@ static int edma_terminate_all(struct dma_chan *chan)
/* Move the cyclic channel back to default queue */
if (!echan->tc && echan->edesc->cyclic)
edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
- /*
- * free the running request descriptor
- * since it is not in any of the vdesc lists
- */
- edma_desc_free(&echan->edesc->vdesc);
+
+ vchan_terminate_vdesc(&echan->edesc->vdesc);
echan->edesc = NULL;
}
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 6775f2c74e25..c7568869284e 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -863,11 +863,11 @@ static void fsl_edma_irq_exit(
}
}
-static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma)
+static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
{
int i;
- for (i = 0; i < DMAMUX_NR; i++)
+ for (i = 0; i < nr_clocks; i++)
clk_disable_unprepare(fsl_edma->muxclk[i]);
}
@@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(fsl_edma->muxbase[i]))
+ if (IS_ERR(fsl_edma->muxbase[i])) {
+ /* on error: disable all previously enabled clks */
+ fsl_disable_clocks(fsl_edma, i);
return PTR_ERR(fsl_edma->muxbase[i]);
+ }
sprintf(clkname, "dmamux%d", i);
fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
if (IS_ERR(fsl_edma->muxclk[i])) {
dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
+ /* on error: disable all previously enabled clks */
+ fsl_disable_clocks(fsl_edma, i);
return PTR_ERR(fsl_edma->muxclk[i]);
}
ret = clk_prepare_enable(fsl_edma->muxclk[i]);
- if (ret) {
- /* disable only clks which were enabled on error */
- for (; i >= 0; i--)
- clk_disable_unprepare(fsl_edma->muxclk[i]);
-
- dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
- return ret;
- }
+ if (ret)
+ /* on error: disable all previously enabled clks */
+ fsl_disable_clocks(fsl_edma, i);
}
@@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA engine. (%d)\n", ret);
- fsl_disable_clocks(fsl_edma);
+ fsl_disable_clocks(fsl_edma, DMAMUX_NR);
return ret;
}
@@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Can't register Freescale eDMA of_dma. (%d)\n", ret);
dma_async_device_unregister(&fsl_edma->dma_dev);
- fsl_disable_clocks(fsl_edma);
+ fsl_disable_clocks(fsl_edma, DMAMUX_NR);
return ret;
}
@@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev)
fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_edma->dma_dev);
- fsl_disable_clocks(fsl_edma);
+ fsl_disable_clocks(fsl_edma, DMAMUX_NR);
return 0;
}
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 0391f930aecc..25cec9c243e1 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -694,7 +694,6 @@ static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
static int mdc_terminate_all(struct dma_chan *chan)
{
struct mdc_chan *mchan = to_mdc_chan(chan);
- struct mdc_tx_desc *mdesc;
unsigned long flags;
LIST_HEAD(head);
@@ -703,21 +702,28 @@ static int mdc_terminate_all(struct dma_chan *chan)
mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
MDC_CONTROL_AND_STATUS);
- mdesc = mchan->desc;
- mchan->desc = NULL;
+ if (mchan->desc) {
+ vchan_terminate_vdesc(&mchan->desc->vd);
+ mchan->desc = NULL;
+ }
vchan_get_all_descriptors(&mchan->vc, &head);
mdc_get_new_events(mchan);
spin_unlock_irqrestore(&mchan->vc.lock, flags);
- if (mdesc)
- mdc_desc_free(&mdesc->vd);
vchan_dma_desc_free_list(&mchan->vc, &head);
return 0;
}
+static void mdc_synchronize(struct dma_chan *chan)
+{
+ struct mdc_chan *mchan = to_mdc_chan(chan);
+
+ vchan_synchronize(&mchan->vc);
+}
+
static int mdc_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
@@ -952,6 +958,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
mdma->dma_dev.device_tx_status = mdc_tx_status;
mdma->dma_dev.device_issue_pending = mdc_issue_pending;
mdma->dma_dev.device_terminate_all = mdc_terminate_all;
+ mdma->dma_dev.device_synchronize = mdc_synchronize;
mdma->dma_dev.device_config = mdc_slave_config;
mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 2184881afe76..e7db24c67030 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1939,4 +1939,10 @@ module_platform_driver(sdma_driver);
MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX SDMA driver");
+#if IS_ENABLED(CONFIG_SOC_IMX6Q)
+MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
+#endif
+#if IS_ENABLED(CONFIG_SOC_IMX7D)
+MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
+#endif
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 58d4ccd33672..8b5b23a8ace9 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -597,7 +597,6 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
for (i = 0; i < active && !seen_current; i++) {
struct dma_async_tx_descriptor *tx;
- smp_read_barrier_depends();
prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
desc = ioat_get_ring_ent(ioat_chan, idx + i);
dump_desc_dbg(ioat_chan, desc);
@@ -715,7 +714,6 @@ static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
for (i = 1; i < active; i++) {
struct dma_async_tx_descriptor *tx;
- smp_read_barrier_depends();
prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
desc = ioat_get_ring_ent(ioat_chan, idx + i);
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 2f31d3d0caa6..7792a9186f9c 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -390,7 +390,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
if (memcmp(src, dest, IOAT_TEST_SIZE)) {
dev_err(dev, "Self-test copy failed compare, disabling\n");
err = -ENODEV;
- goto free_resources;
+ goto unmap_dma;
}
unmap_dma:
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 01d2a750a621..26b67455208f 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -719,7 +719,7 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
c->phy = NULL;
p->vchan = NULL;
if (p->ds_run) {
- k3_dma_free_desc(&p->ds_run->vd);
+ vchan_terminate_vdesc(&p->ds_run->vd);
p->ds_run = NULL;
}
p->ds_done = NULL;
@@ -730,6 +730,13 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void k3_dma_synchronize(struct dma_chan *chan)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+
+ vchan_synchronize(&c->vc);
+}
+
static int k3_dma_transfer_pause(struct dma_chan *chan)
{
struct k3_dma_chan *c = to_k3_chan(chan);
@@ -868,6 +875,7 @@ static int k3_dma_probe(struct platform_device *op)
d->slave.device_pause = k3_dma_transfer_pause;
d->slave.device_resume = k3_dma_transfer_resume;
d->slave.device_terminate_all = k3_dma_terminate_all;
+ d->slave.device_synchronize = k3_dma_synchronize;
d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
/* init virtual channel */
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 5ba5714d0b7c..94d7bd7d2880 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -480,9 +480,7 @@ static int mic_dma_setup_irq(struct mic_dma_chan *ch)
to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch),
mic_dma_intr_handler, mic_dma_thread_fn,
"mic dma_channel", ch, ch->ch_num);
- if (IS_ERR(ch->cookie))
- return PTR_ERR(ch->cookie);
- return 0;
+ return PTR_ERR_OR_ZERO(ch->cookie);
}
static inline void mic_dma_free_irq(struct mic_dma_chan *ch)
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index f6dd849159d8..d21c19822feb 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1311,7 +1311,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
* c->desc is NULL and exit.)
*/
if (c->desc) {
- omap_dma_desc_free(&c->desc->vd);
+ vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
/* Avoid stopping the dma twice */
if (!c->paused)
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index e3669850aef4..963cc5228d05 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -50,6 +50,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
+#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
@@ -104,6 +105,10 @@ static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
+enum hidma_cap {
+ HIDMA_MSI_CAP = 1,
+ HIDMA_IDENTITY_CAP,
+};
/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
@@ -736,25 +741,12 @@ static int hidma_request_msi(struct hidma_dev *dmadev,
#endif
}
-static bool hidma_msi_capable(struct device *dev)
+static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- const char *of_compat;
- int ret = -EINVAL;
-
- if (!adev || acpi_disabled) {
- ret = device_property_read_string(dev, "compatible",
- &of_compat);
- if (ret)
- return false;
+ enum hidma_cap cap;
- ret = strcmp(of_compat, "qcom,hidma-1.1");
- } else {
-#ifdef CONFIG_ACPI
- ret = strcmp(acpi_device_hid(adev), "QCOM8062");
-#endif
- }
- return ret == 0;
+ cap = (enum hidma_cap) device_get_match_data(dev);
+ return cap ? ((cap & test_cap) > 0) : 0;
}
static int hidma_probe(struct platform_device *pdev)
@@ -834,8 +826,7 @@ static int hidma_probe(struct platform_device *pdev)
* Determine the MSI capability of the platform. Old HW doesn't
* support MSI.
*/
- msi = hidma_msi_capable(&pdev->dev);
-
+ msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
device_property_read_u32(&pdev->dev, "desc-count",
&dmadev->nr_descriptors);
@@ -848,7 +839,10 @@ static int hidma_probe(struct platform_device *pdev)
if (!dmadev->nr_descriptors)
dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
- dmadev->chidx = readl(dmadev->dev_trca + 0x28);
+ if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
+ dmadev->chidx = readl(dmadev->dev_trca + 0x40);
+ else
+ dmadev->chidx = readl(dmadev->dev_trca + 0x28);
/* Set DMA mask to 64 bits. */
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -953,7 +947,8 @@ static int hidma_remove(struct platform_device *pdev)
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
{"QCOM8061"},
- {"QCOM8062"},
+ {"QCOM8062", HIDMA_MSI_CAP},
+ {"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
@@ -961,7 +956,9 @@ MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
static const struct of_device_id hidma_match[] = {
{.compatible = "qcom,hidma-1.0",},
- {.compatible = "qcom,hidma-1.1",},
+ {.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
+ {.compatible = "qcom,hidma-1.2",
+ .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
{},
};
MODULE_DEVICE_TABLE(of, hidma_match);
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 4999e266b2de..7c6e2ff212a2 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -393,6 +393,8 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
*/
static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
+ unsigned long irqflags;
+
if (cause & HIDMA_ERR_INT_MASK) {
dev_err(lldev->dev, "error 0x%x, disabling...\n",
cause);
@@ -410,6 +412,10 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
return;
}
+ spin_lock_irqsave(&lldev->lock, irqflags);
+ writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ spin_unlock_irqrestore(&lldev->lock, irqflags);
+
/*
* Fine tuned for this HW...
*
@@ -421,9 +427,6 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
* Try to consume as many EVREs as possible.
*/
hidma_handle_tre_completion(lldev);
-
- /* We consumed TREs or there are pending TREs or EVREs. */
- writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
}
irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 7335e2eb9b72..000c7019ca7d 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -17,6 +17,7 @@
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/property.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
@@ -356,67 +357,37 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
{
struct platform_device *pdev_parent = of_find_device_by_node(np);
struct platform_device_info pdevinfo;
- struct of_phandle_args out_irq;
struct device_node *child;
- struct resource *res = NULL;
- const __be32 *cell;
- int ret = 0, size, i, num;
- u64 addr, addr_size;
+ struct resource *res;
+ int ret = 0;
+
+ /* allocate a resource array */
+ res = kcalloc(3, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
for_each_available_child_of_node(np, child) {
- struct resource *res_iter;
struct platform_device *new_pdev;
- cell = of_get_property(child, "reg", &size);
- if (!cell) {
- ret = -EINVAL;
+ ret = of_address_to_resource(child, 0, &res[0]);
+		if (ret)
goto out;
- }
-
- size /= sizeof(*cell);
- num = size /
- (of_n_addr_cells(child) + of_n_size_cells(child)) + 1;
- /* allocate a resource array */
- res = kcalloc(num, sizeof(*res), GFP_KERNEL);
- if (!res) {
- ret = -ENOMEM;
+ ret = of_address_to_resource(child, 1, &res[1]);
+		if (ret)
goto out;
- }
-
- /* read each reg value */
- i = 0;
- res_iter = res;
- while (i < size) {
- addr = of_read_number(&cell[i],
- of_n_addr_cells(child));
- i += of_n_addr_cells(child);
-
- addr_size = of_read_number(&cell[i],
- of_n_size_cells(child));
- i += of_n_size_cells(child);
-
- res_iter->start = addr;
- res_iter->end = res_iter->start + addr_size - 1;
- res_iter->flags = IORESOURCE_MEM;
- res_iter++;
- }
- ret = of_irq_parse_one(child, 0, &out_irq);
- if (ret)
+ ret = of_irq_to_resource(child, 0, &res[2]);
+ if (ret <= 0)
goto out;
- res_iter->start = irq_create_of_mapping(&out_irq);
- res_iter->name = "hidma event irq";
- res_iter->flags = IORESOURCE_IRQ;
-
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.fwnode = &child->fwnode;
pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
pdevinfo.name = child->name;
pdevinfo.id = object_counter++;
pdevinfo.res = res;
- pdevinfo.num_res = num;
+ pdevinfo.num_res = 3;
pdevinfo.data = NULL;
pdevinfo.size_data = 0;
pdevinfo.dma_mask = DMA_BIT_MASK(64);
@@ -434,8 +405,6 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
*/
of_msi_configure(&new_pdev->dev, child);
of_node_put(child);
- kfree(res);
- res = NULL;
}
out:
kfree(res);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index f04c4702d98b..cd92d696bcf9 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -732,7 +732,7 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
/* Dequeue current job */
if (s3cchan->at) {
- s3c24xx_dma_desc_free(&s3cchan->at->vd);
+ vchan_terminate_vdesc(&s3cchan->at->vd);
s3cchan->at = NULL;
}
@@ -744,6 +744,13 @@ unlock:
return ret;
}
+static void s3c24xx_dma_synchronize(struct dma_chan *chan)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+
+ vchan_synchronize(&s3cchan->vc);
+}
+
static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
{
/* Ensure all queued descriptors are freed */
@@ -1282,6 +1289,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
+ s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize;
/* Initialize slave engine for SoC internal dedicated peripherals */
dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
@@ -1296,6 +1304,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
+ s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize;
s3cdma->slave.filter.map = pdata->slave_map;
s3cdma->slave.filter.mapcnt = pdata->slavecnt;
s3cdma->slave.filter.fn = s3c24xx_dma_filter;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2b2c7db3e480..e3ff162c03fc 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
@@ -741,6 +742,41 @@ static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
/* -----------------------------------------------------------------------------
* Stop and reset
*/
+static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
+{
+ u32 chcr;
+ unsigned int i;
+
+ /*
+	 * Ensure that the DE bit actually reads back as 0 after it
+	 * has been cleared.
+ */
+ for (i = 0; i < 1024; i++) {
+ chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+ if (!(chcr & RCAR_DMACHCR_DE))
+ return;
+ udelay(1);
+ }
+
+ dev_err(chan->chan.device->dev, "CHCR DE check error\n");
+}
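
The bounded poll above could in principle be written with the iopoll helpers; the open-coded loop is used because the register access goes through rcar_dmac_chan_read() rather than a bare void __iomem *. A rough equivalent, assuming direct access to the channel's MMIO base (illustrative only, not part of the patch):

/*
 *	u32 chcr;
 *
 *	if (readl_poll_timeout_atomic(chan->iomem + RCAR_DMACHCR, chcr,
 *				      !(chcr & RCAR_DMACHCR_DE), 1, 1024))
 *		dev_err(chan->chan.device->dev, "CHCR DE check error\n");
 */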
+
+static void rcar_dmac_sync_tcr(struct rcar_dmac_chan *chan)
+{
+ u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+
+ if (!(chcr & RCAR_DMACHCR_DE))
+ return;
+
+ /* set DE=0 and flush remaining data */
+ rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));
+
+ /* make sure all remaining data was flushed */
+ rcar_dmac_chcr_de_barrier(chan);
+
+	/* restore the original DE setting */
+ rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
+}
static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
@@ -749,6 +785,7 @@ static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
+ rcar_dmac_chcr_de_barrier(chan);
}
static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
@@ -1309,8 +1346,11 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
residue += chunk->size;
}
+ if (desc->direction == DMA_DEV_TO_MEM)
+ rcar_dmac_sync_tcr(chan);
+
/* Add the residue for the current chunk. */
- residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;
+ residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
return residue;
}
@@ -1481,6 +1521,8 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
if (chcr & RCAR_DMACHCR_TE)
mask |= RCAR_DMACHCR_DE;
rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
+ if (mask & RCAR_DMACHCR_DE)
+ rcar_dmac_chcr_de_barrier(chan);
if (chcr & RCAR_DMACHCR_DSE)
ret |= rcar_dmac_isr_desc_stage_end(chan);
@@ -1615,22 +1657,6 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
* Power management
*/
-#ifdef CONFIG_PM_SLEEP
-static int rcar_dmac_sleep_suspend(struct device *dev)
-{
- /*
- * TODO: Wait for the current transfer to complete and stop the device.
- */
- return 0;
-}
-
-static int rcar_dmac_sleep_resume(struct device *dev)
-{
- /* TODO: Resume transfers, if any. */
- return 0;
-}
-#endif
-
#ifdef CONFIG_PM
static int rcar_dmac_runtime_suspend(struct device *dev)
{
@@ -1646,7 +1672,13 @@ static int rcar_dmac_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops rcar_dmac_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
+ /*
+ * TODO for system sleep/resume:
+ * - Wait for the current transfer to complete and stop the device,
+ * - Resume transfers, if any.
+ */
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
NULL)
};
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index b652071a2096..b106e8a60af6 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -710,7 +710,7 @@ static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
return 0;
}
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
{
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index d5db0f6e1ff8..4dbb30cf94ac 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -253,9 +253,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
iomem = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(iomem))
return PTR_ERR(iomem);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index b9d75a54c896..9a558e30c461 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -353,7 +353,8 @@ static int tegra_dma_slave_config(struct dma_chan *dc,
}
memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
- if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID) {
+ if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
+ sconfig->device_fc) {
if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
return -EINVAL;
tdc->slave_id = sconfig->slave_id;
@@ -970,8 +971,13 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
- csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
- csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ csr |= TEGRA_APBDMA_CSR_ONCE;
+
+ if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
+ csr |= TEGRA_APBDMA_CSR_FLOW;
+ csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ }
+
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_APBDMA_CSR_IE_EOC;
@@ -1110,10 +1116,13 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
- csr |= TEGRA_APBDMA_CSR_FLOW;
+ if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
+ csr |= TEGRA_APBDMA_CSR_FLOW;
+ csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ }
+
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_APBDMA_CSR_IE_EOC;
- csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 7df910e7c348..9272b173c746 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -54,7 +54,15 @@ struct ti_am335x_xbar_map {
static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
{
- writeb_relaxed(val, iomem + event);
+ /*
+	 * The TPCC_EVT_MUX_60_63 register layout differs from the rest:
+	 * event 63 is mapped to the lowest byte and event 60 to the
+	 * highest, so handle it separately.
+ */
+ if (event >= 60 && event <= 63)
+ writeb_relaxed(val, iomem + (63 - event % 4));
+ else
+ writeb_relaxed(val, iomem + event);
}
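
Worked out, the reversed mapping above gives (illustrative, not part of the patch):

/*
 *	event 60: 63 - (60 % 4) = 63  (highest byte)
 *	event 61: 63 - (61 % 4) = 62
 *	event 62: 63 - (62 % 4) = 61
 *	event 63: 63 - (63 % 4) = 60  (lowest byte)
 *
 * Events outside 60..63 keep the 1:1 event == byte-offset mapping.
 */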
static void ti_am335x_xbar_free(struct device *dev, void *route_data)
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 896bafb7a532..395c698edb4d 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -422,7 +422,7 @@ static int td_alloc_chan_resources(struct dma_chan *chan)
break;
else {
dev_err(chan2dev(chan),
- "Couldnt allocate any descriptors\n");
+ "Couldn't allocate any descriptors\n");
return -ENOMEM;
}
}
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 545e97279083..88ad8ed2a8d6 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -107,10 +107,7 @@ static void vchan_complete(unsigned long arg)
dmaengine_desc_get_callback(&vd->tx, &cb);
list_del(&vd->node);
- if (dmaengine_desc_test_reuse(&vd->tx))
- list_add(&vd->node, &vc->desc_allocated);
- else
- vc->desc_free(vd);
+ vchan_vdesc_fini(vd);
dmaengine_desc_callback_invoke(&cb, NULL);
}
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 3f776a46a29c..b09b75ab0751 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -35,6 +35,7 @@ struct virt_dma_chan {
struct list_head desc_completed;
struct virt_dma_desc *cyclic;
+ struct virt_dma_desc *vd_terminated;
};
static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
@@ -104,6 +105,20 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
}
/**
+ * vchan_vdesc_fini - Free or reuse a descriptor
+ * @vd: virtual descriptor to free/reuse
+ */
+static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ if (dmaengine_desc_test_reuse(&vd->tx))
+ list_add(&vd->node, &vc->desc_allocated);
+ else
+ vc->desc_free(vd);
+}
+
+/**
* vchan_cyclic_callback - report the completion of a period
* @vd: virtual descriptor
*/
@@ -116,6 +131,25 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
}
/**
+ * vchan_terminate_vdesc - Park a terminated descriptor and disable its pending cyclic callback
+ * @vd: virtual descriptor to be terminated
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ /* free up stuck descriptor */
+ if (vc->vd_terminated)
+ vchan_vdesc_fini(vc->vd_terminated);
+
+ vc->vd_terminated = vd;
+ if (vc->cyclic == vd)
+ vc->cyclic = NULL;
+}
+
+/**
* vchan_next_desc - peek at the next descriptor to be processed
* @vc: virtual channel to obtain descriptor from
*
@@ -168,10 +202,20 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
* Makes sure that all scheduled or active callbacks have finished running. For
* proper operation the caller has to ensure that no new callbacks are scheduled
* after the invocation of this function started.
+ * Free any parked terminated descriptor to prevent a memory leak.
*/
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
+ unsigned long flags;
+
tasklet_kill(&vc->task);
+
+ spin_lock_irqsave(&vc->lock, flags);
+ if (vc->vd_terminated) {
+ vchan_vdesc_fini(vc->vd_terminated);
+ vc->vd_terminated = NULL;
+ }
+ spin_unlock_irqrestore(&vc->lock, flags);
}
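
From a DMA client's point of view, the descriptor parked in vd_terminated is what makes the usual two-step teardown safe. A minimal, hypothetical sketch (not part of the patch):

#include <linux/dmaengine.h>

static void example_channel_teardown(struct dma_chan *chan)
{
	/* Stop transfers; may return before callbacks have finished. */
	dmaengine_terminate_async(chan);

	/*
	 * Waits for all callbacks to complete and, via
	 * vchan_synchronize(), lets the core free the descriptor
	 * parked in vd_terminated.
	 */
	dmaengine_synchronize(chan);
}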
#endif
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 5eef13380ca8..27b523530c4a 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -99,7 +99,9 @@
#define XILINX_DMA_REG_FRMPTR_STS 0x0024
#define XILINX_DMA_REG_PARK_PTR 0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
+#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
+#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION 0x002c
/* Register Direct Mode Registers */
@@ -163,6 +165,7 @@
#define XILINX_DMA_BD_SOP BIT(27)
#define XILINX_DMA_BD_EOP BIT(26)
#define XILINX_DMA_COALESCE_MAX 255
+#define XILINX_DMA_NUM_DESCS 255
#define XILINX_DMA_NUM_APP_WORDS 5
/* Multi-Channel DMA Descriptor offsets*/
@@ -211,8 +214,8 @@ struct xilinx_vdma_desc_hw {
* @next_desc_msb: MSB of Next Descriptor Pointer @0x04
* @buf_addr: Buffer address @0x08
* @buf_addr_msb: MSB of Buffer address @0x0C
- * @pad1: Reserved @0x10
- * @pad2: Reserved @0x14
+ * @mcdma_control: Control field for mcdma @0x10
+ * @vsize_stride: Vsize and Stride field for mcdma @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
* @app: APP Fields @0x20 - 0x30
@@ -232,11 +235,11 @@ struct xilinx_axidma_desc_hw {
/**
* struct xilinx_cdma_desc_hw - Hardware Descriptor
* @next_desc: Next Descriptor Pointer @0x00
- * @next_descmsb: Next Descriptor Pointer MSB @0x04
+ * @next_desc_msb: Next Descriptor Pointer MSB @0x04
* @src_addr: Source address @0x08
- * @src_addrmsb: Source address MSB @0x0C
+ * @src_addr_msb: Source address MSB @0x0C
* @dest_addr: Destination address @0x10
- * @dest_addrmsb: Destination address MSB @0x14
+ * @dest_addr_msb: Destination address MSB @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
*/
@@ -310,6 +313,7 @@ struct xilinx_dma_tx_descriptor {
* @pending_list: Descriptors waiting
* @active_list: Descriptors ready to submit
* @done_list: Complete descriptors
+ * @free_seg_list: Free descriptors
* @common: DMA common channel
* @desc_pool: Descriptors pool
* @dev: The dma device
@@ -321,6 +325,7 @@ struct xilinx_dma_tx_descriptor {
* @cyclic: Check for cyclic transfers.
* @genlock: Support genlock mode
* @err: Channel has errors
+ * @idle: Check for channel idle
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
@@ -329,9 +334,12 @@ struct xilinx_dma_tx_descriptor {
* @desc_submitcount: Descriptor h/w submitted count
* @residue: Residue for AXI DMA
* @seg_v: Statically allocated segments base
+ * @seg_p: Physical allocated segments base
* @cyclic_seg_v: Statically allocated segment base for cyclic transfers
+ * @cyclic_seg_p: Physical allocated segments base for cyclic dma
* @start_transfer: Differentiate b/w DMA IP's transfer
* @stop_transfer: Differentiate b/w DMA IP's quiesce
+ * @tdest: TDEST value for mcdma
*/
struct xilinx_dma_chan {
struct xilinx_dma_device *xdev;
@@ -341,6 +349,7 @@ struct xilinx_dma_chan {
struct list_head pending_list;
struct list_head active_list;
struct list_head done_list;
+ struct list_head free_seg_list;
struct dma_chan common;
struct dma_pool *desc_pool;
struct device *dev;
@@ -352,6 +361,7 @@ struct xilinx_dma_chan {
bool cyclic;
bool genlock;
bool err;
+ bool idle;
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
@@ -360,18 +370,20 @@ struct xilinx_dma_chan {
u32 desc_submitcount;
u32 residue;
struct xilinx_axidma_tx_segment *seg_v;
+ dma_addr_t seg_p;
struct xilinx_axidma_tx_segment *cyclic_seg_v;
+ dma_addr_t cyclic_seg_p;
void (*start_transfer)(struct xilinx_dma_chan *chan);
int (*stop_transfer)(struct xilinx_dma_chan *chan);
u16 tdest;
};
/**
- * enum xdma_ip_type: DMA IP type.
+ * enum xdma_ip_type - DMA IP type.
*
- * XDMA_TYPE_AXIDMA: Axi dma ip.
- * XDMA_TYPE_CDMA: Axi cdma ip.
- * XDMA_TYPE_VDMA: Axi vdma ip.
+ * @XDMA_TYPE_AXIDMA: Axi dma ip.
+ * @XDMA_TYPE_CDMA: Axi cdma ip.
+ * @XDMA_TYPE_VDMA: Axi vdma ip.
*
*/
enum xdma_ip_type {
@@ -580,18 +592,32 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
- struct xilinx_axidma_tx_segment *segment;
- dma_addr_t phys;
-
- segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
- if (!segment)
- return NULL;
+ struct xilinx_axidma_tx_segment *segment = NULL;
+ unsigned long flags;
- segment->phys = phys;
+ spin_lock_irqsave(&chan->lock, flags);
+ if (!list_empty(&chan->free_seg_list)) {
+ segment = list_first_entry(&chan->free_seg_list,
+ struct xilinx_axidma_tx_segment,
+ node);
+ list_del(&segment->node);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
return segment;
}
+static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
+{
+ u32 next_desc = hw->next_desc;
+ u32 next_desc_msb = hw->next_desc_msb;
+
+ memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
+
+ hw->next_desc = next_desc;
+ hw->next_desc_msb = next_desc_msb;
+}
+
/**
* xilinx_dma_free_tx_segment - Free transaction segment
* @chan: Driver specific DMA channel
@@ -600,7 +626,9 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
struct xilinx_axidma_tx_segment *segment)
{
- dma_pool_free(chan->desc_pool, segment, segment->phys);
+ xilinx_dma_clean_hw_desc(&segment->hw);
+
+ list_add_tail(&segment->node, &chan->free_seg_list);
}
/**
@@ -725,16 +753,31 @@ static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
dev_dbg(chan->dev, "Free all channel resources.\n");
xilinx_dma_free_descriptors(chan);
+
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
- xilinx_dma_free_tx_segment(chan, chan->seg_v);
+ spin_lock_irqsave(&chan->lock, flags);
+ INIT_LIST_HEAD(&chan->free_seg_list);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Free memory that is allocated for BD */
+ dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
+ XILINX_DMA_NUM_DESCS, chan->seg_v,
+ chan->seg_p);
+
+ /* Free memory that is allocated for cyclic DMA mode */
+ dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
+ chan->cyclic_seg_v, chan->cyclic_seg_p);
+ }
+
+ if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
}
- dma_pool_destroy(chan->desc_pool);
- chan->desc_pool = NULL;
}
/**
@@ -817,6 +860,7 @@ static void xilinx_dma_do_tasklet(unsigned long data)
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ int i;
/* Has this channel already been allocated? */
if (chan->desc_pool)
@@ -827,11 +871,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
* for meeting Xilinx VDMA specification requirement.
*/
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
- chan->dev,
- sizeof(struct xilinx_axidma_tx_segment),
- __alignof__(struct xilinx_axidma_tx_segment),
- 0);
+ /* Allocate the buffer descriptors. */
+ chan->seg_v = dma_zalloc_coherent(chan->dev,
+ sizeof(*chan->seg_v) *
+ XILINX_DMA_NUM_DESCS,
+ &chan->seg_p, GFP_KERNEL);
+ if (!chan->seg_v) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptors\n",
+ chan->id);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
+ chan->seg_v[i].hw.next_desc =
+ lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_v[i].hw.next_desc_msb =
+ upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_v[i].phys = chan->seg_p +
+ sizeof(*chan->seg_v) * i;
+ list_add_tail(&chan->seg_v[i].node,
+ &chan->free_seg_list);
+ }
} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
chan->dev,
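
The allocation loop in the hunk above pre-links the buffer descriptors into a hardware ring; a standalone sketch of the same address arithmetic (structure and function names hypothetical, kernel types assumed):

#define NUM_DESCS 255	/* mirrors XILINX_DMA_NUM_DESCS */

struct fake_seg {
	u64 next;	/* stands in for hw.next_desc{,_msb} */
};

/* Link entry i to entry (i + 1) % NUM_DESCS so the last BD wraps back
 * to the first, exactly as the loop above does with seg_p. */
static void link_bd_ring(struct fake_seg *seg, u64 base_phys)
{
	int i;

	for (i = 0; i < NUM_DESCS; i++)
		seg[i].next = base_phys +
			      sizeof(*seg) * ((i + 1) % NUM_DESCS);
}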
@@ -846,7 +909,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
0);
}
- if (!chan->desc_pool) {
+ if (!chan->desc_pool &&
+ (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
dev_err(chan->dev,
"unable to allocate channel %d descriptor pool\n",
chan->id);
@@ -855,22 +919,20 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
/*
- * For AXI DMA case after submitting a pending_list, keep
- * an extra segment allocated so that the "next descriptor"
- * pointer on the tail descriptor always points to a
- * valid descriptor, even when paused after reaching taildesc.
- * This way, it is possible to issue additional
- * transfers without halting and restarting the channel.
- */
- chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
-
- /*
* For cyclic DMA mode we need to program the tail Descriptor
* register with a value which is not a part of the BD chain
* so allocating a desc segment during channel allocation for
* programming tail descriptor.
*/
- chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
+ chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
+ sizeof(*chan->cyclic_seg_v),
+ &chan->cyclic_seg_p, GFP_KERNEL);
+ if (!chan->cyclic_seg_v) {
+ dev_err(chan->dev,
+ "unable to allocate desc segment for cyclic DMA\n");
+ return -ENOMEM;
+ }
+ chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
}
dma_cookie_init(dchan);
@@ -936,34 +998,10 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
}
/**
- * xilinx_dma_is_running - Check if DMA channel is running
- * @chan: Driver specific DMA channel
- *
- * Return: '1' if running, '0' if not.
- */
-static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
-{
- return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
- XILINX_DMA_DMASR_HALTED) &&
- (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
- XILINX_DMA_DMACR_RUNSTOP);
-}
-
-/**
- * xilinx_dma_is_idle - Check if DMA channel is idle
- * @chan: Driver specific DMA channel
- *
- * Return: '1' if idle, '0' if not.
- */
-static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
-{
- return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
- XILINX_DMA_DMASR_IDLE;
-}
-
-/**
* xilinx_dma_stop_transfer - Halt DMA channel
* @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
*/
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
@@ -980,6 +1018,8 @@ static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
/**
* xilinx_cdma_stop_transfer - Wait for the current transfer to complete
* @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
*/
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
@@ -1022,13 +1062,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
struct xilinx_vdma_config *config = &chan->config;
struct xilinx_dma_tx_descriptor *desc, *tail_desc;
- u32 reg;
+ u32 reg, j;
struct xilinx_vdma_tx_segment *tail_segment;
/* This function was invoked with lock held */
if (chan->err)
return;
+ if (!chan->idle)
+ return;
+
if (list_empty(&chan->pending_list))
return;
@@ -1040,13 +1083,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_vdma_tx_segment, node);
- /* If it is SG mode and hardware is busy, cannot submit */
- if (chan->has_sg && xilinx_dma_is_running(chan) &&
- !xilinx_dma_is_idle(chan)) {
- dev_dbg(chan->dev, "DMA controller still busy\n");
- return;
- }
-
/*
* If hardware is idle, then all descriptors on the running lists are
* done, start new transfers
@@ -1063,10 +1099,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
else
reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
- /* Configure channel to allow number frame buffers */
- dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
- chan->desc_pendingcount);
-
/*
* With SG, start with circular mode, so that BDs can be fetched.
* In direct register mode, if not parking, enable circular mode
@@ -1079,17 +1111,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
- if (config->park && (config->park_frm >= 0) &&
- (config->park_frm < chan->num_frms)) {
- if (chan->direction == DMA_MEM_TO_DEV)
- dma_write(chan, XILINX_DMA_REG_PARK_PTR,
- config->park_frm <<
- XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
- else
- dma_write(chan, XILINX_DMA_REG_PARK_PTR,
- config->park_frm <<
- XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
+ j = chan->desc_submitcount;
+ reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
+ } else {
+ reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
}
+ dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
/* Start the hardware */
xilinx_dma_start(chan);
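
The park-pointer update above is a read-modify-write of a single register field; a generic hedged sketch of the pattern (helper name hypothetical):

static inline u32 rmw_field(u32 reg, u32 mask, u32 shift, u32 val)
{
	reg &= ~mask;			/* clear the target field */
	reg |= (val << shift) & mask;	/* insert the new value */
	return reg;
}

Masking only the relevant field preserves the other frame-reference bits in the register, which the replaced plain write clobbered.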
@@ -1101,6 +1132,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
if (chan->has_sg) {
dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
tail_segment->phys);
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
} else {
struct xilinx_vdma_tx_segment *segment, *last = NULL;
int i = 0;
@@ -1130,19 +1163,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
last->hw.stride);
vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
- }
- if (!chan->has_sg) {
- list_del(&desc->node);
- list_add_tail(&desc->node, &chan->active_list);
chan->desc_submitcount++;
chan->desc_pendingcount--;
+ list_del(&desc->node);
+ list_add_tail(&desc->node, &chan->active_list);
if (chan->desc_submitcount == chan->num_frms)
chan->desc_submitcount = 0;
- } else {
- list_splice_tail_init(&chan->pending_list, &chan->active_list);
- chan->desc_pendingcount = 0;
}
+
+ chan->idle = false;
}
/**
@@ -1158,6 +1188,9 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
if (chan->err)
return;
+ if (!chan->idle)
+ return;
+
if (list_empty(&chan->pending_list))
return;
@@ -1176,6 +1209,12 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
}
if (chan->has_sg) {
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
xilinx_write(chan, XILINX_DMA_REG_CURDESC,
head_desc->async_tx.phys);
@@ -1203,6 +1242,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
list_splice_tail_init(&chan->pending_list, &chan->active_list);
chan->desc_pendingcount = 0;
+ chan->idle = false;
}
/**
@@ -1212,7 +1252,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
- struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
+ struct xilinx_axidma_tx_segment *tail_segment;
u32 reg;
if (chan->err)
@@ -1221,12 +1261,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
if (list_empty(&chan->pending_list))
return;
- /* If it is SG mode and hardware is busy, cannot submit */
- if (chan->has_sg && xilinx_dma_is_running(chan) &&
- !xilinx_dma_is_idle(chan)) {
- dev_dbg(chan->dev, "DMA controller still busy\n");
+ if (!chan->idle)
return;
- }
head_desc = list_first_entry(&chan->pending_list,
struct xilinx_dma_tx_descriptor, node);
@@ -1235,21 +1271,6 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_axidma_tx_segment, node);
- if (chan->has_sg && !chan->xdev->mcdma) {
- old_head = list_first_entry(&head_desc->segments,
- struct xilinx_axidma_tx_segment, node);
- new_head = chan->seg_v;
- /* Copy Buffer Descriptor fields. */
- new_head->hw = old_head->hw;
-
- /* Swap and save new reserve */
- list_replace_init(&old_head->node, &new_head->node);
- chan->seg_v = old_head;
-
- tail_segment->hw.next_desc = chan->seg_v->phys;
- head_desc->async_tx.phys = new_head->phys;
- }
-
reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
@@ -1324,6 +1345,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
list_splice_tail_init(&chan->pending_list, &chan->active_list);
chan->desc_pendingcount = 0;
+ chan->idle = false;
}
/**
@@ -1388,6 +1410,8 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
}
chan->err = false;
+ chan->idle = true;
+ chan->desc_submitcount = 0;
return err;
}
@@ -1469,6 +1493,7 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
spin_lock(&chan->lock);
xilinx_dma_complete_descriptor(chan);
+ chan->idle = true;
chan->start_transfer(chan);
spin_unlock(&chan->lock);
}
@@ -1591,7 +1616,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_vdma_tx_segment *segment, *prev = NULL;
+ struct xilinx_vdma_tx_segment *segment;
struct xilinx_vdma_desc_hw *hw;
if (!is_slave_direction(xt->dir))
@@ -1645,8 +1670,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
/* Insert the segment into the descriptor segments list. */
list_add_tail(&segment->node, &desc->segments);
- prev = segment;
-
/* Link the last hardware descriptor with the first. */
segment = list_first_entry(&desc->segments,
struct xilinx_vdma_tx_segment, node);
@@ -1733,7 +1756,7 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
+ struct xilinx_axidma_tx_segment *segment = NULL;
u32 *app_w = (u32 *)context;
struct scatterlist *sg;
size_t copy;
@@ -1784,10 +1807,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
XILINX_DMA_NUM_APP_WORDS);
}
- if (prev)
- prev->hw.next_desc = segment->phys;
-
- prev = segment;
sg_used += copy;
/*
@@ -1801,7 +1820,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
segment = list_first_entry(&desc->segments,
struct xilinx_axidma_tx_segment, node);
desc->async_tx.phys = segment->phys;
- prev->hw.next_desc = segment->phys;
/* For the last DMA_MEM_TO_DEV transfer, set EOP */
if (chan->direction == DMA_MEM_TO_DEV) {
@@ -1821,11 +1839,14 @@ error:
/**
* xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
+ * @dchan: DMA channel
+ * @buf_addr: Physical address of the buffer
+ * @buf_len: Total length of the cyclic buffers
+ * @period_len: Length of an individual cyclic buffer
* @direction: DMA direction
* @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
*/
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
@@ -2009,7 +2030,9 @@ error:
/**
* xilinx_dma_terminate_all - Halt the channel and free descriptors
- * @chan: Driver specific DMA Channel pointer
+ * @dchan: Driver specific DMA Channel pointer
+ *
+ * Return: '0' always.
*/
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
@@ -2029,6 +2052,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
/* Remove and free all of the descriptors in the lists */
xilinx_dma_free_descriptors(chan);
+ chan->idle = true;
if (chan->cyclic) {
reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
@@ -2037,6 +2061,10 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
chan->cyclic = false;
}
+ if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
return 0;
}
@@ -2323,6 +2351,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
*
* @xdev: Driver specific device structure
* @node: Device node
+ * @chan_id: DMA Channel id
*
* Return: '0' on success and failure value on error
*/
@@ -2344,11 +2373,18 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->has_sg = xdev->has_sg;
chan->desc_pendingcount = 0x0;
chan->ext_addr = xdev->ext_addr;
+ /*
+ * This flag ensures that descriptors are not submitted while the
+ * DMA engine is busy. It avoids polling a status-register bit in
+ * the driver hot path to learn the DMA state.
+ */
+ chan->idle = true;
spin_lock_init(&chan->lock);
INIT_LIST_HEAD(&chan->pending_list);
INIT_LIST_HEAD(&chan->done_list);
INIT_LIST_HEAD(&chan->active_list);
+ INIT_LIST_HEAD(&chan->free_seg_list);
/* Retrieve the channel properties from the device tree */
has_dre = of_property_read_bool(node, "xlnx,include-dre");
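
A sketch of the idle-flag protocol the comment above describes; chan->lock is held by all callers, as in the driver, and the function names are hypothetical:

static void sketch_start_transfer(struct xilinx_dma_chan *chan)
{
	if (!chan->idle)	/* engine busy: the IRQ path will re-kick */
		return;
	/* ... program and start the hardware ... */
	chan->idle = false;
}

static void sketch_on_completion_irq(struct xilinx_dma_chan *chan)
{
	chan->idle = true;		/* hardware finished the transfer */
	sketch_start_transfer(chan);	/* submit pending descriptors */
}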
@@ -2379,6 +2415,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+ chan->config.park = 1;
if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
@@ -2395,6 +2432,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+ chan->config.park = 1;
if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
@@ -2459,7 +2497,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
* Return: 0 always.
*/
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
- struct device_node *node) {
+ struct device_node *node)
+{
int ret, i, nr_channels = 1;
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
@@ -2654,7 +2693,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
goto error;
}
- dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
+ else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
+ dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
+ else
+ dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
return 0;
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 1ee1241ca797..f14645817ed8 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pm_runtime.h>
#include "../dmaengine.h"
@@ -47,6 +48,7 @@
#define ZYNQMP_DMA_SRC_START_MSB 0x15C
#define ZYNQMP_DMA_DST_START_LSB 0x160
#define ZYNQMP_DMA_DST_START_MSB 0x164
+#define ZYNQMP_DMA_TOTAL_BYTE 0x188
#define ZYNQMP_DMA_RATE_CTRL 0x18C
#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190
#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194
@@ -138,6 +140,8 @@
#define ZYNQMP_DMA_BUS_WIDTH_64 64
#define ZYNQMP_DMA_BUS_WIDTH_128 128
+#define ZDMA_PM_TIMEOUT 100
+
#define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size)
#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \
@@ -211,8 +215,6 @@ struct zynqmp_dma_desc_sw {
* @bus_width: Bus width
* @src_burst_len: Source burst length
* @dst_burst_len: Dest burst length
- * @clk_main: Pointer to main clock
- * @clk_apb: Pointer to apb clock
*/
struct zynqmp_dma_chan {
struct zynqmp_dma_device *zdev;
@@ -237,8 +239,6 @@ struct zynqmp_dma_chan {
u32 bus_width;
u32 src_burst_len;
u32 dst_burst_len;
- struct clk *clk_main;
- struct clk *clk_apb;
};
/**
@@ -246,11 +246,15 @@ struct zynqmp_dma_chan {
* @dev: Device Structure
* @common: DMA device structure
* @chan: Driver specific DMA channel
+ * @clk_main: Pointer to main clock
+ * @clk_apb: Pointer to apb clock
*/
struct zynqmp_dma_device {
struct device *dev;
struct dma_device common;
struct zynqmp_dma_chan *chan;
+ struct clk *clk_main;
+ struct clk *clk_apb;
};
static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
@@ -461,7 +465,11 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
struct zynqmp_dma_desc_sw *desc;
- int i;
+ int i, ret;
+
+ ret = pm_runtime_get_sync(chan->dev);
+ if (ret < 0)
+ return ret;
chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS,
GFP_KERNEL);
@@ -506,6 +514,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
{
writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
+ writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
chan->idle = false;
writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
}
@@ -517,12 +526,12 @@ static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
*/
static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
{
- u32 val;
-
+ if (status & ZYNQMP_DMA_BYTE_CNT_OVRFL)
+ writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
- val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+ readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
- val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+ readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
}
static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
@@ -545,6 +554,8 @@ static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
* zynqmp_dma_device_config - Zynqmp dma device configuration
* @dchan: DMA channel
* @config: DMA device config
+ *
+ * Return: 0 always
*/
static int zynqmp_dma_device_config(struct dma_chan *dchan,
struct dma_slave_config *config)
@@ -640,7 +651,7 @@ static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
/**
* zynqmp_dma_free_descriptors - Free channel descriptors
- * @dchan: DMA channel pointer
+ * @chan: ZynqMP DMA channel pointer
*/
static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
{
@@ -664,6 +675,8 @@ static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
(2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
chan->desc_pool_v, chan->desc_pool_p);
kfree(chan->sw_desc_pool);
+ pm_runtime_mark_last_busy(chan->dev);
+ pm_runtime_put_autosuspend(chan->dev);
}
/**
@@ -715,7 +728,7 @@ static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
if (status & ZYNQMP_DMA_INT_OVRFL) {
zynqmp_dma_handle_ovfl_int(chan, status);
- dev_info(chan->dev, "Channel %p overflow interrupt\n", chan);
+ dev_dbg(chan->dev, "Channel %p overflow interrupt\n", chan);
ret = IRQ_HANDLED;
}
@@ -838,11 +851,10 @@ static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
if (!chan)
return;
- devm_free_irq(chan->zdev->dev, chan->irq, chan);
+ if (chan->irq)
+ devm_free_irq(chan->zdev->dev, chan->irq, chan);
tasklet_kill(&chan->tasklet);
list_del(&chan->common.device_node);
- clk_disable_unprepare(chan->clk_apb);
- clk_disable_unprepare(chan->clk_main);
}
/**
@@ -907,30 +919,6 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
"zynqmp-dma", chan);
if (err)
return err;
- chan->clk_main = devm_clk_get(&pdev->dev, "clk_main");
- if (IS_ERR(chan->clk_main)) {
- dev_err(&pdev->dev, "main clock not found.\n");
- return PTR_ERR(chan->clk_main);
- }
-
- chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
- if (IS_ERR(chan->clk_apb)) {
- dev_err(&pdev->dev, "apb clock not found.\n");
- return PTR_ERR(chan->clk_apb);
- }
-
- err = clk_prepare_enable(chan->clk_main);
- if (err) {
- dev_err(&pdev->dev, "Unable to enable main clock.\n");
- return err;
- }
-
- err = clk_prepare_enable(chan->clk_apb);
- if (err) {
- clk_disable_unprepare(chan->clk_main);
- dev_err(&pdev->dev, "Unable to enable apb clock.\n");
- return err;
- }
chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
chan->idle = true;
@@ -953,6 +941,87 @@ static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
}
/**
+ * zynqmp_dma_suspend - Suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_suspend(struct device *dev)
+{
+ if (!device_may_wakeup(dev))
+ return pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_resume - Resume from suspend
+ * @dev: Address of the device structure
+ *
+ * Resume operation after suspend.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_resume(struct device *dev)
+{
+ if (!device_may_wakeup(dev))
+ return pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_runtime_suspend - Runtime suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 always
+ */
+static int __maybe_unused zynqmp_dma_runtime_suspend(struct device *dev)
+{
+ struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(zdev->clk_main);
+ clk_disable_unprepare(zdev->clk_apb);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_runtime_resume - Runtime resume method for the driver
+ * @dev: Address of the device structure
+ *
+ * Bring the device out of low power mode.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_runtime_resume(struct device *dev)
+{
+ struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(zdev->clk_main);
+ if (err) {
+ dev_err(dev, "Unable to enable main clock.\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(zdev->clk_apb);
+ if (err) {
+ dev_err(dev, "Unable to enable apb clock.\n");
+ clk_disable_unprepare(zdev->clk_main);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops zynqmp_dma_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dma_suspend, zynqmp_dma_resume)
+ SET_RUNTIME_PM_OPS(zynqmp_dma_runtime_suspend,
+ zynqmp_dma_runtime_resume, NULL)
+};
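
For reference, a hedged sketch of how these runtime-PM ops bracket channel use elsewhere in the patch (device pointer and transfer body hypothetical; the pm_runtime calls are the standard API):

static int example_use_channel(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* clocks on via runtime_resume */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... issue DMA transfers ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev); /* gate clocks after ZDMA_PM_TIMEOUT */
	return 0;
}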
+
+/**
* zynqmp_dma_probe - Driver probe function
* @pdev: Pointer to the platform_device structure
*
@@ -984,12 +1053,33 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
p->device_config = zynqmp_dma_device_config;
p->dev = &pdev->dev;
+ zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main");
+ if (IS_ERR(zdev->clk_main)) {
+ dev_err(&pdev->dev, "main clock not found.\n");
+ return PTR_ERR(zdev->clk_main);
+ }
+
+ zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
+ if (IS_ERR(zdev->clk_apb)) {
+ dev_err(&pdev->dev, "apb clock not found.\n");
+ return PTR_ERR(zdev->clk_apb);
+ }
+
platform_set_drvdata(pdev, zdev);
+ pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(zdev->dev);
+ pm_runtime_enable(zdev->dev);
+ pm_runtime_get_sync(zdev->dev);
+ if (!pm_runtime_enabled(zdev->dev)) {
+ ret = zynqmp_dma_runtime_resume(zdev->dev);
+ if (ret)
+ return ret;
+ }
ret = zynqmp_dma_chan_probe(zdev, pdev);
if (ret) {
dev_err(&pdev->dev, "Probing channel failed\n");
- goto free_chan_resources;
+ goto err_disable_pm;
}
p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
@@ -1005,12 +1095,19 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
goto free_chan_resources;
}
+ pm_runtime_mark_last_busy(zdev->dev);
+ pm_runtime_put_sync_autosuspend(zdev->dev);
+
dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n");
return 0;
free_chan_resources:
zynqmp_dma_chan_remove(zdev->chan);
+err_disable_pm:
+ if (!pm_runtime_enabled(zdev->dev))
+ zynqmp_dma_runtime_suspend(zdev->dev);
+ pm_runtime_disable(zdev->dev);
return ret;
}
@@ -1028,6 +1125,9 @@ static int zynqmp_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&zdev->common);
zynqmp_dma_chan_remove(zdev->chan);
+ pm_runtime_disable(zdev->dev);
+ if (!pm_runtime_enabled(zdev->dev))
+ zynqmp_dma_runtime_suspend(zdev->dev);
return 0;
}
@@ -1042,6 +1142,7 @@ static struct platform_driver zynqmp_dma_driver = {
.driver = {
.name = "xilinx-zynqmp-dma",
.of_match_table = zynqmp_dma_of_match,
+ .pm = &zynqmp_dma_dev_pm_ops,
},
.probe = zynqmp_dma_probe,
.remove = zynqmp_dma_remove,
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 96afb2aeed18..3c4017007647 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -457,4 +457,11 @@ config EDAC_XGENE
Support for error detection and correction on the
APM X-Gene family of SOCs.
+config EDAC_TI
+ tristate "Texas Instruments DDR3 ECC Controller"
+ depends on ARCH_KEYSTONE || SOC_DRA7XX
+ help
+ Support for error detection and correction on the
+ TI SoCs.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 0fd9ffa63299..b54912eb39af 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -78,3 +78,4 @@ obj-$(CONFIG_EDAC_THUNDERX) += thunderx_edac.o
obj-$(CONFIG_EDAC_ALTERA) += altera_edac.o
obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o
+obj-$(CONFIG_EDAC_TI) += ti_edac.o
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index ec5d695bbb72..3c68bb525d5d 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -758,7 +758,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
/* Non-ECC RAM? */
printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
res = -ENODEV;
- goto err2;
+ goto err;
}
edac_dbg(3, "init mci\n");
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index 9c1ffe3e912b..aeb222ca3ed1 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -78,6 +78,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
if (!pvt->inject)
int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
else {
+ int_reg.u64 = 0;
if (pvt->error_type == 1)
int_reg.s.sec_err = 1;
if (pvt->error_type == 2)
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
new file mode 100644
index 000000000000..6ac26d1b929f
--- /dev/null
+++ b/drivers/edac/ti_edac.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Texas Instruments DDR3 ECC error correction and detection driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/edac.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+
+#include "edac_module.h"
+
+/* EMIF controller registers */
+#define EMIF_SDRAM_CONFIG 0x008
+#define EMIF_IRQ_STATUS 0x0ac
+#define EMIF_IRQ_ENABLE_SET 0x0b4
+#define EMIF_ECC_CTRL 0x110
+#define EMIF_1B_ECC_ERR_CNT 0x130
+#define EMIF_1B_ECC_ERR_THRSH 0x134
+#define EMIF_1B_ECC_ERR_ADDR_LOG 0x13c
+#define EMIF_2B_ECC_ERR_ADDR_LOG 0x140
+
+/* Bit definitions for EMIF_SDRAM_CONFIG */
+#define SDRAM_TYPE_SHIFT 29
+#define SDRAM_TYPE_MASK GENMASK(31, 29)
+#define SDRAM_TYPE_DDR3 (3 << SDRAM_TYPE_SHIFT)
+#define SDRAM_TYPE_DDR2 (2 << SDRAM_TYPE_SHIFT)
+#define SDRAM_NARROW_MODE_MASK GENMASK(15, 14)
+#define SDRAM_K2_NARROW_MODE_SHIFT 12
+#define SDRAM_K2_NARROW_MODE_MASK GENMASK(13, 12)
+#define SDRAM_ROWSIZE_SHIFT 7
+#define SDRAM_ROWSIZE_MASK GENMASK(9, 7)
+#define SDRAM_IBANK_SHIFT 4
+#define SDRAM_IBANK_MASK GENMASK(6, 4)
+#define SDRAM_K2_IBANK_SHIFT 5
+#define SDRAM_K2_IBANK_MASK GENMASK(6, 5)
+#define SDRAM_K2_EBANK_SHIFT 3
+#define SDRAM_K2_EBANK_MASK BIT(SDRAM_K2_EBANK_SHIFT)
+#define SDRAM_PAGESIZE_SHIFT 0
+#define SDRAM_PAGESIZE_MASK GENMASK(2, 0)
+#define SDRAM_K2_PAGESIZE_SHIFT 0
+#define SDRAM_K2_PAGESIZE_MASK GENMASK(1, 0)
+
+#define EMIF_1B_ECC_ERR_THRSH_SHIFT 24
+
+/* IRQ bit definitions */
+#define EMIF_1B_ECC_ERR BIT(5)
+#define EMIF_2B_ECC_ERR BIT(4)
+#define EMIF_WR_ECC_ERR BIT(3)
+#define EMIF_SYS_ERR BIT(0)
+/* Bit 31 enables ECC and 28 enables RMW */
+#define ECC_ENABLED (BIT(31) | BIT(28))
+
+#define EDAC_MOD_NAME "ti-emif-edac"
+
+enum {
+ EMIF_TYPE_DRA7,
+ EMIF_TYPE_K2
+};
+
+struct ti_edac {
+ void __iomem *reg;
+};
+
+static u32 ti_edac_readl(struct ti_edac *edac, u16 offset)
+{
+ return readl_relaxed(edac->reg + offset);
+}
+
+static void ti_edac_writel(struct ti_edac *edac, u32 val, u16 offset)
+{
+ writel_relaxed(val, edac->reg + offset);
+}
+
+static irqreturn_t ti_edac_isr(int irq, void *data)
+{
+ struct mem_ctl_info *mci = data;
+ struct ti_edac *edac = mci->pvt_info;
+ u32 irq_status;
+ u32 err_addr;
+ int err_count;
+
+ irq_status = ti_edac_readl(edac, EMIF_IRQ_STATUS);
+
+ if (irq_status & EMIF_1B_ECC_ERR) {
+ err_addr = ti_edac_readl(edac, EMIF_1B_ECC_ERR_ADDR_LOG);
+ err_count = ti_edac_readl(edac, EMIF_1B_ECC_ERR_CNT);
+ ti_edac_writel(edac, err_count, EMIF_1B_ECC_ERR_CNT);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
+ err_addr >> PAGE_SHIFT,
+ err_addr & ~PAGE_MASK, -1, 0, 0, 0,
+ mci->ctl_name, "1B");
+ }
+
+ if (irq_status & EMIF_2B_ECC_ERR) {
+ err_addr = ti_edac_readl(edac, EMIF_2B_ECC_ERR_ADDR_LOG);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ err_addr >> PAGE_SHIFT,
+ err_addr & ~PAGE_MASK, -1, 0, 0, 0,
+ mci->ctl_name, "2B");
+ }
+
+ if (irq_status & EMIF_WR_ECC_ERR)
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ 0, 0, -1, 0, 0, 0,
+ mci->ctl_name, "WR");
+
+ ti_edac_writel(edac, irq_status, EMIF_IRQ_STATUS);
+
+ return IRQ_HANDLED;
+}
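
The ISR above splits the logged address into an EDAC page/offset pair; a worked example with a hypothetical address (PAGE_SHIFT of 12 assumed):

u32 err_addr = 0x80123456;			/* hypothetical logged address */
unsigned long pfn = err_addr >> PAGE_SHIFT;	/* 0x80123 */
unsigned long off = err_addr & ~PAGE_MASK;	/* 0x456 */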
+
+static void ti_edac_setup_dimm(struct mem_ctl_info *mci, u32 type)
+{
+ struct dimm_info *dimm;
+ struct ti_edac *edac = mci->pvt_info;
+ int bits;
+ u32 val;
+ u32 memsize;
+
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 0, 0, 0);
+
+ val = ti_edac_readl(edac, EMIF_SDRAM_CONFIG);
+
+ if (type == EMIF_TYPE_DRA7) {
+ bits = ((val & SDRAM_PAGESIZE_MASK) >> SDRAM_PAGESIZE_SHIFT) + 8;
+ bits += ((val & SDRAM_ROWSIZE_MASK) >> SDRAM_ROWSIZE_SHIFT) + 9;
+ bits += (val & SDRAM_IBANK_MASK) >> SDRAM_IBANK_SHIFT;
+
+ if (val & SDRAM_NARROW_MODE_MASK) {
+ bits++;
+ dimm->dtype = DEV_X16;
+ } else {
+ bits += 2;
+ dimm->dtype = DEV_X32;
+ }
+ } else {
+ bits = 16;
+ bits += ((val & SDRAM_K2_PAGESIZE_MASK) >>
+ SDRAM_K2_PAGESIZE_SHIFT) + 8;
+ bits += (val & SDRAM_K2_IBANK_MASK) >> SDRAM_K2_IBANK_SHIFT;
+ bits += (val & SDRAM_K2_EBANK_MASK) >> SDRAM_K2_EBANK_SHIFT;
+
+ val = (val & SDRAM_K2_NARROW_MODE_MASK) >>
+ SDRAM_K2_NARROW_MODE_SHIFT;
+ switch (val) {
+ case 0:
+ bits += 3;
+ dimm->dtype = DEV_X64;
+ break;
+ case 1:
+ bits += 2;
+ dimm->dtype = DEV_X32;
+ break;
+ case 2:
+ bits++;
+ dimm->dtype = DEV_X16;
+ break;
+ }
+ }
+
+ memsize = 1 << bits;
+
+ dimm->nr_pages = memsize >> PAGE_SHIFT;
+ dimm->grain = 4;
+ if ((val & SDRAM_TYPE_MASK) == SDRAM_TYPE_DDR2)
+ dimm->mtype = MEM_DDR2;
+ else
+ dimm->mtype = MEM_DDR3;
+
+ val = ti_edac_readl(edac, EMIF_ECC_CTRL);
+ if (val & ECC_ENABLED)
+ dimm->edac_mode = EDAC_SECDED;
+ else
+ dimm->edac_mode = EDAC_NONE;
+}
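
A worked example of the DRA7 branch of the size computation above, with hypothetical register field values:

/* SDRAM_PAGESIZE = 2 -> 10 column bits; SDRAM_ROWSIZE = 5 -> 14 row
 * bits; SDRAM_IBANK = 3 -> 8 banks; full-width bus -> +2 bits. */
int bits = (2 + 8) + (5 + 9) + 3 + 2;		/* = 29 */
u32 memsize = 1 << bits;			/* 512 MiB */
u32 nr_pages = memsize >> PAGE_SHIFT;		/* 0x20000 pages */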
+
+static const struct of_device_id ti_edac_of_match[] = {
+ { .compatible = "ti,emif-keystone", .data = (void *)EMIF_TYPE_K2 },
+ { .compatible = "ti,emif-dra7xx", .data = (void *)EMIF_TYPE_DRA7 },
+ {},
+};
+
+static int _emif_get_id(struct device_node *node)
+{
+ struct device_node *np;
+ const __be32 *addrp;
+ u32 addr, my_addr;
+ int my_id = 0;
+
+ addrp = of_get_address(node, 0, NULL, NULL);
+ my_addr = (u32)of_translate_address(node, addrp);
+
+ for_each_matching_node(np, ti_edac_of_match) {
+ if (np == node)
+ continue;
+
+ addrp = of_get_address(np, 0, NULL, NULL);
+ addr = (u32)of_translate_address(np, addrp);
+
+ edac_printk(KERN_INFO, EDAC_MOD_NAME,
+ "addr=%x, my_addr=%x\n",
+ addr, my_addr);
+
+ if (addr < my_addr)
+ my_id++;
+ }
+
+ return my_id;
+}
+
+static int ti_edac_probe(struct platform_device *pdev)
+{
+ int error_irq = 0, ret = -ENODEV;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *reg;
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[1];
+ const struct of_device_id *id;
+ struct ti_edac *edac;
+ int emif_id;
+
+ id = of_match_device(ti_edac_of_match, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg)) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "EMIF controller regs not defined\n");
+ return PTR_ERR(reg);
+ }
+
+ layers[0].type = EDAC_MC_LAYER_ALL_MEM;
+ layers[0].size = 1;
+
+ /* Allocate ID number for our EMIF controller */
+ emif_id = _emif_get_id(pdev->dev.of_node);
+ if (emif_id < 0)
+ return -EINVAL;
+
+ mci = edac_mc_alloc(emif_id, 1, layers, sizeof(*edac));
+ if (!mci)
+ return -ENOMEM;
+
+ mci->pdev = &pdev->dev;
+ edac = mci->pvt_info;
+ edac->reg = reg;
+ platform_set_drvdata(pdev, mci);
+
+ mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED | EDAC_FLAG_NONE;
+ mci->mod_name = EDAC_MOD_NAME;
+ mci->ctl_name = id->compatible;
+ mci->dev_name = dev_name(&pdev->dev);
+
+ /* Setup memory layout */
+ ti_edac_setup_dimm(mci, (u32)(id->data));
+
+ /* add EMIF ECC error handler */
+ error_irq = platform_get_irq(pdev, 0);
+ if (error_irq < 0) {
+ ret = error_irq;
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "EMIF irq number not defined.\n");
+ goto err;
+ }
+
+ ret = devm_request_irq(dev, error_irq, ti_edac_isr, 0,
+ "emif-edac-irq", mci);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "request_irq fail for EMIF EDAC irq\n");
+ goto err;
+ }
+
+ ret = edac_mc_add_mc(mci);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "Failed to register mci: %d.\n", ret);
+ goto err;
+ }
+
+ /* Generate an interrupt with each 1b error */
+ ti_edac_writel(edac, 1 << EMIF_1B_ECC_ERR_THRSH_SHIFT,
+ EMIF_1B_ECC_ERR_THRSH);
+
+ /* Enable interrupts */
+ ti_edac_writel(edac,
+ EMIF_1B_ECC_ERR | EMIF_2B_ECC_ERR | EMIF_WR_ECC_ERR,
+ EMIF_IRQ_ENABLE_SET);
+
+ return 0;
+
+err:
+ edac_mc_free(mci);
+ return ret;
+}
+
+static int ti_edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ return 0;
+}
+
+static struct platform_driver ti_edac_driver = {
+ .probe = ti_edac_probe,
+ .remove = ti_edac_remove,
+ .driver = {
+ .name = EDAC_MOD_NAME,
+ .of_match_table = ti_edac_of_match,
+ },
+};
+
+module_platform_driver(ti_edac_driver);
+
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("EDAC Driver for Texas Instruments DDR3 MC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 981fba56bc18..1621f2f7f129 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -24,8 +24,6 @@
#include <linux/notifier.h>
#include <linux/extcon-provider.h>
#include <linux/regmap.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/mfd/axp20x.h>
/* Power source status register */
@@ -79,11 +77,6 @@ enum axp288_extcon_reg {
AXP288_BC_DET_STAT_REG = 0x2f,
};
-enum axp288_mux_select {
- EXTCON_GPIO_MUX_SEL_PMIC = 0,
- EXTCON_GPIO_MUX_SEL_SOC,
-};
-
enum axp288_extcon_irq {
VBUS_FALLING_IRQ = 0,
VBUS_RISING_IRQ,
@@ -104,10 +97,8 @@ struct axp288_extcon_info {
struct device *dev;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
- struct gpio_desc *gpio_mux_cntl;
int irq[EXTCON_IRQ_END];
struct extcon_dev *edev;
- struct notifier_block extcon_nb;
unsigned int previous_cable;
};
@@ -197,15 +188,6 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
}
no_vbus:
- /*
- * If VBUS is absent Connect D+/D- lines to PMIC for BC
- * detection. Else connect them to SOC for USB communication.
- */
- if (info->gpio_mux_cntl)
- gpiod_set_value(info->gpio_mux_cntl,
- vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC
- : EXTCON_GPIO_MUX_SEL_PMIC);
-
extcon_set_state_sync(info->edev, info->previous_cable, false);
if (info->previous_cable == EXTCON_CHG_USB_SDP)
extcon_set_state_sync(info->edev, EXTCON_USB, false);
@@ -253,8 +235,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
{
struct axp288_extcon_info *info;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
- struct axp288_extcon_pdata *pdata = pdev->dev.platform_data;
- int ret, i, pirq, gpio;
+ int ret, i, pirq;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -264,8 +245,6 @@ static int axp288_extcon_probe(struct platform_device *pdev)
info->regmap = axp20x->regmap;
info->regmap_irqc = axp20x->regmap_irqc;
info->previous_cable = EXTCON_NONE;
- if (pdata)
- info->gpio_mux_cntl = pdata->gpio_mux_cntl;
platform_set_drvdata(pdev, info);
@@ -286,21 +265,11 @@ static int axp288_extcon_probe(struct platform_device *pdev)
return ret;
}
- /* Set up gpio control for USB Mux */
- if (info->gpio_mux_cntl) {
- gpio = desc_to_gpio(info->gpio_mux_cntl);
- ret = devm_gpio_request(&pdev->dev, gpio, "USB_MUX");
- if (ret < 0) {
- dev_err(&pdev->dev,
- "failed to request the gpio=%d\n", gpio);
- return ret;
- }
- gpiod_direction_output(info->gpio_mux_cntl,
- EXTCON_GPIO_MUX_SEL_PMIC);
- }
-
for (i = 0; i < EXTCON_IRQ_END; i++) {
pirq = platform_get_irq(pdev, i);
+ if (pirq < 0)
+ return pirq;
+
info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
if (info->irq[i] < 0) {
dev_err(&pdev->dev,
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
index 6187f731b29d..6721ab01fe7d 100644
--- a/drivers/extcon/extcon-usbc-cros-ec.c
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -34,16 +34,26 @@ struct cros_ec_extcon_info {
struct notifier_block notifier;
+ unsigned int dr; /* data role */
+ bool pr; /* power role (true if VBUS enabled) */
bool dp; /* DisplayPort enabled */
bool mux; /* SuperSpeed (usb3) enabled */
unsigned int power_type;
};
static const unsigned int usb_type_c_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
EXTCON_DISP_DP,
EXTCON_NONE,
};
+enum usb_data_roles {
+ DR_NONE,
+ DR_HOST,
+ DR_DEVICE,
+};
+
/**
* cros_ec_pd_command() - Send a command to the EC.
* @info: pointer to struct cros_ec_extcon_info
@@ -150,6 +160,7 @@ static int cros_ec_usb_get_role(struct cros_ec_extcon_info *info,
pd_control.port = info->port_id;
pd_control.role = USB_PD_CTRL_ROLE_NO_CHANGE;
pd_control.mux = USB_PD_CTRL_MUX_NO_CHANGE;
+ pd_control.swap = USB_PD_CTRL_SWAP_NONE;
ret = cros_ec_pd_command(info, EC_CMD_USB_PD_CONTROL, 1,
&pd_control, sizeof(pd_control),
&resp, sizeof(resp));
@@ -183,11 +194,72 @@ static int cros_ec_pd_get_num_ports(struct cros_ec_extcon_info *info)
return resp.num_ports;
}
+static const char *cros_ec_usb_role_string(unsigned int role)
+{
+ return role == DR_NONE ? "DISCONNECTED" :
+ (role == DR_HOST ? "DFP" : "UFP");
+}
+
+static const char *cros_ec_usb_power_type_string(unsigned int type)
+{
+ switch (type) {
+ case USB_CHG_TYPE_NONE:
+ return "USB_CHG_TYPE_NONE";
+ case USB_CHG_TYPE_PD:
+ return "USB_CHG_TYPE_PD";
+ case USB_CHG_TYPE_PROPRIETARY:
+ return "USB_CHG_TYPE_PROPRIETARY";
+ case USB_CHG_TYPE_C:
+ return "USB_CHG_TYPE_C";
+ case USB_CHG_TYPE_BC12_DCP:
+ return "USB_CHG_TYPE_BC12_DCP";
+ case USB_CHG_TYPE_BC12_CDP:
+ return "USB_CHG_TYPE_BC12_CDP";
+ case USB_CHG_TYPE_BC12_SDP:
+ return "USB_CHG_TYPE_BC12_SDP";
+ case USB_CHG_TYPE_OTHER:
+ return "USB_CHG_TYPE_OTHER";
+ case USB_CHG_TYPE_VBUS:
+ return "USB_CHG_TYPE_VBUS";
+ case USB_CHG_TYPE_UNKNOWN:
+ return "USB_CHG_TYPE_UNKNOWN";
+ default:
+ return "USB_CHG_TYPE_UNKNOWN";
+ }
+}
+
+static bool cros_ec_usb_power_type_is_wall_wart(unsigned int type,
+ unsigned int role)
+{
+ switch (type) {
+ /* FIXME: Guppy, Donnettes, and other chargers will be miscategorized
+ * because they identify with USB_CHG_TYPE_C, but we can't return true
+ * here from that code because that breaks Suzy-Q and other kinds of
+ * USB Type-C cables and peripherals.
+ */
+ case USB_CHG_TYPE_PROPRIETARY:
+ case USB_CHG_TYPE_BC12_DCP:
+ return true;
+ case USB_CHG_TYPE_PD:
+ case USB_CHG_TYPE_C:
+ case USB_CHG_TYPE_BC12_CDP:
+ case USB_CHG_TYPE_BC12_SDP:
+ case USB_CHG_TYPE_OTHER:
+ case USB_CHG_TYPE_VBUS:
+ case USB_CHG_TYPE_UNKNOWN:
+ case USB_CHG_TYPE_NONE:
+ default:
+ return false;
+ }
+}
+
static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
bool force)
{
struct device *dev = info->dev;
int role, power_type;
+ unsigned int dr = DR_NONE;
+ bool pr = false;
bool polarity = false;
bool dp = false;
bool mux = false;
@@ -206,9 +278,12 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
dev_err(dev, "failed getting role err = %d\n", role);
return role;
}
+ dev_dbg(dev, "disconnected\n");
} else {
int pd_mux_state;
+ dr = (role & PD_CTRL_RESP_ROLE_DATA) ? DR_HOST : DR_DEVICE;
+ pr = (role & PD_CTRL_RESP_ROLE_POWER);
pd_mux_state = cros_ec_usb_get_pd_mux_state(info);
if (pd_mux_state < 0)
pd_mux_state = USB_PD_MUX_USB_ENABLED;
@@ -216,20 +291,62 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
dp = pd_mux_state & USB_PD_MUX_DP_ENABLED;
mux = pd_mux_state & USB_PD_MUX_USB_ENABLED;
hpd = pd_mux_state & USB_PD_MUX_HPD_IRQ;
- }
- if (force || info->dp != dp || info->mux != mux ||
- info->power_type != power_type) {
+ dev_dbg(dev,
+ "connected role 0x%x pwr type %d dr %d pr %d pol %d mux %d dp %d hpd %d\n",
+ role, power_type, dr, pr, polarity, mux, dp, hpd);
+ }
+ /*
+ * When there is no USB host (e.g. USB PD charger),
+ * we are not really a UFP for the AP.
+ */
+ if (dr == DR_DEVICE &&
+ cros_ec_usb_power_type_is_wall_wart(power_type, role))
+ dr = DR_NONE;
+
+ if (force || info->dr != dr || info->pr != pr || info->dp != dp ||
+ info->mux != mux || info->power_type != power_type) {
+ bool host_connected = false, device_connected = false;
+
+ dev_dbg(dev, "Type/Role switch! type = %s role = %s\n",
+ cros_ec_usb_power_type_string(power_type),
+ cros_ec_usb_role_string(dr));
+ info->dr = dr;
+ info->pr = pr;
info->dp = dp;
info->mux = mux;
info->power_type = power_type;
- extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
+ if (dr == DR_DEVICE)
+ device_connected = true;
+ else if (dr == DR_HOST)
+ host_connected = true;
+ extcon_set_state(info->edev, EXTCON_USB, device_connected);
+ extcon_set_state(info->edev, EXTCON_USB_HOST, host_connected);
+ extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
+ extcon_set_property(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_VBUS,
+ (union extcon_property_value)(int)pr);
+ extcon_set_property(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_VBUS,
+ (union extcon_property_value)(int)pr);
+ extcon_set_property(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_TYPEC_POLARITY,
+ (union extcon_property_value)(int)polarity);
+ extcon_set_property(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY,
+ (union extcon_property_value)(int)polarity);
extcon_set_property(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_TYPEC_POLARITY,
(union extcon_property_value)(int)polarity);
+ extcon_set_property(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_SS,
+ (union extcon_property_value)(int)mux);
+ extcon_set_property(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_SS,
+ (union extcon_property_value)(int)mux);
extcon_set_property(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_SS,
(union extcon_property_value)(int)mux);
@@ -237,6 +354,8 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
EXTCON_PROP_DISP_HPD,
(union extcon_property_value)(int)hpd);
+ extcon_sync(info->edev, EXTCON_USB);
+ extcon_sync(info->edev, EXTCON_USB_HOST);
extcon_sync(info->edev, EXTCON_DISP_DP);
} else if (hpd) {
@@ -322,13 +441,28 @@ static int extcon_cros_ec_probe(struct platform_device *pdev)
return ret;
}
+ extcon_set_property_capability(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_VBUS);
+ extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_VBUS);
+ extcon_set_property_capability(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_SS);
+ extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_SS);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_SS);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_DISP_HPD);
+ info->dr = DR_NONE;
+ info->pr = false;
+
platform_set_drvdata(pdev, info);
/* Get PD events from the EC */
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index a301fcf46e88..523391bb3fbe 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1784,10 +1784,10 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
+static __poll_t fw_device_op_poll(struct file *file, poll_table * pt)
{
struct client *client = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &client->wait, pt);
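
For context on the __poll_t conversion, a minimal poll method in the new style (structure and wait-queue names hypothetical; the EPOLL* constants are the __poll_t-typed replacements for the old POLL* values):

static __poll_t example_poll(struct file *file, poll_table *pt)
{
	struct example_client *client = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &client->wait, pt);
	if (!list_empty(&client->event_list))
		mask |= EPOLLIN | EPOLLRDNORM;	/* data ready to read */

	return mask;
}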
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 180f0a96528c..fee2e9e7ea20 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -328,11 +328,11 @@ nosy_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int
+static __poll_t
nosy_poll(struct file *file, poll_table *pt)
{
struct client *client = file->private_data;
- unsigned int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &client->buffer.wait, pt);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index fa87a055905e..e77f77caa0f3 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -48,6 +48,14 @@ config ARM_SCPI_POWER_DOMAIN
This enables support for the SCPI power domains which can be
enabled or disabled via the SCP firmware
+config ARM_SDE_INTERFACE
+ bool "ARM Software Delegated Exception Interface (SDEI)"
+ depends on ARM64
+ help
+ The Software Delegated Exception Interface (SDEI) is an ARM
+ standard for registering callbacks from the platform firmware
+ into the OS. This is typically used to implement RAS notifications.
+
config EDD
tristate "BIOS Enhanced Disk Drive calls determine boot disk"
depends on X86
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index feaa890197f3..b248238ddc6a 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_ARM_PSCI_FW) += psci.o
obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
+obj-$(CONFIG_ARM_SDE_INTERFACE) += arm_sdei.o
obj-$(CONFIG_DMI) += dmi_scan.o
obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o
obj-$(CONFIG_EDD) += edd.o
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
new file mode 100644
index 000000000000..1ea71640fdc2
--- /dev/null
+++ b/drivers/firmware/arm_sdei.c
@@ -0,0 +1,1092 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#define pr_fmt(fmt) "sdei: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm_sdei.h>
+#include <linux/arm-smccc.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/kvm_host.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+/*
+ * The call to use to reach the firmware.
+ */
+static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res);
+
+/* entry point from firmware to arch asm code */
+static unsigned long sdei_entry_point;
+
+struct sdei_event {
+ /* These three are protected by the sdei_list_lock */
+ struct list_head list;
+ bool reregister;
+ bool reenable;
+
+ u32 event_num;
+ u8 type;
+ u8 priority;
+
+ /* This pointer is handed to firmware as the event argument. */
+ union {
+ /* Shared events */
+ struct sdei_registered_event *registered;
+
+ /* CPU private events */
+ struct sdei_registered_event __percpu *private_registered;
+ };
+};
+
+/* Take the mutex for any API call or modification; it must be taken before sdei_list_lock. */
+static DEFINE_MUTEX(sdei_events_lock);
+
+/* and then hold this when modifying the list */
+static DEFINE_SPINLOCK(sdei_list_lock);
+static LIST_HEAD(sdei_list);
+
+/* Private events are registered/enabled via IPI passing one of these */
+struct sdei_crosscall_args {
+ struct sdei_event *event;
+ atomic_t errors;
+ int first_error;
+};
+
+#define CROSSCALL_INIT(arg, event) (arg.event = event, \
+ arg.first_error = 0, \
+ atomic_set(&arg.errors, 0))
+
+static inline int sdei_do_cross_call(void *fn, struct sdei_event *event)
+{
+ struct sdei_crosscall_args arg;
+
+ CROSSCALL_INIT(arg, event);
+ on_each_cpu(fn, &arg, true);
+
+ return arg.first_error;
+}
+
+static inline void
+sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
+{
+ if (err && (atomic_inc_return(&arg->errors) == 1))
+ arg->first_error = err;
+}
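
A hedged sketch of a cross-call target built on these helpers (function chosen for illustration; SDEI_1_0_FN_SDEI_EVENT_ENABLE is defined in the SDEI uapi header):

static void _ipi_event_enable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	/* Per-CPU operation; only the first error is reported back. */
	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE,
			     arg->event->event_num, 0, 0, 0, 0, NULL);
	sdei_cross_call_return(arg, err);
}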
+
+static int sdei_to_linux_errno(unsigned long sdei_err)
+{
+ switch (sdei_err) {
+ case SDEI_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case SDEI_INVALID_PARAMETERS:
+ return -EINVAL;
+ case SDEI_DENIED:
+ return -EPERM;
+ case SDEI_PENDING:
+ return -EINPROGRESS;
+ case SDEI_OUT_OF_RESOURCE:
+ return -ENOMEM;
+ }
+
+ /* Not an error value ... */
+ return sdei_err;
+}
+
+/*
+ * If x0 is any of these values, then the call failed; use
+ * sdei_to_linux_errno() to translate.
+ */
+static bool sdei_is_err(struct arm_smccc_res *res)
+{
+ switch (res->a0) {
+ case SDEI_NOT_SUPPORTED:
+ case SDEI_INVALID_PARAMETERS:
+ case SDEI_DENIED:
+ case SDEI_PENDING:
+ case SDEI_OUT_OF_RESOURCE:
+ return true;
+ }
+
+ return false;
+}
+
+static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
+ unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ u64 *result)
+{
+ int err = 0;
+ struct arm_smccc_res res;
+
+ if (sdei_firmware_call) {
+ sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
+ &res);
+ if (sdei_is_err(&res))
+ err = sdei_to_linux_errno(res.a0);
+ } else {
+ /*
+ * !sdei_firmware_call means we failed to probe or called
+ * sdei_mark_interface_broken(). -EIO is not an error returned
+ * by sdei_to_linux_errno() and is used to suppress messages
+ * from this driver.
+ */
+ err = -EIO;
+ res.a0 = SDEI_NOT_SUPPORTED;
+ }
+
+ if (result)
+ *result = res.a0;
+
+ return err;
+}
+
+static struct sdei_event *sdei_event_find(u32 event_num)
+{
+ struct sdei_event *e, *found = NULL;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(e, &sdei_list, list) {
+ if (e->event_num == event_num) {
+ found = e;
+ break;
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return found;
+}
+
+int sdei_api_event_context(u32 query, u64 *result)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
+ result);
+}
+NOKPROBE_SYMBOL(sdei_api_event_context);
+
+static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
+ 0, 0, result);
+}
+
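+/*
+ * Shared events carry a single sdei_registered_event; private (CPU-bound)
+ * events get a per-cpu copy, as each CPU registers the event separately
+ * and firmware hands the pointer back as the handler argument.
+ */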
+static struct sdei_event *sdei_event_create(u32 event_num,
+ sdei_event_callback *cb,
+ void *cb_arg)
+{
+ int err;
+ u64 result;
+ struct sdei_event *event;
+ struct sdei_registered_event *reg;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&event->list);
+ event->event_num = event_num;
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
+ &result);
+ if (err) {
+ kfree(event);
+ return ERR_PTR(err);
+ }
+ event->priority = result;
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
+ &result);
+ if (err) {
+ kfree(event);
+ return ERR_PTR(err);
+ }
+ event->type = result;
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED) {
+ reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg) {
+ kfree(event);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ reg->event_num = event_num;
+ reg->priority = event->priority;
+
+ reg->callback = cb;
+ reg->callback_arg = cb_arg;
+ event->registered = reg;
+ } else {
+ int cpu;
+ struct sdei_registered_event __percpu *regs;
+
+ regs = alloc_percpu(struct sdei_registered_event);
+ if (!regs) {
+ kfree(event);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_possible_cpu(cpu) {
+ reg = per_cpu_ptr(regs, cpu);
+
+ reg->event_num = event->event_num;
+ reg->priority = event->priority;
+ reg->callback = cb;
+ reg->callback_arg = cb_arg;
+ }
+
+ event->private_registered = regs;
+ }
+
+ if (sdei_event_find(event_num)) {
+ kfree(event->registered);
+ kfree(event);
+ event = ERR_PTR(-EBUSY);
+ } else {
+ spin_lock(&sdei_list_lock);
+ list_add(&event->list, &sdei_list);
+ spin_unlock(&sdei_list_lock);
+ }
+
+ return event;
+}
+
+static void sdei_event_destroy(struct sdei_event *event)
+{
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ list_del(&event->list);
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ kfree(event->registered);
+ else
+ free_percpu(event->private_registered);
+
+ kfree(event);
+}
+
+static int sdei_api_get_version(u64 *version)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
+}
+
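+/*
+ * PE mask/unmask only affects the calling CPU, hence the preemptible()
+ * checks: the caller must already be pinned to the CPU it wants to mask.
+ */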
+int sdei_mask_local_cpu(void)
+{
+ int err;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
+ if (err && err != -EIO) {
+ pr_warn_once("failed to mask CPU[%u]: %d\n",
+ smp_processor_id(), err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void _ipi_mask_cpu(void *ignored)
+{
+ sdei_mask_local_cpu();
+}
+
+int sdei_unmask_local_cpu(void)
+{
+ int err;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
+ if (err && err != -EIO) {
+ pr_warn_once("failed to unmask CPU[%u]: %d\n",
+ smp_processor_id(), err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void _ipi_unmask_cpu(void *ignored)
+{
+ sdei_unmask_local_cpu();
+}
+
+static void _ipi_private_reset(void *ignored)
+{
+ int err;
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
+ NULL);
+ if (err && err != -EIO)
+ pr_warn_once("failed to reset CPU[%u]: %d\n",
+ smp_processor_id(), err);
+}
+
+static int sdei_api_shared_reset(void)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
+ NULL);
+}
+
+static void sdei_mark_interface_broken(void)
+{
+ pr_err("disabling SDEI firmware interface\n");
+ on_each_cpu(&_ipi_mask_cpu, NULL, true);
+ sdei_firmware_call = NULL;
+}
+
+static int sdei_platform_reset(void)
+{
+ int err;
+
+ on_each_cpu(&_ipi_private_reset, NULL, true);
+ err = sdei_api_shared_reset();
+ if (err) {
+ pr_err("Failed to reset platform: %d\n", err);
+ sdei_mark_interface_broken();
+ }
+
+ return err;
+}
+
+static int sdei_api_event_enable(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
+ 0, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_enable(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = sdei_api_event_enable(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_enable(u32 event_num)
+{
+ int err = -EINVAL;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ if (!event) {
+ mutex_unlock(&sdei_events_lock);
+ return -ENOENT;
+ }
+
+ spin_lock(&sdei_list_lock);
+ event->reenable = true;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_enable(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_enable, event);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_enable);
+
+static int sdei_api_event_disable(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
+ 0, 0, NULL);
+}
+
+static void _ipi_event_disable(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ err = sdei_api_event_disable(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_disable(u32 event_num)
+{
+ int err = -EINVAL;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ if (!event) {
+ mutex_unlock(&sdei_events_lock);
+ return -ENOENT;
+ }
+
+ spin_lock(&sdei_list_lock);
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_disable(event->event_num);
+ else
+ err = sdei_do_cross_call(_ipi_event_disable, event);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_disable);
+
+static int sdei_api_event_unregister(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
+ 0, 0, 0, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_unregister(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = sdei_api_event_unregister(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+static int _sdei_event_unregister(struct sdei_event *event)
+{
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ event->reregister = false;
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ return sdei_api_event_unregister(event->event_num);
+
+ return sdei_do_cross_call(_local_event_unregister, event);
+}
+
+int sdei_event_unregister(u32 event_num)
+{
+ int err;
+ struct sdei_event *event;
+
+ WARN_ON(in_nmi());
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ do {
+ if (!event) {
+ pr_warn("Event %u not registered\n", event_num);
+ err = -ENOENT;
+ break;
+ }
+
+ err = _sdei_event_unregister(event);
+ if (err)
+ break;
+
+ sdei_event_destroy(event);
+ } while (0);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_unregister);
+
+/*
+ * Unregister events, but don't destroy them, as they are re-registered by
+ * sdei_reregister_shared().
+ */
+static int sdei_unregister_shared(void)
+{
+ int err = 0;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type != SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ err = _sdei_event_unregister(event);
+ if (err)
+ break;
+ }
+ spin_unlock(&sdei_list_lock);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
+ void *arg, u64 flags, u64 affinity)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
+ (unsigned long)entry_point, (unsigned long)arg,
+ flags, affinity, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_register(void *data)
+{
+ int err;
+ struct sdei_registered_event *reg;
+ struct sdei_crosscall_args *arg = data;
+
+ WARN_ON(preemptible());
+
+ reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
+ err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
+ reg, 0, 0);
+
+ sdei_cross_call_return(arg, err);
+}
+
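+/*
+ * Shared events are registered once with a routing mode; private events
+ * must be registered on each CPU. If any CPU fails, roll back the CPUs
+ * that succeeded by unregistering everywhere.
+ */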
+static int _sdei_event_register(struct sdei_event *event)
+{
+ int err;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ event->reregister = true;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ return sdei_api_event_register(event->event_num,
+ sdei_entry_point,
+ event->registered,
+ SDEI_EVENT_REGISTER_RM_ANY, 0);
+
+ err = sdei_do_cross_call(_local_event_register, event);
+ if (err) {
+ spin_lock(&sdei_list_lock);
+ event->reregister = false;
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ sdei_do_cross_call(_local_event_unregister, event);
+ }
+
+ return err;
+}
+
+int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
+{
+ int err;
+ struct sdei_event *event;
+
+ WARN_ON(in_nmi());
+
+ mutex_lock(&sdei_events_lock);
+ do {
+ if (sdei_event_find(event_num)) {
+ pr_warn("Event %u already registered\n", event_num);
+ err = -EBUSY;
+ break;
+ }
+
+ event = sdei_event_create(event_num, cb, arg);
+ if (IS_ERR(event)) {
+ err = PTR_ERR(event);
+ pr_warn("Failed to create event %u: %d\n", event_num,
+ err);
+ break;
+ }
+
+ err = _sdei_event_register(event);
+ if (err) {
+ sdei_event_destroy(event);
+ pr_warn("Failed to register event %u: %d\n", event_num,
+ err);
+ }
+ } while (0);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_register);
+
+static int sdei_reregister_event(struct sdei_event *event)
+{
+ int err;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ err = _sdei_event_register(event);
+ if (err) {
+ pr_err("Failed to re-register event %u\n", event->event_num);
+ sdei_event_destroy(event);
+ return err;
+ }
+
+ if (event->reenable) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_enable(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_enable, event);
+ }
+
+ if (err)
+ pr_err("Failed to re-enable event %u\n", event->event_num);
+
+ return err;
+}
+
+static int sdei_reregister_shared(void)
+{
+ int err = 0;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type != SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ if (event->reregister) {
+ err = sdei_reregister_event(event);
+ if (err)
+ break;
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
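+/*
+ * Private events are registered per-CPU, so a CPU going offline must
+ * unregister its own copies; sdei_cpuhp_up() re-registers them when the
+ * CPU returns. Shared events are unaffected by hotplug.
+ */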
+static int sdei_cpuhp_down(unsigned int cpu)
+{
+ struct sdei_event *event;
+ struct sdei_crosscall_args arg;
+
+ /* un-register private events */
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ CROSSCALL_INIT(arg, event);
+ /* call the cross-call function locally... */
+ _local_event_unregister(&arg);
+ if (arg.first_error)
+ pr_err("Failed to unregister event %u: %d\n",
+ event->event_num, arg.first_error);
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return sdei_mask_local_cpu();
+}
+
+static int sdei_cpuhp_up(unsigned int cpu)
+{
+ struct sdei_event *event;
+ struct sdei_crosscall_args arg;
+
+ /* re-register/enable private events */
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ if (event->reregister) {
+ CROSSCALL_INIT(arg, event);
+ /* call the cross-call function locally... */
+ _local_event_register(&arg);
+ if (arg.first_error)
+ pr_err("Failed to re-register event %u: %d\n",
+ event->event_num, arg.first_error);
+ }
+
+ if (event->reenable) {
+ CROSSCALL_INIT(arg, event);
+ _local_event_enable(&arg);
+ if (arg.first_error)
+ pr_err("Failed to re-enable event %u: %d\n",
+ event->event_num, arg.first_error);
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return sdei_unmask_local_cpu();
+}
+
+/* When entering idle, mask/unmask events for this cpu */
+static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ int rv;
+
+ switch (action) {
+ case CPU_PM_ENTER:
+ rv = sdei_mask_local_cpu();
+ break;
+ case CPU_PM_EXIT:
+ case CPU_PM_ENTER_FAILED:
+ rv = sdei_unmask_local_cpu();
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ if (rv)
+ return notifier_from_errno(rv);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sdei_pm_nb = {
+ .notifier_call = sdei_pm_notifier,
+};
+
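+/*
+ * Suspend-to-RAM leaves the firmware state intact, so masking each CPU
+ * around the transition is enough; event registrations are preserved.
+ */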
+static int sdei_device_suspend(struct device *dev)
+{
+ on_each_cpu(_ipi_mask_cpu, NULL, true);
+
+ return 0;
+}
+
+static int sdei_device_resume(struct device *dev)
+{
+ on_each_cpu(_ipi_unmask_cpu, NULL, true);
+
+ return 0;
+}
+
+/*
+ * We need all events to be reregistered when we resume from hibernate.
+ *
+ * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
+ * events during freeze, then re-register and re-enable them during thaw
+ * and restore.
+ */
+static int sdei_device_freeze(struct device *dev)
+{
+ int err;
+
+ /* unregister private events */
+ cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+
+ err = sdei_unregister_shared();
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int sdei_device_thaw(struct device *dev)
+{
+ int err;
+
+ /* re-register shared events */
+ err = sdei_reregister_shared();
+ if (err) {
+ pr_warn("Failed to re-register shared events...\n");
+ sdei_mark_interface_broken();
+ return err;
+ }
+
+ err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+ &sdei_cpuhp_up, &sdei_cpuhp_down);
+ if (err)
+ pr_warn("Failed to re-register CPU hotplug notifier...\n");
+
+ return err;
+}
+
+static int sdei_device_restore(struct device *dev)
+{
+ int err;
+
+ err = sdei_platform_reset();
+ if (err)
+ return err;
+
+ return sdei_device_thaw(dev);
+}
+
+static const struct dev_pm_ops sdei_pm_ops = {
+ .suspend = sdei_device_suspend,
+ .resume = sdei_device_resume,
+ .freeze = sdei_device_freeze,
+ .thaw = sdei_device_thaw,
+ .restore = sdei_device_restore,
+};
+
+/*
+ * Mask all CPUs and unregister all events on panic, reboot or kexec.
+ */
+static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ /*
+ * We are going to reset the interface. After this there is no point
+ * doing work when we take CPUs offline.
+ */
+ cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+
+ sdei_platform_reset();
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sdei_reboot_nb = {
+ .notifier_call = sdei_reboot_notifier,
+};
+
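+/*
+ * Conduit wrappers matching the sdei_firmware_call signature; SDEI uses
+ * at most five arguments, the remaining SMCCC parameters are zero.
+ */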
+static void sdei_smccc_smc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res)
+{
+ arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
+}
+
+static void sdei_smccc_hvc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
+}
+
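+/*
+ * Pick SMC or HVC from the DT "method" property, or fall back to the
+ * PSCI conduit on ACPI systems, where SDEI shares PSCI's conduit.
+ */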
+static int sdei_get_conduit(struct platform_device *pdev)
+{
+ const char *method;
+ struct device_node *np = pdev->dev.of_node;
+
+ sdei_firmware_call = NULL;
+ if (np) {
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return CONDUIT_INVALID;
+ }
+
+ if (!strcmp("hvc", method)) {
+ sdei_firmware_call = &sdei_smccc_hvc;
+ return CONDUIT_HVC;
+ } else if (!strcmp("smc", method)) {
+ sdei_firmware_call = &sdei_smccc_smc;
+ return CONDUIT_SMC;
+ }
+
+ pr_warn("invalid \"method\" property: %s\n", method);
+ } else if (IS_ENABLED(CONFIG_ACPI) && !acpi_disabled) {
+ if (acpi_psci_use_hvc()) {
+ sdei_firmware_call = &sdei_smccc_hvc;
+ return CONDUIT_HVC;
+ } else {
+ sdei_firmware_call = &sdei_smccc_smc;
+ return CONDUIT_SMC;
+ }
+ }
+
+ return CONDUIT_INVALID;
+}
+
+static int sdei_probe(struct platform_device *pdev)
+{
+ int err;
+ u64 ver = 0;
+ int conduit;
+
+ conduit = sdei_get_conduit(pdev);
+ if (!sdei_firmware_call)
+ return 0;
+
+ err = sdei_api_get_version(&ver);
+ if (err == -EOPNOTSUPP)
+ pr_err("advertised but not implemented in platform firmware\n");
+ if (err) {
+ pr_err("Failed to get SDEI version: %d\n", err);
+ sdei_mark_interface_broken();
+ return err;
+ }
+
+ pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
+ (int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
+ (int)SDEI_VERSION_VENDOR(ver));
+
+ if (SDEI_VERSION_MAJOR(ver) != 1) {
+ pr_warn("Conflicting SDEI version detected.\n");
+ sdei_mark_interface_broken();
+ return -EINVAL;
+ }
+
+ err = sdei_platform_reset();
+ if (err)
+ return err;
+
+ sdei_entry_point = sdei_arch_get_entry_point(conduit);
+ if (!sdei_entry_point) {
+ /* Not supported due to hardware or boot configuration */
+ sdei_mark_interface_broken();
+ return 0;
+ }
+
+ err = cpu_pm_register_notifier(&sdei_pm_nb);
+ if (err) {
+ pr_warn("Failed to register CPU PM notifier...\n");
+ goto error;
+ }
+
+ err = register_reboot_notifier(&sdei_reboot_nb);
+ if (err) {
+ pr_warn("Failed to register reboot notifier...\n");
+ goto remove_cpupm;
+ }
+
+ err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+ &sdei_cpuhp_up, &sdei_cpuhp_down);
+ if (err) {
+ pr_warn("Failed to register CPU hotplug notifier...\n");
+ goto remove_reboot;
+ }
+
+ return 0;
+
+remove_reboot:
+ unregister_reboot_notifier(&sdei_reboot_nb);
+
+remove_cpupm:
+ cpu_pm_unregister_notifier(&sdei_pm_nb);
+
+error:
+ sdei_mark_interface_broken();
+ return err;
+}
+
+static const struct of_device_id sdei_of_match[] = {
+ { .compatible = "arm,sdei-1.0" },
+ {}
+};
+
+static struct platform_driver sdei_driver = {
+ .driver = {
+ .name = "sdei",
+ .pm = &sdei_pm_ops,
+ .of_match_table = sdei_of_match,
+ },
+ .probe = sdei_probe,
+};
+
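+/*
+ * The SDEI node lives under /firmware in the device tree; create the
+ * platform device by hand, as nodes there aren't probed automatically.
+ */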
+static bool __init sdei_present_dt(void)
+{
+ struct platform_device *pdev;
+ struct device_node *np, *fw_np;
+
+ fw_np = of_find_node_by_name(NULL, "firmware");
+ if (!fw_np)
+ return false;
+
+ np = of_find_matching_node(fw_np, sdei_of_match);
+ of_node_put(fw_np);
+ if (!np)
+ return false;
+
+ pdev = of_platform_device_create(np, sdei_driver.driver.name, NULL);
+ of_node_put(np);
+ if (!pdev)
+ return false;
+
+ return true;
+}
+
+static bool __init sdei_present_acpi(void)
+{
+ acpi_status status;
+ struct platform_device *pdev;
+ struct acpi_table_header *sdei_table_header;
+
+ if (acpi_disabled)
+ return false;
+
+ status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
+ }
+ if (ACPI_FAILURE(status))
+ return false;
+
+ pdev = platform_device_register_simple(sdei_driver.driver.name, 0, NULL,
+ 0);
+ if (IS_ERR(pdev))
+ return false;
+
+ return true;
+}
+
+static int __init sdei_init(void)
+{
+ if (sdei_present_dt() || sdei_present_acpi())
+ platform_driver_register(&sdei_driver);
+
+ return 0;
+}
+
+/*
+ * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
+ * its events. ACPI is initialised from a subsys_initcall(), GHES is initialised
+ * by device_initcall(). We want to be called in the middle.
+ */
+subsys_initcall_sync(sdei_init);
+
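+/*
+ * Called from the arch asm entry point with the registered_event that
+ * firmware was given at registration time. The interrupted context may
+ * have widened addr_limit with set_fs(), so force USER_DS around the
+ * callback.
+ */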
+int sdei_event_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg)
+{
+ int err;
+ mm_segment_t orig_addr_limit;
+ u32 event_num = arg->event_num;
+
+ orig_addr_limit = get_fs();
+ set_fs(USER_DS);
+
+ err = arg->callback(event_num, regs, arg->callback_arg);
+ if (err)
+ pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
+ event_num, smp_processor_id(), err);
+
+ set_fs(orig_addr_limit);
+
+ return err;
+}
+NOKPROBE_SYMBOL(sdei_event_handler);
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 2b4c39fdfa91..6047ed4e8a3d 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -159,13 +159,21 @@ config RESET_ATTACK_MITIGATION
using the TCG Platform Reset Attack Mitigation specification. This
protects against an attacker forcibly rebooting the system while it
still contains secrets in RAM, booting another OS and extracting the
- secrets.
+ secrets. This should only be enabled when userland is configured to
+ clear the MemoryOverwriteRequest flag on clean shutdown after secrets
+ have been evicted, since otherwise it will trigger even on clean
+ reboots.
endmenu
config UEFI_CPER
bool
+config UEFI_CPER_ARM
+ bool
+ depends on UEFI_CPER && (ARM || ARM64)
+ default y
+
config EFI_DEV_PATH_PARSER
bool
depends on ACPI
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 269501dfba53..cb805374f4bc 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -11,7 +11,7 @@
KASAN_SANITIZE_runtime-wrappers.o := n
obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
-obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o
+obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o tpm.o
obj-$(CONFIG_EFI) += capsule.o memmap.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_ESRT) += esrt.o
@@ -30,3 +30,4 @@ arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
obj-$(CONFIG_ARM64) += $(arm-obj-y)
obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
+obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index ec8ac5c4dd84..e456f4602df1 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -20,10 +20,6 @@
#define NO_FURTHER_WRITE_ACTION -1
-#ifndef phys_to_page
-#define phys_to_page(x) pfn_to_page((x) >> PAGE_SHIFT)
-#endif
-
/**
* efi_free_all_buff_pages - free all previous allocated buffer pages
* @cap_info: pointer to current instance of capsule_info structure
@@ -35,7 +31,7 @@
static void efi_free_all_buff_pages(struct capsule_info *cap_info)
{
while (cap_info->index > 0)
- __free_page(phys_to_page(cap_info->pages[--cap_info->index]));
+ __free_page(cap_info->pages[--cap_info->index]);
cap_info->index = NO_FURTHER_WRITE_ACTION;
}
@@ -49,7 +45,7 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
pages_needed = ALIGN(cap_info->total_size, PAGE_SIZE) / PAGE_SIZE;
if (pages_needed == 0) {
- pr_err("invalid capsule size");
+ pr_err("invalid capsule size\n");
return -EINVAL;
}
@@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
cap_info->pages = temp_page;
+ temp_page = krealloc(cap_info->phys,
+ pages_needed * sizeof(phys_addr_t *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!temp_page)
+ return -ENOMEM;
+
+ cap_info->phys = temp_page;
+
return 0;
}
@@ -105,9 +109,24 @@ int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
**/
static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
{
+ bool do_vunmap = false;
int ret;
- ret = efi_capsule_update(&cap_info->header, cap_info->pages);
+ /*
+ * cap_info->capsule may have been assigned already by a quirk
+ * handler, so only overwrite it if it is NULL
+ */
+ if (!cap_info->capsule) {
+ cap_info->capsule = vmap(cap_info->pages, cap_info->index,
+ VM_MAP, PAGE_KERNEL);
+ if (!cap_info->capsule)
+ return -ENOMEM;
+ do_vunmap = true;
+ }
+
+ ret = efi_capsule_update(cap_info->capsule, cap_info->phys);
+ if (do_vunmap)
+ vunmap(cap_info->capsule);
if (ret) {
pr_err("capsule update failed\n");
return ret;
@@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
goto failed;
}
- cap_info->pages[cap_info->index++] = page_to_phys(page);
+ cap_info->pages[cap_info->index] = page;
+ cap_info->phys[cap_info->index] = page_to_phys(page);
cap_info->page_bytes_remain = PAGE_SIZE;
+ cap_info->index++;
} else {
- page = phys_to_page(cap_info->pages[cap_info->index - 1]);
+ page = cap_info->pages[cap_info->index - 1];
}
kbuff = kmap(page);
@@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
struct capsule_info *cap_info = file->private_data;
kfree(cap_info->pages);
+ kfree(cap_info->phys);
kfree(file->private_data);
file->private_data = NULL;
return 0;
@@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
+ cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+ if (!cap_info->phys) {
+ kfree(cap_info->pages);
+ kfree(cap_info);
+ return -ENOMEM;
+ }
+
file->private_data = cap_info;
return 0;
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
new file mode 100644
index 000000000000..698e5c8e0c8d
--- /dev/null
+++ b/drivers/firmware/efi/cper-arm.c
@@ -0,0 +1,356 @@
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/printk.h>
+#include <linux/bcd.h>
+#include <acpi/ghes.h>
+#include <ras/ras_event.h>
+
+#define INDENT_SP " "
+
+static const char * const arm_reg_ctx_strs[] = {
+ "AArch32 general purpose registers",
+ "AArch32 EL1 context registers",
+ "AArch32 EL2 context registers",
+ "AArch32 secure context registers",
+ "AArch64 general purpose registers",
+ "AArch64 EL1 context registers",
+ "AArch64 EL2 context registers",
+ "AArch64 EL3 context registers",
+ "Misc. system register structure",
+};
+
+static const char * const arm_err_trans_type_strs[] = {
+ "Instruction",
+ "Data Access",
+ "Generic",
+};
+
+static const char * const arm_bus_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+};
+
+static const char * const arm_cache_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+ "Eviction",
+ "Snooping (processor initiated a cache snoop that resulted in an error)",
+ "Snooped (processor raised a cache error caused by another processor or device snooping its cache)",
+ "Management",
+};
+
+static const char * const arm_tlb_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+ "Local management operation (processor initiated a TLB management operation that resulted in an error)",
+ "External management operation (processor raised a TLB error caused by another processor or device broadcasting TLB operations)",
+};
+
+static const char * const arm_bus_err_part_type_strs[] = {
+ "Local processor originated request",
+ "Local processor responded to request",
+ "Local processor observed",
+ "Generic",
+};
+
+static const char * const arm_bus_err_addr_space_strs[] = {
+ "External Memory Access",
+ "Internal Memory Access",
+ "Unknown",
+ "Device Memory Access",
+};
+
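+/*
+ * Decode the 64-bit, type-specific error information field. Each
+ * sub-field is printed only when its valid bit is set; the layout
+ * follows the UEFI CPER ARM processor error section.
+ */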
+static void cper_print_arm_err_info(const char *pfx, u32 type,
+ u64 error_info)
+{
+ u8 trans_type, op_type, level, participation_type, address_space;
+ u16 mem_attributes;
+ bool proc_context_corrupt, corrected, precise_pc, restartable_pc;
+ bool time_out, access_mode;
+
+ /* If the type is unknown, bail. */
+ if (type > CPER_ARM_MAX_TYPE)
+ return;
+
+ /*
+ * Vendor type errors have error information values that are vendor
+ * specific.
+ */
+ if (type == CPER_ARM_VENDOR_ERROR)
+ return;
+
+ if (error_info & CPER_ARM_ERR_VALID_TRANSACTION_TYPE) {
+ trans_type = ((error_info >> CPER_ARM_ERR_TRANSACTION_SHIFT)
+ & CPER_ARM_ERR_TRANSACTION_MASK);
+ if (trans_type < ARRAY_SIZE(arm_err_trans_type_strs)) {
+ printk("%stransaction type: %s\n", pfx,
+ arm_err_trans_type_strs[trans_type]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_OPERATION_TYPE) {
+ op_type = ((error_info >> CPER_ARM_ERR_OPERATION_SHIFT)
+ & CPER_ARM_ERR_OPERATION_MASK);
+ switch (type) {
+ case CPER_ARM_CACHE_ERROR:
+ if (op_type < ARRAY_SIZE(arm_cache_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_cache_err_op_strs[op_type]);
+ }
+ break;
+ case CPER_ARM_TLB_ERROR:
+ if (op_type < ARRAY_SIZE(arm_tlb_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_tlb_err_op_strs[op_type]);
+ }
+ break;
+ case CPER_ARM_BUS_ERROR:
+ if (op_type < ARRAY_SIZE(arm_bus_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_bus_err_op_strs[op_type]);
+ }
+ break;
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_LEVEL) {
+ level = ((error_info >> CPER_ARM_ERR_LEVEL_SHIFT)
+ & CPER_ARM_ERR_LEVEL_MASK);
+ switch (type) {
+ case CPER_ARM_CACHE_ERROR:
+ printk("%scache level: %d\n", pfx, level);
+ break;
+ case CPER_ARM_TLB_ERROR:
+ printk("%sTLB level: %d\n", pfx, level);
+ break;
+ case CPER_ARM_BUS_ERROR:
+ printk("%saffinity level at which the bus error occurred: %d\n",
+ pfx, level);
+ break;
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT) {
+ proc_context_corrupt = ((error_info >> CPER_ARM_ERR_PC_CORRUPT_SHIFT)
+ & CPER_ARM_ERR_PC_CORRUPT_MASK);
+ if (proc_context_corrupt)
+ printk("%sprocessor context corrupted\n", pfx);
+ else
+ printk("%sprocessor context not corrupted\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_CORRECTED) {
+ corrected = ((error_info >> CPER_ARM_ERR_CORRECTED_SHIFT)
+ & CPER_ARM_ERR_CORRECTED_MASK);
+ if (corrected)
+ printk("%sthe error has been corrected\n", pfx);
+ else
+ printk("%sthe error has not been corrected\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_PRECISE_PC) {
+ precise_pc = ((error_info >> CPER_ARM_ERR_PRECISE_PC_SHIFT)
+ & CPER_ARM_ERR_PRECISE_PC_MASK);
+ if (precise_pc)
+ printk("%sPC is precise\n", pfx);
+ else
+ printk("%sPC is imprecise\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_RESTARTABLE_PC) {
+ restartable_pc = ((error_info >> CPER_ARM_ERR_RESTARTABLE_PC_SHIFT)
+ & CPER_ARM_ERR_RESTARTABLE_PC_MASK);
+ if (restartable_pc)
+ printk("%sProgram execution can be restarted reliably at the PC associated with the error.\n", pfx);
+ }
+
+ /* The rest of the fields are specific to bus errors */
+ if (type != CPER_ARM_BUS_ERROR)
+ return;
+
+ if (error_info & CPER_ARM_ERR_VALID_PARTICIPATION_TYPE) {
+ participation_type = ((error_info >> CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT)
+ & CPER_ARM_ERR_PARTICIPATION_TYPE_MASK);
+ if (participation_type < ARRAY_SIZE(arm_bus_err_part_type_strs)) {
+ printk("%sparticipation type: %s\n", pfx,
+ arm_bus_err_part_type_strs[participation_type]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_TIME_OUT) {
+ time_out = ((error_info >> CPER_ARM_ERR_TIME_OUT_SHIFT)
+ & CPER_ARM_ERR_TIME_OUT_MASK);
+ if (time_out)
+ printk("%srequest timed out\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_ADDRESS_SPACE) {
+ address_space = ((error_info >> CPER_ARM_ERR_ADDRESS_SPACE_SHIFT)
+ & CPER_ARM_ERR_ADDRESS_SPACE_MASK);
+ if (address_space < ARRAY_SIZE(arm_bus_err_addr_space_strs)) {
+ printk("%saddress space: %s\n", pfx,
+ arm_bus_err_addr_space_strs[address_space]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_MEM_ATTRIBUTES) {
+ mem_attributes = ((error_info >> CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT)
+ & CPER_ARM_ERR_MEM_ATTRIBUTES_MASK);
+ printk("%smemory access attributes:0x%x\n", pfx, mem_attributes);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_ACCESS_MODE) {
+ access_mode = ((error_info >> CPER_ARM_ERR_ACCESS_MODE_SHIFT)
+ & CPER_ARM_ERR_ACCESS_MODE_MASK);
+ if (access_mode)
+ printk("%saccess mode: normal\n", pfx);
+ else
+ printk("%saccess mode: secure\n", pfx);
+ }
+}
+
+void cper_print_proc_arm(const char *pfx,
+ const struct cper_sec_proc_arm *proc)
+{
+ int i, len, max_ctx_type;
+ struct cper_arm_err_info *err_info;
+ struct cper_arm_ctx_info *ctx_info;
+ char newpfx[64], infopfx[64];
+
+ printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
+
+ len = proc->section_length - (sizeof(*proc) +
+ proc->err_info_num * (sizeof(*err_info)));
+ if (len < 0) {
+ printk("%ssection length: %d\n", pfx, proc->section_length);
+ printk("%ssection length is too small\n", pfx);
+ printk("%sfirmware-generated error record is incorrect\n", pfx);
+ printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
+ return;
+ }
+
+ if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
+ printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
+ pfx, proc->mpidr);
+
+ if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
+ printk("%serror affinity level: %d\n", pfx,
+ proc->affinity_level);
+
+ if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
+ printk("%srunning state: 0x%x\n", pfx, proc->running_state);
+ printk("%sPower State Coordination Interface state: %d\n",
+ pfx, proc->psci_state);
+ }
+
+ snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
+
+ err_info = (struct cper_arm_err_info *)(proc + 1);
+ for (i = 0; i < proc->err_info_num; i++) {
+ printk("%sError info structure %d:\n", pfx, i);
+
+ printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
+
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
+ printk("%sfirst error captured\n", newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
+ printk("%slast error captured\n", newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
+ printk("%spropagated error captured\n",
+ newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
+ printk("%soverflow occurred, error info is incomplete\n",
+ newpfx);
+ }
+
+ printk("%serror_type: %d, %s\n", newpfx, err_info->type,
+ err_info->type < ARRAY_SIZE(cper_proc_error_type_strs) ?
+ cper_proc_error_type_strs[err_info->type] : "unknown");
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO) {
+ printk("%serror_info: 0x%016llx\n", newpfx,
+ err_info->error_info);
+ snprintf(infopfx, sizeof(infopfx), "%s%s", newpfx, INDENT_SP);
+ cper_print_arm_err_info(infopfx, err_info->type,
+ err_info->error_info);
+ }
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
+ printk("%svirtual fault address: 0x%016llx\n",
+ newpfx, err_info->virt_fault_addr);
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
+ printk("%sphysical fault address: 0x%016llx\n",
+ newpfx, err_info->physical_fault_addr);
+ err_info += 1;
+ }
+
+ ctx_info = (struct cper_arm_ctx_info *)err_info;
+ max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
+ for (i = 0; i < proc->context_info_num; i++) {
+ int size = sizeof(*ctx_info) + ctx_info->size;
+
+ printk("%sContext info structure %d:\n", pfx, i);
+ if (len < size) {
+ printk("%ssection length is too small\n", newpfx);
+ printk("%sfirmware-generated error record is incorrect\n", pfx);
+ return;
+ }
+ if (ctx_info->type > max_ctx_type) {
+ printk("%sInvalid context type: %d (max: %d)\n",
+ newpfx, ctx_info->type, max_ctx_type);
+ return;
+ }
+ printk("%sregister context type: %s\n", newpfx,
+ arm_reg_ctx_strs[ctx_info->type]);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (ctx_info + 1), ctx_info->size, 0);
+ len -= size;
+ ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
+ }
+
+ if (len > 0) {
+ printk("%sVendor specific error info has %u bytes:\n", pfx,
+ len);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
+ len, true);
+ }
+}
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index d2fcafcea07e..c165933ebf38 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -122,7 +122,7 @@ static const char * const proc_isa_strs[] = {
"ARM A64",
};
-static const char * const proc_error_type_strs[] = {
+const char * const cper_proc_error_type_strs[] = {
"cache error",
"TLB error",
"bus error",
@@ -157,8 +157,8 @@ static void cper_print_proc_generic(const char *pfx,
if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
cper_print_bits(pfx, proc->proc_error_type,
- proc_error_type_strs,
- ARRAY_SIZE(proc_error_type_strs));
+ cper_proc_error_type_strs,
+ ARRAY_SIZE(cper_proc_error_type_strs));
}
if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
printk("%s""operation: %d, %s\n", pfx, proc->operation,
@@ -188,122 +188,6 @@ static void cper_print_proc_generic(const char *pfx,
printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
}
-#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
-static const char * const arm_reg_ctx_strs[] = {
- "AArch32 general purpose registers",
- "AArch32 EL1 context registers",
- "AArch32 EL2 context registers",
- "AArch32 secure context registers",
- "AArch64 general purpose registers",
- "AArch64 EL1 context registers",
- "AArch64 EL2 context registers",
- "AArch64 EL3 context registers",
- "Misc. system register structure",
-};
-
-static void cper_print_proc_arm(const char *pfx,
- const struct cper_sec_proc_arm *proc)
-{
- int i, len, max_ctx_type;
- struct cper_arm_err_info *err_info;
- struct cper_arm_ctx_info *ctx_info;
- char newpfx[64];
-
- printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
-
- len = proc->section_length - (sizeof(*proc) +
- proc->err_info_num * (sizeof(*err_info)));
- if (len < 0) {
- printk("%ssection length: %d\n", pfx, proc->section_length);
- printk("%ssection length is too small\n", pfx);
- printk("%sfirmware-generated error record is incorrect\n", pfx);
- printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
- return;
- }
-
- if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
- printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
- pfx, proc->mpidr);
-
- if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
- printk("%serror affinity level: %d\n", pfx,
- proc->affinity_level);
-
- if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
- printk("%srunning state: 0x%x\n", pfx, proc->running_state);
- printk("%sPower State Coordination Interface state: %d\n",
- pfx, proc->psci_state);
- }
-
- snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
-
- err_info = (struct cper_arm_err_info *)(proc + 1);
- for (i = 0; i < proc->err_info_num; i++) {
- printk("%sError info structure %d:\n", pfx, i);
-
- printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
-
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
- if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
- printk("%sfirst error captured\n", newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
- printk("%slast error captured\n", newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
- printk("%spropagated error captured\n",
- newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
- printk("%soverflow occurred, error info is incomplete\n",
- newpfx);
- }
-
- printk("%serror_type: %d, %s\n", newpfx, err_info->type,
- err_info->type < ARRAY_SIZE(proc_error_type_strs) ?
- proc_error_type_strs[err_info->type] : "unknown");
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO)
- printk("%serror_info: 0x%016llx\n", newpfx,
- err_info->error_info);
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
- printk("%svirtual fault address: 0x%016llx\n",
- newpfx, err_info->virt_fault_addr);
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
- printk("%sphysical fault address: 0x%016llx\n",
- newpfx, err_info->physical_fault_addr);
- err_info += 1;
- }
-
- ctx_info = (struct cper_arm_ctx_info *)err_info;
- max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
- for (i = 0; i < proc->context_info_num; i++) {
- int size = sizeof(*ctx_info) + ctx_info->size;
-
- printk("%sContext info structure %d:\n", pfx, i);
- if (len < size) {
- printk("%ssection length is too small\n", newpfx);
- printk("%sfirmware-generated error record is incorrect\n", pfx);
- return;
- }
- if (ctx_info->type > max_ctx_type) {
- printk("%sInvalid context type: %d (max: %d)\n",
- newpfx, ctx_info->type, max_ctx_type);
- return;
- }
- printk("%sregister context type: %s\n", newpfx,
- arm_reg_ctx_strs[ctx_info->type]);
- print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
- (ctx_info + 1), ctx_info->size, 0);
- len -= size;
- ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
- }
-
- if (len > 0) {
- printk("%sVendor specific error info has %u bytes:\n", pfx,
- len);
- print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
- len, true);
- }
-}
-#endif
-
static const char * const mem_err_type_strs[] = {
"unknown",
"no error",
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 557a47829d03..cd42f66a7c85 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -52,6 +52,7 @@ struct efi __read_mostly efi = {
.properties_table = EFI_INVALID_TABLE_ADDR,
.mem_attr_table = EFI_INVALID_TABLE_ADDR,
.rng_seed = EFI_INVALID_TABLE_ADDR,
+ .tpm_log = EFI_INVALID_TABLE_ADDR
};
EXPORT_SYMBOL(efi);
@@ -464,6 +465,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
+ {LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
{NULL_GUID, NULL, NULL},
};
@@ -552,6 +554,8 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
if (efi_enabled(EFI_MEMMAP))
efi_memattr_init();
+ efi_tpm_eventlog_init();
+
/* Parse the EFI Properties table if it exists */
if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
efi_properties_table_t *tbl;
@@ -608,7 +612,7 @@ static int __init efi_load_efivars(void)
return 0;
pdev = platform_device_register_simple("efivars", 0, NULL, 0);
- return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
+ return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(efi_load_efivars);
#endif
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index adaa4a964f0c..7b3ba40f0745 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -30,8 +30,7 @@ OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
-lib-y := efi-stub-helper.o gop.o secureboot.o
-lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o
+lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index 6224cdbc9669..da661bf8cb96 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -4,15 +4,18 @@
* Copyright (C) 2016 CoreOS, Inc
* Copyright (C) 2017 Google, Inc.
* Matthew Garrett <mjg59@google.com>
+ * Thiebaud Weksteen <tweek@google.com>
*
* This file is part of the Linux kernel, and is made available under the
* terms of the GNU General Public License version 2.
*/
#include <linux/efi.h>
+#include <linux/tpm_eventlog.h>
#include <asm/efi.h>
#include "efistub.h"
+#ifdef CONFIG_RESET_ATTACK_MITIGATION
static const efi_char16_t efi_MemoryOverWriteRequest_name[] = {
'M', 'e', 'm', 'o', 'r', 'y', 'O', 'v', 'e', 'r', 'w', 'r', 'i', 't',
'e', 'R', 'e', 'q', 'u', 'e', 's', 't', 'C', 'o', 'n', 't', 'r', 'o',
@@ -56,3 +59,81 @@ void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS, sizeof(val), &val);
}
+
+#endif
+
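+/*
+ * Copy the firmware's TCG 1.2 event log into a pool allocation and
+ * publish it as a Linux-specific EFI configuration table, so the kernel
+ * can still find the log after ExitBootServices().
+ */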
+void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
+{
+ efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
+ efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
+ efi_status_t status;
+ efi_physical_addr_t log_location, log_last_entry;
+ struct linux_efi_tpm_eventlog *log_tbl;
+ unsigned long first_entry_addr, last_entry_addr;
+ size_t log_size, last_entry_size;
+ efi_bool_t truncated;
+ void *tcg2_protocol;
+
+ status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
+ &tcg2_protocol);
+ if (status != EFI_SUCCESS)
+ return;
+
+ status = efi_call_proto(efi_tcg2_protocol, get_event_log, tcg2_protocol,
+ EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2,
+ &log_location, &log_last_entry, &truncated);
+ if (status != EFI_SUCCESS)
+ return;
+
+ if (!log_location)
+ return;
+ first_entry_addr = (unsigned long) log_location;
+
+ /*
+ * We populate the EFI table even if the logs are empty.
+ */
+ if (!log_last_entry) {
+ log_size = 0;
+ } else {
+ last_entry_addr = (unsigned long) log_last_entry;
+ /*
+ * get_event_log only returns the address of the last entry.
+ * We need to calculate its size to deduce the full size of
+ * the logs.
+ */
+ last_entry_size = sizeof(struct tcpa_event) +
+ ((struct tcpa_event *) last_entry_addr)->event_size;
+ log_size = log_last_entry - log_location + last_entry_size;
+ }
+
+ /* Allocate space for the logs and copy them. */
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*log_tbl) + log_size,
+ (void **) &log_tbl);
+
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg,
+ "Unable to allocate memory for event log\n");
+ return;
+ }
+
+ memset(log_tbl, 0, sizeof(*log_tbl) + log_size);
+ log_tbl->size = log_size;
+ log_tbl->version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ memcpy(log_tbl->log, (void *) first_entry_addr, log_size);
+
+ status = efi_call_early(install_configuration_table,
+ &linux_eventlog_guid, log_tbl);
+ if (status != EFI_SUCCESS)
+ goto err_free;
+ return;
+
+err_free:
+ efi_call_early(free_pool, log_tbl);
+}
+
+void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
+{
+ /* Only try to retrieve the logs in 1.2 format. */
+ efi_retrieve_tpm2_eventlog_1_2(sys_table_arg);
+}
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
new file mode 100644
index 000000000000..0cbeb3d46b18
--- /dev/null
+++ b/drivers/firmware/efi/tpm.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ * Thiebaud Weksteen <tweek@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
+
+/*
+ * Reserve the memory associated with the TPM Event Log configuration table.
+ */
+int __init efi_tpm_eventlog_init(void)
+{
+ struct linux_efi_tpm_eventlog *log_tbl;
+ unsigned int tbl_size;
+
+ if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+ return 0;
+
+ log_tbl = early_memremap(efi.tpm_log, sizeof(*log_tbl));
+ if (!log_tbl) {
+ pr_err("Failed to map TPM Event Log table @ 0x%lx\n",
+ efi.tpm_log);
+ efi.tpm_log = EFI_INVALID_TABLE_ADDR;
+ return -ENOMEM;
+ }
+
+ tbl_size = sizeof(*log_tbl) + log_tbl->size;
+ memblock_reserve(efi.tpm_log, tbl_size);
+ early_memunmap(log_tbl, sizeof(*log_tbl));
+ return 0;
+}
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index d687ca3d5049..8b25d31e8401 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -496,6 +496,8 @@ static void __init psci_init_migrate(void)
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n");
+ psci_ops.get_version = psci_get_version;
+
psci_function_id[PSCI_FN_CPU_SUSPEND] =
PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
psci_ops.cpu_suspend = psci_cpu_suspend;
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index f3f4f810e5df..bb1c068bff19 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -77,8 +77,8 @@ static int psci_ops_check(void)
return 0;
}
-static int find_clusters(const struct cpumask *cpus,
- const struct cpumask **clusters)
+static int find_cpu_groups(const struct cpumask *cpus,
+ const struct cpumask **cpu_groups)
{
unsigned int nb = 0;
cpumask_var_t tmp;
@@ -88,11 +88,11 @@ static int find_clusters(const struct cpumask *cpus,
cpumask_copy(tmp, cpus);
while (!cpumask_empty(tmp)) {
- const struct cpumask *cluster =
+ const struct cpumask *cpu_group =
topology_core_cpumask(cpumask_any(tmp));
- clusters[nb++] = cluster;
- cpumask_andnot(tmp, tmp, cluster);
+ cpu_groups[nb++] = cpu_group;
+ cpumask_andnot(tmp, tmp, cpu_group);
}
free_cpumask_var(tmp);
@@ -170,24 +170,24 @@ static int hotplug_tests(void)
{
int err;
cpumask_var_t offlined_cpus;
- int i, nb_cluster;
- const struct cpumask **clusters;
+ int i, nb_cpu_group;
+ const struct cpumask **cpu_groups;
char *page_buf;
err = -ENOMEM;
if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
return err;
- /* We may have up to nb_available_cpus clusters. */
- clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters),
- GFP_KERNEL);
- if (!clusters)
+ /* We may have up to nb_available_cpus cpu_groups. */
+ cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
+ GFP_KERNEL);
+ if (!cpu_groups)
goto out_free_cpus;
page_buf = (char *)__get_free_page(GFP_KERNEL);
if (!page_buf)
- goto out_free_clusters;
+ goto out_free_cpu_groups;
err = 0;
- nb_cluster = find_clusters(cpu_online_mask, clusters);
+ nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
/*
* Of course the last CPU cannot be powered down and cpu_down() should
@@ -197,24 +197,22 @@ static int hotplug_tests(void)
err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
/*
- * Take down CPUs by cluster this time. When the last CPU is turned
- * off, the cluster itself should shut down.
+ * Take down CPUs by cpu group this time. When the last CPU is turned
+ * off, the cpu group itself should shut down.
*/
- for (i = 0; i < nb_cluster; ++i) {
- int cluster_id =
- topology_physical_package_id(cpumask_any(clusters[i]));
+ for (i = 0; i < nb_cpu_group; ++i) {
ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
- clusters[i]);
+ cpu_groups[i]);
/* Remove trailing newline. */
page_buf[len - 1] = '\0';
- pr_info("Trying to turn off and on again cluster %d "
- "(CPUs %s)\n", cluster_id, page_buf);
- err += down_and_up_cpus(clusters[i], offlined_cpus);
+ pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
+ i, page_buf);
+ err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
}
free_page((unsigned long)page_buf);
-out_free_clusters:
- kfree(clusters);
+out_free_cpu_groups:
+ kfree(cpu_groups);
out_free_cpus:
free_cpumask_var(offlined_cpus);
return err;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d6a8e851ad13..8dbb2280538d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -122,12 +122,6 @@ config GPIO_ATH79
Select this option to enable GPIO driver for
Atheros AR71XX/AR724X/AR913X SoC devices.
-config GPIO_AXP209
- tristate "X-Powers AXP209 PMIC GPIO Support"
- depends on MFD_AXP20X
- help
- Say yes to enable GPIO support for the AXP209 PMIC
-
config GPIO_BCM_KONA
bool "Broadcom Kona GPIO"
depends on OF_GPIO && (ARCH_BCM_MOBILE || COMPILE_TEST)
@@ -704,6 +698,22 @@ config GPIO_TS5500
blocks of the TS-5500: DIO1, DIO2 and the LCD port, and the TS-5600
LCD port.
+config GPIO_WINBOND
+ tristate "Winbond Super I/O GPIO support"
+ depends on ISA_BUS_API
+ help
+ This option enables support for GPIOs found on Winbond Super I/O
+ chips.
+ Currently, only W83627UHG (also known as Nuvoton NCT6627UD) is
+ supported.
+
+ You will need to provide a module parameter "gpios", or a
+ boot-time parameter "gpio_winbond.gpios" with a bitmask of GPIO
+ ports to enable (bit 0 is GPIO1, bit 1 is GPIO2, etc.).
+
+ To compile this driver as a module, choose M here: the module will
+ be called gpio-winbond.
+
config GPIO_WS16C48
tristate "WinSystems WS16C48 GPIO support"
depends on ISA_BUS_API
@@ -1234,6 +1244,16 @@ config GPIO_PCI_IDIO_16
low). Input filter control is not supported by this driver, and the
input filters are deactivated by this driver.
+config GPIO_PCIE_IDIO_24
+ tristate "ACCES PCIe-IDIO-24 GPIO support"
+ select GPIOLIB_IRQCHIP
+ help
+ Enables GPIO support for the ACCES PCIe-IDIO-24 family (PCIe-IDIO-24,
+ PCIe-IDI-24, PCIe-IDO-24, PCIe-IDIO-12). An interrupt is generated
+ when any of the inputs change state (low to high or high to low).
+ Input filter control is not supported by this driver, and the input
+ filters are deactivated by this driver.
+
config GPIO_RDC321X
tristate "RDC R-321x GPIO support"
select MFD_CORE
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 4bc24febb889..cccb0d40846c 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -32,7 +32,6 @@ obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o
obj-$(CONFIG_GPIO_ASPEED) += gpio-aspeed.o
-obj-$(CONFIG_GPIO_AXP209) += gpio-axp209.o
obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
@@ -96,6 +95,7 @@ obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-pci-idio-16.o
+obj-$(CONFIG_GPIO_PCIE_IDIO_24) += gpio-pcie-idio-24.o
obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
@@ -140,6 +140,7 @@ obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
obj-$(CONFIG_GPIO_WHISKEY_COVE) += gpio-wcove.o
+obj-$(CONFIG_GPIO_WINBOND) += gpio-winbond.o
obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index afbff155a0ba..e82cc763633c 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -125,6 +125,48 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
EXPORT_SYMBOL(devm_gpiod_get_index);
/**
+ * devm_gpiod_get_from_of_node() - obtain a GPIO from an OF node
+ * @dev: device for lifecycle management
+ * @node: handle of the OF node
+ * @propname: name of the DT property representing the GPIO
+ * @index: index of the GPIO to obtain for the consumer
+ * @dflags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * Returns:
+ * On successful request the GPIO pin is configured in accordance with
+ * provided @dflags.
+ *
+ * In case of error an ERR_PTR() is returned.
+ */
+struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
+ struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ struct gpio_desc **dr;
+ struct gpio_desc *desc;
+
+ dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
+ GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ desc = gpiod_get_from_of_node(node, propname, index, dflags, label);
+ if (IS_ERR(desc)) {
+ devres_free(dr);
+ return desc;
+ }
+
+ *dr = desc;
+ devres_add(dev, dr);
+
+ return desc;
+}
+EXPORT_SYMBOL(devm_gpiod_get_from_of_node);
+
+/**
* devm_fwnode_get_index_gpiod_from_child - get a GPIO descriptor from a
* device's child node
* @dev: GPIO consumer
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 15a1f4b348c4..fb7b620763a2 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -9,12 +9,11 @@
* published by the Free Software Foundation.
*/
-#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index abf199609546..21452622d954 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -12,8 +12,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mfd/adp5520.h>
-
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
struct adp5520_gpio {
struct device *master;
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index e717f8dc3966..3530ccd17e04 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 8e76d390e653..8c3ff6e2366f 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -18,7 +18,8 @@
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/of_gpio.h> /* For of_mm_gpio_chip */
#include <linux/platform_device.h>
#define ALTERA_GPIO_MAX_NGPIO 32
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 30ad7d7c1678..fdcebe59510d 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -28,7 +28,7 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index d4e6ba0301bc..ba51ea15f379 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 6b3ca6601af2..77e485557498 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -60,6 +60,7 @@ struct aspeed_gpio_bank {
uint16_t val_regs;
uint16_t irq_regs;
uint16_t debounce_regs;
+ uint16_t tolerance_regs;
const char names[4][3];
};
@@ -70,48 +71,56 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.val_regs = 0x0000,
.irq_regs = 0x0008,
.debounce_regs = 0x0040,
+ .tolerance_regs = 0x001c,
.names = { "A", "B", "C", "D" },
},
{
.val_regs = 0x0020,
.irq_regs = 0x0028,
.debounce_regs = 0x0048,
+ .tolerance_regs = 0x003c,
.names = { "E", "F", "G", "H" },
},
{
.val_regs = 0x0070,
.irq_regs = 0x0098,
.debounce_regs = 0x00b0,
+ .tolerance_regs = 0x00ac,
.names = { "I", "J", "K", "L" },
},
{
.val_regs = 0x0078,
.irq_regs = 0x00e8,
.debounce_regs = 0x0100,
+ .tolerance_regs = 0x00fc,
.names = { "M", "N", "O", "P" },
},
{
.val_regs = 0x0080,
.irq_regs = 0x0118,
.debounce_regs = 0x0130,
+ .tolerance_regs = 0x012c,
.names = { "Q", "R", "S", "T" },
},
{
.val_regs = 0x0088,
.irq_regs = 0x0148,
.debounce_regs = 0x0160,
+ .tolerance_regs = 0x015c,
.names = { "U", "V", "W", "X" },
},
{
.val_regs = 0x01E0,
.irq_regs = 0x0178,
.debounce_regs = 0x0190,
+ .tolerance_regs = 0x018c,
.names = { "Y", "Z", "AA", "AB" },
},
{
- .val_regs = 0x01E8,
- .irq_regs = 0x01A8,
+ .val_regs = 0x01e8,
+ .irq_regs = 0x01a8,
.debounce_regs = 0x01c0,
+ .tolerance_regs = 0x01bc,
.names = { "AC", "", "", "" },
},
};
@@ -140,7 +149,7 @@ static const struct aspeed_gpio_bank *to_bank(unsigned int offset)
{
unsigned int bank = GPIO_BANK(offset);
- WARN_ON(bank > ARRAY_SIZE(aspeed_gpio_banks));
+ WARN_ON(bank >= ARRAY_SIZE(aspeed_gpio_banks));
return &aspeed_gpio_banks[bank];
}
@@ -534,6 +543,30 @@ static int aspeed_gpio_setup_irqs(struct aspeed_gpio *gpio,
return 0;
}
+static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
+ unsigned int offset, bool enable)
+{
+ struct aspeed_gpio *gpio = gpiochip_get_data(chip);
+ const struct aspeed_gpio_bank *bank;
+ unsigned long flags;
+ u32 val;
+
+ bank = to_bank(offset);
+
+ spin_lock_irqsave(&gpio->lock, flags);
+ val = readl(gpio->base + bank->tolerance_regs);
+
+ if (enable)
+ val |= GPIO_BIT(offset);
+ else
+ val &= ~GPIO_BIT(offset);
+
+ writel(val, gpio->base + bank->tolerance_regs);
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
+}
+
static int aspeed_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
if (!have_gpio(gpiochip_get_data(chip), offset))
@@ -771,6 +804,8 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
/* Return -ENOTSUPP to trigger emulation, as per datasheet */
return -ENOTSUPP;
+ else if (param == PIN_CONFIG_PERSIST_STATE)
+ return aspeed_gpio_reset_tolerance(chip, offset, arg);
return -ENOTSUPP;
}
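The new PIN_CONFIG_PERSIST_STATE branch above is normally reached through the gpiolib state-persistence consumer API introduced in the same kernel cycle; a sketch of a hypothetical caller (the helper name is illustrative, and the mapping comment assumes this driver's set_config() path):

#include <linux/gpio/consumer.h>

/* hypothetical helper, not part of the patch */
static int foo_allow_reset_loss(struct gpio_desc *desc)
{
	/*
	 * Marking the line transitory packs PIN_CONFIG_PERSIST_STATE
	 * with argument 0, which on this controller ends up in
	 * aspeed_gpio_set_config() and clears the tolerance bit.
	 */
	return gpiod_set_transitory(desc, true);
}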
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 5fad89dfab7e..3ae7c1876bf4 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -324,3 +324,6 @@ static struct platform_driver ath79_gpio_driver = {
};
module_platform_driver(ath79_gpio_driver);
+
+MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-axp209.c b/drivers/gpio/gpio-axp209.c
deleted file mode 100644
index 4a346b7b4172..000000000000
--- a/drivers/gpio/gpio-axp209.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * AXP20x GPIO driver
- *
- * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/gpio/driver.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mfd/axp20x.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/slab.h>
-
-#define AXP20X_GPIO_FUNCTIONS 0x7
-#define AXP20X_GPIO_FUNCTION_OUT_LOW 0
-#define AXP20X_GPIO_FUNCTION_OUT_HIGH 1
-#define AXP20X_GPIO_FUNCTION_INPUT 2
-
-struct axp20x_gpio {
- struct gpio_chip chip;
- struct regmap *regmap;
-};
-
-static int axp20x_gpio_get_reg(unsigned offset)
-{
- switch (offset) {
- case 0:
- return AXP20X_GPIO0_CTRL;
- case 1:
- return AXP20X_GPIO1_CTRL;
- case 2:
- return AXP20X_GPIO2_CTRL;
- }
-
- return -EINVAL;
-}
-
-static int axp20x_gpio_input(struct gpio_chip *chip, unsigned offset)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- int reg;
-
- reg = axp20x_gpio_get_reg(offset);
- if (reg < 0)
- return reg;
-
- return regmap_update_bits(gpio->regmap, reg,
- AXP20X_GPIO_FUNCTIONS,
- AXP20X_GPIO_FUNCTION_INPUT);
-}
-
-static int axp20x_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- unsigned int val;
- int ret;
-
- ret = regmap_read(gpio->regmap, AXP20X_GPIO20_SS, &val);
- if (ret)
- return ret;
-
- return !!(val & BIT(offset + 4));
-}
-
-static int axp20x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- unsigned int val;
- int reg, ret;
-
- reg = axp20x_gpio_get_reg(offset);
- if (reg < 0)
- return reg;
-
- ret = regmap_read(gpio->regmap, reg, &val);
- if (ret)
- return ret;
-
- /*
- * This shouldn't really happen if the pin is in use already,
- * or if it's not in use yet, it doesn't matter since we're
- * going to change the value soon anyway. Default to output.
- */
- if ((val & AXP20X_GPIO_FUNCTIONS) > 2)
- return 0;
-
- /*
- * The GPIO directions are the three lowest values.
- * 2 is input, 0 and 1 are output
- */
- return val & 2;
-}
-
-static int axp20x_gpio_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- int reg;
-
- reg = axp20x_gpio_get_reg(offset);
- if (reg < 0)
- return reg;
-
- return regmap_update_bits(gpio->regmap, reg,
- AXP20X_GPIO_FUNCTIONS,
- value ? AXP20X_GPIO_FUNCTION_OUT_HIGH
- : AXP20X_GPIO_FUNCTION_OUT_LOW);
-}
-
-static void axp20x_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- axp20x_gpio_output(chip, offset, value);
-}
-
-static int axp20x_gpio_probe(struct platform_device *pdev)
-{
- struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
- struct axp20x_gpio *gpio;
- int ret;
-
- if (!of_device_is_available(pdev->dev.of_node))
- return -ENODEV;
-
- if (!axp20x) {
- dev_err(&pdev->dev, "Parent drvdata not set\n");
- return -EINVAL;
- }
-
- gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
- if (!gpio)
- return -ENOMEM;
-
- gpio->chip.base = -1;
- gpio->chip.can_sleep = true;
- gpio->chip.parent = &pdev->dev;
- gpio->chip.label = dev_name(&pdev->dev);
- gpio->chip.owner = THIS_MODULE;
- gpio->chip.get = axp20x_gpio_get;
- gpio->chip.get_direction = axp20x_gpio_get_direction;
- gpio->chip.set = axp20x_gpio_set;
- gpio->chip.direction_input = axp20x_gpio_input;
- gpio->chip.direction_output = axp20x_gpio_output;
- gpio->chip.ngpio = 3;
-
- gpio->regmap = axp20x->regmap;
-
- ret = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register GPIO chip\n");
- return ret;
- }
-
- dev_info(&pdev->dev, "AXP209 GPIO driver loaded\n");
-
- return 0;
-}
-
-static const struct of_device_id axp20x_gpio_match[] = {
- { .compatible = "x-powers,axp209-gpio" },
- { }
-};
-MODULE_DEVICE_TABLE(of, axp20x_gpio_match);
-
-static struct platform_driver axp20x_gpio_driver = {
- .probe = axp20x_gpio_probe,
- .driver = {
- .name = "axp20x-gpio",
- .of_match_table = axp20x_gpio_match,
- },
-};
-
-module_platform_driver(axp20x_gpio_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
-MODULE_DESCRIPTION("AXP20x PMIC GPIO driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index dfcf56ee3c61..eb8369b21e90 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -17,7 +17,7 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/init.h>
@@ -127,7 +127,7 @@ static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
u32 val;
val = readl(reg_base + GPIO_CONTROL(gpio)) & GPIO_GPCTR0_IOTR_MASK;
- return val ? GPIOF_DIR_IN : GPIOF_DIR_OUT;
+ return !!val;
}
static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
@@ -144,7 +144,7 @@ static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
/* this function only applies to output pin */
- if (bcm_kona_gpio_get_dir(chip, gpio) == GPIOF_DIR_IN)
+ if (bcm_kona_gpio_get_dir(chip, gpio) == 1)
goto out;
reg_offset = value ? GPIO_OUT_SET(bank_id) : GPIO_OUT_CLEAR(bank_id);
@@ -170,7 +170,7 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
reg_base = kona_gpio->reg_base;
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
- if (bcm_kona_gpio_get_dir(chip, gpio) == GPIOF_DIR_IN)
+ if (bcm_kona_gpio_get_dir(chip, gpio) == 1)
reg_offset = GPIO_IN_STATUS(bank_id);
else
reg_offset = GPIO_OUT_STATUS(bank_id);
@@ -522,6 +522,7 @@ static struct of_device_id const bcm_kona_gpio_of_match[] = {
* category than their parents, so it won't report false recursion.
*/
static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
@@ -531,7 +532,7 @@ static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
ret = irq_set_chip_data(irq, d->host_data);
if (ret < 0)
return ret;
- irq_set_lockdep_class(irq, &gpio_lock_class);
+ irq_set_lockdep_class(irq, &gpio_lock_class, &gpio_request_class);
irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq);
irq_set_noprobe(irq);
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 545d43a587b7..16c7f9f49416 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -19,7 +19,6 @@
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
-#include <linux/bitops.h>
enum gio_reg_index {
GIO_REG_ODEN = 0,
@@ -327,6 +326,7 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
* category than their parents, so it won't report false recursion.
*/
static struct lock_class_key brcmstb_gpio_irq_lock_class;
+static struct lock_class_key brcmstb_gpio_irq_request_class;
static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
@@ -346,7 +346,8 @@ static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
ret = irq_set_chip_data(irq, &bank->gc);
if (ret < 0)
return ret;
- irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class);
+ irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class,
+ &brcmstb_gpio_irq_request_class);
irq_set_chip_and_handler(irq, &priv->irq_chip, handle_level_irq);
irq_set_noprobe(irq);
return 0;
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index acefb25e8eca..b8ec75cbd4b5 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -46,7 +46,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
/* Steal the hardware definitions from the bttv driver. */
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index b6f0f729656c..58531d8b8c6e 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -18,7 +18,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/regmap.h>
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 90278b19aa0e..8814c8f47e57 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -12,7 +12,7 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/cs5535.h>
#include <asm/msr.h>
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index dd8977cf3e85..b6d3e997eb26 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -15,7 +15,7 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/syscalls.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 82053b52cba0..2f1b5d23b10c 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -13,7 +13,7 @@
*/
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mfd/da9055/core.h>
#include <linux/mfd/da9055/reg.h>
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index e4b3d7db68c9..0b951ca78ec4 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -9,7 +9,7 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 7b3394fdc624..b7a3a2db699b 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -176,8 +176,8 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
return PTR_ERR(g->base);
irq = platform_get_irq(pdev, 0);
- if (!irq)
- return -EINVAL;
+ if (irq <= 0)
+ return irq ? irq : -EINVAL;
ret = bgpio_init(&g->gc, dev, 4,
g->base + GPIO_DATA_IN,
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
index 98c7ff2a76e7..8d62db447ec1 100644
--- a/drivers/gpio/gpio-iop.c
+++ b/drivers/gpio/gpio-iop.c
@@ -58,3 +58,7 @@ static int __init iop3xx_gpio_init(void)
return platform_driver_register(&iop3xx_gpio_driver);
}
arch_initcall(iop3xx_gpio_init);
+
+MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors");
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index d43d0a2cc4c5..efb46edff81f 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -414,6 +414,6 @@ static void __exit it87_gpio_exit(void)
module_init(it87_gpio_init);
module_exit(it87_gpio_exit);
-MODULE_AUTHOR("Diego Elio Pettenò <flameeyes@flameeyes.eu>");
+MODULE_AUTHOR("Diego Elio Pettenò <flameeyes@flameeyes.eu>");
MODULE_DESCRIPTION("GPIO interface for IT87xx Super I/O chips");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index c04fae1ba32a..9d8bcc69f245 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -709,8 +709,7 @@ static int max732x_probe(struct i2c_client *client,
return 0;
out_failed:
- if (chip->client_dummy)
- i2c_unregister_device(chip->client_dummy);
+ i2c_unregister_device(chip->client_dummy);
return ret;
}
@@ -734,8 +733,7 @@ static int max732x_remove(struct i2c_client *client)
gpiochip_remove(&chip->gpio_chip);
/* unregister any dummy i2c_client */
- if (chip->client_dummy)
- i2c_unregister_device(chip->client_dummy);
+ i2c_unregister_device(chip->client_dummy);
return 0;
}
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index dd67a31ac337..c38624ea0251 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
@@ -380,9 +381,16 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
}
}
+static const char *mrfld_gpio_get_pinctrl_dev_name(void)
+{
+ const char *dev_name = acpi_dev_get_first_match_name("INTC1002", NULL, -1);
+ return dev_name ? dev_name : "pinctrl-merrifield";
+}
+
static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const struct mrfld_gpio_pinrange *range;
+ const char *pinctrl_dev_name;
struct mrfld_gpio *priv;
u32 gpio_base, irq_base;
void __iomem *base;
@@ -439,10 +447,11 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
return retval;
}
+ pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name();
for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
range = &mrfld_gpio_ranges[i];
retval = gpiochip_add_pin_range(&priv->chip,
- "pinctrl-merrifield",
+ pinctrl_dev_name,
range->gpio_base,
range->pin_base,
range->npins);
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index f9042bcc27a4..7b14d6280e44 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -152,14 +152,13 @@ static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
{
unsigned long get_mask = 0;
unsigned long set_mask = 0;
- int bit = 0;
- while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio) {
- if (gc->bgpio_dir & BIT(bit))
- set_mask |= BIT(bit);
- else
- get_mask |= BIT(bit);
- }
+	/* First clear the requested bits, so lines that read as zero report zero */
+ *bits &= ~*mask;
+
+ /* Exploit the fact that we know which directions are set */
+ set_mask = *mask & gc->bgpio_dir;
+ get_mask = *mask & ~gc->bgpio_dir;
if (set_mask)
*bits |= gc->read_reg(gc->reg_set) & set_mask;
@@ -176,13 +175,13 @@ static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
/*
* This only works if the bits in the GPIO register are in native endianness.
- * It is dirt simple and fast in this case. (Also the most common case.)
*/
static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
-
- *bits = gc->read_reg(gc->reg_dat) & *mask;
+	/* First clear the requested bits, so lines that read as zero report zero */
+ *bits &= ~*mask;
+ *bits |= gc->read_reg(gc->reg_dat) & *mask;
return 0;
}
@@ -196,9 +195,12 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
unsigned long val;
int bit;
+	/* First clear the requested bits, so lines that read as zero report zero */
+ *bits &= ~*mask;
+
/* Create a mirrored mask */
- bit = 0;
- while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio)
+ bit = -1;
+ while ((bit = find_next_bit(mask, gc->ngpio, bit + 1)) < gc->ngpio)
readmask |= bgpio_line2mask(gc, bit);
/* Read the register */
@@ -208,8 +210,8 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
* Mirror the result into the "bits" result, this will give line 0
* in bit 0 ... line 31 in bit 31 for a 32bit register.
*/
- bit = 0;
- while ((bit = find_next_bit(&val, gc->ngpio, bit)) != gc->ngpio)
+ bit = -1;
+ while ((bit = find_next_bit(&val, gc->ngpio, bit + 1)) < gc->ngpio)
*bits |= bgpio_line2mask(gc, bit);
return 0;
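A standalone, plain-C illustration (not kernel code; all values are made up) of the mask arithmetic the rewritten helpers above rely on: clear the requested bits first, then fill output lines from the set register and input lines from the data register:

#include <stdio.h>

int main(void)
{
	unsigned long dir     = 0x0f; /* lines 0-3 are outputs */
	unsigned long reg_set = 0x05; /* latched output values */
	unsigned long reg_dat = 0xa0; /* sampled input values */
	unsigned long mask    = 0x33; /* caller asks for lines 0, 1, 4, 5 */
	unsigned long bits    = ~0UL; /* stale caller buffer */

	bits &= ~mask;                   /* clear only the requested bits */
	bits |= reg_set & (mask & dir);  /* outputs come from the set reg */
	bits |= reg_dat & (mask & ~dir); /* inputs come from the data reg */

	/* prints 0x...ffed: lines 0 and 5 high, 1 and 4 low, rest untouched */
	printf("bits = 0x%lx\n", bits);

	return 0;
}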
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 9532d86a82f7..3a545ad17817 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -27,13 +27,15 @@
#include "gpiolib.h"
#define GPIO_MOCKUP_NAME "gpio-mockup"
-#define GPIO_MOCKUP_MAX_GC 10
+#define GPIO_MOCKUP_MAX_GC 10
/*
* We're storing two values per chip: the GPIO base and the number
* of GPIO lines.
*/
#define GPIO_MOCKUP_MAX_RANGES (GPIO_MOCKUP_MAX_GC * 2)
+#define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
+
enum {
GPIO_MOCKUP_DIR_OUT = 0,
GPIO_MOCKUP_DIR_IN = 1,
@@ -46,7 +48,7 @@ enum {
*/
struct gpio_mockup_line_status {
int dir;
- bool value;
+ int value;
};
struct gpio_mockup_chip {
@@ -62,17 +64,33 @@ struct gpio_mockup_dbgfs_private {
int offset;
};
+struct gpio_mockup_platform_data {
+ int base;
+ int ngpio;
+ int index;
+ bool named_lines;
+};
+
static int gpio_mockup_ranges[GPIO_MOCKUP_MAX_RANGES];
-static int gpio_mockup_params_nr;
-module_param_array(gpio_mockup_ranges, int, &gpio_mockup_params_nr, 0400);
+static int gpio_mockup_num_ranges;
+module_param_array(gpio_mockup_ranges, int, &gpio_mockup_num_ranges, 0400);
static bool gpio_mockup_named_lines;
module_param_named(gpio_mockup_named_lines,
gpio_mockup_named_lines, bool, 0400);
-static const char gpio_mockup_name_start = 'A';
static struct dentry *gpio_mockup_dbg_dir;
+static int gpio_mockup_range_base(unsigned int index)
+{
+ return gpio_mockup_ranges[index * 2];
+}
+
+static int gpio_mockup_range_ngpio(unsigned int index)
+{
+ return gpio_mockup_ranges[index * 2 + 1];
+}
+
static int gpio_mockup_get(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -80,16 +98,26 @@ static int gpio_mockup_get(struct gpio_chip *gc, unsigned int offset)
return chip->lines[offset].value;
}
-static void gpio_mockup_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static void gpio_mockup_set(struct gpio_chip *gc,
+ unsigned int offset, int value)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
chip->lines[offset].value = !!value;
}
-static int gpio_mockup_dirout(struct gpio_chip *gc, unsigned int offset,
- int value)
+static void gpio_mockup_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ unsigned int bit;
+
+ for_each_set_bit(bit, mask, gc->ngpio)
+ gpio_mockup_set(gc, bit, test_bit(bit, bits));
+}
+
+static int gpio_mockup_dirout(struct gpio_chip *gc,
+ unsigned int offset, int value)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -115,29 +143,6 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
return chip->lines[offset].dir;
}
-static int gpio_mockup_name_lines(struct device *dev,
- struct gpio_mockup_chip *chip)
-{
- struct gpio_chip *gc = &chip->gc;
- char **names;
- int i;
-
- names = devm_kcalloc(dev, gc->ngpio, sizeof(char *), GFP_KERNEL);
- if (!names)
- return -ENOMEM;
-
- for (i = 0; i < gc->ngpio; i++) {
- names[i] = devm_kasprintf(dev, GFP_KERNEL,
- "%s-%d", gc->label, i);
- if (!names[i])
- return -ENOMEM;
- }
-
- gc->names = (const char *const *)names;
-
- return 0;
-}
-
static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -188,15 +193,21 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
struct gpio_mockup_chip *chip)
{
struct gpio_mockup_dbgfs_private *priv;
- struct dentry *evfile;
+ struct dentry *evfile, *link;
struct gpio_chip *gc;
+ const char *devname;
char *name;
int i;
gc = &chip->gc;
+ devname = dev_name(&gc->gpiodev->dev);
- chip->dbg_dir = debugfs_create_dir(gc->label, gpio_mockup_dbg_dir);
- if (!chip->dbg_dir)
+ chip->dbg_dir = debugfs_create_dir(devname, gpio_mockup_dbg_dir);
+ if (IS_ERR_OR_NULL(chip->dbg_dir))
+ goto err;
+
+ link = debugfs_create_symlink(gc->label, gpio_mockup_dbg_dir, devname);
+ if (IS_ERR_OR_NULL(link))
goto err;
for (i = 0; i < gc->ngpio; i++) {
@@ -214,23 +225,63 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
evfile = debugfs_create_file(name, 0200, chip->dbg_dir, priv,
&gpio_mockup_event_ops);
- if (!evfile)
+ if (IS_ERR_OR_NULL(evfile))
goto err;
}
return;
err:
- dev_err(dev, "error creating debugfs directory\n");
+ dev_err(dev, "error creating debugfs event files\n");
}
-static int gpio_mockup_add(struct device *dev,
- struct gpio_mockup_chip *chip,
- const char *name, int base, int ngpio)
+static int gpio_mockup_name_lines(struct device *dev,
+ struct gpio_mockup_chip *chip)
{
struct gpio_chip *gc = &chip->gc;
- int ret;
+ char **names;
+ int i;
+
+ names = devm_kcalloc(dev, gc->ngpio, sizeof(char *), GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+
+ for (i = 0; i < gc->ngpio; i++) {
+ names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-%d", gc->label, i);
+ if (!names[i])
+ return -ENOMEM;
+ }
+
+ gc->names = (const char *const *)names;
+
+ return 0;
+}
+
+static int gpio_mockup_probe(struct platform_device *pdev)
+{
+ struct gpio_mockup_platform_data *pdata;
+ struct gpio_mockup_chip *chip;
+ struct gpio_chip *gc;
+ int rv, base, ngpio;
+ struct device *dev;
+ char *name;
+
+ dev = &pdev->dev;
+ pdata = dev_get_platdata(dev);
+ base = pdata->base;
+ ngpio = pdata->ngpio;
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s-%c",
+ pdev->name, pdata->index);
+ if (!name)
+ return -ENOMEM;
+
+ gc = &chip->gc;
gc->base = base;
gc->ngpio = ngpio;
gc->label = name;
@@ -238,6 +289,7 @@ static int gpio_mockup_add(struct device *dev,
gc->parent = dev;
gc->get = gpio_mockup_get;
gc->set = gpio_mockup_set;
+ gc->set_multiple = gpio_mockup_set_multiple;
gc->direction_output = gpio_mockup_dirout;
gc->direction_input = gpio_mockup_dirin;
gc->get_direction = gpio_mockup_get_direction;
@@ -248,19 +300,19 @@ static int gpio_mockup_add(struct device *dev,
if (!chip->lines)
return -ENOMEM;
- if (gpio_mockup_named_lines) {
- ret = gpio_mockup_name_lines(dev, chip);
- if (ret)
- return ret;
+ if (pdata->named_lines) {
+ rv = gpio_mockup_name_lines(dev, chip);
+ if (rv)
+ return rv;
}
- ret = devm_irq_sim_init(dev, &chip->irqsim, gc->ngpio);
- if (ret)
- return ret;
+ rv = devm_irq_sim_init(dev, &chip->irqsim, gc->ngpio);
+ if (rv < 0)
+ return rv;
- ret = devm_gpiochip_add_data(dev, &chip->gc, chip);
- if (ret)
- return ret;
+ rv = devm_gpiochip_add_data(dev, &chip->gc, chip);
+ if (rv)
+ return rv;
if (gpio_mockup_dbg_dir)
gpio_mockup_debugfs_setup(dev, chip);
@@ -268,58 +320,6 @@ static int gpio_mockup_add(struct device *dev,
return 0;
}
-static int gpio_mockup_probe(struct platform_device *pdev)
-{
- int ret, i, base, ngpio, num_chips;
- struct device *dev = &pdev->dev;
- struct gpio_mockup_chip *chips;
- char *chip_name;
-
- if (gpio_mockup_params_nr < 2 || (gpio_mockup_params_nr % 2))
- return -EINVAL;
-
- /* Each chip is described by two values. */
- num_chips = gpio_mockup_params_nr / 2;
-
- chips = devm_kcalloc(dev, num_chips, sizeof(*chips), GFP_KERNEL);
- if (!chips)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, chips);
-
- for (i = 0; i < num_chips; i++) {
- base = gpio_mockup_ranges[i * 2];
-
- if (base == -1)
- ngpio = gpio_mockup_ranges[i * 2 + 1];
- else
- ngpio = gpio_mockup_ranges[i * 2 + 1] - base;
-
- if (ngpio >= 0) {
- chip_name = devm_kasprintf(dev, GFP_KERNEL,
- "%s-%c", GPIO_MOCKUP_NAME,
- gpio_mockup_name_start + i);
- if (!chip_name)
- return -ENOMEM;
-
- ret = gpio_mockup_add(dev, &chips[i],
- chip_name, base, ngpio);
- } else {
- ret = -EINVAL;
- }
-
- if (ret) {
- dev_err(dev,
- "adding gpiochip failed: %d (base: %d, ngpio: %d)\n",
- ret, base, base < 0 ? ngpio : base + ngpio);
-
- return ret;
- }
- }
-
- return 0;
-}
-
static struct platform_driver gpio_mockup_driver = {
.driver = {
.name = GPIO_MOCKUP_NAME,
@@ -327,44 +327,88 @@ static struct platform_driver gpio_mockup_driver = {
.probe = gpio_mockup_probe,
};
-static struct platform_device *pdev;
-static int __init mock_device_init(void)
+static struct platform_device *gpio_mockup_pdevs[GPIO_MOCKUP_MAX_GC];
+
+static void gpio_mockup_unregister_pdevs(void)
{
- int err;
+ struct platform_device *pdev;
+ int i;
- gpio_mockup_dbg_dir = debugfs_create_dir("gpio-mockup-event", NULL);
- if (!gpio_mockup_dbg_dir)
- pr_err("%s: error creating debugfs directory\n",
- GPIO_MOCKUP_NAME);
+ for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++) {
+ pdev = gpio_mockup_pdevs[i];
- pdev = platform_device_alloc(GPIO_MOCKUP_NAME, -1);
- if (!pdev)
- return -ENOMEM;
+ if (pdev)
+ platform_device_unregister(pdev);
+ }
+}
- err = platform_device_add(pdev);
- if (err) {
- platform_device_put(pdev);
- return err;
+static int __init gpio_mockup_init(void)
+{
+ int i, num_chips, err = 0, index = 'A';
+ struct gpio_mockup_platform_data pdata;
+ struct platform_device *pdev;
+
+ if ((gpio_mockup_num_ranges < 2) ||
+ (gpio_mockup_num_ranges % 2) ||
+ (gpio_mockup_num_ranges > GPIO_MOCKUP_MAX_RANGES))
+ return -EINVAL;
+
+ /* Each chip is described by two values. */
+ num_chips = gpio_mockup_num_ranges / 2;
+
+ /*
+	 * The second value in the <base GPIO - number of GPIOs> pair must
+	 * always be greater than 0.
+ */
+ for (i = 0; i < num_chips; i++) {
+ if (gpio_mockup_range_ngpio(i) < 0)
+ return -EINVAL;
}
+ gpio_mockup_dbg_dir = debugfs_create_dir("gpio-mockup-event", NULL);
+ if (IS_ERR_OR_NULL(gpio_mockup_dbg_dir))
+ gpio_mockup_err("error creating debugfs directory\n");
+
err = platform_driver_register(&gpio_mockup_driver);
if (err) {
- platform_device_unregister(pdev);
+ gpio_mockup_err("error registering platform driver\n");
return err;
}
+ for (i = 0; i < num_chips; i++) {
+ pdata.index = index++;
+ pdata.base = gpio_mockup_range_base(i);
+ pdata.ngpio = pdata.base < 0
+ ? gpio_mockup_range_ngpio(i)
+ : gpio_mockup_range_ngpio(i) - pdata.base;
+ pdata.named_lines = gpio_mockup_named_lines;
+
+ pdev = platform_device_register_resndata(NULL,
+ GPIO_MOCKUP_NAME,
+ i, NULL, 0, &pdata,
+ sizeof(pdata));
+ if (IS_ERR(pdev)) {
+			gpio_mockup_err("error registering device\n");
+ platform_driver_unregister(&gpio_mockup_driver);
+ gpio_mockup_unregister_pdevs();
+ return PTR_ERR(pdev);
+ }
+
+ gpio_mockup_pdevs[i] = pdev;
+ }
+
return 0;
}
-static void __exit mock_device_exit(void)
+static void __exit gpio_mockup_exit(void)
{
debugfs_remove_recursive(gpio_mockup_dbg_dir);
platform_driver_unregister(&gpio_mockup_driver);
- platform_device_unregister(pdev);
+ gpio_mockup_unregister_pdevs();
}
-module_init(mock_device_init);
-module_exit(mock_device_exit);
+module_init(gpio_mockup_init);
+module_exit(gpio_mockup_exit);
MODULE_AUTHOR("Kamlakant Patel <kamlakant.patel@broadcom.com>");
MODULE_AUTHOR("Bamvor Jian Zhang <bamvor.zhangjian@linaro.org>");
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index e136d666f1e5..ab5035b96886 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1058,7 +1058,9 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
{
+ struct gpio_irq_chip *irq;
static int gpio;
+ const char *label;
int irq_base = 0;
int ret;
@@ -1080,21 +1082,15 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
bank->chip.parent = &omap_mpuio_device.dev;
bank->chip.base = OMAP_MPUIO(0);
} else {
- bank->chip.label = "gpio";
+ label = devm_kasprintf(bank->chip.parent, GFP_KERNEL, "gpio-%d-%d",
+ gpio, gpio + bank->width - 1);
+ if (!label)
+ return -ENOMEM;
+ bank->chip.label = label;
bank->chip.base = gpio;
}
bank->chip.ngpio = bank->width;
- ret = gpiochip_add_data(&bank->chip, bank);
- if (ret) {
- dev_err(bank->chip.parent,
- "Could not register gpio chip %d\n", ret);
- return ret;
- }
-
- if (!bank->is_mpuio)
- gpio += bank->width;
-
#ifdef CONFIG_ARCH_OMAP1
/*
* REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
@@ -1115,25 +1111,30 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
irqc->irq_set_wake = NULL;
}
- ret = gpiochip_irqchip_add(&bank->chip, irqc,
- irq_base, handle_bad_irq,
- IRQ_TYPE_NONE);
+ irq = &bank->chip.irq;
+ irq->chip = irqc;
+ irq->handler = handle_bad_irq;
+ irq->default_type = IRQ_TYPE_NONE;
+ irq->num_parents = 1;
+ irq->parents = &bank->irq;
+ irq->first = irq_base;
+ ret = gpiochip_add_data(&bank->chip, bank);
if (ret) {
dev_err(bank->chip.parent,
- "Couldn't add irqchip to gpiochip %d\n", ret);
- gpiochip_remove(&bank->chip);
- return -ENODEV;
+ "Could not register gpio chip %d\n", ret);
+ return ret;
}
- gpiochip_set_chained_irqchip(&bank->chip, irqc, bank->irq, NULL);
-
ret = devm_request_irq(bank->chip.parent, bank->irq,
omap_gpio_irq_handler,
0, dev_name(bank->chip.parent), bank);
if (ret)
gpiochip_remove(&bank->chip);
+ if (!bank->is_mpuio)
+ gpio += bank->width;
+
return ret;
}
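A condensed sketch of the registration pattern the OMAP conversion above adopts (driver and function names are hypothetical): the irqchip is described in struct gpio_irq_chip before gpiochip_add_data(), replacing the old gpiochip_irqchip_add()/gpiochip_set_chained_irqchip() sequence:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static int foo_gpio_register(struct gpio_chip *gc, struct irq_chip *irqc,
			     unsigned int *parent_irq, void *data)
{
	struct gpio_irq_chip *girq = &gc->irq;

	girq->chip = irqc;
	girq->handler = handle_bad_irq;   /* real handler set per irq type */
	girq->default_type = IRQ_TYPE_NONE;
	girq->num_parents = 1;
	girq->parents = parent_irq;       /* must outlive the gpio_chip */
	girq->first = 0;                  /* 0 = allocate irqs dynamically */

	/* registers the chip and its irqchip in a single call */
	return gpiochip_add_data(gc, data);
}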
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
new file mode 100644
index 000000000000..f666e2e69074
--- /dev/null
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GPIO driver for the ACCES PCIe-IDIO-24 family
+ * Copyright (C) 2018 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * This driver supports the following ACCES devices: PCIe-IDIO-24,
+ * PCIe-IDI-24, PCIe-IDO-24, and PCIe-IDIO-12.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/irqdesc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/**
+ * struct idio_24_gpio_reg - GPIO device registers structure
+ * @out0_7: Read: FET Outputs 0-7
+ * Write: FET Outputs 0-7
+ * @out8_15: Read: FET Outputs 8-15
+ * Write: FET Outputs 8-15
+ * @out16_23: Read: FET Outputs 16-23
+ * Write: FET Outputs 16-23
+ * @ttl_out0_7: Read: TTL/CMOS Outputs 0-7
+ * Write: TTL/CMOS Outputs 0-7
+ * @in0_7: Read: Isolated Inputs 0-7
+ * Write: Reserved
+ * @in8_15: Read: Isolated Inputs 8-15
+ * Write: Reserved
+ * @in16_23: Read: Isolated Inputs 16-23
+ * Write: Reserved
+ * @ttl_in0_7: Read: TTL/CMOS Inputs 0-7
+ * Write: Reserved
+ * @cos0_7: Read: COS Status Inputs 0-7
+ * Write: COS Clear Inputs 0-7
+ * @cos8_15: Read: COS Status Inputs 8-15
+ * Write: COS Clear Inputs 8-15
+ * @cos16_23: Read: COS Status Inputs 16-23
+ * Write: COS Clear Inputs 16-23
+ * @cos_ttl0_7: Read: COS Status TTL/CMOS 0-7
+ * Write: COS Clear TTL/CMOS 0-7
+ * @ctl: Read: Control Register
+ * Write: Control Register
+ * @reserved: Read: Reserved
+ * Write: Reserved
+ * @cos_enable: Read: COS Enable
+ * Write: COS Enable
+ * @soft_reset: Read: IRQ Output Pin Status
+ * Write: Software Board Reset
+ */
+struct idio_24_gpio_reg {
+ u8 out0_7;
+ u8 out8_15;
+ u8 out16_23;
+ u8 ttl_out0_7;
+ u8 in0_7;
+ u8 in8_15;
+ u8 in16_23;
+ u8 ttl_in0_7;
+ u8 cos0_7;
+ u8 cos8_15;
+ u8 cos16_23;
+ u8 cos_ttl0_7;
+ u8 ctl;
+ u8 reserved;
+ u8 cos_enable;
+ u8 soft_reset;
+};
+
+/**
+ * struct idio_24_gpio - GPIO device private data structure
+ * @chip: instance of the gpio_chip
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @reg: I/O address offset for the GPIO device registers
+ * @irq_mask: I/O bits affected by interrupts
+ */
+struct idio_24_gpio {
+ struct gpio_chip chip;
+ raw_spinlock_t lock;
+ struct idio_24_gpio_reg __iomem *reg;
+ unsigned long irq_mask;
+};
+
+static int idio_24_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* FET Outputs */
+ if (offset < 24)
+ return 0;
+
+ /* Isolated Inputs */
+ if (offset < 48)
+ return 1;
+
+ /* TTL/CMOS I/O */
+ /* OUT MODE = 1 when TTL/CMOS Output Mode is set */
+ return !(ioread8(&idio24gpio->reg->ctl) & out_mode_mask);
+}
+
+static int idio_24_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ unsigned int ctl_state;
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* TTL/CMOS I/O */
+ if (offset > 47) {
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ /* Clear TTL/CMOS Output Mode */
+ ctl_state = ioread8(&idio24gpio->reg->ctl) & ~out_mode_mask;
+ iowrite8(ctl_state, &idio24gpio->reg->ctl);
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+ }
+
+ return 0;
+}
+
+static int idio_24_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ unsigned int ctl_state;
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* TTL/CMOS I/O */
+ if (offset > 47) {
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ /* Set TTL/CMOS Output Mode */
+ ctl_state = ioread8(&idio24gpio->reg->ctl) | out_mode_mask;
+ iowrite8(ctl_state, &idio24gpio->reg->ctl);
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+ }
+
+ chip->set(chip, offset, value);
+ return 0;
+}
+
+static int idio_24_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ const unsigned long offset_mask = BIT(offset % 8);
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* FET Outputs */
+ if (offset < 8)
+ return !!(ioread8(&idio24gpio->reg->out0_7) & offset_mask);
+
+ if (offset < 16)
+ return !!(ioread8(&idio24gpio->reg->out8_15) & offset_mask);
+
+ if (offset < 24)
+ return !!(ioread8(&idio24gpio->reg->out16_23) & offset_mask);
+
+ /* Isolated Inputs */
+ if (offset < 32)
+ return !!(ioread8(&idio24gpio->reg->in0_7) & offset_mask);
+
+ if (offset < 40)
+ return !!(ioread8(&idio24gpio->reg->in8_15) & offset_mask);
+
+ if (offset < 48)
+ return !!(ioread8(&idio24gpio->reg->in16_23) & offset_mask);
+
+ /* TTL/CMOS Outputs */
+ if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask)
+ return !!(ioread8(&idio24gpio->reg->ttl_out0_7) & offset_mask);
+
+ /* TTL/CMOS Inputs */
+ return !!(ioread8(&idio24gpio->reg->ttl_in0_7) & offset_mask);
+}
+
+static void idio_24_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ const unsigned long out_mode_mask = BIT(1);
+ void __iomem *base;
+ const unsigned int mask = BIT(offset % 8);
+ unsigned long flags;
+ unsigned int out_state;
+
+ /* Isolated Inputs */
+ if (offset > 23 && offset < 48)
+ return;
+
+ /* TTL/CMOS Inputs */
+ if (offset > 47 && !(ioread8(&idio24gpio->reg->ctl) & out_mode_mask))
+ return;
+
+ /* TTL/CMOS Outputs */
+ if (offset > 47)
+ base = &idio24gpio->reg->ttl_out0_7;
+ /* FET Outputs */
+ else if (offset > 15)
+ base = &idio24gpio->reg->out16_23;
+ else if (offset > 7)
+ base = &idio24gpio->reg->out8_15;
+ else
+ base = &idio24gpio->reg->out0_7;
+
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ if (value)
+ out_state = ioread8(base) | mask;
+ else
+ out_state = ioread8(base) & ~mask;
+
+ iowrite8(out_state, base);
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+}
+
+static void idio_24_irq_ack(struct irq_data *data)
+{
+}
+
+static void idio_24_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *const chip = irq_data_get_irq_chip_data(data);
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
+ unsigned char new_irq_mask;
+	const unsigned long bank_offset = bit_offset / 8 * 8;
+ unsigned char cos_enable_state;
+
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+	idio24gpio->irq_mask &= ~BIT(bit_offset);
+ new_irq_mask = idio24gpio->irq_mask >> bank_offset;
+
+ if (!new_irq_mask) {
+ cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
+
+ /* Disable Rising Edge detection */
+ cos_enable_state &= ~BIT(bank_offset);
+ /* Disable Falling Edge detection */
+ cos_enable_state &= ~BIT(bank_offset + 4);
+
+ iowrite8(cos_enable_state, &idio24gpio->reg->cos_enable);
+ }
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+}
+
+static void idio_24_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *const chip = irq_data_get_irq_chip_data(data);
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ unsigned char prev_irq_mask;
+ const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
+	const unsigned long bank_offset = bit_offset / 8 * 8;
+ unsigned char cos_enable_state;
+
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ prev_irq_mask = idio24gpio->irq_mask >> bank_offset;
+ idio24gpio->irq_mask |= BIT(bit_offset);
+
+ if (!prev_irq_mask) {
+ cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
+
+ /* Enable Rising Edge detection */
+ cos_enable_state |= BIT(bank_offset);
+ /* Enable Falling Edge detection */
+ cos_enable_state |= BIT(bank_offset + 4);
+
+ iowrite8(cos_enable_state, &idio24gpio->reg->cos_enable);
+ }
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+}
+
+static int idio_24_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ /* The only valid irq types are none and both-edges */
+ if (flow_type != IRQ_TYPE_NONE &&
+ (flow_type & IRQ_TYPE_EDGE_BOTH) != IRQ_TYPE_EDGE_BOTH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip idio_24_irqchip = {
+ .name = "pcie-idio-24",
+ .irq_ack = idio_24_irq_ack,
+ .irq_mask = idio_24_irq_mask,
+ .irq_unmask = idio_24_irq_unmask,
+ .irq_set_type = idio_24_irq_set_type
+};
+
+static irqreturn_t idio_24_irq_handler(int irq, void *dev_id)
+{
+ struct idio_24_gpio *const idio24gpio = dev_id;
+ unsigned long irq_status;
+ struct gpio_chip *const chip = &idio24gpio->chip;
+ unsigned long irq_mask;
+ int gpio;
+
+ raw_spin_lock(&idio24gpio->lock);
+
+ /* Read Change-Of-State status */
+ irq_status = ioread32(&idio24gpio->reg->cos0_7);
+
+ raw_spin_unlock(&idio24gpio->lock);
+
+	/* Make sure it was our device that generated the IRQ */
+ if (!irq_status)
+ return IRQ_NONE;
+
+ /* Handle only unmasked IRQ */
+ irq_mask = idio24gpio->irq_mask & irq_status;
+
+ for_each_set_bit(gpio, &irq_mask, chip->ngpio - 24)
+ generic_handle_irq(irq_find_mapping(chip->irq.domain,
+ gpio + 24));
+
+ raw_spin_lock(&idio24gpio->lock);
+
+ /* Clear Change-Of-State status */
+ iowrite32(irq_status, &idio24gpio->reg->cos0_7);
+
+ raw_spin_unlock(&idio24gpio->lock);
+
+ return IRQ_HANDLED;
+}
+
+#define IDIO_24_NGPIO 56
+static const char *idio_24_names[IDIO_24_NGPIO] = {
+ "OUT0", "OUT1", "OUT2", "OUT3", "OUT4", "OUT5", "OUT6", "OUT7",
+ "OUT8", "OUT9", "OUT10", "OUT11", "OUT12", "OUT13", "OUT14", "OUT15",
+ "OUT16", "OUT17", "OUT18", "OUT19", "OUT20", "OUT21", "OUT22", "OUT23",
+ "IIN0", "IIN1", "IIN2", "IIN3", "IIN4", "IIN5", "IIN6", "IIN7",
+ "IIN8", "IIN9", "IIN10", "IIN11", "IIN12", "IIN13", "IIN14", "IIN15",
+ "IIN16", "IIN17", "IIN18", "IIN19", "IIN20", "IIN21", "IIN22", "IIN23",
+ "TTL0", "TTL1", "TTL2", "TTL3", "TTL4", "TTL5", "TTL6", "TTL7"
+};
+
+static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *const dev = &pdev->dev;
+ struct idio_24_gpio *idio24gpio;
+ int err;
+ const size_t pci_bar_index = 2;
+ const char *const name = pci_name(pdev);
+
+ idio24gpio = devm_kzalloc(dev, sizeof(*idio24gpio), GFP_KERNEL);
+ if (!idio24gpio)
+ return -ENOMEM;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device (%d)\n", err);
+ return err;
+ }
+
+ err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name);
+ if (err) {
+ dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
+ return err;
+ }
+
+ idio24gpio->reg = pcim_iomap_table(pdev)[pci_bar_index];
+
+ idio24gpio->chip.label = name;
+ idio24gpio->chip.parent = dev;
+ idio24gpio->chip.owner = THIS_MODULE;
+ idio24gpio->chip.base = -1;
+ idio24gpio->chip.ngpio = IDIO_24_NGPIO;
+ idio24gpio->chip.names = idio_24_names;
+ idio24gpio->chip.get_direction = idio_24_gpio_get_direction;
+ idio24gpio->chip.direction_input = idio_24_gpio_direction_input;
+ idio24gpio->chip.direction_output = idio_24_gpio_direction_output;
+ idio24gpio->chip.get = idio_24_gpio_get;
+ idio24gpio->chip.set = idio_24_gpio_set;
+
+ raw_spin_lock_init(&idio24gpio->lock);
+
+ /* Software board reset */
+ iowrite8(0, &idio24gpio->reg->soft_reset);
+
+ err = devm_gpiochip_add_data(dev, &idio24gpio->chip, idio24gpio);
+ if (err) {
+ dev_err(dev, "GPIO registering failed (%d)\n", err);
+ return err;
+ }
+
+ err = gpiochip_irqchip_add(&idio24gpio->chip, &idio_24_irqchip, 0,
+ handle_edge_irq, IRQ_TYPE_NONE);
+ if (err) {
+ dev_err(dev, "Could not add irqchip (%d)\n", err);
+ return err;
+ }
+
+ err = devm_request_irq(dev, pdev->irq, idio_24_irq_handler, IRQF_SHARED,
+ name, idio24gpio);
+ if (err) {
+ dev_err(dev, "IRQ handler registering failed (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct pci_device_id idio_24_pci_dev_id[] = {
+ { PCI_DEVICE(0x494F, 0x0FD0) }, { PCI_DEVICE(0x494F, 0x0BD0) },
+ { PCI_DEVICE(0x494F, 0x07D0) }, { PCI_DEVICE(0x494F, 0x0FC0) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, idio_24_pci_dev_id);
+
+static struct pci_driver idio_24_driver = {
+ .name = "pcie-idio-24",
+ .id_table = idio_24_pci_dev_id,
+ .probe = idio_24_probe
+};
+
+module_pci_driver(idio_24_driver);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("ACCES PCIe-IDIO-24 GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index 23e771dba4c1..e85903eddc68 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -103,8 +103,8 @@ static int gpio_reg_to_irq(struct gpio_chip *gc, unsigned offset)
struct gpio_reg *r = to_gpio_reg(gc);
int irq = r->irqs[offset];
- if (irq >= 0 && r->irq.domain)
- irq = irq_find_mapping(r->irq.domain, irq);
+ if (irq >= 0 && r->irqdomain)
+ irq = irq_find_mapping(r->irqdomain, irq);
return irq;
}
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index e6e5cca624a7..f8d7d1cd8488 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -190,6 +190,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
};
int i, j;
+ /*
+	 * STMPE1600: to be able to get an IRQ from the pins, a read must
+	 * be done on the GPMR register, or a write to the GPSR or GPCR
+	 * registers
+ */
+ if (stmpe->partnum == STMPE1600) {
+ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
+ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
+ }
+
for (i = 0; i < CACHE_NR_REGS; i++) {
/* STMPE801 and STMPE1600 don't have RE and FE registers */
if ((stmpe->partnum == STMPE801 ||
@@ -227,21 +237,11 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
- struct stmpe *stmpe = stmpe_gpio->stmpe;
int offset = d->hwirq;
int regoffset = offset / 8;
int mask = BIT(offset % 8);
stmpe_gpio->regs[REG_IE][regoffset] |= mask;
-
- /*
- * STMPE1600 workaround: to be able to get IRQ from pins,
- * a read must be done on GPMR register, or a write in
- * GPSR or GPCR registers
- */
- if (stmpe->partnum == STMPE1600)
- stmpe_reg_read(stmpe,
- stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]);
}
static void stmpe_dbg_show_one(struct seq_file *s,
@@ -273,15 +273,21 @@ static void stmpe_dbg_show_one(struct seq_file *s,
u8 fall_reg;
u8 irqen_reg;
- char *edge_det_values[] = {"edge-inactive",
- "edge-asserted",
- "not-supported"};
- char *rise_values[] = {"no-rising-edge-detection",
- "rising-edge-detection",
- "not-supported"};
- char *fall_values[] = {"no-falling-edge-detection",
- "falling-edge-detection",
- "not-supported"};
+ static const char * const edge_det_values[] = {
+ "edge-inactive",
+ "edge-asserted",
+ "not-supported"
+ };
+ static const char * const rise_values[] = {
+ "no-rising-edge-detection",
+ "rising-edge-detection",
+ "not-supported"
+ };
+ static const char * const fall_values[] = {
+ "no-falling-edge-detection",
+ "falling-edge-detection",
+ "not-supported"
+ };
#define NOT_SUPPORTED_IDX 2
u8 edge_det = NOT_SUPPORTED_IDX;
u8 rise = NOT_SUPPORTED_IDX;
@@ -344,7 +350,7 @@ static void stmpe_dbg_show(struct seq_file *s, struct gpio_chip *gc)
for (i = 0; i < gc->ngpio; i++, gpio++) {
stmpe_dbg_show_one(s, gc, i, gpio);
- seq_printf(s, "\n");
+ seq_putc(s, '\n');
}
}
@@ -426,12 +432,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
struct device_node *np = pdev->dev.of_node;
struct stmpe_gpio *stmpe_gpio;
- int ret;
- int irq = 0;
-
- irq = platform_get_irq(pdev, 0);
+ int ret, irq;
- stmpe_gpio = kzalloc(sizeof(struct stmpe_gpio), GFP_KERNEL);
+ stmpe_gpio = kzalloc(sizeof(*stmpe_gpio), GFP_KERNEL);
if (!stmpe_gpio)
return -ENOMEM;
@@ -453,6 +456,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
if (stmpe_gpio->norequest_mask)
stmpe_gpio->chip.irq.need_valid_mask = true;
+ irq = platform_get_irq(pdev, 0);
if (irq < 0)
dev_info(&pdev->dev,
"device configured in no-irq mode: "
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 8db47f671708..02fa8fe2292a 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -565,6 +565,7 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
* than their parents, so it won't report false recursion.
*/
static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
static int tegra_gpio_probe(struct platform_device *pdev)
{
@@ -670,7 +671,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
bank = &tgi->bank_info[GPIO_BANK(gpio)];
- irq_set_lockdep_class(irq, &gpio_lock_class);
+ irq_set_lockdep_class(irq, &gpio_lock_class,
+ &gpio_request_class);
irq_set_chip_data(irq, bank);
irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
}
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index b5adb79a631a..d16e9d4a129b 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -553,8 +553,10 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
0, 0, of_node_to_fwnode(dev->of_node),
&thunderx_gpio_irqd_ops, txgpio);
- if (!txgpio->irqd)
+ if (!txgpio->irqd) {
+ err = -ENOMEM;
goto out;
+ }
/* Push on irq_data and the domain for each line. */
for (i = 0; i < ngpio; i++) {
diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
new file mode 100644
index 000000000000..7f8f5b02e31d
--- /dev/null
+++ b/drivers/gpio/gpio-winbond.c
@@ -0,0 +1,732 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * GPIO interface for Winbond Super I/O chips
+ * Currently, only W83627UHG (Nuvoton NCT6627UD) is supported.
+ *
+ * Author: Maciej S. Szmigiero <mail@maciej.szmigiero.name>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/gpio/driver.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/module.h>
+
+#define WB_GPIO_DRIVER_NAME KBUILD_MODNAME
+
+#define WB_SIO_BASE 0x2e
+#define WB_SIO_BASE_HIGH 0x4e
+
+#define WB_SIO_EXT_ENTER_KEY 0x87
+#define WB_SIO_EXT_EXIT_KEY 0xaa
+
+/* global chip registers */
+
+#define WB_SIO_REG_LOGICAL 0x07
+
+#define WB_SIO_REG_CHIP_MSB 0x20
+#define WB_SIO_REG_CHIP_LSB 0x21
+
+#define WB_SIO_CHIP_ID_W83627UHG 0xa230
+#define WB_SIO_CHIP_ID_W83627UHG_MASK GENMASK(15, 4)
+
+#define WB_SIO_REG_DPD 0x22
+#define WB_SIO_REG_DPD_UARTA 4
+#define WB_SIO_REG_DPD_UARTB 5
+
+#define WB_SIO_REG_IDPD 0x23
+#define WB_SIO_REG_IDPD_UARTC 4
+#define WB_SIO_REG_IDPD_UARTD 5
+#define WB_SIO_REG_IDPD_UARTE 6
+#define WB_SIO_REG_IDPD_UARTF 7
+
+#define WB_SIO_REG_GLOBAL_OPT 0x24
+#define WB_SIO_REG_GO_ENFDC 1
+
+#define WB_SIO_REG_OVTGPIO3456 0x29
+#define WB_SIO_REG_OG3456_G3PP 3
+#define WB_SIO_REG_OG3456_G4PP 4
+#define WB_SIO_REG_OG3456_G5PP 5
+#define WB_SIO_REG_OG3456_G6PP 7
+
+#define WB_SIO_REG_I2C_PS 0x2a
+#define WB_SIO_REG_I2CPS_I2CFS 1
+
+#define WB_SIO_REG_GPIO1_MF 0x2c
+#define WB_SIO_REG_G1MF_G1PP 6
+#define WB_SIO_REG_G1MF_G2PP 7
+#define WB_SIO_REG_G1MF_FS_MASK GENMASK(1, 0)
+#define WB_SIO_REG_G1MF_FS_IR_OFF 0
+#define WB_SIO_REG_G1MF_FS_IR 1
+#define WB_SIO_REG_G1MF_FS_GPIO1 2
+#define WB_SIO_REG_G1MF_FS_UARTB 3
+
+/* not an actual device number, just a value meaning 'no device' */
+#define WB_SIO_DEV_NONE 0xff
+
+/* registers with offsets >= 0x30 are specific for a particular device */
+
+/* UART B logical device */
+#define WB_SIO_DEV_UARTB 0x03
+#define WB_SIO_UARTB_REG_ENABLE 0x30
+#define WB_SIO_UARTB_ENABLE_ON 0
+
+/* UART C logical device */
+#define WB_SIO_DEV_UARTC 0x06
+#define WB_SIO_UARTC_REG_ENABLE 0x30
+#define WB_SIO_UARTC_ENABLE_ON 0
+
+/* GPIO3, GPIO4 logical device */
+#define WB_SIO_DEV_GPIO34 0x07
+#define WB_SIO_GPIO34_REG_ENABLE 0x30
+#define WB_SIO_GPIO34_ENABLE_3 0
+#define WB_SIO_GPIO34_ENABLE_4 1
+#define WB_SIO_GPIO34_REG_IO3 0xe0
+#define WB_SIO_GPIO34_REG_DATA3 0xe1
+#define WB_SIO_GPIO34_REG_INV3 0xe2
+#define WB_SIO_GPIO34_REG_IO4 0xe4
+#define WB_SIO_GPIO34_REG_DATA4 0xe5
+#define WB_SIO_GPIO34_REG_INV4 0xe6
+
+/* WDTO, PLED, GPIO5, GPIO6 logical device */
+#define WB_SIO_DEV_WDGPIO56 0x08
+#define WB_SIO_WDGPIO56_REG_ENABLE 0x30
+#define WB_SIO_WDGPIO56_ENABLE_5 1
+#define WB_SIO_WDGPIO56_ENABLE_6 2
+#define WB_SIO_WDGPIO56_REG_IO5 0xe0
+#define WB_SIO_WDGPIO56_REG_DATA5 0xe1
+#define WB_SIO_WDGPIO56_REG_INV5 0xe2
+#define WB_SIO_WDGPIO56_REG_IO6 0xe4
+#define WB_SIO_WDGPIO56_REG_DATA6 0xe5
+#define WB_SIO_WDGPIO56_REG_INV6 0xe6
+
+/* GPIO1, GPIO2, SUSLED logical device */
+#define WB_SIO_DEV_GPIO12 0x09
+#define WB_SIO_GPIO12_REG_ENABLE 0x30
+#define WB_SIO_GPIO12_ENABLE_1 0
+#define WB_SIO_GPIO12_ENABLE_2 1
+#define WB_SIO_GPIO12_REG_IO1 0xe0
+#define WB_SIO_GPIO12_REG_DATA1 0xe1
+#define WB_SIO_GPIO12_REG_INV1 0xe2
+#define WB_SIO_GPIO12_REG_IO2 0xe4
+#define WB_SIO_GPIO12_REG_DATA2 0xe5
+#define WB_SIO_GPIO12_REG_INV2 0xe6
+
+/* UART D logical device */
+#define WB_SIO_DEV_UARTD 0x0d
+#define WB_SIO_UARTD_REG_ENABLE 0x30
+#define WB_SIO_UARTD_ENABLE_ON 0
+
+/* UART E logical device */
+#define WB_SIO_DEV_UARTE 0x0e
+#define WB_SIO_UARTE_REG_ENABLE 0x30
+#define WB_SIO_UARTE_ENABLE_ON 0
+
+/*
+ * For a description of what a particular field of this struct means,
+ * please see the description of the relevant module parameter at the
+ * bottom of this file.
+ */
+struct winbond_gpio_params {
+ unsigned long base;
+ unsigned long gpios;
+ unsigned long ppgpios;
+ unsigned long odgpios;
+ bool pledgpio;
+ bool beepgpio;
+ bool i2cgpio;
+};
+
+static struct winbond_gpio_params params;
+
+static int winbond_sio_enter(unsigned long base)
+{
+ if (!request_muxed_region(base, 2, WB_GPIO_DRIVER_NAME))
+ return -EBUSY;
+
+ /*
+	 * The datasheet says two successive writes of the "key" value are
+	 * needed in order for the chip to enter the "Extended Function Mode".
+ */
+ outb(WB_SIO_EXT_ENTER_KEY, base);
+ outb(WB_SIO_EXT_ENTER_KEY, base);
+
+ return 0;
+}
+
+static void winbond_sio_select_logical(unsigned long base, u8 dev)
+{
+ outb(WB_SIO_REG_LOGICAL, base);
+ outb(dev, base + 1);
+}
+
+static void winbond_sio_leave(unsigned long base)
+{
+ outb(WB_SIO_EXT_EXIT_KEY, base);
+
+ release_region(base, 2);
+}
+
+static void winbond_sio_reg_write(unsigned long base, u8 reg, u8 data)
+{
+ outb(reg, base);
+ outb(data, base + 1);
+}
+
+static u8 winbond_sio_reg_read(unsigned long base, u8 reg)
+{
+ outb(reg, base);
+ return inb(base + 1);
+}
+
+static void winbond_sio_reg_bset(unsigned long base, u8 reg, u8 bit)
+{
+ u8 val;
+
+ val = winbond_sio_reg_read(base, reg);
+ val |= BIT(bit);
+ winbond_sio_reg_write(base, reg, val);
+}
+
+static void winbond_sio_reg_bclear(unsigned long base, u8 reg, u8 bit)
+{
+ u8 val;
+
+ val = winbond_sio_reg_read(base, reg);
+ val &= ~BIT(bit);
+ winbond_sio_reg_write(base, reg, val);
+}
+
+static bool winbond_sio_reg_btest(unsigned long base, u8 reg, u8 bit)
+{
+ return winbond_sio_reg_read(base, reg) & BIT(bit);
+}
+
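+/*
+ * A typical access sequence built from the helpers above (a sketch that
+ * mirrors what the gpio_chip callbacks below do):
+ *
+ *	ret = winbond_sio_enter(base);
+ *	if (ret)
+ *		return ret;
+ *	winbond_sio_select_logical(base, dev);
+ *	val = winbond_sio_reg_read(base, reg);
+ *	winbond_sio_leave(base);
+ */
+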
+/**
+ * struct winbond_gpio_port_conflict - possibly conflicting device information
+ * @name: device name (NULL means no conflicting device defined)
+ * @dev: Super I/O logical device number where the testreg register
+ * is located (or WB_SIO_DEV_NONE - don't select any
+ * logical device)
+ * @testreg: register number where the testbit bit is located
+ * @testbit: index of a bit to check whether an actual conflict exists
+ * @warnonly: if set then a conflict isn't fatal (just warn about it),
+ * otherwise disable the particular GPIO port if a conflict
+ * is detected
+ */
+struct winbond_gpio_port_conflict {
+ const char *name;
+ u8 dev;
+ u8 testreg;
+ u8 testbit;
+ bool warnonly;
+};
+
+/**
+ * struct winbond_gpio_info - information about a particular GPIO port (device)
+ * @dev: Super I/O logical device number of the registers
+ * specified below
+ * @enablereg: port enable bit register number
+ * @enablebit: index of a port enable bit
+ * @outputreg: output driver mode bit register number
+ * @outputppbit: index of a push-pull output driver mode bit
+ * @ioreg: data direction register number
+ * @invreg: pin data inversion register number
+ * @datareg: pin data register number
+ * @conflict: description of a device that possibly conflicts with
+ * this port
+ */
+struct winbond_gpio_info {
+ u8 dev;
+ u8 enablereg;
+ u8 enablebit;
+ u8 outputreg;
+ u8 outputppbit;
+ u8 ioreg;
+ u8 invreg;
+ u8 datareg;
+ struct winbond_gpio_port_conflict conflict;
+};
+
+static const struct winbond_gpio_info winbond_gpio_infos[6] = {
+ { /* 0 */
+ .dev = WB_SIO_DEV_GPIO12,
+ .enablereg = WB_SIO_GPIO12_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO12_ENABLE_1,
+ .outputreg = WB_SIO_REG_GPIO1_MF,
+ .outputppbit = WB_SIO_REG_G1MF_G1PP,
+ .ioreg = WB_SIO_GPIO12_REG_IO1,
+ .invreg = WB_SIO_GPIO12_REG_INV1,
+ .datareg = WB_SIO_GPIO12_REG_DATA1,
+ .conflict = {
+ .name = "UARTB",
+ .dev = WB_SIO_DEV_UARTB,
+ .testreg = WB_SIO_UARTB_REG_ENABLE,
+ .testbit = WB_SIO_UARTB_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 1 */
+ .dev = WB_SIO_DEV_GPIO12,
+ .enablereg = WB_SIO_GPIO12_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO12_ENABLE_2,
+ .outputreg = WB_SIO_REG_GPIO1_MF,
+ .outputppbit = WB_SIO_REG_G1MF_G2PP,
+ .ioreg = WB_SIO_GPIO12_REG_IO2,
+ .invreg = WB_SIO_GPIO12_REG_INV2,
+ .datareg = WB_SIO_GPIO12_REG_DATA2
+ /* special conflict handling, so it doesn't use the conflict data */
+ },
+ { /* 2 */
+ .dev = WB_SIO_DEV_GPIO34,
+ .enablereg = WB_SIO_GPIO34_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO34_ENABLE_3,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G3PP,
+ .ioreg = WB_SIO_GPIO34_REG_IO3,
+ .invreg = WB_SIO_GPIO34_REG_INV3,
+ .datareg = WB_SIO_GPIO34_REG_DATA3,
+ .conflict = {
+ .name = "UARTC",
+ .dev = WB_SIO_DEV_UARTC,
+ .testreg = WB_SIO_UARTC_REG_ENABLE,
+ .testbit = WB_SIO_UARTC_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 3 */
+ .dev = WB_SIO_DEV_GPIO34,
+ .enablereg = WB_SIO_GPIO34_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO34_ENABLE_4,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G4PP,
+ .ioreg = WB_SIO_GPIO34_REG_IO4,
+ .invreg = WB_SIO_GPIO34_REG_INV4,
+ .datareg = WB_SIO_GPIO34_REG_DATA4,
+ .conflict = {
+ .name = "UARTD",
+ .dev = WB_SIO_DEV_UARTD,
+ .testreg = WB_SIO_UARTD_REG_ENABLE,
+ .testbit = WB_SIO_UARTD_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 4 */
+ .dev = WB_SIO_DEV_WDGPIO56,
+ .enablereg = WB_SIO_WDGPIO56_REG_ENABLE,
+ .enablebit = WB_SIO_WDGPIO56_ENABLE_5,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G5PP,
+ .ioreg = WB_SIO_WDGPIO56_REG_IO5,
+ .invreg = WB_SIO_WDGPIO56_REG_INV5,
+ .datareg = WB_SIO_WDGPIO56_REG_DATA5,
+ .conflict = {
+ .name = "UARTE",
+ .dev = WB_SIO_DEV_UARTE,
+ .testreg = WB_SIO_UARTE_REG_ENABLE,
+ .testbit = WB_SIO_UARTE_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 5 */
+ .dev = WB_SIO_DEV_WDGPIO56,
+ .enablereg = WB_SIO_WDGPIO56_REG_ENABLE,
+ .enablebit = WB_SIO_WDGPIO56_ENABLE_6,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G6PP,
+ .ioreg = WB_SIO_WDGPIO56_REG_IO6,
+ .invreg = WB_SIO_WDGPIO56_REG_INV6,
+ .datareg = WB_SIO_WDGPIO56_REG_DATA6,
+ .conflict = {
+ .name = "FDC",
+ .dev = WB_SIO_DEV_NONE,
+ .testreg = WB_SIO_REG_GLOBAL_OPT,
+ .testbit = WB_SIO_REG_GO_ENFDC,
+ .warnonly = false
+ }
+ }
+};
+
+/* returns whether changing a pin is allowed */
+static bool winbond_gpio_get_info(unsigned int *gpio_num,
+ const struct winbond_gpio_info **info)
+{
+ bool allow_changing = true;
+ unsigned long i;
+
+ for_each_set_bit(i, &params.gpios, BITS_PER_LONG) {
+ if (*gpio_num < 8)
+ break;
+
+ *gpio_num -= 8;
+ }
+
+ *info = &winbond_gpio_infos[i];
+
+ /*
+ * GPIO2 (the second port) shares some pins with basic PC
+ * functionality, which is very likely controlled by the firmware.
+ * Don't allow changing these pins by default.
+ */
+ if (i == 1) {
+ if (*gpio_num == 0 && !params.pledgpio)
+ allow_changing = false;
+ else if (*gpio_num == 1 && !params.beepgpio)
+ allow_changing = false;
+ else if ((*gpio_num == 5 || *gpio_num == 6) && !params.i2cgpio)
+ allow_changing = false;
+ }
+
+ return allow_changing;
+}
+
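+/*
+ * Example of the mapping above (hypothetical configuration): with
+ * gpios=0x05 (GPIO1 and GPIO3 enabled), chip-relative offset 10 first
+ * skips the 8 pins of the GPIO1 port and resolves to pin 2 of GPIO3,
+ * i.e. *info == &winbond_gpio_infos[2] and *gpio_num == 2.
+ */
+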
+static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+ bool val;
+ int ret;
+
+ winbond_gpio_get_info(&offset, &info);
+
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ val = winbond_sio_reg_btest(*base, info->datareg, offset);
+ if (winbond_sio_reg_btest(*base, info->invreg, offset))
+ val = !val;
+
+ winbond_sio_leave(*base);
+
+ return val;
+}
+
+static int winbond_gpio_direction_in(struct gpio_chip *gc, unsigned int offset)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+ int ret;
+
+ if (!winbond_gpio_get_info(&offset, &info))
+ return -EACCES;
+
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ winbond_sio_reg_bset(*base, info->ioreg, offset);
+
+ winbond_sio_leave(*base);
+
+ return 0;
+}
+
+static int winbond_gpio_direction_out(struct gpio_chip *gc,
+ unsigned int offset,
+ int val)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+ int ret;
+
+ if (!winbond_gpio_get_info(&offset, &info))
+ return -EACCES;
+
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ winbond_sio_reg_bclear(*base, info->ioreg, offset);
+
+ if (winbond_sio_reg_btest(*base, info->invreg, offset))
+ val = !val;
+
+ if (val)
+ winbond_sio_reg_bset(*base, info->datareg, offset);
+ else
+ winbond_sio_reg_bclear(*base, info->datareg, offset);
+
+ winbond_sio_leave(*base);
+
+ return 0;
+}
+
+static void winbond_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int val)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+
+ if (!winbond_gpio_get_info(&offset, &info))
+ return;
+
+ if (winbond_sio_enter(*base) != 0)
+ return;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ if (winbond_sio_reg_btest(*base, info->invreg, offset))
+ val = !val;
+
+ if (val)
+ winbond_sio_reg_bset(*base, info->datareg, offset);
+ else
+ winbond_sio_reg_bclear(*base, info->datareg, offset);
+
+ winbond_sio_leave(*base);
+}
+
+static struct gpio_chip winbond_gpio_chip = {
+ .base = -1,
+ .label = WB_GPIO_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .can_sleep = true,
+ .get = winbond_gpio_get,
+ .direction_input = winbond_gpio_direction_in,
+ .set = winbond_gpio_set,
+ .direction_output = winbond_gpio_direction_out,
+};
+
+static void winbond_gpio_configure_port0_pins(unsigned long base)
+{
+ unsigned int val;
+
+ val = winbond_sio_reg_read(base, WB_SIO_REG_GPIO1_MF);
+ if ((val & WB_SIO_REG_G1MF_FS_MASK) == WB_SIO_REG_G1MF_FS_GPIO1)
+ return;
+
+ pr_warn("GPIO1 pins were connected to something else (%.2x), fixing\n",
+ val);
+
+ val &= ~WB_SIO_REG_G1MF_FS_MASK;
+ val |= WB_SIO_REG_G1MF_FS_GPIO1;
+
+ winbond_sio_reg_write(base, WB_SIO_REG_GPIO1_MF, val);
+}
+
+static void winbond_gpio_configure_port1_check_i2c(unsigned long base)
+{
+ params.i2cgpio = !winbond_sio_reg_btest(base, WB_SIO_REG_I2C_PS,
+ WB_SIO_REG_I2CPS_I2CFS);
+ if (!params.i2cgpio)
+ pr_warn("disabling GPIO2.5 and GPIO2.6 as I2C is enabled\n");
+}
+
+static bool winbond_gpio_configure_port(unsigned long base, unsigned int idx)
+{
+ const struct winbond_gpio_info *info = &winbond_gpio_infos[idx];
+ const struct winbond_gpio_port_conflict *conflict = &info->conflict;
+
+ /* is there a possible conflicting device defined? */
+ if (conflict->name != NULL) {
+ if (conflict->dev != WB_SIO_DEV_NONE)
+ winbond_sio_select_logical(base, conflict->dev);
+
+ if (winbond_sio_reg_btest(base, conflict->testreg,
+ conflict->testbit)) {
+ if (conflict->warnonly)
+ pr_warn("enabled GPIO%u share pins with active %s\n",
+ idx + 1, conflict->name);
+ else {
+ pr_warn("disabling GPIO%u as %s is enabled\n",
+ idx + 1, conflict->name);
+ return false;
+ }
+ }
+ }
+
+ /* GPIO1 and GPIO2 need some (additional) special handling */
+ if (idx == 0)
+ winbond_gpio_configure_port0_pins(base);
+ else if (idx == 1)
+ winbond_gpio_configure_port1_check_i2c(base);
+
+ winbond_sio_select_logical(base, info->dev);
+
+ winbond_sio_reg_bset(base, info->enablereg, info->enablebit);
+
+ if (params.ppgpios & BIT(idx))
+ winbond_sio_reg_bset(base, info->outputreg,
+ info->outputppbit);
+ else if (params.odgpios & BIT(idx))
+ winbond_sio_reg_bclear(base, info->outputreg,
+ info->outputppbit);
+ else
+ pr_notice("GPIO%u pins are %s\n", idx + 1,
+ winbond_sio_reg_btest(base, info->outputreg,
+ info->outputppbit) ?
+ "push-pull" :
+ "open drain");
+
+ return true;
+}
+
+static int winbond_gpio_configure(unsigned long base)
+{
+ unsigned long i;
+
+ for_each_set_bit(i, &params.gpios, BITS_PER_LONG)
+ if (!winbond_gpio_configure_port(base, i))
+ __clear_bit(i, &params.gpios);
+
+ if (!params.gpios) {
+ pr_err("please use 'gpios' module parameter to select some active GPIO ports to enable\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int winbond_gpio_check_chip(unsigned long base)
+{
+ int ret;
+ unsigned int chip;
+
+ ret = winbond_sio_enter(base);
+ if (ret)
+ return ret;
+
+ chip = winbond_sio_reg_read(base, WB_SIO_REG_CHIP_MSB) << 8;
+ chip |= winbond_sio_reg_read(base, WB_SIO_REG_CHIP_LSB);
+
+ pr_notice("chip ID at %lx is %.4x\n", base, chip);
+
+ if ((chip & WB_SIO_CHIP_ID_W83627UHG_MASK) !=
+ WB_SIO_CHIP_ID_W83627UHG) {
+ pr_err("not an our chip\n");
+ ret = -ENODEV;
+ }
+
+ winbond_sio_leave(base);
+
+ return ret;
+}
+
+static int winbond_gpio_imatch(struct device *dev, unsigned int id)
+{
+ unsigned long gpios_rem;
+ int ret;
+
+ gpios_rem = params.gpios & ~GENMASK(ARRAY_SIZE(winbond_gpio_infos) - 1,
+ 0);
+ if (gpios_rem) {
+ pr_warn("unknown ports (%lx) enabled in GPIO ports bitmask\n",
+ gpios_rem);
+ params.gpios &= ~gpios_rem;
+ }
+
+ if (params.ppgpios & params.odgpios) {
+ pr_err("some GPIO ports are set both to push-pull and open drain mode at the same time\n");
+ return 0;
+ }
+
+ if (params.base != 0)
+ return winbond_gpio_check_chip(params.base) == 0;
+
+ /*
+ * if the 'base' module parameter is unset, probe the two default chip
+ * I/O port bases
+ */
+ params.base = WB_SIO_BASE;
+ ret = winbond_gpio_check_chip(params.base);
+ if (ret == 0)
+ return 1;
+ if (ret != -ENODEV && ret != -EBUSY)
+ return 0;
+
+ params.base = WB_SIO_BASE_HIGH;
+ return winbond_gpio_check_chip(params.base) == 0;
+}
+
+static int winbond_gpio_iprobe(struct device *dev, unsigned int id)
+{
+ int ret;
+
+ if (params.base == 0)
+ return -EINVAL;
+
+ ret = winbond_sio_enter(params.base);
+ if (ret)
+ return ret;
+
+ ret = winbond_gpio_configure(params.base);
+
+ winbond_sio_leave(params.base);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Add 8 GPIOs for every GPIO port that was enabled via the 'gpios'
+ * module parameter and wasn't disabled earlier in
+ * winbond_gpio_configure() due to, for example, a pin conflict.
+ */
+ winbond_gpio_chip.ngpio = hweight_long(params.gpios) * 8;
+
+ /*
+ * the GPIO6 port has only 5 pins, so if it is enabled we have to adjust
+ * the total count appropriately
+ */
+ if (params.gpios & BIT(5))
+ winbond_gpio_chip.ngpio -= (8 - 5);
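+ /* e.g. gpios=0x21 (GPIO1 and GPIO6 enabled) gives ngpio = 2 * 8 - 3 = 13 */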
+
+ winbond_gpio_chip.parent = dev;
+
+ return devm_gpiochip_add_data(dev, &winbond_gpio_chip, &params.base);
+}
+
+static struct isa_driver winbond_gpio_idriver = {
+ .driver = {
+ .name = WB_GPIO_DRIVER_NAME,
+ },
+ .match = winbond_gpio_imatch,
+ .probe = winbond_gpio_iprobe,
+};
+
+module_isa_driver(winbond_gpio_idriver, 1);
+
+module_param_named(base, params.base, ulong, 0444);
+MODULE_PARM_DESC(base,
+ "I/O port base (when unset - probe chip default ones)");
+
+/* This parameter sets which GPIO devices (ports) we enable */
+module_param_named(gpios, params.gpios, ulong, 0444);
+MODULE_PARM_DESC(gpios,
+ "bitmask of GPIO ports to enable (bit 0 - GPIO1, bit 1 - GPIO2, etc.");
+
+/*
+ * The two parameters below set how the GPIO port output drivers are
+ * configured. A single bitmask isn't enough since we need three values per
+ * port: push-pull, open drain, and keep as-is (this is the default).
+ */
+module_param_named(ppgpios, params.ppgpios, ulong, 0444);
+MODULE_PARM_DESC(ppgpios,
+ "bitmask of GPIO ports to set to push-pull mode (bit 0 - GPIO1, bit 1 - GPIO2, etc.");
+
+module_param_named(odgpios, params.odgpios, ulong, 0444);
+MODULE_PARM_DESC(odgpios,
+ "bitmask of GPIO ports to set to open drain mode (bit 0 - GPIO1, bit 1 - GPIO2, etc.");
+
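+/*
+ * Example invocation (hypothetical values): "modprobe gpio-winbond
+ * gpios=0x3 ppgpios=0x1 odgpios=0x2" enables the GPIO1 and GPIO2 ports,
+ * putting GPIO1 in push-pull mode and GPIO2 in open drain mode.
+ */
+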
+/*
+ * GPIO2.0 and GPIO2.1 control basic PC functionality that we don't
+ * allow tinkering with by default (it is very likely that the firmware
+ * owns these pins).
+ * The two parameters below allow overriding this restriction.
+ */
+module_param_named(pledgpio, params.pledgpio, bool, 0644);
+MODULE_PARM_DESC(pledgpio,
+ "enable changing value of GPIO2.0 bit (Power LED), default no.");
+
+module_param_named(beepgpio, params.beepgpio, bool, 0644);
+MODULE_PARM_DESC(beepgpio,
+ "enable changing value of GPIO2.1 bit (BEEP), default no.");
+
+MODULE_AUTHOR("Maciej S. Szmigiero <mail@maciej.szmigiero.name>");
+MODULE_DESCRIPTION("GPIO interface for Winbond Super I/O chips");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 2313af82fad3..acd59113e08b 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -139,7 +139,7 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
struct irq_data *irq_data,
- bool early)
+ bool reserve)
{
struct xgene_gpio_sb *priv = d->host_data;
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index eb4528c87c0b..0ecffd172a80 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -414,7 +414,8 @@ EXPORT_SYMBOL_GPL(devm_acpi_dev_remove_driver_gpios);
static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
const char *name, int index,
- struct acpi_reference_args *args)
+ struct acpi_reference_args *args,
+ unsigned int *quirks)
{
const struct acpi_gpio_mapping *gm;
@@ -430,6 +431,8 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
args->args[1] = par->line_index;
args->args[2] = par->active_low;
args->nargs = 3;
+
+ *quirks = gm->quirks;
return true;
}
@@ -461,8 +464,8 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio)
}
}
-int
-acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
+static int
+__acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
{
int ret = 0;
@@ -489,12 +492,31 @@ acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
return ret;
}
+int
+acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *info)
+{
+ struct device *dev = &info->adev->dev;
+ enum gpiod_flags old = *flags;
+ int ret;
+
+ ret = __acpi_gpio_update_gpiod_flags(&old, info->flags);
+ if (info->quirks & ACPI_GPIO_QUIRK_NO_IO_RESTRICTION) {
+ if (ret)
+ dev_warn(dev, FW_BUG "GPIO not in correct mode, fixing\n");
+ } else {
+ if (ret)
+ dev_dbg(dev, "Override GPIO initialization flags\n");
+ *flags = old;
+ }
+
+ return ret;
+}
+
struct acpi_gpio_lookup {
struct acpi_gpio_info info;
int index;
int pin_index;
bool active_low;
- struct acpi_device *adev;
struct gpio_desc *desc;
int n;
};
@@ -531,8 +553,8 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
lookup->info.triggering = agpio->triggering;
} else {
lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio);
+ lookup->info.polarity = lookup->active_low;
}
-
}
return 1;
@@ -541,12 +563,13 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
struct acpi_gpio_info *info)
{
+ struct acpi_device *adev = lookup->info.adev;
struct list_head res_list;
int ret;
INIT_LIST_HEAD(&res_list);
- ret = acpi_dev_get_resources(lookup->adev, &res_list,
+ ret = acpi_dev_get_resources(adev, &res_list,
acpi_populate_gpio_lookup,
lookup);
if (ret < 0)
@@ -557,11 +580,8 @@ static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
if (!lookup->desc)
return -ENOENT;
- if (info) {
+ if (info)
*info = lookup->info;
- if (lookup->active_low)
- info->polarity = lookup->active_low;
- }
return 0;
}
@@ -570,6 +590,7 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
struct acpi_gpio_lookup *lookup)
{
struct acpi_reference_args args;
+ unsigned int quirks = 0;
int ret;
memset(&args, 0, sizeof(args));
@@ -581,14 +602,14 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
if (!adev)
return ret;
- if (!acpi_get_driver_gpio_data(adev, propname, index, &args))
+ if (!acpi_get_driver_gpio_data(adev, propname, index, &args,
+ &quirks))
return ret;
}
/*
* The property was found and resolved, so need to lookup the GPIO based
* on returned args.
*/
- lookup->adev = args.adev;
if (args.nargs != 3)
return -EPROTO;
@@ -596,6 +617,8 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
lookup->pin_index = args.args[1];
lookup->active_low = !!args.args[2];
+ lookup->info.adev = args.adev;
+ lookup->info.quirks = quirks;
return 0;
}
@@ -643,11 +666,11 @@ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
return ERR_PTR(ret);
dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %d %u\n",
- dev_name(&lookup.adev->dev), lookup.index,
+ dev_name(&lookup.info.adev->dev), lookup.index,
lookup.pin_index, lookup.active_low);
} else {
dev_dbg(&adev->dev, "GPIO: looking up %d in _CRS\n", index);
- lookup.adev = adev;
+ lookup.info.adev = adev;
}
ret = acpi_gpio_resource_lookup(&lookup, info);
@@ -664,7 +687,6 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
struct acpi_gpio_info info;
struct gpio_desc *desc;
char propname[32];
- int err;
int i;
/* Try first from _DSD */
@@ -703,10 +725,7 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
if (info.polarity == GPIO_ACTIVE_LOW)
*lookupflags |= GPIO_ACTIVE_LOW;
- err = acpi_gpio_update_gpiod_flags(dflags, info.flags);
- if (err)
- dev_dbg(dev, "Override GPIO initialization flags\n");
-
+ acpi_gpio_update_gpiod_flags(dflags, &info);
return desc;
}
@@ -1074,7 +1093,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
}
if (!chip->names)
- devprop_gpiochip_set_names(chip);
+ devprop_gpiochip_set_names(chip, dev_fwnode(chip->parent));
acpi_gpiochip_request_regions(acpi_gpio);
acpi_gpiochip_scan_gpios(acpi_gpio);
diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c
index 27f383bda7d9..f748aa3e77f7 100644
--- a/drivers/gpio/gpiolib-devprop.c
+++ b/drivers/gpio/gpiolib-devprop.c
@@ -19,30 +19,27 @@
/**
* devprop_gpiochip_set_names - Set GPIO line names using device properties
* @chip: GPIO chip whose lines should be named, if possible
+ * @fwnode: Property Node containing the gpio-line-names property
*
* Looks for device property "gpio-line-names" and if it exists assigns
* GPIO line names for the chip. The memory allocated for the assigned
* names belong to the underlying firmware node and should not be released
* by the caller.
*/
-void devprop_gpiochip_set_names(struct gpio_chip *chip)
+void devprop_gpiochip_set_names(struct gpio_chip *chip,
+ const struct fwnode_handle *fwnode)
{
struct gpio_device *gdev = chip->gpiodev;
const char **names;
int ret, i;
- if (!chip->parent) {
- dev_warn(&gdev->dev, "GPIO chip parent is NULL\n");
- return;
- }
-
- ret = device_property_read_string_array(chip->parent, "gpio-line-names",
+ ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
NULL, 0);
if (ret < 0)
return;
if (ret != gdev->ngpio) {
- dev_warn(chip->parent,
+ dev_warn(&gdev->dev,
"names %d do not match number of GPIOs %d\n", ret,
gdev->ngpio);
return;
@@ -52,10 +49,10 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip)
if (!names)
return;
- ret = device_property_read_string_array(chip->parent, "gpio-line-names",
+ ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
names, gdev->ngpio);
if (ret < 0) {
- dev_warn(chip->parent, "failed to read GPIO line names\n");
+ dev_warn(&gdev->dev, "failed to read GPIO line names\n");
kfree(names);
return;
}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index e0d59e61b52f..564bb7a31da4 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -56,6 +56,42 @@ static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
return gpiochip_get_desc(chip, ret);
}
+static void of_gpio_flags_quirks(struct device_node *np,
+ enum of_gpio_flags *flags)
+{
+ /*
+ * Some GPIO fixed regulator quirks.
+ * Note that active low is the default.
+ */
+ if (IS_ENABLED(CONFIG_REGULATOR) &&
+ (of_device_is_compatible(np, "reg-fixed-voltage") ||
+ of_device_is_compatible(np, "regulator-gpio"))) {
+ /*
+ * The regulator GPIO handles are specified such that the
+ * presence or absence of "enable-active-high" solely controls
+ * the polarity of the GPIO line. Any phandle flags must
+ * be actively ignored.
+ */
+ if (*flags & OF_GPIO_ACTIVE_LOW) {
+ pr_warn("%s GPIO handle specifies active low - ignored\n",
+ of_node_full_name(np));
+ *flags &= ~OF_GPIO_ACTIVE_LOW;
+ }
+ if (!of_property_read_bool(np, "enable-active-high"))
+ *flags |= OF_GPIO_ACTIVE_LOW;
+ }
+ /*
+ * Legacy open drain handling for fixed voltage regulators.
+ */
+ if (IS_ENABLED(CONFIG_REGULATOR) &&
+ of_device_is_compatible(np, "reg-fixed-voltage") &&
+ of_property_read_bool(np, "gpio-open-drain")) {
+ *flags |= (OF_GPIO_SINGLE_ENDED | OF_GPIO_OPEN_DRAIN);
+ pr_info("%s uses legacy open drain flag - update the DTS if you can\n",
+ of_node_full_name(np));
+ }
+}
+
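+/*
+ * For illustration (a sketch, not copied from the binding documents), a
+ * node like:
+ *
+ *	compatible = "reg-fixed-voltage";
+ *	gpio = <&gpio1 16 GPIO_ACTIVE_LOW>;
+ *
+ * takes the quirk path above: the GPIO_ACTIVE_LOW phandle flag is warned
+ * about and ignored, and the line polarity is derived solely from the
+ * presence or absence of "enable-active-high".
+ */
+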
/**
* of_get_named_gpiod_flags() - Get a GPIO descriptor and flags for GPIO API
* @np: device node to get GPIO from
@@ -93,6 +129,9 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
if (IS_ERR(desc))
goto out;
+ if (flags)
+ of_gpio_flags_quirks(np, flags);
+
pr_debug("%s: parsed '%s' property of node '%pOF[%d]' - status (%d)\n",
__func__, propname, np, index,
PTR_ERR_OR_ZERO(desc));
@@ -117,6 +156,71 @@ int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
}
EXPORT_SYMBOL(of_get_named_gpio_flags);
+/*
+ * The SPI GPIO bindings happened before we managed to establish that GPIO
+ * properties should be named "foo-gpios" so we have this special kludge for
+ * them.
+ */
+static struct gpio_desc *of_find_spi_gpio(struct device *dev, const char *con_id,
+ enum of_gpio_flags *of_flags)
+{
+ char prop_name[32]; /* 32 is max size of property name */
+ struct device_node *np = dev->of_node;
+ struct gpio_desc *desc;
+
+ /*
+ * Hopefully the compiler stubs the rest of the function if this
+ * is false.
+ */
+ if (!IS_ENABLED(CONFIG_SPI_MASTER))
+ return ERR_PTR(-ENOENT);
+
+ /* Allow this specifically for "spi-gpio" devices */
+ if (!of_device_is_compatible(np, "spi-gpio") || !con_id)
+ return ERR_PTR(-ENOENT);
+
+ /* Will be "gpio-sck", "gpio-mosi" or "gpio-miso" */
+ snprintf(prop_name, sizeof(prop_name), "%s-%s", "gpio", con_id);
+
+ desc = of_get_named_gpiod_flags(np, prop_name, 0, of_flags);
+ return desc;
+}
+
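+/*
+ * For illustration (sketch): on a node with compatible = "spi-gpio", a
+ * con_id of "miso" is resolved through the legacy "gpio-miso" property
+ * instead of a "miso-gpios" one.
+ */
+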
+/*
+ * Some regulator bindings happened before we managed to establish that GPIO
+ * properties should be named "foo-gpios" so we have this special kludge for
+ * them.
+ */
+static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *con_id,
+ enum of_gpio_flags *of_flags)
+{
+ /* These are the connection IDs we accept as legacy GPIO phandles */
+ const char *whitelist[] = {
+ "wlf,ldoena", /* Arizona */
+ "wlf,ldo1ena", /* WM8994 */
+ "wlf,ldo2ena", /* WM8994 */
+ };
+ struct device_node *np = dev->of_node;
+ struct gpio_desc *desc;
+ int i;
+
+ if (!IS_ENABLED(CONFIG_REGULATOR))
+ return ERR_PTR(-ENOENT);
+
+ if (!con_id)
+ return ERR_PTR(-ENOENT);
+
+ for (i = 0; i < ARRAY_SIZE(whitelist); i++)
+ if (!strcmp(con_id, whitelist[i]))
+ break;
+
+ if (i == ARRAY_SIZE(whitelist))
+ return ERR_PTR(-ENOENT);
+
+ desc = of_get_named_gpiod_flags(np, con_id, 0, of_flags);
+ return desc;
+}
+
struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
unsigned int idx,
enum gpio_lookup_flags *flags)
@@ -126,6 +230,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
struct gpio_desc *desc;
unsigned int i;
+ /* Try GPIO property "foo-gpios" and "foo-gpio" */
for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
if (con_id)
snprintf(prop_name, sizeof(prop_name), "%s-%s", con_id,
@@ -140,6 +245,14 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
break;
}
+ /* Special handling for SPI GPIOs if used */
+ if (IS_ERR(desc))
+ desc = of_find_spi_gpio(dev, con_id, &of_flags);
+
+ /* Special handling for regulator GPIOs if used */
+ if (IS_ERR(desc))
+ desc = of_find_regulator_gpio(dev, con_id, &of_flags);
+
if (IS_ERR(desc))
return desc;
@@ -153,8 +266,8 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
*flags |= GPIO_OPEN_SOURCE;
}
- if (of_flags & OF_GPIO_SLEEP_MAY_LOSE_VALUE)
- *flags |= GPIO_SLEEP_MAY_LOSE_VALUE;
+ if (of_flags & OF_GPIO_TRANSITORY)
+ *flags |= GPIO_TRANSITORY;
return desc;
}
@@ -214,6 +327,8 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
if (xlate_flags & OF_GPIO_ACTIVE_LOW)
*lflags |= GPIO_ACTIVE_LOW;
+ if (xlate_flags & OF_GPIO_TRANSITORY)
+ *lflags |= GPIO_TRANSITORY;
if (of_property_read_bool(np, "input"))
*dflags |= GPIOD_IN;
@@ -493,7 +608,8 @@ int of_gpiochip_add(struct gpio_chip *chip)
/* If the chip defines names itself, these take precedence */
if (!chip->names)
- devprop_gpiochip_set_names(chip);
+ devprop_gpiochip_set_names(chip,
+ of_fwnode_handle(chip->of_node));
of_node_get(chip->of_node);
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 3f454eaf2101..3dbaf489a8a5 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
+#include <linux/ctype.h>
#include "gpiolib.h"
@@ -106,8 +107,14 @@ static ssize_t value_show(struct device *dev,
mutex_lock(&data->mutex);
- status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc));
+ status = gpiod_get_value_cansleep(desc);
+ if (status < 0)
+ goto err;
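+ /* gpiod_get_value_cansleep() returned 0 or 1, so one digit suffices */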
+ buf[0] = '0' + status;
+ buf[1] = '\n';
+ status = 2;
+err:
mutex_unlock(&data->mutex);
return status;
@@ -118,7 +125,7 @@ static ssize_t value_store(struct device *dev,
{
struct gpiod_data *data = dev_get_drvdata(dev);
struct gpio_desc *desc = data->desc;
- ssize_t status;
+ ssize_t status = 0;
mutex_lock(&data->mutex);
@@ -127,7 +134,11 @@ static ssize_t value_store(struct device *dev,
} else {
long value;
- status = kstrtol(buf, 0, &value);
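+ /*
+ * Fast path for the common single-digit writes such as "0" or
+ * "1\n"; anything longer falls back to kstrtol().
+ */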
+ if (size <= 2 && isdigit(buf[0]) &&
+ (size == 1 || buf[1] == '\n'))
+ value = buf[0] - '0';
+ else
+ status = kstrtol(buf, 0, &value);
if (status == 0) {
gpiod_set_value_cansleep(desc, value);
status = size;
@@ -138,7 +149,7 @@ static ssize_t value_store(struct device *dev,
return status;
}
-static DEVICE_ATTR_RW(value);
+static DEVICE_ATTR_PREALLOC(value, S_IWUSR | S_IRUGO, value_show, value_store);
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
{
@@ -474,11 +485,15 @@ static ssize_t export_store(struct class *class,
status = -ENODEV;
goto done;
}
- status = gpiod_export(desc, true);
- if (status < 0)
- gpiod_free(desc);
- else
- set_bit(FLAG_SYSFS, &desc->flags);
+
+ status = gpiod_set_transitory(desc, false);
+ if (!status) {
+ status = gpiod_export(desc, true);
+ if (status < 0)
+ gpiod_free(desc);
+ else
+ set_bit(FLAG_SYSFS, &desc->flags);
+ }
done:
if (status)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index aad84a6306c4..36ca5064486e 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -73,7 +73,8 @@ LIST_HEAD(gpio_devices);
static void gpiochip_free_hogs(struct gpio_chip *chip);
static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
- struct lock_class_key *key);
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
@@ -161,7 +162,7 @@ EXPORT_SYMBOL_GPL(desc_to_gpio);
*/
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
- if (!desc || !desc->gdev || !desc->gdev->chip)
+ if (!desc || !desc->gdev)
return NULL;
return desc->gdev->chip;
}
@@ -195,7 +196,7 @@ static int gpiochip_find_base(int ngpio)
* gpiod_get_direction - return the current direction of a GPIO
* @desc: GPIO to get the direction of
*
- * Return GPIOF_DIR_IN or GPIOF_DIR_OUT, or an error code in case of error.
+ * Returns 0 for output, 1 for input, or an error code in case of error.
*
* This function may sleep if gpiod_cansleep() is true.
*/
@@ -459,6 +460,15 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
return -EINVAL;
+ /*
+ * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
+ * the hardware actually supports enabling both at the same time the
+ * electrical result would be disastrous.
+ */
+ if ((lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
+ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
+ return -EINVAL;
+
/* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
if (!(lflags & GPIOHANDLE_REQUEST_OUTPUT) &&
((lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
@@ -505,6 +515,10 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ ret = gpiod_set_transitory(desc, false);
+ if (ret < 0)
+ goto out_free_descs;
+
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
@@ -587,6 +601,9 @@ out_free_lh:
* @events: KFIFO for the GPIO events
* @read_lock: mutex lock to protect reads from colliding with adding
* new events to the FIFO
+ * @timestamp: cache for the timestamp, carrying it from the hardirq
+ * handler to the IRQ thread so the reported timestamp is as close as
+ * possible to the actual event
*/
struct lineevent_state {
struct gpio_device *gdev;
@@ -597,17 +614,18 @@ struct lineevent_state {
wait_queue_head_t wait;
DECLARE_KFIFO(events, struct gpioevent_data, 16);
struct mutex read_lock;
+ u64 timestamp;
};
#define GPIOEVENT_REQUEST_VALID_FLAGS \
(GPIOEVENT_REQUEST_RISING_EDGE | \
GPIOEVENT_REQUEST_FALLING_EDGE)
-static unsigned int lineevent_poll(struct file *filep,
+static __poll_t lineevent_poll(struct file *filep,
struct poll_table_struct *wait)
{
struct lineevent_state *le = filep->private_data;
- unsigned int events = 0;
+ __poll_t events = 0;
poll_wait(filep, &le->wait, wait);
@@ -731,7 +749,10 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
struct gpioevent_data ge;
int ret, level;
- ge.timestamp = ktime_get_real_ns();
+ /* Do not leak kernel stack to userspace */
+ memset(&ge, 0, sizeof(ge));
+
+ ge.timestamp = le->timestamp;
level = gpiod_get_value_cansleep(le->desc);
if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
@@ -759,6 +780,19 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
return IRQ_HANDLED;
}
+static irqreturn_t lineevent_irq_handler(int irq, void *p)
+{
+ struct lineevent_state *le = p;
+
+ /*
+ * Just store the timestamp in hardirq context so we get it as
+ * close in time as possible to the actual event.
+ */
+ le->timestamp = ktime_get_real_ns();
+
+ return IRQ_WAKE_THREAD;
+}
+
static int lineevent_create(struct gpio_device *gdev, void __user *ip)
{
struct gpioevent_request eventreq;
@@ -851,7 +885,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
/* Request a thread to read the events */
ret = request_threaded_irq(le->irq,
- NULL,
+ lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
le->label,
@@ -1049,7 +1083,7 @@ static void gpiodevice_release(struct device *dev)
list_del(&gdev->list);
ida_simple_remove(&gpio_ida, gdev->id);
- kfree(gdev->label);
+ kfree_const(gdev->label);
kfree(gdev->descs);
kfree(gdev);
}
@@ -1100,7 +1134,8 @@ static void gpiochip_setup_devs(void)
}
int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
- struct lock_class_key *key)
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key)
{
unsigned long flags;
int status = 0;
@@ -1157,10 +1192,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
goto err_free_descs;
}
- if (chip->label)
- gdev->label = kstrdup(chip->label, GFP_KERNEL);
- else
- gdev->label = kstrdup("unknown", GFP_KERNEL);
+ gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL);
if (!gdev->label) {
status = -ENOMEM;
goto err_free_descs;
@@ -1207,31 +1239,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
struct gpio_desc *desc = &gdev->descs[i];
desc->gdev = gdev;
- /*
- * REVISIT: most hardware initializes GPIOs as inputs
- * (often with pullups enabled) so power usage is
- * minimized. Linux code should set the gpio direction
- * first thing; but until it does, and in case
- * chip->get_direction is not set, we may expose the
- * wrong direction in sysfs.
- */
-
- if (chip->get_direction) {
- /*
- * If we have .get_direction, set up the initial
- * direction flag from the hardware.
- */
- int dir = chip->get_direction(chip, i);
- if (!dir)
- set_bit(FLAG_IS_OUT, &desc->flags);
- } else if (!chip->direction_input) {
- /*
- * If the chip lacks the .direction_input callback
- * we logically assume all lines are outputs.
- */
- set_bit(FLAG_IS_OUT, &desc->flags);
- }
+ /* REVISIT: most hardware initializes GPIOs as inputs (often
+ * with pullups enabled) so power usage is minimized. Linux
+ * code should set the gpio direction first thing; but until
+ * it does, and in case chip->get_direction is not set, we may
+ * expose the wrong direction in sysfs.
+ */
+ desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
}
#ifdef CONFIG_PINCTRL
@@ -1246,7 +1261,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
if (status)
goto err_remove_from_list;
- status = gpiochip_add_irqchip(chip, key);
+ status = gpiochip_add_irqchip(chip, lock_key, request_key);
if (status)
goto err_remove_chip;
@@ -1281,7 +1296,7 @@ err_remove_from_list:
list_del(&gdev->list);
spin_unlock_irqrestore(&gpio_lock, flags);
err_free_label:
- kfree(gdev->label);
+ kfree_const(gdev->label);
err_free_descs:
kfree(gdev->descs);
err_free_gdev:
@@ -1381,7 +1396,7 @@ static int devm_gpio_chip_match(struct device *dev, void *res, void *data)
}
/**
- * devm_gpiochip_add_data() - Resource manager piochip_add_data()
+ * devm_gpiochip_add_data() - Resource manager gpiochip_add_data()
* @dev: the device pointer on which irq_chip belongs to.
* @chip: the chip to register, with chip->base initialized
* @data: driver-private data associated with this chip
@@ -1508,14 +1523,15 @@ static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
gpiochip->irq.valid_mask = NULL;
}
-static bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
- unsigned int offset)
+bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
+ unsigned int offset)
{
/* No mask means all valid */
if (likely(!gpiochip->irq.valid_mask))
return true;
return test_bit(offset, gpiochip->irq.valid_mask);
}
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
/**
* gpiochip_set_cascaded_irqchip() - connects a cascaded irqchip to a gpiochip
@@ -1632,7 +1648,7 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
* This lock class tells lockdep that GPIO irqs are in a different
* category than their parents, so it won't report false recursion.
*/
- irq_set_lockdep_class(irq, chip->irq.lock_key);
+ irq_set_lockdep_class(irq, chip->irq.lock_key, chip->irq.request_key);
irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler);
/* Chips that use nested thread handlers have them marked */
if (chip->irq.threaded)
@@ -1712,10 +1728,12 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
/**
* gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip
* @gpiochip: the GPIO chip to add the IRQ chip to
- * @lock_key: lockdep class
+ * @lock_key: lockdep class for IRQ lock
+ * @request_key: lockdep class for IRQ request
*/
static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
- struct lock_class_key *lock_key)
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key)
{
struct irq_chip *irqchip = gpiochip->irq.chip;
const struct irq_domain_ops *ops;
@@ -1753,6 +1771,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
gpiochip->to_irq = gpiochip_to_irq;
gpiochip->irq.default_type = type;
gpiochip->irq.lock_key = lock_key;
+ gpiochip->irq.request_key = request_key;
if (gpiochip->irq.domain_ops)
ops = gpiochip->irq.domain_ops;
@@ -1850,7 +1869,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
* to have the core avoid setting up any default type in the hardware.
* @threaded: whether this irqchip uses a nested thread handler
- * @lock_key: lockdep class
+ * @lock_key: lockdep class for IRQ lock
+ * @request_key: lockdep class for IRQ request
*
* This function closely associates a certain irqchip with a certain
* gpiochip, providing an irq domain to translate the local IRQs to
@@ -1872,7 +1892,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
irq_flow_handler_t handler,
unsigned int type,
bool threaded,
- struct lock_class_key *lock_key)
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key)
{
struct device_node *of_node;
@@ -1913,6 +1934,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
gpiochip->irq.default_type = type;
gpiochip->to_irq = gpiochip_to_irq;
gpiochip->irq.lock_key = lock_key;
+ gpiochip->irq.request_key = request_key;
gpiochip->irq.domain = irq_domain_add_simple(of_node,
gpiochip->ngpio, first_irq,
&gpiochip_domain_ops, gpiochip);
@@ -1940,7 +1962,8 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
#else /* CONFIG_GPIOLIB_IRQCHIP */
static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
- struct lock_class_key *key)
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key)
{
return 0;
}
@@ -2165,40 +2188,37 @@ done:
* macro to avoid endless duplication. If the desc is NULL it is an
* optional GPIO and calls should just bail out.
*/
+static int validate_desc(const struct gpio_desc *desc, const char *func)
+{
+ if (!desc)
+ return 0;
+ if (IS_ERR(desc)) {
+ pr_warn("%s: invalid GPIO (errorpointer)\n", func);
+ return PTR_ERR(desc);
+ }
+ if (!desc->gdev) {
+ pr_warn("%s: invalid GPIO (no device)\n", func);
+ return -EINVAL;
+ }
+ if (!desc->gdev->chip) {
+ dev_warn(&desc->gdev->dev,
+ "%s: backing chip is gone\n", func);
+ return 0;
+ }
+ return 1;
+}
+
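+/*
+ * validate_desc() returns 1 when the descriptor is usable, 0 when the
+ * caller should bail out silently (an optional GPIO or a backing chip
+ * that is gone) and a negative errno on a real error; the macros below
+ * map that onto the two return styles.
+ */
+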
#define VALIDATE_DESC(desc) do { \
- if (!desc) \
- return 0; \
- if (IS_ERR(desc)) { \
- pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
- return PTR_ERR(desc); \
- } \
- if (!desc->gdev) { \
- pr_warn("%s: invalid GPIO (no device)\n", __func__); \
- return -EINVAL; \
- } \
- if ( !desc->gdev->chip ) { \
- dev_warn(&desc->gdev->dev, \
- "%s: backing chip is gone\n", __func__); \
- return 0; \
- } } while (0)
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
+ return __valid; \
+ } while (0)
#define VALIDATE_DESC_VOID(desc) do { \
- if (!desc) \
- return; \
- if (IS_ERR(desc)) { \
- pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
- return; \
- } \
- if (!desc->gdev) { \
- pr_warn("%s: invalid GPIO (no device)\n", __func__); \
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
return; \
- } \
- if (!desc->gdev->chip) { \
- dev_warn(&desc->gdev->dev, \
- "%s: backing chip is gone\n", __func__); \
- return; \
- } } while (0)
-
+ } while (0)
int gpiod_request(struct gpio_desc *desc, const char *label)
{
@@ -2447,7 +2467,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
*/
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
- struct gpio_chip *gc = desc->gdev->chip;
+ struct gpio_chip *gc;
int ret;
VALIDATE_DESC(desc);
@@ -2464,6 +2484,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
return -EIO;
}
+ gc = desc->gdev->chip;
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
/* First see if we can enable open drain in hardware */
ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
@@ -2521,6 +2542,50 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
/**
+ * gpiod_set_transitory - Lose or retain GPIO state on suspend or reset
+ * @desc: descriptor of the GPIO for which to configure persistence
+ * @transitory: True to lose state on suspend or reset, false for persistence
+ *
+ * Returns:
+ * 0 on success, otherwise a negative error code.
+ */
+int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
+{
+ struct gpio_chip *chip;
+ unsigned long packed;
+ int gpio;
+ int rc;
+
+ VALIDATE_DESC(desc);
+ /*
+ * Handle FLAG_TRANSITORY first, enabling queries to gpiolib for
+ * persistence state.
+ */
+ if (transitory)
+ set_bit(FLAG_TRANSITORY, &desc->flags);
+ else
+ clear_bit(FLAG_TRANSITORY, &desc->flags);
+
+ /* If the driver supports it, set the persistence state now */
+ chip = desc->gdev->chip;
+ if (!chip->set_config)
+ return 0;
+
+ packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
+ !transitory);
+ gpio = gpio_chip_hwgpio(desc);
+ rc = chip->set_config(chip, gpio, packed);
+ if (rc == -ENOTSUPP) {
+ dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
+ gpio);
+ return 0;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gpiod_set_transitory);
+
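+/*
+ * Sketch of a consumer (hypothetical driver code) marking an already
+ * requested line as allowed to lose its state across suspend:
+ *
+ *	ret = gpiod_set_transitory(desc, true);
+ *	if (ret)
+ *		return ret;
+ */
+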
+/**
* gpiod_is_active_low - test whether a GPIO is active-low or not
* @desc: the gpio descriptor to test
*
@@ -2884,6 +2949,27 @@ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
/**
+ * gpiod_set_value_nocheck() - set a GPIO line value without checking
+ * @desc: the descriptor to set the value on
+ * @value: value to set
+ *
+ * This sets the value of a GPIO line backing a descriptor, applying
+ * different semantic quirks like active low and open drain/source
+ * handling.
+ */
+static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
+{
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ gpio_set_open_drain_value_commit(desc, value);
+ else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ gpio_set_open_source_value_commit(desc, value);
+ else
+ gpiod_set_raw_value_commit(desc, value);
+}
+
+/**
* gpiod_set_value() - assign a gpio's value
* @desc: gpio whose value will be assigned
* @value: value to assign
@@ -2897,16 +2983,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
void gpiod_set_value(struct gpio_desc *desc, int value)
{
VALIDATE_DESC_VOID(desc);
- /* Should be using gpiod_set_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
- value = !value;
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- gpio_set_open_drain_value_commit(desc, value);
- else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- gpio_set_open_source_value_commit(desc, value);
- else
- gpiod_set_raw_value_commit(desc, value);
+ gpiod_set_value_nocheck(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value);
@@ -3107,8 +3185,7 @@ bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset)
if (offset >= chip->ngpio)
return false;
- return !test_bit(FLAG_SLEEP_MAY_LOSE_VALUE,
- &chip->gpiodev->descs[offset].flags);
+ return !test_bit(FLAG_TRANSITORY, &chip->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
@@ -3234,9 +3311,7 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep_if(extra_checks);
VALIDATE_DESC_VOID(desc);
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
- value = !value;
- gpiod_set_raw_value_commit(desc, value);
+ gpiod_set_value_nocheck(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
@@ -3545,8 +3620,10 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
if (lflags & GPIO_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
- if (lflags & GPIO_SLEEP_MAY_LOSE_VALUE)
- set_bit(FLAG_SLEEP_MAY_LOSE_VALUE, &desc->flags);
+
+ status = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
+ if (status < 0)
+ return status;
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
@@ -3586,6 +3663,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
struct gpio_desc *desc = NULL;
int status;
enum gpio_lookup_flags lookupflags = 0;
+ /* Maybe we have a device name, maybe not */
+ const char *devname = dev ? dev_name(dev) : "?";
dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
@@ -3614,7 +3693,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
return desc;
}
- status = gpiod_request(desc, con_id);
+ /*
+ * If a connection label was passed, use that; else attempt to use
+ * the device name as the label
+ */
+ status = gpiod_request(desc, con_id ? con_id : devname);
if (status < 0)
return ERR_PTR(status);
@@ -3630,17 +3713,88 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
EXPORT_SYMBOL_GPL(gpiod_get_index);
/**
+ * gpiod_get_from_of_node() - obtain a GPIO from an OF node
+ * @node: handle of the OF node
+ * @propname: name of the DT property representing the GPIO
+ * @index: index of the GPIO to obtain for the consumer
+ * @dflags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * Returns:
+ * On successful request the GPIO pin is configured in accordance with
+ * provided @dflags. If the node does not have the requested GPIO
+ * property, NULL is returned.
+ *
+ * In case of error an ERR_PTR() is returned.
+ */
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ struct gpio_desc *desc;
+ unsigned long lflags = 0;
+ enum of_gpio_flags flags;
+ bool active_low = false;
+ bool single_ended = false;
+ bool open_drain = false;
+ bool transitory = false;
+ int ret;
+
+ desc = of_get_named_gpiod_flags(node, propname,
+ index, &flags);
+
+ if (!desc || IS_ERR(desc)) {
+ /* If it is not there, just return NULL */
+ if (PTR_ERR(desc) == -ENOENT)
+ return NULL;
+ return desc;
+ }
+
+ active_low = flags & OF_GPIO_ACTIVE_LOW;
+ single_ended = flags & OF_GPIO_SINGLE_ENDED;
+ open_drain = flags & OF_GPIO_OPEN_DRAIN;
+ transitory = flags & OF_GPIO_TRANSITORY;
+
+ ret = gpiod_request(desc, label);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (active_low)
+ lflags |= GPIO_ACTIVE_LOW;
+
+ if (single_ended) {
+ if (open_drain)
+ lflags |= GPIO_OPEN_DRAIN;
+ else
+ lflags |= GPIO_OPEN_SOURCE;
+ }
+
+ if (transitory)
+ lflags |= GPIO_TRANSITORY;
+
+ ret = gpiod_configure_flags(desc, propname, lflags, dflags);
+ if (ret < 0) {
+ gpiod_put(desc);
+ return ERR_PTR(ret);
+ }
+
+ return desc;
+}
+EXPORT_SYMBOL(gpiod_get_from_of_node);
+
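+/*
+ * Example caller (a sketch with a hypothetical property name):
+ *
+ *	desc = gpiod_get_from_of_node(np, "enable-gpios", 0,
+ *				      GPIOD_OUT_LOW, "enable");
+ *	if (IS_ERR(desc))
+ *		return PTR_ERR(desc);
+ */
+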
+/**
* fwnode_get_named_gpiod - obtain a GPIO from firmware node
* @fwnode: handle of the firmware node
* @propname: name of the firmware property representing the GPIO
- * @index: index of the GPIO to obtain in the consumer
+ * @index: index of the GPIO to obtain for the consumer
* @dflags: GPIO initialization flags
* @label: label to attach to the requested GPIO
*
* This function can be used for drivers that get their configuration
- * from firmware.
+ * from opaque firmware.
*
- * Function properly finds the corresponding GPIO using whatever is the
+ * The function properly finds the corresponding GPIO using whatever is the
* underlying firmware interface and then makes sure that the GPIO
* descriptor is requested before it is returned to the caller.
*
@@ -3657,53 +3811,35 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
{
struct gpio_desc *desc = ERR_PTR(-ENODEV);
unsigned long lflags = 0;
- bool active_low = false;
- bool single_ended = false;
- bool open_drain = false;
int ret;
if (!fwnode)
return ERR_PTR(-EINVAL);
if (is_of_node(fwnode)) {
- enum of_gpio_flags flags;
-
- desc = of_get_named_gpiod_flags(to_of_node(fwnode), propname,
- index, &flags);
- if (!IS_ERR(desc)) {
- active_low = flags & OF_GPIO_ACTIVE_LOW;
- single_ended = flags & OF_GPIO_SINGLE_ENDED;
- open_drain = flags & OF_GPIO_OPEN_DRAIN;
- }
+ desc = gpiod_get_from_of_node(to_of_node(fwnode),
+ propname, index,
+ dflags,
+ label);
+ return desc;
} else if (is_acpi_node(fwnode)) {
struct acpi_gpio_info info;
desc = acpi_node_get_gpiod(fwnode, propname, index, &info);
- if (!IS_ERR(desc)) {
- active_low = info.polarity == GPIO_ACTIVE_LOW;
- ret = acpi_gpio_update_gpiod_flags(&dflags, info.flags);
- if (ret)
- pr_debug("Override GPIO initialization flags\n");
- }
- }
+ if (IS_ERR(desc))
+ return desc;
- if (IS_ERR(desc))
- return desc;
+ acpi_gpio_update_gpiod_flags(&dflags, &info);
+
+ if (info.polarity == GPIO_ACTIVE_LOW)
+ lflags |= GPIO_ACTIVE_LOW;
+ }
+ /* Currently only ACPI takes this path */
ret = gpiod_request(desc, label);
if (ret)
return ERR_PTR(ret);
- if (active_low)
- lflags |= GPIO_ACTIVE_LOW;
-
- if (single_ended) {
- if (open_drain)
- lflags |= GPIO_OPEN_DRAIN;
- else
- lflags |= GPIO_OPEN_SOURCE;
- }
-
ret = gpiod_configure_flags(desc, propname, lflags, dflags);
if (ret < 0) {
gpiod_put(desc);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index af48322839c3..b17ec6795c81 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -58,7 +58,7 @@ struct gpio_device {
struct gpio_desc *descs;
int base;
u16 ngpio;
- char *label;
+ const char *label;
void *data;
struct list_head list;
@@ -75,16 +75,20 @@ struct gpio_device {
/**
* struct acpi_gpio_info - ACPI GPIO specific information
+ * @adev: reference to ACPI device which consumes GPIO resource
* @flags: GPIO initialization flags
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
+ * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
*/
struct acpi_gpio_info {
+ struct acpi_device *adev;
enum gpiod_flags flags;
bool gpioint;
int polarity;
int triggering;
+ unsigned int quirks;
};
/* gpio suffixes used for ACPI and device tree lookup */
@@ -124,7 +128,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
- enum gpiod_flags update);
+ struct acpi_gpio_info *info);
struct gpio_desc *acpi_find_gpio(struct device *dev,
const char *con_id,
@@ -149,7 +153,7 @@ static inline void
acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
static inline int
-acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
+acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *info)
{
return 0;
}
@@ -189,6 +193,12 @@ void gpiod_set_array_value_complex(bool raw, bool can_sleep,
struct gpio_desc **desc_array,
int *value_array);
+/* This is just passed between gpiolib and devres */
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label);
+
extern struct spinlock gpio_lock;
extern struct list_head gpio_devices;
@@ -205,7 +215,7 @@ struct gpio_desc {
#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
-#define FLAG_SLEEP_MAY_LOSE_VALUE 12 /* GPIO may lose value in sleep */
+#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
/* Connection label */
const char *label;
@@ -228,7 +238,8 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
return desc - &desc->gdev->descs[0];
}
-void devprop_gpiochip_set_names(struct gpio_chip *chip);
+void devprop_gpiochip_set_names(struct gpio_chip *chip,
+ const struct fwnode_handle *fwnode);
/* With descriptor prefix */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index da43813d67a4..5aeb5f8816f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2467,7 +2467,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
- PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f71fe6d2ddda..bb5fa895fb64 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2336,7 +2336,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct dm_connector_state *dm_state)
{
struct drm_display_mode *preferred_mode = NULL;
- const struct drm_connector *drm_connector;
+ struct drm_connector *drm_connector;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode = *drm_mode;
bool native_mode_found = false;
@@ -2355,11 +2355,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (!aconnector->dc_sink) {
/*
- * Exclude MST from creating fake_sink
- * TODO: need to enable MST into fake_sink feature
+ * Create a dc_sink when necessary for MST
+ * Don't apply fake_sink to MST
*/
- if (aconnector->mst_port)
- goto stream_create_fail;
+ if (aconnector->mst_port) {
+ dm_dp_mst_dc_sink_create(drm_connector);
+ goto mst_dc_sink_create_done;
+ }
if (create_fake_sink(aconnector))
goto stream_create_fail;
@@ -2410,6 +2412,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream_create_fail:
dm_state_null:
drm_connector_null:
+mst_dc_sink_create_done:
return stream;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 117521c6a6ed..0230250a1164 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -189,6 +189,8 @@ struct amdgpu_dm_connector {
struct mutex hpd_lock;
bool fake_enable;
+
+ bool mst_connected;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index f8efb98b1fa7..638c2c2b5cd7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -185,6 +185,42 @@ static int dm_connector_update_modes(struct drm_connector *connector,
return ret;
}
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct edid *edid;
+ struct dc_sink *dc_sink;
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+
+ edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+ if (!edid) {
+ drm_mode_connector_update_edid_property(
+ &aconnector->base,
+ NULL);
+ return;
+ }
+
+ aconnector->edid = edid;
+
+ dc_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)aconnector->edid,
+ (aconnector->edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ dc_sink->priv = aconnector;
+ aconnector->dc_sink = dc_sink;
+
+ amdgpu_dm_add_sink_to_freesync_module(
+ connector, aconnector->edid);
+
+ drm_mode_connector_update_edid_property(
+ &aconnector->base, aconnector->edid);
+}
+
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
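A quick standalone check of the EDID size arithmetic used in dm_dp_mst_dc_sink_create() above: each EDID block is 128 bytes, and edid->extensions counts blocks beyond the mandatory base block, hence (extensions + 1) * EDID_LENGTH. EDID_LENGTH is redefined locally for the demo:

```c
#include <stdio.h>

#define EDID_LENGTH 128	/* bytes per EDID block (base or extension) */

static unsigned int edid_size(unsigned int extensions)
{
	/* extensions counts blocks beyond the mandatory base block */
	return (extensions + 1) * EDID_LENGTH;
}

int main(void)
{
	printf("base-only EDID: %u bytes\n", edid_size(0));
	printf("EDID + 1 CEA extension: %u bytes\n", edid_size(1));
	return 0;
}
```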
@@ -311,6 +347,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_mode_connector_set_path_property(connector, pathprop);
drm_connector_list_iter_end(&conn_iter);
+ aconnector->mst_connected = true;
return &aconnector->base;
}
}
@@ -363,6 +400,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
*/
amdgpu_dm_connector_funcs_reset(connector);
+ aconnector->mst_connected = true;
+
DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
aconnector, connector->base.id, aconnector->mst_port);
@@ -394,6 +433,8 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_mode_connector_update_edid_property(
&aconnector->base,
NULL);
+
+ aconnector->mst_connected = false;
}
static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -404,10 +445,18 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
drm_kms_helper_hotplug_event(dev);
}
+static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
+{
+ mutex_lock(&connector->dev->mode_config.mutex);
+ drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
static void dm_dp_mst_register_connector(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
if (adev->mode_info.rfbdev)
drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -416,6 +465,8 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
drm_connector_register(connector);
+ if (aconnector->mst_connected)
+ dm_dp_mst_link_status_reset(connector);
}
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 2da851b40042..8cf51da26657 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -31,5 +31,6 @@ struct amdgpu_dm_connector;
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector);
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 3dce35e66b09..b142629a1058 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -900,6 +900,15 @@ bool dcn_validate_bandwidth(
v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
+ /*
+ * The spreadsheet model doesn't handle a taps_c of 1 properly;
+ * force chroma to always be scaled so that bandwidth
+ * validation passes.
+ */
+ if (v->override_hta_pschroma[input_idx] == 1)
+ v->override_hta_pschroma[input_idx] = 2;
+ if (v->override_vta_pschroma[input_idx] == 1)
+ v->override_vta_pschroma[input_idx] = 2;
v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
}
if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
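A standalone restatement of the chroma-tap clamp just added: tap counts of 1 are bumped to 2 before the bandwidth model sees them. Function and variable names below are illustrative only:

```c
#include <stdio.h>

/* Mirror of the clamp above: a chroma tap count of 1 trips the model */
static int clamp_chroma_taps(int taps_c)
{
	return taps_c == 1 ? 2 : taps_c;
}

int main(void)
{
	int samples[] = { 1, 2, 4 };

	for (int i = 0; i < 3; i++)
		printf("taps_c %d -> %d\n", samples[i],
		       clamp_chroma_taps(samples[i]));
	return 0;
}
```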
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index e27ed4a45265..42a111b9505d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1801,7 +1801,7 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
link->link_enc->funcs->disable_output(link->link_enc, signal, link);
}
-bool dp_active_dongle_validate_timing(
+static bool dp_active_dongle_validate_timing(
const struct dc_crtc_timing *timing,
const struct dc_dongle_caps *dongle_caps)
{
@@ -1833,6 +1833,8 @@ bool dp_active_dongle_validate_timing(
/* Check Color Depth and Pixel Clock */
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
required_pix_clk /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ required_pix_clk = required_pix_clk * 2 / 3;
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
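A standalone sketch of the effective pixel-clock scaling above: 4:2:0 halves the clock through the dongle, and 4:2:2 carries 16 bpp instead of 24 bpp, hence the 2/3 factor. The enum and helper are illustrative, not the dc types:

```c
#include <stdio.h>

enum pix_enc { ENC_RGB444, ENC_YCBCR422, ENC_YCBCR420 };

static unsigned int effective_pix_clk(unsigned int pix_clk_khz,
				      enum pix_enc enc)
{
	switch (enc) {
	case ENC_YCBCR420:
		return pix_clk_khz / 2;		/* half-rate transport */
	case ENC_YCBCR422:
		return pix_clk_khz * 2 / 3;	/* 16 bpp vs 24 bpp */
	default:
		return pix_clk_khz;
	}
}

int main(void)
{
	printf("594000 kHz as 4:2:2 -> %u kHz\n",
	       effective_pix_clk(594000, ENC_YCBCR422));
	return 0;
}
```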
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 07ff8d2faf3f..d844fadcd56f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2866,16 +2866,19 @@ static void dce110_apply_ctx_for_surface(
int num_planes,
struct dc_state *context)
{
- int i, be_idx;
+ int i;
if (num_planes == 0)
return;
- be_idx = -1;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (stream == context->res_ctx.pipe_ctx[i].stream) {
- be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
- break;
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (stream == pipe_ctx->stream) {
+ if (!pipe_ctx->top_pipe &&
+ (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
}
}
@@ -2895,9 +2898,22 @@ static void dce110_apply_ctx_for_surface(
context->stream_count);
dce110_program_front_end_for_pipe(dc, pipe_ctx);
+
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
+
program_surface_visibility(dc, pipe_ctx);
}
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if ((stream == pipe_ctx->stream) &&
+ (!pipe_ctx->top_pipe) &&
+ (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
+ }
}
static void dce110_power_down_fe(struct dc *dc, int fe_idx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 74e7c82bdc76..a9d55d0dd69e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -159,11 +159,10 @@ bool dpp_get_optimal_number_of_taps(
scl_data->taps.h_taps = 1;
if (IDENTITY_RATIO(scl_data->ratios.vert))
scl_data->taps.v_taps = 1;
- /*
- * Spreadsheet doesn't handle taps_c is one properly,
- * need to force Chroma to always be scaled to pass
- * bandwidth validation.
- */
+ if (IDENTITY_RATIO(scl_data->ratios.horz_c))
+ scl_data->taps.h_taps_c = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert_c))
+ scl_data->taps.v_taps_c = 1;
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index a9782b1aba47..34daf895f848 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1360,7 +1360,7 @@ void dpp1_cm_set_output_csc_adjustment(
void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
- const struct default_adjustment *default_adjust);
+ enum dc_color_space colorspace);
void dpp1_cm_set_gamut_remap(
struct dpp *dpp,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 40627c244bf5..ed1216b53465 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -225,14 +225,13 @@ void dpp1_cm_set_gamut_remap(
void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
- const struct default_adjustment *default_adjust)
+ enum dc_color_space colorspace)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
uint32_t ocsc_mode = 0;
- if (default_adjust != NULL) {
- switch (default_adjust->out_color_space) {
+ switch (colorspace) {
case COLOR_SPACE_SRGB:
case COLOR_SPACE_2020_RGB_FULLRANGE:
ocsc_mode = 0;
@@ -253,7 +252,6 @@ void dpp1_cm_set_output_csc_default(
case COLOR_SPACE_UNKNOWN:
default:
break;
- }
}
REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 961ad5c3b454..05dc01e54531 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2097,6 +2097,8 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
tbl_entry.color_space = color_space;
//tbl_entry.regval = matrix;
pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry);
+ } else {
+ pipe_ctx->plane_res.dpp->funcs->opp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
}
}
static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 83a68460edcd..9420dfb94d39 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -64,7 +64,7 @@ struct dpp_funcs {
void (*opp_set_csc_default)(
struct dpp *dpp,
- const struct default_adjustment *default_adjust);
+ enum dc_color_space colorspace);
void (*opp_set_csc_adjustment)(
struct dpp *dpp,
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2e065facdce7..a0f4d2a2a481 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -168,16 +168,23 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
int x, int y)
{
+ const struct drm_format_info *format = fb->format;
+ unsigned int num_planes = format->num_planes;
u32 addr = drm_fb_obj(fb)->dev_addr;
- int num_planes = fb->format->num_planes;
int i;
if (num_planes > 3)
num_planes = 3;
- for (i = 0; i < num_planes; i++)
+ addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] +
+ x * format->cpp[0];
+
+ y /= format->vsub;
+ x /= format->hsub;
+
+ for (i = 1; i < num_planes; i++)
addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
- x * fb->format->cpp[i];
+ x * format->cpp[i];
for (; i < 3; i++)
addrs[i] = 0;
}
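A standalone model of the corrected address computation above: plane 0 (luma) uses full-resolution coordinates, while chroma planes divide x/y by the format's subsampling factors first. The struct mirrors the drm_format_info fields used, but the layout and values here are illustrative:

```c
#include <stdio.h>

struct fmt_info {
	unsigned int num_planes, hsub, vsub;
	unsigned int cpp[3];	/* bytes per sample, per plane */
};

static void calc_addrs(unsigned int *addrs, unsigned int base,
		       const unsigned int *offsets,
		       const unsigned int *pitches,
		       const struct fmt_info *f, int x, int y)
{
	/* Plane 0 (luma) uses full-resolution coordinates */
	addrs[0] = base + offsets[0] + y * pitches[0] + x * f->cpp[0];

	/* Chroma planes are subsampled: scale coordinates first */
	x /= f->hsub;
	y /= f->vsub;
	for (unsigned int i = 1; i < f->num_planes; i++)
		addrs[i] = base + offsets[i] + y * pitches[i] + x * f->cpp[i];
}

int main(void)
{
	/* NV12-like: 2x2 subsampled chroma, 1 then 2 bytes per sample */
	struct fmt_info nv12 = { 2, 2, 2, { 1, 2, 0 } };
	unsigned int offsets[3] = { 0, 4096, 0 };
	unsigned int pitches[3] = { 64, 64, 0 };
	unsigned int addrs[3] = { 0, 0, 0 };

	calc_addrs(addrs, 0x10000, offsets, pitches, &nv12, 8, 8);
	printf("Y at 0x%x, CbCr at 0x%x\n", addrs[0], addrs[1]);
	return 0;
}
```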
@@ -744,15 +751,14 @@ void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
if (plane->fb)
drm_framebuffer_put(plane->fb);
- /* Power down the Y/U/V FIFOs */
- sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
-
/* Power down most RAMs and FIFOs if this is the primary plane */
if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- sram_para1 |= CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
- CFG_PDWN32x32 | CFG_PDWN64x66;
+ sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+ CFG_PDWN32x32 | CFG_PDWN64x66;
dma_ctrl0_mask = CFG_GRA_ENA;
} else {
+ /* Power down the Y/U/V FIFOs */
+ sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
dma_ctrl0_mask = CFG_DMA_ENA;
}
@@ -1225,17 +1231,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
dcrtc);
- if (ret < 0) {
- kfree(dcrtc);
- return ret;
- }
+ if (ret < 0)
+ goto err_crtc;
if (dcrtc->variant->init) {
ret = dcrtc->variant->init(dcrtc, dev);
- if (ret) {
- kfree(dcrtc);
- return ret;
- }
+ if (ret)
+ goto err_crtc;
}
/* Ensure AXI pipeline is enabled */
@@ -1246,13 +1248,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
dcrtc->crtc.port = port;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (!primary)
- return -ENOMEM;
+ if (!primary) {
+ ret = -ENOMEM;
+ goto err_crtc;
+ }
ret = armada_drm_plane_init(primary);
if (ret) {
kfree(primary);
- return ret;
+ goto err_crtc;
}
ret = drm_universal_plane_init(drm, &primary->base, 0,
@@ -1263,7 +1267,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
- return ret;
+ goto err_crtc;
}
ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
@@ -1282,6 +1286,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
err_crtc_init:
primary->base.funcs->destroy(&primary->base);
+err_crtc:
+ kfree(dcrtc);
+
return ret;
}
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index bab11f483575..bfd3514fbe9b 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -42,6 +42,8 @@ struct armada_plane_work {
};
struct armada_plane_state {
+ u16 src_x;
+ u16 src_y;
u32 src_hw;
u32 dst_hw;
u32 dst_yx;
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index b411b608821a..aba947696178 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -99,6 +99,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
{
struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ const struct drm_format_info *format;
struct drm_rect src = {
.x1 = src_x,
.y1 = src_y,
@@ -117,7 +118,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
};
uint32_t val, ctrl0;
unsigned idx = 0;
- bool visible;
+ bool visible, fb_changed;
int ret;
trace_armada_ovl_plane_update(plane, crtc, fb,
@@ -138,6 +139,18 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (!visible)
ctrl0 &= ~CFG_DMA_ENA;
+ /*
+ * Shifting a YUV packed format image by one pixel causes the U/V
+ * planes to swap. Compensate for it by also toggling the UV swap.
+ */
+ format = fb->format;
+ if (format->num_planes == 1 && src.x1 >> 16 & (format->hsub - 1))
+ ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+
+ fb_changed = plane->fb != fb ||
+ dplane->base.state.src_x != src.x1 >> 16 ||
+ dplane->base.state.src_y != src.y1 >> 16;
+
if (!dcrtc->plane) {
dcrtc->plane = plane;
armada_ovl_update_attr(&dplane->prop, dcrtc);
@@ -145,7 +158,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME: overlay on an interlaced display */
/* Just updating the position/size? */
- if (plane->fb == fb && dplane->base.state.ctrl0 == ctrl0) {
+ if (!fb_changed && dplane->base.state.ctrl0 == ctrl0) {
val = (drm_rect_height(&src) & 0xffff0000) |
drm_rect_width(&src) >> 16;
dplane->base.state.src_hw = val;
@@ -169,9 +182,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
armada_drm_plane_work_cancel(dcrtc, &dplane->base);
- if (plane->fb != fb) {
- u32 addrs[3], pixel_format;
- int num_planes, hsub;
+ if (fb_changed) {
+ u32 addrs[3];
/*
* Take a reference on the new framebuffer - we want to
@@ -182,23 +194,11 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (plane->fb)
armada_ovl_retire_fb(dplane, plane->fb);
- src_y = src.y1 >> 16;
- src_x = src.x1 >> 16;
+ dplane->base.state.src_y = src_y = src.y1 >> 16;
+ dplane->base.state.src_x = src_x = src.x1 >> 16;
armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);
- pixel_format = fb->format->format;
- hsub = drm_format_horz_chroma_subsampling(pixel_format);
- num_planes = fb->format->num_planes;
-
- /*
- * Annoyingly, shifting a YUYV-format image by one pixel
- * causes the U/V planes to toggle. Toggle the UV swap.
- * (Unfortunately, this causes momentary colour flickering.)
- */
- if (src_x & (hsub - 1) && num_planes == 1)
- ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
-
armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y0);
armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 482014137953..9ae236036e32 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -152,14 +152,23 @@ static void drm_connector_free(struct kref *kref)
connector->funcs->destroy(connector);
}
-static void drm_connector_free_work_fn(struct work_struct *work)
+void drm_connector_free_work_fn(struct work_struct *work)
{
- struct drm_connector *connector =
- container_of(work, struct drm_connector, free_work);
- struct drm_device *dev = connector->dev;
+ struct drm_connector *connector, *n;
+ struct drm_device *dev =
+ container_of(work, struct drm_device, mode_config.connector_free_work);
+ struct drm_mode_config *config = &dev->mode_config;
+ unsigned long flags;
+ struct llist_node *freed;
- drm_mode_object_unregister(dev, &connector->base);
- connector->funcs->destroy(connector);
+ spin_lock_irqsave(&config->connector_list_lock, flags);
+ freed = llist_del_all(&config->connector_free_list);
+ spin_unlock_irqrestore(&config->connector_list_lock, flags);
+
+ llist_for_each_entry_safe(connector, n, freed, free_node) {
+ drm_mode_object_unregister(dev, &connector->base);
+ connector->funcs->destroy(connector);
+ }
}
/**
@@ -191,8 +200,6 @@ int drm_connector_init(struct drm_device *dev,
if (ret)
return ret;
- INIT_WORK(&connector->free_work, drm_connector_free_work_fn);
-
connector->base.properties = &connector->properties;
connector->dev = dev;
connector->funcs = funcs;
@@ -547,10 +554,17 @@ EXPORT_SYMBOL(drm_connector_list_iter_begin);
* actually release the connector when dropping our final reference.
*/
static void
-drm_connector_put_safe(struct drm_connector *conn)
+__drm_connector_put_safe(struct drm_connector *conn)
{
- if (refcount_dec_and_test(&conn->base.refcount.refcount))
- schedule_work(&conn->free_work);
+ struct drm_mode_config *config = &conn->dev->mode_config;
+
+ lockdep_assert_held(&config->connector_list_lock);
+
+ if (!refcount_dec_and_test(&conn->base.refcount.refcount))
+ return;
+
+ llist_add(&conn->free_node, &config->connector_free_list);
+ schedule_work(&config->connector_free_work);
}
/**
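The rework above moves connector destruction from one work item per connector to a single per-device work that drains a lock-free list. A hedged, generic sketch of that deferred-free pattern (identifiers are placeholders; the real code additionally serialises llist_add() against list iterators under connector_list_lock):

```c
#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_obj {
	struct llist_node free_node;
	/* ...payload... */
};

static LLIST_HEAD(free_list);
static void free_work_fn(struct work_struct *work);
static DECLARE_WORK(free_work, free_work_fn);

static void free_work_fn(struct work_struct *work)
{
	struct deferred_obj *obj, *n;
	struct llist_node *freed = llist_del_all(&free_list);

	llist_for_each_entry_safe(obj, n, freed, free_node)
		kfree(obj);
}

/* Called where destroying the object directly is not allowed. */
static void deferred_put(struct deferred_obj *obj)
{
	llist_add(&obj->free_node, &free_list);
	schedule_work(&free_work);
}
```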
@@ -582,10 +596,10 @@ drm_connector_list_iter_next(struct drm_connector_list_iter *iter)
/* loop until it's not a zombie connector */
} while (!kref_get_unless_zero(&iter->conn->base.refcount));
- spin_unlock_irqrestore(&config->connector_list_lock, flags);
if (old_conn)
- drm_connector_put_safe(old_conn);
+ __drm_connector_put_safe(old_conn);
+ spin_unlock_irqrestore(&config->connector_list_lock, flags);
return iter->conn;
}
@@ -602,9 +616,15 @@ EXPORT_SYMBOL(drm_connector_list_iter_next);
*/
void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
{
+ struct drm_mode_config *config = &iter->dev->mode_config;
+ unsigned long flags;
+
iter->dev = NULL;
- if (iter->conn)
- drm_connector_put_safe(iter->conn);
+ if (iter->conn) {
+ spin_lock_irqsave(&config->connector_list_lock, flags);
+ __drm_connector_put_safe(iter->conn);
+ spin_unlock_irqrestore(&config->connector_list_lock, flags);
+ }
lock_release(&connector_list_iter_dep_map, 0, _RET_IP_);
}
EXPORT_SYMBOL(drm_connector_list_iter_end);
@@ -1231,6 +1251,19 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
if (edid)
size = EDID_LENGTH * (1 + edid->extensions);
+ /* Set the display info, using edid if available, otherwise
+ * resetting the values to defaults. This duplicates the work
+ * done in drm_add_edid_modes, but that function is not
+ * consistently called before this one in all drivers and the
+ * computation is cheap enough that it seems better to
+ * duplicate it rather than attempt to ensure some arbitrary
+ * ordering of calls.
+ */
+ if (edid)
+ drm_add_display_info(connector, edid);
+ else
+ drm_reset_display_info(connector);
+
drm_object_property_set_value(&connector->base,
dev->mode_config.non_desktop_property,
connector->display_info.non_desktop);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 9ebb8841778c..af00f42ba269 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -142,6 +142,7 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
uint64_t value);
int drm_connector_create_standard_properties(struct drm_device *dev);
const char *drm_get_connector_force_name(enum drm_connector_force force);
+void drm_connector_free_work_fn(struct work_struct *work);
/* IOCTL */
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5dfe14763871..cb487148359a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1731,7 +1731,7 @@ EXPORT_SYMBOL(drm_edid_duplicate);
*
* Returns true if @vendor is in @edid, false otherwise
*/
-static bool edid_vendor(struct edid *edid, const char *vendor)
+static bool edid_vendor(const struct edid *edid, const char *vendor)
{
char edid_vendor[3];
@@ -1749,7 +1749,7 @@ static bool edid_vendor(struct edid *edid, const char *vendor)
*
* This tells subsequent routines what fixes they need to apply.
*/
-static u32 edid_get_quirks(struct edid *edid)
+static u32 edid_get_quirks(const struct edid *edid)
{
const struct edid_quirk *quirk;
int i;
@@ -2813,7 +2813,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
/*
* Search EDID for CEA extension block.
*/
-static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
+static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
{
u8 *edid_ext = NULL;
int i;
@@ -2835,12 +2835,12 @@ static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
return edid_ext;
}
-static u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_cea_extension(const struct edid *edid)
{
return drm_find_edid_extension(edid, CEA_EXT);
}
-static u8 *drm_find_displayid_extension(struct edid *edid)
+static u8 *drm_find_displayid_extension(const struct edid *edid)
{
return drm_find_edid_extension(edid, DISPLAYID_EXT);
}
@@ -4363,7 +4363,7 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
}
static void drm_parse_cea_ext(struct drm_connector *connector,
- struct edid *edid)
+ const struct edid *edid)
{
struct drm_display_info *info = &connector->display_info;
const u8 *edid_ext;
@@ -4397,11 +4397,33 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
}
-static void drm_add_display_info(struct drm_connector *connector,
- struct edid *edid, u32 quirks)
+/* The connector has no EDID, so there is nothing to compute quirks from. Reset
+ * all of the values that would otherwise have been set from EDID.
+ */
+void
+drm_reset_display_info(struct drm_connector *connector)
{
struct drm_display_info *info = &connector->display_info;
+ info->width_mm = 0;
+ info->height_mm = 0;
+
+ info->bpc = 0;
+ info->color_formats = 0;
+ info->cea_rev = 0;
+ info->max_tmds_clock = 0;
+ info->dvi_dual = false;
+
+ info->non_desktop = 0;
+}
+EXPORT_SYMBOL_GPL(drm_reset_display_info);
+
+u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
+{
+ struct drm_display_info *info = &connector->display_info;
+
+ u32 quirks = edid_get_quirks(edid);
+
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
@@ -4414,11 +4436,13 @@ static void drm_add_display_info(struct drm_connector *connector,
info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+ DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
+
if (edid->revision < 3)
- return;
+ return quirks;
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
- return;
+ return quirks;
drm_parse_cea_ext(connector, edid);
@@ -4438,7 +4462,7 @@ static void drm_add_display_info(struct drm_connector *connector,
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
- return;
+ return quirks;
switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
case DRM_EDID_DIGITAL_DEPTH_6:
@@ -4473,7 +4497,9 @@ static void drm_add_display_info(struct drm_connector *connector,
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+ return quirks;
}
+EXPORT_SYMBOL_GPL(drm_add_display_info);
static int validate_displayid(u8 *displayid, int length, int idx)
{
@@ -4627,14 +4653,12 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
return 0;
}
- quirks = edid_get_quirks(edid);
-
/*
* CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks.
* To avoid multiple parsing of same block, lets parse that map
* from sink info, before parsing CEA modes.
*/
- drm_add_display_info(connector, edid, quirks);
+ quirks = drm_add_display_info(connector, edid);
/*
* EDID spec says modes should be preferred in this order:
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index b3c6e997ccdb..9a17bd3639d1 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -559,10 +559,10 @@ EXPORT_SYMBOL(drm_read);
*
* Mask of POLL flags indicating the current status of the file.
*/
-unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
+__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
{
struct drm_file *file_priv = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &file_priv->event_wait, wait);
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index d1eb56a1eff4..1402c0e71b03 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -220,17 +220,6 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
mutex_lock(&dev->mode_config.idr_mutex);
- /* Insert the new lessee into the tree */
- id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
- if (id < 0) {
- error = id;
- goto out_lessee;
- }
-
- lessee->lessee_id = id;
- lessee->lessor = drm_master_get(lessor);
- list_add_tail(&lessee->lessee_list, &lessor->lessees);
-
idr_for_each_entry(leases, entry, object) {
error = 0;
if (!idr_find(&dev->mode_config.crtc_idr, object))
@@ -246,6 +235,17 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
}
}
+ /* Insert the new lessee into the tree */
+ id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
+ if (id < 0) {
+ error = id;
+ goto out_lessee;
+ }
+
+ lessee->lessee_id = id;
+ lessee->lessor = drm_master_get(lessor);
+ list_add_tail(&lessee->lessee_list, &lessor->lessees);
+
/* Move the leases over */
lessee->leases = *leases;
DRM_DEBUG_LEASE("new lessee %d %p, lessor %d %p\n", lessee->lessee_id, lessee, lessor->lessee_id, lessor);
@@ -254,10 +254,10 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
return lessee;
out_lessee:
- drm_master_put(&lessee);
-
mutex_unlock(&dev->mode_config.idr_mutex);
+ drm_master_put(&lessee);
+
return ERR_PTR(error);
}
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 61a1c8ea74bc..c3c79ee6119e 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -575,21 +575,23 @@ EXPORT_SYMBOL(drm_mm_remove_node);
*/
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
+ struct drm_mm *mm = old->mm;
+
DRM_MM_BUG_ON(!old->allocated);
*new = *old;
list_replace(&old->node_list, &new->node_list);
- rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root);
+ rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
if (drm_mm_hole_follows(old)) {
list_replace(&old->hole_stack, &new->hole_stack);
rb_replace_node(&old->rb_hole_size,
&new->rb_hole_size,
- &old->mm->holes_size);
+ &mm->holes_size);
rb_replace_node(&old->rb_hole_addr,
&new->rb_hole_addr,
- &old->mm->holes_addr);
+ &mm->holes_addr);
}
old->allocated = false;
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index cc78b3d9e5e4..256de7313612 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -382,6 +382,9 @@ void drm_mode_config_init(struct drm_device *dev)
ida_init(&dev->mode_config.connector_ida);
spin_lock_init(&dev->mode_config.connector_list_lock);
+ init_llist_head(&dev->mode_config.connector_free_list);
+ INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
+
drm_mode_create_standard_properties(dev);
/* Just to be sure */
@@ -432,7 +435,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
}
drm_connector_list_iter_end(&conn_iter);
/* connector_iter drops references in a work item. */
- flush_scheduled_work();
+ flush_work(&dev->mode_config.connector_free_work);
if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 37a93cdffb4a..2c90519576a3 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -558,11 +558,10 @@ int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
}
/*
- * setplane_internal - setplane handler for internal callers
+ * __setplane_internal - setplane handler for internal callers
*
- * Note that we assume an extra reference has already been taken on fb. If the
- * update fails, this reference will be dropped before return; if it succeeds,
- * the previous framebuffer (if any) will be unreferenced instead.
+ * This function will take a reference on the new fb for the plane
+ * on success.
*
* src_{x,y,w,h} are provided in 16.16 fixed point format
*/
@@ -630,14 +629,12 @@ static int __setplane_internal(struct drm_plane *plane,
if (!ret) {
plane->crtc = crtc;
plane->fb = fb;
- fb = NULL;
+ drm_framebuffer_get(plane->fb);
} else {
plane->old_fb = NULL;
}
out:
- if (fb)
- drm_framebuffer_put(fb);
if (plane->old_fb)
drm_framebuffer_put(plane->old_fb);
plane->old_fb = NULL;
@@ -685,6 +682,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_plane *plane;
struct drm_crtc *crtc = NULL;
struct drm_framebuffer *fb = NULL;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -717,15 +715,16 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
}
}
- /*
- * setplane_internal will take care of deref'ing either the old or new
- * framebuffer depending on success.
- */
- return setplane_internal(plane, crtc, fb,
- plane_req->crtc_x, plane_req->crtc_y,
- plane_req->crtc_w, plane_req->crtc_h,
- plane_req->src_x, plane_req->src_y,
- plane_req->src_w, plane_req->src_h);
+ ret = setplane_internal(plane, crtc, fb,
+ plane_req->crtc_x, plane_req->crtc_y,
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->src_x, plane_req->src_y,
+ plane_req->src_w, plane_req->src_h);
+
+ if (fb)
+ drm_framebuffer_put(fb);
+
+ return ret;
}
static int drm_mode_cursor_universal(struct drm_crtc *crtc,
@@ -788,13 +787,12 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
src_h = fb->height << 16;
}
- /*
- * setplane_internal will take care of deref'ing either the old or new
- * framebuffer depending on success.
- */
ret = __setplane_internal(crtc->cursor, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- 0, 0, src_w, src_h, ctx);
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ 0, 0, src_w, src_h, ctx);
+
+ if (fb)
+ drm_framebuffer_put(fb);
/* Update successful; save new cursor position, if necessary */
if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index f776fc1cc543..cb4d09c70fd4 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -369,40 +369,26 @@ static const struct file_operations drm_syncobj_file_fops = {
.release = drm_syncobj_file_release,
};
-static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
-{
- struct file *file = anon_inode_getfile("syncobj_file",
- &drm_syncobj_file_fops,
- syncobj, 0);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- drm_syncobj_get(syncobj);
- if (cmpxchg(&syncobj->file, NULL, file)) {
- /* lost the race */
- fput(file);
- }
-
- return 0;
-}
-
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
- int ret;
+ struct file *file;
int fd;
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
return fd;
- if (!syncobj->file) {
- ret = drm_syncobj_alloc_file(syncobj);
- if (ret) {
- put_unused_fd(fd);
- return ret;
- }
+ file = anon_inode_getfile("syncobj_file",
+ &drm_syncobj_file_fops,
+ syncobj, 0);
+ if (IS_ERR(file)) {
+ put_unused_fd(fd);
+ return PTR_ERR(file);
}
- fd_install(fd, syncobj->file);
+
+ drm_syncobj_get(syncobj);
+ fd_install(fd, file);
+
*p_fd = fd;
return 0;
}
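The simplification above drops the cached syncobj->file in favour of a fresh anon-inode file per exported fd. A hedged, generic sketch of that reserve-then-install pattern, with placeholder names:

```c
#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>

static int example_export_fd(const struct file_operations *fops, void *priv)
{
	struct file *file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	file = anon_inode_getfile("example", fops, priv, 0);
	if (IS_ERR(file)) {
		put_unused_fd(fd);	/* nothing became visible yet */
		return PTR_ERR(file);
	}

	fd_install(fd, file);		/* point of no return */
	return fd;
}
```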
@@ -422,31 +408,24 @@ static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
return ret;
}
-static struct drm_syncobj *drm_syncobj_fdget(int fd)
-{
- struct file *file = fget(fd);
-
- if (!file)
- return NULL;
- if (file->f_op != &drm_syncobj_file_fops)
- goto err;
-
- return file->private_data;
-err:
- fput(file);
- return NULL;
-};
-
static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
int fd, u32 *handle)
{
- struct drm_syncobj *syncobj = drm_syncobj_fdget(fd);
+ struct drm_syncobj *syncobj;
+ struct file *file;
int ret;
- if (!syncobj)
+ file = fget(fd);
+ if (!file)
return -EINVAL;
+ if (file->f_op != &drm_syncobj_file_fops) {
+ fput(file);
+ return -EINVAL;
+ }
+
/* take a reference to put in the idr */
+ syncobj = file->private_data;
drm_syncobj_get(syncobj);
idr_preload(GFP_KERNEL);
@@ -455,12 +434,14 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
spin_unlock(&file_private->syncobj_table_lock);
idr_preload_end();
- if (ret < 0) {
- fput(syncobj->file);
- return ret;
- }
- *handle = ret;
- return 0;
+ if (ret > 0) {
+ *handle = ret;
+ ret = 0;
+ } else
+ drm_syncobj_put(syncobj);
+
+ fput(file);
+ return ret;
}
static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 85d4c57870fb..49af94627c8a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2777,12 +2777,12 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
}
static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
- unsigned int opcode, int rings)
+ unsigned int opcode, unsigned long rings)
{
struct cmd_info *info = NULL;
unsigned int ring;
- for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
+ for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
info = find_cmd_entry(gvt, opcode, ring);
if (info)
break;
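The widening above matters because for_each_set_bit() operates on unsigned long words; casting the address of an int reads past the variable on 64-bit builds. A standalone illustration of the portable form (names are illustrative):

```c
#include <stdio.h>

/* Taking rings as unsigned long (a full bitmap word) avoids the
 * out-of-bounds read that (unsigned long *)&int_var would perform.
 */
static void scan_rings(unsigned long rings)
{
	for (unsigned int ring = 0; ring < 8; ring++)
		if (rings & (1UL << ring))
			printf("ring %u selected\n", ring);
}

int main(void)
{
	scan_rings(0x5UL);	/* rings 0 and 2 */
	return 0;
}
```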
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 355120865efd..309f3fa6794a 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -266,6 +266,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
/* Clear host CRT status, so guest couldn't detect this host CRT. */
if (IS_BROADWELL(dev_priv))
vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+
+ vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -282,7 +284,6 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type, unsigned int resolution)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -308,7 +309,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
port->type = type;
emulate_monitor_status_change(vgpu);
- vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 8e331142badb..64d67ff9bf08 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1359,12 +1359,15 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
return ret;
} else {
if (!test_bit(index, spt->post_shadow_bitmap)) {
+ int type = spt->shadow_page.type;
+
ppgtt_get_shadow_entry(spt, &se, index);
ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
if (ret)
return ret;
+ ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &se, index);
}
-
ppgtt_set_post_shadow(spt, index);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 54b5d4c582b6..e143004e66d5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2368,6 +2368,9 @@ struct drm_i915_private {
*/
struct workqueue_struct *wq;
+ /* ordered wq for modesets */
+ struct workqueue_struct *modeset_wq;
+
/* Display functions */
struct drm_i915_display_funcs display;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ad4050f7ab3b..5cfba89ed586 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -330,17 +330,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
* must wait for all rendering to complete to the object (as unbinding
* must anyway), and retire the requests.
*/
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret)
return ret;
- i915_gem_retire_requests(to_i915(obj->base.dev));
-
while ((vma = list_first_entry_or_null(&obj->vma_list,
struct i915_vma,
obj_link))) {
@@ -474,7 +467,7 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
struct drm_i915_gem_request *rq;
struct intel_engine_cs *engine;
- if (!dma_fence_is_i915(fence))
+ if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
return;
rq = to_request(fence);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 59ee808f8fd9..d453756ca128 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2331,12 +2331,12 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
*
* Returns: any poll events that are ready without sleeping
*/
-static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
+static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
struct i915_perf_stream *stream,
struct file *file,
poll_table *wait)
{
- unsigned int events = 0;
+ __poll_t events = 0;
stream->ops->poll_wait(stream, file, wait);
@@ -2365,11 +2365,11 @@ static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
*
* Returns: any poll events that are ready without sleeping
*/
-static unsigned int i915_perf_poll(struct file *file, poll_table *wait)
+static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
struct i915_perf_stream *stream = file->private_data;
struct drm_i915_private *dev_priv = stream->dev_priv;
- int ret;
+ __poll_t ret;
mutex_lock(&dev_priv->perf.lock);
ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3866c49bc390..7923dfd9963c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6977,6 +6977,7 @@ enum {
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30)
#define MASK_WAKEMEM (1<<13)
#define SKL_DFSM _MMIO(0x51000)
@@ -7026,6 +7027,8 @@ enum {
#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
+#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
+
#define GEN7_L3SQCREG1 _MMIO(0xB010)
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -8522,6 +8525,7 @@ enum skl_power_gate {
#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
+#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index e8ca67a129d2..ac236b88c99c 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -367,6 +367,7 @@ struct i915_sw_dma_fence_cb {
struct dma_fence *dma;
struct timer_list timer;
struct irq_work work;
+ struct rcu_head rcu;
};
static void timer_i915_sw_fence_wake(struct timer_list *t)
@@ -406,7 +407,7 @@ static void irq_i915_sw_fence_work(struct irq_work *wrk)
del_timer_sync(&cb->timer);
dma_fence_put(cb->dma);
- kfree(cb);
+ kfree_rcu(cb, rcu);
}
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
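The rcu_head added above lets the callback be freed with kfree_rcu() instead of kfree(), so a concurrent RCU reader that still holds a pointer to it cannot touch freed memory. A minimal sketch of the pattern with generic names:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_cb {
	int payload;
	struct rcu_head rcu;	/* storage for the deferred free */
};

static void example_release(struct example_cb *cb)
{
	/* Freed only after a grace period; RCU readers stay safe. */
	kfree_rcu(cb, rcu);
}
```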
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 5f8b9f1f40f1..bcbc7abe6693 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -186,7 +186,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
struct intel_wait *wait, *n, *first;
if (!b->irq_armed)
- return;
+ goto wakeup_signaler;
/* We only disarm the irq when we are idle (all requests completed),
* so if the bottom-half remains asleep, it missed the request
@@ -208,6 +208,14 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
b->waiters = RB_ROOT;
spin_unlock_irq(&b->rb_lock);
+
+ /*
+ * The signaling thread may be asleep holding a reference to a request
+ * that had its signaling cancelled prior to being preempted. We need
+ * to kick the signaler, just in case, to release any such reference.
+ */
+wakeup_signaler:
+ wake_up_process(b->signaler);
}
static bool use_fake_irq(const struct intel_breadcrumbs *b)
@@ -651,23 +659,15 @@ static int intel_breadcrumbs_signaler(void *arg)
}
if (unlikely(do_schedule)) {
- DEFINE_WAIT(exec);
-
if (kthread_should_park())
kthread_parkme();
- if (kthread_should_stop()) {
- GEM_BUG_ON(request);
+ if (unlikely(kthread_should_stop())) {
+ i915_gem_request_put(request);
break;
}
- if (request)
- add_wait_queue(&request->execute, &exec);
-
schedule();
-
- if (request)
- remove_wait_queue(&request->execute, &exec);
}
i915_gem_request_put(request);
} while (1);
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index b2a6d62b71c0..60cf4e58389a 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -860,16 +860,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
- int min_cdclk = skl_calc_cdclk(0, vco);
u32 val;
WARN_ON(vco != 8100000 && vco != 8640000);
- /* select the minimum CDCLK before enabling DPLL 0 */
- val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
- I915_WRITE(CDCLK_CTL, val);
- POSTING_READ(CDCLK_CTL);
-
/*
* We always enable DPLL0 with the lowest link rate possible, but still
* taking into account the VCO required to operate the eDP panel at the
@@ -923,7 +917,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
- u32 freq_select, pcu_ack;
+ u32 freq_select, pcu_ack, cdclk_ctl;
int ret;
WARN_ON((cdclk == 24000) != (vco == 0));
@@ -940,7 +934,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
- /* set CDCLK_CTL */
+ /* Choose frequency for this cdclk */
switch (cdclk) {
case 450000:
case 432000:
@@ -968,10 +962,33 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
dev_priv->cdclk.hw.vco != vco)
skl_dpll0_disable(dev_priv);
+ cdclk_ctl = I915_READ(CDCLK_CTL);
+
+ if (dev_priv->cdclk.hw.vco != vco) {
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+ cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ }
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ POSTING_READ(CDCLK_CTL);
+
if (dev_priv->cdclk.hw.vco != vco)
skl_dpll0_enable(dev_priv, vco);
- I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+
+ cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
POSTING_READ(CDCLK_CTL);
/* inform PCU of the change */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e0843bb99169..58a3755544b2 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2128,6 +2128,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (WARN_ON(!pll))
return;
+ mutex_lock(&dev_priv->dpll_lock);
+
if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
@@ -2157,6 +2159,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
} else if (INTEL_INFO(dev_priv)->gen < 9) {
I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
}
+
+ mutex_unlock(&dev_priv->dpll_lock);
}
static void intel_ddi_clk_disable(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e8ccf89cb17b..50f8443641b8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1211,23 +1211,6 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
pipe_name(pipe));
}
-static void assert_cursor(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- bool cur_state;
-
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
- cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
- else
- cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
-
- I915_STATE_WARN(cur_state != state,
- "cursor on pipe %c assertion failure (expected %s, current %s)\n",
- pipe_name(pipe), onoff(state), onoff(cur_state));
-}
-#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
-#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
-
void assert_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
@@ -1255,77 +1238,25 @@ void assert_pipe(struct drm_i915_private *dev_priv,
pipe_name(pipe), onoff(state), onoff(cur_state));
}
-static void assert_plane(struct drm_i915_private *dev_priv,
- enum plane plane, bool state)
+static void assert_plane(struct intel_plane *plane, bool state)
{
- u32 val;
- bool cur_state;
+ bool cur_state = plane->get_hw_state(plane);
- val = I915_READ(DSPCNTR(plane));
- cur_state = !!(val & DISPLAY_PLANE_ENABLE);
I915_STATE_WARN(cur_state != state,
- "plane %c assertion failure (expected %s, current %s)\n",
- plane_name(plane), onoff(state), onoff(cur_state));
+ "%s assertion failure (expected %s, current %s)\n",
+ plane->base.name, onoff(state), onoff(cur_state));
}
-#define assert_plane_enabled(d, p) assert_plane(d, p, true)
-#define assert_plane_disabled(d, p) assert_plane(d, p, false)
+#define assert_plane_enabled(p) assert_plane(p, true)
+#define assert_plane_disabled(p) assert_plane(p, false)
-static void assert_planes_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void assert_planes_disabled(struct intel_crtc *crtc)
{
- int i;
-
- /* Primary planes are fixed to pipes on gen4+ */
- if (INTEL_GEN(dev_priv) >= 4) {
- u32 val = I915_READ(DSPCNTR(pipe));
- I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
- "plane %c assertion failure, should be disabled but not\n",
- plane_name(pipe));
- return;
- }
-
- /* Need to check both planes against the pipe */
- for_each_pipe(dev_priv, i) {
- u32 val = I915_READ(DSPCNTR(i));
- enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
- DISPPLANE_SEL_PIPE_SHIFT;
- I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
- "plane %c assertion failure, should be off on pipe %c but is still active\n",
- plane_name(i), pipe_name(pipe));
- }
-}
-
-static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- int sprite;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
- if (INTEL_GEN(dev_priv) >= 9) {
- for_each_sprite(dev_priv, pipe, sprite) {
- u32 val = I915_READ(PLANE_CTL(pipe, sprite));
- I915_STATE_WARN(val & PLANE_CTL_ENABLE,
- "plane %d assertion failure, should be off on pipe %c but is still active\n",
- sprite, pipe_name(pipe));
- }
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- for_each_sprite(dev_priv, pipe, sprite) {
- u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
- I915_STATE_WARN(val & SP_ENABLE,
- "sprite %c assertion failure, should be off on pipe %c but is still active\n",
- sprite_name(pipe, sprite), pipe_name(pipe));
- }
- } else if (INTEL_GEN(dev_priv) >= 7) {
- u32 val = I915_READ(SPRCTL(pipe));
- I915_STATE_WARN(val & SPRITE_ENABLE,
- "sprite %c assertion failure, should be off on pipe %c but is still active\n",
- plane_name(pipe), pipe_name(pipe));
- } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
- u32 val = I915_READ(DVSCNTR(pipe));
- I915_STATE_WARN(val & DVS_ENABLE,
- "sprite %c assertion failure, should be off on pipe %c but is still active\n",
- plane_name(pipe), pipe_name(pipe));
- }
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+ assert_plane_disabled(plane);
}
static void assert_vblank_disabled(struct drm_crtc *crtc)
@@ -1918,9 +1849,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
- assert_planes_disabled(dev_priv, pipe);
- assert_cursor_disabled(dev_priv, pipe);
- assert_sprites_disabled(dev_priv, pipe);
+ assert_planes_disabled(crtc);
/*
* A pipe without a PLL won't actually be able to drive bits from
@@ -1989,9 +1918,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
* Make sure planes won't keep trying to pump pixels to us,
* or we might hang the display.
*/
- assert_planes_disabled(dev_priv, pipe);
- assert_cursor_disabled(dev_priv, pipe);
- assert_sprites_disabled(dev_priv, pipe);
+ assert_planes_disabled(crtc);
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
@@ -2820,6 +2747,23 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
crtc_state->active_planes);
}
+static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane)
+{
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ intel_set_plane_visible(crtc_state, plane_state, false);
+
+ if (plane->id == PLANE_PRIMARY)
+ intel_pre_disable_primary_noatomic(&crtc->base);
+
+ trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, crtc);
+}
+
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
@@ -2877,12 +2821,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* simplest solution is to just disable the primary plane now and
* pretend the BIOS never had it enabled.
*/
- intel_set_plane_visible(to_intel_crtc_state(crtc_state),
- to_intel_plane_state(plane_state),
- false);
- intel_pre_disable_primary_noatomic(&intel_crtc->base);
- trace_intel_disable_plane(primary, intel_crtc);
- intel_plane->disable_plane(intel_plane, intel_crtc);
+ intel_plane_disable_noatomic(intel_crtc, intel_plane);
return;
@@ -3385,6 +3324,31 @@ static void i9xx_disable_primary_plane(struct intel_plane *primary,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static bool i9xx_plane_get_hw_state(struct intel_plane *primary)
+{
+
+ struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum plane plane = primary->plane;
+ enum pipe pipe = primary->pipe;
+ bool ret;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-4 which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
{
@@ -4866,7 +4830,8 @@ void hsw_enable_ips(struct intel_crtc *crtc)
* a vblank wait.
*/
- assert_plane_enabled(dev_priv, crtc->plane);
+ assert_plane_enabled(to_intel_plane(crtc->base.primary));
+
if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
@@ -4899,7 +4864,8 @@ void hsw_disable_ips(struct intel_crtc *crtc)
if (!crtc->config->ips_enabled)
return;
- assert_plane_enabled(dev_priv, crtc->plane);
+ assert_plane_enabled(to_intel_plane(crtc->base.primary));
+
if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
@@ -5899,6 +5865,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum intel_display_power_domain domain;
+ struct intel_plane *plane;
u64 domains;
struct drm_atomic_state *state;
struct intel_crtc_state *crtc_state;
@@ -5907,11 +5874,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
if (!intel_crtc->active)
return;
- if (crtc->primary->state->visible) {
- intel_pre_disable_primary_noatomic(crtc);
+ for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
- intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
- crtc->primary->state->visible = false;
+ if (plane_state->base.visible)
+ intel_plane_disable_noatomic(intel_crtc, plane);
}
state = drm_atomic_state_alloc(crtc->dev);
@@ -9477,6 +9445,23 @@ static void i845_disable_cursor(struct intel_plane *plane,
i845_update_cursor(plane, NULL, NULL);
}
+static bool i845_cursor_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(PIPE_A);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -9670,6 +9655,28 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
i9xx_update_cursor(plane, NULL, NULL);
}
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-3 which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
/* VESA 640x480x72Hz mode to set on the pipe */
static const struct drm_display_mode load_detect_mode = {
@@ -9944,11 +9951,10 @@ found:
}
ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
+ drm_framebuffer_put(fb);
if (ret)
goto fail;
- drm_framebuffer_put(fb);
-
ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
if (ret)
goto fail;
@@ -12545,11 +12551,15 @@ static int intel_atomic_commit(struct drm_device *dev,
INIT_WORK(&state->commit_work, intel_atomic_commit_work);
i915_sw_fence_commit(&intel_state->commit_ready);
- if (nonblock)
+ if (nonblock && intel_state->modeset) {
+ queue_work(dev_priv->modeset_wq, &state->commit_work);
+ } else if (nonblock) {
queue_work(system_unbound_wq, &state->commit_work);
- else
+ } else {
+ if (intel_state->modeset)
+ flush_workqueue(dev_priv->modeset_wq);
intel_atomic_commit_tail(state);
-
+ }
return 0;
}
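The commit path above serializes modeset commits on the new ordered "i915_modeset" workqueue while plain flips stay on system_unbound_wq, and blocking commits drain the ordered queue before committing inline. A minimal sketch of the pattern, with hypothetical names (heavy_wq, commit_fn); the caller is assumed to have done INIT_WORK(work, commit_fn):

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *heavy_wq;

	static void commit_fn(struct work_struct *work)
	{
		/* ... perform the actual commit ... */
	}

	static int submit_commit(struct work_struct *work, bool nonblock,
				 bool heavy)
	{
		if (nonblock && heavy) {
			/* ordered wq => at most one heavy commit in flight */
			queue_work(heavy_wq, work);
		} else if (nonblock) {
			queue_work(system_unbound_wq, work);
		} else {
			/* synchronous path: drain pending heavy commits first */
			if (heavy)
				flush_workqueue(heavy_wq);
			commit_fn(work);
		}
		return 0;
	}

	static int commit_init(void)
	{
		heavy_wq = alloc_ordered_workqueue("heavy", 0);
		return heavy_wq ? 0 : -ENOMEM;
	}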
@@ -13195,13 +13205,14 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 10) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
modifiers = skl_format_modifiers_ccs;
primary->update_plane = skl_update_plane;
primary->disable_plane = skl_disable_plane;
+ primary->get_hw_state = skl_plane_get_hw_state;
} else if (INTEL_GEN(dev_priv) >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
@@ -13212,6 +13223,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = skl_update_plane;
primary->disable_plane = skl_disable_plane;
+ primary->get_hw_state = skl_plane_get_hw_state;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13219,6 +13231,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
+ primary->get_hw_state = i9xx_plane_get_hw_state;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
@@ -13226,6 +13239,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
+ primary->get_hw_state = i9xx_plane_get_hw_state;
}
if (INTEL_GEN(dev_priv) >= 9)
@@ -13315,10 +13329,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
cursor->update_plane = i845_update_cursor;
cursor->disable_plane = i845_disable_cursor;
+ cursor->get_hw_state = i845_cursor_get_hw_state;
cursor->check_plane = i845_check_cursor;
} else {
cursor->update_plane = i9xx_update_cursor;
cursor->disable_plane = i9xx_disable_cursor;
+ cursor->get_hw_state = i9xx_cursor_get_hw_state;
cursor->check_plane = i9xx_check_cursor;
}
@@ -14463,6 +14479,8 @@ int intel_modeset_init(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *crtc;
+ dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+
drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
@@ -14666,8 +14684,11 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
pipe_name(pipe));
- assert_plane_disabled(dev_priv, PLANE_A);
- assert_plane_disabled(dev_priv, PLANE_B);
+ WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
+ WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
+ WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
+ WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
+ WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
I915_WRITE(PIPECONF(pipe), 0);
POSTING_READ(PIPECONF(pipe));
@@ -14678,22 +14699,36 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
POSTING_READ(DPLL(pipe));
}
-static bool
-intel_check_plane_mapping(struct intel_crtc *crtc)
+static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
+ struct intel_plane *primary)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 val;
+ enum plane plane = primary->plane;
+ u32 val = I915_READ(DSPCNTR(plane));
- if (INTEL_INFO(dev_priv)->num_pipes == 1)
- return true;
+ return (val & DISPLAY_PLANE_ENABLE) == 0 ||
+ (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
+}
+
+static void
+intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
- val = I915_READ(DSPCNTR(!crtc->plane));
+ if (INTEL_GEN(dev_priv) >= 4)
+ return;
- if ((val & DISPLAY_PLANE_ENABLE) &&
- (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
- return false;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
- return true;
+ if (intel_plane_mapping_ok(crtc, plane))
+ continue;
+
+ DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
+ plane->base.name);
+ intel_plane_disable_noatomic(crtc, plane);
+ }
}
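Note the check the old code used here collapsed the multi-bit pipe-select field with !! before comparing it to the pipe number; the rewrite compares the masked field against the encoded value. A small standalone illustration of the bug class, with made-up register values (i830 only has pipes A and B, so this is purely illustrative):

	#include <assert.h>

	#define SEL_PIPE_MASK	(3u << 24)
	#define SEL_PIPE(pipe)	((unsigned int)(pipe) << 24)

	int main(void)
	{
		unsigned int val = SEL_PIPE(2);	/* field encodes pipe C */

		/* old test: !! collapses the field, so pipe B "matches" */
		assert((!!(val & SEL_PIPE_MASK)) == 1);

		/* new test: exact compare against the encoded value */
		assert((val & SEL_PIPE_MASK) != SEL_PIPE(1));
		return 0;
	}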
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
@@ -14749,33 +14784,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
/* Disable everything but the primary plane */
for_each_intel_plane_on_crtc(dev, crtc, plane) {
- if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
- continue;
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
- trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, crtc);
+ if (plane_state->base.visible &&
+ plane->base.type != DRM_PLANE_TYPE_PRIMARY)
+ intel_plane_disable_noatomic(crtc, plane);
}
}
- /* We need to sanitize the plane -> pipe mapping first because this will
- * disable the crtc (and hence change the state) if it is wrong. Note
- * that gen4+ has a fixed plane -> pipe mapping. */
- if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
- bool plane;
-
- DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
- crtc->base.base.id, crtc->base.name);
-
- /* Pipe has the wrong plane attached and the plane is active.
- * Temporarily change the plane mapping and disable everything
- * ... */
- plane = crtc->plane;
- crtc->base.primary->state->visible = true;
- crtc->plane = !plane;
- intel_crtc_disable_noatomic(&crtc->base, ctx);
- crtc->plane = plane;
- }
-
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
if (crtc->active && !intel_crtc_has_encoders(crtc))
@@ -14880,24 +14897,21 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv)
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
-static bool primary_get_hw_state(struct intel_plane *plane)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-
- return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
-}
-
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
- struct intel_plane *primary = to_intel_plane(crtc->base.primary);
- bool visible;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane;
- visible = crtc->active && primary_get_hw_state(primary);
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ bool visible = plane->get_hw_state(plane);
- intel_set_plane_visible(to_intel_crtc_state(crtc->base.state),
- to_intel_plane_state(primary->base.state),
- visible);
+ intel_set_plane_visible(crtc_state, plane_state, visible);
+ }
}
static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15095,6 +15109,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
/* HW state is read out, now we need to sanitize this mess. */
get_encoder_power_domains(dev_priv);
+ intel_sanitize_plane_mapping(dev_priv);
+
for_each_intel_encoder(dev, encoder) {
intel_sanitize_encoder(encoder);
}
@@ -15271,6 +15287,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_cleanup_gt_powersave(dev_priv);
intel_teardown_gmbus(dev_priv);
+
+ destroy_workqueue(dev_priv->modeset_wq);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 6c7f8bca574e..5d77f75a9f9c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -862,6 +862,7 @@ struct intel_plane {
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
struct intel_crtc *crtc);
+ bool (*get_hw_state)(struct intel_plane *plane);
int (*check_plane)(struct intel_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
@@ -1924,6 +1925,7 @@ void skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
+bool skl_plane_get_hw_state(struct intel_plane *plane);
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index ab5bf4e2e28e..6074e04dc99f 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1390,6 +1390,11 @@ static int glk_init_workarounds(struct intel_engine_cs *engine)
if (ret)
return ret;
+ /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
+ ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+ if (ret)
+ return ret;
+
/* WaToEnableHwFixForPushConstHWBug:glk */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 3bf65288ffff..5809b29044fc 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -193,7 +193,7 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
};
if (!pci_dev_present(atom_hdaudio_ids)) {
- DRM_INFO("%s\n", "HDaudio controller not detected, using LPE audio instead\n");
+ DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
lpe_present = true;
}
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d36e25607435..e71a8cd50498 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -974,6 +974,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
+ if (i915_gem_request_completed(request))
+ return;
+
if (prio <= READ_ONCE(request->priotree.priority))
return;
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 6e3b430fccdc..55ea5eb3b7df 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -590,7 +590,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->psr.active) {
- i915_reg_t psr_ctl;
+ i915_reg_t psr_status;
u32 psr_status_mask;
if (dev_priv->psr.aux_frame_sync)
@@ -599,24 +599,24 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
0);
if (dev_priv->psr.psr2_support) {
- psr_ctl = EDP_PSR2_CTL;
+ psr_status = EDP_PSR2_STATUS_CTL;
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
- I915_WRITE(psr_ctl,
- I915_READ(psr_ctl) &
+ I915_WRITE(EDP_PSR2_CTL,
+ I915_READ(EDP_PSR2_CTL) &
~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
} else {
- psr_ctl = EDP_PSR_STATUS_CTL;
+ psr_status = EDP_PSR_STATUS_CTL;
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
- I915_WRITE(psr_ctl,
- I915_READ(psr_ctl) & ~EDP_PSR_ENABLE);
+ I915_WRITE(EDP_PSR_CTL,
+ I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
}
/* Wait till PSR is idle */
if (intel_wait_for_register(dev_priv,
- psr_ctl, psr_status_mask, 0,
+ psr_status, psr_status_mask, 0,
2000))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8af286c63d3b..7e115f3927f6 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Enabling DC5\n");
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ SKL_SELECT_ALTERNATE_DC_EXIT);
+
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
@@ -625,6 +630,11 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
DRM_DEBUG_KMS("Disabling DC6\n");
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ SKL_SELECT_ALTERNATE_DC_EXIT);
+
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
@@ -1786,6 +1796,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 4fcf80ca91dd..4a8a5d918a83 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -329,6 +329,26 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+bool
+skl_plane_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static void
chv_update_csc(struct intel_plane *plane, uint32_t format)
{
@@ -506,6 +526,26 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static bool
+vlv_plane_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -646,6 +686,25 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static bool
+ivb_plane_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -777,6 +836,25 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static bool
+g4x_plane_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static int
intel_check_sprite_plane(struct intel_plane *plane,
struct intel_crtc_state *crtc_state,
@@ -1232,6 +1310,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
+ intel_plane->get_hw_state = skl_plane_get_hw_state;
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1242,6 +1321,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
+ intel_plane->get_hw_state = skl_plane_get_hw_state;
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1252,6 +1332,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
+ intel_plane->get_hw_state = vlv_plane_get_hw_state;
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
@@ -1267,6 +1348,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
+ intel_plane->get_hw_state = ivb_plane_get_hw_state;
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1277,6 +1359,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = g4x_update_plane;
intel_plane->disable_plane = g4x_disable_plane;
+ intel_plane->get_hw_state = g4x_plane_get_hw_state;
modifiers = i9xx_plane_format_modifiers;
if (IS_GEN6(dev_priv)) {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 0760b93e9d1f..baab93398e54 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -121,6 +121,7 @@ int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2615912430cc..ef687414969e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -224,7 +224,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
/* Determine if we can get a cache-coherent map, forcing
* uncached mapping if we can't.
*/
- if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+ if (!nouveau_drm_use_coherent_gpu_mapping(drm))
nvbo->force_coherent = true;
}
@@ -262,7 +262,8 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
(flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
continue;
- if ((flags & TTM_PL_FLAG_TT ) && !vmm->page[i].host)
+ if ((flags & TTM_PL_FLAG_TT) &&
+ (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
/* Select this page size if it's the first that supports
@@ -1446,11 +1447,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
args.nv50.ro = 0;
args.nv50.kind = mem->kind;
args.nv50.comp = mem->comp;
+ argc = sizeof(args.nv50);
break;
case NVIF_CLASS_MEM_GF100:
args.gf100.version = 0;
args.gf100.ro = 0;
args.gf100.kind = mem->kind;
+ argc = sizeof(args.gf100);
break;
default:
WARN_ON(1);
@@ -1458,7 +1461,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
}
ret = nvif_object_map_handle(&mem->mem.object,
- &argc, argc,
+ &args, argc,
&handle, &length);
if (ret != 1)
return ret ? ret : -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8d4a5be3b913..56fe261b6268 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -152,9 +152,9 @@ nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
work->cli = cli;
mutex_lock(&cli->lock);
list_add_tail(&work->head, &cli->worker);
- mutex_unlock(&cli->lock);
if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
nouveau_cli_work_fence(fence, &work->cb);
+ mutex_unlock(&cli->lock);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 3331e82ae9e7..96f6bd8aee5d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -157,8 +157,8 @@ struct nouveau_drm {
struct nvif_object copy;
int mtrr;
int type_vram;
- int type_host;
- int type_ncoh;
+ int type_host[2];
+ int type_ncoh[2];
} ttm;
/* GEM interface support */
@@ -217,6 +217,13 @@ nouveau_drm(struct drm_device *dev)
return dev->dev_private;
}
+static inline bool
+nouveau_drm_use_coherent_gpu_mapping(struct nouveau_drm *drm)
+{
+ struct nvif_mmu *mmu = &drm->client.mmu;
+ return !(mmu->type[drm->ttm.type_host[0]].type & NVIF_MEM_UNCACHED);
+}
+
int nouveau_pmops_suspend(struct device *);
int nouveau_pmops_resume(struct device *);
bool nouveau_pmops_runtime(void);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index c533d8e04afc..be7357bf2246 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -429,7 +429,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
drm_fb_helper_unregister_fbi(&fbcon->helper);
drm_fb_helper_fini(&fbcon->helper);
- if (nouveau_fb->nvbo) {
+ if (nouveau_fb && nouveau_fb->nvbo) {
nouveau_vma_del(&nouveau_fb->vma);
nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 589a9621db76..c002f8968507 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -103,10 +103,10 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
u8 type;
int ret;
- if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
- type = drm->ttm.type_ncoh;
+ if (!nouveau_drm_use_coherent_gpu_mapping(drm))
+ type = drm->ttm.type_ncoh[!!mem->kind];
else
- type = drm->ttm.type_host;
+ type = drm->ttm.type_host[0];
if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
mem->comp = mem->kind = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 08b974b30482..dff51a0ee028 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -235,27 +235,46 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
drm->ttm.mem_global_ref.release = NULL;
}
-int
-nouveau_ttm_init(struct nouveau_drm *drm)
+static int
+nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
- struct nvkm_device *device = nvxx_device(&drm->client.device);
- struct nvkm_pci *pci = device->pci;
struct nvif_mmu *mmu = &drm->client.mmu;
- struct drm_device *dev = drm->dev;
- int typei, ret;
+ int typei;
typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
- NVIF_MEM_COHERENT);
+ kind | NVIF_MEM_COHERENT);
if (typei < 0)
return -ENOSYS;
- drm->ttm.type_host = typei;
+ drm->ttm.type_host[!!kind] = typei;
- typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
+ typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
if (typei < 0)
return -ENOSYS;
- drm->ttm.type_ncoh = typei;
+ drm->ttm.type_ncoh[!!kind] = typei;
+ return 0;
+}
+
+int
+nouveau_ttm_init(struct nouveau_drm *drm)
+{
+ struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_pci *pci = device->pci;
+ struct nvif_mmu *mmu = &drm->client.mmu;
+ struct drm_device *dev = drm->dev;
+ int typei, ret;
+
+ ret = nouveau_ttm_init_host(drm, 0);
+ if (ret)
+ return ret;
+
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+ drm->client.device.info.chipset != 0x50) {
+ ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
+ if (ret)
+ return ret;
+ }
if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index 9e2628dd8e4d..f5371d96b003 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -67,8 +67,8 @@ nouveau_vma_del(struct nouveau_vma **pvma)
nvif_vmm_put(&vma->vmm->vmm, &tmp);
}
list_del(&vma->head);
- *pvma = NULL;
kfree(*pvma);
+ *pvma = NULL;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index e14643615698..08e77cd55e6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1251,7 +1251,7 @@ nvaa_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g98_mc_new,
- .mmu = g84_mmu_new,
+ .mmu = mcp77_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1283,7 +1283,7 @@ nvac_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g98_mc_new,
- .mmu = g84_mmu_new,
+ .mmu = mcp77_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -2369,7 +2369,7 @@ nv13b_chipset = {
.imem = gk20a_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp10b_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gp10b_mmu_new,
.secboot = gp10b_secboot_new,
.pmu = gm20b_pmu_new,
.timer = gk20a_timer_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index a2978a37b4f3..700fc754f28a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -174,6 +174,7 @@ gf119_sor = {
.links = gf119_sor_dp_links,
.power = g94_sor_dp_power,
.pattern = gf119_sor_dp_pattern,
+ .drive = gf119_sor_dp_drive,
.vcpi = gf119_sor_dp_vcpi,
.audio = gf119_sor_dp_audio,
.audio_sym = gf119_sor_dp_audio_sym,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 9646adec57cb..243f0a5c8a62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -73,7 +73,8 @@ static int
nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_bar *bar = nvkm_bar(subdev);
- bar->func->bar1.fini(bar);
+ if (bar->func->bar1.fini)
+ bar->func->bar1.fini(bar);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index b10077d38839..35878fb538f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -26,7 +26,6 @@ gk20a_bar_func = {
.dtor = gf100_bar_dtor,
.oneinit = gf100_bar_oneinit,
.bar1.init = gf100_bar_bar1_init,
- .bar1.fini = gf100_bar_bar1_fini,
.bar1.wait = gf100_bar_bar1_wait,
.bar1.vmm = gf100_bar_bar1_vmm,
.flush = g84_bar_flush,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 972370ed36f0..7c7efa4ea0d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -36,6 +36,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
if (data) {
*ver = nvbios_rd08(bios, data + 0x00);
switch (*ver) {
+ case 0x20:
case 0x21:
case 0x30:
case 0x40:
@@ -63,6 +64,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
if (data && idx < *cnt) {
u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
switch (*ver * !!outp) {
+ case 0x20:
case 0x21:
case 0x30:
*hdr = nvbios_rd08(bios, data + 0x04);
@@ -96,12 +98,16 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
info->type = nvbios_rd16(bios, data + 0x00);
info->mask = nvbios_rd16(bios, data + 0x02);
switch (*ver) {
+ case 0x20:
+ info->mask |= 0x00c0; /* match any link */
+ /* fall-through */
case 0x21:
case 0x30:
info->flags = nvbios_rd08(bios, data + 0x05);
info->script[0] = nvbios_rd16(bios, data + 0x06);
info->script[1] = nvbios_rd16(bios, data + 0x08);
- info->lnkcmp = nvbios_rd16(bios, data + 0x0a);
+ if (*len >= 0x0c)
+ info->lnkcmp = nvbios_rd16(bios, data + 0x0a);
if (*len >= 0x0f) {
info->script[2] = nvbios_rd16(bios, data + 0x0c);
info->script[3] = nvbios_rd16(bios, data + 0x0e);
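Version-2.0 tables are shorter than 2.1+, so the lnkcmp read above is now gated on the advertised entry length. A sketch of the defensive pattern, with a hypothetical layout; note this sketch gates the 16-bit read at +0x0e on 0x10 bytes, while the VBIOS tables above use their own length convention:

	struct dp_entry {
		unsigned short lnkcmp, script2, script3;
	};

	static void parse_entry(const unsigned char *data, unsigned int len,
				struct dp_entry *e)
	{
		if (len >= 0x0c)	/* bytes +0x0a..+0x0b present */
			e->lnkcmp = data[0x0a] | data[0x0b] << 8;
		if (len >= 0x10) {	/* bytes +0x0c..+0x0f present */
			e->script2 = data[0x0c] | data[0x0d] << 8;
			e->script3 = data[0x0e] | data[0x0f] << 8;
		}
	}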
@@ -170,6 +176,7 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
memset(info, 0x00, sizeof(*info));
if (data) {
switch (*ver) {
+ case 0x20:
case 0x21:
info->dc = nvbios_rd08(bios, data + 0x02);
info->pe = nvbios_rd08(bios, data + 0x03);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 1ba7289684aa..db48a1daca0c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -249,7 +249,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
- refcount_inc(&iobj->maps);
+ refcount_set(&iobj->maps, 1);
}
mutex_unlock(&imem->subdev.mutex);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 352a65f9371c..67ee983bb026 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -4,6 +4,7 @@ nvkm-y += nvkm/subdev/mmu/nv41.o
nvkm-y += nvkm/subdev/mmu/nv44.o
nvkm-y += nvkm/subdev/mmu/nv50.o
nvkm-y += nvkm/subdev/mmu/g84.o
+nvkm-y += nvkm/subdev/mmu/mcp77.o
nvkm-y += nvkm/subdev/mmu/gf100.o
nvkm-y += nvkm/subdev/mmu/gk104.o
nvkm-y += nvkm/subdev/mmu/gk20a.o
@@ -22,6 +23,7 @@ nvkm-y += nvkm/subdev/mmu/vmmnv04.o
nvkm-y += nvkm/subdev/mmu/vmmnv41.o
nvkm-y += nvkm/subdev/mmu/vmmnv44.o
nvkm-y += nvkm/subdev/mmu/vmmnv50.o
+nvkm-y += nvkm/subdev/mmu/vmmmcp77.o
nvkm-y += nvkm/subdev/mmu/vmmgf100.o
nvkm-y += nvkm/subdev/mmu/vmmgk104.o
nvkm-y += nvkm/subdev/mmu/vmmgk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
new file mode 100644
index 000000000000..0527b50730d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+mcp77_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, mcp77_vmm_new, false, 0x0200 },
+ .kind = nv50_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+mcp77_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&mcp77_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 6d8f61ea467a..da06e64d8a7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -95,6 +95,9 @@ struct nvkm_vmm_desc {
const struct nvkm_vmm_desc_func *func;
};
+extern const struct nvkm_vmm_desc nv50_vmm_desc_12[];
+extern const struct nvkm_vmm_desc nv50_vmm_desc_16[];
+
extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
@@ -169,6 +172,11 @@ int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
const char *, struct nvkm_vmm **);
int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+int nv50_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+void nv50_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
+int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+void nv50_vmm_flush(struct nvkm_vmm *, int);
+
int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
@@ -200,6 +208,8 @@ int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
+int mcp77_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
new file mode 100644
index 000000000000..e63d984cbfd4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+static const struct nvkm_vmm_func
+mcp77_vmm = {
+ .join = nv50_vmm_join,
+ .part = nv50_vmm_part,
+ .valid = nv50_vmm_valid,
+ .flush = nv50_vmm_flush,
+ .page_block = 1 << 29,
+ .page = {
+ { 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxx },
+ { 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+int
+mcp77_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&mcp77_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index 863a2edd9861..64f75d906202 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -32,7 +32,7 @@ static inline void
nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
- u64 next = addr | map->type, data;
+ u64 next = addr + map->type, data;
u32 pten;
int log2blk;
@@ -69,7 +69,7 @@ nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
nvkm_kmap(pt->memory);
while (ptes--) {
- const u64 data = *map->dma++ | map->type;
+ const u64 data = *map->dma++ + map->type;
VMM_WO064(pt, vmm, ptei++ * 8, data);
map->type += map->ctag;
}
@@ -163,21 +163,21 @@ nv50_vmm_pgd = {
.pde = nv50_vmm_pgd_pde,
};
-static const struct nvkm_vmm_desc
+const struct nvkm_vmm_desc
nv50_vmm_desc_12[] = {
{ PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
{}
};
-static const struct nvkm_vmm_desc
+const struct nvkm_vmm_desc
nv50_vmm_desc_16[] = {
{ PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
{}
};
-static void
+void
nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
{
struct nvkm_subdev *subdev = &vmm->mmu->subdev;
@@ -223,7 +223,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
mutex_unlock(&subdev->mutex);
}
-static int
+int
nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
struct nvkm_vmm_map *map)
{
@@ -321,7 +321,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return 0;
}
-static void
+void
nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
struct nvkm_vmm_join *join;
@@ -335,7 +335,7 @@ nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
}
}
-static int
+int
nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index b1b1f3626b96..ee2431a7804e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -71,6 +71,10 @@ nvkm_pci_intr(int irq, void *arg)
struct nvkm_pci *pci = arg;
struct nvkm_device *device = pci->subdev.device;
bool handled = false;
+
+ if (pci->irq < 0)
+ return IRQ_HANDLED;
+
nvkm_mc_intr_unarm(device);
if (pci->msi)
pci->func->msi_rearm(pci);
@@ -84,11 +88,6 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
- if (pci->irq >= 0) {
- free_irq(pci->irq, pci);
- pci->irq = -1;
- }
-
if (pci->agp.bridge)
nvkm_agp_fini(pci);
@@ -108,8 +107,20 @@ static int
nvkm_pci_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
- if (pci_is_pcie(pci->pdev))
- return nvkm_pcie_oneinit(pci);
+ struct pci_dev *pdev = pci->pdev;
+ int ret;
+
+ if (pci_is_pcie(pci->pdev)) {
+ ret = nvkm_pcie_oneinit(pci);
+ if (ret)
+ return ret;
+ }
+
+ ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
+ if (ret)
+ return ret;
+
+ pci->irq = pdev->irq;
return 0;
}
@@ -117,7 +128,6 @@ static int
nvkm_pci_init(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
- struct pci_dev *pdev = pci->pdev;
int ret;
if (pci->agp.bridge) {
@@ -131,21 +141,34 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
if (pci->func->init)
pci->func->init(pci);
- ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
- if (ret)
- return ret;
+ /* Ensure MSI interrupts are armed, for the case where there are
+ * already interrupts pending (for whatever reason) at load time.
+ */
+ if (pci->msi)
+ pci->func->msi_rearm(pci);
- pci->irq = pdev->irq;
- return ret;
+ return 0;
}
static void *
nvkm_pci_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
+
nvkm_agp_dtor(pci);
+
+ if (pci->irq >= 0) {
+ /* free_irq() can invoke the handler (e.g. with CONFIG_DEBUG_SHIRQ),
+ * so pci->irq is set to -1 first to signal that the device has been
+ * torn down and the handler should be a no-op.
+ */
+ int irq = pci->irq;
+ pci->irq = -1;
+ free_irq(irq, pci);
+ }
+
if (pci->msi)
pci_disable_msi(pci->pdev);
+
return nvkm_pci(subdev);
}
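The sentinel ordering matters: pci->irq is cleared before free_irq() so that a handler invocation during teardown sees -1 and returns immediately, matching the early-out added to nvkm_pci_intr() above. A minimal sketch of the pattern, with a hypothetical driver struct:

	#include <linux/interrupt.h>

	struct foo {
		int irq;
	};

	static irqreturn_t foo_intr(int irq, void *arg)
	{
		struct foo *foo = arg;

		if (foo->irq < 0)	/* torn down: treat as handled no-op */
			return IRQ_HANDLED;

		/* ... normal interrupt servicing ... */
		return IRQ_HANDLED;
	}

	static void foo_teardown(struct foo *foo)
	{
		if (foo->irq >= 0) {
			int irq = foo->irq;

			foo->irq = -1;	/* publish the sentinel first */
			free_irq(irq, foo);
		}
	}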
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index e626eddf24d5..23db74ae1826 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -78,6 +78,8 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
/* then read the message */
msg.len = cnt & 0xf;
+ if (msg.len > CEC_MAX_MSG_SIZE - 2)
+ msg.len = CEC_MAX_MSG_SIZE - 2;
msg.msg[0] = hdmi_read_reg(core->base,
HDMI_CEC_RX_CMD_HEADER);
msg.msg[1] = hdmi_read_reg(core->base,
@@ -104,26 +106,6 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
}
}
-static void hdmi_cec_transmit_fifo_empty(struct hdmi_core_data *core, u32 stat1)
-{
- if (stat1 & 2) {
- u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
-
- cec_transmit_done(core->adap,
- CEC_TX_STATUS_NACK |
- CEC_TX_STATUS_MAX_RETRIES,
- 0, (dbg3 >> 4) & 7, 0, 0);
- } else if (stat1 & 1) {
- cec_transmit_done(core->adap,
- CEC_TX_STATUS_ARB_LOST |
- CEC_TX_STATUS_MAX_RETRIES,
- 0, 0, 0, 0);
- } else if (stat1 == 0) {
- cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
- 0, 0, 0, 0);
- }
-}
-
void hdmi4_cec_irq(struct hdmi_core_data *core)
{
u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0);
@@ -132,27 +114,21 @@ void hdmi4_cec_irq(struct hdmi_core_data *core)
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0);
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1);
- if (stat0 & 0x40)
+ if (stat0 & 0x20) {
+ cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
+ 0, 0, 0, 0);
REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
- else if (stat0 & 0x24)
- hdmi_cec_transmit_fifo_empty(core, stat1);
- if (stat1 & 2) {
+ } else if (stat1 & 0x02) {
u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
cec_transmit_done(core->adap,
CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES,
0, (dbg3 >> 4) & 7, 0, 0);
- } else if (stat1 & 1) {
- cec_transmit_done(core->adap,
- CEC_TX_STATUS_ARB_LOST |
- CEC_TX_STATUS_MAX_RETRIES,
- 0, 0, 0, 0);
+ REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
}
if (stat0 & 0x02)
hdmi_cec_received_msg(core);
- if (stat1 & 0x3)
- REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
}
static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap)
@@ -231,18 +207,14 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
/*
* Enable CEC interrupts:
* Transmit Buffer Full/Empty Change event
- * Transmitter FIFO Empty event
* Receiver FIFO Not Empty event
*/
- hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x26);
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x22);
/*
* Enable CEC interrupts:
- * RX FIFO Overrun Error event
- * Short Pulse Detected event
* Frame Retransmit Count Exceeded event
- * Start Bit Irregularity event
*/
- hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x0f);
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x02);
/* cec calibration enable (self clearing) */
hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03);
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 8fdc56c1c953..b9bfa806d346 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -982,25 +982,14 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
xbuf_size = count * sizeof(*x);
ybuf_size = count * sizeof(*y);
- x = kmalloc(xbuf_size, GFP_KERNEL);
- if (x == NULL)
- return -ENOMEM;
- y = kmalloc(ybuf_size, GFP_KERNEL);
- if (y == NULL) {
- kfree(x);
- return -ENOMEM;
- }
- if (copy_from_user(x, depth->x, xbuf_size)) {
- kfree(x);
- kfree(y);
- return -EFAULT;
- }
- if (copy_from_user(y, depth->y, xbuf_size)) {
+ x = memdup_user(depth->x, xbuf_size);
+ if (IS_ERR(x))
+ return PTR_ERR(x);
+ y = memdup_user(depth->y, ybuf_size);
+ if (IS_ERR(y)) {
kfree(x);
- kfree(y);
- return -EFAULT;
+ return PTR_ERR(y);
}
-
buffer_size = depth->n * sizeof(u32);
buffer = memdup_user(depth->buffer, buffer_size);
if (IS_ERR(buffer)) {
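memdup_user() folds the kmalloc() + copy_from_user() + error-unwind triple into a single call, and in passing fixes the old code's copy of xbuf_size bytes into the y buffer. A minimal sketch of the conversion, as a hypothetical helper:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int copy_coords(const void __user *ux, const void __user *uy,
			       size_t xbuf_size, size_t ybuf_size,
			       int **px, int **py)
	{
		int *x, *y;

		x = memdup_user(ux, xbuf_size);
		if (IS_ERR(x))
			return PTR_ERR(x);

		y = memdup_user(uy, ybuf_size);	/* ybuf_size, not xbuf_size */
		if (IS_ERR(y)) {
			kfree(x);
			return PTR_ERR(y);
		}

		*px = x;
		*py = y;
		return 0;
	}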
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index dda904ec0534..500b6fb3e028 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -175,11 +175,31 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
writel(val, hdmi->base + SUN4I_HDMI_VID_TIMING_POL_REG);
}
+static enum drm_mode_status sun4i_hdmi_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
+ unsigned long rate = mode->clock * 1000;
+ unsigned long diff = rate / 200; /* +-0.5% allowed by HDMI spec */
+ long rounded_rate;
+
+ /* 165 MHz is the typical max pixelclock frequency for HDMI <= 1.2 */
+ if (rate > 165000000)
+ return MODE_CLOCK_HIGH;
+ rounded_rate = clk_round_rate(hdmi->tmds_clk, rate);
+ if (rounded_rate > 0 &&
+ max_t(unsigned long, rounded_rate, rate) -
+ min_t(unsigned long, rounded_rate, rate) < diff)
+ return MODE_OK;
+ return MODE_NOCLOCK;
+}
+
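Worked numbers for the 0.5% window above (mode->clock is in kHz, so rate is in Hz), as a small standalone check:

	#include <assert.h>

	int main(void)
	{
		unsigned long rate = 148500UL * 1000;	/* 148.5 MHz mode */
		unsigned long diff = rate / 200;	/* 0.5% = 742500 Hz */

		assert(diff == 742500);
		assert(rate - 147500000UL >= diff);	/* 147.5 MHz: rejected */
		assert(rate - 148250000UL < diff);	/* 148.25 MHz: accepted */
		return 0;
	}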
static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
.atomic_check = sun4i_hdmi_atomic_check,
.disable = sun4i_hdmi_disable,
.enable = sun4i_hdmi_enable,
.mode_set = sun4i_hdmi_mode_set,
+ .mode_valid = sun4i_hdmi_mode_valid,
};
static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index dc332ea56f6c..3ecffa52c814 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -102,10 +102,13 @@ static int sun4i_tmds_determine_rate(struct clk_hw *hw,
goto out;
}
- if (abs(rate - rounded / i) <
- abs(rate - best_parent / best_div)) {
+ if (!best_parent ||
+ abs(rate - rounded / i / j) <
+ abs(rate - best_parent / best_half /
+ best_div)) {
best_parent = rounded;
- best_div = i;
+ best_half = i;
+ best_div = j;
}
}
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index e122f5b2a395..f4284b51bdca 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -724,12 +724,12 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
if (IS_ERR(tcon->crtc)) {
dev_err(dev, "Couldn't create our CRTC\n");
ret = PTR_ERR(tcon->crtc);
- goto err_free_clocks;
+ goto err_free_dotclock;
}
ret = sun4i_rgb_init(drm, tcon);
if (ret < 0)
- goto err_free_clocks;
+ goto err_free_dotclock;
if (tcon->quirks->needs_de_be_mux) {
/*
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index b0a1dedac802..476079f1255f 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2656,6 +2656,9 @@ static int tegra_sor_probe(struct platform_device *pdev)
name, err);
goto remove;
}
+ } else {
+ /* fall back to the module clock on SOR0 (eDP/LVDS only) */
+ sor->clk_out = sor->clk;
}
sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 44343a2bf55c..5d252fb27a82 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -455,6 +455,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
freed += (nr_free_pool - shrink_pages) << pool->order;
if (freed >= sc->nr_to_scan)
break;
+ shrink_pages <<= pool->order;
}
mutex_unlock(&lock);
return freed;
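The shrinker's budget (sc->nr_to_scan) is in pages, while a pool of order n frees chunks of 2^n pages, so the leftover chunk count must be scaled back to pages before the next pool is visited; that is what the added shift does. Worked numbers, assuming an order-2 pool with only 3 chunks available:

	#include <assert.h>

	int main(void)
	{
		unsigned int order = 2;			/* 4 pages per chunk */
		unsigned int budget_pages = 16;		/* sc->nr_to_scan */
		unsigned int pool_chunks = 3;		/* chunks in this pool */

		unsigned int want_chunks = budget_pages >> order;	/* 4 */
		unsigned int left_chunks = want_chunks - pool_chunks;	/* 1 */
		unsigned int freed_pages =
			(want_chunks - left_chunks) << order;		/* 12 */

		/* the fix: carry the leftover to the next pool in *pages* */
		unsigned int next_budget = left_chunks << order;	/* 4 */

		assert(freed_pages == 12 && next_budget == 4);
		return 0;
	}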
@@ -543,7 +544,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
int r = 0;
unsigned i, j, cpages;
unsigned npages = 1 << order;
- unsigned max_cpages = min(count, (unsigned)NUM_PAGES_TO_ALLOC);
+ unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
/* allocate array for page caching change */
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
@@ -1006,6 +1007,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
pr_info("Initializing pool allocator\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+ if (!_manager)
+ return -ENOMEM;
ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 6c32c89a83a9..c94cce96544c 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev)
struct vc4_exec_info *exec[2];
struct vc4_bo *bo;
unsigned long irqflags;
- unsigned int i, j, unref_list_count, prev_idx;
+ unsigned int i, j, k, unref_list_count;
kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
if (!kernel_state)
@@ -182,7 +182,7 @@ vc4_save_hang_state(struct drm_device *dev)
return;
}
- prev_idx = 0;
+ k = 0;
for (i = 0; i < 2; i++) {
if (!exec[i])
continue;
@@ -197,7 +197,7 @@ vc4_save_hang_state(struct drm_device *dev)
WARN_ON(!refcount_read(&bo->usecnt));
refcount_inc(&bo->usecnt);
drm_gem_object_get(&exec[i]->bo[j]->base);
- kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+ kernel_state->bo[k++] = &exec[i]->bo[j]->base;
}
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
@@ -205,12 +205,12 @@ vc4_save_hang_state(struct drm_device *dev)
* because they are naturally unpurgeable.
*/
drm_gem_object_get(&bo->base.base);
- kernel_state->bo[j + prev_idx] = &bo->base.base;
- j++;
+ kernel_state->bo[k++] = &bo->base.base;
}
- prev_idx = j + 1;
}
+ WARN_ON_ONCE(k != state->bo_count);
+
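A single running cursor (k) replaces the reconstructed prev_idx offsets, which both left a one-slot gap (the "+ 1") and reused a j clobbered by the second inner loop. A tiny standalone illustration of the compaction pattern, with made-up sizes:

	#include <assert.h>

	int main(void)
	{
		int dst[8], src_len[2] = { 3, 2 };
		int i, j, k = 0;

		for (i = 0; i < 2; i++)
			for (j = 0; j < src_len[i]; j++)
				dst[k++] = i * 10 + j;	/* fake payload */

		assert(k == 5 && dst[3] == 10);	/* contiguous, no gaps */
		return 0;
	}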
if (exec[0])
state->start_bin = exec[0]->ct0ca;
if (exec[1])
@@ -436,6 +436,19 @@ vc4_flush_caches(struct drm_device *dev)
VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}
+static void
+vc4_flush_texture_caches(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ V3D_WRITE(V3D_L2CACTL,
+ V3D_L2CACTL_L2CCLR);
+
+ V3D_WRITE(V3D_SLCACTL,
+ VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
+ VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
+}
+
/* Sets the registers for the next job to be actually be executed in
* the hardware.
*
@@ -474,6 +487,14 @@ vc4_submit_next_render_job(struct drm_device *dev)
if (!exec)
return;
+ /* A previous RCL may have written to one of our textures, and
+ * our full cache flush at bin time may have occurred before
+ * that RCL completed. Flush the texture cache now, but not
+ * the instructions or uniforms (since we don't write those
+ * from an RCL).
+ */
+ vc4_flush_texture_caches(dev);
+
submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}
@@ -888,8 +909,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
/* If we got force-completed because of GPU reset rather than
* through our IRQ handler, signal the fence now.
*/
- if (exec->fence)
+ if (exec->fence) {
dma_fence_signal(exec->fence);
+ dma_fence_put(exec->fence);
+ }
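dma_fence_signal() only wakes waiters; it never drops a reference, so every path that signals the exec fence must also put it, as this hunk and the vc4_irq.c hunk below now do. A minimal sketch of that rule, as a hypothetical helper:

	#include <linux/dma-fence.h>

	static void complete_job_fence(struct dma_fence **fence)
	{
		if (*fence) {
			dma_fence_signal(*fence);
			dma_fence_put(*fence);	/* signal doesn't drop the ref */
			*fence = NULL;		/* guard against a double put */
		}
	}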
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++) {
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 61b2e5377993..3dd62d75f531 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -139,6 +139,7 @@ vc4_irq_finish_render_job(struct drm_device *dev)
list_move_tail(&exec->head, &vc4->job_done_list);
if (exec->fence) {
dma_fence_signal_locked(exec->fence);
+ dma_fence_put(exec->fence);
exec->fence = NULL;
}
vc4_submit_next_render_job(dev);
@@ -208,9 +209,6 @@ vc4_irq_postinstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- /* Undo the effects of a previous vc4_irq_uninstall. */
- enable_irq(dev->irq);
-
/* Enable both the render done and out of memory interrupts. */
V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 622cd43840b8..493f392b3a0a 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev)
return ret;
vc4_v3d_init_hw(vc4->dev);
+
+ /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
+ enable_irq(vc4->dev->irq);
vc4_irq_postinstall(vc4->dev);
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 7e5f30e234b1..d08753e8fd94 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -713,7 +713,7 @@ extern int vmw_present_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern unsigned int vmw_fops_poll(struct file *filp,
+extern __poll_t vmw_fops_poll(struct file *filp,
struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 21c62a34e558..87e8af5776a3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2731,6 +2731,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
}
view_type = vmw_view_cmd_to_type(header->id);
+ if (view_type == vmw_view_max)
+ return -EINVAL;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 01be355525e4..67f844678ac8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -412,7 +412,7 @@ out_clips:
* Wrapper around the drm_poll function that makes sure the device is
* processing the fifo if drm_poll decides to wait.
*/
-unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
+__poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 0545740b3724..fcd58145d0da 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -697,7 +697,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
vps->pinned = 0;
/* Mapping is managed by prepare_fb/cleanup_fb */
- memset(&vps->guest_map, 0, sizeof(vps->guest_map));
memset(&vps->host_map, 0, sizeof(vps->host_map));
vps->cpp = 0;
@@ -760,11 +759,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
/* Should have been freed by cleanup_fb */
- if (vps->guest_map.virtual) {
- DRM_ERROR("Guest mapping not freed\n");
- ttm_bo_kunmap(&vps->guest_map);
- }
-
if (vps->host_map.virtual) {
DRM_ERROR("Host mapping not freed\n");
ttm_bo_kunmap(&vps->host_map);
@@ -1869,7 +1863,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
*/
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
- return -ENOSYS;
+ return -EINVAL;
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index ff9c8389ff21..cd9da2dd79af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -175,7 +175,7 @@ struct vmw_plane_state {
int pinned;
/* For CPU Blit */
- struct ttm_bo_kmap_obj host_map, guest_map;
+ struct ttm_bo_kmap_obj host_map;
unsigned int cpp;
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b8a09807c5de..3824595fece1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -266,8 +266,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
.set_property = vmw_du_connector_set_property,
.destroy = vmw_ldu_connector_destroy,
.reset = vmw_du_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+ .atomic_destroy_state = vmw_du_connector_destroy_state,
.atomic_set_property = vmw_du_connector_atomic_set_property,
.atomic_get_property = vmw_du_connector_atomic_get_property,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index bc5f6026573d..63a4cd794b73 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -420,8 +420,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
.set_property = vmw_du_connector_set_property,
.destroy = vmw_sou_connector_destroy,
.reset = vmw_du_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+ .atomic_destroy_state = vmw_du_connector_destroy_state,
.atomic_set_property = vmw_du_connector_atomic_set_property,
.atomic_get_property = vmw_du_connector_atomic_get_property,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 90b5437fd787..b68d74888ab1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -114,7 +114,7 @@ struct vmw_screen_target_display_unit {
bool defined;
/* For CPU Blit */
- struct ttm_bo_kmap_obj host_map, guest_map;
+ struct ttm_bo_kmap_obj host_map;
unsigned int cpp;
};
@@ -695,7 +695,8 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
s32 src_pitch, dst_pitch;
u8 *src, *dst;
bool not_used;
-
+ struct ttm_bo_kmap_obj guest_map;
+ int ret;
if (!dirty->num_hits)
return;
@@ -706,6 +707,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
if (width == 0 || height == 0)
return;
+ ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages,
+ &guest_map);
+ if (ret) {
+ DRM_ERROR("Failed mapping framebuffer for blit: %d\n",
+ ret);
+ goto out_cleanup;
+ }
/* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
@@ -713,7 +721,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
dst_pitch = ddirty->pitch;
- dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
+ dst = ttm_kmap_obj_virtual(&guest_map, &not_used);
dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
@@ -772,6 +780,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
+ ttm_bo_kunmap(&guest_map);
out_cleanup:
ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
ddirty->right = ddirty->bottom = S32_MIN;
@@ -1109,9 +1118,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- if (vps->guest_map.virtual)
- ttm_bo_kunmap(&vps->guest_map);
-
if (vps->host_map.virtual)
ttm_bo_kunmap(&vps->host_map);
@@ -1277,33 +1283,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
*/
if (vps->content_fb_type == SEPARATE_DMA &&
!(dev_priv->capabilities & SVGA_CAP_3D)) {
-
- struct vmw_framebuffer_dmabuf *new_vfbd;
-
- new_vfbd = vmw_framebuffer_to_vfbd(new_fb);
-
- ret = ttm_bo_reserve(&new_vfbd->buffer->base, false, false,
- NULL);
- if (ret)
- goto out_srf_unpin;
-
- ret = ttm_bo_kmap(&new_vfbd->buffer->base, 0,
- new_vfbd->buffer->base.num_pages,
- &vps->guest_map);
-
- ttm_bo_unreserve(&new_vfbd->buffer->base);
-
- if (ret) {
- DRM_ERROR("Failed to map content buffer to CPU\n");
- goto out_srf_unpin;
- }
-
ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0,
vps->surf->res.backup->base.num_pages,
&vps->host_map);
if (ret) {
DRM_ERROR("Failed to map display buffer to CPU\n");
- ttm_bo_kunmap(&vps->guest_map);
goto out_srf_unpin;
}
@@ -1350,7 +1334,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
stdu->display_srf = vps->surf;
stdu->content_fb_type = vps->content_fb_type;
stdu->cpp = vps->cpp;
- memcpy(&stdu->guest_map, &vps->guest_map, sizeof(vps->guest_map));
memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map));
if (!stdu->defined)
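
The stdu hunks replace a guest mapping cached in long-lived plane state with a mapping that exists only for the duration of the CPU blit. A minimal sketch of that transient map/use/unmap pattern (blit_once and do_blit are illustrative):

#include <drm/ttm/ttm_bo_api.h>

static int blit_once(struct ttm_buffer_object *bo,
                     void (*do_blit)(void *dst))
{
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        int ret;

        ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);  /* map whole object */
        if (ret)
                return ret;

        do_blit(ttm_kmap_obj_virtual(&map, &is_iomem)); /* use CPU address */

        ttm_bo_kunmap(&map);    /* mapping never outlives the blit */
        return 0;
}
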
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index d35d6d271f3f..dfd8d0048980 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1266,7 +1266,7 @@ done:
return ret_val;
}
-static unsigned int vga_arb_fpoll(struct file *file, poll_table *wait)
+static __poll_t vga_arb_fpoll(struct file *file, poll_table *wait)
{
pr_debug("%s\n", __func__);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 779c5ae47f36..19c499f5623d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -280,6 +280,7 @@ config HID_ELECOM
---help---
Support for ELECOM devices:
- BM084 Bluetooth Mouse
+ - EX-G Trackball (Wired and wireless)
- DEFT Trackball (Wired and wireless)
- HUGE Trackball (Wired and wireless)
@@ -396,6 +397,17 @@ config HID_ITE
---help---
Support for ITE devices not fully compliant with HID standard.
+config HID_JABRA
+ tristate "Jabra USB HID Driver"
+ depends on HID
+ ---help---
+ Support for Jabra USB HID devices.
+
+ Prevents mapping of vendor-defined HID usages to input events. Without
+ this driver, HID reports from Jabra devices may incorrectly be seen as
+ mouse button events.
+ Say M here if you may ever plug in a Jabra USB device.
+
config HID_TWINHAN
tristate "Twinhan IR remote control"
depends on HID
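
A hedged sketch of how a driver like this can veto the default usage-to-input mapping; it mirrors the behaviour the help text describes, not necessarily hid-jabra.c line for line:

#include <linux/hid.h>

static int jabra_input_mapping(struct hid_device *hdev,
                               struct hid_input *hi,
                               struct hid_field *field,
                               struct hid_usage *usage,
                               unsigned long **bit, int *max)
{
        bool vendor_defined =
                (usage->hid & HID_USAGE_PAGE) >= HID_UP_MSVENDOR;

        /* Returning -1 tells hid-input to drop the usage entirely,
         * so vendor pages never surface as bogus mouse buttons.
         */
        return vendor_defined ? -1 : 0;
}
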
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 235bd2a7b333..eb13b9e92d85 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for the HID driver
#
-hid-y := hid-core.o hid-input.o
+hid-y := hid-core.o hid-input.o hid-quirks.o
hid-$(CONFIG_DEBUG_FS) += hid-debug.o
obj-$(CONFIG_HID) += hid.o
@@ -52,6 +52,7 @@ obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o
obj-$(CONFIG_HID_ICADE) += hid-icade.o
obj-$(CONFIG_HID_ITE) += hid-ite.o
+obj-$(CONFIG_HID_JABRA) += hid-jabra.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 1bb7b63b3150..88b9703318e4 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -26,6 +26,7 @@
* any later version.
*/
+#include <linux/dmi.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/input/mt.h>
@@ -119,6 +120,24 @@ static const struct asus_touchpad_info asus_t100ta_tp = {
.max_contacts = 5,
};
+static const struct asus_touchpad_info asus_t100ha_tp = {
+ .max_x = 2640,
+ .max_y = 1320,
+ .res_x = 30, /* units/mm */
+ .res_y = 29, /* units/mm */
+ .contact_size = 5,
+ .max_contacts = 5,
+};
+
+static const struct asus_touchpad_info asus_t200ta_tp = {
+ .max_x = 3120,
+ .max_y = 1716,
+ .res_x = 30, /* units/mm */
+ .res_y = 28, /* units/mm */
+ .contact_size = 5,
+ .max_contacts = 5,
+};
+
static const struct asus_touchpad_info asus_t100chi_tp = {
.max_x = 2640,
.max_y = 1320,
@@ -606,7 +625,17 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
drvdata->quirks = QUIRK_SKIP_INPUT_MAPPING;
- drvdata->tp = &asus_t100ta_tp;
+ /*
+ * The T100HA uses the same USB-ids as the T100TAF and
+ * the T200TA uses the same USB-ids as the T100TA, while
+ * both have different max x/y values than the T100TA[F].
+ */
+ if (dmi_match(DMI_PRODUCT_NAME, "T100HAN"))
+ drvdata->tp = &asus_t100ha_tp;
+ else if (dmi_match(DMI_PRODUCT_NAME, "T200TA"))
+ drvdata->tp = &asus_t200ta_tp;
+ else
+ drvdata->tp = &asus_t100ta_tp;
}
}
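
dmi_match() does an exact string comparison against one SMBIOS/DMI field, which is what lets the probe above disambiguate boards that reuse the same USB ids. Illustrative standalone use:

#include <linux/dmi.h>

static bool running_on_t100ha(void)
{
        /* True only when the firmware product name is exactly "T100HAN". */
        return dmi_match(DMI_PRODUCT_NAME, "T100HAN");
}
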
@@ -686,9 +715,10 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
rdesc[55] = 0xdd;
}
- /* For the T100TA keyboard dock */
+ /* For the T100TA/T200TA keyboard dock */
if (drvdata->quirks & QUIRK_T100_KEYBOARD &&
- *rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) {
+ (*rsize == 76 || *rsize == 101) &&
+ rdesc[73] == 0x81 && rdesc[74] == 0x01) {
hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n");
rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT;
}
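
report_fixup() runs before the descriptor is parsed, so a driver can patch known-bad bytes in place; the hunk above widens the size check to also cover the T200TA dock's 101-byte descriptor. A hedged sketch of the callback shape (the offset and size check are illustrative):

#include <linux/hid.h>

static __u8 *my_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                             unsigned int *rsize)
{
        /* Clear a bogus "constant" flag so the field reports real data. */
        if (*rsize >= 76 && (rdesc[74] & HID_MAIN_ITEM_CONSTANT)) {
                hid_info(hdev, "fixing up constant input field\n");
                rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT;
        }
        return rdesc;
}
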
@@ -751,7 +781,10 @@ static const struct hid_device_id asus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3), QUIRK_G752_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
- USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD),
+ USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
+ QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f3fcb836a1f9..c2560aae5542 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -551,7 +551,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
break;
default:
- hid_err(parser->device, "unknown main item tag 0x%x\n", item->tag);
+ hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
ret = 0;
}
@@ -830,31 +830,6 @@ static int hid_scan_report(struct hid_device *hid)
break;
}
- /* fall back to generic driver in case specific driver doesn't exist */
- switch (hid->group) {
- case HID_GROUP_MULTITOUCH_WIN_8:
- /* fall-through */
- case HID_GROUP_MULTITOUCH:
- if (!IS_ENABLED(CONFIG_HID_MULTITOUCH))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_SENSOR_HUB:
- if (!IS_ENABLED(CONFIG_HID_SENSOR_HUB))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_RMI:
- if (!IS_ENABLED(CONFIG_HID_RMI))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_WACOM:
- if (!IS_ENABLED(CONFIG_HID_WACOM))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_LOGITECH_DJ_DEVICE:
- if (!IS_ENABLED(CONFIG_HID_LOGITECH_DJ))
- hid->group = HID_GROUP_GENERIC;
- break;
- }
vfree(parser);
return 0;
}
@@ -1597,8 +1572,8 @@ unlock:
}
EXPORT_SYMBOL_GPL(hid_input_report);
-static bool hid_match_one_id(struct hid_device *hdev,
- const struct hid_device_id *id)
+bool hid_match_one_id(const struct hid_device *hdev,
+ const struct hid_device_id *id)
{
return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
(id->group == HID_GROUP_ANY || id->group == hdev->group) &&
@@ -1606,7 +1581,7 @@ static bool hid_match_one_id(struct hid_device *hdev,
(id->product == HID_ANY_ID || id->product == hdev->product);
}
-const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
const struct hid_device_id *id)
{
for (; id->bus; id++)
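
With hid_match_id() now taking a const device and usable outside hid-core, table-driven lookups of the kind the new hid-quirks code needs become a one-liner. Illustrative use (the table entry is an example, not a real quirk):

static const struct hid_device_id my_table[] = {
        { HID_USB_DEVICE(0x046d, 0xc52b) },     /* example vendor/product */
        { }                                     /* zero terminator */
};

static bool my_device_listed(const struct hid_device *hdev)
{
        return hid_match_id(hdev, my_table) != NULL;
}
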
@@ -1862,541 +1837,6 @@ void hid_hw_close(struct hid_device *hdev)
}
EXPORT_SYMBOL_GPL(hid_hw_close);
-/*
- * A list of devices for which there is a specialized driver on HID bus.
- *
- * Please note that for multitouch devices (driven by hid-multitouch driver),
- * there is a proper autodetection and autoloading in place (based on presence
- * of HID_DG_CONTACTID), so those devices don't need to be added to this list,
- * as we are doing the right thing in hid_scan_usage().
- *
- * Autodetection for (USB) HID sensor hubs exists too. If a collection of type
- * physical is found inside a usage page of type sensor, hid-sensor-hub will be
- * used as a driver. See hid_scan_report().
- */
-static const struct hid_device_id hid_have_special_driver[] = {
-#if IS_ENABLED(CONFIG_HID_A4TECH)
- { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
- { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
- { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ACRUX)
- { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ALPS)
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) },
-#endif
-#if IS_ENABLED(CONFIG_HID_APPLE)
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
-#endif
-#if IS_ENABLED(CONFIG_HID_APPLEIR)
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ASUS)
- { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_AUREAL)
- { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
-#endif
-#if IS_ENABLED(CONFIG_HID_BELKIN)
- { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_BETOP_FF)
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CHERRY)
- { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CHICONY)
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CMEDIA)
- { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CORSAIR)
- { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CP2112)
- { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CYPRESS)
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_DRAGONRISE)
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ELECOM)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ELO)
- { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
-#endif
-#if IS_ENABLED(CONFIG_HID_EMS_FF)
- { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
-#endif
-#if IS_ENABLED(CONFIG_HID_EZKEY)
- { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GEMBIRD)
- { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GFRM)
- { HID_BLUETOOTH_DEVICE(0x58, 0x2000) },
- { HID_BLUETOOTH_DEVICE(0x471, 0x2210) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GREENASIA)
- { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GT683R)
- { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GYRATION)
- { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
-#endif
-#if IS_ENABLED(CONFIG_HID_HOLTEK)
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ICADE)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ITE)
- { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
-#endif
-#if IS_ENABLED(CONFIG_HID_KENSINGTON)
- { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_KEYTOUCH)
- { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
-#endif
-#if IS_ENABLED(CONFIG_HID_KYE)
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LCPOWER)
- { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LED)
- { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LENOVO)
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH)
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MAYFLASH)
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MICROSOFT)
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MONTEREY)
- { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MULTITOUCH)
- { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
-#endif
-#if IS_ENABLED(CONFIG_HID_WIIMOTE)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_NTI)
- { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) },
-#endif
-#if IS_ENABLED(CONFIG_HID_NTRIG)
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_5) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_6) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_7) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_8) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_9) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_10) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_11) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_12) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_13) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_14) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_15) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ORTEK)
- { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
- { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PENMOUNT)
- { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PETALYNX)
- { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PICOLCD)
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PRIMAX)
- { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PRODIKEYS)
- { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
-#endif
-#if IS_ENABLED(CONFIG_HID_RETRODE)
- { HID_USB_DEVICE(USB_VENDOR_ID_FUTURE_TECHNOLOGY, USB_DEVICE_ID_RETRODE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_RMI)
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ROCCAT)
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKUFX) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SAITEK)
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SAMSUNG)
- { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS)
- { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SONY)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SPEEDLINK)
- { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_STEELSERIES)
- { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SUNPLUS)
- { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
-#endif
-#if IS_ENABLED(CONFIG_HID_THRUSTMASTER)
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
-#endif
-#if IS_ENABLED(CONFIG_HID_TIVO)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
-#endif
-#if IS_ENABLED(CONFIG_HID_TOPSEED)
- { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
-#endif
-#if IS_ENABLED(CONFIG_HID_TWINHAN)
- { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_UCLOGIC)
- { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
-#endif
-#if IS_ENABLED(CONFIG_HID_UDRAW_PS3)
- { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
-#endif
-#if IS_ENABLED(CONFIG_HID_WALTOP)
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_PID_0038) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
-#endif
-#if IS_ENABLED(CONFIG_HID_XINMO)
- { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ZEROPLUS)
- { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ZYDACRON)
- { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
-#endif
- { }
-};
-
struct hid_dynid {
struct list_head list;
struct hid_device_id id;
@@ -2463,8 +1903,8 @@ static void hid_free_dynids(struct hid_driver *hdrv)
spin_unlock(&hdrv->dyn_lock);
}
-static const struct hid_device_id *hid_match_device(struct hid_device *hdev,
- struct hid_driver *hdrv)
+const struct hid_device_id *hid_match_device(struct hid_device *hdev,
+ struct hid_driver *hdrv)
{
struct hid_dynid *dynid;
@@ -2479,6 +1919,7 @@ static const struct hid_device_id *hid_match_device(struct hid_device *hdev,
return hid_match_id(hdev, hdrv->id_table);
}
+EXPORT_SYMBOL_GPL(hid_match_device);
static int hid_bus_match(struct device *dev, struct device_driver *drv)
{
@@ -2508,6 +1949,23 @@ static int hid_device_probe(struct device *dev)
goto unlock;
}
+ if (hdrv->match) {
+ if (!hdrv->match(hdev, hid_ignore_special_drivers)) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+ } else {
+ /*
+ * hid-generic implements .match(), so if
+ * hid_ignore_special_drivers is set, we can safely
+ * return.
+ */
+ if (hid_ignore_special_drivers) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+ }
+
hdev->driver = hdrv;
if (hdrv->probe) {
ret = hdrv->probe(hdev, id);
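
The new branch above consults an optional per-driver .match() hook; hid-generic supplies one so it can claim every device when hid_ignore_special_drivers is set. A hedged sketch of such a hook (HID_QUIRK_HAVE_SPECIAL_DRIVER is assumed from the companion hid-quirks rework):

static bool hid_generic_match(struct hid_device *hdev,
                              bool ignore_special_driver)
{
        if (ignore_special_driver)
                return true;    /* user asked for generic handling only */

        /* Defer to devices the quirks table marks for a specific driver. */
        return !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER);
}
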
@@ -2604,7 +2062,7 @@ static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-static struct bus_type hid_bus_type = {
+struct bus_type hid_bus_type = {
.name = "hid",
.dev_groups = hid_dev_groups,
.drv_groups = hid_drv_groups,
@@ -2613,315 +2071,7 @@ static struct bus_type hid_bus_type = {
.remove = hid_device_remove,
.uevent = hid_uevent,
};
-
-/* a list of devices that shouldn't be handled by HID core at all */
-static const struct hid_device_id hid_ignore_list[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ADS_TECH, USB_DEVICE_ID_ADS_TECH_RADIO_SI470X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_21) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_22) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_23) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
- { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_RADIOSHARK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_100) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_101) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_103) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_104) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_105) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_106) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_107) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_108) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_200) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_201) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_202) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_203) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_204) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_205) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_206) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_207) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_300) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_301) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_302) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_303) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_304) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_305) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_306) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_307) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_308) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_309) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_400) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_401) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_402) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_403) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_405) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_501) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1003) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) },
- { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_410) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_510) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_GN9350E) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_KYE, 0x0058) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYVOLTAGE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYCURRENT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIC) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOTOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_ABSESP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_AUTODATABUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454_V2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 30) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 100) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 108) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 118) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 200) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 300) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 400) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0001) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
-#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_CPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_STICK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_COMP_TP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WTP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
-#endif
- { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
- { }
-};
-
-/**
- * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
- *
- * There are composite devices for which we want to ignore only a certain
- * interface. This is a list of devices for which only the mouse interface will
- * be ignored. This allows a dedicated driver to take care of the interface.
- */
-static const struct hid_device_id hid_mouse_ignore_list[] = {
- /* appletouch driver */
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
- { }
-};
-
-bool hid_ignore(struct hid_device *hdev)
-{
- if (hdev->quirks & HID_QUIRK_NO_IGNORE)
- return false;
- if (hdev->quirks & HID_QUIRK_IGNORE)
- return true;
-
- switch (hdev->vendor) {
- case USB_VENDOR_ID_CODEMERCS:
- /* ignore all Code Mercenaries IOWarrior devices */
- if (hdev->product >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST &&
- hdev->product <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
- return true;
- break;
- case USB_VENDOR_ID_LOGITECH:
- if (hdev->product >= USB_DEVICE_ID_LOGITECH_HARMONY_FIRST &&
- hdev->product <= USB_DEVICE_ID_LOGITECH_HARMONY_LAST)
- return true;
- /*
- * The Keene FM transmitter USB device has the same USB ID as
- * the Logitech AudioHub Speaker, but it should ignore the hid.
- * Check if the name is that of the Keene device.
- * For reference: the name of the AudioHub is
- * "HOLTEK AudioHub Speaker".
- */
- if (hdev->product == USB_DEVICE_ID_LOGITECH_AUDIOHUB &&
- !strcmp(hdev->name, "HOLTEK B-LINK USB Audio "))
- return true;
- break;
- case USB_VENDOR_ID_SOUNDGRAPH:
- if (hdev->product >= USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST &&
- hdev->product <= USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST)
- return true;
- break;
- case USB_VENDOR_ID_HANWANG:
- if (hdev->product >= USB_DEVICE_ID_HANWANG_TABLET_FIRST &&
- hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST)
- return true;
- break;
- case USB_VENDOR_ID_JESS:
- if (hdev->product == USB_DEVICE_ID_JESS_YUREX &&
- hdev->type == HID_TYPE_USBNONE)
- return true;
- break;
- case USB_VENDOR_ID_VELLEMAN:
- /* These are not HID devices. They are handled by comedi. */
- if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
- hdev->product <= USB_DEVICE_ID_VELLEMAN_K8055_LAST) ||
- (hdev->product >= USB_DEVICE_ID_VELLEMAN_K8061_FIRST &&
- hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
- return true;
- break;
- case USB_VENDOR_ID_ATMEL_V_USB:
- /* Masterkit MA901 usb radio based on Atmel tiny85 chip and
- * it has the same USB ID as many Atmel V-USB devices. This
- * usb radio is handled by radio-ma901.c driver so we want
- * ignore the hid. Check the name, bus, product and ignore
- * if we have MA901 usb radio.
- */
- if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
- hdev->bus == BUS_USB &&
- strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
- return true;
- break;
- }
-
- if (hdev->type == HID_TYPE_USBMOUSE &&
- hid_match_id(hdev, hid_mouse_ignore_list))
- return true;
-
- return !!hid_match_id(hdev, hid_ignore_list);
-}
-EXPORT_SYMBOL_GPL(hid_ignore);
+EXPORT_SYMBOL(hid_bus_type);
int hid_add_device(struct hid_device *hdev)
{
@@ -2931,6 +2081,8 @@ int hid_add_device(struct hid_device *hdev)
if (WARN_ON(hdev->status & HID_STAT_ADDED))
return -EBUSY;
+ hdev->quirks = hid_lookup_quirk(hdev);
+
/* we need to kill them here, otherwise they will stay allocated to
* wait for coming driver */
if (hid_ignore(hdev))
@@ -2960,7 +2112,7 @@ int hid_add_device(struct hid_device *hdev)
if (hid_ignore_special_drivers) {
hdev->group = HID_GROUP_GENERIC;
} else if (!hdev->group &&
- !hid_match_id(hdev, hid_have_special_driver)) {
+ !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
ret = hid_scan_report(hdev);
if (ret)
hid_warn(hdev, "bad device descriptor (%d)\n", ret);
@@ -3044,6 +2196,29 @@ void hid_destroy_device(struct hid_device *hdev)
}
EXPORT_SYMBOL_GPL(hid_destroy_device);
+
+static int __bus_add_driver(struct device_driver *drv, void *data)
+{
+ struct hid_driver *added_hdrv = data;
+ struct hid_driver *hdrv = to_hid_driver(drv);
+
+ if (hdrv->bus_add_driver)
+ hdrv->bus_add_driver(added_hdrv);
+
+ return 0;
+}
+
+static int __bus_removed_driver(struct device_driver *drv, void *data)
+{
+ struct hid_driver *removed_hdrv = data;
+ struct hid_driver *hdrv = to_hid_driver(drv);
+
+ if (hdrv->bus_removed_driver)
+ hdrv->bus_removed_driver(removed_hdrv);
+
+ return 0;
+}
+
int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
const char *mod_name)
{
@@ -3055,6 +2230,8 @@ int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
INIT_LIST_HEAD(&hdrv->dyn_list);
spin_lock_init(&hdrv->dyn_lock);
+ bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_add_driver);
+
return driver_register(&hdrv->driver);
}
EXPORT_SYMBOL_GPL(__hid_register_driver);
@@ -3063,6 +2240,8 @@ void hid_unregister_driver(struct hid_driver *hdrv)
{
driver_unregister(&hdrv->driver);
hid_free_dynids(hdrv);
+
+ bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
}
EXPORT_SYMBOL_GPL(hid_unregister_driver);
@@ -3117,6 +2296,7 @@ static void __exit hid_exit(void)
hid_debug_exit();
hidraw_exit();
bus_unregister(&hid_bus_type);
+ hid_quirks_exit(HID_BUS_ANY);
}
module_init(hid_init);
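
For context on the new registration hooks above: __hid_register_driver() now walks every driver already on the bus via bus_for_each_drv() and invokes its bus_add_driver() callback with the newcomer, and hid_unregister_driver() does the symmetric walk with bus_removed_driver(). A minimal sketch of a driver consuming these callbacks, assuming the struct hid_driver fields introduced by this series (the example_* names and log messages are illustrative only):

	/* Hypothetical observer of other drivers on the HID bus. */
	static void example_bus_add_driver(struct hid_driver *new_hdrv)
	{
		pr_info("example: driver %s registered\n", new_hdrv->name);
	}

	static void example_bus_removed_driver(struct hid_driver *old_hdrv)
	{
		pr_info("example: driver %s unregistered\n", old_hdrv->name);
	}

	static struct hid_driver example_driver = {
		.name               = "example",
		.bus_add_driver     = example_bus_add_driver,
		.bus_removed_driver = example_bus_removed_driver,
	};

In-tree, hid-generic is the only consumer; see the hid-generic.c hunks below.
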
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 68cdc962265b..271f31461da4 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -696,8 +696,16 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
(u8 *)&word, 2);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
- size = I2C_SMBUS_BLOCK_DATA;
- /* fallthrough */
+ if (read_write == I2C_SMBUS_READ) {
+ read_length = data->block[0];
+ count = cp2112_write_read_req(buf, addr, read_length,
+ command, NULL, 0);
+ } else {
+ count = cp2112_write_req(buf, addr, command,
+ data->block + 1,
+ data->block[0]);
+ }
+ break;
case I2C_SMBUS_BLOCK_DATA:
if (I2C_SMBUS_READ == read_write) {
count = cp2112_write_read_req(buf, addr,
@@ -785,6 +793,9 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
case I2C_SMBUS_WORD_DATA:
data->word = le16_to_cpup((__le16 *)buf);
break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ memcpy(data->block + 1, buf, read_length);
+ break;
case I2C_SMBUS_BLOCK_DATA:
if (read_length > I2C_SMBUS_BLOCK_MAX) {
ret = -EPROTO;
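
The new I2C_SMBUS_I2C_BLOCK_DATA case relies on the standard SMBus buffer convention: data->block[0] carries the byte count and the payload starts at data->block[1], which is why the write path passes data->block + 1 with length data->block[0] and the read path copies read_length bytes back to data->block + 1. A sketch of the same convention from the caller's side, paraphrasing what the I2C core does for i2c_smbus_read_i2c_block_data() (client, command, length and values are assumed context):

	union i2c_smbus_data data;
	s32 status;

	data.block[0] = length;		/* requested byte count in block[0] */
	status = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
				I2C_SMBUS_READ, command,
				I2C_SMBUS_I2C_BLOCK_DATA, &data);
	if (status == 0)
		memcpy(values, &data.block[1], data.block[0]);
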
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 5271db593478..c783fd5ef809 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1179,7 +1179,7 @@ out:
return ret;
}
-static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait)
+static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait)
{
struct hid_debug_list *list = file->private_data;
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 54aeea57d209..1a1ecc491c02 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -1,9 +1,15 @@
/*
- * HID driver for ELECOM devices.
+ * HID driver for ELECOM devices:
+ * - BM084 Bluetooth Mouse
+ * - EX-G Trackball (Wired and wireless)
+ * - DEFT Trackball (Wired and wireless)
+ * - HUGE Trackball (Wired and wireless)
+ *
* Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
* Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
* Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
* Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
+ * Copyright (c) 2017 Tomasz Kramkowski <tk@the-tk.com>
*/
/*
@@ -19,6 +25,34 @@
#include "hid-ids.h"
+/*
+ * Certain ELECOM mice misreport their button count, meaning that they only
+ * work correctly with the ELECOM mouse assistant software, which is
+ * unavailable for Linux. Four extra INPUT reports and a FEATURE report are
+ * described by the report descriptor, but it does not appear that these
+ * enable software to control what the extra buttons map to. The only simple
+ * and straightforward solution seems to involve fixing up the report
+ * descriptor.
+ *
+ * Report descriptor format:
+ * Positions 13, 15, 21 and 31 store the button bit count, the button report
+ * size, the button usage maximum and the padding bit count respectively.
+ */
+#define MOUSE_BUTTONS_MAX 8
+static void mouse_button_fixup(struct hid_device *hdev,
+ __u8 *rdesc, unsigned int rsize,
+ int nbuttons)
+{
+ if (rsize < 32 || rdesc[12] != 0x95 ||
+ rdesc[14] != 0x75 || rdesc[15] != 0x01 ||
+ rdesc[20] != 0x29 || rdesc[30] != 0x75)
+ return;
+ hid_info(hdev, "Fixing up Elecom mouse button count\n");
+ nbuttons = clamp(nbuttons, 0, MOUSE_BUTTONS_MAX);
+ rdesc[13] = nbuttons;
+ rdesc[21] = nbuttons;
+ rdesc[31] = MOUSE_BUTTONS_MAX - nbuttons;
+}
+
static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -31,45 +65,15 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[47] = 0x00;
}
break;
+ case USB_DEVICE_ID_ELECOM_EX_G_WIRED:
+ case USB_DEVICE_ID_ELECOM_EX_G_WIRELESS:
+ mouse_button_fixup(hdev, rdesc, *rsize, 6);
+ break;
case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
- /* The DEFT/HUGE trackball has eight buttons, but its descriptor
- * only reports five, disabling the three Fn buttons on the top
- * of the mouse.
- *
- * Apply the following diff to the descriptor:
- *
- * Collection (Physical), Collection (Physical),
- * Report ID (1), Report ID (1),
- * Report Count (5), -> Report Count (8),
- * Report Size (1), Report Size (1),
- * Usage Page (Button), Usage Page (Button),
- * Usage Minimum (01h), Usage Minimum (01h),
- * Usage Maximum (05h), -> Usage Maximum (08h),
- * Logical Minimum (0), Logical Minimum (0),
- * Logical Maximum (1), Logical Maximum (1),
- * Input (Variable), Input (Variable),
- * Report Count (1), -> Report Count (0),
- * Report Size (3), Report Size (3),
- * Input (Constant), Input (Constant),
- * Report Size (16), Report Size (16),
- * Report Count (2), Report Count (2),
- * Usage Page (Desktop), Usage Page (Desktop),
- * Usage (X), Usage (X),
- * Usage (Y), Usage (Y),
- * Logical Minimum (-32768), Logical Minimum (-32768),
- * Logical Maximum (32767), Logical Maximum (32767),
- * Input (Variable, Relative), Input (Variable, Relative),
- * End Collection, End Collection,
- */
- if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
- hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
- rdesc[13] = 8; /* Button/Variable Report Count */
- rdesc[21] = 8; /* Button/Variable Usage Maximum */
- rdesc[29] = 0; /* Button/Constant Report Count */
- }
+ mouse_button_fixup(hdev, rdesc, *rsize, 8);
break;
}
return rdesc;
@@ -77,6 +81,8 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id elecom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
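
For concreteness, the byte-level effect of the new mouse_button_fixup() helper on the 8-button DEFT/HUGE descriptor, with the before values taken from the old fixup comment removed above (the helper sanity-checks the 0x95/0x75/0x29 item tags before patching):

	/*
	 * rdesc[12..13]  0x95 0x05  Report Count (5)    -> 0x95 0x08  Report Count (8)
	 * rdesc[14..15]  0x75 0x01  Report Size (1)        (unchanged, checked only)
	 * rdesc[20..21]  0x29 0x05  Usage Maximum (5)   -> 0x29 0x08  Usage Maximum (8)
	 * rdesc[30..31]  0x75 0x03  padding Report Size -> 0x75 0x00  (3 -> 0 pad bits)
	 */

For the EX-G entries added above, the same helper is called with nbuttons = 6, leaving 8 - 6 = 2 padding bits.
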
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 0cd4f7216239..5eea6fe0d7bd 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -42,6 +42,12 @@ static int elo_input_configured(struct hid_device *hdev,
{
struct input_dev *input = hidinput->input;
+ /*
+ * ELO devices have one Button usage in GenDesk field, which makes
+ * hid-input map it to BTN_LEFT; that confuses userspace, which then
+ * considers the device to be a mouse/touchpad instead of touchscreen.
+ */
+ clear_bit(BTN_LEFT, input->keybit);
set_bit(BTN_TOUCH, input->keybit);
set_bit(ABS_PRESSURE, input->absbit);
input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0);
diff --git a/drivers/hid/hid-generic.c b/drivers/hid/hid-generic.c
index e288a4a06fe8..3c0a1bf433d7 100644
--- a/drivers/hid/hid-generic.c
+++ b/drivers/hid/hid-generic.c
@@ -24,8 +24,71 @@
#include <linux/hid.h>
+static struct hid_driver hid_generic;
+
+static int __unmap_hid_generic(struct device *dev, void *data)
+{
+ struct hid_driver *hdrv = data;
+ struct hid_device *hdev = to_hid_device(dev);
+
+ /* only unbind matching devices already bound to hid-generic */
+ if (hdev->driver != &hid_generic ||
+ hid_match_device(hdev, hdrv) == NULL)
+ return 0;
+
+ if (dev->parent) /* Needed for USB */
+ device_lock(dev->parent);
+ device_release_driver(dev);
+ if (dev->parent)
+ device_unlock(dev->parent);
+
+ return 0;
+}
+
+static void hid_generic_add_driver(struct hid_driver *hdrv)
+{
+ bus_for_each_dev(&hid_bus_type, NULL, hdrv, __unmap_hid_generic);
+}
+
+static void hid_generic_removed_driver(struct hid_driver *hdrv)
+{
+ int ret;
+
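+ /*
+ * driver_attach() is __must_check; the result is captured even
+ * though there is nothing useful to do on failure here.
+ */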
+ ret = driver_attach(&hid_generic.driver);
+}
+
+static int __check_hid_generic(struct device_driver *drv, void *data)
+{
+ struct hid_driver *hdrv = to_hid_driver(drv);
+ struct hid_device *hdev = data;
+
+ if (hdrv == &hid_generic)
+ return 0;
+
+ return hid_match_device(hdev, hdrv) != NULL;
+}
+
+static bool hid_generic_match(struct hid_device *hdev,
+ bool ignore_special_driver)
+{
+ if (ignore_special_driver)
+ return true;
+
+ if (hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)
+ return false;
+
+ /*
+ * If any other driver wants the device, leave the device to this other
+ * driver.
+ */
+ if (bus_for_each_drv(&hid_bus_type, NULL, hdev, __check_hid_generic))
+ return false;
+
+ return true;
+}
+
static const struct hid_device_id hid_table[] = {
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_GENERIC, HID_ANY_ID, HID_ANY_ID) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, HID_ANY_ID, HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, hid_table);
@@ -33,6 +96,9 @@ MODULE_DEVICE_TABLE(hid, hid_table);
static struct hid_driver hid_generic = {
.name = "hid-generic",
.id_table = hid_table,
+ .match = hid_generic_match,
+ .bus_add_driver = hid_generic_add_driver,
+ .bus_removed_driver = hid_generic_removed_driver,
};
module_hid_driver(hid_generic);
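
A condensed trace of the rebind round-trip that the match/add/removed callbacks above implement together with the hid-core changes earlier in this patch (a paraphrase of the patched flow, not new API):

	/*
	 * 1. hid_foo registers:
	 *      __hid_register_driver()
	 *        -> bus_for_each_drv(..., __bus_add_driver)
	 *          -> hid_generic_add_driver(hid_foo)
	 *            -> bus_for_each_dev(..., __unmap_hid_generic)
	 *               releases every device bound to hid-generic that
	 *               hid_foo matches; on re-probe, hid_generic_match()
	 *               sees another matching driver and defers to it.
	 *
	 * 2. hid_foo unregisters:
	 *      hid_unregister_driver()
	 *        -> bus_for_each_drv(..., __bus_removed_driver)
	 *          -> hid_generic_removed_driver()
	 *            -> driver_attach(&hid_generic.driver), which re-probes
	 *               the now-unbound devices back onto hid-generic.
	 */
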
diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
index 9325545fc3ae..edc0f64bb584 100644
--- a/drivers/hid/hid-holtekff.c
+++ b/drivers/hid/hid-holtekff.c
@@ -32,10 +32,6 @@
#ifdef CONFIG_HOLTEK_FF
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
-MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
-
/*
* These commands and parameters are currently known:
*
@@ -223,3 +219,7 @@ static struct hid_driver holtek_driver = {
.probe = holtek_probe,
};
module_hid_driver(holtek_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5da3d6256d25..43ddcdfbd0da 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -178,7 +178,8 @@
#define USB_VENDOR_ID_ASUSTEK 0x0b05
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b
-#define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD 0x17e0
+#define USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD 0x17e0
+#define USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD 0x1807
#define USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD 0x8502
#define USB_DEVICE_ID_ASUSTEK_T304_KEYBOARD 0x184a
#define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD 0x8585
@@ -370,6 +371,8 @@
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
+#define USB_DEVICE_ID_ELECOM_EX_G_WIRED 0x00fb
+#define USB_DEVICE_ID_ELECOM_EX_G_WIRELESS 0x00fc
#define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe
#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff
#define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c
@@ -535,6 +538,7 @@
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
@@ -1156,6 +1160,7 @@
#define USB_VENDOR_ID_PRIMAX 0x0461
#define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22
#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
+#define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72
#define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */
diff --git a/drivers/hid/hid-jabra.c b/drivers/hid/hid-jabra.c
new file mode 100644
index 000000000000..1f52daf14426
--- /dev/null
+++ b/drivers/hid/hid-jabra.c
@@ -0,0 +1,58 @@
+/*
+ * Jabra USB HID Driver
+ *
+ * Copyright (c) 2017 Niels Skou Olsen <nolsen@jabra.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+#define HID_UP_VENDOR_DEFINED_MIN 0xff000000
+#define HID_UP_VENDOR_DEFINED_MAX 0xffff0000
+
+static int jabra_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi,
+ struct hid_field *field,
+ struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ int is_vendor_defined =
+ ((usage->hid & HID_USAGE_PAGE) >= HID_UP_VENDOR_DEFINED_MIN &&
+ (usage->hid & HID_USAGE_PAGE) <= HID_UP_VENDOR_DEFINED_MAX);
+
+ dbg_hid("hid=0x%08x appl=0x%08x coll_idx=0x%02x usage_idx=0x%02x: %s\n",
+ usage->hid,
+ field->application,
+ usage->collection_index,
+ usage->usage_index,
+ is_vendor_defined ? "ignored" : "defaulted");
+
+ /* Ignore vendor defined usages, default map standard usages */
+ return is_vendor_defined ? -1 : 0;
+}
+
+static const struct hid_device_id jabra_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, HID_ANY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, jabra_devices);
+
+static struct hid_driver jabra_driver = {
+ .name = "jabra",
+ .id_table = jabra_devices,
+ .input_mapping = jabra_input_mapping,
+};
+module_hid_driver(jabra_driver);
+
+MODULE_AUTHOR("Niels Skou Olsen <nolsen@jabra.com>");
+MODULE_DESCRIPTION("Jabra USB HID Driver");
+MODULE_LICENSE("GPL");
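
A worked instance of the usage-page test in jabra_input_mapping(): HID_USAGE_PAGE is the 0xffff0000 mask from hid.h, so the comparison looks only at the page half of the 32-bit usage (the usage value below is illustrative):

	u32 usage = 0xff300021;			/* page 0xff30, usage 0x0021 */
	u32 page  = usage & HID_USAGE_PAGE;	/* == 0xff300000 */

	/*
	 * HID_UP_VENDOR_DEFINED_MIN (0xff000000) <= 0xff300000 <=
	 * HID_UP_VENDOR_DEFINED_MAX (0xffff0000), so the mapping callback
	 * returns -1 and the usage is ignored.
	 */
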
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 65ea23be9677..3b4739bde05d 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -85,11 +85,12 @@ MODULE_LICENSE("GPL");
#define MT_IO_FLAGS_PENDING_SLOTS 2
struct mt_slot {
- __s32 x, y, cx, cy, p, w, h;
+ __s32 x, y, cx, cy, p, w, h, a;
__s32 contactid; /* the device ContactID assigned to this slot */
bool touch_state; /* is the touch valid? */
bool inrange_state; /* is the finger in proximity of the sensor? */
bool confidence_state; /* is the touch made by a finger? */
+ bool has_azimuth; /* the contact reports azimuth */
};
struct mt_class {
@@ -119,6 +120,10 @@ struct mt_device {
unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */
int cc_index; /* contact count field index in the report */
int cc_value_index; /* contact count value index in the field */
+ int scantime_index; /* scantime field index in the report */
+ int scantime_val_index; /* scantime value index in the field */
+ int prev_scantime; /* scantime reported in the previous packet */
+ int left_button_state; /* left button state */
unsigned last_slot_field; /* the last field of a slot */
unsigned mt_report_id; /* the report ID of the multitouch device */
unsigned long initial_quirks; /* initial quirks state */
@@ -582,8 +587,15 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (!(cls->quirks & MT_QUIRK_NO_AREA)) {
set_abs(hi->input, ABS_MT_TOUCH_MINOR, field,
cls->sn_height);
- input_set_abs_params(hi->input,
- ABS_MT_ORIENTATION, 0, 1, 0, 0);
+
+ /*
+ * Only set ABS_MT_ORIENTATION if it is not
+ * already set by the HID_DG_AZIMUTH usage.
+ */
+ if (!test_bit(ABS_MT_ORIENTATION,
+ hi->input->absbit))
+ input_set_abs_params(hi->input,
+ ABS_MT_ORIENTATION, 0, 1, 0, 0);
}
mt_store_field(usage, td, hi);
return 1;
@@ -599,6 +611,12 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
EV_MSC, MSC_TIMESTAMP);
input_set_capability(hi->input, EV_MSC, MSC_TIMESTAMP);
mt_store_field(usage, td, hi);
+ /* Ignore if indexes are out of bounds. */
+ if (field->index >= field->report->maxfield ||
+ usage->usage_index >= field->report_count)
+ return 1;
+ td->scantime_index = field->index;
+ td->scantime_val_index = usage->usage_index;
return 1;
case HID_DG_CONTACTCOUNT:
/* Ignore if indexes are out of bounds. */
@@ -608,6 +626,21 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
td->cc_index = field->index;
td->cc_value_index = usage->usage_index;
return 1;
+ case HID_DG_AZIMUTH:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_ORIENTATION);
+ /*
+ * Azimuth has the range [0, MAX), representing a full
+ * revolution. Set ABS_MT_ORIENTATION to a quarter of
+ * MAX, according to the definition of ABS_MT_ORIENTATION.
+ */
+ input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
+ -field->logical_maximum / 4,
+ field->logical_maximum / 4,
+ cls->sn_move ?
+ field->logical_maximum / cls->sn_move : 0, 0);
+ mt_store_field(usage, td, hi);
+ return 1;
case HID_DG_CONTACTMAX:
/* we don't set td->last_slot_field as contactcount and
* contact max are global to the report */
@@ -700,6 +733,10 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
int wide = (s->w > s->h);
int major = max(s->w, s->h);
int minor = min(s->w, s->h);
+ int orientation = wide;
+
+ if (s->has_azimuth)
+ orientation = s->a;
/*
* divided by two to match visual scale of touch
@@ -716,7 +753,8 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
input_event(input, EV_ABS, ABS_MT_TOOL_Y, s->cy);
input_event(input, EV_ABS, ABS_MT_DISTANCE,
!s->touch_state);
- input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
+ input_event(input, EV_ABS, ABS_MT_ORIENTATION,
+ orientation);
input_event(input, EV_ABS, ABS_MT_PRESSURE, s->p);
input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
@@ -734,10 +772,16 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
*/
static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
{
+ __s32 cls = td->mtclass.name;
+
+ if (cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL)
+ input_event(input, EV_KEY, BTN_LEFT, td->left_button_state);
+
input_mt_sync_frame(input);
input_event(input, EV_MSC, MSC_TIMESTAMP, td->timestamp);
input_sync(input);
td->num_received = 0;
+ td->left_button_state = 0;
if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags))
set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
else
@@ -778,9 +822,11 @@ static int mt_touch_event(struct hid_device *hid, struct hid_field *field,
}
static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value)
+ struct hid_usage *usage, __s32 value,
+ bool first_packet)
{
struct mt_device *td = hid_get_drvdata(hid);
+ __s32 cls = td->mtclass.name;
__s32 quirks = td->mtclass.quirks;
struct input_dev *input = field->hidinput->input;
@@ -832,11 +878,49 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
break;
case HID_DG_CONTACTCOUNT:
break;
+ case HID_DG_AZIMUTH:
+ /*
+ * Azimuth is counter-clockwise and ranges over [0, MAX)
+ * (a full revolution). Convert it to a clockwise value
+ * in [-MAX/2, MAX/2].
+ *
+ * Note that ABS_MT_ORIENTATION requires us to report
+ * the limit of [-MAX/4, MAX/4], but the value may go
+ * out of range to [-MAX/2, MAX/2] to report an upside
+ * down ellipse.
+ */
+ if (value > field->logical_maximum / 2)
+ value -= field->logical_maximum;
+ td->curdata.a = -value;
+ td->curdata.has_azimuth = true;
+ break;
case HID_DG_TOUCH:
/* do nothing */
break;
default:
+ /*
+ * For Win8 PTP touchpads we should only look at
+ * non-finger/touch events in the first_packet of
+ * a (possible) multi-packet frame.
+ */
+ if ((cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL) &&
+ !first_packet)
+ return;
+
+ /*
+ * For Win8 PTP touchpads we map both the clickpad click
+ * and any "external" left buttons to BTN_LEFT. If a
+ * device claims to have both, we need to report 1 for
+ * BTN_LEFT if either is pressed, so we OR all values
+ * together and report the result in mt_sync_frame().
+ */
+ if ((cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL) &&
+ usage->type == EV_KEY && usage->code == BTN_LEFT) {
+ td->left_button_state |= value;
+ return;
+ }
+
if (usage->type)
input_event(input, usage->type, usage->code,
value);
@@ -855,9 +939,11 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
{
struct mt_device *td = hid_get_drvdata(hid);
+ __s32 cls = td->mtclass.name;
struct hid_field *field;
+ bool first_packet;
unsigned count;
- int r, n;
+ int r, n, scantime = 0;
/* sticky fingers release in progress, abort */
if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
@@ -867,13 +953,31 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
* Includes multi-packet support where subsequent
* packets are sent with zero contactcount.
*/
+ if (td->scantime_index >= 0) {
+ field = report->field[td->scantime_index];
+ scantime = field->value[td->scantime_val_index];
+ }
if (td->cc_index >= 0) {
struct hid_field *field = report->field[td->cc_index];
int value = field->value[td->cc_value_index];
- if (value)
+
+ /*
+ * For Win8 PTPs the first packet (td->num_received == 0) may
+ * have a contactcount of 0 if there is only a button event.
+ * We double-check that this is not a continuation packet
+ * of a possible multi-packet frame by checking that the
+ * timestamp has changed.
+ */
+ if ((cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL) &&
+ td->num_received == 0 && td->prev_scantime != scantime)
+ td->num_expected = value;
+ /* A non 0 contact count always indicates a first packet */
+ else if (value)
td->num_expected = value;
}
+ td->prev_scantime = scantime;
+ first_packet = td->num_received == 0;
for (r = 0; r < report->maxfield; r++) {
field = report->field[r];
count = field->report_count;
@@ -883,7 +987,7 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
for (n = 0; n < count; n++)
mt_process_mt_event(hid, field, &field->usage[n],
- field->value[n]);
+ field->value[n], first_packet);
}
if (td->num_received >= td->num_expected)
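
A worked trace of the guard above, for a Win8 PTP frame with five contacts split across two packets plus a later button-only frame (scantime values illustrative):

	/*
	 * packet 1: contactcount = 5, scantime = 1000, num_received = 0,
	 *           prev_scantime = 900  -> first packet, num_expected = 5
	 * packet 2: contactcount = 0, scantime = 1000, num_received = 3
	 *           -> continuation, num_expected stays 5
	 * button-only frame: contactcount = 0, scantime = 1100,
	 *           num_received = 0, prev_scantime = 1000
	 *           -> first packet again, num_expected = 0
	 */
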
@@ -1329,6 +1433,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
td->maxcontact_report_id = -1;
td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN;
td->cc_index = -1;
+ td->scantime_index = -1;
td->mt_report_id = -1;
hid_set_drvdata(hdev, td);
@@ -1649,14 +1754,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_TURBOX,
USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
- /* Panasonic panels */
- { .driver_data = MT_CLS_PANASONIC,
- MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
- USB_DEVICE_ID_PANABOARD_UBT780) },
- { .driver_data = MT_CLS_PANASONIC,
- MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
- USB_DEVICE_ID_PANABOARD_UBT880) },
-
/* Novatek Panel */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
@@ -1667,6 +1764,14 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_NTRIG, 0x1b05) },
+ /* Panasonic panels */
+ { .driver_data = MT_CLS_PANASONIC,
+ MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
+ USB_DEVICE_ID_PANABOARD_UBT780) },
+ { .driver_data = MT_CLS_PANASONIC,
+ MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
+ USB_DEVICE_ID_PANABOARD_UBT880) },
+
/* PixArt optical touch screen */
{ .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
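
A worked instance of the HID_DG_AZIMUTH conversion added above, assuming a device whose field->logical_maximum is 360 (the real maximum is device-specific):

	__s32 value = 350, max = 360;	/* 350 counter-clockwise, in [0, 360) */

	if (value > max / 2)		/* 350 > 180 */
		value -= max;		/* -> -10 */
	td->curdata.a = -value;		/* -> +10 clockwise, within [-90, 90] */
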
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
new file mode 100644
index 000000000000..5f6035a5ce36
--- /dev/null
+++ b/drivers/hid/hid-quirks.c
@@ -0,0 +1,1276 @@
+/*
+ * HID quirks support for Linux
+ *
+ * Copyright (c) 1999 Andreas Gal
+ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
+ * Copyright (c) 2006-2007 Jiri Kosina
+ * Copyright (c) 2007 Paul Walmsley
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/hid.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#include "hid-ids.h"
+
+/*
+ * Alphabetically sorted by vendor then product.
+ */
+
+static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS1758), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70R), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K95RGB), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_M65RGB), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER), HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NATSU, USB_DEVICE_ID_NATSU_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NEXTWINDOW, USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN), HID_QUIRK_MULTI_INPUT},
+ { HID_USB_DEVICE(USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK), HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL), HID_QUIRK_HIDINPUT_FORCE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS1030_TOUCH), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS817_TOUCH), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS_TS), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+
+ { 0 }
+};
+
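+/*
+ * hid_lookup_quirk(), called from hid_add_device() earlier in this patch,
+ * conceptually reduces to a hid_match_id() walk over the table above, e.g.
+ *
+ *	id = hid_match_id(hdev, hid_quirks);
+ *	quirks = id ? id->driver_data : 0;
+ *
+ * plus any dynamically registered quirks (its body is outside this hunk).
+ */
+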
+/*
+ * A list of devices for which there is a specialized driver on HID bus.
+ *
+ * Please note that for multitouch devices (driven by hid-multitouch driver),
+ * there is a proper autodetection and autoloading in place (based on presence
+ * of HID_DG_CONTACTID), so those devices don't need to be added to this list,
+ * as we are doing the right thing in hid_scan_usage().
+ *
+ * Autodetection for (USB) HID sensor hubs exists too. If a collection of type
+ * physical is found inside a usage page of type sensor, hid-sensor-hub will be
+ * used as a driver. See hid_scan_report().
+ */
+static const struct hid_device_id hid_have_special_driver[] = {
+#if IS_ENABLED(CONFIG_HID_A4TECH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACRUX)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ALPS)
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLEIR)
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ASUS)
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_AUREAL)
+ { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BELKIN)
+ { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BETOP_FF)
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHERRY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHICONY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CMEDIA)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CORSAIR)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CP2112)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CYPRESS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_DRAGONRISE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ELECOM)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ELO)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EMS_FF)
+ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EZKEY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GEMBIRD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GFRM)
+ { HID_BLUETOOTH_DEVICE(0x58, 0x2000) },
+ { HID_BLUETOOTH_DEVICE(0x471, 0x2210) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GREENASIA)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GT683R)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GYRATION)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
+#endif
+#if IS_ENABLED(CONFIG_HID_HOLTEK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ITE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ICADE)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_JABRA)
+ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, HID_ANY_ID) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KENSINGTON)
+ { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KEYTOUCH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KYE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LCPOWER)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LED)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LENOVO)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MAYFLASH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MICROSOFT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MONTEREY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MULTITOUCH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_WIIMOTE)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_NTI)
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_NTRIG)
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_6) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_8) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_9) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_10) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_11) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_12) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_13) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_14) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_15) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ORTEK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PENMOUNT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PETALYNX)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PICOLCD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PRIMAX)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PRODIKEYS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+#endif
+#if IS_ENABLED(CONFIG_HID_RETRODE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_FUTURE_TECHNOLOGY, USB_DEVICE_ID_RETRODE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_RMI)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_REZEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ROCCAT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKUFX) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SAITEK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SAMSUNG)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SONY)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SPEEDLINK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_STEELSERIES)
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SUNPLUS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
+#endif
+#if IS_ENABLED(CONFIG_HID_THRUSTMASTER)
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TIVO)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TOPSEED)
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TWINHAN)
+ { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_UCLOGIC)
+ { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+#endif
+#if IS_ENABLED(CONFIG_HID_UDRAW_PS3)
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
+#endif
+#if IS_ENABLED(CONFIG_HID_WALTOP)
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_PID_0038) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
+#endif
+#if IS_ENABLED(CONFIG_HID_XINMO)
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ZEROPLUS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ZYDACRON)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
+#endif
+ { }
+};
+
+/* a list of devices that shouldn't be handled by HID core at all */
+static const struct hid_device_id hid_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ADS_TECH, USB_DEVICE_ID_ADS_TECH_RADIO_SI470X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_21) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_22) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_23) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
+ { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_RADIOSHARK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_100) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_101) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_103) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_104) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_105) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_106) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_107) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_108) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_200) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_201) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_202) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_203) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_204) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_205) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_206) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_207) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_300) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_301) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_302) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_303) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_304) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_305) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_306) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_307) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_308) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_309) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_400) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_401) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_402) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_403) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_405) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_501) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1003) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_GN9350E) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_KYE, 0x0058) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYVOLTAGE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYCURRENT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIC) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_ABSESP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_AUTODATABUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454_V2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 30) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 100) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 108) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 118) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 200) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 300) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 400) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
+#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_CPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_STICK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_COMP_TP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WTP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
+#endif
+ { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
+ { }
+};
+
+/**
+ * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
+ *
+ * There are composite devices for which we want to ignore only a certain
+ * interface. This is a list of devices for which only the mouse interface will
+ * be ignored. This allows a dedicated driver to take care of the interface.
+ */
+static const struct hid_device_id hid_mouse_ignore_list[] = {
+ /* appletouch driver */
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ { }
+};
+
+bool hid_ignore(struct hid_device *hdev)
+{
+ if (hdev->quirks & HID_QUIRK_NO_IGNORE)
+ return false;
+ if (hdev->quirks & HID_QUIRK_IGNORE)
+ return true;
+
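+	/* Vendor-specific checks that cannot be expressed as simple id matches. */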
+ switch (hdev->vendor) {
+ case USB_VENDOR_ID_CODEMERCS:
+ /* ignore all Code Mercenaries IOWarrior devices */
+ if (hdev->product >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST &&
+ hdev->product <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
+ return true;
+ break;
+ case USB_VENDOR_ID_LOGITECH:
+ if (hdev->product >= USB_DEVICE_ID_LOGITECH_HARMONY_FIRST &&
+ hdev->product <= USB_DEVICE_ID_LOGITECH_HARMONY_LAST)
+ return true;
+ /*
+	 * The Keene FM transmitter USB device has the same USB ID as
+	 * the Logitech AudioHub Speaker, but it should be ignored by the
+	 * HID layer. Check whether the name is that of the Keene device.
+	 * For reference, the name of the AudioHub is
+	 * "HOLTEK AudioHub Speaker".
+ */
+ if (hdev->product == USB_DEVICE_ID_LOGITECH_AUDIOHUB &&
+ !strcmp(hdev->name, "HOLTEK B-LINK USB Audio "))
+ return true;
+ break;
+ case USB_VENDOR_ID_SOUNDGRAPH:
+ if (hdev->product >= USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST &&
+ hdev->product <= USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST)
+ return true;
+ break;
+ case USB_VENDOR_ID_HANWANG:
+ if (hdev->product >= USB_DEVICE_ID_HANWANG_TABLET_FIRST &&
+ hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST)
+ return true;
+ break;
+ case USB_VENDOR_ID_JESS:
+ if (hdev->product == USB_DEVICE_ID_JESS_YUREX &&
+ hdev->type == HID_TYPE_USBNONE)
+ return true;
+ break;
+ case USB_VENDOR_ID_VELLEMAN:
+ /* These are not HID devices. They are handled by comedi. */
+ if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
+ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8055_LAST) ||
+ (hdev->product >= USB_DEVICE_ID_VELLEMAN_K8061_FIRST &&
+ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
+ return true;
+ break;
+ case USB_VENDOR_ID_ATMEL_V_USB:
+	/* The Masterkit MA901 USB radio is based on an Atmel tiny85 chip
+	 * and has the same USB ID as many Atmel V-USB devices. The radio
+	 * is handled by the radio-ma901.c driver, so the HID layer should
+	 * ignore it. Check the name, bus and product, and ignore the
+	 * device only if it is the MA901 USB radio.
+	 */
+ if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
+ hdev->bus == BUS_USB &&
+ strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
+ return true;
+ break;
+ case USB_VENDOR_ID_ELAN:
+ /*
+	 * Many Elan devices have a product id of 0x0401 and are handled
+	 * by the elan_i2c input driver, but the ACPI HID ELAN0800 device
+	 * is not (and cannot be) handled by that driver. Ignore all
+	 * 0x0401 devices except for the ELAN0800 device.
+ */
+ if (hdev->product == 0x0401 &&
+ strncmp(hdev->name, "ELAN0800", 8) != 0)
+ return true;
+ break;
+ }
+
+ if (hdev->type == HID_TYPE_USBMOUSE &&
+ hid_match_id(hdev, hid_mouse_ignore_list))
+ return true;
+
+ return !!hid_match_id(hdev, hid_ignore_list);
+}
+EXPORT_SYMBOL_GPL(hid_ignore);
+
+/* Dynamic HID quirks list - specified at runtime */
+struct quirks_list_struct {
+ struct hid_device_id hid_bl_item;
+ struct list_head node;
+};
+
+static LIST_HEAD(dquirks_list);
+static DEFINE_MUTEX(dquirks_lock);
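+/* dquirks_lock serializes all additions, removals and lookups on dquirks_list. */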
+
+/* Runtime ("dynamic") quirks manipulation functions */
+
+/**
+ * hid_exists_dquirk: find any dynamic quirks for a HID device
+ * @hdev: the HID device to match
+ *
+ * Description:
+ * Scans dquirks_list for a matching dynamic quirk and returns
+ * the pointer to the relevant struct hid_device_id if found.
+ * Must be called with dquirks_lock held.
+ *
+ * Returns: NULL if no quirk found, struct hid_device_id * if found.
+ */
+static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
+{
+ struct quirks_list_struct *q;
+ struct hid_device_id *bl_entry = NULL;
+
+ list_for_each_entry(q, &dquirks_list, node) {
+ if (hid_match_one_id(hdev, &q->hid_bl_item)) {
+ bl_entry = &q->hid_bl_item;
+ break;
+ }
+ }
+
+ if (bl_entry != NULL)
+ dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ bl_entry->driver_data, bl_entry->vendor,
+ bl_entry->product);
+
+ return bl_entry;
+}
+
+
+/**
+ * hid_modify_dquirk: add/replace a HID quirk
+ * @id: the HID device to match
+ * @quirks: the unsigned long quirks value to add/replace
+ *
+ * Description:
+ * If a dynamic quirk exists in memory for this device, replace its
+ * quirks value with what was provided. Otherwise, add the quirk
+ * to the dynamic quirks list.
+ *
+ * Returns: 0 OK, -error on failure.
+ */
+static int hid_modify_dquirk(const struct hid_device_id *id,
+ const unsigned long quirks)
+{
+ struct hid_device *hdev;
+ struct quirks_list_struct *q_new, *q;
+ int list_edited = 0;
+ int ret = 0;
+
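+	/*
+	 * Build a throwaway hid_device carrying the new id so that
+	 * hid_match_one_id() can be reused to locate an existing dynamic
+	 * entry for the same device.
+	 */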
+ hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ q_new = kmalloc(sizeof(struct quirks_list_struct), GFP_KERNEL);
+ if (!q_new) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hdev->bus = q_new->hid_bl_item.bus = id->bus;
+ hdev->group = q_new->hid_bl_item.group = id->group;
+ hdev->vendor = q_new->hid_bl_item.vendor = id->vendor;
+ hdev->product = q_new->hid_bl_item.product = id->product;
+ q_new->hid_bl_item.driver_data = quirks;
+
+ mutex_lock(&dquirks_lock);
+
+	list_for_each_entry(q, &dquirks_list, node) {
+		if (hid_match_one_id(hdev, &q->hid_bl_item)) {
+			list_replace(&q->node, &q_new->node);
+			kfree(q);
+			list_edited = 1;
+			break;
+		}
+	}
+
+ if (!list_edited)
+ list_add_tail(&q_new->node, &dquirks_list);
+
+ mutex_unlock(&dquirks_lock);
+
+ out:
+ kfree(hdev);
+ return ret;
+}
+
+/**
+ * hid_remove_all_dquirks: remove all runtime HID quirks from memory
+ * @bus: bus to match against. Use HID_BUS_ANY if all need to be removed.
+ *
+ * Description:
+ * Free all memory associated with dynamic quirks - called before
+ * module unload.
+ *
+ */
+static void hid_remove_all_dquirks(__u16 bus)
+{
+ struct quirks_list_struct *q, *temp;
+
+ mutex_lock(&dquirks_lock);
+ list_for_each_entry_safe(q, temp, &dquirks_list, node) {
+ if (bus == HID_BUS_ANY || bus == q->hid_bl_item.bus) {
+ list_del(&q->node);
+ kfree(q);
+ }
+ }
+ mutex_unlock(&dquirks_lock);
+}
+
+/**
+ * hid_quirks_init: apply HID quirks specified at module load time
+ * @quirks_param: array of "0xVVVV:0xPPPP:0xQQQQ" strings (vendor:product:quirks)
+ * @bus: the bus the quirks apply to (e.g. BUS_USB)
+ * @count: maximum number of entries in @quirks_param
+ *
+ * Malformed entries are reported and skipped.
+ *
+ * Returns: 0 in all cases.
+ */
+int hid_quirks_init(char **quirks_param, __u16 bus, int count)
+{
+ struct hid_device_id id = { 0 };
+ int n = 0, m;
+ unsigned short int vendor, product;
+ u32 quirks;
+
+ id.bus = bus;
+
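+	/*
+	 * Each entry is expected as "0xVVVV:0xPPPP:0xQQQQ" (vendor, product
+	 * and quirks mask in hex), e.g. a module parameter such as
+	 * usbhid.quirks=0x046d:0xc52b:0x00000004 (values purely illustrative).
+	 */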
+ for (; n < count && quirks_param[n]; n++) {
+
+ m = sscanf(quirks_param[n], "0x%hx:0x%hx:0x%x",
+ &vendor, &product, &quirks);
+
+ id.vendor = (__u16)vendor;
+ id.product = (__u16)product;
+
+ if (m != 3 ||
+ hid_modify_dquirk(&id, quirks) != 0) {
+ pr_warn("Could not parse HID quirk module param %s\n",
+ quirks_param[n]);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hid_quirks_init);
+
+/**
+ * hid_quirks_exit: release memory associated with dynamic_quirks
+ * @bus: a bus to match against
+ *
+ * Description:
+ * Release all memory associated with dynamic quirks for a given bus.
+ * Called upon module unload.
+ * Use HID_BUS_ANY to remove all dynamic quirks.
+ *
+ * Returns: nothing
+ */
+void hid_quirks_exit(__u16 bus)
+{
+ hid_remove_all_dquirks(bus);
+}
+EXPORT_SYMBOL_GPL(hid_quirks_exit);
+
+/**
+ * hid_gets_squirk: return any static quirks for a HID device
+ * @hdev: the HID device to match
+ *
+ * Description:
+ * Given a HID device, scan the static ignore, special-driver and quirk
+ * tables and return the combined quirks value for that device.
+ *
+ * Returns: the quirks.
+ */
+static unsigned long hid_gets_squirk(const struct hid_device *hdev)
+{
+ const struct hid_device_id *bl_entry;
+ unsigned long quirks = 0;
+
+ if (hid_match_id(hdev, hid_ignore_list))
+ quirks |= HID_QUIRK_IGNORE;
+
+ if (hid_match_id(hdev, hid_have_special_driver))
+ quirks |= HID_QUIRK_HAVE_SPECIAL_DRIVER;
+
+ bl_entry = hid_match_id(hdev, hid_quirks);
+ if (bl_entry != NULL)
+ quirks |= bl_entry->driver_data;
+
+ if (quirks)
+ dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ quirks, hdev->vendor, hdev->product);
+ return quirks;
+}
+
+/**
+ * hid_lookup_quirk: return any quirks associated with a HID device
+ * @hdev: the HID device to look for
+ *
+ * Description:
+ * Given a HID device, return any quirks associated with that device.
+ *
+ * Returns: an unsigned long quirks value.
+ */
+unsigned long hid_lookup_quirk(const struct hid_device *hdev)
+{
+ unsigned long quirks = 0;
+ const struct hid_device_id *quirk_entry = NULL;
+
+ /* NCR devices must not be queried for reports */
+ if (hdev->bus == BUS_USB &&
+ hdev->vendor == USB_VENDOR_ID_NCR &&
+ hdev->product >= USB_DEVICE_ID_NCR_FIRST &&
+ hdev->product <= USB_DEVICE_ID_NCR_LAST)
+ return HID_QUIRK_NO_INIT_REPORTS;
+
+ /* These devices must be ignored if version (bcdDevice) is too old */
+ if (hdev->bus == BUS_USB && hdev->vendor == USB_VENDOR_ID_JABRA) {
+ switch (hdev->product) {
+ case USB_DEVICE_ID_JABRA_SPEAK_410:
+ if (hdev->version < 0x0111)
+ return HID_QUIRK_IGNORE;
+ break;
+ case USB_DEVICE_ID_JABRA_SPEAK_510:
+ if (hdev->version < 0x0214)
+ return HID_QUIRK_IGNORE;
+ break;
+ }
+ }
+
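+	/*
+	 * Dynamic (runtime) quirks take precedence: fall back to the static
+	 * tables only when no dynamic entry matches this device.
+	 */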
+ mutex_lock(&dquirks_lock);
+ quirk_entry = hid_exists_dquirk(hdev);
+ if (quirk_entry)
+ quirks = quirk_entry->driver_data;
+ else
+ quirks = hid_gets_squirk(hdev);
+ mutex_unlock(&dquirks_lock);
+
+ return quirks;
+}
+EXPORT_SYMBOL_GPL(hid_lookup_quirk);
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 0f43c4292685..c6c05df3e8d2 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -731,6 +731,7 @@ static const struct hid_device_id rmi_id[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14),
.driver_data = RMI_DEVICE_HAS_PHYS_BUTTONS },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_REZEL) },
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) },
{ }
};
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 43617fb28b87..317c9c2c0a7c 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -37,6 +37,8 @@ static uint kovaplus_convert_event_cpi(uint value)
static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
uint new_profile_index)
{
+ if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings))
+ return;
kovaplus->actual_profile = new_profile_index;
kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index fb77dec720a4..b7e86aba6f33 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -137,7 +137,7 @@ exit_unlock:
return retval;
}
-static unsigned int roccat_poll(struct file *file, poll_table *wait)
+static __poll_t roccat_poll(struct file *file, poll_table *wait)
{
struct roccat_reader *reader = file->private_data;
poll_wait(file, &reader->device->wait, wait);
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 0bcf041368c7..21ed6c55c40a 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -702,11 +702,11 @@ static int hid_sensor_custom_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
}
-static unsigned int hid_sensor_custom_poll(struct file *file,
+static __poll_t hid_sensor_custom_poll(struct file *file,
struct poll_table_struct *wait)
{
struct hid_sensor_custom *sensor_inst;
- unsigned int mask = 0;
+ __poll_t mask = 0;
sensor_inst = container_of(file->private_data,
struct hid_sensor_custom, custom_dev);
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b9dc3ac4d4aa..ccdc5f2d01b1 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -473,6 +473,7 @@ struct motion_output_report_02 {
#define DS4_FEATURE_REPORT_0x02_SIZE 37
#define DS4_FEATURE_REPORT_0x05_SIZE 41
#define DS4_FEATURE_REPORT_0x81_SIZE 7
+#define DS4_FEATURE_REPORT_0xA3_SIZE 49
#define DS4_INPUT_REPORT_0x11_SIZE 78
#define DS4_OUTPUT_REPORT_0x05_SIZE 32
#define DS4_OUTPUT_REPORT_0x11_SIZE 78
@@ -544,6 +545,8 @@ struct sony_sc {
struct power_supply *battery;
struct power_supply_desc battery_desc;
int device_id;
+	unsigned int fw_version;
+	unsigned int hw_version;
u8 *output_report_dmabuf;
#ifdef CONFIG_SONY_FF
@@ -627,6 +630,29 @@ static ssize_t ds4_store_poll_interval(struct device *dev,
static DEVICE_ATTR(bt_poll_interval, 0644, ds4_show_poll_interval,
ds4_store_poll_interval);
+static ssize_t sony_show_firmware_version(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n", sc->fw_version);
+}
+
+static DEVICE_ATTR(firmware_version, 0444, sony_show_firmware_version, NULL);
+
+static ssize_t sony_show_hardware_version(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n", sc->hw_version);
+}
+
+static DEVICE_ATTR(hardware_version, 0444, sony_show_hardware_version, NULL);
static u8 *motion_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
@@ -1646,6 +1672,31 @@ static void dualshock4_calibration_work(struct work_struct *work)
spin_unlock_irqrestore(&sc->lock, flags);
}
+static int dualshock4_get_version_info(struct sony_sc *sc)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(DS4_FEATURE_REPORT_0xA3_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(sc->hdev, 0xA3, buf,
+ DS4_FEATURE_REPORT_0xA3_SIZE,
+ HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret < 0) {
+ kfree(buf);
+ return ret;
+ }
+
+ sc->hw_version = get_unaligned_le16(&buf[35]);
+ sc->fw_version = get_unaligned_le16(&buf[41]);
+
+ kfree(buf);
+ return 0;
+}
+
static void sixaxis_set_leds_from_id(struct sony_sc *sc)
{
static const u8 sixaxis_leds[10][4] = {
@@ -2399,10 +2450,7 @@ static int sony_check_add(struct sony_sc *sc)
memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address));
snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
- "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
- sc->mac_address[5], sc->mac_address[4],
- sc->mac_address[3], sc->mac_address[2],
- sc->mac_address[1], sc->mac_address[0]);
+ "%pMR", sc->mac_address);
} else if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
(sc->quirks & NAVIGATION_CONTROLLER_USB)) {
buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL);
@@ -2432,10 +2480,7 @@ static int sony_check_add(struct sony_sc *sc)
sc->mac_address[5-n] = buf[4+n];
snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
- "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
- sc->mac_address[5], sc->mac_address[4],
- sc->mac_address[3], sc->mac_address[2],
- sc->mac_address[1], sc->mac_address[0]);
+ "%pMR", sc->mac_address);
} else {
return 0;
}
@@ -2619,6 +2664,28 @@ static int sony_input_configured(struct hid_device *hdev,
goto err_stop;
}
+ ret = dualshock4_get_version_info(sc);
+ if (ret < 0) {
+ hid_err(sc->hdev, "Failed to get version data from Dualshock 4\n");
+ goto err_stop;
+ }
+
+ ret = device_create_file(&sc->hdev->dev, &dev_attr_firmware_version);
+ if (ret) {
+		/* Zero the versions so the error path skips removing missing sysfs files. */
+ sc->fw_version = 0;
+ sc->hw_version = 0;
+ hid_err(sc->hdev, "can't create sysfs firmware_version attribute err: %d\n", ret);
+ goto err_stop;
+ }
+
+ ret = device_create_file(&sc->hdev->dev, &dev_attr_hardware_version);
+ if (ret) {
+ sc->hw_version = 0;
+ hid_err(sc->hdev, "can't create sysfs hardware_version attribute err: %d\n", ret);
+ goto err_stop;
+ }
+
/*
* The Dualshock 4 touchpad supports 2 touches and has a
* resolution of 1920x942 (44.86 dots/mm).
@@ -2695,6 +2762,10 @@ err_stop:
*/
if (sc->ds4_bt_poll_interval)
device_remove_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
+ if (sc->fw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_firmware_version);
+ if (sc->hw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_hardware_version);
if (sc->quirks & SONY_LED_SUPPORT)
sony_leds_remove(sc);
if (sc->quirks & SONY_BATTERY_SUPPORT)
@@ -2796,6 +2867,12 @@ static void sony_remove(struct hid_device *hdev)
if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
device_remove_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
+ if (sc->fw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_firmware_version);
+
+ if (sc->hw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_hardware_version);
+
sony_cancel_work_sync(sc);
kfree(sc->output_report_dmabuf);
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 5fbe0f81ab2e..be210219f982 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -249,7 +249,7 @@ out:
return ret;
}
-static unsigned int hidraw_poll(struct file *file, poll_table *wait)
+static __poll_t hidraw_poll(struct file *file, poll_table *wait)
{
struct hidraw_list *list = file->private_data;
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index e054ee43c1e2..7230243b94d3 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -934,11 +934,6 @@ static int i2c_hid_of_probe(struct i2c_client *client,
}
pdata->hid_descriptor_address = val;
- ret = of_property_read_u32(dev->of_node, "post-power-on-delay-ms",
- &val);
- if (!ret)
- pdata->post_power_delay_ms = val;
-
return 0;
}
@@ -955,6 +950,16 @@ static inline int i2c_hid_of_probe(struct i2c_client *client,
}
#endif
+static void i2c_hid_fwnode_probe(struct i2c_client *client,
+ struct i2c_hid_platform_data *pdata)
+{
+ u32 val;
+
+ if (!device_property_read_u32(&client->dev, "post-power-on-delay-ms",
+ &val))
+ pdata->post_power_delay_ms = val;
+}
+
static int i2c_hid_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
@@ -998,6 +1003,9 @@ static int i2c_hid_probe(struct i2c_client *client,
ihid->pdata = *platform_data;
}
+ /* Parse platform agnostic common properties from ACPI / device tree */
+ i2c_hid_fwnode_probe(client, &ihid->pdata);
+
ihid->pdata.supply = devm_regulator_get(&client->dev, "vdd");
if (IS_ERR(ihid->pdata.supply)) {
ret = PTR_ERR(ihid->pdata.supply);
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 2aac097c3f70..97869b7410eb 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -28,6 +28,7 @@
#define SPT_Ax_DEVICE_ID 0x9D35
#define CNL_Ax_DEVICE_ID 0x9DFC
#define GLK_Ax_DEVICE_ID 0x31A2
+#define CNL_H_DEVICE_ID 0xA37C
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 20d824f74f99..582e449be9fe 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -37,6 +37,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 6f819f144cb4..fc43850a155e 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -753,7 +753,7 @@ unlock:
return ret ? ret : count;
}
-static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
+static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
struct uhid_device *uhid = file->private_data;
diff --git a/drivers/hid/usbhid/Makefile b/drivers/hid/usbhid/Makefile
index 0ff227d0c033..b6349e30bd93 100644
--- a/drivers/hid/usbhid/Makefile
+++ b/drivers/hid/usbhid/Makefile
@@ -3,7 +3,7 @@
# Makefile for the USB input drivers
#
-usbhid-y := hid-core.o hid-quirks.o
+usbhid-y := hid-core.o
usbhid-$(CONFIG_USB_HIDDEV) += hiddev.o
usbhid-$(CONFIG_HID_PID) += hid-pidff.o
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 640dfb937c69..77c50cdfff97 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -978,8 +978,7 @@ static int usbhid_parse(struct hid_device *hid)
int num_descriptors;
size_t offset = offsetof(struct hid_descriptor, desc);
- quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
+ quirks = hid_lookup_quirk(hid);
if (quirks & HID_QUIRK_IGNORE)
return -ENODEV;
@@ -1328,8 +1327,8 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
hid->bus = BUS_USB;
hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
hid->product = le16_to_cpu(dev->descriptor.idProduct);
+ hid->version = le16_to_cpu(dev->descriptor.bcdDevice);
hid->name[0] = 0;
- hid->quirks = usbhid_lookup_quirk(hid->vendor, hid->product);
if (intf->cur_altsetting->desc.bInterfaceProtocol ==
USB_INTERFACE_PROTOCOL_MOUSE)
hid->type = HID_TYPE_USBMOUSE;
@@ -1641,7 +1640,7 @@ static int __init hid_init(void)
{
int retval = -ENOMEM;
- retval = usbhid_quirks_init(quirks_param);
+ retval = hid_quirks_init(quirks_param, BUS_USB, MAX_USBHID_BOOT_QUIRKS);
if (retval)
goto usbhid_quirks_init_fail;
retval = usb_register(&hid_driver);
@@ -1651,7 +1650,7 @@ static int __init hid_init(void)
return 0;
usb_register_fail:
- usbhid_quirks_exit();
+ hid_quirks_exit(BUS_USB);
usbhid_quirks_init_fail:
return retval;
}
@@ -1659,7 +1658,7 @@ usbhid_quirks_init_fail:
static void __exit hid_exit(void)
{
usb_deregister(&hid_driver);
- usbhid_quirks_exit();
+ hid_quirks_exit(BUS_USB);
}
module_init(hid_init);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
deleted file mode 100644
index 331f7f34ec14..000000000000
--- a/drivers/hid/usbhid/hid-quirks.c
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * USB HID quirks support for Linux
- *
- * Copyright (c) 1999 Andreas Gal
- * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
- * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
- * Copyright (c) 2006-2007 Jiri Kosina
- * Copyright (c) 2007 Paul Walmsley
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/hid.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-
-#include "../hid-ids.h"
-
-/*
- * Alphabetically sorted blacklist by quirk type.
- */
-
-static const struct hid_blacklist {
- __u16 idVendor;
- __u16 idProduct;
- __u32 quirks;
-} hid_blacklist[] = {
- { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
- { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_NATSU, USB_DEVICE_ID_NATSU_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_NEXTWINDOW, USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN, HID_QUIRK_MULTI_INPUT},
- { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
-
- { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
-
- { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
- { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
-
- { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS1758, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70R, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_M65RGB, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K95RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS817_TOUCH, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS1030_TOUCH, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
-
- { USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
-
- { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
-
- { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK, HID_QUIRK_MULTI_INPUT },
-
- { 0, 0 }
-};
-
-/* Dynamic HID quirks list - specified at runtime */
-struct quirks_list_struct {
- struct hid_blacklist hid_bl_item;
- struct list_head node;
-};
-
-static LIST_HEAD(dquirks_list);
-static DECLARE_RWSEM(dquirks_rwsem);
-
-/* Runtime ("dynamic") quirks manipulation functions */
-
-/**
- * usbhid_exists_dquirk: find any dynamic quirks for a USB HID device
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- *
- * Description:
- * Scans dquirks_list for a matching dynamic quirk and returns
- * the pointer to the relevant struct hid_blacklist if found.
- * Must be called with a read lock held on dquirks_rwsem.
- *
- * Returns: NULL if no quirk found, struct hid_blacklist * if found.
- */
-static struct hid_blacklist *usbhid_exists_dquirk(const u16 idVendor,
- const u16 idProduct)
-{
- struct quirks_list_struct *q;
- struct hid_blacklist *bl_entry = NULL;
-
- list_for_each_entry(q, &dquirks_list, node) {
- if (q->hid_bl_item.idVendor == idVendor &&
- q->hid_bl_item.idProduct == idProduct) {
- bl_entry = &q->hid_bl_item;
- break;
- }
- }
-
- if (bl_entry != NULL)
- dbg_hid("Found dynamic quirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
- bl_entry->quirks, bl_entry->idVendor,
- bl_entry->idProduct);
-
- return bl_entry;
-}
-
-
-/**
- * usbhid_modify_dquirk: add/replace a HID quirk
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- * @quirks: the u32 quirks value to add/replace
- *
- * Description:
- * If an dynamic quirk exists in memory for this (idVendor,
- * idProduct) pair, replace its quirks value with what was
- * provided. Otherwise, add the quirk to the dynamic quirks list.
- *
- * Returns: 0 OK, -error on failure.
- */
-static int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct,
- const u32 quirks)
-{
- struct quirks_list_struct *q_new, *q;
- int list_edited = 0;
-
- if (!idVendor) {
- dbg_hid("Cannot add a quirk with idVendor = 0\n");
- return -EINVAL;
- }
-
- q_new = kmalloc(sizeof(struct quirks_list_struct), GFP_KERNEL);
- if (!q_new)
- return -ENOMEM;
-
- q_new->hid_bl_item.idVendor = idVendor;
- q_new->hid_bl_item.idProduct = idProduct;
- q_new->hid_bl_item.quirks = quirks;
-
- down_write(&dquirks_rwsem);
-
- list_for_each_entry(q, &dquirks_list, node) {
-
- if (q->hid_bl_item.idVendor == idVendor &&
- q->hid_bl_item.idProduct == idProduct) {
-
- list_replace(&q->node, &q_new->node);
- kfree(q);
- list_edited = 1;
- break;
-
- }
-
- }
-
- if (!list_edited)
- list_add_tail(&q_new->node, &dquirks_list);
-
- up_write(&dquirks_rwsem);
-
- return 0;
-}
-
-/**
- * usbhid_remove_all_dquirks: remove all runtime HID quirks from memory
- *
- * Description:
- * Free all memory associated with dynamic quirks - called before
- * module unload.
- *
- */
-static void usbhid_remove_all_dquirks(void)
-{
- struct quirks_list_struct *q, *temp;
-
- down_write(&dquirks_rwsem);
- list_for_each_entry_safe(q, temp, &dquirks_list, node) {
- list_del(&q->node);
- kfree(q);
- }
- up_write(&dquirks_rwsem);
-
-}
-
-/**
- * usbhid_quirks_init: apply USB HID quirks specified at module load time
- */
-int usbhid_quirks_init(char **quirks_param)
-{
- u16 idVendor, idProduct;
- u32 quirks;
- int n = 0, m;
-
- for (; n < MAX_USBHID_BOOT_QUIRKS && quirks_param[n]; n++) {
-
- m = sscanf(quirks_param[n], "0x%hx:0x%hx:0x%x",
- &idVendor, &idProduct, &quirks);
-
- if (m != 3 ||
- usbhid_modify_dquirk(idVendor, idProduct, quirks) != 0) {
- pr_warn("Could not parse HID quirk module param %s\n",
- quirks_param[n]);
- }
- }
-
- return 0;
-}
-
-/**
- * usbhid_quirks_exit: release memory associated with dynamic_quirks
- *
- * Description:
- * Release all memory associated with dynamic quirks. Called upon
- * module unload.
- *
- * Returns: nothing
- */
-void usbhid_quirks_exit(void)
-{
- usbhid_remove_all_dquirks();
-}
-
-/**
- * usbhid_exists_squirk: return any static quirks for a USB HID device
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- *
- * Description:
- * Given a USB vendor ID and product ID, return a pointer to
- * the hid_blacklist entry associated with that device.
- *
- * Returns: pointer if quirk found, or NULL if no quirks found.
- */
-static const struct hid_blacklist *usbhid_exists_squirk(const u16 idVendor,
- const u16 idProduct)
-{
- const struct hid_blacklist *bl_entry = NULL;
- int n = 0;
-
- for (; hid_blacklist[n].idVendor; n++)
- if (hid_blacklist[n].idVendor == idVendor &&
- (hid_blacklist[n].idProduct == (__u16) HID_ANY_ID ||
- hid_blacklist[n].idProduct == idProduct))
- bl_entry = &hid_blacklist[n];
-
- if (bl_entry != NULL)
- dbg_hid("Found squirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
- bl_entry->quirks, bl_entry->idVendor,
- bl_entry->idProduct);
- return bl_entry;
-}
-
-/**
- * usbhid_lookup_quirk: return any quirks associated with a USB HID device
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- *
- * Description:
- * Given a USB vendor ID and product ID, return any quirks associated
- * with that device.
- *
- * Returns: a u32 quirks value.
- */
-u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct)
-{
- u32 quirks = 0;
- const struct hid_blacklist *bl_entry = NULL;
-
- /* NCR devices must not be queried for reports */
- if (idVendor == USB_VENDOR_ID_NCR &&
- idProduct >= USB_DEVICE_ID_NCR_FIRST &&
- idProduct <= USB_DEVICE_ID_NCR_LAST)
- return HID_QUIRK_NO_INIT_REPORTS;
-
- down_read(&dquirks_rwsem);
- bl_entry = usbhid_exists_dquirk(idVendor, idProduct);
- if (!bl_entry)
- bl_entry = usbhid_exists_squirk(idVendor, idProduct);
- if (bl_entry)
- quirks = bl_entry->quirks;
- up_read(&dquirks_rwsem);
-
- return quirks;
-}
-
-EXPORT_SYMBOL_GPL(usbhid_lookup_quirk);
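
With this deletion the static blacklist and the dynamic quirk list move to the bus-agnostic drivers/hid/hid-quirks.c, looked up via hid_lookup_quirk() and seeded by hid_quirks_init(). The module parameter format is unchanged: up to MAX_USBHID_BOOT_QUIRKS "vendor:product:quirks" triplets in hex, for example (IDs illustrative):

    modprobe usbhid quirks=0x1234:0x5678:0x0004
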
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 7d749b19c27c..0ff3e7e70c8d 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -422,7 +422,7 @@ static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t coun
* "poll" file op
* No kernel lock - fine
*/
-static unsigned int hiddev_poll(struct file *file, poll_table *wait)
+static __poll_t hiddev_poll(struct file *file, poll_table *wait)
{
struct hiddev_list *list = file->private_data;
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index ee71ad9b6cc1..409543160af7 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -56,6 +56,107 @@ static int wacom_set_report(struct hid_device *hdev, u8 type, u8 *buf,
return retval;
}
+static void wacom_wac_queue_insert(struct hid_device *hdev,
+ struct kfifo_rec_ptr_2 *fifo,
+ u8 *raw_data, int size)
+{
+ bool warned = false;
+
+ while (kfifo_avail(fifo) < size) {
+ if (!warned)
+ hid_warn(hdev, "%s: kfifo has filled, starting to drop events\n", __func__);
+ warned = true;
+
+ kfifo_skip(fifo);
+ }
+
+ kfifo_in(fifo, raw_data, size);
+}
+
+static void wacom_wac_queue_flush(struct hid_device *hdev,
+ struct kfifo_rec_ptr_2 *fifo)
+{
+ while (!kfifo_is_empty(fifo)) {
+ u8 buf[WACOM_PKGLEN_MAX];
+ int size;
+ int err;
+
+ size = kfifo_out(fifo, buf, sizeof(buf));
+ err = hid_report_raw_event(hdev, HID_INPUT_REPORT, buf, size, false);
+ if (err) {
+ hid_warn(hdev, "%s: unable to flush event due to error %d\n",
+ __func__, err);
+ }
+ }
+}
+
+static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ bool flush = false;
+ bool insert = false;
+ int i, j;
+
+ if (wacom_wac->serial[0] || !(features->quirks & WACOM_QUIRK_TOOLSERIAL))
+ return 0;
+
+ /* Queue events which have invalid tool type or serial number */
+ for (i = 0; i < report->maxfield; i++) {
+ for (j = 0; j < report->field[i]->maxusage; j++) {
+ struct hid_field *field = report->field[i];
+ struct hid_usage *usage = &field->usage[j];
+ unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ unsigned int offset;
+ unsigned int size;
+ unsigned int value;
+
+ if (equivalent_usage != HID_DG_INRANGE &&
+ equivalent_usage != HID_DG_TOOLSERIALNUMBER &&
+ equivalent_usage != WACOM_HID_WD_SERIALHI &&
+ equivalent_usage != WACOM_HID_WD_TOOLTYPE)
+ continue;
+
+ offset = field->report_offset;
+ size = field->report_size;
+ value = hid_field_extract(hdev, raw_data+1, offset + j * size, size);
+
+ /* If we go out of range, we need to flush the queue ASAP */
+ if (equivalent_usage == HID_DG_INRANGE)
+ value = !value;
+
+ if (value) {
+ flush = true;
+ switch (equivalent_usage) {
+ case HID_DG_TOOLSERIALNUMBER:
+ wacom_wac->serial[0] = value;
+ break;
+
+ case WACOM_HID_WD_SERIALHI:
+ wacom_wac->serial[0] |= ((__u64)value) << 32;
+ break;
+
+ case WACOM_HID_WD_TOOLTYPE:
+ wacom_wac->id[0] = value;
+ break;
+ }
+ } else {
+ insert = true;
+ }
+ }
+ }
+
+ if (flush)
+ wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+ else if (insert)
+ wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, raw_data, size);
+
+ return insert && !flush;
+}
+
static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *raw_data, int size)
{
@@ -64,6 +165,9 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
if (size > WACOM_PKGLEN_MAX)
return 1;
+ if (wacom_wac_pen_serial_enforce(hdev, report, raw_data, size))
+ return -1;
+
memcpy(wacom->wacom_wac.data, raw_data, size);
wacom_wac_irq(&wacom->wacom_wac, size);
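
wacom_wac_queue_insert() and wacom_wac_queue_flush() above rely on a record kfifo, which stores each report together with its length so that whole reports are enqueued and dequeued atomically. A self-contained sketch of that pattern (sizes and names are illustrative):

    #include <linux/kfifo.h>

    static struct kfifo_rec_ptr_2 fifo; /* 2-byte length header per record */

    static int record_fifo_demo(const u8 *report, unsigned int len)
    {
        u8 buf[256];
        int ret = kfifo_alloc(&fifo, 4096, GFP_KERNEL); /* rounds up to 2^n */

        if (ret)
            return ret;

        while (kfifo_avail(&fifo) < len)
            kfifo_skip(&fifo);          /* drop oldest record to make room */
        kfifo_in(&fifo, report, len);   /* enqueue one complete record */

        while (!kfifo_is_empty(&fifo)) {
            unsigned int n = kfifo_out(&fifo, buf, sizeof(buf));

            if (!n)                     /* defensive: empty record */
                break;
            /* process n bytes: exactly one original record */
        }

        kfifo_free(&fifo);
        return 0;
    }
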
@@ -2347,23 +2451,23 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
int i;
unsigned long flags;
- spin_lock_irqsave(&remote->remote_lock, flags);
- remote->remotes[index].registered = false;
- spin_unlock_irqrestore(&remote->remote_lock, flags);
+ for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+ if (remote->remotes[i].serial == serial) {
- if (remote->remotes[index].battery.battery)
- devres_release_group(&wacom->hdev->dev,
- &remote->remotes[index].battery.bat_desc);
+ spin_lock_irqsave(&remote->remote_lock, flags);
+ remote->remotes[i].registered = false;
+ spin_unlock_irqrestore(&remote->remote_lock, flags);
- if (remote->remotes[index].group.name)
- devres_release_group(&wacom->hdev->dev,
- &remote->remotes[index]);
+ if (remote->remotes[i].battery.battery)
+ devres_release_group(&wacom->hdev->dev,
+ &remote->remotes[i].battery.bat_desc);
+
+ if (remote->remotes[i].group.name)
+ devres_release_group(&wacom->hdev->dev,
+ &remote->remotes[i]);
- for (i = 0; i < WACOM_MAX_REMOTES; i++) {
- if (remote->remotes[i].serial == serial) {
remote->remotes[i].serial = 0;
remote->remotes[i].group.name = NULL;
- remote->remotes[i].registered = false;
remote->remotes[i].battery.battery = NULL;
wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
}
@@ -2580,6 +2684,10 @@ static int wacom_probe(struct hid_device *hdev,
goto fail;
}
+ error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ if (error)
+ goto fail;
+
wacom_wac->hid_data.inputmode = -1;
wacom_wac->mode_report = -1;
@@ -2643,6 +2751,8 @@ static void wacom_remove(struct hid_device *hdev)
if (wacom->wacom_wac.features.type != REMOTE)
wacom_release_resources(wacom);
+ kfifo_free(&wacom_wac->pen_fifo);
+
hid_set_drvdata(hdev, NULL);
}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 16af6886e828..90c38a0523e9 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1924,7 +1924,6 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
struct wacom_features *features = &wacom_wac->features;
unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
int i;
- bool is_touch_on = value;
bool do_report = false;
/*
@@ -1969,16 +1968,17 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
break;
case WACOM_HID_WD_MUTE_DEVICE:
- if (wacom_wac->shared->touch_input && value) {
- wacom_wac->shared->is_touch_on = !wacom_wac->shared->is_touch_on;
- is_touch_on = wacom_wac->shared->is_touch_on;
- }
-
- /* fall through*/
case WACOM_HID_WD_TOUCHONOFF:
if (wacom_wac->shared->touch_input) {
+ bool *is_touch_on = &wacom_wac->shared->is_touch_on;
+
+ if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value)
+ *is_touch_on = !(*is_touch_on);
+ else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)
+ *is_touch_on = value;
+
input_report_switch(wacom_wac->shared->touch_input,
- SW_MUTE_DEVICE, !is_touch_on);
+ SW_MUTE_DEVICE, !(*is_touch_on));
input_sync(wacom_wac->shared->touch_input);
}
break;
@@ -2085,7 +2085,29 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS2, 0);
break;
case HID_DG_TOOLSERIALNUMBER:
+ features->quirks |= WACOM_QUIRK_TOOLSERIAL;
wacom_map_usage(input, usage, field, EV_MSC, MSC_SERIAL, 0);
+
+ /* Adjust AES usages to match modern convention */
+ if (usage->hid == WACOM_HID_WT_SERIALNUMBER && field->report_size == 16) {
+ if (field->index + 2 < field->report->maxfield) {
+ struct hid_field *a = field->report->field[field->index + 1];
+ struct hid_field *b = field->report->field[field->index + 2];
+
+ if (a->maxusage > 0 && a->usage[0].hid == HID_DG_TOOLSERIALNUMBER && a->report_size == 32 &&
+ b->maxusage > 0 && b->usage[0].hid == 0xFF000000 && b->report_size == 8) {
+ features->quirks |= WACOM_QUIRK_AESPEN;
+ usage->hid = WACOM_HID_WD_TOOLTYPE;
+ field->logical_minimum = S16_MIN;
+ field->logical_maximum = S16_MAX;
+ a->logical_minimum = S32_MIN;
+ a->logical_maximum = S32_MAX;
+ b->usage[0].hid = WACOM_HID_WD_SERIALHI;
+ b->logical_minimum = 0;
+ b->logical_maximum = U8_MAX;
+ }
+ }
+ }
break;
case WACOM_HID_WD_SENSE:
features->quirks |= WACOM_QUIRK_SENSE;
@@ -2093,15 +2115,18 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
break;
case WACOM_HID_WD_SERIALHI:
wacom_map_usage(input, usage, field, EV_ABS, ABS_MISC, 0);
- set_bit(EV_KEY, input->evbit);
- input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
- input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
- input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
- input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
- input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
- if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
- input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
- input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+
+ if (!(features->quirks & WACOM_QUIRK_AESPEN)) {
+ set_bit(EV_KEY, input->evbit);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+ input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
+ input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
+ input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
+ if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
+ input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
+ input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+ }
}
break;
case WACOM_HID_WD_FINGERWHEEL:
@@ -4390,6 +4415,12 @@ static const struct wacom_features wacom_features_0x360 =
static const struct wacom_features wacom_features_0x361 =
{ "Wacom Intuos Pro L", 62200, 43200, 8191, 63,
INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
+static const struct wacom_features wacom_features_0x37A =
+ { "Wacom One by Wacom S", 15200, 9500, 2047, 63,
+ BAMBOO_PEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x37B =
+ { "Wacom One by Wacom M", 21600, 13500, 2047, 63,
+ BAMBOO_PEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
@@ -4558,6 +4589,8 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x343) },
{ BT_DEVICE_WACOM(0x360) },
{ BT_DEVICE_WACOM(0x361) },
+ { USB_DEVICE_WACOM(0x37A) },
+ { USB_DEVICE_WACOM(0x37B) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 64d8f014602e..15d9c14fbdf7 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/hid.h>
+#include <linux/kfifo.h>
/* maximum packet length for USB/BT devices */
#define WACOM_PKGLEN_MAX 361
@@ -86,7 +87,9 @@
/* device quirks */
#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0001
#define WACOM_QUIRK_SENSE 0x0002
+#define WACOM_QUIRK_AESPEN 0x0004
#define WACOM_QUIRK_BATTERY 0x0008
+#define WACOM_QUIRK_TOOLSERIAL 0x0010
/* device types */
#define WACOM_DEVICETYPE_NONE 0x0000
@@ -107,6 +110,7 @@
#define WACOM_HID_WD_PEN (WACOM_HID_UP_WACOMDIGITIZER | 0x02)
#define WACOM_HID_WD_SENSE (WACOM_HID_UP_WACOMDIGITIZER | 0x36)
#define WACOM_HID_WD_DIGITIZERFNKEYS (WACOM_HID_UP_WACOMDIGITIZER | 0x39)
+#define WACOM_HID_WD_SERIALNUMBER (WACOM_HID_UP_WACOMDIGITIZER | 0x5b)
#define WACOM_HID_WD_SERIALHI (WACOM_HID_UP_WACOMDIGITIZER | 0x5c)
#define WACOM_HID_WD_TOOLTYPE (WACOM_HID_UP_WACOMDIGITIZER | 0x77)
#define WACOM_HID_WD_DISTANCE (WACOM_HID_UP_WACOMDIGITIZER | 0x0132)
@@ -150,6 +154,7 @@
#define WACOM_HID_WT_TOUCHSCREEN (WACOM_HID_UP_WACOMTOUCH | 0x04)
#define WACOM_HID_WT_TOUCHPAD (WACOM_HID_UP_WACOMTOUCH | 0x05)
#define WACOM_HID_WT_CONTACTMAX (WACOM_HID_UP_WACOMTOUCH | 0x55)
+#define WACOM_HID_WT_SERIALNUMBER (WACOM_HID_UP_WACOMTOUCH | 0x5b)
#define WACOM_HID_WT_X (WACOM_HID_UP_WACOMTOUCH | 0x130)
#define WACOM_HID_WT_Y (WACOM_HID_UP_WACOMTOUCH | 0x131)
@@ -336,6 +341,7 @@ struct wacom_wac {
struct input_dev *pen_input;
struct input_dev *touch_input;
struct input_dev *pad_input;
+ struct kfifo_rec_ptr_2 pen_fifo;
int pid;
int num_contacts_left;
u8 bt_features;
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
index 727f968ac1cb..8fbbacb0fe21 100644
--- a/drivers/hsi/clients/cmt_speech.c
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -451,11 +451,11 @@ static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
cs_release_cmd(msg);
if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
- struct timespec tspec;
+ struct timespec64 tspec;
struct cs_timestamp *tstamp =
&hi->mmap_cfg->tstamp_rx_ctrl;
- ktime_get_ts(&tspec);
+ ktime_get_ts64(&tspec);
tstamp->tv_sec = (__u32) tspec.tv_sec;
tstamp->tv_nsec = (__u32) tspec.tv_nsec;
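
This cmt_speech hunk is one of the y2038 conversions: on 32-bit architectures struct timespec carries a 32-bit time_t that overflows in January 2038, while timespec64/ktime_get_ts64() keep tv_sec 64-bit everywhere. The pattern in isolation (the __u32 casts survive only because the mmap'ed cs_timestamp ABI is explicitly 32-bit):

    struct timespec64 ts;

    ktime_get_ts64(&ts);                /* CLOCK_MONOTONIC, 64-bit tv_sec */
    tstamp->tv_sec  = (__u32)ts.tv_sec; /* truncation is part of the ABI */
    tstamp->tv_nsec = (__u32)ts.tv_nsec;
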
@@ -1124,10 +1124,10 @@ static int cs_char_fasync(int fd, struct file *file, int on)
return 0;
}
-static unsigned int cs_char_poll(struct file *file, poll_table *wait)
+static __poll_t cs_char_poll(struct file *file, poll_table *wait)
{
struct cs_char *csdata = file->private_data;
- unsigned int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &cs_char_data.wait, wait);
spin_lock_bh(&csdata->lock);
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index 4402a71e23f7..047959e74bb1 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -104,7 +104,7 @@ static ssize_t hvt_op_write(struct file *file, const char __user *buf,
return ret ? ret : count;
}
-static unsigned int hvt_op_poll(struct file *file, poll_table *wait)
+static __poll_t hvt_op_poll(struct file *file, poll_table *wait)
{
struct hvutil_transport *hvt;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 12eb8caa4263..50e071444a5c 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -140,6 +140,29 @@ static u32 hv_copyto_ringbuffer(
return start_write_offset;
}
+/*
+ * hv_get_ringbuffer_availbytes()
+ *
+ * Get the number of bytes available to read from and to write to
+ * the specified ring buffer.
+ */
+static void
+hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
+ u32 *read, u32 *write)
+{
+ u32 read_loc, write_loc, dsize;
+
+ /* Capture the read/write indices before they changed */
+ read_loc = READ_ONCE(rbi->ring_buffer->read_index);
+ write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+ dsize = rbi->ring_datasize;
+
+ *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+ read_loc - write_loc;
+ *read = dsize - *write;
+}
+
/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info)
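
A quick worked example of hv_get_ringbuffer_availbytes(): with ring_datasize = 4096, read_index = 1000 and write_index = 3000, the writer is ahead, so *write = 4096 - (3000 - 1000) = 2096 bytes free and *read = 4096 - 2096 = 2000 bytes pending. If the writer has wrapped (write_index = 200, read_index = 3800), *write = 3800 - 200 = 3600 and *read = 496.
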
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 76ed9a216f10..610223f0e945 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1378,6 +1378,8 @@ void vmbus_device_unregister(struct hv_device *device_obj)
pr_debug("child device %s unregistered\n",
dev_name(&device_obj->device));
+ kset_unregister(device_obj->channels_kset);
+
/*
* Kick off the process of unregistering the device.
* This will call vmbus_remove() and eventually vmbus_device_release()
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7ad017690e3a..ef23553ff5cb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -26,11 +26,9 @@ if HWMON
config HWMON_VID
tristate
- default n
config HWMON_DEBUG_CHIP
bool "Hardware Monitoring Chip debugging messages"
- default n
help
Say Y here if you want the I2C chip drivers to produce a bunch of
debug messages to the system log. Select this if you are having
@@ -42,7 +40,6 @@ comment "Native drivers"
config SENSORS_AB8500
tristate "AB8500 thermal monitoring"
depends on AB8500_GPADC && AB8500_BM
- default n
help
If you say yes here you get support for the thermal sensor part
of the AB8500 chip. The driver includes thermal management for
@@ -302,7 +299,6 @@ config SENSORS_APPLESMC
select NEW_LEDS
select LEDS_CLASS
select INPUT_POLLDEV
- default n
help
This driver provides support for the Apple System Management
Controller, which provides an accelerometer (Apple Sudden Motion
@@ -678,7 +674,6 @@ config SENSORS_JC42
config SENSORS_POWR1220
tristate "Lattice POWR1220 Power Monitoring"
depends on I2C
- default n
help
If you say yes here you get access to the hardware monitoring
functions of the Lattice POWR1220 isp Power Supply Monitoring,
@@ -702,7 +697,6 @@ config SENSORS_LTC2945
tristate "Linear Technology LTC2945"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC2945
I2C System Monitor.
@@ -727,7 +721,6 @@ config SENSORS_LTC2990
config SENSORS_LTC4151
tristate "Linear Technology LTC4151"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4151
High Voltage I2C Current and Voltage Monitor interface.
@@ -738,7 +731,6 @@ config SENSORS_LTC4151
config SENSORS_LTC4215
tristate "Linear Technology LTC4215"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4215
Hot Swap Controller I2C interface.
@@ -750,7 +742,6 @@ config SENSORS_LTC4222
tristate "Linear Technology LTC4222"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4222
Dual Hot Swap Controller I2C interface.
@@ -761,7 +752,6 @@ config SENSORS_LTC4222
config SENSORS_LTC4245
tristate "Linear Technology LTC4245"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4245
Multiple Supply Hot Swap Controller I2C interface.
@@ -773,7 +763,6 @@ config SENSORS_LTC4260
tristate "Linear Technology LTC4260"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4260
Positive Voltage Hot Swap Controller I2C interface.
@@ -784,7 +773,6 @@ config SENSORS_LTC4260
config SENSORS_LTC4261
tristate "Linear Technology LTC4261"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4261
Negative Voltage Hot Swap Controller I2C interface.
@@ -1276,7 +1264,6 @@ config SENSORS_NSA320
config SENSORS_PCF8591
tristate "Philips PCF8591 ADC/DAC"
depends on I2C
- default n
help
If you say yes here you get support for Philips PCF8591 4-channel
ADC, 1-channel DAC chips.
@@ -1459,7 +1446,6 @@ config SENSORS_SMSC47B397
config SENSORS_SCH56XX_COMMON
tristate
- default n
config SENSORS_SCH5627
tristate "SMSC SCH5627"
@@ -1505,7 +1491,6 @@ config SENSORS_STTS751
config SENSORS_SMM665
tristate "Summit Microelectronics SMM665"
depends on I2C
- default n
help
If you say yes here you get support for the hardware monitoring
features of the Summit Microelectronics SMM665/SMM665B Six-Channel
@@ -1725,6 +1710,16 @@ config SENSORS_VT8231
This driver can also be built as a module. If so, the module
will be called vt8231.
+config SENSORS_W83773G
+ tristate "Nuvoton W83773G"
+ depends on I2C
+ help
+ If you say yes here you get support for the Nuvoton W83773G hardware
+ monitoring chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called w83773g.
+
config SENSORS_W83781D
tristate "Winbond W83781D, W83782D, W83783S, Asus AS99127F"
depends on I2C
@@ -1782,7 +1777,6 @@ config SENSORS_W83795
config SENSORS_W83795_FANCTRL
bool "Include automatic fan control support (DANGEROUS)"
depends on SENSORS_W83795
- default n
help
If you say yes here, support for automatic fan speed control
will be included in the driver.
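
All of the "default n" deletions in this Kconfig hunk are behavior-neutral: a bool or tristate symbol with no default line already defaults to n, so the removed lines were redundant noise rather than configuration changes.
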
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 0fe489fab663..f814b4ace138 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_SENSORS_ATK0110) += asus_atk0110.o
# asb100, then w83781d go first, as they can override other drivers' addresses.
obj-$(CONFIG_SENSORS_ASB100) += asb100.o
obj-$(CONFIG_SENSORS_W83627HF) += w83627hf.o
+obj-$(CONFIG_SENSORS_W83773G) += w83773g.o
obj-$(CONFIG_SENSORS_W83792D) += w83792d.o
obj-$(CONFIG_SENSORS_W83793) += w83793.o
obj-$(CONFIG_SENSORS_W83795) += w83795.o
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 63a95e23ca81..693a3d53cab5 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include <linux/sysfs.h>
#include <linux/thermal.h>
@@ -181,6 +182,7 @@ struct aspeed_cooling_device {
struct aspeed_pwm_tacho_data {
struct regmap *regmap;
+ struct reset_control *rst;
unsigned long clk_freq;
bool pwm_present[8];
bool fan_tach_present[16];
@@ -905,6 +907,13 @@ static int aspeed_create_fan(struct device *dev,
return 0;
}
+static void aspeed_pwm_tacho_remove(void *data)
+{
+ struct aspeed_pwm_tacho_data *priv = data;
+
+ reset_control_assert(priv->rst);
+}
+
static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -931,6 +940,19 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
&aspeed_pwm_tacho_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
+
+ priv->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ dev_err(dev,
+ "missing or invalid reset controller device tree entry");
+ return PTR_ERR(priv->rst);
+ }
+ reset_control_deassert(priv->rst);
+
+ ret = devm_add_action_or_reset(dev, aspeed_pwm_tacho_remove, priv);
+ if (ret)
+ return ret;
+
regmap_write(priv->regmap, ASPEED_PTCR_TACH_SOURCE, 0);
regmap_write(priv->regmap, ASPEED_PTCR_TACH_SOURCE_EXT, 0);
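
The aspeed-pwm-tacho probe change follows the usual devm teardown idiom: acquire and deassert the reset line, then register an action so the line is re-asserted automatically on probe failure or driver unbind. The skeleton of the pattern, with illustrative names:

    #include <linux/device.h>
    #include <linux/reset.h>

    static void example_reset_assert(void *data)
    {
        reset_control_assert(data);     /* runs on unbind or failed probe */
    }

    static int example_claim_reset(struct device *dev)
    {
        struct reset_control *rst;

        rst = devm_reset_control_get_exclusive(dev, NULL);
        if (IS_ERR(rst))
            return PTR_ERR(rst);

        reset_control_deassert(rst);
        /* pair the deassert with an automatic assert on teardown */
        return devm_add_action_or_reset(dev, example_reset_assert, rst);
    }
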
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index c13a4fd86b3c..4bdbf77f7197 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -246,7 +246,8 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
int err;
u32 eax, edx;
int i;
- struct pci_dev *host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ u16 devfn = PCI_DEVFN(0, 0);
+ struct pci_dev *host_bridge = pci_get_domain_bus_and_slot(0, 0, devfn);
/*
* Explicit tjmax table entries override heuristics.
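
pci_get_bus_and_slot() hard-coded PCI domain 0 and was being phased out in favor of the explicit-domain variant used here. A sketch of the lookup, including the reference drop the pci_get_*() family requires:

    #include <linux/pci.h>

    static void example_peek_host_bridge(void)
    {
        /* domain 0, bus 0, device 0, function 0 */
        struct pci_dev *bridge =
            pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));

        if (bridge) {
            /* ... inspect vendor/device IDs, config space, etc. ... */
            pci_dev_put(bridge);        /* the lookup took a reference */
        }
    }
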
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index c7c9e95e58a8..bf3bb7e1adab 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -76,6 +76,7 @@ static uint i8k_fan_mult = I8K_FAN_MULT;
static uint i8k_pwm_mult;
static uint i8k_fan_max = I8K_FAN_HIGH;
static bool disallow_fan_type_call;
+static bool disallow_fan_support;
#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
@@ -242,6 +243,9 @@ static int i8k_get_fan_status(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_FAN, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : regs.eax & 0xff;
}
@@ -253,6 +257,9 @@ static int i8k_get_fan_speed(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
}
@@ -264,7 +271,7 @@ static int _i8k_get_fan_type(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
- if (disallow_fan_type_call)
+ if (disallow_fan_support || disallow_fan_type_call)
return -EINVAL;
regs.ebx = fan & 0xff;
@@ -289,6 +296,9 @@ static int i8k_get_fan_nominal_speed(int fan, int speed)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_NOM_SPEED, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = (fan & 0xff) | (speed << 8);
return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
}
@@ -300,6 +310,9 @@ static int i8k_set_fan(int fan, int speed)
{
struct smm_regs regs = { .eax = I8K_SMM_SET_FAN, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
speed = (speed < 0) ? 0 : ((speed > i8k_fan_max) ? i8k_fan_max : speed);
regs.ebx = (fan & 0xff) | (speed << 8);
@@ -772,6 +785,8 @@ static struct attribute *i8k_attrs[] = {
static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
+ if (disallow_fan_support && index >= 8)
+ return 0;
if (disallow_fan_type_call &&
(index == 9 || index == 12 || index == 15))
return 0;
@@ -1039,6 +1054,30 @@ static const struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initconst
};
/*
+ * On some machines, all fan-related SMM functions implemented by the Dell
+ * BIOS firmware freeze the kernel for about 500ms. Until Dell fixes these
+ * problems, fan support stays disabled for the affected blacklisted machines.
+ * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=195751
+ */
+static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
+ {
+ .ident = "Dell Inspiron 7720",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 7720"),
+ },
+ },
+ {
+ .ident = "Dell Vostro 3360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
+ },
+ },
+ { }
+};
+
+/*
* Probe for the presence of a supported laptop.
*/
static int __init i8k_probe(void)
@@ -1060,8 +1099,17 @@ static int __init i8k_probe(void)
i8k_get_dmi_data(DMI_BIOS_VERSION));
}
- if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
- disallow_fan_type_call = true;
+ if (dmi_check_system(i8k_blacklist_fan_support_dmi_table)) {
+ pr_warn("broken Dell BIOS detected, disallow fan support\n");
+ if (!force)
+ disallow_fan_support = true;
+ }
+
+ if (dmi_check_system(i8k_blacklist_fan_type_dmi_table)) {
+ pr_warn("broken Dell BIOS detected, disallow fan type call\n");
+ if (!force)
+ disallow_fan_type_call = true;
+ }
strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
sizeof(bios_version));
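
dmi_check_system() returns the number of table entries matching the running firmware's DMI strings, so both blacklists above reduce to a table plus one call. A compressed sketch with illustrative identifiers:

    #include <linux/dmi.h>

    static const struct dmi_system_id example_blacklist[] = {
        {
            .ident = "Example Laptop",
            .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
                DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Model 1234"),
            },
        },
        { }     /* terminator */
    };

    static bool example_is_blacklisted(void)
    {
        return dmi_check_system(example_blacklist) > 0;
    }
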
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index 7b73d2002d3e..0ae1ee1dbf76 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -37,7 +37,7 @@
/**
* struct hih6130 - HIH-6130 device specific data
- * @hwmon_dev: device registered with hwmon
+ * @client: pointer to I2C client device
* @lock: mutex to protect measurement values
* @valid: only false before first measurement is taken
* @last_update: time of last update (jiffies)
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index c9790e2c3440..32083e452cde 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -143,6 +143,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
struct hwmon_device *hwdev, int index)
{
struct hwmon_thermal_data *tdata;
+ struct thermal_zone_device *tzd;
tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL);
if (!tdata)
@@ -151,8 +152,14 @@ static int hwmon_thermal_add_sensor(struct device *dev,
tdata->hwdev = hwdev;
tdata->index = index;
- devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
- &hwmon_thermal_ops);
+ tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
+ &hwmon_thermal_ops);
+ /*
+ * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
+ * so ignore that error but forward any other error.
+ */
+ if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
+ return PTR_ERR(tzd);
return 0;
}
@@ -621,14 +628,20 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
if (!chip->ops->is_visible(drvdata, hwmon_temp,
hwmon_temp_input, j))
continue;
- if (info[i]->config[j] & HWMON_T_INPUT)
- hwmon_thermal_add_sensor(dev, hwdev, j);
+ if (info[i]->config[j] & HWMON_T_INPUT) {
+ err = hwmon_thermal_add_sensor(dev,
+ hwdev, j);
+ if (err)
+ goto free_device;
+ }
}
}
}
return hdev;
+free_device:
+ device_unregister(hdev);
free_hwmon:
kfree(hwdev);
ida_remove:
@@ -665,7 +678,7 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
* @dev: the parent device
* @name: hwmon name attribute
* @drvdata: driver data to attach to created device
- * @info: pointer to hwmon chip information
+ * @chip: pointer to hwmon chip information
* @extra_groups: pointer to list of additional non-standard attribute groups
*
* hwmon_device_unregister() must be called when the device is no
@@ -772,11 +785,11 @@ EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups);
/**
* devm_hwmon_device_register_with_info - register w/ hwmon
- * @dev: the parent device
- * @name: hwmon name attribute
- * @drvdata: driver data to attach to created device
- * @info: Pointer to hwmon chip information
- * @groups - pointer to list of driver specific attribute groups
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @chip: pointer to hwmon chip information
+ * @groups: pointer to list of driver specific attribute groups
*
* Returns the pointer to the new device. The new device is automatically
* unregistered with the parent device.
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index f6a76679c650..5e5b32a1ec4b 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -23,7 +23,8 @@
* @channels: filled with array of channels from iio
* @num_channels: number of channels in channels (saves counting twice)
* @hwmon_dev: associated hwmon device
- * @attr_group: the group of attributes
+ * @attr_group: the group of attributes
+ * @groups: null terminated array of attribute groups
* @attrs: null terminated array of attribute pointers.
*/
struct iio_hwmon_state {
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 62e38fa8cda2..e9e6aeabbf84 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -95,18 +95,20 @@ enum ina2xx_ids { ina219, ina226 };
struct ina2xx_config {
u16 config_default;
- int calibration_factor;
+ int calibration_value;
int registers;
int shunt_div;
int bus_voltage_shift;
int bus_voltage_lsb; /* uV */
- int power_lsb; /* uW */
+ int power_lsb_factor;
};
struct ina2xx_data {
const struct ina2xx_config *config;
long rshunt;
+ long current_lsb_uA;
+ long power_lsb_uW;
struct mutex config_lock;
struct regmap *regmap;
@@ -116,21 +118,21 @@ struct ina2xx_data {
static const struct ina2xx_config ina2xx_config[] = {
[ina219] = {
.config_default = INA219_CONFIG_DEFAULT,
- .calibration_factor = 40960000,
+ .calibration_value = 4096,
.registers = INA219_REGISTERS,
.shunt_div = 100,
.bus_voltage_shift = 3,
.bus_voltage_lsb = 4000,
- .power_lsb = 20000,
+ .power_lsb_factor = 20,
},
[ina226] = {
.config_default = INA226_CONFIG_DEFAULT,
- .calibration_factor = 5120000,
+ .calibration_value = 2048,
.registers = INA226_REGISTERS,
.shunt_div = 400,
.bus_voltage_shift = 0,
.bus_voltage_lsb = 1250,
- .power_lsb = 25000,
+ .power_lsb_factor = 25,
},
};
@@ -169,12 +171,16 @@ static u16 ina226_interval_to_reg(int interval)
return INA226_SHIFT_AVG(avg_bits);
}
+/*
+ * The calibration register is set to the best value, which eliminates
+ * truncation errors when the hardware computes the current register.
+ * According to the datasheet (eq. 3) the best values are 2048 for the
+ * ina226 and 4096 for the ina219; they are hardcoded as calibration_value.
+ */
static int ina2xx_calibrate(struct ina2xx_data *data)
{
- u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- data->rshunt);
-
- return regmap_write(data->regmap, INA2XX_CALIBRATION, val);
+ return regmap_write(data->regmap, INA2XX_CALIBRATION,
+ data->config->calibration_value);
}
/*
@@ -187,10 +193,6 @@ static int ina2xx_init(struct ina2xx_data *data)
if (ret < 0)
return ret;
- /*
- * Set current LSB to 1mA, shunt is in uOhms
- * (equation 13 in datasheet).
- */
return ina2xx_calibrate(data);
}
@@ -268,15 +270,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_POWER:
- val = regval * data->config->power_lsb;
+ val = regval * data->power_lsb_uW;
break;
case INA2XX_CURRENT:
- /* signed register, LSB=1mA (selected), in mA */
- val = (s16)regval;
+ /* signed register, result in mA */
+ val = regval * data->current_lsb_uA;
+ val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_CALIBRATION:
- val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- regval);
+ val = regval;
break;
default:
/* programmer goofed */
@@ -304,9 +306,32 @@ static ssize_t ina2xx_show_value(struct device *dev,
ina2xx_get_value(data, attr->index, regval));
}
-static ssize_t ina2xx_set_shunt(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+/*
+ * To keep the calibration register value fixed, the product of
+ * current_lsb and shunt_resistor must also stay fixed, equal to
+ * shunt_voltage_lsb = 1 / shunt_div multiplied by 10^9 so that the
+ * scale is preserved.
+ */
+static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
+{
+ unsigned int dividend = DIV_ROUND_CLOSEST(1000000000,
+ data->config->shunt_div);
+ if (val <= 0 || val > dividend)
+ return -EINVAL;
+
+ mutex_lock(&data->config_lock);
+ data->rshunt = val;
+ data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val);
+ data->power_lsb_uW = data->config->power_lsb_factor *
+ data->current_lsb_uA;
+ mutex_unlock(&data->config_lock);
+
+ return 0;
+}
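
To make the scaling concrete for the ina226 (shunt_div = 400): the dividend is DIV_ROUND_CLOSEST(10^9, 400) = 2,500,000. With a typical 10 mOhm shunt (val = 10000 micro-ohms), current_lsb_uA = 2,500,000 / 10,000 = 250 and power_lsb_uW = 25 * 250 = 6250, while the calibration register stays pinned at 2048.
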
+
+static ssize_t ina2xx_store_shunt(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
{
unsigned long val;
int status;
@@ -316,18 +341,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev,
if (status < 0)
return status;
- if (val == 0 ||
- /* Values greater than the calibration factor make no sense. */
- val > data->config->calibration_factor)
- return -EINVAL;
-
- mutex_lock(&data->config_lock);
- data->rshunt = val;
- status = ina2xx_calibrate(data);
- mutex_unlock(&data->config_lock);
+ status = ina2xx_set_shunt(data, val);
if (status < 0)
return status;
-
return count;
}
@@ -387,7 +403,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
- ina2xx_show_value, ina2xx_set_shunt,
+ ina2xx_show_value, ina2xx_store_shunt,
INA2XX_CALIBRATION);
/* update interval (ina226 only) */
@@ -438,6 +454,7 @@ static int ina2xx_probe(struct i2c_client *client,
/* set the device type */
data->config = &ina2xx_config[chip];
+ mutex_init(&data->config_lock);
if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
@@ -448,10 +465,7 @@ static int ina2xx_probe(struct i2c_client *client,
val = INA2XX_RSHUNT_DEFAULT;
}
- if (val <= 0 || val > data->config->calibration_factor)
- return -ENODEV;
-
- data->rshunt = val;
+ ina2xx_set_shunt(data, val);
ina2xx_regmap_config.max_register = data->config->registers;
@@ -467,8 +481,6 @@ static int ina2xx_probe(struct i2c_client *client,
return -ENODEV;
}
- mutex_init(&data->config_lock);
-
data->groups[group++] = &ina2xx_group;
if (id->driver_data == ina226)
data->groups[group++] = &ina226_group;
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 0721e175664a..06b4e1c78bd8 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -86,6 +86,7 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen 7 1800X", 20000 },
{ 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
{ 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
+ { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
{ 0x17, "AMD Ryzen Threadripper 1950", 10000 },
{ 0x17, "AMD Ryzen Threadripper 1920", 10000 },
{ 0x17, "AMD Ryzen Threadripper 1910", 10000 },
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 005ffb5ffa92..49f4b33a5685 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -100,7 +100,7 @@ static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
switch (attr) {
case hwmon_chip_update_interval:
*val = data->sample_time;
- break;;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 08479006c7f9..6e4298e99222 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -39,6 +39,7 @@ config SENSORS_ADM1275
config SENSORS_IBM_CFFPS
tristate "IBM Common Form Factor Power Supply"
+ depends on LEDS_CLASS
help
If you say yes here you get hardware monitoring support for the IBM
Common Form Factor power supply.
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index cb56da6834e5..93d9a9ea112b 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -8,12 +8,29 @@
*/
#include <linux/bitops.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/fs.h>
#include <linux/i2c.h>
+#include <linux/jiffies.h>
+#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pmbus.h>
#include "pmbus.h"
+#define CFFPS_FRU_CMD 0x9A
+#define CFFPS_PN_CMD 0x9B
+#define CFFPS_SN_CMD 0x9E
+#define CFFPS_CCIN_CMD 0xBD
+#define CFFPS_FW_CMD_START 0xFA
+#define CFFPS_FW_NUM_BYTES 4
+#define CFFPS_SYS_CONFIG_CMD 0xDA
+
+#define CFFPS_INPUT_HISTORY_CMD 0xD6
+#define CFFPS_INPUT_HISTORY_SIZE 100
+
/* STATUS_MFR_SPECIFIC bits */
#define CFFPS_MFR_FAN_FAULT BIT(0)
#define CFFPS_MFR_THERMAL_FAULT BIT(1)
@@ -24,6 +41,153 @@
#define CFFPS_MFR_VAUX_FAULT BIT(6)
#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
+#define CFFPS_LED_BLINK BIT(0)
+#define CFFPS_LED_ON BIT(1)
+#define CFFPS_LED_OFF BIT(2)
+#define CFFPS_BLINK_RATE_MS 250
+
+enum {
+ CFFPS_DEBUGFS_INPUT_HISTORY = 0,
+ CFFPS_DEBUGFS_FRU,
+ CFFPS_DEBUGFS_PN,
+ CFFPS_DEBUGFS_SN,
+ CFFPS_DEBUGFS_CCIN,
+ CFFPS_DEBUGFS_FW,
+ CFFPS_DEBUGFS_NUM_ENTRIES
+};
+
+struct ibm_cffps_input_history {
+ struct mutex update_lock;
+ unsigned long last_update;
+
+ u8 byte_count;
+ u8 data[CFFPS_INPUT_HISTORY_SIZE];
+};
+
+struct ibm_cffps {
+ struct i2c_client *client;
+
+ struct ibm_cffps_input_history input_history;
+
+ int debugfs_entries[CFFPS_DEBUGFS_NUM_ENTRIES];
+
+ char led_name[32];
+ u8 led_state;
+ struct led_classdev led;
+};
+
+#define to_psu(x, y) container_of((x), struct ibm_cffps, debugfs_entries[(y)])
+
+static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ int rc;
+ u8 msgbuf0[1] = { CFFPS_INPUT_HISTORY_CMD };
+ u8 msgbuf1[CFFPS_INPUT_HISTORY_SIZE + 1] = { 0 };
+ struct i2c_msg msg[2] = {
+ {
+ .addr = psu->client->addr,
+ .flags = psu->client->flags,
+ .len = 1,
+ .buf = msgbuf0,
+ }, {
+ .addr = psu->client->addr,
+ .flags = psu->client->flags | I2C_M_RD,
+ .len = CFFPS_INPUT_HISTORY_SIZE + 1,
+ .buf = msgbuf1,
+ },
+ };
+
+ if (!*ppos) {
+ mutex_lock(&psu->input_history.update_lock);
+ if (time_after(jiffies, psu->input_history.last_update + HZ)) {
+ /*
+ * Use a raw i2c transfer, since we need more bytes
+ * than an SMBus block transfer allows (I2C_SMBUS_BLOCK_MAX is 32).
+ */
+ rc = i2c_transfer(psu->client->adapter, msg, 2);
+ if (rc < 0) {
+ mutex_unlock(&psu->input_history.update_lock);
+ return rc;
+ }
+
+ psu->input_history.byte_count = msgbuf1[0];
+ memcpy(psu->input_history.data, &msgbuf1[1],
+ CFFPS_INPUT_HISTORY_SIZE);
+ psu->input_history.last_update = jiffies;
+ }
+
+ mutex_unlock(&psu->input_history.update_lock);
+ }
+
+ return simple_read_from_buffer(buf, count, ppos,
+ psu->input_history.data,
+ psu->input_history.byte_count);
+}
+
+static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u8 cmd;
+ int i, rc;
+ int *idxp = file->private_data;
+ int idx = *idxp;
+ struct ibm_cffps *psu = to_psu(idxp, idx);
+ char data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+
+ switch (idx) {
+ case CFFPS_DEBUGFS_INPUT_HISTORY:
+ return ibm_cffps_read_input_history(psu, buf, count, ppos);
+ case CFFPS_DEBUGFS_FRU:
+ cmd = CFFPS_FRU_CMD;
+ break;
+ case CFFPS_DEBUGFS_PN:
+ cmd = CFFPS_PN_CMD;
+ break;
+ case CFFPS_DEBUGFS_SN:
+ cmd = CFFPS_SN_CMD;
+ break;
+ case CFFPS_DEBUGFS_CCIN:
+ rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD);
+ if (rc < 0)
+ return rc;
+
+ rc = snprintf(data, 5, "%04X", rc);
+ goto done;
+ case CFFPS_DEBUGFS_FW:
+ for (i = 0; i < CFFPS_FW_NUM_BYTES; ++i) {
+ rc = i2c_smbus_read_byte_data(psu->client,
+ CFFPS_FW_CMD_START + i);
+ if (rc < 0)
+ return rc;
+
+ snprintf(&data[i * 2], 3, "%02X", rc);
+ }
+
+ rc = i * 2;
+ goto done;
+ default:
+ return -EINVAL;
+ }
+
+ rc = i2c_smbus_read_block_data(psu->client, cmd, data);
+ if (rc < 0)
+ return rc;
+
+done:
+ data[rc] = '\n';
+ rc += 2;
+
+ return simple_read_from_buffer(buf, count, ppos, data, rc);
+}
+
+static const struct file_operations ibm_cffps_fops = {
+ .llseek = noop_llseek,
+ .read = ibm_cffps_debugfs_op,
+ .open = simple_open,
+};
+
static int ibm_cffps_read_byte_data(struct i2c_client *client, int page,
int reg)
{
@@ -105,6 +269,69 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
return rc;
}
+static void ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ int rc;
+ struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
+
+ if (brightness == LED_OFF) {
+ psu->led_state = CFFPS_LED_OFF;
+ } else {
+ brightness = LED_FULL;
+ if (psu->led_state != CFFPS_LED_BLINK)
+ psu->led_state = CFFPS_LED_ON;
+ }
+
+ rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
+ psu->led_state);
+ if (rc < 0)
+ return;
+
+ led_cdev->brightness = brightness;
+}
+
+static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ int rc;
+ struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
+
+ psu->led_state = CFFPS_LED_BLINK;
+
+ if (led_cdev->brightness == LED_OFF)
+ return 0;
+
+ rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
+ CFFPS_LED_BLINK);
+ if (rc < 0)
+ return rc;
+
+ *delay_on = CFFPS_BLINK_RATE_MS;
+ *delay_off = CFFPS_BLINK_RATE_MS;
+
+ return 0;
+}
+
+static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
+{
+ int rc;
+ struct i2c_client *client = psu->client;
+ struct device *dev = &client->dev;
+
+ snprintf(psu->led_name, sizeof(psu->led_name), "%s-%02x", client->name,
+ client->addr);
+ psu->led.name = psu->led_name;
+ psu->led.max_brightness = LED_FULL;
+ psu->led.brightness_set = ibm_cffps_led_brightness_set;
+ psu->led.blink_set = ibm_cffps_led_blink_set;
+
+ rc = devm_led_classdev_register(dev, &psu->led);
+ if (rc)
+ dev_warn(dev, "failed to register led class: %d\n", rc);
+}
+
static struct pmbus_driver_info ibm_cffps_info = {
.pages = 1,
.func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
@@ -116,10 +343,69 @@ static struct pmbus_driver_info ibm_cffps_info = {
.read_word_data = ibm_cffps_read_word_data,
};
+static struct pmbus_platform_data ibm_cffps_pdata = {
+ .flags = PMBUS_SKIP_STATUS_CHECK,
+};
+
static int ibm_cffps_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- return pmbus_do_probe(client, id, &ibm_cffps_info);
+ int i, rc;
+ struct dentry *debugfs;
+ struct dentry *ibm_cffps_dir;
+ struct ibm_cffps *psu;
+
+ client->dev.platform_data = &ibm_cffps_pdata;
+ rc = pmbus_do_probe(client, id, &ibm_cffps_info);
+ if (rc)
+ return rc;
+
+ /*
+ * Don't fail the probe if there isn't enough memory for leds and
+ * debugfs.
+ */
+ psu = devm_kzalloc(&client->dev, sizeof(*psu), GFP_KERNEL);
+ if (!psu)
+ return 0;
+
+ psu->client = client;
+ mutex_init(&psu->input_history.update_lock);
+ psu->input_history.last_update = jiffies - HZ;
+
+ ibm_cffps_create_led_class(psu);
+
+ /* Don't fail the probe if we can't create debugfs */
+ debugfs = pmbus_get_debugfs_dir(client);
+ if (!debugfs)
+ return 0;
+
+ ibm_cffps_dir = debugfs_create_dir(client->name, debugfs);
+ if (!ibm_cffps_dir)
+ return 0;
+
+ for (i = 0; i < CFFPS_DEBUGFS_NUM_ENTRIES; ++i)
+ psu->debugfs_entries[i] = i;
+
+ debugfs_create_file("input_history", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_INPUT_HISTORY],
+ &ibm_cffps_fops);
+ debugfs_create_file("fru", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_FRU],
+ &ibm_cffps_fops);
+ debugfs_create_file("part_number", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_PN],
+ &ibm_cffps_fops);
+ debugfs_create_file("serial_number", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_SN],
+ &ibm_cffps_fops);
+ debugfs_create_file("ccin", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_CCIN],
+ &ibm_cffps_fops);
+ debugfs_create_file("fw_version", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_FW],
+ &ibm_cffps_fops);
+
+ return 0;
}
static const struct i2c_device_id ibm_cffps_id[] = {
diff --git a/drivers/hwmon/pmbus/ir35221.c b/drivers/hwmon/pmbus/ir35221.c
index 8b906b44484b..977315b0fd90 100644
--- a/drivers/hwmon/pmbus/ir35221.c
+++ b/drivers/hwmon/pmbus/ir35221.c
@@ -25,168 +25,19 @@
#define IR35221_MFR_IOUT_VALLEY 0xcb
#define IR35221_MFR_TEMP_VALLEY 0xcc
-static long ir35221_reg2data(int data, enum pmbus_sensor_classes class)
-{
- s16 exponent;
- s32 mantissa;
- long val;
-
- /* We only modify LINEAR11 formats */
- exponent = ((s16)data) >> 11;
- mantissa = ((s16)((data & 0x7ff) << 5)) >> 5;
-
- val = mantissa * 1000L;
-
- /* scale result to micro-units for power sensors */
- if (class == PSC_POWER)
- val = val * 1000L;
-
- if (exponent >= 0)
- val <<= exponent;
- else
- val >>= -exponent;
-
- return val;
-}
-
-#define MAX_MANTISSA (1023 * 1000)
-#define MIN_MANTISSA (511 * 1000)
-
-static u16 ir35221_data2reg(long val, enum pmbus_sensor_classes class)
-{
- s16 exponent = 0, mantissa;
- bool negative = false;
-
- if (val == 0)
- return 0;
-
- if (val < 0) {
- negative = true;
- val = -val;
- }
-
- /* Power is in uW. Convert to mW before converting. */
- if (class == PSC_POWER)
- val = DIV_ROUND_CLOSEST(val, 1000L);
-
- /* Reduce large mantissa until it fits into 10 bit */
- while (val >= MAX_MANTISSA && exponent < 15) {
- exponent++;
- val >>= 1;
- }
- /* Increase small mantissa to improve precision */
- while (val < MIN_MANTISSA && exponent > -15) {
- exponent--;
- val <<= 1;
- }
-
- /* Convert mantissa from milli-units to units */
- mantissa = DIV_ROUND_CLOSEST(val, 1000);
-
- /* Ensure that resulting number is within range */
- if (mantissa > 0x3ff)
- mantissa = 0x3ff;
-
- /* restore sign */
- if (negative)
- mantissa = -mantissa;
-
- /* Convert to 5 bit exponent, 11 bit mantissa */
- return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
-}
-
-static u16 ir35221_scale_result(s16 data, int shift,
- enum pmbus_sensor_classes class)
-{
- long val;
-
- val = ir35221_reg2data(data, class);
-
- if (shift < 0)
- val >>= -shift;
- else
- val <<= shift;
-
- return ir35221_data2reg(val, class);
-}
-
static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
{
int ret;
switch (reg) {
- case PMBUS_IOUT_OC_FAULT_LIMIT:
- case PMBUS_IOUT_OC_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, 1, PSC_CURRENT_OUT);
- break;
- case PMBUS_VIN_OV_FAULT_LIMIT:
- case PMBUS_VIN_OV_WARN_LIMIT:
- case PMBUS_VIN_UV_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- ret = ir35221_scale_result(ret, -4, PSC_VOLTAGE_IN);
- break;
- case PMBUS_IIN_OC_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- break;
- case PMBUS_READ_VIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_VIN);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
- break;
- case PMBUS_READ_IIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_IIN);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -4, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -5, PSC_CURRENT_IN);
- break;
- case PMBUS_READ_POUT:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_POUT);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_POWER);
- break;
- case PMBUS_READ_PIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_PIN);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_POWER);
- break;
- case PMBUS_READ_IOUT:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_IOUT);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_OUT);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_OUT);
- break;
case PMBUS_VIRT_READ_VIN_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_VIN_PEAK);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
break;
case PMBUS_VIRT_READ_VOUT_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_IOUT_PEAK);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_IN);
break;
case PMBUS_VIRT_READ_TEMP_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_TEMP_PEAK);
@@ -194,9 +45,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_READ_VIN_MIN:
ret = pmbus_read_word_data(client, page,
IR35221_MFR_VIN_VALLEY);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
break;
case PMBUS_VIRT_READ_VOUT_MIN:
ret = pmbus_read_word_data(client, page,
@@ -205,12 +53,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_READ_IOUT_MIN:
ret = pmbus_read_word_data(client, page,
IR35221_MFR_IOUT_VALLEY);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_IN);
break;
case PMBUS_VIRT_READ_TEMP_MIN:
ret = pmbus_read_word_data(client, page,
@@ -224,36 +66,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
-static int ir35221_write_word_data(struct i2c_client *client, int page, int reg,
- u16 word)
-{
- int ret;
- u16 val;
-
- switch (reg) {
- case PMBUS_IOUT_OC_FAULT_LIMIT:
- case PMBUS_IOUT_OC_WARN_LIMIT:
- val = ir35221_scale_result(word, -1, PSC_CURRENT_OUT);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- case PMBUS_VIN_OV_FAULT_LIMIT:
- case PMBUS_VIN_OV_WARN_LIMIT:
- case PMBUS_VIN_UV_WARN_LIMIT:
- val = ir35221_scale_result(word, 4, PSC_VOLTAGE_IN);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- case PMBUS_IIN_OC_WARN_LIMIT:
- val = ir35221_scale_result(word, 1, PSC_CURRENT_IN);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- default:
- ret = -ENODATA;
- break;
- }
-
- return ret;
-}
-
static int ir35221_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -292,7 +104,6 @@ static int ir35221_probe(struct i2c_client *client,
if (!info)
return -ENOMEM;
- info->write_word_data = ir35221_write_word_data;
info->read_word_data = ir35221_read_word_data;
info->pages = 2;
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 10d17fb8f283..53db78753a0d 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -1,5 +1,5 @@
/*
- * Hardware monitoring driver for LM25056 / LM25063 / LM25066 / LM5064 / LM5066
+ * Hardware monitoring driver for LM25056 / LM25066 / LM5064 / LM5066
*
* Copyright (c) 2011 Ericsson AB.
* Copyright (c) 2013 Guenter Roeck
@@ -28,7 +28,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { lm25056, lm25063, lm25066, lm5064, lm5066, lm5066i };
+enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
#define LM25066_READ_VAUX 0xd0
#define LM25066_MFR_READ_IIN 0xd1
@@ -53,11 +53,6 @@ enum chips { lm25056, lm25063, lm25066, lm5064, lm5066, lm5066i };
#define LM25056_MFR_STS_VAUX_OV_WARN BIT(1)
#define LM25056_MFR_STS_VAUX_UV_WARN BIT(0)
-/* LM25063 only */
-
-#define LM25063_READ_VOUT_MAX 0xe5
-#define LM25063_READ_VOUT_MIN 0xe6
-
struct __coeff {
short m, b, R;
};
@@ -122,36 +117,6 @@ static struct __coeff lm25066_coeff[6][PSC_NUM_CLASSES + 2] = {
.m = 16,
},
},
- [lm25063] = {
- [PSC_VOLTAGE_IN] = {
- .m = 16000,
- .R = -2,
- },
- [PSC_VOLTAGE_OUT] = {
- .m = 16000,
- .R = -2,
- },
- [PSC_CURRENT_IN] = {
- .m = 10000,
- .R = -2,
- },
- [PSC_CURRENT_IN_L] = {
- .m = 10000,
- .R = -2,
- },
- [PSC_POWER] = {
- .m = 5000,
- .R = -3,
- },
- [PSC_POWER_L] = {
- .m = 5000,
- .R = -3,
- },
- [PSC_TEMPERATURE] = {
- .m = 15596,
- .R = -3,
- },
- },
[lm5064] = {
[PSC_VOLTAGE_IN] = {
.m = 4611,
@@ -272,10 +237,6 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
/* VIN: 6.14 mV VAUX: 293 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 293, 6140);
break;
- case lm25063:
- /* VIN: 6.25 mV VAUX: 200.0 uV LSB */
- ret = DIV_ROUND_CLOSEST(ret * 20, 625);
- break;
case lm25066:
/* VIN: 4.54 mV VAUX: 283.2 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 2832, 45400);
@@ -330,24 +291,6 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
-static int lm25063_read_word_data(struct i2c_client *client, int page, int reg)
-{
- int ret;
-
- switch (reg) {
- case PMBUS_VIRT_READ_VOUT_MAX:
- ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MAX);
- break;
- case PMBUS_VIRT_READ_VOUT_MIN:
- ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MIN);
- break;
- default:
- ret = lm25066_read_word_data(client, page, reg);
- break;
- }
- return ret;
-}
-
static int lm25056_read_word_data(struct i2c_client *client, int page, int reg)
{
int ret;
@@ -502,11 +445,6 @@ static int lm25066_probe(struct i2c_client *client,
info->read_word_data = lm25056_read_word_data;
info->read_byte_data = lm25056_read_byte_data;
data->rlimit = 0x0fff;
- } else if (data->id == lm25063) {
- info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
- | PMBUS_HAVE_POUT;
- info->read_word_data = lm25063_read_word_data;
- data->rlimit = 0xffff;
} else {
info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
info->read_word_data = lm25066_read_word_data;
@@ -543,7 +481,6 @@ static int lm25066_probe(struct i2c_client *client,
static const struct i2c_device_id lm25066_id[] = {
{"lm25056", lm25056},
- {"lm25063", lm25063},
{"lm25066", lm25066},
{"lm5064", lm5064},
{"lm5066", lm5066},
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
index 9313849d5160..c9dc8799b5e1 100644
--- a/drivers/hwmon/pmbus/max31785.c
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -16,12 +16,231 @@
enum max31785_regs {
MFR_REVISION = 0x9b,
+ MFR_FAN_CONFIG = 0xf1,
};
+#define MAX31785 0x3030
+#define MAX31785A 0x3040
+
+#define MFR_FAN_CONFIG_DUAL_TACH BIT(12)
+
#define MAX31785_NR_PAGES 23
+#define MAX31785_NR_FAN_PAGES 6
+
+static int max31785_read_byte_data(struct i2c_client *client, int page,
+ int reg)
+{
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ return -ENOTSUPP;
+ case PMBUS_FAN_CONFIG_12:
+ return pmbus_read_byte_data(client, page - MAX31785_NR_PAGES,
+ reg);
+ }
+
+ return -ENODATA;
+}
+
+static int max31785_write_byte(struct i2c_client *client, int page, u8 value)
+{
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ return -ENOTSUPP;
+}
+
+static int max31785_read_long_data(struct i2c_client *client, int page,
+ int reg, u32 *data)
+{
+ unsigned char cmdbuf[1];
+ unsigned char rspbuf[4];
+ int rc;
+
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(cmdbuf),
+ .buf = cmdbuf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(rspbuf),
+ .buf = rspbuf,
+ },
+ };
+
+ cmdbuf[0] = reg;
+
+ rc = pmbus_set_page(client, page);
+ if (rc < 0)
+ return rc;
+
+ rc = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (rc < 0)
+ return rc;
+
+ *data = (rspbuf[0] << (0 * 8)) | (rspbuf[1] << (1 * 8)) |
+ (rspbuf[2] << (2 * 8)) | (rspbuf[3] << (3 * 8));
+
+ return rc;
+}
+
+static int max31785_get_pwm(struct i2c_client *client, int page)
+{
+ int rv;
+
+ rv = pmbus_get_fan_rate_device(client, page, 0, percent);
+ if (rv < 0)
+ return rv;
+ else if (rv >= 0x8000)
+ return 0;
+ else if (rv >= 0x2711)
+ return 0x2710;
+
+ return rv;
+}
+
+static int max31785_get_pwm_mode(struct i2c_client *client, int page)
+{
+ int config;
+ int command;
+
+ config = pmbus_read_byte_data(client, page, PMBUS_FAN_CONFIG_12);
+ if (config < 0)
+ return config;
+
+ command = pmbus_read_word_data(client, page, PMBUS_FAN_COMMAND_1);
+ if (command < 0)
+ return command;
+
+ if (config & PB_FAN_1_RPM)
+ return (command >= 0x8000) ? 3 : 2;
+
+ if (command >= 0x8000)
+ return 3;
+ else if (command >= 0x2711)
+ return 0;
+
+ return 1;
+}
+
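For reference, the values returned above follow the hwmon pwmX_enable convention; a sketch of the mapping implied by the code:

	/*
	 * FAN_CONFIG RPM bit set:   FAN_COMMAND >= 0x8000 -> 3 (automatic)
	 *                           otherwise             -> 2 (RPM control)
	 * FAN_CONFIG RPM bit clear: FAN_COMMAND >= 0x8000 -> 3 (automatic)
	 *                           FAN_COMMAND >= 0x2711 -> 0 (full speed)
	 *                           otherwise             -> 1 (manual PWM)
	 */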
+static int max31785_read_word_data(struct i2c_client *client, int page,
+ int reg)
+{
+ u32 val;
+ int rv;
+
+ switch (reg) {
+ case PMBUS_READ_FAN_SPEED_1:
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ rv = max31785_read_long_data(client, page - MAX31785_NR_PAGES,
+ reg, &val);
+ if (rv < 0)
+ return rv;
+
+ rv = (val >> 16) & 0xffff;
+ break;
+ case PMBUS_FAN_COMMAND_1:
+ /*
+ * The core probes PMBUS_FAN_COMMAND_x to decide whether to
+ * expose fan control registers.
+ *
+ * Don't expose fan_target attribute for virtual pages.
+ */
+ rv = (page >= MAX31785_NR_PAGES) ? -ENOTSUPP : -ENODATA;
+ break;
+ case PMBUS_VIRT_PWM_1:
+ rv = max31785_get_pwm(client, page);
+ break;
+ case PMBUS_VIRT_PWM_ENABLE_1:
+ rv = max31785_get_pwm_mode(client, page);
+ break;
+ default:
+ rv = -ENODATA;
+ break;
+ }
+
+ return rv;
+}
+
+static inline u32 max31785_scale_pwm(u32 sensor_val)
+{
+ /*
+ * The datasheet describes the accepted value range for manual PWM as
+ * [0, 0x2710], while the hwmon pwmX sysfs interface accepts values in
+ * [0, 255]. The MAX31785 uses DIRECT mode to scale the FAN_COMMAND
+ * registers and in PWM mode the coefficients are m=1, b=0, R=2. The
+ * important observation here is that 0x2710 == 10000 == 100 * 100.
+ *
+ * R=2 (== 10^2 == 100) accounts for scaling the value provided at the
+ * sysfs interface into the required hardware resolution, but it does
+ * not yet yield a value that we can write to the device (this initial
+ * scaling is handled by pmbus_data2reg()). Multiplying by 100 below
+ * translates the sysfs value into the percentage units required by
+ * PMBus, and dividing by 255 maps the hwmon pwmX range [0, 255]
+ * onto [0, 100], yielding the duty cycle at the resolution the
+ * hardware expects.
+ */
+ return (sensor_val * 100) / 255;
+}
+
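A worked example of the scaling described above, assuming the direct-mode PWM coefficients (m=1, b=0, R=2) that this patch declares in max31785_info:

	/*
	 * hwmon write: pwm1 = 255 (full scale)
	 * max31785_scale_pwm(255) = (255 * 100) / 255 = 100 (percent)
	 * pmbus_data2reg_direct(): (1 * 100 + 0) * 10^2 = 10000 = 0x2710,
	 * the maximum manual duty cycle FAN_COMMAND accepts.
	 */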
+static int max31785_pwm_enable(struct i2c_client *client, int page,
+ u16 word)
+{
+ int config = 0;
+ int rate;
+
+ switch (word) {
+ case 0:
+ rate = 0x7fff;
+ break;
+ case 1:
+ rate = pmbus_get_fan_rate_cached(client, page, 0, percent);
+ if (rate < 0)
+ return rate;
+ rate = max31785_scale_pwm(rate);
+ break;
+ case 2:
+ config = PB_FAN_1_RPM;
+ rate = pmbus_get_fan_rate_cached(client, page, 0, rpm);
+ if (rate < 0)
+ return rate;
+ break;
+ case 3:
+ rate = 0xffff;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return pmbus_update_fan(client, page, 0, config, PB_FAN_1_RPM, rate);
+}
+
+static int max31785_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ switch (reg) {
+ case PMBUS_VIRT_PWM_1:
+ return pmbus_update_fan(client, page, 0, 0, PB_FAN_1_RPM,
+ max31785_scale_pwm(word));
+ case PMBUS_VIRT_PWM_ENABLE_1:
+ return max31785_pwm_enable(client, page, word);
+ default:
+ break;
+ }
+
+ return -ENODATA;
+}
#define MAX31785_FAN_FUNCS \
- (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12)
+ (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_PWM12)
#define MAX31785_TEMP_FUNCS \
(PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP)
@@ -29,14 +248,26 @@ enum max31785_regs {
#define MAX31785_VOUT_FUNCS \
(PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT)
+#define MAX31785_NUM_FAN_PAGES 6
+
static const struct pmbus_driver_info max31785_info = {
.pages = MAX31785_NR_PAGES,
+ .write_word_data = max31785_write_word_data,
+ .read_byte_data = max31785_read_byte_data,
+ .read_word_data = max31785_read_word_data,
+ .write_byte = max31785_write_byte,
+
/* RPM */
.format[PSC_FAN] = direct,
.m[PSC_FAN] = 1,
.b[PSC_FAN] = 0,
.R[PSC_FAN] = 0,
+ /* PWM */
+ .format[PSC_PWM] = direct,
+ .m[PSC_PWM] = 1,
+ .b[PSC_PWM] = 0,
+ .R[PSC_PWM] = 2,
.func[0] = MAX31785_FAN_FUNCS,
.func[1] = MAX31785_FAN_FUNCS,
.func[2] = MAX31785_FAN_FUNCS,
@@ -72,13 +303,46 @@ static const struct pmbus_driver_info max31785_info = {
.func[22] = MAX31785_VOUT_FUNCS,
};
+static int max31785_configure_dual_tach(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < MAX31785_NR_FAN_PAGES; i++) {
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_FAN_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MFR_FAN_CONFIG_DUAL_TACH) {
+ int virtual = MAX31785_NR_PAGES + i;
+
+ info->pages = virtual + 1;
+ info->func[virtual] |= PMBUS_HAVE_FAN12;
+ info->func[virtual] |= PMBUS_PAGE_VIRTUAL;
+ }
+ }
+
+ return 0;
+}
+
static int max31785_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct pmbus_driver_info *info;
+ bool dual_tach = false;
s64 ret;
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
info = devm_kzalloc(dev, sizeof(struct pmbus_driver_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -89,6 +353,25 @@ static int max31785_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ ret = i2c_smbus_read_word_data(client, MFR_REVISION);
+ if (ret < 0)
+ return ret;
+
+ if (ret == MAX31785A) {
+ dual_tach = true;
+ } else if (ret == MAX31785) {
+ if (!strcmp("max31785a", id->name))
+ dev_warn(dev, "Expected max3175a, found max31785: cannot provide secondary tachometer readings\n");
+ } else {
+ return -ENODEV;
+ }
+
+ if (dual_tach) {
+ ret = max31785_configure_dual_tach(client, info);
+ if (ret < 0)
+ return ret;
+ }
+
return pmbus_do_probe(client, id, info);
}
@@ -100,9 +383,18 @@ static const struct i2c_device_id max31785_id[] = {
MODULE_DEVICE_TABLE(i2c, max31785_id);
+static const struct of_device_id max31785_of_match[] = {
+ { .compatible = "maxim,max31785" },
+ { .compatible = "maxim,max31785a" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, max31785_of_match);
+
static struct i2c_driver max31785_driver = {
.driver = {
.name = "max31785",
+ .of_match_table = max31785_of_match,
},
.probe = max31785_probe,
.remove = pmbus_do_remove,
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index fa613bd209e3..1d24397d36ec 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -190,6 +190,33 @@ enum pmbus_regs {
PMBUS_VIRT_VMON_UV_FAULT_LIMIT,
PMBUS_VIRT_VMON_OV_FAULT_LIMIT,
PMBUS_VIRT_STATUS_VMON,
+
+ /*
+ * RPM and PWM Fan control
+ *
+ * Drivers wanting to expose PWM control must define the behaviour of
+ * PMBUS_VIRT_PWM_[1-4] and PMBUS_VIRT_PWM_ENABLE_[1-4] in the
+ * {read,write}_word_data callback.
+ *
+ * pmbus core provides a default implementation for
+ * PMBUS_VIRT_FAN_TARGET_[1-4].
+ *
+ * TARGET, PWM and PWM_ENABLE members must be defined sequentially;
+ * pmbus core uses the difference between the provided register and
+ * its _1 counterpart to calculate the FAN/PWM ID.
+ */
+ PMBUS_VIRT_FAN_TARGET_1,
+ PMBUS_VIRT_FAN_TARGET_2,
+ PMBUS_VIRT_FAN_TARGET_3,
+ PMBUS_VIRT_FAN_TARGET_4,
+ PMBUS_VIRT_PWM_1,
+ PMBUS_VIRT_PWM_2,
+ PMBUS_VIRT_PWM_3,
+ PMBUS_VIRT_PWM_4,
+ PMBUS_VIRT_PWM_ENABLE_1,
+ PMBUS_VIRT_PWM_ENABLE_2,
+ PMBUS_VIRT_PWM_ENABLE_3,
+ PMBUS_VIRT_PWM_ENABLE_4,
};
/*
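A sketch of the ID calculation this comment describes (pmbus_virt_fan_id is an illustrative name; the core open-codes the subtraction):

	static inline int pmbus_virt_fan_id(int reg)
	{
		/* e.g. PMBUS_VIRT_PWM_2 - PMBUS_VIRT_PWM_1 == 1 -> fan/PWM ID 1 */
		if (reg >= PMBUS_VIRT_PWM_ENABLE_1)
			return reg - PMBUS_VIRT_PWM_ENABLE_1;
		if (reg >= PMBUS_VIRT_PWM_1)
			return reg - PMBUS_VIRT_PWM_1;
		return reg - PMBUS_VIRT_FAN_TARGET_1;
	}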
@@ -223,6 +250,8 @@ enum pmbus_regs {
#define PB_FAN_1_RPM BIT(6)
#define PB_FAN_1_INSTALLED BIT(7)
+enum pmbus_fan_mode { percent = 0, rpm };
+
/*
* STATUS_BYTE, STATUS_WORD (lower)
*/
@@ -313,6 +342,7 @@ enum pmbus_sensor_classes {
PSC_POWER,
PSC_TEMPERATURE,
PSC_FAN,
+ PSC_PWM,
PSC_NUM_CLASSES /* Number of power sensor classes */
};
@@ -339,6 +369,10 @@ enum pmbus_sensor_classes {
#define PMBUS_HAVE_STATUS_FAN34 BIT(17)
#define PMBUS_HAVE_VMON BIT(18)
#define PMBUS_HAVE_STATUS_VMON BIT(19)
+#define PMBUS_HAVE_PWM12 BIT(20)
+#define PMBUS_HAVE_PWM34 BIT(21)
+
+#define PMBUS_PAGE_VIRTUAL BIT(31)
enum pmbus_data_format { linear = 0, direct, vid };
enum vrm_version { vr11 = 0, vr12, vr13 };
@@ -421,5 +455,12 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
int pmbus_do_remove(struct i2c_client *client);
const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client
*client);
+int pmbus_get_fan_rate_device(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode);
+int pmbus_get_fan_rate_cached(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode);
+int pmbus_update_fan(struct i2c_client *client, int page, int id,
+ u8 config, u8 mask, u16 command);
+struct dentry *pmbus_get_debugfs_dir(struct i2c_client *client);
#endif /* PMBUS_H */
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index a139940cd991..f7c47d7994e7 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -65,6 +65,7 @@ struct pmbus_sensor {
u16 reg; /* register */
enum pmbus_sensor_classes class; /* sensor class */
bool update; /* runtime sensor update needed */
+ bool convert; /* Whether or not to apply linear/vid/direct */
int data; /* Sensor data.
Negative if there was a read error */
};
@@ -129,6 +130,27 @@ struct pmbus_debugfs_entry {
u8 reg;
};
+static const int pmbus_fan_rpm_mask[] = {
+ PB_FAN_1_RPM,
+ PB_FAN_2_RPM,
+ PB_FAN_1_RPM,
+ PB_FAN_2_RPM,
+};
+
+static const int pmbus_fan_config_registers[] = {
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_34,
+ PMBUS_FAN_CONFIG_34
+};
+
+static const int pmbus_fan_command_registers[] = {
+ PMBUS_FAN_COMMAND_1,
+ PMBUS_FAN_COMMAND_2,
+ PMBUS_FAN_COMMAND_3,
+ PMBUS_FAN_COMMAND_4,
+};
+
void pmbus_clear_cache(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
@@ -140,18 +162,27 @@ EXPORT_SYMBOL_GPL(pmbus_clear_cache);
int pmbus_set_page(struct i2c_client *client, int page)
{
struct pmbus_data *data = i2c_get_clientdata(client);
- int rv = 0;
- int newpage;
+ int rv;
- if (page >= 0 && page != data->currpage) {
+ if (page < 0 || page == data->currpage)
+ return 0;
+
+ if (!(data->info->func[page] & PMBUS_PAGE_VIRTUAL)) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
- newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
- if (newpage != page)
- rv = -EIO;
- else
- data->currpage = page;
+ if (rv < 0)
+ return rv;
+
+ rv = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
+ if (rv < 0)
+ return rv;
+
+ if (rv != page)
+ return -EIO;
}
- return rv;
+
+ data->currpage = page;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(pmbus_set_page);
@@ -198,6 +229,28 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
}
EXPORT_SYMBOL_GPL(pmbus_write_word_data);
+
+static int pmbus_write_virt_reg(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ int bit;
+ int id;
+ int rv;
+
+ switch (reg) {
+ case PMBUS_VIRT_FAN_TARGET_1 ... PMBUS_VIRT_FAN_TARGET_4:
+ id = reg - PMBUS_VIRT_FAN_TARGET_1;
+ bit = pmbus_fan_rpm_mask[id];
+ rv = pmbus_update_fan(client, page, id, bit, bit, word);
+ break;
+ default:
+ rv = -ENXIO;
+ break;
+ }
+
+ return rv;
+}
+
/*
* _pmbus_write_word_data() is similar to pmbus_write_word_data(), but checks if
* a device specific mapping function exists and calls it if necessary.
@@ -214,11 +267,38 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
if (status != -ENODATA)
return status;
}
+
if (reg >= PMBUS_VIRT_BASE)
- return -ENXIO;
+ return pmbus_write_virt_reg(client, page, reg, word);
+
return pmbus_write_word_data(client, page, reg, word);
}
+int pmbus_update_fan(struct i2c_client *client, int page, int id,
+ u8 config, u8 mask, u16 command)
+{
+ int from;
+ int rv;
+ u8 to;
+
+ from = pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[id]);
+ if (from < 0)
+ return from;
+
+ to = (from & ~mask) | (config & mask);
+ if (to != from) {
+ rv = pmbus_write_byte_data(client, page,
+ pmbus_fan_config_registers[id], to);
+ if (rv < 0)
+ return rv;
+ }
+
+ return _pmbus_write_word_data(client, page,
+ pmbus_fan_command_registers[id], command);
+}
+EXPORT_SYMBOL_GPL(pmbus_update_fan);
+
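A usage sketch (values illustrative): put fan 0 of a page into RPM mode with a 3000 RPM target, much as max31785_pwm_enable() does earlier in this series:

	/* Set PB_FAN_1_RPM in FAN_CONFIG_12, then write 3000 to FAN_COMMAND_1 */
	rv = pmbus_update_fan(client, page, 0, PB_FAN_1_RPM, PB_FAN_1_RPM, 3000);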
int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
@@ -231,6 +311,24 @@ int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
}
EXPORT_SYMBOL_GPL(pmbus_read_word_data);
+static int pmbus_read_virt_reg(struct i2c_client *client, int page, int reg)
+{
+ int rv;
+ int id;
+
+ switch (reg) {
+ case PMBUS_VIRT_FAN_TARGET_1 ... PMBUS_VIRT_FAN_TARGET_4:
+ id = reg - PMBUS_VIRT_FAN_TARGET_1;
+ rv = pmbus_get_fan_rate_device(client, page, id, rpm);
+ break;
+ default:
+ rv = -ENXIO;
+ break;
+ }
+
+ return rv;
+}
+
/*
* _pmbus_read_word_data() is similar to pmbus_read_word_data(), but checks if
* a device specific mapping function exists and calls it if necessary.
@@ -246,8 +344,10 @@ static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg)
if (status != -ENODATA)
return status;
}
+
if (reg >= PMBUS_VIRT_BASE)
- return -ENXIO;
+ return pmbus_read_virt_reg(client, page, reg);
+
return pmbus_read_word_data(client, page, reg);
}
@@ -312,6 +412,68 @@ static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
return pmbus_read_byte_data(client, page, reg);
}
+static struct pmbus_sensor *pmbus_find_sensor(struct pmbus_data *data, int page,
+ int reg)
+{
+ struct pmbus_sensor *sensor;
+
+ for (sensor = data->sensors; sensor; sensor = sensor->next) {
+ if (sensor->page == page && sensor->reg == reg)
+ return sensor;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int pmbus_get_fan_rate(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode,
+ bool from_cache)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ bool want_rpm, have_rpm;
+ struct pmbus_sensor *s;
+ int config;
+ int reg;
+
+ want_rpm = (mode == rpm);
+
+ if (from_cache) {
+ reg = want_rpm ? PMBUS_VIRT_FAN_TARGET_1 : PMBUS_VIRT_PWM_1;
+ s = pmbus_find_sensor(data, page, reg + id);
+ if (IS_ERR(s))
+ return PTR_ERR(s);
+
+ return s->data;
+ }
+
+ config = pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[id]);
+ if (config < 0)
+ return config;
+
+ have_rpm = !!(config & pmbus_fan_rpm_mask[id]);
+ if (want_rpm == have_rpm)
+ return pmbus_read_word_data(client, page,
+ pmbus_fan_command_registers[id]);
+
+ /* Can't sensibly map between RPM and PWM, just return zero */
+ return 0;
+}
+
+int pmbus_get_fan_rate_device(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode)
+{
+ return pmbus_get_fan_rate(client, page, id, mode, false);
+}
+EXPORT_SYMBOL_GPL(pmbus_get_fan_rate_device);
+
+int pmbus_get_fan_rate_cached(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode)
+{
+ return pmbus_get_fan_rate(client, page, id, mode, true);
+}
+EXPORT_SYMBOL_GPL(pmbus_get_fan_rate_cached);
+
static void pmbus_clear_fault_page(struct i2c_client *client, int page)
{
_pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
@@ -513,7 +675,7 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
/* X = 1/m * (Y * 10^-R - b) */
R = -R;
/* scale result to milli-units for everything but fans */
- if (sensor->class != PSC_FAN) {
+ if (!(sensor->class == PSC_FAN || sensor->class == PSC_PWM)) {
R += 3;
b *= 1000;
}
@@ -568,6 +730,9 @@ static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
{
long val;
+ if (!sensor->convert)
+ return sensor->data;
+
switch (data->info->format[sensor->class]) {
case direct:
val = pmbus_reg2data_direct(data, sensor);
@@ -672,7 +837,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
}
/* Calculate Y = (m * X + b) * 10^R */
- if (sensor->class != PSC_FAN) {
+ if (!(sensor->class == PSC_FAN || sensor->class == PSC_PWM)) {
R -= 3; /* Adjust R and b for data in milli-units */
b *= 1000;
}
@@ -703,6 +868,9 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
{
u16 regval;
+ if (!sensor->convert)
+ return val;
+
switch (data->info->format[sensor->class]) {
case direct:
regval = pmbus_data2reg_direct(data, sensor, val);
@@ -915,7 +1083,8 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
const char *name, const char *type,
int seq, int page, int reg,
enum pmbus_sensor_classes class,
- bool update, bool readonly)
+ bool update, bool readonly,
+ bool convert)
{
struct pmbus_sensor *sensor;
struct device_attribute *a;
@@ -925,12 +1094,18 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
return NULL;
a = &sensor->attribute;
- snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
- name, seq, type);
+ if (type)
+ snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
+ name, seq, type);
+ else
+ snprintf(sensor->name, sizeof(sensor->name), "%s%d",
+ name, seq);
+
sensor->page = page;
sensor->reg = reg;
sensor->class = class;
sensor->update = update;
+ sensor->convert = convert;
pmbus_dev_attr_init(a, sensor->name,
readonly ? S_IRUGO : S_IRUGO | S_IWUSR,
pmbus_show_sensor, pmbus_set_sensor);
@@ -1029,7 +1204,7 @@ static int pmbus_add_limit_attrs(struct i2c_client *client,
curr = pmbus_add_sensor(data, name, l->attr, index,
page, l->reg, attr->class,
attr->update || l->update,
- false);
+ false, true);
if (!curr)
return -ENOMEM;
if (l->sbit && (info->func[page] & attr->sfunc)) {
@@ -1068,7 +1243,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
return ret;
}
base = pmbus_add_sensor(data, name, "input", index, page, attr->reg,
- attr->class, true, true);
+ attr->class, true, true, true);
if (!base)
return -ENOMEM;
if (attr->sfunc) {
@@ -1592,13 +1767,6 @@ static const int pmbus_fan_registers[] = {
PMBUS_READ_FAN_SPEED_4
};
-static const int pmbus_fan_config_registers[] = {
- PMBUS_FAN_CONFIG_12,
- PMBUS_FAN_CONFIG_12,
- PMBUS_FAN_CONFIG_34,
- PMBUS_FAN_CONFIG_34
-};
-
static const int pmbus_fan_status_registers[] = {
PMBUS_STATUS_FAN_12,
PMBUS_STATUS_FAN_12,
@@ -1621,6 +1789,42 @@ static const u32 pmbus_fan_status_flags[] = {
};
/* Fans */
+
+/* Precondition: FAN_CONFIG_x_y and FAN_COMMAND_x must exist for the fan ID */
+static int pmbus_add_fan_ctrl(struct i2c_client *client,
+ struct pmbus_data *data, int index, int page, int id,
+ u8 config)
+{
+ struct pmbus_sensor *sensor;
+
+ sensor = pmbus_add_sensor(data, "fan", "target", index, page,
+ PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN,
+ false, false, true);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ if (!((data->info->func[page] & PMBUS_HAVE_PWM12) ||
+ (data->info->func[page] & PMBUS_HAVE_PWM34)))
+ return 0;
+
+ sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
+ PMBUS_VIRT_PWM_1 + id, PSC_PWM,
+ false, false, true);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor = pmbus_add_sensor(data, "pwm", "enable", index, page,
+ PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM,
+ true, false, false);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int pmbus_add_fan_attributes(struct i2c_client *client,
struct pmbus_data *data)
{
@@ -1655,9 +1859,18 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
if (pmbus_add_sensor(data, "fan", "input", index,
page, pmbus_fan_registers[f],
- PSC_FAN, true, true) == NULL)
+ PSC_FAN, true, true, true) == NULL)
return -ENOMEM;
+ /* Fan control */
+ if (pmbus_check_word_register(client, page,
+ pmbus_fan_command_registers[f])) {
+ ret = pmbus_add_fan_ctrl(client, data, index,
+ page, f, regval);
+ if (ret < 0)
+ return ret;
+ }
+
/*
* Each fan status register covers multiple fans,
* so we have to do some magic.
@@ -2168,6 +2381,14 @@ int pmbus_do_remove(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_do_remove);
+struct dentry *pmbus_get_debugfs_dir(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ return data->debugfs;
+}
+EXPORT_SYMBOL_GPL(pmbus_get_debugfs_dir);
+
static int __init pmbus_core_init(void)
{
pmbus_debugfs_dir = debugfs_create_dir("pmbus", NULL);
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 25d28343ba93..2be77752cd56 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -179,6 +179,7 @@ struct sht15_data {
* sht15_crc8() - compute crc8
* @data: sht15 specific data.
* @value: sht15 retrieved data.
+ * @len: Length of retrieved data
*
* This implements section 2 of the CRC datasheet.
*/
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 06706d288355..190e7b39ce32 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -41,7 +41,7 @@
/**
* struct sht21 - SHT21 device specific data
- * @hwmon_dev: device registered with hwmon
+ * @client: I2C client device
* @lock: mutex to protect measurement values
* @last_update: time of last update (jiffies)
* @temperature: cached temperature measurement value
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 6ea99cd6ae79..370b57dafab7 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -732,6 +732,13 @@ static int sht3x_probe(struct i2c_client *client,
mutex_init(&data->i2c_lock);
mutex_init(&data->data_lock);
+ /*
+ * An attempt to read the limits register too early
+ * causes a NACK response from the chip.
+ * An empirically determined delay of 500 us solves the issue.
+ */
+ usleep_range(500, 600);
+
ret = limits_update(data);
if (ret)
return ret;
diff --git a/drivers/hwmon/w83773g.c b/drivers/hwmon/w83773g.c
new file mode 100644
index 000000000000..e858093ac806
--- /dev/null
+++ b/drivers/hwmon/w83773g.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Driver for the Nuvoton W83773G SMBus temperature sensor IC.
+ * Supported models: W83773G
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+/* W83773 has 3 channels */
+#define W83773_CHANNELS 3
+
+/* The W83773 registers */
+#define W83773_CONVERSION_RATE_REG_READ 0x04
+#define W83773_CONVERSION_RATE_REG_WRITE 0x0A
+#define W83773_MANUFACTURER_ID_REG 0xFE
+#define W83773_LOCAL_TEMP 0x00
+
+static const u8 W83773_STATUS[2] = { 0x02, 0x17 };
+
+static const u8 W83773_TEMP_LSB[2] = { 0x10, 0x25 };
+static const u8 W83773_TEMP_MSB[2] = { 0x01, 0x24 };
+
+static const u8 W83773_OFFSET_LSB[2] = { 0x12, 0x16 };
+static const u8 W83773_OFFSET_MSB[2] = { 0x11, 0x15 };
+
+static const struct i2c_device_id w83773_id[] = {
+ { "w83773g" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, w83773_id);
+
+static const struct of_device_id w83773_of_match[] = {
+ {
+ .compatible = "nuvoton,w83773g"
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, w83773_of_match);
+
+static inline long temp_of_local(s8 reg)
+{
+ return reg * 1000;
+}
+
+static inline long temp_of_remote(s8 hb, u8 lb)
+{
+ return (hb << 3 | lb >> 5) * 125;
+}
+
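The remote channels are 11-bit values with 0.125 degree resolution; a worked decode with illustrative raw readings:

	/*
	 * MSB = 0x19, LSB = 0x80:
	 * (0x19 << 3 | 0x80 >> 5) * 125 = (200 + 4) * 125 = 25500
	 * i.e. 25.5 degrees Celsius in milli-degree units.
	 */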
+static int get_local_temp(struct regmap *regmap, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_LOCAL_TEMP, &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_local(regval);
+ return 0;
+}
+
+static int get_remote_temp(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval_high;
+ unsigned int regval_low;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_TEMP_MSB[index], &regval_high);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, W83773_TEMP_LSB[index], &regval_low);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_remote(regval_high, regval_low);
+ return 0;
+}
+
+static int get_fault(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_STATUS[index], &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = (regval & 0x04) >> 2;
+ return 0;
+}
+
+static int get_offset(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval_high;
+ unsigned int regval_low;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_OFFSET_MSB[index], &regval_high);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, W83773_OFFSET_LSB[index], &regval_low);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_remote(regval_high, regval_low);
+ return 0;
+}
+
+static int set_offset(struct regmap *regmap, int index, long val)
+{
+ int ret;
+ u8 high_byte;
+ u8 low_byte;
+
+ val = clamp_val(val, -127825, 127825);
+ /* offset value equals to (high_byte << 3 | low_byte >> 5) * 125 */
+ val /= 125;
+ high_byte = val >> 3;
+ low_byte = (val & 0x07) << 5;
+
+ ret = regmap_write(regmap, W83773_OFFSET_MSB[index], high_byte);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(regmap, W83773_OFFSET_LSB[index], low_byte);
+}
+
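A worked encode for set_offset(), assuming a requested offset of 1000 milli-degrees:

	/*
	 * val = 1000 -> 1000 / 125 = 8
	 * high_byte = 8 >> 3 = 0x01, low_byte = (8 & 0x07) << 5 = 0x00
	 * so the 11-bit offset field holds 8 * 0.125 = 1.000 degrees
	 */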
+static int get_update_interval(struct regmap *regmap, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_CONVERSION_RATE_REG_READ, &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = 16000 >> regval;
+ return 0;
+}
+
+static int set_update_interval(struct regmap *regmap, long val)
+{
+ int rate;
+
+ /*
+ * For valid rates, interval can be calculated as
+ * interval = (1 << (8 - rate)) * 62.5;
+ * Rounded rate is therefore
+ * rate = 8 - __fls(interval * 8 / (62.5 * 7));
+ * Use clamp_val() to avoid overflows, and to ensure valid input
+ * for __fls.
+ */
+ val = clamp_val(val, 62, 16000) * 10;
+ rate = 8 - __fls((val * 8 / (625 * 7)));
+ return regmap_write(regmap, W83773_CONVERSION_RATE_REG_WRITE, rate);
+}
+
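A worked example for set_update_interval(), assuming a requested interval of 1000 ms:

	/*
	 * val  = clamp_val(1000, 62, 16000) * 10 = 10000
	 * rate = 8 - __fls(10000 * 8 / 4375) = 8 - __fls(18) = 8 - 4 = 4
	 * programmed interval = (1 << (8 - 4)) * 62.5 = 1000 ms, which
	 * get_update_interval() reads back as 16000 >> 4 = 1000.
	 */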
+static int w83773_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ if (type == hwmon_chip) {
+ if (attr == hwmon_chip_update_interval)
+ return get_update_interval(regmap, val);
+ return -EOPNOTSUPP;
+ }
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (channel == 0)
+ return get_local_temp(regmap, val);
+ return get_remote_temp(regmap, channel - 1, val);
+ case hwmon_temp_fault:
+ return get_fault(regmap, channel - 1, val);
+ case hwmon_temp_offset:
+ return get_offset(regmap, channel - 1, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int w83773_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ if (type == hwmon_chip && attr == hwmon_chip_update_interval)
+ return set_update_interval(regmap, val);
+
+ if (type == hwmon_temp && attr == hwmon_temp_offset)
+ return set_offset(regmap, channel - 1, val);
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t w83773_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return 0644;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_fault:
+ return 0444;
+ case hwmon_temp_offset:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const u32 w83773_chip_config[] = {
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
+ 0
+};
+
+static const struct hwmon_channel_info w83773_chip = {
+ .type = hwmon_chip,
+ .config = w83773_chip_config,
+};
+
+static const u32 w83773_temp_config[] = {
+ HWMON_T_INPUT,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
+ 0
+};
+
+static const struct hwmon_channel_info w83773_temp = {
+ .type = hwmon_temp,
+ .config = w83773_temp_config,
+};
+
+static const struct hwmon_channel_info *w83773_info[] = {
+ &w83773_chip,
+ &w83773_temp,
+ NULL
+};
+
+static const struct hwmon_ops w83773_ops = {
+ .is_visible = w83773_is_visible,
+ .read = w83773_read,
+ .write = w83773_write,
+};
+
+static const struct hwmon_chip_info w83773_chip_info = {
+ .ops = &w83773_ops,
+ .info = w83773_info,
+};
+
+static const struct regmap_config w83773_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int w83773_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(client, &w83773_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ /* Set the conversion rate to 2 Hz */
+ ret = regmap_write(regmap, W83773_CONVERSION_RATE_REG_WRITE, 0x05);
+ if (ret < 0) {
+ dev_err(&client->dev, "error writing conversion rate register\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(client, regmap);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ client->name,
+ regmap,
+ &w83773_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct i2c_driver w83773_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "w83773g",
+ .of_match_table = of_match_ptr(w83773_of_match),
+ },
+ .probe = w83773_probe,
+ .id_table = w83773_id,
+};
+
+module_i2c_driver(w83773_driver);
+
+MODULE_AUTHOR("Lei YU <mine260309@gmail.com>");
+MODULE_DESCRIPTION("W83773G temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index a18794128bf8..7c375443ede6 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -104,26 +104,17 @@ static int of_coresight_alloc_memory(struct device *dev,
int of_coresight_get_cpu(const struct device_node *node)
{
int cpu;
- bool found;
- struct device_node *dn, *np;
+ struct device_node *dn;
dn = of_parse_phandle(node, "cpu", 0);
-
/* Affinity defaults to CPU0 */
if (!dn)
return 0;
-
- for_each_possible_cpu(cpu) {
- np = of_cpu_device_node_get(cpu);
- found = (dn == np);
- of_node_put(np);
- if (found)
- break;
- }
+ cpu = of_cpu_node_to_id(dn);
of_node_put(dn);
/* Affinity to CPU0 if no cpu nodes are found */
- return found ? cpu : 0;
+ return (cpu < 0) ? 0 : cpu;
}
EXPORT_SYMBOL_GPL(of_coresight_get_cpu);
diff --git a/drivers/hwtracing/stm/ftrace.c b/drivers/hwtracing/stm/ftrace.c
index bd126a7c6da2..7da75644c750 100644
--- a/drivers/hwtracing/stm/ftrace.c
+++ b/drivers/hwtracing/stm/ftrace.c
@@ -42,9 +42,11 @@ static struct stm_ftrace {
* @len: length of the data packet
*/
static void notrace
-stm_ftrace_write(const void *buf, unsigned int len)
+stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len)
{
- stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len);
+ struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace);
+
+ stm_source_write(&stm->data, STM_FTRACE_CHAN, buf, len);
}
static int stm_ftrace_link(struct stm_source_data *data)
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 0d05dadb2dc5..44cffad43701 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -379,7 +379,7 @@ static int cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_device_id cht_wc_i2c_adap_id_table[] = {
+static const struct platform_device_id cht_wc_i2c_adap_id_table[] = {
{ .name = "cht_wcove_ext_chgr" },
{},
};
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 21bf619a86c5..9fee4c054d3d 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -280,8 +280,6 @@ struct dw_i2c_dev {
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_disabled;
- bool suspended;
- bool skip_resume;
void (*disable)(struct dw_i2c_dev *dev);
void (*disable_int)(struct dw_i2c_dev *dev);
int (*init)(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 58add69a441c..153b947702c5 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -42,6 +42,7 @@
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include "i2c-designware-core.h"
@@ -372,6 +373,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
adap->dev.of_node = pdev->dev.of_node;
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED);
+
/* The code below assumes runtime PM to be disabled. */
WARN_ON(pm_runtime_enabled(&pdev->dev));
@@ -435,12 +441,24 @@ MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
#ifdef CONFIG_PM_SLEEP
static int dw_i2c_plat_prepare(struct device *dev)
{
- return pm_runtime_suspended(dev);
+ /*
+ * If the ACPI companion device object is present for this device, it
+ * may be accessed during suspend and resume of other devices via I2C
+ * operation regions, so tell the PM core and middle layers to avoid
+ * skipping system suspend/resume callbacks for it in that case.
+ */
+ return !has_acpi_companion(dev);
}
static void dw_i2c_plat_complete(struct device *dev)
{
- if (dev->power.direct_complete)
+ /*
+ * The device can only be in runtime suspend at this point if it has not
+ * been resumed throughout the ending system suspend/resume cycle, so if
+ * the platform firmware might mess up with it, request the runtime PM
+ * framework to resume it.
+ */
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
}
#else
@@ -453,16 +471,9 @@ static int dw_i2c_plat_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
- if (i_dev->suspended) {
- i_dev->skip_resume = true;
- return 0;
- }
-
i_dev->disable(i_dev);
i2c_dw_plat_prepare_clk(i_dev, false);
- i_dev->suspended = true;
-
return 0;
}
@@ -470,19 +481,9 @@ static int dw_i2c_plat_resume(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
- if (!i_dev->suspended)
- return 0;
-
- if (i_dev->skip_resume) {
- i_dev->skip_resume = false;
- return 0;
- }
-
i2c_dw_plat_prepare_clk(i_dev, true);
i_dev->init(i_dev);
- i_dev->suspended = false;
-
return 0;
}
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 174579d32e5f..462948e2c535 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -983,7 +983,7 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
if (adapdata->smba) {
i2c_del_adapter(adap);
- if (adapdata->port == (0 << 1)) {
+ if (adapdata->port == (0 << piix4_port_shift_sb800)) {
release_region(adapdata->smba, SMBIOSIZE);
if (adapdata->sb800_main)
release_region(SB800_PIIX4_SMB_IDX, 2);
diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h
index dab51761f8c5..d4f9cef251ac 100644
--- a/drivers/i2c/busses/i2c-stm32.h
+++ b/drivers/i2c/busses/i2c-stm32.h
@@ -1,10 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* i2c-stm32.h
*
* Copyright (C) M'boumba Cedric Madianga 2017
+ * Copyright (C) STMicroelectronics 2017
* Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
*
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _I2C_STM32_H
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index 4ec108496f15..47c8d00de53f 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for STMicroelectronics STM32 I2C controller
*
@@ -6,11 +7,11 @@
* http://www.st.com/resource/en/reference_manual/DM00031020.pdf
*
* Copyright (C) M'boumba Cedric Madianga 2016
+ * Copyright (C) STMicroelectronics 2017
* Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
*
* This driver is based on i2c-st.c
*
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index d4a6e9c2e9aa..b445b3bb0bb1 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for STMicroelectronics STM32F7 I2C controller
*
@@ -7,11 +8,11 @@
* http://www.st.com/resource/en/reference_manual/dm00124865.pdf
*
* Copyright (C) M'boumba Cedric Madianga 2017
+ * Copyright (C) STMicroelectronics 2017
* Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
*
* This driver is based on i2c-stm32f4.c
*
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 706164b4c5be..f7829a74140c 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -821,8 +821,12 @@ void i2c_unregister_device(struct i2c_client *client)
{
if (!client)
return;
- if (client->dev.of_node)
+
+ if (client->dev.of_node) {
of_node_clear_flag(client->dev.of_node, OF_POPULATED);
+ of_node_put(client->dev.of_node);
+ }
+
if (ACPI_COMPANION(&client->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
device_unregister(&client->dev);
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index 4bb9927afd01..a1082c04ac5c 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -397,16 +397,17 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
the underlying bus driver */
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
+ if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
+ dev_err(&adapter->dev, "Invalid block %s size %d\n",
+ read_write == I2C_SMBUS_READ ? "read" : "write",
+ data->block[0]);
+ return -EINVAL;
+ }
+
if (read_write == I2C_SMBUS_READ) {
msg[1].len = data->block[0];
} else {
msg[0].len = data->block[0] + 1;
- if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) {
- dev_err(&adapter->dev,
- "Invalid block write size %d\n",
- data->block[0]);
- return -EINVAL;
- }
for (i = 1; i <= data->block[0]; i++)
msgbuf0[i] = data->block[i];
}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index ef86296b8b0d..39e3b345a6c8 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -629,6 +629,18 @@ config SPEAR_ADC
To compile this driver as a module, choose M here: the
module will be called spear_adc.
+config SD_ADC_MODULATOR
+ tristate "Generic sigma delta modulator"
+ depends on OF
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+	  Select this option to enable support for generic sigma delta
+	  modulators.
+
+ This driver can also be built as a module. If so, the module
+ will be called sd_adc_modulator.
+
config STM32_ADC_CORE
tristate "STMicroelectronics STM32 adc core"
depends on ARCH_STM32 || COMPILE_TEST
@@ -656,6 +668,31 @@ config STM32_ADC
This driver can also be built as a module. If so, the module
will be called stm32-adc.
+config STM32_DFSDM_CORE
+ tristate "STMicroelectronics STM32 DFSDM core"
+ depends on (ARCH_STM32 && OF) || COMPILE_TEST
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ Select this option to enable the driver for STMicroelectronics
+ STM32 digital filter for sigma delta converter.
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-dfsdm-core.
+
+config STM32_DFSDM_ADC
+	tristate "STMicroelectronics STM32 DFSDM ADC"
+ depends on (ARCH_STM32 && OF) || COMPILE_TEST
+ select STM32_DFSDM_CORE
+ select REGMAP_MMIO
+ select IIO_BUFFER_HW_CONSUMER
+ help
+	  Select this option to support ADCs based on sigma delta modulators
+	  for the STMicroelectronics STM32 digital filter for sigma delta
+	  converter (DFSDM).
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-dfsdm-adc.
+
config STX104
tristate "Apex Embedded Systems STX104 driver"
depends on PC104 && X86 && ISA_BUS_API
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 9572c1090f35..28a9423997f3 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -64,6 +64,8 @@ obj-$(CONFIG_STX104) += stx104.o
obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
obj-$(CONFIG_STM32_ADC) += stm32-adc.o
+obj-$(CONFIG_STM32_DFSDM_CORE) += stm32-dfsdm-core.o
+obj-$(CONFIG_STM32_DFSDM_ADC) += stm32-dfsdm-adc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
obj-$(CONFIG_TI_ADC084S021) += ti-adc084s021.o
@@ -82,3 +84,4 @@ obj-$(CONFIG_VF610_ADC) += vf610_adc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
+obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
diff --git a/drivers/iio/adc/sd_adc_modulator.c b/drivers/iio/adc/sd_adc_modulator.c
new file mode 100644
index 000000000000..560d8c7d9d86
--- /dev/null
+++ b/drivers/iio/adc/sd_adc_modulator.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic sigma delta modulator driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+static const struct iio_info iio_sd_mod_iio_info;
+
+static const struct iio_chan_spec iio_sd_mod_ch = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 1,
+ .shift = 0,
+ },
+};
+
+static int iio_sd_mod_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *iio;
+
+ iio = devm_iio_device_alloc(dev, 0);
+ if (!iio)
+ return -ENOMEM;
+
+ iio->dev.parent = dev;
+ iio->dev.of_node = dev->of_node;
+ iio->name = dev_name(dev);
+ iio->info = &iio_sd_mod_iio_info;
+ iio->modes = INDIO_BUFFER_HARDWARE;
+
+ iio->num_channels = 1;
+ iio->channels = &iio_sd_mod_ch;
+
+ platform_set_drvdata(pdev, iio);
+
+ return devm_iio_device_register(&pdev->dev, iio);
+}
+
+static const struct of_device_id sd_adc_of_match[] = {
+ { .compatible = "sd-modulator" },
+ { .compatible = "ads1201" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sd_adc_of_match);
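+
+/*
+ * Illustrative DT node matched by this driver (example values only; the
+ * binding documentation is authoritative for the node format):
+ *
+ *	sd_adc: sd-adc {
+ *		compatible = "sd-modulator";
+ *		#io-channel-cells = <0>;
+ *	};
+ */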
+
+static struct platform_driver iio_sd_mod_adc = {
+ .driver = {
+ .name = "iio_sd_adc_mod",
+ .of_match_table = of_match_ptr(sd_adc_of_match),
+ },
+ .probe = iio_sd_mod_probe,
+};
+
+module_platform_driver(iio_sd_mod_adc);
+
+MODULE_DESCRIPTION("Basic sigma delta modulator");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
new file mode 100644
index 000000000000..daa026d6a94f
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -0,0 +1,1205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is the ADC part of the STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/hw-consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "stm32-dfsdm.h"
+
+#define DFSDM_DMA_BUFFER_SIZE (4 * PAGE_SIZE)
+
+/* Conversion timeout */
+#define DFSDM_TIMEOUT_US 100000
+#define DFSDM_TIMEOUT (msecs_to_jiffies(DFSDM_TIMEOUT_US / 1000))
+
+/* Oversampling attribute default */
+#define DFSDM_DEFAULT_OVERSAMPLING 100
+
+/* Oversampling max values */
+#define DFSDM_MAX_INT_OVERSAMPLING 256
+#define DFSDM_MAX_FL_OVERSAMPLING 1024
+
+/* Max sample resolutions */
+#define DFSDM_MAX_RES BIT(31)
+#define DFSDM_DATA_RES BIT(23)
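+/* i.e. at most 2^31 internally, and 2^23 for the 24-bit output data */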
+
+enum sd_converter_type {
+ DFSDM_AUDIO,
+ DFSDM_IIO,
+};
+
+struct stm32_dfsdm_dev_data {
+ int type;
+ int (*init)(struct iio_dev *indio_dev);
+ unsigned int num_channels;
+ const struct regmap_config *regmap_cfg;
+};
+
+struct stm32_dfsdm_adc {
+ struct stm32_dfsdm *dfsdm;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ unsigned int fl_id;
+ unsigned int ch_id;
+
+ /* ADC specific */
+ unsigned int oversamp;
+ struct iio_hw_consumer *hwc;
+ struct completion completion;
+ u32 *buffer;
+
+ /* Audio specific */
+ unsigned int spi_freq; /* SPI bus clock frequency */
+ unsigned int sample_freq; /* Sample frequency after filter decimation */
+ int (*cb)(const void *data, size_t size, void *cb_priv);
+ void *cb_priv;
+
+ /* DMA */
+ u8 *rx_buf;
+ unsigned int bufi; /* Buffer current position */
+ unsigned int buf_sz; /* Buffer size */
+ struct dma_chan *dma_chan;
+ dma_addr_t dma_buf;
+};
+
+struct stm32_dfsdm_str2field {
+ const char *name;
+ unsigned int val;
+};
+
+/* DFSDM channel serial interface type */
+static const struct stm32_dfsdm_str2field stm32_dfsdm_chan_type[] = {
+ { "SPI_R", 0 }, /* SPI with data on rising edge */
+ { "SPI_F", 1 }, /* SPI with data on falling edge */
+ { "MANCH_R", 2 }, /* Manchester codec, rising edge = logic 0 */
+ { "MANCH_F", 3 }, /* Manchester codec, falling edge = logic 1 */
+ {},
+};
+
+/* DFSDM channel clock source */
+static const struct stm32_dfsdm_str2field stm32_dfsdm_chan_src[] = {
+ /* External SPI clock (CLKIN x) */
+ { "CLKIN", DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL },
+ /* Internal SPI clock (CLKOUT) */
+ { "CLKOUT", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL },
+ /* Internal SPI clock divided by 2 (falling edge) */
+ { "CLKOUT_F", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING },
+	/* Internal SPI clock divided by 2 (rising edge) */
+ { "CLKOUT_R", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING },
+ {},
+};
+
+static int stm32_dfsdm_str2val(const char *str,
+ const struct stm32_dfsdm_str2field *list)
+{
+	const struct stm32_dfsdm_str2field *p;
+
+ for (p = list; p && p->name; p++)
+ if (!strcmp(p->name, str))
+ return p->val;
+
+ return -EINVAL;
+}
+
+static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
+ unsigned int fast, unsigned int oversamp)
+{
+ unsigned int i, d, fosr, iosr;
+ u64 res;
+ s64 delta;
+ unsigned int m = 1; /* multiplication factor */
+ unsigned int p = fl->ford; /* filter order (ford) */
+
+ pr_debug("%s: Requested oversampling: %d\n", __func__, oversamp);
+ /*
+	 * This function tries to compute the filter and integrator
+	 * oversampling ratios, based on the oversampling ratio requested
+	 * by the user.
+ *
+ * Decimation d depends on the filter order and the oversampling ratios.
+ * ford: filter order
+ * fosr: filter over sampling ratio
+ * iosr: integrator over sampling ratio
+ */
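+	/*
+	 * Decimation d for the supported configurations (as implemented
+	 * below), with an illustrative Sinc3 example:
+	 *   fast mode:         d = fosr * iosr
+	 *   FastSinc (p = 2):  d = fosr * (iosr + 3) + 2
+	 *   Sincx (p = ford):  d = fosr * (iosr - 1 + p) + p
+	 * e.g. Sinc3, fosr = 64, iosr = 2: d = 64 * (2 - 1 + 3) + 3 = 259.
+	 */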
+ if (fl->ford == DFSDM_FASTSINC_ORDER) {
+ m = 2;
+ p = 2;
+ }
+
+ /*
+	 * Look for filter and integrator oversampling ratios which allow
+	 * to reach a 24-bit data output resolution.
+	 * Leave as soon as the exact resolution is reached.
+	 * Otherwise the highest resolution below 32 bits is kept.
+ */
+ for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
+ for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
+ if (fast)
+ d = fosr * iosr;
+ else if (fl->ford == DFSDM_FASTSINC_ORDER)
+ d = fosr * (iosr + 3) + 2;
+ else
+ d = fosr * (iosr - 1 + p) + p;
+
+ if (d > oversamp)
+ break;
+ else if (d != oversamp)
+ continue;
+ /*
+ * Check resolution (limited to signed 32 bits)
+ * res <= 2^31
+ * Sincx filters:
+ * res = m * fosr^p x iosr (with m=1, p=ford)
+ * FastSinc filter
+ * res = m * fosr^p x iosr (with m=2, p=2)
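+			 * e.g. (illustrative values) Sinc3, fosr = 64,
+			 * iosr = 2: res = 64^3 * 2 = 2^19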
+ */
+ res = fosr;
+ for (i = p - 1; i > 0; i--) {
+ res = res * (u64)fosr;
+ if (res > DFSDM_MAX_RES)
+ break;
+ }
+ if (res > DFSDM_MAX_RES)
+ continue;
+ res = res * (u64)m * (u64)iosr;
+ if (res > DFSDM_MAX_RES)
+ continue;
+
+ delta = res - DFSDM_DATA_RES;
+
+ if (res >= fl->res) {
+ fl->res = res;
+ fl->fosr = fosr;
+ fl->iosr = iosr;
+ fl->fast = fast;
+ pr_debug("%s: fosr = %d, iosr = %d\n",
+ __func__, fl->fosr, fl->iosr);
+ }
+
+ if (!delta)
+ return 0;
+ }
+ }
+
+ if (!fl->fosr)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stm32_dfsdm_start_channel(struct stm32_dfsdm *dfsdm,
+ unsigned int ch_id)
+{
+ return regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(ch_id),
+ DFSDM_CHCFGR1_CHEN_MASK,
+ DFSDM_CHCFGR1_CHEN(1));
+}
+
+static void stm32_dfsdm_stop_channel(struct stm32_dfsdm *dfsdm,
+ unsigned int ch_id)
+{
+ regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(ch_id),
+ DFSDM_CHCFGR1_CHEN_MASK, DFSDM_CHCFGR1_CHEN(0));
+}
+
+static int stm32_dfsdm_chan_configure(struct stm32_dfsdm *dfsdm,
+ struct stm32_dfsdm_channel *ch)
+{
+ unsigned int id = ch->id;
+ struct regmap *regmap = dfsdm->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_SITP_MASK,
+ DFSDM_CHCFGR1_SITP(ch->type));
+ if (ret < 0)
+ return ret;
+ ret = regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_SPICKSEL_MASK,
+ DFSDM_CHCFGR1_SPICKSEL(ch->src));
+ if (ret < 0)
+ return ret;
+ return regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_CHINSEL_MASK,
+ DFSDM_CHCFGR1_CHINSEL(ch->alt_si));
+}
+
+static int stm32_dfsdm_start_filter(struct stm32_dfsdm *dfsdm,
+ unsigned int fl_id)
+{
+ int ret;
+
+ /* Enable filter */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_DFEN_MASK, DFSDM_CR1_DFEN(1));
+ if (ret < 0)
+ return ret;
+
+ /* Start conversion */
+ return regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_RSWSTART_MASK,
+ DFSDM_CR1_RSWSTART(1));
+}
+
+static void stm32_dfsdm_stop_filter(struct stm32_dfsdm *dfsdm,
+				    unsigned int fl_id)
+{
+ /* Disable conversion */
+ regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_DFEN_MASK, DFSDM_CR1_DFEN(0));
+}
+
+static int stm32_dfsdm_filter_configure(struct stm32_dfsdm *dfsdm,
+ unsigned int fl_id, unsigned int ch_id)
+{
+ struct regmap *regmap = dfsdm->regmap;
+ struct stm32_dfsdm_filter *fl = &dfsdm->fl_list[fl_id];
+ int ret;
+
+ /* Average integrator oversampling */
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_IOSR_MASK,
+ DFSDM_FCR_IOSR(fl->iosr - 1));
+ if (ret)
+ return ret;
+
+ /* Filter order and Oversampling */
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FOSR_MASK,
+ DFSDM_FCR_FOSR(fl->fosr - 1));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FORD_MASK,
+ DFSDM_FCR_FORD(fl->ford));
+ if (ret)
+ return ret;
+
+ /* No scan mode supported for the moment */
+ ret = regmap_update_bits(regmap, DFSDM_CR1(fl_id), DFSDM_CR1_RCH_MASK,
+ DFSDM_CR1_RCH(ch_id));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_RSYNC_MASK,
+ DFSDM_CR1_RSYNC(fl->sync_mode));
+}
+
+static int stm32_dfsdm_channel_parse_of(struct stm32_dfsdm *dfsdm,
+ struct iio_dev *indio_dev,
+ struct iio_chan_spec *ch)
+{
+ struct stm32_dfsdm_channel *df_ch;
+ const char *of_str;
+ int chan_idx = ch->scan_index;
+ int ret, val;
+
+ ret = of_property_read_u32_index(indio_dev->dev.of_node,
+ "st,adc-channels", chan_idx,
+ &ch->channel);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+			"Error parsing 'st,adc-channels' for idx %d\n",
+ chan_idx);
+ return ret;
+ }
+ if (ch->channel >= dfsdm->num_chs) {
+ dev_err(&indio_dev->dev,
+			"Bad channel number %d (max = %d)\n",
+ ch->channel, dfsdm->num_chs);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-names", chan_idx,
+ &ch->datasheet_name);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+			"Error parsing 'st,adc-channel-names' for idx %d\n",
+ chan_idx);
+ return ret;
+ }
+
+ df_ch = &dfsdm->ch_list[ch->channel];
+ df_ch->id = ch->channel;
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-types", chan_idx,
+ &of_str);
+ if (!ret) {
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_type);
+ if (val < 0)
+ return val;
+ } else {
+ val = 0;
+ }
+ df_ch->type = val;
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-clk-src", chan_idx,
+ &of_str);
+ if (!ret) {
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_src);
+ if (val < 0)
+ return val;
+ } else {
+ val = 0;
+ }
+ df_ch->src = val;
+
+ ret = of_property_read_u32_index(indio_dev->dev.of_node,
+ "st,adc-alt-channel", chan_idx,
+ &df_ch->alt_si);
+ if (ret < 0)
+ df_ch->alt_si = 0;
+
+ return 0;
+}
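+
+/*
+ * Illustrative DT fragment consumed by stm32_dfsdm_channel_parse_of()
+ * (example values only; see the binding documentation for the
+ * authoritative property definitions):
+ *
+ *	st,adc-channels = <1>;
+ *	st,adc-channel-names = "pdm1";
+ *	st,adc-channel-types = "SPI_R";
+ *	st,adc-channel-clk-src = "CLKOUT";
+ *	st,adc-alt-channel = <0>;
+ */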
+
+static ssize_t dfsdm_adc_audio_get_spiclk(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", adc->spi_freq);
+}
+
+static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+ struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
+ unsigned int sample_freq = adc->sample_freq;
+ unsigned int spi_freq;
+ int ret;
+
+	dev_dbg(&indio_dev->dev, "enter %s\n", __func__);
+ /* If DFSDM is master on SPI, SPI freq can not be updated */
+ if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ return -EPERM;
+
+	ret = kstrtouint(buf, 0, &spi_freq);
+ if (ret)
+ return ret;
+
+ if (!spi_freq)
+ return -EINVAL;
+
+ if (sample_freq) {
+ if (spi_freq % sample_freq)
+ dev_warn(&indio_dev->dev,
+ "Sampling rate not accurate (%d)\n",
+ spi_freq / (spi_freq / sample_freq));
+
+ ret = stm32_dfsdm_set_osrs(fl, 0, (spi_freq / sample_freq));
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+				"No matching filter parameters found\n");
+ return ret;
+ }
+ }
+ adc->spi_freq = spi_freq;
+
+ return len;
+}
+
+static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc, bool dma)
+{
+ struct regmap *regmap = adc->dfsdm->regmap;
+ int ret;
+ unsigned int dma_en = 0, cont_en = 0;
+
+ ret = stm32_dfsdm_start_channel(adc->dfsdm, adc->ch_id);
+ if (ret < 0)
+ return ret;
+
+ ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id,
+ adc->ch_id);
+ if (ret < 0)
+ goto stop_channels;
+
+	if (dma) {
+		/* Enable DMA transfer */
+		dma_en = DFSDM_CR1_RDMAEN(1);
+		/* Enable continuous conversion triggered by SPI clock */
+		cont_en = DFSDM_CR1_RCONT(1);
+	}
+	/* Enable or disable DMA transfer */
+	ret = regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+				 DFSDM_CR1_RDMAEN_MASK, dma_en);
+	if (ret < 0)
+		goto stop_channels;
+
+	/* Enable or disable continuous conversion triggered by SPI clock */
+ ret = regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, cont_en);
+ if (ret < 0)
+ goto stop_channels;
+
+ ret = stm32_dfsdm_start_filter(adc->dfsdm, adc->fl_id);
+ if (ret < 0)
+ goto stop_channels;
+
+ return 0;
+
+stop_channels:
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK, 0);
+
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, 0);
+	stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id);
+
+ return ret;
+}
+
+static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc)
+{
+ struct regmap *regmap = adc->dfsdm->regmap;
+
+ stm32_dfsdm_stop_filter(adc->dfsdm, adc->fl_id);
+
+ /* Clean conversion options */
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK, 0);
+
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, 0);
+
+ stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id);
+}
+
+static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev,
+ unsigned int val)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ unsigned int watermark = DFSDM_DMA_BUFFER_SIZE / 2;
+
+ /*
+ * DMA cyclic transfers are used, buffer is split into two periods.
+ * There should be :
+ * - always one buffer (period) DMA is working on
+ * - one buffer (period) driver pushed to ASoC side.
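+	 * Example (illustrative): for val = 1024 samples (4096 bytes), the
+	 * watermark is min(DFSDM_DMA_BUFFER_SIZE / 2, 4096) = 4096 bytes
+	 * with 4 KiB pages, giving an 8192 byte two-period ring buffer.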
+ */
+ watermark = min(watermark, val * (unsigned int)(sizeof(u32)));
+ adc->buf_sz = watermark * 2;
+
+ return 0;
+}
+
+static unsigned int stm32_dfsdm_adc_dma_residue(struct stm32_dfsdm_adc *adc)
+{
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(adc->dma_chan,
+ adc->dma_chan->cookie,
+ &state);
+ if (status == DMA_IN_PROGRESS) {
+ /* Residue is size in bytes from end of buffer */
+ unsigned int i = adc->buf_sz - state.residue;
+ unsigned int size;
+
+ /* Return available bytes */
+ if (i >= adc->bufi)
+ size = i - adc->bufi;
+ else
+ size = adc->buf_sz + i - adc->bufi;
+
+ return size;
+ }
+
+ return 0;
+}
+
+static void stm32_dfsdm_audio_dma_buffer_done(void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int available = stm32_dfsdm_adc_dma_residue(adc);
+ size_t old_pos;
+
+	/*
+	 * FIXME: the IIO kernel interface does not support cyclic DMA
+	 * buffers, and only offers an interface to push data sample per
+	 * sample. For this reason the IIO buffer interface is not used
+	 * here and is bypassed through a private callback registered by
+	 * ASoC. This should be a temporary solution, waiting for cyclic
+	 * DMA engine support in IIO.
+	 */
+
+ dev_dbg(&indio_dev->dev, "%s: pos = %d, available = %d\n", __func__,
+ adc->bufi, available);
+ old_pos = adc->bufi;
+
+ while (available >= indio_dev->scan_bytes) {
+ u32 *buffer = (u32 *)&adc->rx_buf[adc->bufi];
+
+		/* Mask the 8 LSBs that contain the channel ID */
+ *buffer = (*buffer & 0xFFFFFF00) << 8;
+ available -= indio_dev->scan_bytes;
+ adc->bufi += indio_dev->scan_bytes;
+ if (adc->bufi >= adc->buf_sz) {
+ if (adc->cb)
+ adc->cb(&adc->rx_buf[old_pos],
+ adc->buf_sz - old_pos, adc->cb_priv);
+ adc->bufi = 0;
+ old_pos = 0;
+ }
+ }
+ if (adc->cb)
+ adc->cb(&adc->rx_buf[old_pos], adc->bufi - old_pos,
+ adc->cb_priv);
+}
+
+static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (!adc->dma_chan)
+ return -EINVAL;
+
+ dev_dbg(&indio_dev->dev, "%s size=%d watermark=%d\n", __func__,
+ adc->buf_sz, adc->buf_sz / 2);
+
+ /* Prepare a DMA cyclic transaction */
+ desc = dmaengine_prep_dma_cyclic(adc->dma_chan,
+ adc->dma_buf,
+ adc->buf_sz, adc->buf_sz / 2,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc)
+ return -EBUSY;
+
+ desc->callback = stm32_dfsdm_audio_dma_buffer_done;
+ desc->callback_param = indio_dev;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dmaengine_terminate_all(adc->dma_chan);
+ return ret;
+ }
+
+ /* Issue pending DMA requests */
+ dma_async_issue_pending(adc->dma_chan);
+
+ return 0;
+}
+
+static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ /* Reset adc buffer index */
+ adc->bufi = 0;
+
+ ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
+ if (ret < 0)
+ return ret;
+
+ ret = stm32_dfsdm_start_conv(adc, true);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Can't start conversion\n");
+ goto stop_dfsdm;
+ }
+
+ if (adc->dma_chan) {
+ ret = stm32_dfsdm_adc_dma_start(indio_dev);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Can't start DMA\n");
+ goto err_stop_conv;
+ }
+ }
+
+ return 0;
+
+err_stop_conv:
+ stm32_dfsdm_stop_conv(adc);
+stop_dfsdm:
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return ret;
+}
+
+static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ if (adc->dma_chan)
+ dmaengine_terminate_all(adc->dma_chan);
+
+ stm32_dfsdm_stop_conv(adc);
+
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops stm32_dfsdm_buffer_setup_ops = {
+ .postenable = &stm32_dfsdm_postenable,
+ .predisable = &stm32_dfsdm_predisable,
+};
+
+/**
+ * stm32_dfsdm_get_buff_cb() - register a callback that will be called when
+ * a DMA transfer period is completed.
+ *
+ * @iio_dev: Handle to IIO device.
+ * @cb: Pointer to callback function:
+ *      - data: pointer to data buffer
+ *      - size: size in bytes of the data buffer
+ *      - private: pointer to consumer private structure.
+ * @private: Pointer to consumer private structure.
+ */
+int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
+ int (*cb)(const void *data, size_t size,
+ void *private),
+ void *private)
+{
+ struct stm32_dfsdm_adc *adc;
+
+ if (!iio_dev)
+ return -EINVAL;
+ adc = iio_priv(iio_dev);
+
+ adc->cb = cb;
+ adc->cb_priv = private;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_get_buff_cb);
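+
+/*
+ * Illustrative consumer-side sketch (hypothetical names, not part of this
+ * patch): an ASoC platform driver could register its period callback as:
+ *
+ *	static int dfsdm_pcm_cb(const void *data, size_t size, void *priv)
+ *	{
+ *		... push 'size' bytes of PDM samples to the ALSA ring ...
+ *		return 0;
+ *	}
+ *
+ *	stm32_dfsdm_get_buff_cb(iio_dev, dfsdm_pcm_cb, prtd);
+ */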
+
+/**
+ * stm32_dfsdm_release_buff_cb - unregister buffer callback
+ *
+ * @iio_dev: Handle to IIO device.
+ */
+int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev)
+{
+ struct stm32_dfsdm_adc *adc;
+
+ if (!iio_dev)
+ return -EINVAL;
+ adc = iio_priv(iio_dev);
+
+ adc->cb = NULL;
+ adc->cb_priv = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_release_buff_cb);
+
+static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *res)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ long timeout;
+ int ret;
+
+ reinit_completion(&adc->completion);
+
+ adc->buffer = res;
+
+ ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(1));
+ if (ret < 0)
+ goto stop_dfsdm;
+
+ ret = stm32_dfsdm_start_conv(adc, false);
+ if (ret < 0) {
+ regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
+ goto stop_dfsdm;
+ }
+
+ timeout = wait_for_completion_interruptible_timeout(&adc->completion,
+ DFSDM_TIMEOUT);
+
+	/* Mask IRQ for regular conversion completion */
+ regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
+
+ if (timeout == 0)
+ ret = -ETIMEDOUT;
+ else if (timeout < 0)
+ ret = timeout;
+ else
+ ret = IIO_VAL_INT;
+
+ stm32_dfsdm_stop_conv(adc);
+
+stop_dfsdm:
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return ret;
+}
+
+static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+ struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
+ unsigned int spi_freq = adc->spi_freq;
+ int ret = -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = stm32_dfsdm_set_osrs(fl, 0, val);
+ if (!ret)
+ adc->oversamp = val;
+
+ return ret;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!val)
+ return -EINVAL;
+ if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ spi_freq = adc->dfsdm->spi_master_freq;
+
+ if (spi_freq % val)
+ dev_warn(&indio_dev->dev,
+ "Sampling rate not accurate (%d)\n",
+ spi_freq / (spi_freq / val));
+
+ ret = stm32_dfsdm_set_osrs(fl, 0, (spi_freq / val));
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+				"No matching filter parameters found\n");
+ return ret;
+ }
+ adc->sample_freq = val;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_hw_consumer_enable(adc->hwc);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "%s: IIO enable failed (channel %d)\n",
+ __func__, chan->channel);
+ return ret;
+ }
+ ret = stm32_dfsdm_single_conv(indio_dev, chan, val);
+ iio_hw_consumer_disable(adc->hwc);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "%s: Conversion failed (channel %d)\n",
+ __func__, chan->channel);
+ return ret;
+ }
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = adc->oversamp;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = adc->sample_freq;
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info stm32_dfsdm_info_audio = {
+ .hwfifo_set_watermark = stm32_dfsdm_set_watermark,
+ .read_raw = stm32_dfsdm_read_raw,
+ .write_raw = stm32_dfsdm_write_raw,
+};
+
+static const struct iio_info stm32_dfsdm_info_adc = {
+ .read_raw = stm32_dfsdm_read_raw,
+ .write_raw = stm32_dfsdm_write_raw,
+};
+
+static irqreturn_t stm32_dfsdm_irq(int irq, void *arg)
+{
+ struct stm32_dfsdm_adc *adc = arg;
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+ struct regmap *regmap = adc->dfsdm->regmap;
+ unsigned int status, int_en;
+
+ regmap_read(regmap, DFSDM_ISR(adc->fl_id), &status);
+ regmap_read(regmap, DFSDM_CR2(adc->fl_id), &int_en);
+
+ if (status & DFSDM_ISR_REOCF_MASK) {
+		/* Reading the data register clears the IRQ status */
+ regmap_read(regmap, DFSDM_RDATAR(adc->fl_id), adc->buffer);
+ complete(&adc->completion);
+ }
+
+ if (status & DFSDM_ISR_ROVRF_MASK) {
+ if (int_en & DFSDM_CR2_ROVRIE_MASK)
+ dev_warn(&indio_dev->dev, "Overrun detected\n");
+ regmap_update_bits(regmap, DFSDM_ICR(adc->fl_id),
+ DFSDM_ICR_CLRROVRF_MASK,
+ DFSDM_ICR_CLRROVRF_MASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Define external info for SPI Frequency and audio sampling rate that can be
+ * configured by ASoC driver through consumer.h API
+ */
+static const struct iio_chan_spec_ext_info dfsdm_adc_audio_ext_info[] = {
+ /* spi_clk_freq : clock freq on SPI/manchester bus used by channel */
+ {
+ .name = "spi_clk_freq",
+ .shared = IIO_SHARED_BY_TYPE,
+ .read = dfsdm_adc_audio_get_spiclk,
+ .write = dfsdm_adc_audio_set_spiclk,
+ },
+ {},
+};
+
+static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ if (adc->dma_chan) {
+ dma_free_coherent(adc->dma_chan->device->dev,
+ DFSDM_DMA_BUFFER_SIZE,
+ adc->rx_buf, adc->dma_buf);
+ dma_release_channel(adc->dma_chan);
+ }
+}
+
+static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct dma_slave_config config = {
+ .src_addr = (dma_addr_t)adc->dfsdm->phys_base +
+ DFSDM_RDATAR(adc->fl_id),
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ };
+ int ret;
+
+ adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
+ if (!adc->dma_chan)
+ return -EINVAL;
+
+ adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
+ DFSDM_DMA_BUFFER_SIZE,
+ &adc->dma_buf, GFP_KERNEL);
+ if (!adc->rx_buf) {
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ ret = dmaengine_slave_config(adc->dma_chan, &config);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dma_free_coherent(adc->dma_chan->device->dev, DFSDM_DMA_BUFFER_SIZE,
+ adc->rx_buf, adc->dma_buf);
+err_release:
+ dma_release_channel(adc->dma_chan);
+
+ return ret;
+}
+
+static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
+ struct iio_chan_spec *ch)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ ret = stm32_dfsdm_channel_parse_of(adc->dfsdm, indio_dev, ch);
+ if (ret < 0)
+ return ret;
+
+ ch->type = IIO_VOLTAGE;
+ ch->indexed = 1;
+
+ /*
+ * IIO_CHAN_INFO_RAW: used to compute regular conversion
+ * IIO_CHAN_INFO_OVERSAMPLING_RATIO: used to set oversampling
+ */
+ ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+
+ if (adc->dev_data->type == DFSDM_AUDIO) {
+ ch->scan_type.sign = 's';
+ ch->ext_info = dfsdm_adc_audio_ext_info;
+ } else {
+ ch->scan_type.sign = 'u';
+ }
+ ch->scan_type.realbits = 24;
+ ch->scan_type.storagebits = 32;
+ adc->ch_id = ch->channel;
+
+ return stm32_dfsdm_chan_configure(adc->dfsdm,
+ &adc->dfsdm->ch_list[ch->channel]);
+}
+
+static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
+{
+ struct iio_chan_spec *ch;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_channel *d_ch;
+ int ret;
+
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
+ indio_dev->setup_ops = &stm32_dfsdm_buffer_setup_ops;
+
+ ch = devm_kzalloc(&indio_dev->dev, sizeof(*ch), GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
+
+ ch->scan_index = 0;
+
+ ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Channels init failed\n");
+ return ret;
+ }
+ ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ);
+
+ d_ch = &adc->dfsdm->ch_list[adc->ch_id];
+ if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ adc->spi_freq = adc->dfsdm->spi_master_freq;
+
+ indio_dev->num_channels = 1;
+ indio_dev->channels = ch;
+
+ return stm32_dfsdm_dma_request(indio_dev);
+}
+
+static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
+{
+ struct iio_chan_spec *ch;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int num_ch;
+ int ret, chan_idx;
+
+ adc->oversamp = DFSDM_DEFAULT_OVERSAMPLING;
+ ret = stm32_dfsdm_set_osrs(&adc->dfsdm->fl_list[adc->fl_id], 0,
+ adc->oversamp);
+ if (ret < 0)
+ return ret;
+
+ num_ch = of_property_count_u32_elems(indio_dev->dev.of_node,
+ "st,adc-channels");
+ if (num_ch < 0 || num_ch > adc->dfsdm->num_chs) {
+ dev_err(&indio_dev->dev, "Bad st,adc-channels\n");
+ return num_ch < 0 ? num_ch : -EINVAL;
+ }
+
+ /* Bind to SD modulator IIO device */
+ adc->hwc = devm_iio_hw_consumer_alloc(&indio_dev->dev);
+ if (IS_ERR(adc->hwc))
+ return -EPROBE_DEFER;
+
+ ch = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*ch),
+ GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
+
+	for (chan_idx = 0; chan_idx < num_ch; chan_idx++) {
+		ch[chan_idx].scan_index = chan_idx;
+		ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &ch[chan_idx]);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Channels init failed\n");
+ return ret;
+ }
+ }
+
+ indio_dev->num_channels = num_ch;
+ indio_dev->channels = ch;
+
+ init_completion(&adc->completion);
+
+ return 0;
+}
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_adc_data = {
+ .type = DFSDM_IIO,
+ .init = stm32_dfsdm_adc_init,
+};
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_audio_data = {
+ .type = DFSDM_AUDIO,
+ .init = stm32_dfsdm_audio_init,
+};
+
+static const struct of_device_id stm32_dfsdm_adc_match[] = {
+ {
+ .compatible = "st,stm32-dfsdm-adc",
+ .data = &stm32h7_dfsdm_adc_data,
+ },
+ {
+ .compatible = "st,stm32-dfsdm-dmic",
+ .data = &stm32h7_dfsdm_audio_data,
+ },
+ {}
+};
+
+static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_dfsdm_adc *adc;
+ struct device_node *np = dev->of_node;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ struct iio_dev *iio;
+ char *name;
+ int ret, irq, val;
+
+ dev_data = of_device_get_match_data(dev);
+ iio = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!iio) {
+ dev_err(dev, "%s: Failed to allocate IIO\n", __func__);
+ return -ENOMEM;
+ }
+
+ adc = iio_priv(iio);
+ adc->dfsdm = dev_get_drvdata(dev->parent);
+
+ iio->dev.parent = dev;
+ iio->dev.of_node = np;
+ iio->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
+
+ platform_set_drvdata(pdev, adc);
+
+ ret = of_property_read_u32(dev->of_node, "reg", &adc->fl_id);
+ if (ret != 0) {
+ dev_err(dev, "Missing reg property\n");
+ return -EINVAL;
+ }
+
+ name = devm_kzalloc(dev, sizeof("dfsdm-adc0"), GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ if (dev_data->type == DFSDM_AUDIO) {
+ iio->info = &stm32_dfsdm_info_audio;
+ snprintf(name, sizeof("dfsdm-pdm0"), "dfsdm-pdm%d", adc->fl_id);
+ } else {
+ iio->info = &stm32_dfsdm_info_adc;
+ snprintf(name, sizeof("dfsdm-adc0"), "dfsdm-adc%d", adc->fl_id);
+ }
+ iio->name = name;
+
+ /*
+	 * In a first step, IRQs generated for channels are not handled.
+	 * So the IRQ associated to filter instance 0 is dedicated to filter 0.
+ */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	ret = devm_request_irq(dev, irq, stm32_dfsdm_irq,
+ 0, pdev->name, adc);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "st,filter-order", &val);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set filter order\n");
+ return ret;
+ }
+
+ adc->dfsdm->fl_list[adc->fl_id].ford = val;
+
+ ret = of_property_read_u32(dev->of_node, "st,filter0-sync", &val);
+ if (!ret)
+ adc->dfsdm->fl_list[adc->fl_id].sync_mode = val;
+
+ adc->dev_data = dev_data;
+ ret = dev_data->init(iio);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(iio);
+ if (ret < 0)
+ goto err_cleanup;
+
+ if (dev_data->type == DFSDM_AUDIO) {
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to find an audio DAI\n");
+ goto err_unregister;
+ }
+ }
+
+ return 0;
+
+err_unregister:
+ iio_device_unregister(iio);
+err_cleanup:
+ stm32_dfsdm_dma_release(iio);
+
+ return ret;
+}
+
+static int stm32_dfsdm_adc_remove(struct platform_device *pdev)
+{
+ struct stm32_dfsdm_adc *adc = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+
+ if (adc->dev_data->type == DFSDM_AUDIO)
+ of_platform_depopulate(&pdev->dev);
+ iio_device_unregister(indio_dev);
+ stm32_dfsdm_dma_release(indio_dev);
+
+ return 0;
+}
+
+static struct platform_driver stm32_dfsdm_adc_driver = {
+ .driver = {
+ .name = "stm32-dfsdm-adc",
+ .of_match_table = stm32_dfsdm_adc_match,
+ },
+ .probe = stm32_dfsdm_adc_probe,
+ .remove = stm32_dfsdm_adc_remove,
+};
+module_platform_driver(stm32_dfsdm_adc_driver);
+
+MODULE_DESCRIPTION("STM32 sigma delta ADC");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
new file mode 100644
index 000000000000..6290332cfd3f
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is the core part of the STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "stm32-dfsdm.h"
+
+struct stm32_dfsdm_dev_data {
+ unsigned int num_filters;
+ unsigned int num_channels;
+ const struct regmap_config *regmap_cfg;
+};
+
+#define STM32H7_DFSDM_NUM_FILTERS 4
+#define STM32H7_DFSDM_NUM_CHANNELS 8
+
+static bool stm32_dfsdm_volatile_reg(struct device *dev, unsigned int reg)
+{
+ if (reg < DFSDM_FILTER_BASE_ADR)
+ return false;
+
+ /*
+	 * The mask is applied to the register address so that the registers
+	 * of all filter instances do not need to be listed individually.
+ */
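+	/*
+	 * e.g. DFSDM_ISR(3) = 0x288 and DFSDM_ISR(0) = 0x108 both map to
+	 * 0x08 once masked with DFSDM_FILTER_REG_MASK (0x7f).
+	 */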
+ switch (reg & DFSDM_FILTER_REG_MASK) {
+ case DFSDM_CR1(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_ISR(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_JDATAR(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_RDATAR(0) & DFSDM_FILTER_REG_MASK:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config stm32h7_dfsdm_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = sizeof(u32),
+ .max_register = 0x2B8,
+ .volatile_reg = stm32_dfsdm_volatile_reg,
+ .fast_io = true,
+};
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_data = {
+ .num_filters = STM32H7_DFSDM_NUM_FILTERS,
+ .num_channels = STM32H7_DFSDM_NUM_CHANNELS,
+ .regmap_cfg = &stm32h7_dfsdm_regmap_cfg,
+};
+
+struct dfsdm_priv {
+ struct platform_device *pdev; /* platform device */
+
+ struct stm32_dfsdm dfsdm; /* common data exported for all instances */
+
+ unsigned int spi_clk_out_div; /* SPI clkout divider value */
+ atomic_t n_active_ch; /* number of current active channels */
+
+ struct clk *clk; /* DFSDM clock */
+ struct clk *aclk; /* audio clock */
+};
+
+/**
+ * stm32_dfsdm_start_dfsdm - start global dfsdm interface.
+ * @dfsdm: Handle used to retrieve dfsdm context.
+ *
+ * Enable the interface when the first channel becomes active
+ * (n_active_ch transitions from 0 to 1).
+ */
+int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
+{
+ struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
+ struct device *dev = &priv->pdev->dev;
+ unsigned int clk_div = priv->spi_clk_out_div;
+ int ret;
+
+ if (atomic_inc_return(&priv->n_active_ch) == 1) {
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to start clock\n");
+ goto error_ret;
+ }
+ if (priv->aclk) {
+ ret = clk_prepare_enable(priv->aclk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to start audio clock\n");
+ goto disable_clk;
+ }
+ }
+
+		/* Output the SPI CLKOUT (if clk_div == 0 the clock is OFF) */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_CKOUTDIV_MASK,
+ DFSDM_CHCFGR1_CKOUTDIV(clk_div));
+ if (ret < 0)
+ goto disable_aclk;
+
+ /* Global enable of DFSDM interface */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_DFSDMEN_MASK,
+ DFSDM_CHCFGR1_DFSDMEN(1));
+ if (ret < 0)
+ goto disable_aclk;
+ }
+
+ dev_dbg(dev, "%s: n_active_ch %d\n", __func__,
+ atomic_read(&priv->n_active_ch));
+
+ return 0;
+
+disable_aclk:
+ clk_disable_unprepare(priv->aclk);
+disable_clk:
+ clk_disable_unprepare(priv->clk);
+
+error_ret:
+ atomic_dec(&priv->n_active_ch);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_start_dfsdm);
+
+/**
+ * stm32_dfsdm_stop_dfsdm - stop global DFSDM interface.
+ * @dfsdm: Handle used to retrieve dfsdm context.
+ *
+ * Disable the interface when the last active channel is released
+ * (n_active_ch reaches 0).
+ */
+int stm32_dfsdm_stop_dfsdm(struct stm32_dfsdm *dfsdm)
+{
+ struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
+ int ret;
+
+ if (atomic_dec_and_test(&priv->n_active_ch)) {
+ /* Global disable of DFSDM interface */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_DFSDMEN_MASK,
+ DFSDM_CHCFGR1_DFSDMEN(0));
+ if (ret < 0)
+ return ret;
+
+ /* Stop SPI CLKOUT */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_CKOUTDIV_MASK,
+ DFSDM_CHCFGR1_CKOUTDIV(0));
+ if (ret < 0)
+ return ret;
+
+ clk_disable_unprepare(priv->clk);
+ if (priv->aclk)
+ clk_disable_unprepare(priv->aclk);
+ }
+ dev_dbg(&priv->pdev->dev, "%s: n_active_ch %d\n", __func__,
+ atomic_read(&priv->n_active_ch));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_stop_dfsdm);
+
+static int stm32_dfsdm_parse_of(struct platform_device *pdev,
+ struct dfsdm_priv *priv)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res;
+ unsigned long clk_freq;
+ unsigned int spi_freq, rem;
+ int ret;
+
+ if (!node)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return -ENODEV;
+ }
+ priv->dfsdm.phys_base = res->start;
+	priv->dfsdm.base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->dfsdm.base))
+		return PTR_ERR(priv->dfsdm.base);
+
+ /*
+ * "dfsdm" clock is mandatory for DFSDM peripheral clocking.
+ * "dfsdm" or "audio" clocks can be used as source clock for
+ * the SPI clock out signal and internal processing, depending
+ * on use case.
+ */
+ priv->clk = devm_clk_get(&pdev->dev, "dfsdm");
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "No stm32_dfsdm_clk clock found\n");
+ return -EINVAL;
+ }
+
+ priv->aclk = devm_clk_get(&pdev->dev, "audio");
+ if (IS_ERR(priv->aclk))
+ priv->aclk = NULL;
+
+ if (priv->aclk)
+ clk_freq = clk_get_rate(priv->aclk);
+ else
+ clk_freq = clk_get_rate(priv->clk);
+
+ /* SPI clock out frequency */
+ ret = of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+ &spi_freq);
+ if (ret < 0) {
+ /* No SPI master mode */
+ return 0;
+ }
+
+ priv->spi_clk_out_div = div_u64_rem(clk_freq, spi_freq, &rem) - 1;
+ priv->dfsdm.spi_master_freq = spi_freq;
+
+ if (rem) {
+ dev_warn(&pdev->dev, "SPI clock not accurate\n");
+ dev_warn(&pdev->dev, "%ld = %d * %d + %d\n",
+ clk_freq, spi_freq, priv->spi_clk_out_div + 1, rem);
+ }
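+	/*
+	 * Illustrative example: clk_freq = 49152000 Hz with a 2048000 Hz
+	 * spi-max-frequency divides exactly, giving CKOUTDIV = 24 - 1 = 23.
+	 */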
+
+ return 0;
+}
+
+static const struct of_device_id stm32_dfsdm_of_match[] = {
+ {
+ .compatible = "st,stm32h7-dfsdm",
+ .data = &stm32h7_dfsdm_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_dfsdm_of_match);
+
+static int stm32_dfsdm_probe(struct platform_device *pdev)
+{
+ struct dfsdm_priv *priv;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ struct stm32_dfsdm *dfsdm;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = pdev;
+
+ dev_data = of_device_get_match_data(&pdev->dev);
+
+ dfsdm = &priv->dfsdm;
+ dfsdm->fl_list = devm_kcalloc(&pdev->dev, dev_data->num_filters,
+ sizeof(*dfsdm->fl_list), GFP_KERNEL);
+ if (!dfsdm->fl_list)
+ return -ENOMEM;
+
+ dfsdm->num_fls = dev_data->num_filters;
+ dfsdm->ch_list = devm_kcalloc(&pdev->dev, dev_data->num_channels,
+ sizeof(*dfsdm->ch_list),
+ GFP_KERNEL);
+ if (!dfsdm->ch_list)
+ return -ENOMEM;
+ dfsdm->num_chs = dev_data->num_channels;
+
+ ret = stm32_dfsdm_parse_of(pdev, priv);
+ if (ret < 0)
+ return ret;
+
+	dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm",
+						  dfsdm->base,
+						  dev_data->regmap_cfg);
+ if (IS_ERR(dfsdm->regmap)) {
+ ret = PTR_ERR(dfsdm->regmap);
+ dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, dfsdm);
+
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static struct platform_driver stm32_dfsdm_driver = {
+ .probe = stm32_dfsdm_probe,
+ .driver = {
+ .name = "stm32-dfsdm",
+ .of_match_table = stm32_dfsdm_of_match,
+ },
+};
+
+module_platform_driver(stm32_dfsdm_driver);
+
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 DFSDM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm.h b/drivers/iio/adc/stm32-dfsdm.h
new file mode 100644
index 000000000000..8708394b0725
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is part of STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#ifndef MDF_STM32_DFSDM__H
+#define MDF_STM32_DFSDM__H
+
+#include <linux/bitfield.h>
+
+/*
+ * STM32 DFSDM - global register map
+ * ________________________________________________________
+ * | Offset | Registers block |
+ * --------------------------------------------------------
+ * | 0x000 | CHANNEL 0 + COMMON CHANNEL FIELDS |
+ * --------------------------------------------------------
+ * | 0x020 | CHANNEL 1 |
+ * --------------------------------------------------------
+ * | ... | ..... |
+ * --------------------------------------------------------
+ * | 0x0E0 | CHANNEL 7 |
+ * --------------------------------------------------------
+ * | 0x100 | FILTER 0 + COMMON FILTER FIELDs |
+ * --------------------------------------------------------
+ * | 0x200 | FILTER 1 |
+ * --------------------------------------------------------
+ * | 0x300 | FILTER 2 |
+ * --------------------------------------------------------
+ * | 0x400 | FILTER 3 |
+ * --------------------------------------------------------
+ */
+
+/*
+ * Channels register definitions
+ */
+#define DFSDM_CHCFGR1(y) ((y) * 0x20 + 0x00)
+#define DFSDM_CHCFGR2(y) ((y) * 0x20 + 0x04)
+#define DFSDM_AWSCDR(y) ((y) * 0x20 + 0x08)
+#define DFSDM_CHWDATR(y) ((y) * 0x20 + 0x0C)
+#define DFSDM_CHDATINR(y) ((y) * 0x20 + 0x10)
+
+/* CHCFGR1: Channel configuration register 1 */
+#define DFSDM_CHCFGR1_SITP_MASK GENMASK(1, 0)
+#define DFSDM_CHCFGR1_SITP(v) FIELD_PREP(DFSDM_CHCFGR1_SITP_MASK, v)
+#define DFSDM_CHCFGR1_SPICKSEL_MASK GENMASK(3, 2)
+#define DFSDM_CHCFGR1_SPICKSEL(v) FIELD_PREP(DFSDM_CHCFGR1_SPICKSEL_MASK, v)
+#define DFSDM_CHCFGR1_SCDEN_MASK BIT(5)
+#define DFSDM_CHCFGR1_SCDEN(v) FIELD_PREP(DFSDM_CHCFGR1_SCDEN_MASK, v)
+#define DFSDM_CHCFGR1_CKABEN_MASK BIT(6)
+#define DFSDM_CHCFGR1_CKABEN(v) FIELD_PREP(DFSDM_CHCFGR1_CKABEN_MASK, v)
+#define DFSDM_CHCFGR1_CHEN_MASK BIT(7)
+#define DFSDM_CHCFGR1_CHEN(v) FIELD_PREP(DFSDM_CHCFGR1_CHEN_MASK, v)
+#define DFSDM_CHCFGR1_CHINSEL_MASK BIT(8)
+#define DFSDM_CHCFGR1_CHINSEL(v) FIELD_PREP(DFSDM_CHCFGR1_CHINSEL_MASK, v)
+#define DFSDM_CHCFGR1_DATMPX_MASK GENMASK(13, 12)
+#define DFSDM_CHCFGR1_DATMPX(v) FIELD_PREP(DFSDM_CHCFGR1_DATMPX_MASK, v)
+#define DFSDM_CHCFGR1_DATPACK_MASK GENMASK(15, 14)
+#define DFSDM_CHCFGR1_DATPACK(v) FIELD_PREP(DFSDM_CHCFGR1_DATPACK_MASK, v)
+#define DFSDM_CHCFGR1_CKOUTDIV_MASK GENMASK(23, 16)
+#define DFSDM_CHCFGR1_CKOUTDIV(v) FIELD_PREP(DFSDM_CHCFGR1_CKOUTDIV_MASK, v)
+#define DFSDM_CHCFGR1_CKOUTSRC_MASK BIT(30)
+#define DFSDM_CHCFGR1_CKOUTSRC(v) FIELD_PREP(DFSDM_CHCFGR1_CKOUTSRC_MASK, v)
+#define DFSDM_CHCFGR1_DFSDMEN_MASK BIT(31)
+#define DFSDM_CHCFGR1_DFSDMEN(v) FIELD_PREP(DFSDM_CHCFGR1_DFSDMEN_MASK, v)
+
+/* CHCFGR2: Channel configuration register 2 */
+#define DFSDM_CHCFGR2_DTRBS_MASK GENMASK(7, 3)
+#define DFSDM_CHCFGR2_DTRBS(v) FIELD_PREP(DFSDM_CHCFGR2_DTRBS_MASK, v)
+#define DFSDM_CHCFGR2_OFFSET_MASK GENMASK(31, 8)
+#define DFSDM_CHCFGR2_OFFSET(v) FIELD_PREP(DFSDM_CHCFGR2_OFFSET_MASK, v)
+
+/* AWSCDR: Channel analog watchdog and short circuit detector */
+#define DFSDM_AWSCDR_SCDT_MASK GENMASK(7, 0)
+#define DFSDM_AWSCDR_SCDT(v) FIELD_PREP(DFSDM_AWSCDR_SCDT_MASK, v)
+#define DFSDM_AWSCDR_BKSCD_MASK GENMASK(15, 12)
+#define DFSDM_AWSCDR_BKSCD(v) FIELD_PREP(DFSDM_AWSCDR_BKSCD_MASK, v)
+#define DFSDM_AWSCDR_AWFOSR_MASK GENMASK(20, 16)
+#define DFSDM_AWSCDR_AWFOSR(v) FIELD_PREP(DFSDM_AWSCDR_AWFOSR_MASK, v)
+#define DFSDM_AWSCDR_AWFORD_MASK GENMASK(23, 22)
+#define DFSDM_AWSCDR_AWFORD(v) FIELD_PREP(DFSDM_AWSCDR_AWFORD_MASK, v)
+
+/*
+ * Filters register definitions
+ */
+#define DFSDM_FILTER_BASE_ADR 0x100
+#define DFSDM_FILTER_REG_MASK 0x7F
+#define DFSDM_FILTER_X_BASE_ADR(x) ((x) * 0x80 + DFSDM_FILTER_BASE_ADR)
+
+#define DFSDM_CR1(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x00)
+#define DFSDM_CR2(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x04)
+#define DFSDM_ISR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x08)
+#define DFSDM_ICR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x0C)
+#define DFSDM_JCHGR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x10)
+#define DFSDM_FCR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x14)
+#define DFSDM_JDATAR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x18)
+#define DFSDM_RDATAR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x1C)
+#define DFSDM_AWHTR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x20)
+#define DFSDM_AWLTR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x24)
+#define DFSDM_AWSR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x28)
+#define DFSDM_AWCFR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x2C)
+#define DFSDM_EXMAX(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x30)
+#define DFSDM_EXMIN(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x34)
+#define DFSDM_CNVTIMR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x38)
+
+/* CR1 Control register 1 */
+#define DFSDM_CR1_DFEN_MASK BIT(0)
+#define DFSDM_CR1_DFEN(v) FIELD_PREP(DFSDM_CR1_DFEN_MASK, v)
+#define DFSDM_CR1_JSWSTART_MASK BIT(1)
+#define DFSDM_CR1_JSWSTART(v) FIELD_PREP(DFSDM_CR1_JSWSTART_MASK, v)
+#define DFSDM_CR1_JSYNC_MASK BIT(3)
+#define DFSDM_CR1_JSYNC(v) FIELD_PREP(DFSDM_CR1_JSYNC_MASK, v)
+#define DFSDM_CR1_JSCAN_MASK BIT(4)
+#define DFSDM_CR1_JSCAN(v) FIELD_PREP(DFSDM_CR1_JSCAN_MASK, v)
+#define DFSDM_CR1_JDMAEN_MASK BIT(5)
+#define DFSDM_CR1_JDMAEN(v) FIELD_PREP(DFSDM_CR1_JDMAEN_MASK, v)
+#define DFSDM_CR1_JEXTSEL_MASK GENMASK(12, 8)
+#define DFSDM_CR1_JEXTSEL(v) FIELD_PREP(DFSDM_CR1_JEXTSEL_MASK, v)
+#define DFSDM_CR1_JEXTEN_MASK GENMASK(14, 13)
+#define DFSDM_CR1_JEXTEN(v) FIELD_PREP(DFSDM_CR1_JEXTEN_MASK, v)
+#define DFSDM_CR1_RSWSTART_MASK BIT(17)
+#define DFSDM_CR1_RSWSTART(v) FIELD_PREP(DFSDM_CR1_RSWSTART_MASK, v)
+#define DFSDM_CR1_RCONT_MASK BIT(18)
+#define DFSDM_CR1_RCONT(v) FIELD_PREP(DFSDM_CR1_RCONT_MASK, v)
+#define DFSDM_CR1_RSYNC_MASK BIT(19)
+#define DFSDM_CR1_RSYNC(v) FIELD_PREP(DFSDM_CR1_RSYNC_MASK, v)
+#define DFSDM_CR1_RDMAEN_MASK BIT(21)
+#define DFSDM_CR1_RDMAEN(v) FIELD_PREP(DFSDM_CR1_RDMAEN_MASK, v)
+#define DFSDM_CR1_RCH_MASK GENMASK(26, 24)
+#define DFSDM_CR1_RCH(v) FIELD_PREP(DFSDM_CR1_RCH_MASK, v)
+#define DFSDM_CR1_FAST_MASK BIT(29)
+#define DFSDM_CR1_FAST(v) FIELD_PREP(DFSDM_CR1_FAST_MASK, v)
+#define DFSDM_CR1_AWFSEL_MASK BIT(30)
+#define DFSDM_CR1_AWFSEL(v) FIELD_PREP(DFSDM_CR1_AWFSEL_MASK, v)
+
+/* CR2: Control register 2 */
+#define DFSDM_CR2_IE_MASK GENMASK(6, 0)
+#define DFSDM_CR2_IE(v) FIELD_PREP(DFSDM_CR2_IE_MASK, v)
+#define DFSDM_CR2_JEOCIE_MASK BIT(0)
+#define DFSDM_CR2_JEOCIE(v) FIELD_PREP(DFSDM_CR2_JEOCIE_MASK, v)
+#define DFSDM_CR2_REOCIE_MASK BIT(1)
+#define DFSDM_CR2_REOCIE(v) FIELD_PREP(DFSDM_CR2_REOCIE_MASK, v)
+#define DFSDM_CR2_JOVRIE_MASK BIT(2)
+#define DFSDM_CR2_JOVRIE(v) FIELD_PREP(DFSDM_CR2_JOVRIE_MASK, v)
+#define DFSDM_CR2_ROVRIE_MASK BIT(3)
+#define DFSDM_CR2_ROVRIE(v) FIELD_PREP(DFSDM_CR2_ROVRIE_MASK, v)
+#define DFSDM_CR2_AWDIE_MASK BIT(4)
+#define DFSDM_CR2_AWDIE(v) FIELD_PREP(DFSDM_CR2_AWDIE_MASK, v)
+#define DFSDM_CR2_SCDIE_MASK BIT(5)
+#define DFSDM_CR2_SCDIE(v) FIELD_PREP(DFSDM_CR2_SCDIE_MASK, v)
+#define DFSDM_CR2_CKABIE_MASK BIT(6)
+#define DFSDM_CR2_CKABIE(v) FIELD_PREP(DFSDM_CR2_CKABIE_MASK, v)
+#define DFSDM_CR2_EXCH_MASK GENMASK(15, 8)
+#define DFSDM_CR2_EXCH(v) FIELD_PREP(DFSDM_CR2_EXCH_MASK, v)
+#define DFSDM_CR2_AWDCH_MASK GENMASK(23, 16)
+#define DFSDM_CR2_AWDCH(v) FIELD_PREP(DFSDM_CR2_AWDCH_MASK, v)
+
+/* ISR: Interrupt status register */
+#define DFSDM_ISR_JEOCF_MASK BIT(0)
+#define DFSDM_ISR_JEOCF(v) FIELD_PREP(DFSDM_ISR_JEOCF_MASK, v)
+#define DFSDM_ISR_REOCF_MASK BIT(1)
+#define DFSDM_ISR_REOCF(v) FIELD_PREP(DFSDM_ISR_REOCF_MASK, v)
+#define DFSDM_ISR_JOVRF_MASK BIT(2)
+#define DFSDM_ISR_JOVRF(v) FIELD_PREP(DFSDM_ISR_JOVRF_MASK, v)
+#define DFSDM_ISR_ROVRF_MASK BIT(3)
+#define DFSDM_ISR_ROVRF(v) FIELD_PREP(DFSDM_ISR_ROVRF_MASK, v)
+#define DFSDM_ISR_AWDF_MASK BIT(4)
+#define DFSDM_ISR_AWDF(v) FIELD_PREP(DFSDM_ISR_AWDF_MASK, v)
+#define DFSDM_ISR_JCIP_MASK BIT(13)
+#define DFSDM_ISR_JCIP(v) FIELD_PREP(DFSDM_ISR_JCIP_MASK, v)
+#define DFSDM_ISR_RCIP_MASK BIT(14)
+#define DFSDM_ISR_RCIP(v)		FIELD_PREP(DFSDM_ISR_RCIP_MASK, v)
+#define DFSDM_ISR_CKABF_MASK GENMASK(23, 16)
+#define DFSDM_ISR_CKABF(v) FIELD_PREP(DFSDM_ISR_CKABF_MASK, v)
+#define DFSDM_ISR_SCDF_MASK GENMASK(31, 24)
+#define DFSDM_ISR_SCDF(v) FIELD_PREP(DFSDM_ISR_SCDF_MASK, v)
+
+/* ICR: Interrupt flag clear register */
+#define DFSDM_ICR_CLRJOVRF_MASK BIT(2)
+#define DFSDM_ICR_CLRJOVRF(v) FIELD_PREP(DFSDM_ICR_CLRJOVRF_MASK, v)
+#define DFSDM_ICR_CLRROVRF_MASK BIT(3)
+#define DFSDM_ICR_CLRROVRF(v) FIELD_PREP(DFSDM_ICR_CLRROVRF_MASK, v)
+#define DFSDM_ICR_CLRCKABF_MASK GENMASK(23, 16)
+#define DFSDM_ICR_CLRCKABF(v) FIELD_PREP(DFSDM_ICR_CLRCKABF_MASK, v)
+#define DFSDM_ICR_CLRCKABF_CH_MASK(y) BIT(16 + (y))
+#define DFSDM_ICR_CLRCKABF_CH(v, y) \
+ (((v) << (16 + (y))) & DFSDM_ICR_CLRCKABF_CH_MASK(y))
+#define DFSDM_ICR_CLRSCDF_MASK GENMASK(31, 24)
+#define DFSDM_ICR_CLRSCDF(v) FIELD_PREP(DFSDM_ICR_CLRSCDF_MASK, v)
+#define DFSDM_ICR_CLRSCDF_CH_MASK(y) BIT(24 + (y))
+#define DFSDM_ICR_CLRSCDF_CH(v, y) \
+	(((v) << (24 + (y))) & DFSDM_ICR_CLRSCDF_CH_MASK(y))
+
+/* FCR: Filter control register */
+#define DFSDM_FCR_IOSR_MASK GENMASK(7, 0)
+#define DFSDM_FCR_IOSR(v) FIELD_PREP(DFSDM_FCR_IOSR_MASK, v)
+#define DFSDM_FCR_FOSR_MASK GENMASK(25, 16)
+#define DFSDM_FCR_FOSR(v) FIELD_PREP(DFSDM_FCR_FOSR_MASK, v)
+#define DFSDM_FCR_FORD_MASK GENMASK(31, 29)
+#define DFSDM_FCR_FORD(v) FIELD_PREP(DFSDM_FCR_FORD_MASK, v)
+
+/* RDATAR: Filter data register for regular channel */
+#define DFSDM_DATAR_CH_MASK GENMASK(2, 0)
+#define DFSDM_DATAR_DATA_OFFSET 8
+#define DFSDM_DATAR_DATA_MASK GENMASK(31, DFSDM_DATAR_DATA_OFFSET)
+
+/* AWLTR: Filter analog watchdog low threshold register */
+#define DFSDM_AWLTR_BKAWL_MASK GENMASK(3, 0)
+#define DFSDM_AWLTR_BKAWL(v) FIELD_PREP(DFSDM_AWLTR_BKAWL_MASK, v)
+#define DFSDM_AWLTR_AWLT_MASK GENMASK(31, 8)
+#define DFSDM_AWLTR_AWLT(v) FIELD_PREP(DFSDM_AWLTR_AWLT_MASK, v)
+
+/* AWHTR: Filter analog watchdog high threshold register */
+#define DFSDM_AWHTR_BKAWH_MASK GENMASK(3, 0)
+#define DFSDM_AWHTR_BKAWH(v) FIELD_PREP(DFSDM_AWHTR_BKAWH_MASK, v)
+#define DFSDM_AWHTR_AWHT_MASK GENMASK(31, 8)
+#define DFSDM_AWHTR_AWHT(v) FIELD_PREP(DFSDM_AWHTR_AWHT_MASK, v)
+
+/* AWSR: Filter watchdog status register */
+#define DFSDM_AWSR_AWLTF_MASK GENMASK(7, 0)
+#define DFSDM_AWSR_AWLTF(v) FIELD_PREP(DFSDM_AWSR_AWLTF_MASK, v)
+#define DFSDM_AWSR_AWHTF_MASK GENMASK(15, 8)
+#define DFSDM_AWSR_AWHTF(v) FIELD_PREP(DFSDM_AWSR_AWHTF_MASK, v)
+
+/* AWCFR: Filter analog watchdog clear flag register */
+#define DFSDM_AWCFR_AWLTF_MASK GENMASK(7, 0)
+#define DFSDM_AWCFR_AWLTF(v) FIELD_PREP(DFSDM_AWCFR_AWLTF_MASK, v)
+#define DFSDM_AWCFR_AWHTF_MASK GENMASK(15, 8)
+#define DFSDM_AWCFR_AWHTF(v) FIELD_PREP(DFSDM_AWCFR_AWHTF_MASK, v)
+
+/* DFSDM filter order */
+enum stm32_dfsdm_sinc_order {
+ DFSDM_FASTSINC_ORDER, /* FastSinc filter type */
+ DFSDM_SINC1_ORDER, /* Sinc 1 filter type */
+ DFSDM_SINC2_ORDER, /* Sinc 2 filter type */
+ DFSDM_SINC3_ORDER, /* Sinc 3 filter type */
+ DFSDM_SINC4_ORDER, /* Sinc 4 filter type (N.A. for watchdog) */
+ DFSDM_SINC5_ORDER, /* Sinc 5 filter type (N.A. for watchdog) */
+ DFSDM_NB_SINC_ORDER,
+};
+
+/**
+ * struct stm32_dfsdm_filter - structure relative to stm32 DFSDM filter
+ * @iosr: integrator oversampling
+ * @fosr: filter oversampling
+ * @ford: filter order
+ * @res: output sample resolution
+ * @sync_mode: filter synchronized with filter 0
+ * @fast: filter fast mode
+ */
+struct stm32_dfsdm_filter {
+ unsigned int iosr;
+ unsigned int fosr;
+ enum stm32_dfsdm_sinc_order ford;
+ u64 res;
+ unsigned int sync_mode;
+ unsigned int fast;
+};
+
+/**
+ * struct stm32_dfsdm_channel - structure relative to stm32 DFSDM channel
+ * @id: id of the channel
+ * @type: interface type linked to stm32_dfsdm_chan_type
+ * @src: interface type linked to stm32_dfsdm_chan_src
+ * @alt_si: alternative serial input interface
+ */
+struct stm32_dfsdm_channel {
+ unsigned int id;
+ unsigned int type;
+ unsigned int src;
+ unsigned int alt_si;
+};
+
+/**
+ * struct stm32_dfsdm - stm32 DFSDM driver common data (for all instances)
+ * @base: control registers base cpu addr
+ * @phys_base: DFSDM IP register physical address
+ * @regmap: regmap for register read/write
+ * @fl_list: filter resources list
+ * @num_fls: number of filter resources available
+ * @ch_list: channel resources list
+ * @num_chs: number of channel resources available
+ * @spi_master_freq: SPI clock out frequency
+ */
+struct stm32_dfsdm {
+ void __iomem *base;
+ phys_addr_t phys_base;
+ struct regmap *regmap;
+ struct stm32_dfsdm_filter *fl_list;
+ unsigned int num_fls;
+ struct stm32_dfsdm_channel *ch_list;
+ unsigned int num_chs;
+ unsigned int spi_master_freq;
+};
+
+/* DFSDM channel serial spi clock source */
+enum stm32_dfsdm_spi_clk_src {
+ DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING
+};
+
+int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm);
+int stm32_dfsdm_stop_dfsdm(struct stm32_dfsdm *dfsdm);
+
+#endif
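
As a hedged sketch (not taken from this patch), the FIELD_PREP-based helpers above compose register values as follows; "awhtr_reg" stands in for the per-filter AWHTR register offset defined earlier in this header:

    /*
     * Hedged sketch: program the analog watchdog high threshold.
     * "awhtr_reg" is a placeholder for the per-filter AWHTR offset.
     */
    u32 val = DFSDM_AWHTR_BKAWH(0x1) |	/* route break signal 0 */
              DFSDM_AWHTR_AWHT(0x7fff);	/* watchdog high threshold */

    regmap_write(dfsdm->regmap, awhtr_reg, val);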
diff --git a/drivers/iio/buffer/Kconfig b/drivers/iio/buffer/Kconfig
index 4ffd3db7817f..338774cba19b 100644
--- a/drivers/iio/buffer/Kconfig
+++ b/drivers/iio/buffer/Kconfig
@@ -29,6 +29,16 @@ config IIO_BUFFER_DMAENGINE
Should be selected by drivers that want to use this functionality.
+config IIO_BUFFER_HW_CONSUMER
+ tristate "Industrial I/O HW buffering"
+ help
+ Provides a way to bond an IIO device to another device it is directly
+ connected to in hardware. In that case, buffers for data transfers
+ are handled by hardware.
+
+ Should be selected by drivers that want to use the generic hardware
+ consumer interface.
+
config IIO_KFIFO_BUF
tristate "Industrial I/O buffering based on kfifo"
help
diff --git a/drivers/iio/buffer/Makefile b/drivers/iio/buffer/Makefile
index 95f9f41c58b7..1403eb2f9409 100644
--- a/drivers/iio/buffer/Makefile
+++ b/drivers/iio/buffer/Makefile
@@ -7,5 +7,6 @@
obj-$(CONFIG_IIO_BUFFER_CB) += industrialio-buffer-cb.o
obj-$(CONFIG_IIO_BUFFER_DMA) += industrialio-buffer-dma.o
obj-$(CONFIG_IIO_BUFFER_DMAENGINE) += industrialio-buffer-dmaengine.o
+obj-$(CONFIG_IIO_BUFFER_HW_CONSUMER) += industrialio-hw-consumer.o
obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o
obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
diff --git a/drivers/iio/buffer/industrialio-buffer-cb.c b/drivers/iio/buffer/industrialio-buffer-cb.c
index 4847534700e7..ea63c838eeae 100644
--- a/drivers/iio/buffer/industrialio-buffer-cb.c
+++ b/drivers/iio/buffer/industrialio-buffer-cb.c
@@ -104,6 +104,17 @@ error_free_cb_buff:
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
+int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buff,
+ size_t watermark)
+{
+ if (!watermark)
+ return -EINVAL;
+ cb_buff->buffer.watermark = watermark;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iio_channel_cb_set_buffer_watermark);
+
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
return iio_update_buffers(cb_buff->indio_dev, &cb_buff->buffer,
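
As a hedged illustration of the new export (not from the patch), a consumer that obtained a callback buffer can raise the watermark before starting it; "demo_cb" and "private" are placeholders:

    cb_buff = iio_channel_get_all_cb(dev, demo_cb, private);
    if (IS_ERR(cb_buff))
        return PTR_ERR(cb_buff);

    /* Batch 16 samples per callback instead of firing on every sample. */
    ret = iio_channel_cb_set_buffer_watermark(cb_buff, 16);
    if (ret)
        return ret;

    ret = iio_channel_start_all_cb(cb_buff);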
diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c
new file mode 100644
index 000000000000..95165697d8ae
--- /dev/null
+++ b/drivers/iio/buffer/industrialio-hw-consumer.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/hw-consumer.h>
+#include <linux/iio/buffer_impl.h>
+
+/**
+ * struct iio_hw_consumer - IIO hw consumer block
+ * @buffers: hardware buffers list head.
+ * @channels: IIO provider channels.
+ */
+struct iio_hw_consumer {
+ struct list_head buffers;
+ struct iio_channel *channels;
+};
+
+struct hw_consumer_buffer {
+ struct list_head head;
+ struct iio_dev *indio_dev;
+ struct iio_buffer buffer;
+ long scan_mask[];
+};
+
+static struct hw_consumer_buffer *iio_buffer_to_hw_consumer_buffer(
+ struct iio_buffer *buffer)
+{
+ return container_of(buffer, struct hw_consumer_buffer, buffer);
+}
+
+static void iio_hw_buf_release(struct iio_buffer *buffer)
+{
+ struct hw_consumer_buffer *hw_buf =
+ iio_buffer_to_hw_consumer_buffer(buffer);
+ kfree(hw_buf);
+}
+
+static const struct iio_buffer_access_funcs iio_hw_buf_access = {
+ .release = &iio_hw_buf_release,
+ .modes = INDIO_BUFFER_HARDWARE,
+};
+
+static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
+ struct iio_hw_consumer *hwc, struct iio_dev *indio_dev)
+{
+ size_t mask_size = BITS_TO_LONGS(indio_dev->masklength) * sizeof(long);
+ struct hw_consumer_buffer *buf;
+
+ list_for_each_entry(buf, &hwc->buffers, head) {
+ if (buf->indio_dev == indio_dev)
+ return buf;
+ }
+
+ buf = kzalloc(sizeof(*buf) + mask_size, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->buffer.access = &iio_hw_buf_access;
+ buf->indio_dev = indio_dev;
+ buf->buffer.scan_mask = buf->scan_mask;
+
+ iio_buffer_init(&buf->buffer);
+ list_add_tail(&buf->head, &hwc->buffers);
+
+ return buf;
+}
+
+/**
+ * iio_hw_consumer_alloc() - Allocate IIO hardware consumer
+ * @dev: Pointer to consumer device.
+ *
+ * Returns a valid iio_hw_consumer on success or an ERR_PTR() on failure.
+ */
+struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev)
+{
+ struct hw_consumer_buffer *buf;
+ struct iio_hw_consumer *hwc;
+ struct iio_channel *chan;
+ int ret;
+
+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
+ if (!hwc)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&hwc->buffers);
+
+ hwc->channels = iio_channel_get_all(dev);
+ if (IS_ERR(hwc->channels)) {
+ ret = PTR_ERR(hwc->channels);
+ goto err_free_hwc;
+ }
+
+ chan = &hwc->channels[0];
+ while (chan->indio_dev) {
+ buf = iio_hw_consumer_get_buffer(hwc, chan->indio_dev);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_put_buffers;
+ }
+ set_bit(chan->channel->scan_index, buf->buffer.scan_mask);
+ chan++;
+ }
+
+ return hwc;
+
+err_put_buffers:
+ list_for_each_entry(buf, &hwc->buffers, head)
+ iio_buffer_put(&buf->buffer);
+ iio_channel_release_all(hwc->channels);
+err_free_hwc:
+ kfree(hwc);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_alloc);
+
+/**
+ * iio_hw_consumer_free() - Free IIO hardware consumer
+ * @hwc: hw consumer to free.
+ */
+void iio_hw_consumer_free(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf, *n;
+
+ iio_channel_release_all(hwc->channels);
+ list_for_each_entry_safe(buf, n, &hwc->buffers, head)
+ iio_buffer_put(&buf->buffer);
+ kfree(hwc);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_free);
+
+static void devm_iio_hw_consumer_release(struct device *dev, void *res)
+{
+ iio_hw_consumer_free(*(struct iio_hw_consumer **)res);
+}
+
+static int devm_iio_hw_consumer_match(struct device *dev, void *res, void *data)
+{
+ struct iio_hw_consumer **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+ return *r == data;
+}
+
+/**
+ * devm_iio_hw_consumer_alloc - Resource-managed iio_hw_consumer_alloc()
+ * @dev: Pointer to consumer device.
+ *
+ * Managed iio_hw_consumer_alloc. iio_hw_consumer allocated with this function
+ * is automatically freed on driver detach.
+ *
+ * If an iio_hw_consumer allocated with this function needs to be freed
+ * separately, devm_iio_hw_consumer_free() must be used.
+ *
+ * Returns a pointer to the allocated iio_hw_consumer on success, NULL on failure.
+ */
+struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
+{
+ struct iio_hw_consumer **ptr, *iio_hwc;
+
+ ptr = devres_alloc(devm_iio_hw_consumer_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ iio_hwc = iio_hw_consumer_alloc(dev);
+ if (IS_ERR(iio_hwc)) {
+ devres_free(ptr);
+ } else {
+ *ptr = iio_hwc;
+ devres_add(dev, ptr);
+ }
+
+ return iio_hwc;
+}
+EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_alloc);
+
+/**
+ * devm_iio_hw_consumer_free - Resource-managed iio_hw_consumer_free()
+ * @dev: Pointer to consumer device.
+ * @hwc: iio_hw_consumer to free.
+ *
+ * Free iio_hw_consumer allocated with devm_iio_hw_consumer_alloc().
+ */
+void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_iio_hw_consumer_release,
+ devm_iio_hw_consumer_match, hwc);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_free);
+
+/**
+ * iio_hw_consumer_enable() - Enable IIO hardware consumer
+ * @hwc: iio_hw_consumer to enable.
+ *
+ * Returns 0 on success.
+ */
+int iio_hw_consumer_enable(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf;
+ int ret;
+
+ list_for_each_entry(buf, &hwc->buffers, head) {
+ ret = iio_update_buffers(buf->indio_dev, &buf->buffer, NULL);
+ if (ret)
+ goto err_disable_buffers;
+ }
+
+ return 0;
+
+err_disable_buffers:
+ list_for_each_entry_continue_reverse(buf, &hwc->buffers, head)
+ iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_enable);
+
+/**
+ * iio_hw_consumer_disable() - Disable IIO hardware consumer
+ * @hwc: iio_hw_consumer to disable.
+ */
+void iio_hw_consumer_disable(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf;
+
+ list_for_each_entry(buf, &hwc->buffers, head)
+ iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_disable);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Hardware consumer buffer for the IIO framework");
+MODULE_LICENSE("GPL v2");
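
Putting the new API together, a hedged sketch of the call sequence a consumer driver would follow (error handling simplified; not taken from the patch):

    struct iio_hw_consumer *hwc;
    int ret;

    hwc = devm_iio_hw_consumer_alloc(dev);	/* freed on driver detach */
    if (!hwc)
        return -ENOMEM;

    ret = iio_hw_consumer_enable(hwc);	/* enables all provider buffers */
    if (ret)
        return ret;

    /* ... data now flows between the devices in hardware ... */

    iio_hw_consumer_disable(hwc);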
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 4c45488e3a7f..c775fedbcaf6 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -43,7 +43,7 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
#ifdef CONFIG_IIO_BUFFER
struct poll_table_struct;
-unsigned int iio_buffer_poll(struct file *filp,
+__poll_t iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait);
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
size_t n, loff_t *f_ps);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index d2b465140a6b..0bc2fe31f211 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -169,7 +169,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
* Return: (POLLIN | POLLRDNORM) if data is available for reading
* or 0 for other cases
*/
-unsigned int iio_buffer_poll(struct file *filp,
+__poll_t iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct iio_dev *indio_dev = filp->private_data;
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 90fac8ec63c9..0bcf073e46db 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -95,12 +95,12 @@ EXPORT_SYMBOL(iio_push_event);
* Return: (POLLIN | POLLRDNORM) if data is available for reading
* or a negative error code on failure
*/
-static unsigned int iio_event_poll(struct file *filep,
+static __poll_t iio_event_poll(struct file *filep,
struct poll_table_struct *wait)
{
struct iio_dev *indio_dev = filep->private_data;
struct iio_event_interface *ev_int = indio_dev->event_interface;
- unsigned int events = 0;
+ __poll_t events = 0;
if (!indio_dev->info)
return events;
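
The same __poll_t conversion applies to any poll handler in this style; a minimal hedged sketch, where "demo_wait" (a wait_queue_head_t) and "data_ready()" are hypothetical:

    static __poll_t demo_poll(struct file *filp, struct poll_table_struct *wait)
    {
        __poll_t events = 0;

        poll_wait(filp, &demo_wait, wait);	/* hypothetical wait queue */
        if (data_ready())			/* hypothetical helper */
            events = POLLIN | POLLRDNORM;
        return events;
    }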
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 069defcc6d9b..ec98790e2a28 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -664,9 +664,8 @@ err_unlock:
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
-static int iio_read_channel_attribute(struct iio_channel *chan,
- int *val, int *val2,
- enum iio_chan_info_enum attribute)
+int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
+ enum iio_chan_info_enum attribute)
{
int ret;
@@ -682,6 +681,7 @@ err_unlock:
return ret;
}
+EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
@@ -850,7 +850,8 @@ static int iio_channel_write(struct iio_channel *chan, int val, int val2,
chan->channel, val, val2, info);
}
-int iio_write_channel_raw(struct iio_channel *chan, int val)
+int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
+ enum iio_chan_info_enum attribute)
{
int ret;
@@ -860,12 +861,18 @@ int iio_write_channel_raw(struct iio_channel *chan, int val)
goto err_unlock;
}
- ret = iio_channel_write(chan, val, 0, IIO_CHAN_INFO_RAW);
+ ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
mutex_unlock(&chan->indio_dev->info_exist_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
+
+int iio_write_channel_raw(struct iio_channel *chan, int val)
+{
+ return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
+}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
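
With iio_write_channel_raw() reduced to a wrapper, any channel attribute can be read or written through the newly exported helpers; a hedged sketch (the attribute choices are illustrative):

    int val, val2, ret;

    /* Read the channel scale through the exported read helper. */
    ret = iio_read_channel_attribute(chan, &val, &val2, IIO_CHAN_INFO_SCALE);
    if (ret < 0)
        return ret;

    /* Write 1.5 as a calibration scale, assuming INT_PLUS_MICRO encoding. */
    ret = iio_write_channel_attribute(chan, 1, 500000,
                                      IIO_CHAN_INFO_CALIBSCALE);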
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index cbf186522016..5cd700421695 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -4,6 +4,7 @@ menuconfig INFINIBAND
depends on NET
depends on INET
depends on m || IPV6 != m
+ depends on !ALPHA
select IRQ_POLL
---help---
Core support for InfiniBand (IB). Make sure to also select
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 504b926552c6..f69833db0a32 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -12,7 +12,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
- security.o nldev.o
+ security.o nldev.o restrack.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index f4e8185bccd3..a5b4cf030c11 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -243,8 +243,7 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
EXPORT_SYMBOL(rdma_copy_addr);
int rdma_translate_ip(const struct sockaddr *addr,
- struct rdma_dev_addr *dev_addr,
- u16 *vlan_id)
+ struct rdma_dev_addr *dev_addr)
{
struct net_device *dev;
@@ -266,9 +265,6 @@ int rdma_translate_ip(const struct sockaddr *addr,
return -EADDRNOTAVAIL;
rdma_copy_addr(dev_addr, dev, NULL);
- dev_addr->bound_dev_if = dev->ifindex;
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
dev_put(dev);
break;
#if IS_ENABLED(CONFIG_IPV6)
@@ -279,9 +275,6 @@ int rdma_translate_ip(const struct sockaddr *addr,
&((const struct sockaddr_in6 *)addr)->sin6_addr,
dev, 1)) {
rdma_copy_addr(dev_addr, dev, NULL);
- dev_addr->bound_dev_if = dev->ifindex;
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
break;
}
}
@@ -481,7 +474,7 @@ static int addr_resolve_neigh(struct dst_entry *dst,
if (dst->dev->flags & IFF_LOOPBACK) {
int ret;
- ret = rdma_translate_ip(dst_in, addr, NULL);
+ ret = rdma_translate_ip(dst_in, addr);
if (!ret)
memcpy(addr->dst_dev_addr, addr->src_dev_addr,
MAX_ADDR_LEN);
@@ -558,7 +551,7 @@ static int addr_resolve(struct sockaddr *src_in,
}
if (ndev->flags & IFF_LOOPBACK) {
- ret = rdma_translate_ip(dst_in, addr, NULL);
+ ret = rdma_translate_ip(dst_in, addr);
/*
* Put the loopback device and get the translated
* device instead.
@@ -744,7 +737,6 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
EXPORT_SYMBOL(rdma_addr_cancel);
struct resolve_cb_context {
- struct rdma_dev_addr *addr;
struct completion comp;
int status;
};
@@ -752,39 +744,31 @@ struct resolve_cb_context {
static void resolve_cb(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context)
{
- if (!status)
- memcpy(((struct resolve_cb_context *)context)->addr,
- addr, sizeof(struct rdma_dev_addr));
((struct resolve_cb_context *)context)->status = status;
complete(&((struct resolve_cb_context *)context)->comp);
}
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
- u8 *dmac, u16 *vlan_id, int *if_index,
+ u8 *dmac, const struct net_device *ndev,
int *hoplimit)
{
- int ret = 0;
struct rdma_dev_addr dev_addr;
struct resolve_cb_context ctx;
- struct net_device *dev;
-
union {
struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
-
+ int ret;
rdma_gid2ip(&sgid_addr._sockaddr, sgid);
rdma_gid2ip(&dgid_addr._sockaddr, dgid);
memset(&dev_addr, 0, sizeof(dev_addr));
- if (if_index)
- dev_addr.bound_dev_if = *if_index;
+ dev_addr.bound_dev_if = ndev->ifindex;
dev_addr.net = &init_net;
- ctx.addr = &dev_addr;
init_completion(&ctx.comp);
ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
&dev_addr, 1000, resolve_cb, &ctx);
@@ -798,42 +782,9 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
return ret;
memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
- dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
- if (!dev)
- return -ENODEV;
- if (if_index)
- *if_index = dev_addr.bound_dev_if;
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
- if (hoplimit)
- *hoplimit = dev_addr.hoplimit;
- dev_put(dev);
- return ret;
-}
-EXPORT_SYMBOL(rdma_addr_find_l2_eth_by_grh);
-
-int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
-{
- int ret = 0;
- struct rdma_dev_addr dev_addr;
- union {
- struct sockaddr _sockaddr;
- struct sockaddr_in _sockaddr_in;
- struct sockaddr_in6 _sockaddr_in6;
- } gid_addr;
-
- rdma_gid2ip(&gid_addr._sockaddr, sgid);
-
- memset(&dev_addr, 0, sizeof(dev_addr));
- dev_addr.net = &init_net;
- ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
- if (ret)
- return ret;
-
- memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN);
- return ret;
+ *hoplimit = dev_addr.hoplimit;
+ return 0;
}
-EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid);
static int netevent_callback(struct notifier_block *self, unsigned long event,
void *ctx)
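
Callers of rdma_addr_find_l2_eth_by_grh() must now pass the bound net_device and valid hoplimit storage, since *hoplimit is written unconditionally on success; a hedged sketch of an updated call site (sgid, dgid and ndev are placeholders):

    u8 dmac[ETH_ALEN];
    int hoplimit;
    int ret;

    ret = rdma_addr_find_l2_eth_by_grh(&sgid, &dgid, dmac, ndev, &hoplimit);
    if (ret)
        return ret;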
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 77515638c55c..e9a409d7f4e2 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -573,27 +573,24 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
struct ib_gid_attr attr;
if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
- goto next;
+ continue;
if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
- goto next;
+ continue;
memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));
- if (filter(gid, &attr, context))
+ if (filter(gid, &attr, context)) {
found = true;
-
-next:
- if (found)
+ if (index)
+ *index = i;
break;
+ }
}
read_unlock_irqrestore(&table->rwlock, flags);
if (!found)
return -ENOENT;
-
- if (index)
- *index = i;
return 0;
}
@@ -824,12 +821,7 @@ static int gid_table_setup_one(struct ib_device *ib_dev)
if (err)
return err;
- err = roce_rescan_device(ib_dev);
-
- if (err) {
- gid_table_cleanup_one(ib_dev);
- gid_table_release_one(ib_dev);
- }
+ rdma_roce_rescan_device(ib_dev);
return err;
}
@@ -883,7 +875,6 @@ int ib_find_gid_by_filter(struct ib_device *device,
port_num, filter,
context, index);
}
-EXPORT_SYMBOL(ib_find_gid_by_filter);
int ib_get_cached_pkey(struct ib_device *device,
u8 port_num,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f6b159d79977..e6749157fd86 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -452,13 +452,14 @@ static void cm_set_private_data(struct cm_id_private *cm_id_priv,
cm_id_priv->private_data_len = private_data_len;
}
-static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
- struct ib_grh *grh, struct cm_av *av)
+static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
+ struct ib_grh *grh, struct cm_av *av)
{
av->port = port;
av->pkey_index = wc->pkey_index;
- ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
- grh, &av->ah_attr);
+ return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
+ port->port_num, wc,
+ grh, &av->ah_attr);
}
static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
@@ -494,8 +495,11 @@ static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
return ret;
av->port = port;
- ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
- &av->ah_attr);
+ ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
+ &av->ah_attr);
+ if (ret)
+ return ret;
+
av->timeout = path->packet_life_time + 1;
spin_lock_irqsave(&cm.lock, flags);
@@ -1560,6 +1564,35 @@ static u16 cm_get_bth_pkey(struct cm_work *work)
return pkey;
}
+/**
+ * cm_opa_to_ib_sgid - Convert an OPA SGID to an IB SGID
+ * @work: Work completion
+ * @path: Path record
+ *
+ * ULPs (such as IPoIB) do not understand OPA GIDs and will
+ * reject them as the local_gid will not match the sgid. Therefore,
+ * change the pathrec's SGID to an IB SGID.
+ */
+static void cm_opa_to_ib_sgid(struct cm_work *work,
+ struct sa_path_rec *path)
+{
+ struct ib_device *dev = work->port->cm_dev->ib_device;
+ u8 port_num = work->port->port_num;
+
+ if (rdma_cap_opa_ah(dev, port_num) &&
+ (ib_is_opa_gid(&path->sgid))) {
+ union ib_gid sgid;
+
+ if (ib_get_cached_gid(dev, port_num, 0, &sgid, NULL)) {
+ dev_warn(&dev->dev,
+ "Error updating sgid in CM request\n");
+ return;
+ }
+
+ path->sgid = sgid;
+ }
+}
+
static void cm_format_req_event(struct cm_work *work,
struct cm_id_private *cm_id_priv,
struct ib_cm_id *listen_id)
@@ -1573,10 +1606,13 @@ static void cm_format_req_event(struct cm_work *work,
param->bth_pkey = cm_get_bth_pkey(work);
param->port = cm_id_priv->av.port->port_num;
param->primary_path = &work->path[0];
- if (cm_req_has_alt_path(req_msg))
+ cm_opa_to_ib_sgid(work, param->primary_path);
+ if (cm_req_has_alt_path(req_msg)) {
param->alternate_path = &work->path[1];
- else
+ cm_opa_to_ib_sgid(work, param->alternate_path);
+ } else {
param->alternate_path = NULL;
+ }
param->remote_ca_guid = req_msg->local_ca_guid;
param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
@@ -1826,9 +1862,11 @@ static int cm_req_handler(struct cm_work *work)
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
cm_id_priv->id.remote_id = req_msg->local_comm_id;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
+ ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto destroy;
cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
@@ -1841,9 +1879,10 @@ static int cm_req_handler(struct cm_work *work)
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
if (!listen_cm_id_priv) {
+ pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
+ be32_to_cpu(cm_id->local_id));
ret = -EINVAL;
- kfree(cm_id_priv->timewait_info);
- goto destroy;
+ goto free_timeinfo;
}
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
@@ -1861,56 +1900,50 @@ static int cm_req_handler(struct cm_work *work)
work->port->port_num,
grh->sgid_index,
&gid, &gid_attr);
- if (!ret) {
- if (gid_attr.ndev) {
- work->path[0].rec_type =
- sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
- sa_path_set_ifindex(&work->path[0],
- gid_attr.ndev->ifindex);
- sa_path_set_ndev(&work->path[0],
- dev_net(gid_attr.ndev));
- dev_put(gid_attr.ndev);
- } else {
- cm_path_set_rec_type(work->port->cm_dev->ib_device,
- work->port->port_num,
- &work->path[0],
- &req_msg->primary_local_gid);
- }
- if (cm_req_has_alt_path(req_msg))
- work->path[1].rec_type = work->path[0].rec_type;
- cm_format_paths_from_req(req_msg, &work->path[0],
- &work->path[1]);
- if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
- sa_path_set_dmac(&work->path[0],
- cm_id_priv->av.ah_attr.roce.dmac);
- work->path[0].hop_limit = grh->hop_limit;
- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
- cm_id_priv);
+ if (ret) {
+ ib_send_cm_rej(cm_id, IB_CM_REJ_UNSUPPORTED, NULL, 0, NULL, 0);
+ goto rejected;
+ }
+
+ if (gid_attr.ndev) {
+ work->path[0].rec_type =
+ sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
+ sa_path_set_ifindex(&work->path[0],
+ gid_attr.ndev->ifindex);
+ sa_path_set_ndev(&work->path[0],
+ dev_net(gid_attr.ndev));
+ dev_put(gid_attr.ndev);
+ } else {
+ cm_path_set_rec_type(work->port->cm_dev->ib_device,
+ work->port->port_num,
+ &work->path[0],
+ &req_msg->primary_local_gid);
}
+ if (cm_req_has_alt_path(req_msg))
+ work->path[1].rec_type = work->path[0].rec_type;
+ cm_format_paths_from_req(req_msg, &work->path[0],
+ &work->path[1]);
+ if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+ sa_path_set_dmac(&work->path[0],
+ cm_id_priv->av.ah_attr.roce.dmac);
+ work->path[0].hop_limit = grh->hop_limit;
+ ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
+ cm_id_priv);
if (ret) {
- int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
- work->port->port_num, 0,
- &work->path[0].sgid,
- &gid_attr);
- if (!err && gid_attr.ndev) {
- work->path[0].rec_type =
- sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
- sa_path_set_ifindex(&work->path[0],
- gid_attr.ndev->ifindex);
- sa_path_set_ndev(&work->path[0],
- dev_net(gid_attr.ndev));
- dev_put(gid_attr.ndev);
- } else {
- cm_path_set_rec_type(work->port->cm_dev->ib_device,
- work->port->port_num,
- &work->path[0],
- &req_msg->primary_local_gid);
- }
- if (cm_req_has_alt_path(req_msg))
- work->path[1].rec_type = work->path[0].rec_type;
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
- &work->path[0].sgid, sizeof work->path[0].sgid,
- NULL, 0);
+ int err;
+
+ err = ib_get_cached_gid(work->port->cm_dev->ib_device,
+ work->port->port_num, 0,
+ &work->path[0].sgid,
+ NULL);
+ if (err)
+ ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+ NULL, 0, NULL, 0);
+ else
+ ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+ &work->path[0].sgid,
+ sizeof(work->path[0].sgid),
+ NULL, 0);
goto rejected;
}
if (cm_req_has_alt_path(req_msg)) {
@@ -1919,7 +1952,7 @@ static int cm_req_handler(struct cm_work *work)
if (ret) {
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
- sizeof work->path[0].sgid, NULL, 0);
+ sizeof(work->path[0].sgid), NULL, 0);
goto rejected;
}
}
@@ -1945,6 +1978,8 @@ static int cm_req_handler(struct cm_work *work)
rejected:
atomic_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
+free_timeinfo:
+ kfree(cm_id_priv->timewait_info);
destroy:
ib_destroy_cm_id(cm_id);
return ret;
@@ -1997,6 +2032,8 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REQ_RCVD &&
cm_id->state != IB_CM_MRA_REQ_SENT) {
+ pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
ret = -EINVAL;
goto out;
}
@@ -2063,6 +2100,8 @@ int ib_send_cm_rtu(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REP_RCVD &&
cm_id->state != IB_CM_MRA_REP_SENT) {
+ pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
+ be32_to_cpu(cm_id->local_id), cm_id->state);
ret = -EINVAL;
goto error;
}
@@ -2170,6 +2209,8 @@ static int cm_rep_handler(struct cm_work *work)
cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
if (!cm_id_priv) {
cm_dup_rep_handler(work);
+ pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
+ be32_to_cpu(rep_msg->remote_comm_id));
return -EINVAL;
}
@@ -2183,6 +2224,10 @@ static int cm_rep_handler(struct cm_work *work)
default:
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
+ pr_debug("%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
+ __func__, cm_id_priv->id.state,
+ be32_to_cpu(rep_msg->local_comm_id),
+ be32_to_cpu(rep_msg->remote_comm_id));
goto error;
}
@@ -2196,6 +2241,8 @@ static int cm_rep_handler(struct cm_work *work)
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
+ pr_debug("%s: Failed to insert remote id %d\n", __func__,
+ be32_to_cpu(rep_msg->remote_comm_id));
goto error;
}
/* Check for a stale connection. */
@@ -2213,6 +2260,10 @@ static int cm_rep_handler(struct cm_work *work)
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
NULL, 0);
ret = -EINVAL;
+ pr_debug("%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
+ __func__, be32_to_cpu(rep_msg->local_comm_id),
+ be32_to_cpu(rep_msg->remote_comm_id));
+
if (cur_cm_id_priv) {
cm_id = &cur_cm_id_priv->id;
ib_send_cm_dreq(cm_id, NULL, 0);
@@ -2359,6 +2410,8 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id,
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_ESTABLISHED) {
+ pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id->local_id), cm_id->state);
ret = -EINVAL;
goto out;
}
@@ -2428,6 +2481,8 @@ int ib_send_cm_drep(struct ib_cm_id *cm_id,
if (cm_id->state != IB_CM_DREQ_RCVD) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
kfree(data);
+ pr_debug("%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n",
+ __func__, be32_to_cpu(cm_id->local_id), cm_id->state);
return -EINVAL;
}
@@ -2493,6 +2548,9 @@ static int cm_dreq_handler(struct cm_work *work)
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_DREQ_COUNTER]);
cm_issue_drep(work->port, work->mad_recv_wc);
+ pr_debug("%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
+ __func__, be32_to_cpu(dreq_msg->local_comm_id),
+ be32_to_cpu(dreq_msg->remote_comm_id));
return -EINVAL;
}
@@ -2535,6 +2593,9 @@ static int cm_dreq_handler(struct cm_work *work)
counter[CM_DREQ_COUNTER]);
goto unlock;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
goto unlock;
}
cm_id_priv->id.state = IB_CM_DREQ_RCVD;
@@ -2638,6 +2699,8 @@ int ib_send_cm_rej(struct ib_cm_id *cm_id,
cm_enter_timewait(cm_id_priv);
break;
default:
+ pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
ret = -EINVAL;
goto out;
}
@@ -2748,6 +2811,9 @@ static int cm_rej_handler(struct cm_work *work)
/* fall through */
default:
spin_unlock_irq(&cm_id_priv->lock);
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
goto out;
}
@@ -2811,6 +2877,9 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
}
/* fall through */
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
goto error1;
}
@@ -2912,6 +2981,9 @@ static int cm_mra_handler(struct cm_work *work)
counter[CM_MRA_COUNTER]);
/* fall through */
default:
+ pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
goto out;
}
@@ -3085,6 +3157,12 @@ static int cm_lap_handler(struct cm_work *work)
if (!cm_id_priv)
return -EINVAL;
+ ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto deref;
+
param = &work->cm_event.param.lap_rcvd;
memset(&work->path[0], 0, sizeof(work->path[1]));
cm_path_set_rec_type(work->port->cm_dev->ib_device,
@@ -3131,9 +3209,6 @@ static int cm_lap_handler(struct cm_work *work)
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
cm_id_priv);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
@@ -3386,6 +3461,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
struct cm_sidr_req_msg *sidr_req_msg;
struct ib_wc *wc;
+ int ret;
cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
if (IS_ERR(cm_id))
@@ -3398,9 +3474,12 @@ static int cm_sidr_req_handler(struct cm_work *work)
wc = work->mad_recv_wc->wc;
cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
cm_id_priv->av.dgid.global.interface_id = 0;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
+ ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto out;
+
cm_id_priv->id.remote_id = sidr_req_msg->request_id;
cm_id_priv->tid = sidr_req_msg->hdr.tid;
atomic_inc(&cm_id_priv->work_count);
@@ -3692,6 +3771,7 @@ static void cm_work_handler(struct work_struct *_work)
ret = cm_timewait_handler(work);
break;
default:
+ pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
ret = -EINVAL;
break;
}
@@ -3727,6 +3807,8 @@ static int cm_establish(struct ib_cm_id *cm_id)
ret = -EISCONN;
break;
default:
+ pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id->local_id), cm_id->state);
ret = -EINVAL;
break;
}
@@ -3924,6 +4006,9 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
break;
}
@@ -3971,6 +4056,9 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
break;
}
@@ -4030,6 +4118,9 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
break;
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f6983357145d..e66963ca58bd 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -601,7 +601,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
int ret;
if (addr->sa_family != AF_IB) {
- ret = rdma_translate_ip(addr, dev_addr, NULL);
+ ret = rdma_translate_ip(addr, dev_addr);
} else {
cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
ret = 0;
@@ -612,11 +612,14 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
static inline int cma_validate_port(struct ib_device *device, u8 port,
enum ib_gid_type gid_type,
- union ib_gid *gid, int dev_type,
- int bound_if_index)
+ union ib_gid *gid,
+ struct rdma_id_private *id_priv)
{
- int ret = -ENODEV;
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ int bound_if_index = dev_addr->bound_dev_if;
+ int dev_type = dev_addr->dev_type;
struct net_device *ndev = NULL;
+ int ret = -ENODEV;
if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
return ret;
@@ -624,11 +627,13 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
return ret;
- if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
- ndev = dev_get_by_index(&init_net, bound_if_index);
- else
+ if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
+ ndev = dev_get_by_index(dev_addr->net, bound_if_index);
+ if (!ndev)
+ return ret;
+ } else {
gid_type = IB_GID_TYPE_IB;
-
+ }
ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
ndev, NULL);
@@ -669,8 +674,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
rdma_protocol_ib(cma_dev->device, port) ?
IB_GID_TYPE_IB :
listen_id_priv->gid_type, gidp,
- dev_addr->dev_type,
- dev_addr->bound_dev_if);
+ id_priv);
if (!ret) {
id_priv->id.port_num = port;
goto out;
@@ -691,8 +695,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
rdma_protocol_ib(cma_dev->device, port) ?
IB_GID_TYPE_IB :
cma_dev->default_gid_type[port - 1],
- gidp, dev_addr->dev_type,
- dev_addr->bound_dev_if);
+ gidp, id_priv);
if (!ret) {
id_priv->id.port_num = port;
goto out;
@@ -2036,6 +2039,33 @@ __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
}
EXPORT_SYMBOL(rdma_get_service_id);
+void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
+ union ib_gid *dgid)
+{
+ struct rdma_addr *addr = &cm_id->route.addr;
+
+ if (!cm_id->device) {
+ if (sgid)
+ memset(sgid, 0, sizeof(*sgid));
+ if (dgid)
+ memset(dgid, 0, sizeof(*dgid));
+ return;
+ }
+
+ if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) {
+ if (sgid)
+ rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid);
+ if (dgid)
+ rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid);
+ } else {
+ if (sgid)
+ rdma_addr_get_sgid(&addr->dev_addr, sgid);
+ if (dgid)
+ rdma_addr_get_dgid(&addr->dev_addr, dgid);
+ }
+}
+EXPORT_SYMBOL(rdma_read_gids);
+
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
struct rdma_id_private *id_priv = iw_id->context;
@@ -2132,7 +2162,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
conn_id->state = RDMA_CM_CONNECT;
- ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
+ ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
@@ -2414,6 +2444,26 @@ out:
kfree(work);
}
+static void cma_init_resolve_route_work(struct cma_work *work,
+ struct rdma_id_private *id_priv)
+{
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ROUTE_QUERY;
+ work->new_state = RDMA_CM_ROUTE_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+}
+
+static void cma_init_resolve_addr_work(struct cma_work *work,
+ struct rdma_id_private *id_priv)
+{
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ADDR_QUERY;
+ work->new_state = RDMA_CM_ADDR_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+}
+
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
struct rdma_route *route = &id_priv->id.route;
@@ -2424,11 +2474,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ cma_init_resolve_route_work(work, id_priv);
route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
if (!route->path_rec) {
@@ -2449,10 +2495,63 @@ err1:
return ret;
}
-int rdma_set_ib_paths(struct rdma_cm_id *id,
- struct sa_path_rec *path_rec, int num_paths)
+static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
+ unsigned long supported_gids,
+ enum ib_gid_type default_gid)
+{
+ if ((network_type == RDMA_NETWORK_IPV4 ||
+ network_type == RDMA_NETWORK_IPV6) &&
+ test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
+ return IB_GID_TYPE_ROCE_UDP_ENCAP;
+
+ return default_gid;
+}
+
+/*
+ * cma_iboe_set_path_rec_l2_fields() is a helper function that sets
+ * the path record type based on the GID type.
+ * It also sets up the other L2 fields of the path record, including
+ * the destination MAC address and the netdev ifindex.
+ * It returns the netdev of the bound interface for this path record entry.
+ */
+static struct net_device *
+cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
+{
+ struct rdma_route *route = &id_priv->id.route;
+ enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
+ struct rdma_addr *addr = &route->addr;
+ unsigned long supported_gids;
+ struct net_device *ndev;
+
+ if (!addr->dev_addr.bound_dev_if)
+ return NULL;
+
+ ndev = dev_get_by_index(addr->dev_addr.net,
+ addr->dev_addr.bound_dev_if);
+ if (!ndev)
+ return NULL;
+
+ supported_gids = roce_gid_type_mask_support(id_priv->id.device,
+ id_priv->id.port_num);
+ gid_type = cma_route_gid_type(addr->dev_addr.network,
+ supported_gids,
+ id_priv->gid_type);
+ /* Use the hint from IP Stack to select GID Type */
+ if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
+ gid_type = ib_network_to_gid_type(addr->dev_addr.network);
+ route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
+
+ sa_path_set_ndev(route->path_rec, addr->dev_addr.net);
+ sa_path_set_ifindex(route->path_rec, ndev->ifindex);
+ sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
+ return ndev;
+}
+
+int rdma_set_ib_path(struct rdma_cm_id *id,
+ struct sa_path_rec *path_rec)
{
struct rdma_id_private *id_priv;
+ struct net_device *ndev;
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
@@ -2460,20 +2559,33 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
RDMA_CM_ROUTE_RESOLVED))
return -EINVAL;
- id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
+ id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
GFP_KERNEL);
if (!id->route.path_rec) {
ret = -ENOMEM;
goto err;
}
- id->route.num_paths = num_paths;
+ if (rdma_protocol_roce(id->device, id->port_num)) {
+ ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
+ if (!ndev) {
+ ret = -ENODEV;
+ goto err_free;
+ }
+ dev_put(ndev);
+ }
+
+ id->route.num_paths = 1;
return 0;
+
+err_free:
+ kfree(id->route.path_rec);
+ id->route.path_rec = NULL;
err:
cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
return ret;
}
-EXPORT_SYMBOL(rdma_set_ib_paths);
+EXPORT_SYMBOL(rdma_set_ib_path);
static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
@@ -2483,11 +2595,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ cma_init_resolve_route_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
}
@@ -2510,26 +2618,14 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
return 0;
}
-static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
- unsigned long supported_gids,
- enum ib_gid_type default_gid)
-{
- if ((network_type == RDMA_NETWORK_IPV4 ||
- network_type == RDMA_NETWORK_IPV6) &&
- test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
- return IB_GID_TYPE_ROCE_UDP_ENCAP;
-
- return default_gid;
-}
-
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
struct rdma_route *route = &id_priv->id.route;
struct rdma_addr *addr = &route->addr;
struct cma_work *work;
int ret;
- struct net_device *ndev = NULL;
- enum ib_gid_type gid_type = IB_GID_TYPE_IB;
+ struct net_device *ndev;
+
u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
@@ -2539,9 +2635,6 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
-
route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
if (!route->path_rec) {
ret = -ENOMEM;
@@ -2550,42 +2643,17 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->num_paths = 1;
- if (addr->dev_addr.bound_dev_if) {
- unsigned long supported_gids;
-
- ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
- if (!ndev) {
- ret = -ENODEV;
- goto err2;
- }
-
- supported_gids = roce_gid_type_mask_support(id_priv->id.device,
- id_priv->id.port_num);
- gid_type = cma_route_gid_type(addr->dev_addr.network,
- supported_gids,
- id_priv->gid_type);
- route->path_rec->rec_type =
- sa_conv_gid_to_pathrec_type(gid_type);
- sa_path_set_ndev(route->path_rec, &init_net);
- sa_path_set_ifindex(route->path_rec, ndev->ifindex);
- }
+ ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
if (!ndev) {
ret = -ENODEV;
goto err2;
}
- sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
-
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
&route->path_rec->sgid);
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
&route->path_rec->dgid);
- /* Use the hint from IP Stack to select GID Type */
- if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
- gid_type = ib_network_to_gid_type(addr->dev_addr.network);
- route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
-
if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
/* TODO: get the hoplimit from the inet/inet6 device */
route->path_rec->hop_limit = addr->dev_addr.hoplimit;
@@ -2607,11 +2675,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2;
}
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
- work->event.status = 0;
-
+ cma_init_resolve_route_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
@@ -2791,11 +2855,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ADDR_QUERY;
- work->new_state = RDMA_CM_ADDR_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+ cma_init_resolve_addr_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
err:
@@ -2821,11 +2881,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ADDR_QUERY;
- work->new_state = RDMA_CM_ADDR_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+ cma_init_resolve_addr_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
err:
@@ -3404,9 +3460,10 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
event.status = ret;
break;
}
- ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
- id_priv->id.route.path_rec,
- &event.param.ud.ah_attr);
+ ib_init_ah_attr_from_path(id_priv->id.device,
+ id_priv->id.port_num,
+ id_priv->id.route.path_rec,
+ &event.param.ud.ah_attr);
event.param.ud.qp_num = rep->qpn;
event.param.ud.qkey = rep->qkey;
event.event = RDMA_CM_EVENT_ESTABLISHED;
@@ -3873,7 +3930,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
struct rdma_dev_addr *dev_addr =
&id_priv->id.route.addr.dev_addr;
struct net_device *ndev =
- dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
enum ib_gid_type gid_type =
id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
@@ -4010,8 +4067,10 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
} else if (addr->sa_family == AF_INET6) {
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
} else {
- mgid->raw[0] = (gid_type == IB_GID_TYPE_IB) ? 0xff : 0;
- mgid->raw[1] = (gid_type == IB_GID_TYPE_IB) ? 0x0e : 0;
+ mgid->raw[0] =
+ (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
+ mgid->raw[1] =
+ (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
mgid->raw[2] = 0;
mgid->raw[3] = 0;
mgid->raw[4] = 0;
@@ -4061,7 +4120,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
if (!ndev) {
err = -ENODEV;
goto out2;
@@ -4179,7 +4238,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
struct net_device *ndev = NULL;
if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(&init_net,
+ ndev = dev_get_by_index(dev_addr->net,
dev_addr->bound_dev_if);
if (ndev) {
cma_igmp_send(ndev,
@@ -4235,7 +4294,7 @@ static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
if (event != NETDEV_BONDING_FAILOVER)
return NOTIFY_DONE;
- if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
+ if (!netif_is_bond_master(ndev))
return NOTIFY_DONE;
mutex_lock(&lock);
@@ -4432,7 +4491,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
goto out;
if (ibnl_put_attr(skb, nlh,
- rdma_addr_size(cma_src_addr(id_priv)),
+ rdma_addr_size(cma_dst_addr(id_priv)),
cma_dst_addr(id_priv),
RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
goto out;
@@ -4444,6 +4503,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
id_stats->qp_type = id->qp_type;
i_id++;
+ nlmsg_end(skb, nlh);
}
cb->args[1] = 0;
@@ -4458,7 +4518,7 @@ out:
return skb->len;
}
-static const struct rdma_nl_cbs cma_cb_table[] = {
+static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
};
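
A hedged usage sketch for the new rdma_read_gids() export (cm_id is a connected rdma_cm_id): the helper hides whether the GIDs are derived from the IP addresses (RoCE) or from the device address (IB/iWARP), and zero-fills both when no device is bound:

    union ib_gid sgid, dgid;

    rdma_read_gids(cm_id, &sgid, &dgid);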
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 31dfee0c8295..eee38b40be99 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -295,7 +295,7 @@ static struct config_group *make_cma_dev(struct config_group *group,
goto fail;
}
- strncpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
+ strlcpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
config_group_init_type_name(&cma_dev_group->ports_group, "ports",
&cma_ports_group_type);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index a1d687a664f8..c4560d84dfae 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -40,8 +40,12 @@
#include <rdma/ib_verbs.h>
#include <rdma/opa_addr.h>
#include <rdma/ib_mad.h>
+#include <rdma/restrack.h>
#include "mad_priv.h"
+/* Total number of ports combined across all struct ib_device instances */
+#define RDMA_MAX_PORTS 1024
+
struct pkey_index_qp_list {
struct list_head pkey_index_list;
u16 pkey_index;
@@ -137,7 +141,6 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
int roce_gid_mgmt_init(void);
void roce_gid_mgmt_cleanup(void);
-int roce_rescan_device(struct ib_device *ib_dev);
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
int ib_cache_setup_one(struct ib_device *device);
@@ -191,13 +194,6 @@ void ib_sa_cleanup(void);
int rdma_nl_init(void);
void rdma_nl_exit(void);
-/**
- * Check if there are any listeners to the netlink group
- * @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
- */
-int ibnl_chk_listeners(unsigned int group);
-
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack);
@@ -213,11 +209,6 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
u64 *sn_pfx);
#ifdef CONFIG_SECURITY_INFINIBAND
-int ib_security_pkey_access(struct ib_device *dev,
- u8 port_num,
- u16 pkey_index,
- void *sec);
-
void ib_security_destroy_port_pkey_list(struct ib_device *device);
void ib_security_cache_change(struct ib_device *device,
@@ -240,14 +231,6 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
#else
-static inline int ib_security_pkey_access(struct ib_device *dev,
- u8 port_num,
- u16 pkey_index,
- void *sec)
-{
- return 0;
-}
-
static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
}
@@ -314,8 +297,35 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
}
#endif
-struct ib_device *__ib_device_get_by_index(u32 ifindex);
+struct ib_device *ib_device_get_by_index(u32 ifindex);
/* RDMA device netlink */
void nldev_init(void);
void nldev_exit(void);
+
+static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
+ struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ib_qp *qp;
+
+ qp = dev->create_qp(pd, attr, udata);
+ if (IS_ERR(qp))
+ return qp;
+
+ qp->device = dev;
+ qp->pd = pd;
+ /*
+ * We don't track XRC QPs for now, because they don't have a PD
+ * and, more importantly, they are created internally by the driver;
+ * see mlx5 create_dev_resources() as an example.
+ */
+ if (attr->qp_type < IB_QPT_XRC_INI) {
+ qp->res.type = RDMA_RESTRACK_QP;
+ rdma_restrack_add(&qp->res);
+ } else
+ qp->res.valid = false;
+
+ return qp;
+}
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index f2ae75fa3128..bc79ca8215d7 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -25,9 +25,10 @@
#define IB_POLL_FLAGS \
(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
-static int __ib_process_cq(struct ib_cq *cq, int budget)
+static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
{
int i, n, completed = 0;
+ struct ib_wc *wcs = poll_wc ? : cq->wc;
/*
* budget might be (-1) if the caller does not
@@ -35,9 +36,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
* minimum here.
*/
while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
- budget - completed), cq->wc)) > 0) {
+ budget - completed), wcs)) > 0) {
for (i = 0; i < n; i++) {
- struct ib_wc *wc = &cq->wc[i];
+ struct ib_wc *wc = &wcs[i];
if (wc->wr_cqe)
wc->wr_cqe->done(cq, wc);
@@ -60,18 +61,20 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
* @cq: CQ to process
* @budget: number of CQEs to poll for
*
- * This function is used to process all outstanding CQ entries on a
- * %IB_POLL_DIRECT CQ. It does not offload CQ processing to a different
- * context and does not ask for completion interrupts from the HCA.
+ * This function is used to process all outstanding CQ entries.
+ * It does not offload CQ processing to a different context and does
+ * not ask for completion interrupts from the HCA.
+ * Note that using direct processing on a CQ whose type is not
+ * IB_POLL_DIRECT may trigger concurrent processing.
*
* Note: do not pass -1 as %budget unless it is guaranteed that the number
* of completions that will be processed is small.
*/
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
- WARN_ON_ONCE(cq->poll_ctx != IB_POLL_DIRECT);
+ struct ib_wc wcs[IB_POLL_BATCH];
- return __ib_process_cq(cq, budget);
+ return __ib_process_cq(cq, budget, wcs);
}
EXPORT_SYMBOL(ib_process_cq_direct);
@@ -85,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
int completed;
- completed = __ib_process_cq(cq, budget);
+ completed = __ib_process_cq(cq, budget, NULL);
if (completed < budget) {
irq_poll_complete(&cq->iop);
if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
@@ -105,7 +108,7 @@ static void ib_cq_poll_work(struct work_struct *work)
struct ib_cq *cq = container_of(work, struct ib_cq, work);
int completed;
- completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE);
+ completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL);
if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
queue_work(ib_comp_wq, &cq->work);
@@ -117,20 +120,22 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
}
/**
- * ib_alloc_cq - allocate a completion queue
+ * __ib_alloc_cq - allocate a completion queue
* @dev: device to allocate the CQ for
* @private: driver private data, accessible from cq->cq_context
* @nr_cqe: number of CQEs to allocate
* @comp_vector: HCA completion vectors for this CQ
* @poll_ctx: context to poll the CQ from.
+ * @caller: module owner name.
*
* This is the proper interface to allocate a CQ for in-kernel users. A
* CQ allocated with this interface will automatically be polled from the
* specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
* to use this CQ abstraction.
*/
-struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
- int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx)
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
+ int nr_cqe, int comp_vector,
+ enum ib_poll_context poll_ctx, const char *caller)
{
struct ib_cq_init_attr cq_attr = {
.cqe = nr_cqe,
@@ -154,6 +159,10 @@ struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
if (!cq->wc)
goto out_destroy_cq;
+ cq->res.type = RDMA_RESTRACK_CQ;
+ cq->res.kern_name = caller;
+ rdma_restrack_add(&cq->res);
+
switch (cq->poll_ctx) {
case IB_POLL_DIRECT:
cq->comp_handler = ib_cq_completion_direct;
@@ -178,11 +187,12 @@ struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
out_free_wc:
kfree(cq->wc);
+ rdma_restrack_del(&cq->res);
out_destroy_cq:
cq->device->destroy_cq(cq);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(ib_alloc_cq);
+EXPORT_SYMBOL(__ib_alloc_cq);
/**
* ib_free_cq - free a completion queue
@@ -209,6 +219,7 @@ void ib_free_cq(struct ib_cq *cq)
}
kfree(cq->wc);
+ rdma_restrack_del(&cq->res);
ret = cq->device->destroy_cq(cq);
WARN_ON_ONCE(ret);
}
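
The ib_alloc_cq() entry point presumably becomes a macro on the ib_verbs.h side of this series, forwarding the caller's module name for resource tracking; a hedged sketch of what such a wrapper could look like:

    #define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
        __ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), \
                      KBUILD_MODNAME)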
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 5e1be4949d5f..e8010e73a1cf 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device)
return 0;
}
-struct ib_device *__ib_device_get_by_index(u32 index)
+static struct ib_device *__ib_device_get_by_index(u32 index)
{
struct ib_device *device;
@@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index)
return NULL;
}
+/*
+ * The caller is responsible for releasing the reference by calling put_device().
+ */
+struct ib_device *ib_device_get_by_index(u32 index)
+{
+ struct ib_device *device;
+
+ down_read(&lists_rwsem);
+ device = __ib_device_get_by_index(index);
+ if (device)
+ get_device(&device->dev);
+
+ up_read(&lists_rwsem);
+ return device;
+}
+
static struct ib_device *__ib_device_get_by_name(const char *name)
{
struct ib_device *device;
@@ -247,6 +263,8 @@ struct ib_device *ib_alloc_device(size_t size)
if (!device)
return NULL;
+ rdma_restrack_init(&device->res);
+
device->dev.class = &ib_class;
device_initialize(&device->dev);
@@ -272,7 +290,7 @@ void ib_dealloc_device(struct ib_device *device)
{
WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
device->reg_state != IB_DEV_UNINITIALIZED);
- kobject_put(&device->dev.kobj);
+ put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);
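ib_device_get_by_index() pairs the index lookup with get_device() under lists_rwsem, so the device's memory stays alive between lookup and use; every caller must balance it with put_device(), as the nldev handlers below now do. A minimal caller sketch with hypothetical names:

    static int my_use_device(u32 index)
    {
    	struct ib_device *dev = ib_device_get_by_index(index);

    	if (!dev)
    		return -EINVAL;
    	/* ... use dev; the reference keeps it alive until put_device() ... */
    	put_device(&dev->dev);
    	return 0;
    }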
@@ -446,7 +464,6 @@ int ib_register_device(struct ib_device *device,
struct ib_udata uhw = {.outlen = 0, .inlen = 0};
struct device *parent = device->dev.parent;
- WARN_ON_ONCE(!parent);
WARN_ON_ONCE(device->dma_device);
if (device->dev.dma_ops) {
/*
@@ -455,16 +472,25 @@ int ib_register_device(struct ib_device *device,
* into device->dev.
*/
device->dma_device = &device->dev;
- if (!device->dev.dma_mask)
- device->dev.dma_mask = parent->dma_mask;
- if (!device->dev.coherent_dma_mask)
- device->dev.coherent_dma_mask =
- parent->coherent_dma_mask;
+ if (!device->dev.dma_mask) {
+ if (parent)
+ device->dev.dma_mask = parent->dma_mask;
+ else
+ WARN_ON_ONCE(true);
+ }
+ if (!device->dev.coherent_dma_mask) {
+ if (parent)
+ device->dev.coherent_dma_mask =
+ parent->coherent_dma_mask;
+ else
+ WARN_ON_ONCE(true);
+ }
} else {
/*
* The caller did not provide custom DMA operations. Use the
* DMA mapping operations of the parent device.
*/
+ WARN_ON_ONCE(!parent);
device->dma_device = parent;
}
@@ -572,6 +598,8 @@ void ib_unregister_device(struct ib_device *device)
}
up_read(&lists_rwsem);
+ rdma_restrack_clean(&device->res);
+
ib_device_unregister_rdmacg(device);
ib_device_unregister_sysfs(device);
@@ -1017,32 +1045,22 @@ EXPORT_SYMBOL(ib_modify_port);
/**
* ib_find_gid - Returns the port number and GID table index where
- * a specified GID value occurs.
+ * a specified GID value occurs. It searches only the IB link layer.
* @device: The device to query.
* @gid: The GID value to search for.
- * @gid_type: Type of GID.
* @ndev: The ndev related to the GID to search for.
* @port_num: The port number of the device where the GID value was found.
* @index: The index into the GID table where the GID was found. This
* parameter may be NULL.
*/
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- enum ib_gid_type gid_type, struct net_device *ndev,
- u8 *port_num, u16 *index)
+ struct net_device *ndev, u8 *port_num, u16 *index)
{
union ib_gid tmp_gid;
int ret, port, i;
for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
- if (rdma_cap_roce_gid_table(device, port)) {
- if (!ib_find_cached_gid_by_port(device, gid, gid_type, port,
- ndev, index)) {
- *port_num = port;
- return 0;
- }
- }
-
- if (gid_type != IB_GID_TYPE_IB)
+ if (rdma_cap_roce_gid_table(device, port))
continue;
for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
@@ -1146,7 +1164,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);
-static const struct rdma_nl_cbs ibnl_ls_cb_table[] = {
+static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
[RDMA_NL_LS_OP_RESOLVE] = {
.doit = ib_nl_handle_resolve_resp,
.flags = RDMA_NL_ADMIN_PERM,
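Sizing the callback tables to the op count (RDMA_NL_LS_NUM_OPS here, RDMA_NL_IWPM_NUM_OPS and RDMA_NLDEV_NUM_OPS below) guarantees that every op which passes the dispatcher's range check indexes a real, if possibly empty, slot. A simplified sketch of the invariant, along the lines of is_nl_valid():

    if (op >= RDMA_NL_LS_NUM_OPS)
    	return false;			/* out of range */
    cb = &ibnl_ls_cb_table[op];		/* always in bounds now */
    return cb->dump || cb->doit;		/* empty slots are rejected */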
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 84d2615b5d4b..a0a9ed719031 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -388,13 +388,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
EXPORT_SYMBOL(ib_flush_fmr_pool);
/**
- * ib_fmr_pool_map_phys -
- * @pool:FMR pool to allocate FMR from
- * @page_list:List of pages to map
- * @list_len:Number of pages in @page_list
- * @io_virtual_address:I/O virtual address for new FMR
- *
- * Map an FMR from an FMR pool.
+ * ib_fmr_pool_map_phys - Map an FMR from an FMR pool.
+ * @pool_handle: FMR pool to allocate FMR from
+ * @page_list: List of pages to map
+ * @list_len: Number of pages in @page_list
+ * @io_virtual_address: I/O virtual address for new FMR
*/
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
u64 *page_list,
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index e9e189ec7502..5d676cff41f4 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason)
}
EXPORT_SYMBOL(iwcm_reject_msg);
-static struct rdma_nl_cbs iwcm_nl_cb_table[] = {
+static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 3c4faadb8cdd..81528f64061a 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -654,6 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
}
skb_num++;
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
+ ret = -EINVAL;
for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
hlist_node) {
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index cb91245e9163..c50596f7f98a 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -49,7 +49,6 @@
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
-#include "core_priv.h"
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 1fb72c356e36..3ccaae18ad75 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -41,8 +41,6 @@
#include <linux/module.h>
#include "core_priv.h"
-#include "core_priv.h"
-
static DEFINE_MUTEX(rdma_nl_mutex);
static struct sock *nls;
static struct {
@@ -83,15 +81,13 @@ static bool is_nl_valid(unsigned int type, unsigned int op)
if (!is_nl_msg_valid(type, op))
return false;
- cb_table = rdma_nl_types[type].cb_table;
-#ifdef CONFIG_MODULES
- if (!cb_table) {
+ if (!rdma_nl_types[type].cb_table) {
mutex_unlock(&rdma_nl_mutex);
request_module("rdma-netlink-subsys-%d", type);
mutex_lock(&rdma_nl_mutex);
- cb_table = rdma_nl_types[type].cb_table;
}
-#endif
+
+ cb_table = rdma_nl_types[type].cb_table;
if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
return false;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 2fae850a3eff..fa8655e3b3ed 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -31,6 +31,8 @@
*/
#include <linux/module.h>
+#include <linux/pid.h>
+#include <linux/pid_namespace.h>
#include <net/netlink.h>
#include <rdma/rdma_netlink.h>
@@ -52,16 +54,42 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
+ .len = 16 },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING,
+ .len = TASK_COMM_LEN },
};
-static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
+static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
- char fw[IB_FW_VERSION_NAME_MAX];
-
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
return -EMSGSIZE;
if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
return -EMSGSIZE;
+
+ return 0;
+}
+
+static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
+{
+ char fw[IB_FW_VERSION_NAME_MAX];
+
+ if (fill_nldev_handle(msg, device))
+ return -EMSGSIZE;
+
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
return -EMSGSIZE;
@@ -92,10 +120,9 @@ static int fill_port_info(struct sk_buff *msg,
struct ib_port_attr attr;
int ret;
- if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
- return -EMSGSIZE;
- if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
+ if (fill_nldev_handle(msg, device))
return -EMSGSIZE;
+
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
return -EMSGSIZE;
@@ -126,6 +153,137 @@ static int fill_port_info(struct sk_buff *msg,
return 0;
}
+static int fill_res_info_entry(struct sk_buff *msg,
+ const char *name, u64 curr)
+{
+ struct nlattr *entry_attr;
+
+ entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
+ if (!entry_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
+ goto err;
+ if (nla_put_u64_64bit(msg,
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, 0))
+ goto err;
+
+ nla_nest_end(msg, entry_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, entry_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
+{
+ static const char * const names[RDMA_RESTRACK_MAX] = {
+ [RDMA_RESTRACK_PD] = "pd",
+ [RDMA_RESTRACK_CQ] = "cq",
+ [RDMA_RESTRACK_QP] = "qp",
+ };
+
+ struct rdma_restrack_root *res = &device->res;
+ struct nlattr *table_attr;
+ int ret, i, curr;
+
+ if (fill_nldev_handle(msg, device))
+ return -EMSGSIZE;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
+ if (!names[i])
+ continue;
+ curr = rdma_restrack_count(res, i, task_active_pid_ns(current));
+ ret = fill_res_info_entry(msg, names[i], curr);
+ if (ret)
+ goto err;
+ }
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return ret;
+}
+
+static int fill_res_qp_entry(struct sk_buff *msg,
+ struct ib_qp *qp, uint32_t port)
+{
+ struct rdma_restrack_entry *res = &qp->res;
+ struct ib_qp_init_attr qp_init_attr;
+ struct nlattr *entry_attr;
+ struct ib_qp_attr qp_attr;
+ int ret;
+
+ ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
+ if (ret)
+ return ret;
+
+ if (port && port != qp_attr.port_num)
+ return 0;
+
+ entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
+ if (!entry_attr)
+ goto out;
+
+ /* In create_qp() port is not set yet */
+ if (qp_attr.port_num &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
+ goto err;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
+ goto err;
+ if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
+ qp_attr.dest_qp_num))
+ goto err;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
+ qp_attr.rq_psn))
+ goto err;
+ }
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
+ goto err;
+
+ if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
+ qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
+ qp_attr.path_mig_state))
+ goto err;
+ }
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
+ goto err;
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
+ goto err;
+
+ /*
+ * The existence of a task means this is a user QP; the netlink
+ * user is invited to read /proc/PID/comm to get the task name.
+ * For kernel resources there is no task and res->kern_name is
+ * reported instead.
+ */
+ if (rdma_is_kernel_res(res)) {
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME, res->kern_name))
+ goto err;
+ } else {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, task_pid_vnr(res->task)))
+ goto err;
+ }
+
+ nla_nest_end(msg, entry_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, entry_attr);
+out:
+ return -EMSGSIZE;
+}
+
static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -142,27 +300,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = __ib_device_get_by_index(index);
+ device = ib_device_get_by_index(index);
if (!device)
return -EINVAL;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
+ if (!msg) {
+ err = -ENOMEM;
+ goto err;
+ }
nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
0, 0);
err = fill_dev_info(msg, device);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
+ if (err)
+ goto err_free;
nlmsg_end(msg, nlh);
+ put_device(&device->dev);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ put_device(&device->dev);
+ return err;
}
static int _nldev_get_dumpit(struct ib_device *device,
@@ -220,31 +385,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = __ib_device_get_by_index(index);
+ device = ib_device_get_by_index(index);
if (!device)
return -EINVAL;
port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
- if (!rdma_is_port_valid(device, port))
- return -EINVAL;
+ if (!rdma_is_port_valid(device, port)) {
+ err = -EINVAL;
+ goto err;
+ }
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
+ if (!msg) {
+ err = -ENOMEM;
+ goto err;
+ }
nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
0, 0);
err = fill_port_info(msg, device, port);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
+ if (err)
+ goto err_free;
nlmsg_end(msg, nlh);
+ put_device(&device->dev);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ put_device(&device->dev);
+ return err;
}
static int nldev_port_get_dumpit(struct sk_buff *skb,
@@ -265,7 +439,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
return -EINVAL;
ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = __ib_device_get_by_index(ifindex);
+ device = ib_device_get_by_index(ifindex);
if (!device)
return -EINVAL;
@@ -299,11 +473,220 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
nlmsg_end(skb, nlh);
}
-out: cb->args[0] = idx;
+out:
+ put_device(&device->dev);
+ cb->args[0] = idx;
return skb->len;
}
-static const struct rdma_nl_cbs nldev_cb_table[] = {
+static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 index;
+ int ret;
+
+ ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+ 0, 0);
+
+ ret = fill_res_info(msg, device);
+ if (ret)
+ goto err_free;
+
+ nlmsg_end(msg, nlh);
+ put_device(&device->dev);
+ return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ put_device(&device->dev);
+ return ret;
+}
+
+static int _nldev_res_get_dumpit(struct ib_device *device,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ unsigned int idx)
+{
+ int start = cb->args[0];
+ struct nlmsghdr *nlh;
+
+ if (idx < start)
+ return 0;
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+ 0, NLM_F_MULTI);
+
+ if (fill_res_info(skb, device)) {
+ nlmsg_cancel(skb, nlh);
+ goto out;
+ }
+
+ nlmsg_end(skb, nlh);
+
+ idx++;
+
+out:
+ cb->args[0] = idx;
+ return skb->len;
+}
+
+static int nldev_res_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
+}
+
+static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct rdma_restrack_entry *res;
+ int err, ret = 0, idx = 0;
+ struct nlattr *table_attr;
+ struct ib_device *device;
+ int start = cb->args[0];
+ struct ib_qp *qp = NULL;
+ struct nlmsghdr *nlh;
+ u32 index, port = 0;
+
+ err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NULL);
+ /*
+ * Right now we expect the device index in order to return QP
+ * information, but it is possible to extend this code to return
+ * all devices in one shot by checking for the existence of
+ * RDMA_NLDEV_ATTR_DEV_INDEX; if it is absent, iterate over all
+ * devices. That is not needed for now.
+ */
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ /*
+ * If no PORT_INDEX is supplied, we will return all QPs from that device
+ */
+ if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port)) {
+ ret = -EINVAL;
+ goto err_index;
+ }
+ }
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_QP_GET),
+ 0, NLM_F_MULTI);
+
+ if (fill_nldev_handle(skb, device)) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ table_attr = nla_nest_start(skb, RDMA_NLDEV_ATTR_RES_QP);
+ if (!table_attr) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ down_read(&device->res.rwsem);
+ hash_for_each_possible(device->res.hash, res, node, RDMA_RESTRACK_QP) {
+ if (idx < start)
+ goto next;
+
+ if ((rdma_is_kernel_res(res) &&
+ task_active_pid_ns(current) != &init_pid_ns) ||
+ (!rdma_is_kernel_res(res) &&
+ task_active_pid_ns(current) != task_active_pid_ns(res->task)))
+ /*
+ * 1. Kernel QPs should be visible in the init namespace only
+ * 2. Present only QPs visible in the current namespace
+ */
+ goto next;
+
+ if (!rdma_restrack_get(res))
+ /*
+ * The resource is being released now, but we are not releasing
+ * the lock yet, so it will actually be released on our next
+ * pass, once we get the ->next pointer.
+ */
+ goto next;
+
+ qp = container_of(res, struct ib_qp, res);
+
+ up_read(&device->res.rwsem);
+ ret = fill_res_qp_entry(skb, qp, port);
+ down_read(&device->res.rwsem);
+ /*
+ * Return the resource; it will not actually be released until
+ * &device->res.rwsem can be taken for write.
+ */
+ rdma_restrack_put(res);
+
+ if (ret == -EMSGSIZE)
+ /*
+ * There is room to optimize here: use list_prepare_entry
+ * and list_for_each_entry_continue afterwards.
+ */
+ break;
+ if (ret)
+ goto res_err;
+next: idx++;
+ }
+ up_read(&device->res.rwsem);
+
+ nla_nest_end(skb, table_attr);
+ nlmsg_end(skb, nlh);
+ cb->args[0] = idx;
+
+ /*
+ * No more QPs to fill, cancel the message and
+ * return 0 to mark end of dumpit.
+ */
+ if (!qp)
+ goto err;
+
+ put_device(&device->dev);
+ return skb->len;
+
+res_err:
+ nla_nest_cancel(skb, table_attr);
+ up_read(&device->res.rwsem);
+
+err:
+ nlmsg_cancel(skb, nlh);
+
+err_index:
+ put_device(&device->dev);
+ return ret;
+}
+
+static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
[RDMA_NLDEV_CMD_GET] = {
.doit = nldev_get_doit,
.dump = nldev_get_dumpit,
@@ -312,6 +695,23 @@ static const struct rdma_nl_cbs nldev_cb_table[] = {
.doit = nldev_port_get_doit,
.dump = nldev_port_get_dumpit,
},
+ [RDMA_NLDEV_CMD_RES_GET] = {
+ .doit = nldev_res_get_doit,
+ .dump = nldev_res_get_dumpit,
+ },
+ [RDMA_NLDEV_CMD_RES_QP_GET] = {
+ .dump = nldev_res_get_qp_dumpit,
+ /*
+ * .doit is not implemented yet for two reasons:
+ * 1. It is not needed yet.
+ * 2. A .doit needs an identifier; while that is easy for QPs
+ * (device index + port index + LQPN), it is not the case for
+ * the other resources (PD and CQ). Since it is better to provide
+ * a similar interface for all resources, let's wait until the
+ * other resources are implemented too.
+ */
+ },
};
void __init nldev_init(void)
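A hedged userspace sketch of consuming RDMA_NLDEV_CMD_RES_GET over a NETLINK_RDMA socket (the interface tools such as iproute2's rdma utility are expected to build on); the uapi names are assumed to come from <rdma/rdma_netlink.h>, and error handling is omitted:

    struct nlmsghdr req = {
    	.nlmsg_len   = NLMSG_LENGTH(0),
    	.nlmsg_type  = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
    					RDMA_NLDEV_CMD_RES_GET),
    	.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
    };
    int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);

    send(fd, &req, req.nlmsg_len, 0);
    /* recv() the NLM_F_MULTI replies and parse the nested
     * RDMA_NLDEV_ATTR_RES_SUMMARY attributes from each one. */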
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
new file mode 100644
index 000000000000..857637bf46da
--- /dev/null
+++ b/drivers/infiniband/core/restrack.c
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ */
+
+#include <rdma/ib_verbs.h>
+#include <rdma/restrack.h>
+#include <linux/mutex.h>
+#include <linux/sched/task.h>
+#include <linux/uaccess.h>
+#include <linux/pid_namespace.h>
+
+void rdma_restrack_init(struct rdma_restrack_root *res)
+{
+ init_rwsem(&res->rwsem);
+}
+
+void rdma_restrack_clean(struct rdma_restrack_root *res)
+{
+ WARN_ON_ONCE(!hash_empty(res->hash));
+}
+
+int rdma_restrack_count(struct rdma_restrack_root *res,
+ enum rdma_restrack_type type,
+ struct pid_namespace *ns)
+{
+ struct rdma_restrack_entry *e;
+ u32 cnt = 0;
+
+ down_read(&res->rwsem);
+ hash_for_each_possible(res->hash, e, node, type) {
+ if (ns == &init_pid_ns ||
+ (!rdma_is_kernel_res(e) &&
+ ns == task_active_pid_ns(e->task)))
+ cnt++;
+ }
+ up_read(&res->rwsem);
+ return cnt;
+}
+EXPORT_SYMBOL(rdma_restrack_count);
+
+static void set_kern_name(struct rdma_restrack_entry *res)
+{
+ enum rdma_restrack_type type = res->type;
+ struct ib_qp *qp;
+
+ if (type != RDMA_RESTRACK_QP)
+ /* PD and CQ types already have this name embedded in them */
+ return;
+
+ qp = container_of(res, struct ib_qp, res);
+ if (!qp->pd) {
+ WARN_ONCE(true, "XRC QPs are not supported\n");
+ /* Survive, despite the programmer's error */
+ res->kern_name = " ";
+ return;
+ }
+
+ res->kern_name = qp->pd->res.kern_name;
+}
+
+static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
+{
+ enum rdma_restrack_type type = res->type;
+ struct ib_device *dev;
+ struct ib_xrcd *xrcd;
+ struct ib_pd *pd;
+ struct ib_cq *cq;
+ struct ib_qp *qp;
+
+ switch (type) {
+ case RDMA_RESTRACK_PD:
+ pd = container_of(res, struct ib_pd, res);
+ dev = pd->device;
+ break;
+ case RDMA_RESTRACK_CQ:
+ cq = container_of(res, struct ib_cq, res);
+ dev = cq->device;
+ break;
+ case RDMA_RESTRACK_QP:
+ qp = container_of(res, struct ib_qp, res);
+ dev = qp->device;
+ break;
+ case RDMA_RESTRACK_XRCD:
+ xrcd = container_of(res, struct ib_xrcd, res);
+ dev = xrcd->device;
+ break;
+ default:
+ WARN_ONCE(true, "Wrong resource tracking type %u\n", type);
+ return NULL;
+ }
+
+ return dev;
+}
+
+void rdma_restrack_add(struct rdma_restrack_entry *res)
+{
+ struct ib_device *dev = res_to_dev(res);
+
+ if (!dev)
+ return;
+
+ if (!uaccess_kernel()) {
+ get_task_struct(current);
+ res->task = current;
+ res->kern_name = NULL;
+ } else {
+ set_kern_name(res);
+ res->task = NULL;
+ }
+
+ kref_init(&res->kref);
+ init_completion(&res->comp);
+ res->valid = true;
+
+ down_write(&dev->res.rwsem);
+ hash_add(dev->res.hash, &res->node, res->type);
+ up_write(&dev->res.rwsem);
+}
+EXPORT_SYMBOL(rdma_restrack_add);
+
+int __must_check rdma_restrack_get(struct rdma_restrack_entry *res)
+{
+ return kref_get_unless_zero(&res->kref);
+}
+EXPORT_SYMBOL(rdma_restrack_get);
+
+static void restrack_release(struct kref *kref)
+{
+ struct rdma_restrack_entry *res;
+
+ res = container_of(kref, struct rdma_restrack_entry, kref);
+ complete(&res->comp);
+}
+
+int rdma_restrack_put(struct rdma_restrack_entry *res)
+{
+ return kref_put(&res->kref, restrack_release);
+}
+EXPORT_SYMBOL(rdma_restrack_put);
+
+void rdma_restrack_del(struct rdma_restrack_entry *res)
+{
+ struct ib_device *dev;
+
+ if (!res->valid)
+ return;
+
+ dev = res_to_dev(res);
+ if (!dev)
+ return;
+
+ rdma_restrack_put(res);
+
+ wait_for_completion(&res->comp);
+
+ down_write(&dev->res.rwsem);
+ hash_del(&res->node);
+ res->valid = false;
+ if (res->task)
+ put_task_struct(res->task);
+ up_write(&dev->res.rwsem);
+}
+EXPORT_SYMBOL(rdma_restrack_del);
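The intended calling convention, pieced together from the PD/CQ hunks elsewhere in this patch: the owner fills in res.type (and kern_name, for kernel users) before rdma_restrack_add(), and calls rdma_restrack_del() on destroy, which blocks until every rdma_restrack_get() has been balanced by rdma_restrack_put(). An illustrative sketch with hypothetical helper names:

    static void my_track_pd(struct ib_pd *pd, const char *caller)
    {
    	pd->res.type = RDMA_RESTRACK_PD;
    	pd->res.kern_name = caller;	/* overwritten for user owners */
    	rdma_restrack_add(&pd->res);
    }

    static void my_untrack_pd(struct ib_pd *pd)
    {
    	/* Sleeps until concurrent netlink readers drop their refs. */
    	rdma_restrack_del(&pd->res);
    }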
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 90e3889b7fbe..5a52ec77940a 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -410,15 +410,18 @@ static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
rtnl_unlock();
}
-/* This function will rescan all of the network devices in the system
- * and add their gids, as needed, to the relevant RoCE devices. */
-int roce_rescan_device(struct ib_device *ib_dev)
+/**
+ * rdma_roce_rescan_device - Rescan all of the network devices in the system
+ * and add their gids, as needed, to the relevant RoCE devices.
+ *
+ * @ib_dev: the rdma device
+ */
+void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
enum_all_gids_of_dev_cb, NULL);
-
- return 0;
}
+EXPORT_SYMBOL(rdma_roce_rescan_device);
static void callback_for_addr_gid_device_scan(struct ib_device *device,
u8 port,
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index ab5e1024fea9..8cf15d4a8ac4 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1227,9 +1227,9 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
return src_path_mask;
}
-int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
- struct sa_path_rec *rec,
- struct rdma_ah_attr *ah_attr)
+int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+ struct sa_path_rec *rec,
+ struct rdma_ah_attr *ah_attr)
{
int ret;
u16 gid_index;
@@ -1341,10 +1341,11 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
return 0;
}
-EXPORT_SYMBOL(ib_init_ah_from_path);
+EXPORT_SYMBOL(ib_init_ah_attr_from_path);
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
+ struct rdma_ah_attr ah_attr;
unsigned long flags;
spin_lock_irqsave(&query->port->ah_lock, flags);
@@ -1356,6 +1357,15 @@ static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
query->sm_ah = query->port->sm_ah;
spin_unlock_irqrestore(&query->port->ah_lock, flags);
+ /*
+ * Always check that sm_ah has a valid DLID assigned
+ * before querying for class port info.
+ */
+ if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
+ !rdma_is_valid_unicast_lid(&ah_attr)) {
+ kref_put(&query->sm_ah->ref, free_sm_ah);
+ return -EAGAIN;
+ }
query->mad_buf = ib_create_send_mad(query->port->agent, 1,
query->sm_ah->pkey_index,
0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index a337386652b0..b61dda6b04fc 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -386,6 +386,9 @@ int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
if (ret)
return ret;
+ if (!qp->qp_sec)
+ return 0;
+
mutex_lock(&real_qp->qp_sec->mutex);
ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
qp->qp_sec);
@@ -650,12 +653,11 @@ int ib_security_modify_qp(struct ib_qp *qp,
}
return ret;
}
-EXPORT_SYMBOL(ib_security_modify_qp);
-int ib_security_pkey_access(struct ib_device *dev,
- u8 port_num,
- u16 pkey_index,
- void *sec)
+static int ib_security_pkey_access(struct ib_device *dev,
+ u8 port_num,
+ u16 pkey_index,
+ void *sec)
{
u64 subnet_prefix;
u16 pkey;
@@ -675,7 +677,6 @@ int ib_security_pkey_access(struct ib_device *dev,
return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
-EXPORT_SYMBOL(ib_security_pkey_access);
static int ib_mad_agent_security_change(struct notifier_block *nb,
unsigned long event,
@@ -739,8 +740,11 @@ int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
return 0;
- if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
- return -EACCES;
+ if (map->agent.qp->qp_type == IB_QPT_SMI) {
+ if (!map->agent.smp_allowed)
+ return -EACCES;
+ return 0;
+ }
return ib_security_pkey_access(map->agent.device,
map->agent.port_num,
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index e30d86fa1855..8ae1308eecc7 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1276,7 +1276,6 @@ int ib_device_register_sysfs(struct ib_device *device,
int ret;
int i;
- WARN_ON_ONCE(!device->dev.parent);
ret = dev_set_name(class_dev, "%s", device->name);
if (ret)
return ret;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f2a7f62c2834..8ae636bb09e5 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -53,6 +53,8 @@
#include <rdma/ib_user_cm.h>
#include <rdma/ib_marshall.h>
+#include "core_priv.h"
+
MODULE_AUTHOR("Libor Michalek");
MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
MODULE_LICENSE("Dual BSD/GPL");
@@ -104,10 +106,13 @@ struct ib_ucm_event {
enum {
IB_UCM_MAJOR = 231,
IB_UCM_BASE_MINOR = 224,
- IB_UCM_MAX_DEVICES = 32
+ IB_UCM_MAX_DEVICES = RDMA_MAX_PORTS,
+ IB_UCM_NUM_FIXED_MINOR = 32,
+ IB_UCM_NUM_DYNAMIC_MINOR = IB_UCM_MAX_DEVICES - IB_UCM_NUM_FIXED_MINOR,
};
#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
+static dev_t dynamic_ucm_dev;
static void ib_ucm_add_one(struct ib_device *device);
static void ib_ucm_remove_one(struct ib_device *device, void *client_data);
@@ -1130,11 +1135,11 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
return result;
}
-static unsigned int ib_ucm_poll(struct file *filp,
+static __poll_t ib_ucm_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct ib_ucm_file *file = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &file->poll_wait, wait);
@@ -1199,7 +1204,6 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
return 0;
}
-static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
static void ib_ucm_release_dev(struct device *dev)
{
struct ib_ucm_device *ucm_dev;
@@ -1210,10 +1214,7 @@ static void ib_ucm_release_dev(struct device *dev)
static void ib_ucm_free_dev(struct ib_ucm_device *ucm_dev)
{
- if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
- clear_bit(ucm_dev->devnum, dev_map);
- else
- clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
+ clear_bit(ucm_dev->devnum, dev_map);
}
static const struct file_operations ucm_fops = {
@@ -1235,27 +1236,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
-static dev_t overflow_maj;
-static int find_overflow_devnum(void)
-{
- int ret;
-
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
- "infiniband_cm");
- if (ret) {
- pr_err("ucm: couldn't register dynamic device number\n");
- return ret;
- }
- }
-
- ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES);
- if (ret >= IB_UCM_MAX_DEVICES)
- return -1;
-
- return ret;
-}
-
static void ib_ucm_add_one(struct ib_device *device)
{
int devnum;
@@ -1274,19 +1254,14 @@ static void ib_ucm_add_one(struct ib_device *device)
ucm_dev->dev.release = ib_ucm_release_dev;
devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
- if (devnum >= IB_UCM_MAX_DEVICES) {
- devnum = find_overflow_devnum();
- if (devnum < 0)
- goto err;
-
- ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
- } else {
- ucm_dev->devnum = devnum;
- base = devnum + IB_UCM_BASE_DEV;
- set_bit(devnum, dev_map);
- }
+ if (devnum >= IB_UCM_MAX_DEVICES)
+ goto err;
+ ucm_dev->devnum = devnum;
+ set_bit(devnum, dev_map);
+ if (devnum >= IB_UCM_NUM_FIXED_MINOR)
+ base = dynamic_ucm_dev + devnum - IB_UCM_NUM_FIXED_MINOR;
+ else
+ base = IB_UCM_BASE_DEV + devnum;
cdev_init(&ucm_dev->cdev, &ucm_fops);
ucm_dev->cdev.owner = THIS_MODULE;
@@ -1334,13 +1309,20 @@ static int __init ib_ucm_init(void)
{
int ret;
- ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
+ ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR,
"infiniband_cm");
if (ret) {
pr_err("ucm: couldn't register device number\n");
goto error1;
}
+ ret = alloc_chrdev_region(&dynamic_ucm_dev, 0, IB_UCM_NUM_DYNAMIC_MINOR,
+ "infiniband_cm");
+ if (ret) {
+ pr_err("ucm: couldn't register dynamic device number\n");
+ goto err_alloc;
+ }
+
ret = class_create_file(&cm_class, &class_attr_abi_version.attr);
if (ret) {
pr_err("ucm: couldn't create abi_version attribute\n");
@@ -1357,7 +1339,9 @@ static int __init ib_ucm_init(void)
error3:
class_remove_file(&cm_class, &class_attr_abi_version.attr);
error2:
- unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
+ unregister_chrdev_region(dynamic_ucm_dev, IB_UCM_NUM_DYNAMIC_MINOR);
+err_alloc:
+ unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR);
error1:
return ret;
}
@@ -1366,9 +1350,8 @@ static void __exit ib_ucm_cleanup(void)
{
ib_unregister_client(&ucm_client);
class_remove_file(&cm_class, &class_attr_abi_version.attr);
- unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES);
+ unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR);
+ unregister_chrdev_region(dynamic_ucm_dev, IB_UCM_NUM_DYNAMIC_MINOR);
idr_destroy(&ctx_id_table);
}
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index eb85b546e223..6ba4231f2b07 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -904,13 +904,14 @@ static ssize_t ucma_query_path(struct ucma_context *ctx,
resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
IB_PATH_BIDIRECTIONAL;
- if (rec->rec_type == SA_PATH_REC_TYPE_IB) {
- ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
- } else {
+ if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
struct sa_path_rec ib;
sa_convert_path_opa_to_ib(&ib, rec);
ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
+
+ } else {
+ ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
}
}
@@ -943,8 +944,8 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
} else {
addr->sib_family = AF_IB;
addr->sib_pkey = (__force __be16) resp.pkey;
- rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
- (union ib_gid *) &addr->sib_addr);
+ rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
+ NULL);
addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
&ctx->cm_id->route.addr.src_addr);
}
@@ -956,8 +957,8 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
} else {
addr->sib_family = AF_IB;
addr->sib_pkey = (__force __be16) resp.pkey;
- rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
- (union ib_gid *) &addr->sib_addr);
+ rdma_read_gids(ctx->cm_id, NULL,
+ (union ib_gid *)&addr->sib_addr);
addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
&ctx->cm_id->route.addr.dst_addr);
}
@@ -1231,9 +1232,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
struct sa_path_rec opa;
sa_convert_path_ib_to_opa(&opa, &sa_path);
- ret = rdma_set_ib_paths(ctx->cm_id, &opa, 1);
+ ret = rdma_set_ib_path(ctx->cm_id, &opa);
} else {
- ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
+ ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
}
if (ret)
return ret;
@@ -1630,10 +1631,10 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
return ret;
}
-static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
struct ucma_file *file = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &file->poll_wait, wait);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 130606c3b07c..9a4e899d94b3 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -352,7 +352,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
return -EINVAL;
}
- ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
+ ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length,
offset + ib_umem_offset(umem));
if (ret < 0)
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 4b64dd02e090..78c77962422e 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -55,16 +55,21 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>
+#include "core_priv.h"
+
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");
enum {
- IB_UMAD_MAX_PORTS = 64,
+ IB_UMAD_MAX_PORTS = RDMA_MAX_PORTS,
IB_UMAD_MAX_AGENTS = 32,
IB_UMAD_MAJOR = 231,
- IB_UMAD_MINOR_BASE = 0
+ IB_UMAD_MINOR_BASE = 0,
+ IB_UMAD_NUM_FIXED_MINOR = 64,
+ IB_UMAD_NUM_DYNAMIC_MINOR = IB_UMAD_MAX_PORTS - IB_UMAD_NUM_FIXED_MINOR,
+ IB_ISSM_MINOR_BASE = IB_UMAD_NUM_FIXED_MINOR,
};
/*
@@ -127,9 +132,12 @@ struct ib_umad_packet {
static struct class *umad_class;
-static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
+static const dev_t base_umad_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
+static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) +
+ IB_UMAD_NUM_FIXED_MINOR;
+static dev_t dynamic_umad_dev;
+static dev_t dynamic_issm_dev;
-static DEFINE_SPINLOCK(port_lock);
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
static void ib_umad_add_one(struct ib_device *device);
@@ -233,8 +241,7 @@ static void recv_handler(struct ib_mad_agent *agent,
* On OPA devices it is okay to lose the upper 16 bits of LID as this
* information is obtained elsewhere. Mask off the upper 16 bits.
*/
- if (agent->device->port_immutable[agent->port_num].core_cap_flags &
- RDMA_CORE_PORT_INTEL_OPA)
+ if (rdma_cap_opa_mad(agent->device, agent->port_num))
packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
mad_recv_wc->wc->slid);
else
@@ -246,10 +253,14 @@ static void recv_handler(struct ib_mad_agent *agent,
if (packet->mad.hdr.grh_present) {
struct rdma_ah_attr ah_attr;
const struct ib_global_route *grh;
+ int ret;
- ib_init_ah_from_wc(agent->device, agent->port_num,
- mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
- &ah_attr);
+ ret = ib_init_ah_attr_from_wc(agent->device, agent->port_num,
+ mad_recv_wc->wc,
+ mad_recv_wc->recv_buf.grh,
+ &ah_attr);
+ if (ret)
+ goto err2;
grh = rdma_ah_read_grh(&ah_attr);
packet->mad.hdr.gid_index = grh->sgid_index;
@@ -500,7 +511,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
}
memset(&ah_attr, 0, sizeof ah_attr);
- ah_attr.type = rdma_ah_find_type(file->port->ib_dev,
+ ah_attr.type = rdma_ah_find_type(agent->device,
file->port->port_num);
rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid));
rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl);
@@ -617,12 +628,12 @@ err:
return ret;
}
-static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
struct ib_umad_file *file = filp->private_data;
/* we will always be able to post a MAD send */
- unsigned int mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = POLLOUT | POLLWRNORM;
poll_wait(filp, &file->recv_wait, wait);
@@ -1139,54 +1150,26 @@ static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
__stringify(IB_USER_MAD_ABI_VERSION));
-static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
-static int find_overflow_devnum(struct ib_device *device)
-{
- int ret;
-
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
- "infiniband_mad");
- if (ret) {
- dev_err(&device->dev,
- "couldn't register dynamic device number\n");
- return ret;
- }
- }
-
- ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
- if (ret >= IB_UMAD_MAX_PORTS)
- return -1;
-
- return ret;
-}
-
static int ib_umad_init_port(struct ib_device *device, int port_num,
struct ib_umad_device *umad_dev,
struct ib_umad_port *port)
{
int devnum;
- dev_t base;
+ dev_t base_umad;
+ dev_t base_issm;
- spin_lock(&port_lock);
devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
- if (devnum >= IB_UMAD_MAX_PORTS) {
- spin_unlock(&port_lock);
- devnum = find_overflow_devnum(device);
- if (devnum < 0)
- return -1;
-
- spin_lock(&port_lock);
- port->dev_num = devnum + IB_UMAD_MAX_PORTS;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
+ if (devnum >= IB_UMAD_MAX_PORTS)
+ return -1;
+ port->dev_num = devnum;
+ set_bit(devnum, dev_map);
+ if (devnum >= IB_UMAD_NUM_FIXED_MINOR) {
+ base_umad = dynamic_umad_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
+ base_issm = dynamic_issm_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
} else {
- port->dev_num = devnum;
- base = devnum + base_dev;
- set_bit(devnum, dev_map);
+ base_umad = devnum + base_umad_dev;
+ base_issm = devnum + base_issm_dev;
}
- spin_unlock(&port_lock);
port->ib_dev = device;
port->port_num = port_num;
@@ -1198,7 +1181,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
port->cdev.owner = THIS_MODULE;
cdev_set_parent(&port->cdev, &umad_dev->kobj);
kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
- if (cdev_add(&port->cdev, base, 1))
+ if (cdev_add(&port->cdev, base_umad, 1))
goto err_cdev;
port->dev = device_create(umad_class, device->dev.parent,
@@ -1212,12 +1195,11 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
if (device_create_file(port->dev, &dev_attr_port))
goto err_dev;
- base += IB_UMAD_MAX_PORTS;
cdev_init(&port->sm_cdev, &umad_sm_fops);
port->sm_cdev.owner = THIS_MODULE;
cdev_set_parent(&port->sm_cdev, &umad_dev->kobj);
kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
- if (cdev_add(&port->sm_cdev, base, 1))
+ if (cdev_add(&port->sm_cdev, base_issm, 1))
goto err_sm_cdev;
port->sm_dev = device_create(umad_class, device->dev.parent,
@@ -1244,10 +1226,7 @@ err_dev:
err_cdev:
cdev_del(&port->cdev);
- if (port->dev_num < IB_UMAD_MAX_PORTS)
- clear_bit(devnum, dev_map);
- else
- clear_bit(devnum, overflow_map);
+ clear_bit(devnum, dev_map);
return -1;
}
@@ -1281,11 +1260,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
}
mutex_unlock(&port->file_mutex);
-
- if (port->dev_num < IB_UMAD_MAX_PORTS)
- clear_bit(port->dev_num, dev_map);
- else
- clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
+ clear_bit(port->dev_num, dev_map);
}
static void ib_umad_add_one(struct ib_device *device)
@@ -1361,13 +1336,23 @@ static int __init ib_umad_init(void)
{
int ret;
- ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
+ ret = register_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2,
"infiniband_mad");
if (ret) {
pr_err("couldn't register device number\n");
goto out;
}
+ ret = alloc_chrdev_region(&dynamic_umad_dev, 0,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2,
+ "infiniband_mad");
+ if (ret) {
+ pr_err("couldn't register dynamic device number\n");
+ goto out_alloc;
+ }
+ dynamic_issm_dev = dynamic_umad_dev + IB_UMAD_NUM_DYNAMIC_MINOR;
+
umad_class = class_create(THIS_MODULE, "infiniband_mad");
if (IS_ERR(umad_class)) {
ret = PTR_ERR(umad_class);
@@ -1395,7 +1380,12 @@ out_class:
class_destroy(umad_class);
out_chrdev:
- unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
+ unregister_chrdev_region(dynamic_umad_dev,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2);
+
+out_alloc:
+ unregister_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2);
out:
return ret;
@@ -1405,9 +1395,10 @@ static void __exit ib_umad_cleanup(void)
{
ib_unregister_client(&umad_client);
class_destroy(umad_class);
- unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
+ unregister_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2);
+ unregister_chrdev_region(dynamic_umad_dev,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2);
}
module_init(ib_umad_init);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 16d55710b116..256934d1f64f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -340,6 +340,8 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
uobj->object = pd;
memset(&resp, 0, sizeof resp);
resp.pd_handle = uobj->id;
+ pd->res.type = RDMA_RESTRACK_PD;
+ rdma_restrack_add(&pd->res);
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
@@ -1033,6 +1035,8 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
goto err_cb;
uobj_alloc_commit(&obj->uobject);
+ cq->res.type = RDMA_RESTRACK_CQ;
+ rdma_restrack_add(&cq->res);
return obj;
@@ -1145,10 +1149,7 @@ int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
min(ucore->inlen, sizeof(cmd)),
ib_uverbs_ex_create_cq_cb, NULL);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- return 0;
+ return PTR_ERR_OR_ZERO(obj);
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
@@ -1199,7 +1200,7 @@ static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
tmp.opcode = wc->opcode;
tmp.vendor_err = wc->vendor_err;
tmp.byte_len = wc->byte_len;
- tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
+ tmp.ex.imm_data = wc->ex.imm_data;
tmp.qp_num = wc->qp->qp_num;
tmp.src_qp = wc->src_qp;
tmp.wc_flags = wc->wc_flags;
@@ -1517,7 +1518,7 @@ static int create_qp(struct ib_uverbs_file *file,
if (cmd->qp_type == IB_QPT_XRC_TGT)
qp = ib_create_qp(pd, &attr);
else
- qp = device->create_qp(pd, &attr, uhw);
+ qp = _ib_create_qp(device, pd, &attr, uhw);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
@@ -1530,7 +1531,6 @@ static int create_qp(struct ib_uverbs_file *file,
goto err_cb;
qp->real_qp = qp;
- qp->device = device;
qp->pd = pd;
qp->send_cq = attr.send_cq;
qp->recv_cq = attr.recv_cq;
@@ -1971,6 +1971,12 @@ static int modify_qp(struct ib_uverbs_file *file,
goto release_qp;
}
+ if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
+ !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+
attr->qp_state = cmd->base.qp_state;
attr->cur_qp_state = cmd->base.cur_qp_state;
attr->path_mtu = cmd->base.path_mtu;
@@ -2068,8 +2074,8 @@ int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
return -EOPNOTSUPP;
if (ucore->inlen > sizeof(cmd)) {
- if (ib_is_udata_cleared(ucore, sizeof(cmd),
- ucore->inlen - sizeof(cmd)))
+ if (!ib_is_udata_cleared(ucore, sizeof(cmd),
+ ucore->inlen - sizeof(cmd)))
return -EOPNOTSUPP;
}
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 71ff2644e053..d96dc1d17be1 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -243,16 +243,13 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
size_t ctx_size;
uintptr_t data[UVERBS_OPTIMIZE_USING_STACK_SZ / sizeof(uintptr_t)];
- if (hdr->reserved)
- return -EINVAL;
-
object_spec = uverbs_get_object(ib_dev, hdr->object_id);
if (!object_spec)
- return -EOPNOTSUPP;
+ return -EPROTONOSUPPORT;
method_spec = uverbs_get_method(object_spec, hdr->method_id);
if (!method_spec)
- return -EOPNOTSUPP;
+ return -EPROTONOSUPPORT;
if ((method_spec->flags & UVERBS_ACTION_FLAG_CREATE_ROOT) ^ !file->ucontext)
return -EINVAL;
@@ -305,6 +302,16 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
err = uverbs_handle_method(buf, ctx->uattrs, hdr->num_attrs, ib_dev,
file, method_spec, ctx->uverbs_attr_bundle);
+
+ /*
+ * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
+ * not invoke the method because the request is not supported. No
+ * other cases should return this code.
+ */
+ if (unlikely(err == -EPROTONOSUPPORT)) {
+ WARN_ON_ONCE(err == -EPROTONOSUPPORT);
+ err = -EINVAL;
+ }
out:
if (ctx != (void *)data)
kfree(ctx);
@@ -341,7 +348,7 @@ long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
if (hdr.reserved) {
- err = -EOPNOTSUPP;
+ err = -EPROTONOSUPPORT;
goto out;
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 381fd9c096ae..5b811bf574d6 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -62,14 +62,16 @@ MODULE_LICENSE("Dual BSD/GPL");
enum {
IB_UVERBS_MAJOR = 231,
IB_UVERBS_BASE_MINOR = 192,
- IB_UVERBS_MAX_DEVICES = 32
+ IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
+ IB_UVERBS_NUM_FIXED_MINOR = 32,
+ IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR,
};
#define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
+static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;
-static DEFINE_SPINLOCK(map_lock);
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
@@ -339,11 +341,11 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
sizeof(struct ib_uverbs_comp_event_desc));
}
-static unsigned int ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
+static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
struct file *filp,
struct poll_table_struct *wait)
{
- unsigned int pollflags = 0;
+ __poll_t pollflags = 0;
poll_wait(filp, &ev_queue->poll_wait, wait);
@@ -355,13 +357,13 @@ static unsigned int ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
return pollflags;
}
-static unsigned int ib_uverbs_async_event_poll(struct file *filp,
+static __poll_t ib_uverbs_async_event_poll(struct file *filp,
struct poll_table_struct *wait)
{
return ib_uverbs_event_poll(filp->private_data, filp, wait);
}
-static unsigned int ib_uverbs_comp_event_poll(struct file *filp,
+static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct ib_uverbs_completion_event_file *comp_ev_file =
@@ -1005,34 +1007,6 @@ static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
__stringify(IB_USER_VERBS_ABI_VERSION));
-static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);
-
-/*
- * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
- * requesting a new major number and doubling the number of max devices we
- * support. It's stupid, but simple.
- */
-static int find_overflow_devnum(void)
-{
- int ret;
-
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
- "infiniband_verbs");
- if (ret) {
- pr_err("user_verbs: couldn't register dynamic device number\n");
- return ret;
- }
- }
-
- ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
- if (ret >= IB_UVERBS_MAX_DEVICES)
- return -1;
-
- return ret;
-}
-
static void ib_uverbs_add_one(struct ib_device *device)
{
int devnum;
@@ -1062,24 +1036,15 @@ static void ib_uverbs_add_one(struct ib_device *device)
INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
- spin_lock(&map_lock);
devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
- if (devnum >= IB_UVERBS_MAX_DEVICES) {
- spin_unlock(&map_lock);
- devnum = find_overflow_devnum();
- if (devnum < 0)
- goto err;
-
- spin_lock(&map_lock);
- uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
- } else {
- uverbs_dev->devnum = devnum;
- base = devnum + IB_UVERBS_BASE_DEV;
- set_bit(devnum, dev_map);
- }
- spin_unlock(&map_lock);
+ if (devnum >= IB_UVERBS_MAX_DEVICES)
+ goto err;
+ uverbs_dev->devnum = devnum;
+ set_bit(devnum, dev_map);
+ if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
+ base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
+ else
+ base = IB_UVERBS_BASE_DEV + devnum;
rcu_assign_pointer(uverbs_dev->ib_dev, device);
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
@@ -1124,10 +1089,7 @@ err_class:
err_cdev:
cdev_del(&uverbs_dev->cdev);
- if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
- clear_bit(devnum, dev_map);
- else
- clear_bit(devnum, overflow_map);
+ clear_bit(devnum, dev_map);
err:
if (atomic_dec_and_test(&uverbs_dev->refcount))
@@ -1219,11 +1181,7 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
dev_set_drvdata(uverbs_dev->dev, NULL);
device_destroy(uverbs_class, uverbs_dev->cdev.dev);
cdev_del(&uverbs_dev->cdev);
-
- if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
- clear_bit(uverbs_dev->devnum, dev_map);
- else
- clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
+ clear_bit(uverbs_dev->devnum, dev_map);
if (device->disassociate_ucontext) {
/* We disassociate HW resources and immediately return.
@@ -1265,13 +1223,22 @@ static int __init ib_uverbs_init(void)
{
int ret;
- ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
+ ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR,
"infiniband_verbs");
if (ret) {
pr_err("user_verbs: couldn't register device number\n");
goto out;
}
+ ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
+ IB_UVERBS_NUM_DYNAMIC_MINOR,
+ "infiniband_verbs");
+ if (ret) {
+ pr_err("couldn't register dynamic device number\n");
+ goto out_alloc;
+ }
+
uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
if (IS_ERR(uverbs_class)) {
ret = PTR_ERR(uverbs_class);
@@ -1299,7 +1266,12 @@ out_class:
class_destroy(uverbs_class);
out_chrdev:
- unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
+ unregister_chrdev_region(dynamic_uverbs_dev,
+ IB_UVERBS_NUM_DYNAMIC_MINOR);
+
+out_alloc:
+ unregister_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR);
out:
return ret;
@@ -1309,9 +1281,10 @@ static void __exit ib_uverbs_cleanup(void)
{
ib_unregister_client(&uverbs_client);
class_destroy(uverbs_class);
- unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
+ unregister_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR);
+ unregister_chrdev_region(dynamic_uverbs_dev,
+ IB_UVERBS_NUM_DYNAMIC_MINOR);
}
module_init(ib_uverbs_init);
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index c3ee5d9b336d..b571176babbe 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -35,6 +35,7 @@
#include <rdma/ib_verbs.h>
#include <linux/bug.h>
#include <linux/file.h>
+#include <rdma/restrack.h>
#include "rdma_core.h"
#include "uverbs.h"
@@ -319,6 +320,8 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev,
obj->uobject.object = cq;
obj->uobject.user_handle = user_handle;
atomic_set(&cq->usecnt, 0);
+ cq->res.type = RDMA_RESTRACK_CQ;
+ rdma_restrack_add(&cq->res);
ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe);
if (ret)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 3fb8fb6cc824..16ebc6372c31 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -124,16 +124,24 @@ EXPORT_SYMBOL(ib_wc_status_msg);
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
switch (rate) {
- case IB_RATE_2_5_GBPS: return 1;
- case IB_RATE_5_GBPS: return 2;
- case IB_RATE_10_GBPS: return 4;
- case IB_RATE_20_GBPS: return 8;
- case IB_RATE_30_GBPS: return 12;
- case IB_RATE_40_GBPS: return 16;
- case IB_RATE_60_GBPS: return 24;
- case IB_RATE_80_GBPS: return 32;
- case IB_RATE_120_GBPS: return 48;
- default: return -1;
+ case IB_RATE_2_5_GBPS: return 1;
+ case IB_RATE_5_GBPS: return 2;
+ case IB_RATE_10_GBPS: return 4;
+ case IB_RATE_20_GBPS: return 8;
+ case IB_RATE_30_GBPS: return 12;
+ case IB_RATE_40_GBPS: return 16;
+ case IB_RATE_60_GBPS: return 24;
+ case IB_RATE_80_GBPS: return 32;
+ case IB_RATE_120_GBPS: return 48;
+ case IB_RATE_14_GBPS: return 6;
+ case IB_RATE_56_GBPS: return 22;
+ case IB_RATE_112_GBPS: return 45;
+ case IB_RATE_168_GBPS: return 67;
+ case IB_RATE_25_GBPS: return 10;
+ case IB_RATE_100_GBPS: return 40;
+ case IB_RATE_200_GBPS: return 80;
+ case IB_RATE_300_GBPS: return 120;
+ default: return -1;
}
}
EXPORT_SYMBOL(ib_rate_to_mult);
@@ -141,16 +149,24 @@ EXPORT_SYMBOL(ib_rate_to_mult);
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
switch (mult) {
- case 1: return IB_RATE_2_5_GBPS;
- case 2: return IB_RATE_5_GBPS;
- case 4: return IB_RATE_10_GBPS;
- case 8: return IB_RATE_20_GBPS;
- case 12: return IB_RATE_30_GBPS;
- case 16: return IB_RATE_40_GBPS;
- case 24: return IB_RATE_60_GBPS;
- case 32: return IB_RATE_80_GBPS;
- case 48: return IB_RATE_120_GBPS;
- default: return IB_RATE_PORT_CURRENT;
+ case 1: return IB_RATE_2_5_GBPS;
+ case 2: return IB_RATE_5_GBPS;
+ case 4: return IB_RATE_10_GBPS;
+ case 8: return IB_RATE_20_GBPS;
+ case 12: return IB_RATE_30_GBPS;
+ case 16: return IB_RATE_40_GBPS;
+ case 24: return IB_RATE_60_GBPS;
+ case 32: return IB_RATE_80_GBPS;
+ case 48: return IB_RATE_120_GBPS;
+ case 6: return IB_RATE_14_GBPS;
+ case 22: return IB_RATE_56_GBPS;
+ case 45: return IB_RATE_112_GBPS;
+ case 67: return IB_RATE_168_GBPS;
+ case 10: return IB_RATE_25_GBPS;
+ case 40: return IB_RATE_100_GBPS;
+ case 80: return IB_RATE_200_GBPS;
+ case 120: return IB_RATE_300_GBPS;
+ default: return IB_RATE_PORT_CURRENT;
}
}
EXPORT_SYMBOL(mult_to_ib_rate);
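Both tables encode the link speed as a multiple of 2.5 Gbps, the base SDR lane rate; the newly added entries round to the nearest integer multiple (14 Gbps ≈ 6 × 2.5, 56 Gbps ≈ 22 × 2.5, and so on), which is why the two switches must stay in mirror-image sync. A hedged round-trip check (the helper name is illustrative):

/* Sketch: verify a rate survives conversion through both helpers
 * above. demo_rate_roundtrips() is illustrative, not from the patch.
 */
static bool demo_rate_roundtrips(enum ib_rate rate)
{
	int mult = ib_rate_to_mult(rate);

	if (mult < 0)		/* -1: no multiplier defined for this rate */
		return false;
	return mult_to_ib_rate(mult) == rate;
}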
@@ -247,6 +263,10 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
}
+ pd->res.type = RDMA_RESTRACK_PD;
+ pd->res.kern_name = caller;
+ rdma_restrack_add(&pd->res);
+
if (mr_access_flags) {
struct ib_mr *mr;
@@ -296,6 +316,7 @@ void ib_dealloc_pd(struct ib_pd *pd)
requires the caller to guarantee we can't race here. */
WARN_ON(atomic_read(&pd->usecnt));
+ rdma_restrack_del(&pd->res);
/* Making dealloc_pd a void return is a WIP; no driver should return
an error here. */
ret = pd->device->dealloc_pd(pd);
@@ -421,8 +442,7 @@ static bool find_gid_index(const union ib_gid *gid,
const struct ib_gid_attr *gid_attr,
void *context)
{
- struct find_gid_index_context *ctx =
- (struct find_gid_index_context *)context;
+ struct find_gid_index_context *ctx = context;
if (ctx->gid_type != gid_attr->gid_type)
return false;
@@ -481,8 +501,53 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
+/* Resolve the destination MAC address and hop limit for a unicast
+ * destination GID entry, considering the source GID entry as well.
+ * ah_attr must have valid port_num and sgid_index.
+ */
+static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr)
+{
+ struct ib_gid_attr sgid_attr;
+ struct ib_global_route *grh;
+ int hop_limit = 0xff;
+ union ib_gid sgid;
+ int ret;
+
+ grh = rdma_ah_retrieve_grh(ah_attr);
+
+ ret = ib_query_gid(device,
+ rdma_ah_get_port_num(ah_attr),
+ grh->sgid_index,
+ &sgid, &sgid_attr);
+ if (ret || !sgid_attr.ndev) {
+ if (!ret)
+ ret = -ENXIO;
+ return ret;
+ }
+
+ /* If destination is link local and source GID is RoCEv1,
+ * IP stack is not used.
+ */
+ if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
+ sgid_attr.gid_type == IB_GID_TYPE_ROCE) {
+ rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
+ ah_attr->roce.dmac);
+ goto done;
+ }
+
+ ret = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
+ ah_attr->roce.dmac,
+ sgid_attr.ndev, &hop_limit);
+done:
+ dev_put(sgid_attr.ndev);
+
+ grh->hop_limit = hop_limit;
+ return ret;
+}
+
/*
- * This function creates ah from the incoming packet.
+ * This function initializes address handle attributes from the incoming packet.
* Incoming packet has dgid of the receiver node on which this code is
* getting executed and, sgid contains the GID of the sender.
*
@@ -490,13 +555,10 @@ EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
* as sgid and, sgid is used as dgid because sgid contains destinations
* GID whom to respond to.
*
- * This is why when calling rdma_addr_find_l2_eth_by_grh() function, the
- * position of arguments dgid and sgid do not match the order of the
- * parameters.
*/
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
- const struct ib_wc *wc, const struct ib_grh *grh,
- struct rdma_ah_attr *ah_attr)
+int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ struct rdma_ah_attr *ah_attr)
{
u32 flow_class;
u16 gid_index;
@@ -523,57 +585,33 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
if (ret)
return ret;
+ rdma_ah_set_sl(ah_attr, wc->sl);
+ rdma_ah_set_port_num(ah_attr, port_num);
+
if (rdma_protocol_roce(device, port_num)) {
- int if_index = 0;
u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
wc->vlan_id : 0xffff;
- struct net_device *idev;
- struct net_device *resolved_dev;
if (!(wc->wc_flags & IB_WC_GRH))
return -EPROTOTYPE;
- if (!device->get_netdev)
- return -EOPNOTSUPP;
-
- idev = device->get_netdev(device, port_num);
- if (!idev)
- return -ENODEV;
-
- ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid,
- ah_attr->roce.dmac,
- wc->wc_flags & IB_WC_WITH_VLAN ?
- NULL : &vlan_id,
- &if_index, &hoplimit);
- if (ret) {
- dev_put(idev);
- return ret;
- }
-
- resolved_dev = dev_get_by_index(&init_net, if_index);
- rcu_read_lock();
- if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
- resolved_dev))
- ret = -EHOSTUNREACH;
- rcu_read_unlock();
- dev_put(idev);
- dev_put(resolved_dev);
+ ret = get_sgid_index_from_eth(device, port_num,
+ vlan_id, &dgid,
+ gid_type, &gid_index);
if (ret)
return ret;
- ret = get_sgid_index_from_eth(device, port_num, vlan_id,
- &dgid, gid_type, &gid_index);
- if (ret)
- return ret;
- }
-
- rdma_ah_set_dlid(ah_attr, wc->slid);
- rdma_ah_set_sl(ah_attr, wc->sl);
- rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
- rdma_ah_set_port_num(ah_attr, port_num);
+ flow_class = be32_to_cpu(grh->version_tclass_flow);
+ rdma_ah_set_grh(ah_attr, &sgid,
+ flow_class & 0xFFFFF,
+ (u8)gid_index, hoplimit,
+ (flow_class >> 20) & 0xFF);
+ return ib_resolve_unicast_gid_dmac(device, ah_attr);
+ } else {
+ rdma_ah_set_dlid(ah_attr, wc->slid);
+ rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
- if (wc->wc_flags & IB_WC_GRH) {
- if (!rdma_cap_eth_ah(device, port_num)) {
+ if (wc->wc_flags & IB_WC_GRH) {
if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
ret = ib_find_cached_gid_by_port(device, &dgid,
IB_GID_TYPE_IB,
@@ -584,18 +622,17 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
} else {
gid_index = 0;
}
- }
-
- flow_class = be32_to_cpu(grh->version_tclass_flow);
- rdma_ah_set_grh(ah_attr, &sgid,
- flow_class & 0xFFFFF,
- (u8)gid_index, hoplimit,
- (flow_class >> 20) & 0xFF);
+ flow_class = be32_to_cpu(grh->version_tclass_flow);
+ rdma_ah_set_grh(ah_attr, &sgid,
+ flow_class & 0xFFFFF,
+ (u8)gid_index, hoplimit,
+ (flow_class >> 20) & 0xFF);
+ }
+ return 0;
}
- return 0;
}
-EXPORT_SYMBOL(ib_init_ah_from_wc);
+EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
const struct ib_grh *grh, u8 port_num)
@@ -603,7 +640,7 @@ struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
struct rdma_ah_attr ah_attr;
int ret;
- ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
+ ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
if (ret)
return ERR_PTR(ret);
@@ -850,7 +887,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (qp_init_attr->cap.max_rdma_ctxs)
rdma_rw_init_qp(device, qp_init_attr);
- qp = device->create_qp(pd, qp_init_attr, NULL);
+ qp = _ib_create_qp(device, pd, qp_init_attr, NULL);
if (IS_ERR(qp))
return qp;
@@ -860,7 +897,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
return ERR_PTR(ret);
}
- qp->device = device;
qp->real_qp = qp;
qp->uobject = NULL;
qp->qp_type = qp_init_attr->qp_type;
@@ -890,7 +926,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
atomic_inc(&qp_init_attr->srq->usecnt);
}
- qp->pd = pd;
qp->send_cq = qp_init_attr->send_cq;
qp->xrcd = NULL;
@@ -1269,16 +1304,8 @@ static int ib_resolve_eth_dmac(struct ib_device *device,
if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr)))
return -EINVAL;
- if (ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE)
- return 0;
-
grh = rdma_ah_retrieve_grh(ah_attr);
- if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw)) {
- rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
- ah_attr->roce.dmac);
- return 0;
- }
if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
__be32 addr = 0;
@@ -1290,40 +1317,52 @@ static int ib_resolve_eth_dmac(struct ib_device *device,
(char *)ah_attr->roce.dmac);
}
} else {
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
- int ifindex;
- int hop_limit;
-
- ret = ib_query_gid(device,
- rdma_ah_get_port_num(ah_attr),
- grh->sgid_index,
- &sgid, &sgid_attr);
-
- if (ret || !sgid_attr.ndev) {
- if (!ret)
- ret = -ENXIO;
- goto out;
- }
-
- ifindex = sgid_attr.ndev->ifindex;
+ ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
+ }
+ return ret;
+}
- ret =
- rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
- ah_attr->roce.dmac,
- NULL, &ifindex, &hop_limit);
+/**
+ * IB core internal function to modify QP attributes.
+ */
+static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ int ret;
- dev_put(sgid_attr.ndev);
+ if (rdma_ib_or_roce(qp->device, port)) {
+ if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
+ pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n",
+ __func__, qp->device->name);
+ attr->rq_psn &= 0xffffff;
+ }
- grh->hop_limit = hop_limit;
+ if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
+ pr_warn("%s: %s sq_psn overflow, masking to 24 bits\n",
+ __func__, qp->device->name);
+ attr->sq_psn &= 0xffffff;
+ }
}
-out:
+
+ ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
+ if (!ret && (attr_mask & IB_QP_PORT))
+ qp->port = attr->port_num;
+
return ret;
}
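The new _ib_modify_qp() clamps rq_psn and sq_psn to 24 bits before handing the attributes to the security layer, because IB packet sequence numbers occupy 24 bits on the wire; a value such as 0x1000005 would otherwise silently alias PSN 0x5. The clamp in isolation (illustrative helper name):

/* Sketch: the 24-bit PSN clamp performed above, standalone. */
static u32 demo_clamp_psn(u32 psn)
{
	if (psn & ~0xffffff)	/* anything above 0xffffff aliases */
		pr_warn("psn 0x%x overflows 24 bits, masking\n", psn);
	return psn & 0xffffff;
}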
+static bool is_qp_type_connected(const struct ib_qp *qp)
+{
+ return (qp->qp_type == IB_QPT_UC ||
+ qp->qp_type == IB_QPT_RC ||
+ qp->qp_type == IB_QPT_XRC_INI ||
+ qp->qp_type == IB_QPT_XRC_TGT);
+}
+
/**
* ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
- * @qp: The QP to modify.
+ * @ib_qp: The QP to modify.
* @attr: On input, specifies the QP attributes to modify. On output,
* the current values of selected QP attributes are returned.
* @attr_mask: A bit-mask used to specify which attributes of the QP
@@ -1332,21 +1371,20 @@ out:
* are being modified.
* It returns 0 on success and returns appropriate error code on error.
*/
-int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
+int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
+ struct ib_qp *qp = ib_qp->real_qp;
int ret;
- if (attr_mask & IB_QP_AV) {
+ if (attr_mask & IB_QP_AV &&
+ attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
+ is_qp_type_connected(qp)) {
ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
if (ret)
return ret;
}
- ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
- if (!ret && (attr_mask & IB_QP_PORT))
- qp->port = attr->port_num;
-
- return ret;
+ return _ib_modify_qp(qp, attr, attr_mask, udata);
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
@@ -1409,7 +1447,7 @@ int ib_modify_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask)
{
- return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL);
+ return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
@@ -1438,7 +1476,8 @@ int ib_close_qp(struct ib_qp *qp)
spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
atomic_dec(&real_qp->usecnt);
- ib_close_shared_qp_security(qp->qp_sec);
+ if (qp->qp_sec)
+ ib_close_shared_qp_security(qp->qp_sec);
kfree(qp);
return 0;
@@ -1502,6 +1541,7 @@ int ib_destroy_qp(struct ib_qp *qp)
if (!qp->uobject)
rdma_rw_cleanup_mrs(qp);
+ rdma_restrack_del(&qp->res);
ret = qp->device->destroy_qp(qp);
if (!ret) {
if (pd)
@@ -1544,6 +1584,8 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
cq->event_handler = event_handler;
cq->cq_context = cq_context;
atomic_set(&cq->usecnt, 0);
+ cq->res.type = RDMA_RESTRACK_CQ;
+ rdma_restrack_add(&cq->res);
}
return cq;
@@ -1562,6 +1604,7 @@ int ib_destroy_cq(struct ib_cq *cq)
if (atomic_read(&cq->usecnt))
return -EBUSY;
+ rdma_restrack_del(&cq->res);
return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);
@@ -1746,7 +1789,7 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
}
EXPORT_SYMBOL(ib_detach_mcast);
-struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
+struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
{
struct ib_xrcd *xrcd;
@@ -1764,7 +1807,7 @@ struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
return xrcd;
}
-EXPORT_SYMBOL(ib_alloc_xrcd);
+EXPORT_SYMBOL(__ib_alloc_xrcd);
int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
@@ -1789,11 +1832,11 @@ EXPORT_SYMBOL(ib_dealloc_xrcd);
* ib_create_wq - Creates a WQ associated with the specified protection
* domain.
* @pd: The protection domain associated with the WQ.
- * @wq_init_attr: A list of initial attributes required to create the
+ * @wq_attr: A list of initial attributes required to create the
* WQ. If WQ creation succeeds, then the attributes are updated to
* the actual capabilities of the created WQ.
*
- * wq_init_attr->max_wr and wq_init_attr->max_sge determine
+ * wq_attr->max_wr and wq_attr->max_sge determine
* the requested size of the WQ, and set to the actual values allocated
* on return.
* If ib_create_wq() succeeds, then max_wr and max_sge will always be
@@ -2155,16 +2198,16 @@ static void __ib_drain_sq(struct ib_qp *qp)
struct ib_send_wr swr = {}, *bad_swr;
int ret;
- swr.wr_cqe = &sdrain.cqe;
- sdrain.cqe.done = ib_drain_qp_done;
- init_completion(&sdrain.done);
-
ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
if (ret) {
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
return;
}
+ swr.wr_cqe = &sdrain.cqe;
+ sdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
ret = ib_post_send(qp, &swr, &bad_swr);
if (ret) {
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
@@ -2189,16 +2232,16 @@ static void __ib_drain_rq(struct ib_qp *qp)
struct ib_recv_wr rwr = {}, *bad_rwr;
int ret;
- rwr.wr_cqe = &rdrain.cqe;
- rdrain.cqe.done = ib_drain_qp_done;
- init_completion(&rdrain.done);
-
ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
if (ret) {
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
return;
}
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&rdrain.done);
+
ret = ib_post_recv(qp, &rwr, &bad_rwr);
if (ret) {
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ecbac91b2e14..ca32057e886f 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -43,20 +43,41 @@
#define ROCE_DRV_MODULE_VERSION "1.0.0"
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
-
-#define BNXT_RE_PAGE_SIZE_4K BIT(12)
-#define BNXT_RE_PAGE_SIZE_8K BIT(13)
-#define BNXT_RE_PAGE_SIZE_64K BIT(16)
-#define BNXT_RE_PAGE_SIZE_2M BIT(21)
-#define BNXT_RE_PAGE_SIZE_8M BIT(23)
-#define BNXT_RE_PAGE_SIZE_1G BIT(30)
-
-#define BNXT_RE_MAX_MR_SIZE BIT(30)
+#define BNXT_RE_PAGE_SHIFT_4K (12)
+#define BNXT_RE_PAGE_SHIFT_8K (13)
+#define BNXT_RE_PAGE_SHIFT_64K (16)
+#define BNXT_RE_PAGE_SHIFT_2M (21)
+#define BNXT_RE_PAGE_SHIFT_8M (23)
+#define BNXT_RE_PAGE_SHIFT_1G (30)
+
+#define BNXT_RE_PAGE_SIZE_4K BIT(BNXT_RE_PAGE_SHIFT_4K)
+#define BNXT_RE_PAGE_SIZE_8K BIT(BNXT_RE_PAGE_SHIFT_8K)
+#define BNXT_RE_PAGE_SIZE_64K BIT(BNXT_RE_PAGE_SHIFT_64K)
+#define BNXT_RE_PAGE_SIZE_2M BIT(BNXT_RE_PAGE_SHIFT_2M)
+#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M)
+#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G)
+
+#define BNXT_RE_MAX_MR_SIZE_LOW BIT(BNXT_RE_PAGE_SHIFT_1G)
+#define BNXT_RE_MAX_MR_SIZE_HIGH BIT(39)
+#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
+
+/* Number of MRs to reserve for PF, leaving remainder for VFs */
+#define BNXT_RE_RESVD_MR_FOR_PF (32 * 1024)
+#define BNXT_RE_MAX_GID_PER_VF 128
+
+/*
+ * Percentage of resources of each type reserved for PF.
+ * Remaining resources are divided equally among VFs.
+ * [0, 100]
+ */
+#define BNXT_RE_PCT_RSVD_FOR_PF 50
#define BNXT_RE_UD_QP_HW_STALL 0x400000
@@ -100,6 +121,7 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
#define BNXT_RE_FLAG_QOS_WORK_REG 5
#define BNXT_RE_FLAG_TASK_IN_PROG 6
+#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;
struct bnxt_en_dev *en_dev;
@@ -145,6 +167,9 @@ struct bnxt_re_dev {
struct bnxt_re_ah *sqp_ah;
struct bnxt_re_sqp_entries sqp_tbl[1024];
atomic_t nq_alloc_cnt;
+ u32 is_virtfn;
+ u32 num_vfs;
+ struct bnxt_qplib_roce_stats stats;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 7b28219eba46..77416bc61e6e 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -58,16 +58,55 @@
#include "hw_counters.h"
static const char * const bnxt_re_stat_name[] = {
- [BNXT_RE_ACTIVE_QP] = "active_qps",
- [BNXT_RE_ACTIVE_SRQ] = "active_srqs",
- [BNXT_RE_ACTIVE_CQ] = "active_cqs",
- [BNXT_RE_ACTIVE_MR] = "active_mrs",
- [BNXT_RE_ACTIVE_MW] = "active_mws",
- [BNXT_RE_RX_PKTS] = "rx_pkts",
- [BNXT_RE_RX_BYTES] = "rx_bytes",
- [BNXT_RE_TX_PKTS] = "tx_pkts",
- [BNXT_RE_TX_BYTES] = "tx_bytes",
- [BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors"
+ [BNXT_RE_ACTIVE_QP] = "active_qps",
+ [BNXT_RE_ACTIVE_SRQ] = "active_srqs",
+ [BNXT_RE_ACTIVE_CQ] = "active_cqs",
+ [BNXT_RE_ACTIVE_MR] = "active_mrs",
+ [BNXT_RE_ACTIVE_MW] = "active_mws",
+ [BNXT_RE_RX_PKTS] = "rx_pkts",
+ [BNXT_RE_RX_BYTES] = "rx_bytes",
+ [BNXT_RE_TX_PKTS] = "tx_pkts",
+ [BNXT_RE_TX_BYTES] = "tx_bytes",
+ [BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors",
+ [BNXT_RE_TO_RETRANSMITS] = "to_retransmits",
+ [BNXT_RE_SEQ_ERR_NAKS_RCVD] = "seq_err_naks_rcvd",
+ [BNXT_RE_MAX_RETRY_EXCEEDED] = "max_retry_exceeded",
+ [BNXT_RE_RNR_NAKS_RCVD] = "rnr_naks_rcvd",
+ [BNXT_RE_MISSING_RESP] = "missing_resp",
+ [BNXT_RE_UNRECOVERABLE_ERR] = "unrecoverable_err",
+ [BNXT_RE_BAD_RESP_ERR] = "bad_resp_err",
+ [BNXT_RE_LOCAL_QP_OP_ERR] = "local_qp_op_err",
+ [BNXT_RE_LOCAL_PROTECTION_ERR] = "local_protection_err",
+ [BNXT_RE_MEM_MGMT_OP_ERR] = "mem_mgmt_op_err",
+ [BNXT_RE_REMOTE_INVALID_REQ_ERR] = "remote_invalid_req_err",
+ [BNXT_RE_REMOTE_ACCESS_ERR] = "remote_access_err",
+ [BNXT_RE_REMOTE_OP_ERR] = "remote_op_err",
+ [BNXT_RE_DUP_REQ] = "dup_req",
+ [BNXT_RE_RES_EXCEED_MAX] = "res_exceed_max",
+ [BNXT_RE_RES_LENGTH_MISMATCH] = "res_length_mismatch",
+ [BNXT_RE_RES_EXCEEDS_WQE] = "res_exceeds_wqe",
+ [BNXT_RE_RES_OPCODE_ERR] = "res_opcode_err",
+ [BNXT_RE_RES_RX_INVALID_RKEY] = "res_rx_invalid_rkey",
+ [BNXT_RE_RES_RX_DOMAIN_ERR] = "res_rx_domain_err",
+ [BNXT_RE_RES_RX_NO_PERM] = "res_rx_no_perm",
+ [BNXT_RE_RES_RX_RANGE_ERR] = "res_rx_range_err",
+ [BNXT_RE_RES_TX_INVALID_RKEY] = "res_tx_invalid_rkey",
+ [BNXT_RE_RES_TX_DOMAIN_ERR] = "res_tx_domain_err",
+ [BNXT_RE_RES_TX_NO_PERM] = "res_tx_no_perm",
+ [BNXT_RE_RES_TX_RANGE_ERR] = "res_tx_range_err",
+ [BNXT_RE_RES_IRRQ_OFLOW] = "res_irrq_oflow",
+ [BNXT_RE_RES_UNSUP_OPCODE] = "res_unsup_opcode",
+ [BNXT_RE_RES_UNALIGNED_ATOMIC] = "res_unaligned_atomic",
+ [BNXT_RE_RES_REM_INV_ERR] = "res_rem_inv_err",
+ [BNXT_RE_RES_MEM_ERROR] = "res_mem_err",
+ [BNXT_RE_RES_SRQ_ERR] = "res_srq_err",
+ [BNXT_RE_RES_CMP_ERR] = "res_cmp_err",
+ [BNXT_RE_RES_INVALID_DUP_RKEY] = "res_invalid_dup_rkey",
+ [BNXT_RE_RES_WQE_FORMAT_ERR] = "res_wqe_format_err",
+ [BNXT_RE_RES_CQ_LOAD_ERR] = "res_cq_load_err",
+ [BNXT_RE_RES_SRQ_LOAD_ERR] = "res_srq_load_err",
+ [BNXT_RE_RES_TX_PCI_ERR] = "res_tx_pci_err",
+ [BNXT_RE_RES_RX_PCI_ERR] = "res_rx_pci_err"
};
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
@@ -76,6 +115,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct ctx_hw_stats *bnxt_re_stats = rdev->qplib_ctx.stats.dma;
+ int rc = 0;
if (!port || !stats)
return -EINVAL;
@@ -97,6 +137,91 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
stats->value[BNXT_RE_TX_BYTES] =
le64_to_cpu(bnxt_re_stats->tx_ucast_bytes);
}
+ if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
+ rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, &rdev->stats);
+ if (rc)
+ clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
+ &rdev->flags);
+ stats->value[BNXT_RE_TO_RETRANSMITS] =
+ rdev->stats.to_retransmits;
+ stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] =
+ rdev->stats.seq_err_naks_rcvd;
+ stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] =
+ rdev->stats.max_retry_exceeded;
+ stats->value[BNXT_RE_RNR_NAKS_RCVD] =
+ rdev->stats.rnr_naks_rcvd;
+ stats->value[BNXT_RE_MISSING_RESP] =
+ rdev->stats.missing_resp;
+ stats->value[BNXT_RE_UNRECOVERABLE_ERR] =
+ rdev->stats.unrecoverable_err;
+ stats->value[BNXT_RE_BAD_RESP_ERR] =
+ rdev->stats.bad_resp_err;
+ stats->value[BNXT_RE_LOCAL_QP_OP_ERR] =
+ rdev->stats.local_qp_op_err;
+ stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] =
+ rdev->stats.local_protection_err;
+ stats->value[BNXT_RE_MEM_MGMT_OP_ERR] =
+ rdev->stats.mem_mgmt_op_err;
+ stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] =
+ rdev->stats.remote_invalid_req_err;
+ stats->value[BNXT_RE_REMOTE_ACCESS_ERR] =
+ rdev->stats.remote_access_err;
+ stats->value[BNXT_RE_REMOTE_OP_ERR] =
+ rdev->stats.remote_op_err;
+ stats->value[BNXT_RE_DUP_REQ] =
+ rdev->stats.dup_req;
+ stats->value[BNXT_RE_RES_EXCEED_MAX] =
+ rdev->stats.res_exceed_max;
+ stats->value[BNXT_RE_RES_LENGTH_MISMATCH] =
+ rdev->stats.res_length_mismatch;
+ stats->value[BNXT_RE_RES_EXCEEDS_WQE] =
+ rdev->stats.res_exceeds_wqe;
+ stats->value[BNXT_RE_RES_OPCODE_ERR] =
+ rdev->stats.res_opcode_err;
+ stats->value[BNXT_RE_RES_RX_INVALID_RKEY] =
+ rdev->stats.res_rx_invalid_rkey;
+ stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] =
+ rdev->stats.res_rx_domain_err;
+ stats->value[BNXT_RE_RES_RX_NO_PERM] =
+ rdev->stats.res_rx_no_perm;
+ stats->value[BNXT_RE_RES_RX_RANGE_ERR] =
+ rdev->stats.res_rx_range_err;
+ stats->value[BNXT_RE_RES_TX_INVALID_RKEY] =
+ rdev->stats.res_tx_invalid_rkey;
+ stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] =
+ rdev->stats.res_tx_domain_err;
+ stats->value[BNXT_RE_RES_TX_NO_PERM] =
+ rdev->stats.res_tx_no_perm;
+ stats->value[BNXT_RE_RES_TX_RANGE_ERR] =
+ rdev->stats.res_tx_range_err;
+ stats->value[BNXT_RE_RES_IRRQ_OFLOW] =
+ rdev->stats.res_irrq_oflow;
+ stats->value[BNXT_RE_RES_UNSUP_OPCODE] =
+ rdev->stats.res_unsup_opcode;
+ stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] =
+ rdev->stats.res_unaligned_atomic;
+ stats->value[BNXT_RE_RES_REM_INV_ERR] =
+ rdev->stats.res_rem_inv_err;
+ stats->value[BNXT_RE_RES_MEM_ERROR] =
+ rdev->stats.res_mem_error;
+ stats->value[BNXT_RE_RES_SRQ_ERR] =
+ rdev->stats.res_srq_err;
+ stats->value[BNXT_RE_RES_CMP_ERR] =
+ rdev->stats.res_cmp_err;
+ stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] =
+ rdev->stats.res_invalid_dup_rkey;
+ stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] =
+ rdev->stats.res_wqe_format_err;
+ stats->value[BNXT_RE_RES_CQ_LOAD_ERR] =
+ rdev->stats.res_cq_load_err;
+ stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] =
+ rdev->stats.res_srq_load_err;
+ stats->value[BNXT_RE_RES_TX_PCI_ERR] =
+ rdev->stats.res_tx_pci_err;
+ stats->value[BNXT_RE_RES_RX_PCI_ERR] =
+ rdev->stats.res_rx_pci_err;
+ }
+
return ARRAY_SIZE(bnxt_re_stat_name);
}
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index be0dc0093b58..a01a922717d5 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -51,6 +51,45 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_PKTS,
BNXT_RE_TX_BYTES,
BNXT_RE_RECOVERABLE_ERRORS,
+ BNXT_RE_TO_RETRANSMITS,
+ BNXT_RE_SEQ_ERR_NAKS_RCVD,
+ BNXT_RE_MAX_RETRY_EXCEEDED,
+ BNXT_RE_RNR_NAKS_RCVD,
+ BNXT_RE_MISSING_RESP,
+ BNXT_RE_UNRECOVERABLE_ERR,
+ BNXT_RE_BAD_RESP_ERR,
+ BNXT_RE_LOCAL_QP_OP_ERR,
+ BNXT_RE_LOCAL_PROTECTION_ERR,
+ BNXT_RE_MEM_MGMT_OP_ERR,
+ BNXT_RE_REMOTE_INVALID_REQ_ERR,
+ BNXT_RE_REMOTE_ACCESS_ERR,
+ BNXT_RE_REMOTE_OP_ERR,
+ BNXT_RE_DUP_REQ,
+ BNXT_RE_RES_EXCEED_MAX,
+ BNXT_RE_RES_LENGTH_MISMATCH,
+ BNXT_RE_RES_EXCEEDS_WQE,
+ BNXT_RE_RES_OPCODE_ERR,
+ BNXT_RE_RES_RX_INVALID_RKEY,
+ BNXT_RE_RES_RX_DOMAIN_ERR,
+ BNXT_RE_RES_RX_NO_PERM,
+ BNXT_RE_RES_RX_RANGE_ERR,
+ BNXT_RE_RES_TX_INVALID_RKEY,
+ BNXT_RE_RES_TX_DOMAIN_ERR,
+ BNXT_RE_RES_TX_NO_PERM,
+ BNXT_RE_RES_TX_RANGE_ERR,
+ BNXT_RE_RES_IRRQ_OFLOW,
+ BNXT_RE_RES_UNSUP_OPCODE,
+ BNXT_RE_RES_UNALIGNED_ATOMIC,
+ BNXT_RE_RES_REM_INV_ERR,
+ BNXT_RE_RES_MEM_ERROR,
+ BNXT_RE_RES_SRQ_ERR,
+ BNXT_RE_RES_CMP_ERR,
+ BNXT_RE_RES_INVALID_DUP_RKEY,
+ BNXT_RE_RES_WQE_FORMAT_ERR,
+ BNXT_RE_RES_CQ_LOAD_ERR,
+ BNXT_RE_RES_SRQ_LOAD_ERR,
+ BNXT_RE_RES_TX_PCI_ERR,
+ BNXT_RE_RES_RX_PCI_ERR,
BNXT_RE_NUM_COUNTERS
};
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 2032db7db766..9b8fa77b8831 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -141,12 +141,13 @@ int bnxt_re_query_device(struct ib_device *ibdev,
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
memset(ib_attr, 0, sizeof(*ib_attr));
-
- ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
+ memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
+ min(sizeof(dev_attr->fw_ver),
+ sizeof(ib_attr->fw_ver)));
bnxt_qplib_get_guid(rdev->netdev->dev_addr,
(u8 *)&ib_attr->sys_image_guid);
ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
- ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
+ ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
@@ -247,8 +248,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
IB_PORT_VENDOR_CLASS_SUP |
IB_PORT_IP_BASED_GIDS;
- /* Max MSG size set to 2G for now */
- port_attr->max_msg_sz = 0x80000000;
+ port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
port_attr->bad_pkey_cntr = 0;
port_attr->qkey_viol_cntr = 0;
port_attr->pkey_tbl_len = dev_attr->max_pkey;
@@ -281,6 +281,15 @@ int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
+ rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
+ rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
+}
+
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
u16 index, u16 *pkey)
{
@@ -532,7 +541,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
pbl_tbl = dma_addr;
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
- BNXT_RE_FENCE_PBL_SIZE, false);
+ BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
goto fail;
@@ -1018,6 +1027,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_re_qp *qp;
struct bnxt_re_cq *cq;
+ struct bnxt_re_srq *srq;
int rc, entries;
if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
@@ -1073,9 +1083,15 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
}
if (qp_init_attr->srq) {
- dev_err(rdev_to_dev(rdev), "SRQ not supported");
- rc = -ENOTSUPP;
- goto fail;
+ srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
+ ib_srq);
+ if (!srq) {
+ dev_err(rdev_to_dev(rdev), "SRQ not found");
+ rc = -EINVAL;
+ goto fail;
+ }
+ qp->qplib_qp.srq = &srq->qplib_srq;
+ qp->qplib_qp.rq.max_wqe = 0;
} else {
/* Allocate 1 more than what's provided so posting max doesn't
* mean empty
@@ -1280,6 +1296,237 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
}
}
+/* Shared Receive Queues */
+int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+ struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
+ struct bnxt_qplib_nq *nq = NULL;
+ int rc;
+
+ if (qplib_srq->cq)
+ nq = qplib_srq->cq->nq;
+ rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
+ return rc;
+ }
+
+ if (srq->umem && !IS_ERR(srq->umem))
+ ib_umem_release(srq->umem);
+ kfree(srq);
+ atomic_dec(&rdev->srq_count);
+ if (nq)
+ nq->budget--;
+ return 0;
+}
+
+static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
+ struct bnxt_re_pd *pd,
+ struct bnxt_re_srq *srq,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_srq_req ureq;
+ struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
+ struct ib_umem *umem;
+ int bytes = 0;
+ struct ib_ucontext *context = pd->ib_pd.uobject->context;
+ struct bnxt_re_ucontext *cntx = container_of(context,
+ struct bnxt_re_ucontext,
+ ib_uctx);
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
+ return -EFAULT;
+
+ bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ bytes = PAGE_ALIGN(bytes);
+ umem = ib_umem_get(context, ureq.srqva, bytes,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(umem))
+ return PTR_ERR(umem);
+
+ srq->umem = umem;
+ qplib_srq->nmap = umem->nmap;
+ qplib_srq->sglist = umem->sg_head.sgl;
+ qplib_srq->srq_handle = ureq.srq_handle;
+ qplib_srq->dpi = &cntx->dpi;
+
+ return 0;
+}
+
+struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_re_srq *srq;
+ struct bnxt_qplib_nq *nq = NULL;
+ int rc, entries;
+
+ if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
+ dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
+ rc = -ENOTSUPP;
+ goto exit;
+ }
+
+ srq = kzalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ srq->rdev = rdev;
+ srq->qplib_srq.pd = &pd->qplib_pd;
+ srq->qplib_srq.dpi = &rdev->dpi_privileged;
+ /* Allocate 1 more than what's provided so posting max doesn't
+ * mean empty
+ */
+ entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
+ if (entries > dev_attr->max_srq_wqes + 1)
+ entries = dev_attr->max_srq_wqes + 1;
+
+ srq->qplib_srq.max_wqe = entries;
+ srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
+ srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
+ srq->srq_limit = srq_init_attr->attr.srq_limit;
+ srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
+ nq = &rdev->nq[0];
+
+ if (udata) {
+ rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
+ if (rc)
+ goto fail;
+ }
+
+ rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
+ goto fail;
+ }
+
+ if (udata) {
+ struct bnxt_re_srq_resp resp;
+
+ resp.srqid = srq->qplib_srq.id;
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
+ bnxt_qplib_destroy_srq(&rdev->qplib_res,
+ &srq->qplib_srq);
+ goto exit;
+ }
+ }
+ if (nq)
+ nq->budget++;
+ atomic_inc(&rdev->srq_count);
+
+ return &srq->ib_srq;
+
+fail:
+ if (udata && srq->umem && !IS_ERR(srq->umem)) {
+ ib_umem_release(srq->umem);
+ srq->umem = NULL;
+ }
+
+ kfree(srq);
+exit:
+ return ERR_PTR(rc);
+}
+
+int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+ int rc;
+
+ switch (srq_attr_mask) {
+ case IB_SRQ_MAX_WR:
+ /* SRQ resize is not supported */
+ break;
+ case IB_SRQ_LIMIT:
+ /* Change the SRQ threshold */
+ if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
+ return -EINVAL;
+
+ srq->qplib_srq.threshold = srq_attr->srq_limit;
+ rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
+ return rc;
+ }
+ /* On success, update the shadow */
+ srq->srq_limit = srq_attr->srq_limit;
+ /* No need to build and send a response back to udata */
+ break;
+ default:
+ dev_err(rdev_to_dev(rdev),
+ "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_srq tsrq;
+ struct bnxt_re_dev *rdev = srq->rdev;
+ int rc;
+
+ /* Get live SRQ attr */
+ tsrq.qplib_srq.id = srq->qplib_srq.id;
+ rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
+ return rc;
+ }
+ srq_attr->max_wr = srq->qplib_srq.max_wqe;
+ srq_attr->max_sge = srq->qplib_srq.max_sge;
+ srq_attr->srq_limit = tsrq.qplib_srq.threshold;
+
+ return 0;
+}
+
+int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_qplib_swqe wqe;
+ unsigned long flags;
+ int rc = 0, payload_sz = 0;
+
+ spin_lock_irqsave(&srq->lock, flags);
+ while (wr) {
+ /* Transcribe each ib_recv_wr to qplib_swqe */
+ wqe.num_sge = wr->num_sge;
+ payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
+ wr->num_sge);
+ wqe.wr_id = wr->wr_id;
+ wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
+
+ rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
+ if (rc) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return rc;
+}
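A consumer feeds buffers to this handler through the generic ib_post_srq_recv() verbs entry point; a hedged caller-side sketch (the DMA address, length, and lkey setup are illustrative):

/* Sketch: posting one receive buffer to an SRQ. ib_post_srq_recv()
 * dispatches to bnxt_re_post_srq_recv() above; buffer details are
 * illustrative.
 */
static int demo_post_one(struct ib_srq *srq, u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = dma_addr,	/* recover the buffer on completion */
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}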
static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp1_qp,
int qp_attr_mask)
@@ -2295,10 +2542,14 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
{
- struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
- struct bnxt_re_dev *rdev = cq->rdev;
int rc;
- struct bnxt_qplib_nq *nq = cq->qplib_cq.nq;
+ struct bnxt_re_cq *cq;
+ struct bnxt_qplib_nq *nq;
+ struct bnxt_re_dev *rdev;
+
+ cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
+ rdev = cq->rdev;
+ nq = cq->qplib_cq.nq;
rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
if (rc) {
@@ -2308,12 +2559,11 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
if (!IS_ERR_OR_NULL(cq->umem))
ib_umem_release(cq->umem);
- if (cq) {
- kfree(cq->cql);
- kfree(cq);
- }
atomic_dec(&rdev->cq_count);
nq->budget--;
+ kfree(cq->cql);
+ kfree(cq);
+
return 0;
}
@@ -3078,7 +3328,8 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
mr->qplib_mr.hwq.level = PBL_LVL_MAX;
mr->qplib_mr.total_size = -1; /* Infinite length */
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
+ PAGE_SIZE);
if (rc)
goto fail_mr;
@@ -3104,10 +3355,8 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
int rc;
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc) {
+ if (rc)
dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
- return rc;
- }
if (mr->pages) {
rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
@@ -3170,7 +3419,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc)
- goto fail;
+ goto bail;
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->ib_mr.rkey = mr->ib_mr.lkey;
@@ -3192,9 +3441,10 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
return &mr->ib_mr;
fail_mr:
- bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-fail:
kfree(mr->pages);
+fail:
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+bail:
kfree(mr);
return ERR_PTR(rc);
}
@@ -3248,6 +3498,46 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
return rc;
}
+static int bnxt_re_page_size_ok(int page_shift)
+{
+ switch (page_shift) {
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
+ int page_shift)
+{
+ u64 *pbl_tbl = pbl_tbl_orig;
+ u64 paddr;
+ u64 page_mask = (1ULL << page_shift) - 1;
+ int i, pages;
+ struct scatterlist *sg;
+ int entry;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ pages = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (i = 0; i < pages; i++) {
+ paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
+ if (pbl_tbl == pbl_tbl_orig)
+ *pbl_tbl++ = paddr & ~page_mask;
+ else if ((paddr & page_mask) == 0)
+ *pbl_tbl++ = paddr;
+ }
+ }
+ return pbl_tbl - pbl_tbl_orig;
+}
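Worked example: with 4K system pages (PAGE_SHIFT = 12) and page_shift = 21 (2M hardware pages), page_mask is 0x1fffff, so after the first entry only addresses whose low 21 bits are zero are stored; a 2M-aligned region of N bytes therefore produces N >> 21 PBL entries instead of N >> 12, and the function returns that smaller count.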
+
/* uverbs */
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
@@ -3257,10 +3547,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_mr *mr;
struct ib_umem *umem;
- u64 *pbl_tbl, *pbl_tbl_orig;
- int i, umem_pgs, pages, rc;
- struct scatterlist *sg;
- int entry;
+ u64 *pbl_tbl = NULL;
+ int umem_pgs, page_shift, rc;
if (length > BNXT_RE_MAX_MR_SIZE) {
dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
@@ -3277,64 +3565,68 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
+ goto free_mr;
+ }
+ /* The fixed portion of the rkey is the same as the lkey */
+ mr->ib_mr.rkey = mr->qplib_mr.rkey;
+
umem = ib_umem_get(ib_pd->uobject->context, start, length,
mr_access_flags, 0);
if (IS_ERR(umem)) {
dev_err(rdev_to_dev(rdev), "Failed to get umem");
rc = -EFAULT;
- goto free_mr;
+ goto free_mrw;
}
mr->ib_umem = umem;
- rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
- goto release_umem;
- }
- /* The fixed portion of the rkey is the same as the lkey */
- mr->ib_mr.rkey = mr->qplib_mr.rkey;
-
mr->qplib_mr.va = virt_addr;
umem_pgs = ib_umem_page_count(umem);
if (!umem_pgs) {
dev_err(rdev_to_dev(rdev), "umem is invalid!");
rc = -EINVAL;
- goto free_mrw;
+ goto free_umem;
}
mr->qplib_mr.total_size = length;
pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
if (!pbl_tbl) {
- rc = -EINVAL;
- goto free_mrw;
+ rc = -ENOMEM;
+ goto free_umem;
}
- pbl_tbl_orig = pbl_tbl;
- if (umem->hugetlb) {
- dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
+ page_shift = umem->page_shift;
+
+ if (!bnxt_re_page_size_ok(page_shift)) {
+ dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
rc = -EFAULT;
goto fail;
}
- if (umem->page_shift != PAGE_SHIFT) {
- dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
- rc = -EFAULT;
+ if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
+ dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
+ length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
+ rc = -EINVAL;
goto fail;
}
- /* Map umem buf ptrs to the PBL */
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- pages = sg_dma_len(sg) >> umem->page_shift;
- for (i = 0; i < pages; i++, pbl_tbl++)
- *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
+ if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
+ page_shift = BNXT_RE_PAGE_SHIFT_2M;
+ dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
+ 1 << page_shift);
}
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
- umem_pgs, false);
+
+ /* Map umem buf ptrs to the PBL */
+ umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
+ umem_pgs, false, 1 << page_shift);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to register user MR");
goto fail;
}
- kfree(pbl_tbl_orig);
+ kfree(pbl_tbl);
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->ib_mr.rkey = mr->qplib_mr.lkey;
@@ -3342,11 +3634,11 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
return &mr->ib_mr;
fail:
- kfree(pbl_tbl_orig);
+ kfree(pbl_tbl);
+free_umem:
+ ib_umem_release(umem);
free_mrw:
bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-release_umem:
- ib_umem_release(umem);
free_mr:
kfree(mr);
return ERR_PTR(rc);
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 1df11ed272ea..423ebe012f95 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -68,6 +68,15 @@ struct bnxt_re_ah {
struct bnxt_qplib_ah qplib_ah;
};
+struct bnxt_re_srq {
+ struct bnxt_re_dev *rdev;
+ u32 srq_limit;
+ struct ib_srq ib_srq;
+ struct bnxt_qplib_srq qplib_srq;
+ struct ib_umem *umem;
+ spinlock_t lock; /* protect srq */
+};
+
struct bnxt_re_qp {
struct list_head list;
struct bnxt_re_dev *rdev;
@@ -143,6 +152,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
+void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
u16 index, u16 *pkey);
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
@@ -164,6 +174,16 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_destroy_ah(struct ib_ah *ah);
+struct ib_srq *bnxt_re_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata);
+int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
+int bnxt_re_destroy_srq(struct ib_srq *srq);
+int bnxt_re_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index aafc19aa5de1..508d00a5a106 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -80,6 +80,79 @@ static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
+/* SR-IOV helper functions */
+
+static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
+{
+ struct bnxt *bp;
+
+ bp = netdev_priv(rdev->en_dev->net);
+ if (BNXT_VF(bp))
+ rdev->is_virtfn = 1;
+}
+
+/* Set the maximum number of each resource that the driver actually wants
+ * to allocate. This may be up to the maximum number the firmware has
+ * reserved for the function. The driver may choose to allocate fewer
+ * resources than the firmware maximum.
+ */
+static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+{
+ u32 vf_qps = 0, vf_srqs = 0, vf_cqs = 0, vf_mrws = 0, vf_gids = 0;
+ u32 i;
+ u32 vf_pct;
+ u32 num_vfs;
+ struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+
+ rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
+ dev_attr->max_qp);
+
+ rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
+ /* Use max_mr from fw since max_mrw does not get set */
+ rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count,
+ dev_attr->max_mr);
+ rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
+ dev_attr->max_srq);
+ rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT,
+ dev_attr->max_cq);
+
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ rdev->qplib_ctx.tqm_count[i] =
+ rdev->dev_attr.tqm_alloc_reqs[i];
+
+ if (rdev->num_vfs) {
+ /*
+ * Reserve a set of resources for the PF. Divide the remaining
+ * resources among the VFs
+ */
+ vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
+ num_vfs = 100 * rdev->num_vfs;
+ vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs;
+ vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs;
+ vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs;
+ /*
+ * The driver allows many more MRs than other resources. If the
+ * firmware does also, then reserve a fixed amount for the PF
+ * and divide the rest among VFs. VFs may use many MRs for NFS
+ * mounts, iSER, NVMe applications, etc. If the firmware
+ * severely restricts the number of MRs, then let PF have
+ * half and divide the rest among VFs, as for the other
+ * resource types.
+ */
+ if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K)
+ vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs;
+ else
+ vf_mrws = (rdev->qplib_ctx.mrw_count -
+ BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs;
+ vf_gids = BNXT_RE_MAX_GID_PER_VF;
+ }
+ rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws;
+ rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids;
+ rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps;
+ rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs;
+ rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs;
+}
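Worked example: with BNXT_RE_PCT_RSVD_FOR_PF at 50 and four VFs, vf_pct = 50 and num_vfs = 400, so each VF gets qpc_count * 50 / 400, i.e. 12.5% of the QP contexts (likewise for SRQs and CQs), leaving half for the PF. For MRs, if the firmware limit is at least 64K, the fixed BNXT_RE_RESVD_MR_FOR_PF (32K) is held back instead and the remainder is split evenly, e.g. (262144 - 32768) / 4 = 57344 MRs per VF at the 256K ceiling.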
+
/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
@@ -91,6 +164,15 @@ static void bnxt_re_start(void *p)
static void bnxt_re_sriov_config(void *p, int num_vfs)
{
+ struct bnxt_re_dev *rdev = p;
+
+ if (!rdev)
+ return;
+
+ rdev->num_vfs = num_vfs;
+ bnxt_re_set_resource_limits(rdev);
+ bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
+ &rdev->qplib_ctx);
}
static void bnxt_re_shutdown(void *p)
@@ -417,7 +499,7 @@ static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
return ERR_PTR(-EINVAL);
if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
- dev_dbg(&pdev->dev,
+ dev_info(&pdev->dev,
"%s: probe error: RoCE is not supported on this device",
ROCE_DRV_MODULE_NAME);
return ERR_PTR(-ENODEV);
@@ -490,6 +572,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->query_port = bnxt_re_query_port;
ibdev->get_port_immutable = bnxt_re_get_port_immutable;
+ ibdev->get_dev_fw_str = bnxt_re_query_fw_str;
ibdev->query_pkey = bnxt_re_query_pkey;
ibdev->query_gid = bnxt_re_query_gid;
ibdev->get_netdev = bnxt_re_get_netdev;
@@ -505,6 +588,12 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->query_ah = bnxt_re_query_ah;
ibdev->destroy_ah = bnxt_re_destroy_ah;
+ ibdev->create_srq = bnxt_re_create_srq;
+ ibdev->modify_srq = bnxt_re_modify_srq;
+ ibdev->query_srq = bnxt_re_query_srq;
+ ibdev->destroy_srq = bnxt_re_destroy_srq;
+ ibdev->post_srq_recv = bnxt_re_post_srq_recv;
+
ibdev->create_qp = bnxt_re_create_qp;
ibdev->modify_qp = bnxt_re_modify_qp;
ibdev->query_qp = bnxt_re_query_qp;
@@ -541,14 +630,6 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
}
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->dev_attr.fw_ver);
-}
-
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -558,12 +639,10 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
}
static DEVICE_ATTR(hw_rev, 0444, show_rev, NULL);
-static DEVICE_ATTR(fw_rev, 0444, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, 0444, show_hca, NULL);
static struct device_attribute *bnxt_re_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_rev,
&dev_attr_hca_type
};
@@ -616,10 +695,10 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
return rdev;
}
-static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
- struct creq_func_event *aeqe)
+static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
+ *unaffi_async)
{
- switch (aeqe->event) {
+ switch (unaffi_async->event) {
case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
break;
case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
@@ -648,6 +727,93 @@ static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
return 0;
}
+static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
+ struct bnxt_re_qp *qp)
+{
+ struct ib_event event;
+
+ memset(&event, 0, sizeof(event));
+ if (qp->qplib_qp.srq) {
+ event.device = &qp->rdev->ibdev;
+ event.element.qp = &qp->ib_qp;
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ }
+
+ if (event.device && qp->ib_qp.event_handler)
+ qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
+
+ return 0;
+}
+
+static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
+ void *obj)
+{
+ int rc = 0;
+ u8 event;
+
+ if (!obj)
+ return rc; /* QP was already dead, still return success */
+
+ event = affi_async->event;
+ if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
+ struct bnxt_qplib_qp *lib_qp = obj;
+ struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
+ qplib_qp);
+ rc = bnxt_re_handle_qp_async_event(affi_async, qp);
+ }
+ return rc;
+}
+
+static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
+ void *aeqe, void *obj)
+{
+ struct creq_qp_event *affi_async;
+ struct creq_func_event *unaffi_async;
+ u8 type;
+ int rc;
+
+ type = ((struct creq_base *)aeqe)->type;
+ if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
+ unaffi_async = aeqe;
+ rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
+ } else {
+ affi_async = aeqe;
+ rc = bnxt_re_handle_affi_async_event(affi_async, obj);
+ }
+
+ return rc;
+}
+
+static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_srq *handle, u8 event)
+{
+ struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
+ qplib_srq);
+ struct ib_event ib_event;
+ int rc = 0;
+
+ if (!srq) {
+ dev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
+ ROCE_DRV_MODULE_NAME);
+ rc = -EINVAL;
+ goto done;
+ }
+ ib_event.device = &srq->rdev->ibdev;
+ ib_event.element.srq = &srq->ib_srq;
+ if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
+ ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ else
+ ib_event.event = IB_EVENT_SRQ_ERR;
+
+ if (srq->ib_srq.event_handler) {
+ /* Lock event_handler? */
+ (*srq->ib_srq.event_handler)(&ib_event,
+ srq->ib_srq.srq_context);
+ }
+done:
+ return rc;
+}
+
static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *handle)
{
@@ -690,7 +856,8 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
i - 1, rdev->msix_entries[i].vector,
rdev->msix_entries[i].db_offset,
- &bnxt_re_cqn_handler, NULL);
+ &bnxt_re_cqn_handler,
+ &bnxt_re_srqn_handler);
if (rc) {
dev_err(rdev_to_dev(rdev),
@@ -734,7 +901,8 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
+ rdev->is_virtfn);
if (rc)
goto fail;
@@ -1035,19 +1203,6 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
}
}
-static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
-{
- u32 i;
-
- rdev->qplib_ctx.qpc_count = BNXT_RE_MAX_QPC_COUNT;
- rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT;
- rdev->qplib_ctx.srqc_count = BNXT_RE_MAX_SRQC_COUNT;
- rdev->qplib_ctx.cq_count = BNXT_RE_MAX_CQ_COUNT;
- for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
- rdev->qplib_ctx.tqm_count[i] =
- rdev->dev_attr.tqm_alloc_reqs[i];
-}
-
/* Worker thread for polling periodic events. Now used for QoS programming */
static void bnxt_re_worker(struct work_struct *work)
{
@@ -1070,6 +1225,9 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ /* Check whether VF or PF */
+ bnxt_re_get_sriov_func_type(rdev);
+
rc = bnxt_re_request_msix(rdev);
if (rc) {
pr_err("Failed to get MSI-X vectors: %#x\n", rc);
@@ -1101,16 +1259,18 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
(rdev->en_dev->pdev, &rdev->rcfw,
rdev->msix_entries[BNXT_RE_AEQ_IDX].vector,
rdev->msix_entries[BNXT_RE_AEQ_IDX].db_offset,
- 0, &bnxt_re_aeq_handler);
+ rdev->is_virtfn, &bnxt_re_aeq_handler);
if (rc) {
pr_err("Failed to enable RCFW channel: %#x\n", rc);
goto free_ring;
}
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
+ rdev->is_virtfn);
if (rc)
goto disable_rcfw;
- bnxt_re_set_resource_limits(rdev);
+ if (!rdev->is_virtfn)
+ bnxt_re_set_resource_limits(rdev);
rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0);
if (rc) {
@@ -1125,7 +1285,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
goto free_ctx;
}
- rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx, 0);
+ rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
+ rdev->is_virtfn);
if (rc) {
pr_err("Failed to initialize RCFW: %#x\n", rc);
goto free_sctx;
@@ -1144,13 +1305,15 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
goto fail;
}
- rc = bnxt_re_setup_qos(rdev);
- if (rc)
- pr_info("RoCE priority not yet configured\n");
+ if (!rdev->is_virtfn) {
+ rc = bnxt_re_setup_qos(rdev);
+ if (rc)
+ pr_info("RoCE priority not yet configured\n");
- INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
- set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
- schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
+ INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
+ set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
+ schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
+ }
/* Register ib dev */
rc = bnxt_re_register_ib(rdev);
@@ -1176,6 +1339,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
&rdev->active_width);
+ set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
@@ -1400,7 +1564,7 @@ err_netdev:
static void __exit bnxt_re_mod_exit(void)
{
- struct bnxt_re_dev *rdev;
+ struct bnxt_re_dev *rdev, *next;
LIST_HEAD(to_be_deleted);
mutex_lock(&bnxt_re_dev_lock);
@@ -1408,8 +1572,11 @@ static void __exit bnxt_re_mod_exit(void)
if (!list_empty(&bnxt_re_dev_list))
list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
mutex_unlock(&bnxt_re_dev_lock);
-
- list_for_each_entry(rdev, &to_be_deleted, list) {
+ /*
+ * Cleanup the devices in reverse order so that the VF device
+ * cleanup is done before PF cleanup
+ */
+ list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
dev_info(rdev_to_dev(rdev), "Unregistering Device");
bnxt_re_dev_stop(rdev);
bnxt_re_ib_unreg(rdev, true);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 61764f7aa79b..8b5f11ac0e42 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -52,6 +52,7 @@
static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
+static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
@@ -278,6 +279,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
struct nq_base *nqe, **nq_ptr;
struct bnxt_qplib_cq *cq;
int num_cqne_processed = 0;
+ int num_srqne_processed = 0;
u32 sw_cons, raw_cons;
u16 type;
int budget = nq->budget;
@@ -320,6 +322,26 @@ static void bnxt_qplib_service_nq(unsigned long data)
spin_unlock_bh(&cq->compl_lock);
break;
}
+ case NQ_BASE_TYPE_SRQ_EVENT:
+ {
+ struct nq_srq_event *nqsrqe =
+ (struct nq_srq_event *)nqe;
+
+ q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
+ q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
+ << 32;
+ bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
+ DBR_DBR_TYPE_SRQ_ARMENA);
+ if (!nq->srqn_handler(nq,
+ (struct bnxt_qplib_srq *)q_handle,
+ nqsrqe->event))
+ num_srqne_processed++;
+ else
+ dev_warn(&nq->pdev->dev,
+ "QPLIB: SRQ event 0x%x not handled",
+ nqsrqe->event);
+ break;
+ }
case NQ_BASE_TYPE_DBQ_EVENT:
break;
default:
@@ -384,17 +406,19 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *),
int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- void *, u8 event))
+ struct bnxt_qplib_srq *,
+ u8 event))
{
resource_size_t nq_base;
int rc = -1;
nq->pdev = pdev;
nq->vector = msix_vector;
+ if (cqn_handler)
+ nq->cqn_handler = cqn_handler;
- nq->cqn_handler = cqn_handler;
-
- nq->srqn_handler = srqn_handler;
+ if (srqn_handler)
+ nq->srqn_handler = srqn_handler;
tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
@@ -410,7 +434,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
if (rc) {
dev_err(&nq->pdev->dev,
"Failed to request IRQ for NQ: %#x", rc);
- bnxt_qplib_disable_nq(nq);
goto fail;
}
@@ -469,6 +492,238 @@ int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
return 0;
}
+/* SRQ */
+static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ struct dbr_dbr db_msg = { 0 };
+ void __iomem *db;
+ u32 sw_prod = 0;
+
+ /* Ring DB */
+ sw_prod = (arm_type == DBR_DBR_TYPE_SRQ_ARM) ? srq->threshold :
+ HWQ_CMP(srq_hwq->prod, srq_hwq);
+ db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
+ DBR_DBR_INDEX_MASK);
+ db_msg.type_xid = cpu_to_le32(((srq->id << DBR_DBR_XID_SFT) &
+ DBR_DBR_XID_MASK) | arm_type);
+ db = (arm_type == DBR_DBR_TYPE_SRQ_ARMENA) ?
+ srq->dbr_base : srq->dpi->dbr;
+ wmb(); /* barrier before db ring */
+ __iowrite64_copy(db, &db_msg, sizeof(db_msg) / sizeof(u64));
+}
+
+int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_destroy_srq req;
+ struct creq_destroy_srq_resp resp;
+ u16 cmd_flags = 0;
+ int rc;
+
+ RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
+
+ /* Configure the request */
+ req.srq_cid = cpu_to_le32(srq->id);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
+
+ bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
+ kfree(srq->swq);
+ return 0;
+}
+
+int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_create_srq req;
+ struct creq_create_srq_resp resp;
+ struct bnxt_qplib_pbl *pbl;
+ u16 cmd_flags = 0;
+ int rc, idx;
+
+ srq->hwq.max_elements = srq->max_wqe;
+ rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, srq->sglist,
+ srq->nmap, &srq->hwq.max_elements,
+ BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
+ PAGE_SIZE, HWQ_TYPE_QUEUE);
+ if (rc)
+ goto exit;
+
+ srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+ GFP_KERNEL);
+ if (!srq->swq) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
+
+ /* Configure the request */
+ req.dpi = cpu_to_le32(srq->dpi->dpi);
+ req.srq_handle = cpu_to_le64((uintptr_t)srq);
+
+ req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
+ pbl = &srq->hwq.pbl[PBL_LVL_0];
+ req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
+ CMDQ_CREATE_SRQ_LVL_MASK) <<
+ CMDQ_CREATE_SRQ_LVL_SFT) |
+ (pbl->pg_size == ROCE_PG_SIZE_4K ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
+ pbl->pg_size == ROCE_PG_SIZE_8K ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
+ pbl->pg_size == ROCE_PG_SIZE_64K ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
+ pbl->pg_size == ROCE_PG_SIZE_2M ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
+ pbl->pg_size == ROCE_PG_SIZE_8M ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
+ pbl->pg_size == ROCE_PG_SIZE_1G ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
+ req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+ req.pd_id = cpu_to_le32(srq->pd->id);
+ req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ goto fail;
+
+ spin_lock_init(&srq->lock);
+ srq->start_idx = 0;
+ srq->last_idx = srq->hwq.max_elements - 1;
+ for (idx = 0; idx < srq->hwq.max_elements; idx++)
+ srq->swq[idx].next_idx = idx + 1;
+ srq->swq[srq->last_idx].next_idx = -1;
+
+ srq->id = le32_to_cpu(resp.xid);
+ srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
+ if (srq->threshold)
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARMENA);
+ srq->arm_req = false;
+
+ return 0;
+fail:
+ bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
+ kfree(srq->swq);
+exit:
+ return rc;
+}
+
+int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ u32 sw_prod, sw_cons, count = 0;
+
+ sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+ sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
+
+ count = sw_prod > sw_cons ? sw_prod - sw_cons :
+ srq_hwq->max_elements - sw_cons + sw_prod;
+ if (count > srq->threshold) {
+ srq->arm_req = false;
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
+ } else {
+ /* Deferred arming */
+ srq->arm_req = true;
+ }
+
+ return 0;
+}
+
+int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_query_srq req;
+ struct creq_query_srq_resp resp;
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct creq_query_srq_resp_sb *sb;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
+ req.srq_cid = cpu_to_le32(srq->id);
+
+ /* Configure the request */
+ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+ if (!sbuf)
+ return -ENOMEM;
+ sb = sbuf->sb;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ (void *)sbuf, 0);
+ if (!rc)
+ srq->threshold = le16_to_cpu(sb->srq_limit);
+ bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+
+ return rc;
+}
+
+int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ struct rq_wqe *srqe, **srqe_ptr;
+ struct sq_sge *hw_sge;
+ u32 sw_prod, sw_cons, count = 0;
+ int i, rc = 0, next;
+
+ spin_lock(&srq_hwq->lock);
+ if (srq->start_idx == srq->last_idx) {
+ dev_err(&srq_hwq->pdev->dev, "QPLIB: FP: SRQ (0x%x) is full!",
+ srq->id);
+ rc = -EINVAL;
+ spin_unlock(&srq_hwq->lock);
+ goto done;
+ }
+ next = srq->start_idx;
+ srq->start_idx = srq->swq[next].next_idx;
+ spin_unlock(&srq_hwq->lock);
+
+ sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+ srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
+ srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
+ memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ /* Calculate wqe_size16 and data_len */
+ for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
+ i < wqe->num_sge; i++, hw_sge++) {
+ hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
+ hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
+ hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
+ }
+ srqe->wqe_type = wqe->type;
+ srqe->flags = wqe->flags;
+ srqe->wqe_size = wqe->num_sge +
+ ((offsetof(typeof(*srqe), data) + 15) >> 4);
+ srqe->wr_id[0] = cpu_to_le32((u32)next);
+ srq->swq[next].wr_id = wqe->wr_id;
+
+ srq_hwq->prod++;
+
+ spin_lock(&srq_hwq->lock);
+ sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+ /* srq_hwq->cons is retained for this occupancy
+ * calculation; the lock is only required to read
+ * srq_hwq->cons consistently.
+ */
+ sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
+ count = sw_prod > sw_cons ? sw_prod - sw_cons :
+ srq_hwq->max_elements - sw_cons + sw_prod;
+ spin_unlock(&srq_hwq->lock);
+ /* Ring DB */
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ);
+ if (srq->arm_req && count > srq->threshold) {
+ srq->arm_req = false;
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
+ }
+done:
+ return rc;
+}
+
/* QP */
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
@@ -737,6 +992,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
pbl->pg_size == ROCE_PG_SIZE_1G ?
CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
+ } else {
+ /* SRQ */
+ if (qp->srq) {
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
+ req.srq_cid = cpu_to_le32(qp->srq->id);
+ }
}
if (qp->rcq)
@@ -2068,6 +2329,16 @@ done:
return rc;
}
+static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
+{
+ spin_lock(&srq->hwq.lock);
+ srq->swq[srq->last_idx].next_idx = (int)tag;
+ srq->last_idx = (int)tag;
+ srq->swq[srq->last_idx].next_idx = -1;
+ srq->hwq.cons++; /* consumer count feeds the SRQ occupancy logic */
+ spin_unlock(&srq->hwq.lock);
+}
+
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
struct cq_res_rc *hwcqe,
struct bnxt_qplib_cqe **pcqe,
@@ -2075,6 +2346,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *rq;
+ struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
u32 wr_id_idx;
int rc = 0;
@@ -2102,27 +2374,46 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process RC ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
-
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
+ if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq)
+ return -EINVAL;
+ if (wr_id_idx > srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process RC ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > rq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process RC ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
+ wr_id_idx, rq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = rq->swq[wr_id_idx].wr_id;
+ cqe++;
+ (*budget)--;
+ rq->hwq.cons++;
+ *pcqe = cqe;
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
}
done:
@@ -2136,6 +2427,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *rq;
+ struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
u32 wr_id_idx;
int rc = 0;
@@ -2166,27 +2458,48 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
hwcqe->src_qp_high_srq_or_rq_wr_id) &
CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process UD ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx %#x exceeded RQ max %#x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
+ if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq)
+ return -EINVAL;
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
+ if (wr_id_idx > srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process UD ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > rq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process UD ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
+ wr_id_idx, rq->hwq.max_elements);
+ return -EINVAL;
+ }
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ cqe->wr_id = rq->swq[wr_id_idx].wr_id;
+ cqe++;
+ (*budget)--;
+ rq->hwq.cons++;
+ *pcqe = cqe;
+
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
}
done:
return rc;
@@ -2218,6 +2531,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *rq;
+ struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
u32 wr_id_idx;
int rc = 0;
@@ -2256,26 +2570,49 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
- dev_err(&cq->hwq.pdev->dev, "QPLIB: ix 0x%x exceeded RQ max 0x%x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
-
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
+ if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: SRQ used but not defined??");
+ return -EINVAL;
+ }
+ if (wr_id_idx > srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process Raw/QP1 ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > rq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: ix 0x%x exceeded RQ max 0x%x",
+ wr_id_idx, rq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = rq->swq[wr_id_idx].wr_id;
+ cqe++;
+ (*budget)--;
+ rq->hwq.cons++;
+ *pcqe = cqe;
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
}
done:
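The SRQ bookkeeping in this file amounts to a singly linked free list threaded through the swq array: bnxt_qplib_post_srq_recv() pops a slot from the head (start_idx), and bnxt_qplib_release_srqe() pushes the consumed tag back at the tail (last_idx). A minimal standalone sketch of that invariant, with illustrative names and a tiny fixed ring:

#include <stdio.h>

#define MAX_WQE 4

struct swq_ent {
	unsigned long long wr_id;
	int next_idx;		/* next free slot; -1 terminates the list */
};

static struct swq_ent swq[MAX_WQE];
static int start_idx;		/* head of the free list */
static int last_idx;		/* tail of the free list */

static void srq_init(void)
{
	int i;

	start_idx = 0;
	last_idx = MAX_WQE - 1;
	for (i = 0; i < MAX_WQE; i++)
		swq[i].next_idx = i + 1;
	swq[last_idx].next_idx = -1;
}

/* Post: pop a free slot from the head; "full" when one slot remains */
static int srq_post(unsigned long long wr_id)
{
	int next;

	if (start_idx == last_idx)
		return -1;	/* SRQ full */
	next = start_idx;
	start_idx = swq[next].next_idx;
	swq[next].wr_id = wr_id;
	return next;
}

/* Complete: push the consumed tag back at the tail */
static void srq_release(int tag)
{
	swq[last_idx].next_idx = tag;
	last_idx = tag;
	swq[last_idx].next_idx = -1;
}

int main(void)
{
	int tag;

	srq_init();
	tag = srq_post(0x1234);
	printf("posted at slot %d\n", tag);
	if (tag >= 0)
		srq_release(tag);
	return 0;
}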
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index c582d4ec8173..211b27a8f9e2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -39,6 +39,27 @@
#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__
+struct bnxt_qplib_srq {
+ struct bnxt_qplib_pd *pd;
+ struct bnxt_qplib_dpi *dpi;
+ void __iomem *dbr_base;
+ u64 srq_handle;
+ u32 id;
+ u32 max_wqe;
+ u32 max_sge;
+ u32 threshold;
+ bool arm_req;
+ struct bnxt_qplib_cq *cq;
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_swq *swq;
+ struct scatterlist *sglist;
+ int start_idx;
+ int last_idx;
+ u32 nmap;
+ u16 eventq_hw_ring_id;
+ spinlock_t lock; /* protects the SRQE linked list */
+};
+
struct bnxt_qplib_sge {
u64 addr;
u32 lkey;
@@ -79,6 +100,7 @@ static inline u32 get_psne_idx(u32 val)
struct bnxt_qplib_swq {
u64 wr_id;
+ int next_idx;
u8 type;
u8 flags;
u32 start_psn;
@@ -404,29 +426,27 @@ struct bnxt_qplib_cq {
writel(NQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
struct bnxt_qplib_nq {
- struct pci_dev *pdev;
-
- int vector;
- cpumask_t mask;
- int budget;
- bool requested;
- struct tasklet_struct worker;
- struct bnxt_qplib_hwq hwq;
-
- u16 bar_reg;
- u16 bar_reg_off;
- u16 ring_id;
- void __iomem *bar_reg_iomem;
-
- int (*cqn_handler)
- (struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_cq *cq);
- int (*srqn_handler)
- (struct bnxt_qplib_nq *nq,
- void *srq,
- u8 event);
- struct workqueue_struct *cqn_wq;
- char name[32];
+ struct pci_dev *pdev;
+
+ int vector;
+ cpumask_t mask;
+ int budget;
+ bool requested;
+ struct tasklet_struct worker;
+ struct bnxt_qplib_hwq hwq;
+
+ u16 bar_reg;
+ u16 bar_reg_off;
+ u16 ring_id;
+ void __iomem *bar_reg_iomem;
+
+ int (*cqn_handler)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_cq *cq);
+ int (*srqn_handler)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_srq *srq,
+ u8 event);
+ struct workqueue_struct *cqn_wq;
+ char name[32];
};
struct bnxt_qplib_nq_work {
@@ -441,8 +461,18 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *cq),
int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- void *srq,
+ struct bnxt_qplib_srq *srq,
u8 event));
+int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
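The occupancy test repeated in bnxt_qplib_modify_srq() and bnxt_qplib_post_srq_recv() is plain circular-buffer arithmetic over masked producer/consumer indices. A standalone sketch of the wraparound branch; reducing HWQ_CMP to a power-of-two mask is an assumption about the real helper:

#include <stdio.h>

#define MAX_ELEMENTS 8	/* power of two, so masking works */
#define HWQ_CMP(idx) ((idx) & (MAX_ELEMENTS - 1))

static unsigned int occupancy(unsigned int prod, unsigned int cons)
{
	unsigned int sw_prod = HWQ_CMP(prod);
	unsigned int sw_cons = HWQ_CMP(cons);

	/* second branch handles the producer wrapping past the ring end */
	return sw_prod > sw_cons ? sw_prod - sw_cons :
				   MAX_ELEMENTS - sw_cons + sw_prod;
}

int main(void)
{
	/* producer wrapped: 10&7 = 2, 7&7 = 7 -> 8 - 7 + 2 = 3 entries */
	printf("count = %u\n", occupancy(10, 7));
	return 0;
}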
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index bb5574adf195..8329ec6a7946 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -93,7 +93,8 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
opcode = req->opcode;
if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
(opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
- opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW)) {
+ opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
+ opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW not initialized, reject opcode 0x%x",
opcode);
@@ -615,7 +616,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
int msix_vector,
int cp_bar_reg_off, int virt_fn,
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
- struct creq_func_event *))
+ void *, void *))
{
resource_size_t res_base;
struct cmdq_init init;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 2946a7cfae82..6bee6e3636ea 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -167,7 +167,7 @@ struct bnxt_qplib_rcfw {
#define FIRMWARE_TIMED_OUT 3
wait_queue_head_t waitq;
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
- struct creq_func_event *);
+ void *, void *);
u32 seq_num;
/* Bar region info */
@@ -199,9 +199,8 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
int cp_bar_reg_off, int virt_fn,
- int (*aeq_handler)
- (struct bnxt_qplib_rcfw *,
- struct creq_func_event *));
+ int (*aeq_handler)(struct bnxt_qplib_rcfw *,
+ void *aeqe, void *obj));
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
struct bnxt_qplib_rcfw *rcfw,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 4e101704e801..ad37d54affcc 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -104,13 +104,12 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
if (!sghead) {
for (i = 0; i < pages; i++) {
- pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
- pbl->pg_size,
- &pbl->pg_map_arr[i],
- GFP_KERNEL);
+ pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
+ pbl->pg_size,
+ &pbl->pg_map_arr[i],
+ GFP_KERNEL);
if (!pbl->pg_arr[i])
goto fail;
- memset(pbl->pg_arr[i], 0, pbl->pg_size);
pbl->pg_count++;
}
} else {
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 9543ce51a28a..c015c1861351 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -64,8 +64,28 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}
+static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
+ char *fw_ver)
+{
+ struct cmdq_query_version req;
+ struct creq_query_version_resp resp;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, QUERY_VERSION, cmd_flags);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return;
+ fw_ver[0] = resp.fw_maj;
+ fw_ver[1] = resp.fw_minor;
+ fw_ver[2] = resp.fw_bld;
+ fw_ver[3] = resp.fw_rsvd;
+}
+
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr)
+ struct bnxt_qplib_dev_attr *attr, bool vf)
{
struct cmdq_query_func req;
struct creq_query_func_resp resp;
@@ -95,7 +115,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
/* Extract the context from the side buffer */
attr->max_qp = le32_to_cpu(sb->max_qp);
/* max_qp value reported by FW for PF doesn't include the QP1 for PF */
- attr->max_qp += 1;
+ if (!vf)
+ attr->max_qp += 1;
attr->max_qp_rd_atom =
sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
@@ -133,7 +154,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
attr->max_sgid = le32_to_cpu(sb->max_gid);
- strlcpy(attr->fw_ver, "20.6.28.0", sizeof(attr->fw_ver));
+ bnxt_qplib_query_version(rcfw, attr->fw_ver);
for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
@@ -150,6 +171,38 @@ bail:
return rc;
}
+int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_ctx *ctx)
+{
+ struct cmdq_set_func_resources req;
+ struct creq_set_func_resources_resp resp;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, SET_FUNC_RESOURCES, cmd_flags);
+
+ req.number_of_qp = cpu_to_le32(ctx->qpc_count);
+ req.number_of_mrw = cpu_to_le32(ctx->mrw_count);
+ req.number_of_srq = cpu_to_le32(ctx->srqc_count);
+ req.number_of_cq = cpu_to_le32(ctx->cq_count);
+
+ req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
+ req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
+ req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
+ req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
+ req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp,
+ NULL, 0);
+ if (rc) {
+ dev_err(&res->pdev->dev,
+ "QPLIB: Failed to set function resources");
+ }
+ return rc;
+}
+
/* SGID */
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
@@ -604,7 +657,7 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
}
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
- u64 *pbl_tbl, int num_pbls, bool block)
+ u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_register_mr req;
@@ -615,6 +668,9 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
u32 pg_size;
if (num_pbls) {
+ /* Allocate memory for the non-leaf pages to store buf ptrs.
+ * Non-leaf pages always use the system PAGE_SIZE.
+ */
pg_ptrs = roundup_pow_of_two(num_pbls);
pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
if (!pages)
@@ -632,6 +688,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
bnxt_qplib_free_hwq(res->pdev, &mr->hwq);
mr->hwq.max_elements = pages;
+ /* Use system PAGE_SIZE */
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL, 0,
&mr->hwq.max_elements,
PAGE_SIZE, 0, PAGE_SIZE,
@@ -652,18 +709,22 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
/* Configure the request */
if (mr->hwq.level == PBL_LVL_MAX) {
+ /* No PBL provided, just use system PAGE_SIZE */
level = 0;
req.pbl = 0;
pg_size = PAGE_SIZE;
} else {
level = mr->hwq.level + 1;
req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
- pg_size = mr->hwq.pbl[PBL_LVL_0].pg_size;
}
+ pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
((ilog2(pg_size) <<
CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
+ req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
req.access = (mr->flags & 0xFFFF);
req.va = cpu_to_le64(mr->va);
req.key = cpu_to_le32(mr->lkey);
@@ -729,3 +790,73 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
0);
return 0;
}
+
+int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_roce_stats *stats)
+{
+ struct cmdq_query_roce_stats req;
+ struct creq_query_roce_stats_resp resp;
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct creq_query_roce_stats_resp_sb *sb;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, QUERY_ROCE_STATS, cmd_flags);
+
+ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+ if (!sbuf) {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: SP: QUERY_ROCE_STATS alloc side buffer failed");
+ return -ENOMEM;
+ }
+
+ sb = sbuf->sb;
+ req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ (void *)sbuf, 0);
+ if (rc)
+ goto bail;
+ /* Extract the context from the side buffer */
+ stats->to_retransmits = le64_to_cpu(sb->to_retransmits);
+ stats->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
+ stats->max_retry_exceeded = le64_to_cpu(sb->max_retry_exceeded);
+ stats->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
+ stats->missing_resp = le64_to_cpu(sb->missing_resp);
+ stats->unrecoverable_err = le64_to_cpu(sb->unrecoverable_err);
+ stats->bad_resp_err = le64_to_cpu(sb->bad_resp_err);
+ stats->local_qp_op_err = le64_to_cpu(sb->local_qp_op_err);
+ stats->local_protection_err = le64_to_cpu(sb->local_protection_err);
+ stats->mem_mgmt_op_err = le64_to_cpu(sb->mem_mgmt_op_err);
+ stats->remote_invalid_req_err = le64_to_cpu(sb->remote_invalid_req_err);
+ stats->remote_access_err = le64_to_cpu(sb->remote_access_err);
+ stats->remote_op_err = le64_to_cpu(sb->remote_op_err);
+ stats->dup_req = le64_to_cpu(sb->dup_req);
+ stats->res_exceed_max = le64_to_cpu(sb->res_exceed_max);
+ stats->res_length_mismatch = le64_to_cpu(sb->res_length_mismatch);
+ stats->res_exceeds_wqe = le64_to_cpu(sb->res_exceeds_wqe);
+ stats->res_opcode_err = le64_to_cpu(sb->res_opcode_err);
+ stats->res_rx_invalid_rkey = le64_to_cpu(sb->res_rx_invalid_rkey);
+ stats->res_rx_domain_err = le64_to_cpu(sb->res_rx_domain_err);
+ stats->res_rx_no_perm = le64_to_cpu(sb->res_rx_no_perm);
+ stats->res_rx_range_err = le64_to_cpu(sb->res_rx_range_err);
+ stats->res_tx_invalid_rkey = le64_to_cpu(sb->res_tx_invalid_rkey);
+ stats->res_tx_domain_err = le64_to_cpu(sb->res_tx_domain_err);
+ stats->res_tx_no_perm = le64_to_cpu(sb->res_tx_no_perm);
+ stats->res_tx_range_err = le64_to_cpu(sb->res_tx_range_err);
+ stats->res_irrq_oflow = le64_to_cpu(sb->res_irrq_oflow);
+ stats->res_unsup_opcode = le64_to_cpu(sb->res_unsup_opcode);
+ stats->res_unaligned_atomic = le64_to_cpu(sb->res_unaligned_atomic);
+ stats->res_rem_inv_err = le64_to_cpu(sb->res_rem_inv_err);
+ stats->res_mem_error = le64_to_cpu(sb->res_mem_error);
+ stats->res_srq_err = le64_to_cpu(sb->res_srq_err);
+ stats->res_cmp_err = le64_to_cpu(sb->res_cmp_err);
+ stats->res_invalid_dup_rkey = le64_to_cpu(sb->res_invalid_dup_rkey);
+ stats->res_wqe_format_err = le64_to_cpu(sb->res_wqe_format_err);
+ stats->res_cq_load_err = le64_to_cpu(sb->res_cq_load_err);
+ stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
+ stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
+ stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
+bail:
+ bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ return rc;
+}
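With this change, bnxt_qplib_get_dev_attr() reports the firmware version as four raw bytes instead of a hard-coded string. A sketch of how a consumer might format them; the dotted-quad rendering (matching the old "20.6.28.0" literal) is an assumption about presentation, not something the patch itself mandates:

#include <stdio.h>

#define FW_VER_ARR_LEN 4

int main(void)
{
	/* e.g. bytes filled from creq_query_version_resp */
	unsigned char fw_ver[FW_VER_ARR_LEN] = { 20, 6, 28, 0 };
	char buf[32];

	snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
		 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3]);
	printf("firmware %s\n", buf);	/* firmware 20.6.28.0 */
	return 0;
}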
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 11322582f5e4..9d3e8b994945 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -45,7 +45,8 @@
#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040
struct bnxt_qplib_dev_attr {
- char fw_ver[32];
+#define FW_VER_ARR_LEN 4
+ u8 fw_ver[FW_VER_ARR_LEN];
u16 max_sgid;
u16 max_mrw;
u32 max_qp;
@@ -127,6 +128,85 @@ struct bnxt_qplib_frpl {
#define BNXT_QPLIB_ACCESS_ZERO_BASED BIT(5)
#define BNXT_QPLIB_ACCESS_ON_DEMAND BIT(6)
+/* All counters below are 64-bit wide */
+struct bnxt_qplib_roce_stats {
+ u64 to_retransmits;
+ u64 seq_err_naks_rcvd;
+ u64 max_retry_exceeded;
+ u64 rnr_naks_rcvd;
+ u64 missing_resp;
+ u64 unrecoverable_err;
+ u64 bad_resp_err;
+ u64 local_qp_op_err;
+ u64 local_protection_err;
+ u64 mem_mgmt_op_err;
+ u64 remote_invalid_req_err;
+ u64 remote_access_err;
+ u64 remote_op_err;
+ u64 dup_req;
+ u64 res_exceed_max;
+ u64 res_length_mismatch;
+ u64 res_exceeds_wqe;
+ u64 res_opcode_err;
+ u64 res_rx_invalid_rkey;
+ u64 res_rx_domain_err;
+ u64 res_rx_no_perm;
+ u64 res_rx_range_err;
+ u64 res_tx_invalid_rkey;
+ u64 res_tx_domain_err;
+ u64 res_tx_no_perm;
+ u64 res_tx_range_err;
+ u64 res_irrq_oflow;
+ u64 res_unsup_opcode;
+ u64 res_unaligned_atomic;
+ u64 res_rem_inv_err;
+ u64 res_mem_error;
+ u64 res_srq_err;
+ u64 res_cmp_err;
+ u64 res_invalid_dup_rkey;
+ u64 res_wqe_format_err;
+ u64 res_cq_load_err;
+ u64 res_srq_load_err;
+ u64 res_tx_pci_err;
+ u64 res_rx_pci_err;
+};
+
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid);
@@ -147,7 +227,10 @@ int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
bool update);
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr);
+ struct bnxt_qplib_dev_attr *attr, bool vf);
+int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah);
int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah);
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
@@ -155,7 +238,7 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
bool block);
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
- u64 *pbl_tbl, int num_pbls, bool block);
+ u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size);
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr);
int bnxt_qplib_alloc_fast_reg_mr(struct bnxt_qplib_res *res,
struct bnxt_qplib_mrw *mr, int max);
@@ -164,4 +247,6 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
struct bnxt_qplib_frpl *frpl);
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids);
+int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_roce_stats *stats);
#endif /* __BNXT_QPLIB_SP_H__*/
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index c3cba6063a03..2d7ea096a247 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -954,6 +954,7 @@ struct cmdq_base {
#define CMDQ_BASE_OPCODE_QUERY_VERSION 0x8bUL
#define CMDQ_BASE_OPCODE_MODIFY_CC 0x8cUL
#define CMDQ_BASE_OPCODE_QUERY_CC 0x8dUL
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS 0x8eUL
u8 cmd_size;
__le16 flags;
__le16 cookie;
@@ -1383,8 +1384,20 @@ struct cmdq_register_mr {
#define CMDQ_REGISTER_MR_LVL_LVL_0 0x0UL
#define CMDQ_REGISTER_MR_LVL_LVL_1 0x1UL
#define CMDQ_REGISTER_MR_LVL_LVL_2 0x2UL
+ #define CMDQ_REGISTER_MR_LVL_LAST CMDQ_REGISTER_MR_LVL_LVL_2
#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK 0x7cUL
#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT 2
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4K (0xcUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_8K (0xdUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_64K (0x10UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_256K (0x12UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1M (0x14UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_2M (0x15UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4M (0x16UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G (0x1eUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_LAST \
+ CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G
+ #define CMDQ_REGISTER_MR_UNUSED1 0x80UL
u8 access;
#define CMDQ_REGISTER_MR_ACCESS_LOCAL_WRITE 0x1UL
#define CMDQ_REGISTER_MR_ACCESS_REMOTE_READ 0x2UL
@@ -1392,7 +1405,21 @@ struct cmdq_register_mr {
#define CMDQ_REGISTER_MR_ACCESS_REMOTE_ATOMIC 0x8UL
#define CMDQ_REGISTER_MR_ACCESS_MW_BIND 0x10UL
#define CMDQ_REGISTER_MR_ACCESS_ZERO_BASED 0x20UL
- __le16 unused_1;
+ __le16 log2_pbl_pg_size;
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK 0x1fUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT 0
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K 0xcUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K 0xdUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K 0x10UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K 0x12UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M 0x14UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M 0x15UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M 0x16UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G 0x1eUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_LAST \
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G
+ #define CMDQ_REGISTER_MR_UNUSED11_MASK 0xffe0UL
+ #define CMDQ_REGISTER_MR_UNUSED11_SFT 5
__le32 key;
__le64 pbl;
__le64 va;
@@ -1799,6 +1826,16 @@ struct cmdq_set_func_resources {
u8 resp_size;
u8 reserved8;
__le64 resp_addr;
+ __le32 number_of_qp;
+ __le32 number_of_mrw;
+ __le32 number_of_srq;
+ __le32 number_of_cq;
+ __le32 max_qp_per_vf;
+ __le32 max_mrw_per_vf;
+ __le32 max_srq_per_vf;
+ __le32 max_cq_per_vf;
+ __le32 max_gid_per_vf;
+ __le32 stat_ctx_id;
};
/* Read hardware resource context command (24 bytes) */
@@ -2013,6 +2050,20 @@ struct creq_modify_qp_resp {
__le16 reserved48[3];
};
+/* cmdq_query_roce_stats (size:128b/16B) */
+struct cmdq_query_roce_stats {
+ u8 opcode;
+ #define CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CMDQ_QUERY_ROCE_STATS_OPCODE_LAST \
+ CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
/* Query QP command response (16 bytes) */
struct creq_query_qp_resp {
u8 type;
@@ -2783,6 +2834,80 @@ struct creq_query_cc_resp_sb {
__le64 reserved64_1;
};
+/* creq_query_roce_stats_resp (size:128b/16B) */
+struct creq_query_roce_stats_resp {
+ u8 type;
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_LAST \
+ CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_ROCE_STATS_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS 0x8eUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_LAST \
+ CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS
+ u8 reserved48[6];
+};
+
+/* creq_query_roce_stats_resp_sb (size:2624b/328B) */
+struct creq_query_roce_stats_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_LAST \
+ CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 rsvd;
+ __le32 num_counters;
+ __le32 rsvd1;
+ __le64 to_retransmits;
+ __le64 seq_err_naks_rcvd;
+ __le64 max_retry_exceeded;
+ __le64 rnr_naks_rcvd;
+ __le64 missing_resp;
+ __le64 unrecoverable_err;
+ __le64 bad_resp_err;
+ __le64 local_qp_op_err;
+ __le64 local_protection_err;
+ __le64 mem_mgmt_op_err;
+ __le64 remote_invalid_req_err;
+ __le64 remote_access_err;
+ __le64 remote_op_err;
+ __le64 dup_req;
+ __le64 res_exceed_max;
+ __le64 res_length_mismatch;
+ __le64 res_exceeds_wqe;
+ __le64 res_opcode_err;
+ __le64 res_rx_invalid_rkey;
+ __le64 res_rx_domain_err;
+ __le64 res_rx_no_perm;
+ __le64 res_rx_range_err;
+ __le64 res_tx_invalid_rkey;
+ __le64 res_tx_domain_err;
+ __le64 res_tx_no_perm;
+ __le64 res_tx_range_err;
+ __le64 res_irrq_oflow;
+ __le64 res_unsup_opcode;
+ __le64 res_unaligned_atomic;
+ __le64 res_rem_inv_err;
+ __le64 res_mem_error;
+ __le64 res_srq_err;
+ __le64 res_cmp_err;
+ __le64 res_invalid_dup_rkey;
+ __le64 res_wqe_format_err;
+ __le64 res_cq_load_err;
+ __le64 res_srq_load_err;
+ __le64 res_tx_pci_err;
+ __le64 res_rx_pci_err;
+};
+
/* QP error notification event (16 bytes) */
struct creq_qp_error_notification {
u8 type;
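The new REGISTER_MR page-size fields encode log2(page size) behind a shift and mask, which is how bnxt_qplib_reg_mr() fills log2_pg_size_lvl. A standalone sketch of the packing arithmetic; the MASK/SFT constants are copied from the header, while the LVL_SFT value of 0 and the ilog2 helper are assumptions standing in for the kernel's definitions:

#include <stdio.h>
#include <stdint.h>

#define CMDQ_REGISTER_MR_LVL_SFT		0	/* assumed */
#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK	0x7cUL
#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT	2

/* integer log2 for power-of-two sizes, like the kernel's ilog2() */
static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint64_t pg_size = 2 * 1024 * 1024;	/* 2M buffer pages */
	unsigned int level = 2;
	uint8_t field;

	field = (level << CMDQ_REGISTER_MR_LVL_SFT) |
		((ilog2_u64(pg_size) << CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
		 CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
	printf("log2_pg_size_lvl = 0x%02x\n", field);
	/* log2(2M) = 21 = 0x15, matching ..._PG_SIZE_PG_2M (0x15UL << 2) */
	return 0;
}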
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 21db3b48a617..4cf17c650c36 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -257,8 +257,8 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
if (ep->emss < 128)
ep->emss = 128;
if (ep->emss & 7)
- pr_warn("Warning: misaligned mtu idx %u mss %u emss=%u\n",
- TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+ pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+ TCPOPT_MSS_G(opt), ep->mss, ep->emss);
pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
ep->emss);
}
@@ -2733,9 +2733,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
if (cxgb_is_neg_adv(req->status)) {
- pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
- neg_adv_str(req->status));
+ pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
+ ep->hwtid, req->status, neg_adv_str(req->status));
ep->stats.abort_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
@@ -3567,8 +3566,8 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
case MORIBUND:
case ABORTING:
case DEAD:
- pr_info("%s ignoring disconnect ep %p state %u\n",
- __func__, ep, ep->com.state);
+ pr_debug("ignoring disconnect ep %p state %u\n",
+ ep, ep->com.state);
break;
default:
WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
@@ -4097,9 +4096,15 @@ static void process_work(struct work_struct *work)
dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
opcode = rpl->ot.opcode;
- ret = work_handlers[opcode](dev, skb);
- if (!ret)
+ if (opcode >= ARRAY_SIZE(work_handlers) ||
+ !work_handlers[opcode]) {
+ pr_err("No handler for opcode 0x%x.\n", opcode);
kfree_skb(skb);
+ } else {
+ ret = work_handlers[opcode](dev, skb);
+ if (!ret)
+ kfree_skb(skb);
+ }
process_timedout_eps();
}
}
@@ -4201,8 +4206,8 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
if (cxgb_is_neg_adv(req->status)) {
- pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
+ pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
+ ep->hwtid, req->status,
neg_adv_str(req->status));
goto out;
}
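The process_work() fix above is the classic guard for an opcode-indexed jump table: bounds-check the index and reject NULL slots before dispatching. A minimal standalone sketch of the same guard; the table contents and handler names are illustrative only:

#include <stdio.h>

typedef int (*handler_fn)(int arg);

static int handle_open(int arg)
{
	printf("open %d\n", arg);
	return 0;
}

static int handle_close(int arg)
{
	printf("close %d\n", arg);
	return 0;
}

/* sparse table: slots 1 and 2 are implicitly NULL */
static const handler_fn work_handlers[] = {
	[0] = handle_open,
	[3] = handle_close,
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int dispatch(unsigned int opcode, int arg)
{
	if (opcode >= ARRAY_SIZE(work_handlers) || !work_handlers[opcode]) {
		fprintf(stderr, "No handler for opcode 0x%x\n", opcode);
		return -1;
	}
	return work_handlers[opcode](arg);
}

int main(void)
{
	dispatch(0, 1);		/* handled */
	dispatch(2, 1);		/* NULL slot: rejected */
	dispatch(7, 1);		/* out of range: rejected */
	return 0;
}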
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ea55e95cd2c5..6f2b26126c64 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -395,6 +395,11 @@ next_cqe:
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
+ if (DRAIN_CQE(cqe)) {
+ WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
+ return 0;
+ }
+
if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
return 0;
@@ -489,7 +494,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
/*
* Special cqe for drain WR completions...
*/
- if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+ if (DRAIN_CQE(hw_cqe)) {
*cookie = CQE_DRAIN_COOKIE(hw_cqe);
*cqe = *hw_cqe;
goto skip_cqe;
@@ -566,10 +571,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
ret = -EAGAIN;
goto skip_cqe;
}
- if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
+ if (unlikely(!CQE_STATUS(hw_cqe) &&
+ CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
t4_set_wq_in_error(wq);
- hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
- goto proc_cqe;
+ hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
}
goto proc_cqe;
}
@@ -743,9 +748,6 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
c4iw_invalidate_mr(qhp->rhp,
CQE_WRID_FR_STAG(&cqe));
break;
- case C4IW_DRAIN_OPCODE:
- wc->opcode = IB_WC_SEND;
- break;
default:
pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
CQE_OPCODE(&cqe), CQE_QPID(&cqe));
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index af77d128d242..7a9d0de89d6a 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -66,7 +66,7 @@ MODULE_PARM_DESC(c4iw_wr_log_size_order,
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
-struct workqueue_struct *reg_workq;
+static struct workqueue_struct *reg_workq;
#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
@@ -108,19 +108,19 @@ void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
(wq->rdev->wr_log_size - 1);
le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
- getnstimeofday(&le.poll_host_ts);
+ le.poll_host_time = ktime_get();
le.valid = 1;
le.cqe_sge_ts = CQE_TS(cqe);
if (SQ_TYPE(cqe)) {
le.qid = wq->sq.qid;
le.opcode = CQE_OPCODE(cqe);
- le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
+ le.post_host_time = wq->sq.sw_sq[wq->sq.cidx].host_time;
le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
le.wr_id = CQE_WRID_SQ_IDX(cqe);
} else {
le.qid = wq->rq.qid;
le.opcode = FW_RI_RECEIVE;
- le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
+ le.post_host_time = wq->rq.sw_rq[wq->rq.cidx].host_time;
le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
le.wr_id = CQE_WRID_MSN(cqe);
}
@@ -130,9 +130,9 @@ void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
static int wr_log_show(struct seq_file *seq, void *v)
{
struct c4iw_dev *dev = seq->private;
- struct timespec prev_ts = {0, 0};
+ ktime_t prev_time;
struct wr_log_entry *lep;
- int prev_ts_set = 0;
+ int prev_time_set = 0;
int idx, end;
#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
@@ -145,33 +145,29 @@ static int wr_log_show(struct seq_file *seq, void *v)
lep = &dev->rdev.wr_log[idx];
while (idx != end) {
if (lep->valid) {
- if (!prev_ts_set) {
- prev_ts_set = 1;
- prev_ts = lep->poll_host_ts;
+ if (!prev_time_set) {
+ prev_time_set = 1;
+ prev_time = lep->poll_host_time;
}
- seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
- "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
+ seq_printf(seq, "%04u: nsec %llu qid %u opcode "
+ "%u %s 0x%x host_wr_delta nsec %llu "
"post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
"poll_sge_ts 0x%llx post_poll_delta_ns %llu "
"cqe_poll_delta_ns %llu\n",
idx,
- timespec_sub(lep->poll_host_ts,
- prev_ts).tv_sec,
- timespec_sub(lep->poll_host_ts,
- prev_ts).tv_nsec,
+ ktime_to_ns(ktime_sub(lep->poll_host_time,
+ prev_time)),
lep->qid, lep->opcode,
lep->opcode == FW_RI_RECEIVE ?
"msn" : "wrid",
lep->wr_id,
- timespec_sub(lep->poll_host_ts,
- lep->post_host_ts).tv_sec,
- timespec_sub(lep->poll_host_ts,
- lep->post_host_ts).tv_nsec,
+ ktime_to_ns(ktime_sub(lep->poll_host_time,
+ lep->post_host_time)),
lep->post_sge_ts, lep->cqe_sge_ts,
lep->poll_sge_ts,
ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
- prev_ts = lep->poll_host_ts;
+ prev_time = lep->poll_host_time;
}
idx++;
if (idx > (dev->rdev.wr_log_size - 1))
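The wr_log conversion above replaces struct timespec sec/nsec pairs with opaque ktime_t values, so every delta becomes one 64-bit subtraction. A userspace analogue of the pattern, with CLOCK_MONOTONIC standing in for ktime_get() (an analogy, not the kernel API):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* userspace stand-in for ktime_get(): monotonic nanoseconds */
static int64_t mono_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	int64_t post_time, poll_time;

	post_time = mono_ns();
	/* ... work would be posted and later polled here ... */
	poll_time = mono_ns();

	/* one subtraction replaces the timespec_sub() sec/nsec pairs */
	printf("host_wr_delta nsec %lld\n",
	       (long long)(poll_time - post_time));
	return 0;
}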
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index a252d5c40ae3..3e9d8b277ab9 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -236,7 +236,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
if (atomic_dec_and_test(&chp->refcnt))
wake_up(&chp->wait);
} else {
- pr_warn("%s unknown cqid 0x%x\n", __func__, qid);
+ pr_debug("unknown cqid 0x%x\n", qid);
spin_unlock_irqrestore(&dev->lock, flag);
}
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 470f97a79ebb..cc929002c05e 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -153,8 +153,8 @@ struct c4iw_hw_queue {
};
struct wr_log_entry {
- struct timespec post_host_ts;
- struct timespec poll_host_ts;
+ ktime_t post_host_time;
+ ktime_t poll_host_time;
u64 post_sge_ts;
u64 cqe_sge_ts;
u64 poll_sge_ts;
@@ -693,8 +693,6 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
return IB_QPS_ERR;
}
-#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
-
static inline u32 c4iw_ib_to_tpt_access(int a)
{
return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 5ee7fe433136..de77b6027d69 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -790,21 +790,57 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
return 0;
}
-static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+static int ib_to_fw_opcode(int ib_opcode)
+{
+ int opcode;
+
+ switch (ib_opcode) {
+ case IB_WR_SEND_WITH_INV:
+ opcode = FW_RI_SEND_WITH_INV;
+ break;
+ case IB_WR_SEND:
+ opcode = FW_RI_SEND;
+ break;
+ case IB_WR_RDMA_WRITE:
+ opcode = FW_RI_RDMA_WRITE;
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
+ opcode = FW_RI_READ_REQ;
+ break;
+ case IB_WR_REG_MR:
+ opcode = FW_RI_FAST_REGISTER;
+ break;
+ case IB_WR_LOCAL_INV:
+ opcode = FW_RI_LOCAL_INV;
+ break;
+ default:
+ opcode = -EINVAL;
+ }
+ return opcode;
+}
+
+static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
{
struct t4_cqe cqe = {};
struct c4iw_cq *schp;
unsigned long flag;
struct t4_cq *cq;
+ int opcode;
schp = to_c4iw_cq(qhp->ibqp.send_cq);
cq = &schp->cq;
+ opcode = ib_to_fw_opcode(wr->opcode);
+ if (opcode < 0)
+ return opcode;
+
cqe.u.drain_cookie = wr->wr_id;
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
- CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+ CQE_OPCODE_V(opcode) |
CQE_TYPE_V(1) |
CQE_SWCQE_V(1) |
+ CQE_DRAIN_V(1) |
CQE_QPID_V(qhp->wq.sq.qid));
spin_lock_irqsave(&schp->lock, flag);
@@ -819,6 +855,23 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
schp->ibcq.cq_context);
spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
}
+ return 0;
+}
+
+static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ int ret = 0;
+
+ while (wr) {
+ ret = complete_sq_drain_wr(qhp, wr);
+ if (ret) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ return ret;
}
static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
@@ -833,9 +886,10 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
cqe.u.drain_cookie = wr->wr_id;
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
- CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+ CQE_OPCODE_V(FW_RI_SEND) |
CQE_TYPE_V(0) |
CQE_SWCQE_V(1) |
+ CQE_DRAIN_V(1) |
CQE_QPID_V(qhp->wq.sq.qid));
spin_lock_irqsave(&rchp->lock, flag);
@@ -852,6 +906,14 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
}
}
+static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+ while (wr) {
+ complete_rq_drain_wr(qhp, wr);
+ wr = wr->next;
+ }
+}
+
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
@@ -868,9 +930,14 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
qhp = to_c4iw_qp(ibqp);
spin_lock_irqsave(&qhp->lock, flag);
- if (t4_wq_in_error(&qhp->wq)) {
+
+ /*
+ * If the qp has been flushed, then just insert a special
+ * drain cqe.
+ */
+ if (qhp->wq.flushed) {
spin_unlock_irqrestore(&qhp->lock, flag);
- complete_sq_drain_wr(qhp, wr);
+ err = complete_sq_drain_wrs(qhp, wr, bad_wr);
return err;
}
num_wrs = t4_sq_avail(&qhp->wq);
@@ -975,7 +1042,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
if (c4iw_wr_log) {
swsqe->sge_ts = cxgb4_read_sge_timestamp(
qhp->rhp->rdev.lldi.ports[0]);
- getnstimeofday(&swsqe->host_ts);
+ swsqe->host_time = ktime_get();
}
init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
@@ -1011,9 +1078,14 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qhp = to_c4iw_qp(ibqp);
spin_lock_irqsave(&qhp->lock, flag);
- if (t4_wq_in_error(&qhp->wq)) {
+
+ /*
+ * If the qp has been flushed, then just insert a special
+ * drain cqe.
+ */
+ if (qhp->wq.flushed) {
spin_unlock_irqrestore(&qhp->lock, flag);
- complete_rq_drain_wr(qhp, wr);
+ complete_rq_drain_wrs(qhp, wr);
return err;
}
num_wrs = t4_rq_avail(&qhp->wq);
@@ -1045,8 +1117,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
cxgb4_read_sge_timestamp(
qhp->rhp->rdev.lldi.ports[0]);
- getnstimeofday(
- &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
+ qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
+ ktime_get();
}
wqe->recv.opcode = FW_RI_RECV_WR;
@@ -1285,21 +1357,21 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
spin_unlock_irqrestore(&rchp->lock, flag);
if (schp == rchp) {
- if (t4_clear_cq_armed(&rchp->cq) &&
- (rq_flushed || sq_flushed)) {
+ if ((rq_flushed || sq_flushed) &&
+ t4_clear_cq_armed(&rchp->cq)) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
} else {
- if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
+ if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
- if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
+ if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
spin_lock_irqsave(&schp->comp_handler_lock, flag);
(*schp->ibcq.comp_handler)(&schp->ibcq,
schp->ibcq.cq_context);
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index e9ea94268d51..8369c7c8de83 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -197,6 +197,11 @@ struct t4_cqe {
#define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
#define CQE_SWCQE_V(x) ((x)<<CQE_SWCQE_S)
+#define CQE_DRAIN_S 10
+#define CQE_DRAIN_M 0x1
+#define CQE_DRAIN_G(x) ((((x) >> CQE_DRAIN_S)) & CQE_DRAIN_M)
+#define CQE_DRAIN_V(x) ((x)<<CQE_DRAIN_S)
+
#define CQE_STATUS_S 5
#define CQE_STATUS_M 0x1F
#define CQE_STATUS_G(x) ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
@@ -213,6 +218,7 @@ struct t4_cqe {
#define CQE_OPCODE_V(x) ((x)<<CQE_OPCODE_S)
#define SW_CQE(x) (CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define DRAIN_CQE(x) (CQE_DRAIN_G(be32_to_cpu((x)->header)))
#define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header)))
#define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
@@ -271,7 +277,7 @@ struct t4_swsqe {
int signaled;
u16 idx;
int flushed;
- struct timespec host_ts;
+ ktime_t host_time;
u64 sge_ts;
};
@@ -312,7 +318,7 @@ struct t4_sq {
struct t4_swrqe {
u64 wr_id;
- struct timespec host_ts;
+ ktime_t host_time;
u64 sge_ts;
};
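The new CQE_DRAIN_* macros follow this driver's S/M/G/V convention: a shift, a mask, a get helper, and a set-value helper over one header word. A standalone sketch of the round trip used by complete_sq_drain_wr(); the CQE_* values are copied from t4.h, while the status code 0x5 is an arbitrary illustrative value, not a claim about T4_ERR_SWFLUSH:

#include <stdio.h>
#include <stdint.h>

#define CQE_DRAIN_S	10
#define CQE_DRAIN_M	0x1
#define CQE_DRAIN_G(x)	(((x) >> CQE_DRAIN_S) & CQE_DRAIN_M)
#define CQE_DRAIN_V(x)	((x) << CQE_DRAIN_S)

#define CQE_STATUS_S	5
#define CQE_STATUS_M	0x1F
#define CQE_STATUS_G(x)	(((x) >> CQE_STATUS_S) & CQE_STATUS_M)
#define CQE_STATUS_V(x)	((x) << CQE_STATUS_S)

int main(void)
{
	uint32_t header = 0;

	/* compose a software drain CQE header, then read the fields back */
	header |= CQE_DRAIN_V(1) | CQE_STATUS_V(0x5);

	printf("drain=%u status=0x%x\n",
	       CQE_DRAIN_G(header), CQE_STATUS_G(header));
	return 0;
}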
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 4f057e8ffe50..6660f920f42e 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6518,11 +6518,12 @@ static void _dc_start(struct hfi1_devdata *dd)
if (!dd->dc_shutdown)
return;
- /*
- * Take the 8051 out of reset, wait until 8051 is ready, and set host
- * version bit.
- */
- release_and_wait_ready_8051_firmware(dd);
+ /* Take the 8051 out of reset */
+ write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+ /* Wait until 8051 is ready */
+ if (wait_fm_ready(dd, TIMEOUT_8051_START))
+ dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
+ __func__);
/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
write_csr(dd, DCC_CFG_RESET, 0x10);
@@ -8564,23 +8565,27 @@ int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
}
/*
- * If the 8051 is in reset mode (dd->dc_shutdown == 1), this function
- * will still continue executing.
- *
* Returns:
* < 0 = Linux error, not able to get access
* > 0 = 8051 command RETURN_CODE
*/
-static int _do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
- u64 *out_data)
+static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
+ u64 *out_data)
{
u64 reg, completed;
int return_code;
unsigned long timeout;
- lockdep_assert_held(&dd->dc8051_lock);
hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
+ mutex_lock(&dd->dc8051_lock);
+
+ /* We can't send any commands to the 8051 if it's in reset */
+ if (dd->dc_shutdown) {
+ return_code = -ENODEV;
+ goto fail;
+ }
+
/*
* If an 8051 host command timed out previously, then the 8051 is
* stuck.
@@ -8681,29 +8686,6 @@ static int _do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
fail:
- return return_code;
-}
-
-/*
- * Returns:
- * < 0 = Linux error, not able to get access
- * > 0 = 8051 command RETURN_CODE
- */
-static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
- u64 *out_data)
-{
- int return_code;
-
- mutex_lock(&dd->dc8051_lock);
- /* We can't send any commands to the 8051 if it's in reset */
- if (dd->dc_shutdown) {
- return_code = -ENODEV;
- goto fail;
- }
-
- return_code = _do_8051_command(dd, type, in_data, out_data);
-
-fail:
mutex_unlock(&dd->dc8051_lock);
return return_code;
}
@@ -8713,17 +8695,16 @@ static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
}
-static int _load_8051_config(struct hfi1_devdata *dd, u8 field_id,
- u8 lane_id, u32 config_data)
+int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
+ u8 lane_id, u32 config_data)
{
u64 data;
int ret;
- lockdep_assert_held(&dd->dc8051_lock);
data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
| (u64)config_data << LOAD_DATA_DATA_SHIFT;
- ret = _do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
+ ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd,
"load 8051 config: field id %d, lane %d, err %d\n",
@@ -8732,18 +8713,6 @@ static int _load_8051_config(struct hfi1_devdata *dd, u8 field_id,
return ret;
}
-int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
- u8 lane_id, u32 config_data)
-{
- int return_code;
-
- mutex_lock(&dd->dc8051_lock);
- return_code = _load_8051_config(dd, field_id, lane_id, config_data);
- mutex_unlock(&dd->dc8051_lock);
-
- return return_code;
-}
-
/*
* Read the 8051 firmware "registers". Use the RAM directly. Always
* set the result, even on error.
@@ -8859,14 +8828,13 @@ int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
u32 frame;
u32 mask;
- lockdep_assert_held(&dd->dc8051_lock);
mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
/* Clear, then set field */
frame &= ~mask;
frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
- return _load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
- frame);
+ return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
+ frame);
}
void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
@@ -9270,6 +9238,14 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
if (ret != HCMD_SUCCESS)
goto set_local_link_attributes_fail;
+ ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to set host interface version, return 0x%x\n",
+ ret);
+ goto set_local_link_attributes_fail;
+ }
+
/*
* DC supports continuous updates.
*/
@@ -14944,9 +14920,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
num_vls > HFI1_MAX_VLS_SUPPORTED) {
- hfi1_early_err(&pdev->dev,
- "Invalid num_vls %u, using %u VLs\n",
- num_vls, HFI1_MAX_VLS_SUPPORTED);
+ dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
+ num_vls, HFI1_MAX_VLS_SUPPORTED);
num_vls = HFI1_MAX_VLS_SUPPORTED;
}
ppd->vls_supported = num_vls;
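The chip.c hunks above fold the old _do_8051_command()/do_8051_command() split into a single function that holds dd->dc8051_lock for its entire duration, which is why the lockdep_assert_held() checks disappear. Reduced to its locking skeleton (a sketch only; the real body also detects a stuck 8051 and polls the command-completion CSR):

static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
                           u64 *out_data)
{
        int return_code;

        mutex_lock(&dd->dc8051_lock);
        /* the 8051 cannot accept commands while it is held in reset */
        if (dd->dc_shutdown) {
                return_code = -ENODEV;
                goto fail;
        }
        /* ... write the HOST_CMD CSRs, wait for COMPLETED, read RETURN_CODE ... */
        return_code = HCMD_SUCCESS;     /* placeholder for the elided body */
fail:
        mutex_unlock(&dd->dc8051_lock);
        return return_code;
}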
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 133e313feca4..21fca8ec5076 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -508,6 +508,7 @@
#define DOWN_REMOTE_REASON_SHIFT 16
#define DOWN_REMOTE_REASON_MASK 0xff
+#define HOST_INTERFACE_VERSION 1
#define HOST_INTERFACE_VERSION_SHIFT 16
#define HOST_INTERFACE_VERSION_MASK 0xff
@@ -713,7 +714,6 @@ void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
u8 *ver_patch);
int write_host_interface_version(struct hfi1_devdata *dd, u8 version);
void read_guid(struct hfi1_devdata *dd);
-int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd);
int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout);
void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
u8 neigh_reason, u8 rem_reason);
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 4f65ac671044..067b29f35f21 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -159,22 +159,6 @@ static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}
-const char *get_unit_name(int unit)
-{
- static char iname[16];
-
- snprintf(iname, sizeof(iname), DRIVER_NAME "_%u", unit);
- return iname;
-}
-
-const char *get_card_name(struct rvt_dev_info *rdi)
-{
- struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
- struct hfi1_devdata *dd = container_of(ibdev,
- struct hfi1_devdata, verbs_dev);
- return get_unit_name(dd->unit);
-}
-
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 7750a9c38b06..bd6f03cc5ee0 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -74,7 +74,7 @@
static int hfi1_file_open(struct inode *inode, struct file *fp);
static int hfi1_file_close(struct inode *inode, struct file *fp);
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
-static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
+static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt);
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);
static u64 kvirt_to_phys(void *addr);
@@ -102,8 +102,8 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
struct hfi1_user_info *uinfo,
struct hfi1_ctxtdata **cd);
static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
-static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
-static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
+static __poll_t poll_urgent(struct file *fp, struct poll_table_struct *pt);
+static __poll_t poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
unsigned long arg);
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
@@ -607,10 +607,10 @@ static int vma_fault(struct vm_fault *vmf)
return 0;
}
-static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
+static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
struct hfi1_ctxtdata *uctxt;
- unsigned pollflag;
+ __poll_t pollflag;
uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
if (!uctxt)
@@ -763,11 +763,11 @@ static int complete_subctxt(struct hfi1_filedata *fd)
}
if (ret) {
- hfi1_rcd_put(fd->uctxt);
- fd->uctxt = NULL;
spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
__clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
+ hfi1_rcd_put(fd->uctxt);
+ fd->uctxt = NULL;
}
return ret;
@@ -1425,13 +1425,13 @@ static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
return ret;
}
-static unsigned int poll_urgent(struct file *fp,
+static __poll_t poll_urgent(struct file *fp,
struct poll_table_struct *pt)
{
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &uctxt->wait, pt);
@@ -1448,13 +1448,13 @@ static unsigned int poll_urgent(struct file *fp,
return pollflag;
}
-static unsigned int poll_next(struct file *fp,
+static __poll_t poll_next(struct file *fp,
struct poll_table_struct *pt)
{
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &uctxt->wait, pt);
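The unsigned int to __poll_t conversions in file_ops.c above let sparse verify that poll handlers return only EPOLL* bits. Every converted handler now follows this shape (a generic sketch; data_is_ready() is a placeholder for the driver's actual readiness test):

static __poll_t example_poll(struct file *fp, struct poll_table_struct *pt)
{
        struct hfi1_filedata *fd = fp->private_data;
        __poll_t mask = 0;

        poll_wait(fp, &fd->uctxt->wait, pt);
        if (data_is_ready(fd))          /* placeholder readiness test */
                mask |= EPOLLIN | EPOLLRDNORM;
        return mask;    /* must be __poll_t bits; sparse checks this */
}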
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 98868df78a7e..2b57ba70ddd6 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -68,7 +68,6 @@
#define ALT_FW_FABRIC_NAME "hfi1_fabric_d.fw"
#define ALT_FW_SBUS_NAME "hfi1_sbus_d.fw"
#define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"
-#define HOST_INTERFACE_VERSION 1
MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC);
MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME);
@@ -976,46 +975,6 @@ int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
}
/*
- * Clear all reset bits, releasing the 8051.
- * Wait for firmware to be ready to accept host requests.
- * Then, set host version bit.
- *
- * This function executes even if the 8051 is in reset mode when
- * dd->dc_shutdown == 1.
- *
- * Expects dd->dc8051_lock to be held.
- */
-int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd)
-{
- int ret;
-
- lockdep_assert_held(&dd->dc8051_lock);
- /* clear all reset bits, releasing the 8051 */
- write_csr(dd, DC_DC8051_CFG_RST, 0ull);
-
- /*
- * Wait for firmware to be ready to accept host
- * requests.
- */
- ret = wait_fm_ready(dd, TIMEOUT_8051_START);
- if (ret) {
- dd_dev_err(dd, "8051 start timeout, current FW state 0x%x\n",
- get_firmware_state(dd));
- return ret;
- }
-
- ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(dd,
- "Failed to set host interface version, return 0x%x\n",
- ret);
- return -EIO;
- }
-
- return 0;
-}
-
-/*
* Load the 8051 firmware.
*/
static int load_8051_firmware(struct hfi1_devdata *dd,
@@ -1080,22 +1039,31 @@ static int load_8051_firmware(struct hfi1_devdata *dd,
if (ret)
return ret;
+ /* clear all reset bits, releasing the 8051 */
+ write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+
/*
- * Clear all reset bits, releasing the 8051.
* DC reset step 5. Wait for firmware to be ready to accept host
* requests.
- * Then, set host version bit.
*/
- mutex_lock(&dd->dc8051_lock);
- ret = release_and_wait_ready_8051_firmware(dd);
- mutex_unlock(&dd->dc8051_lock);
- if (ret)
- return ret;
+ ret = wait_fm_ready(dd, TIMEOUT_8051_START);
+ if (ret) { /* timed out */
+ dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
+ get_firmware_state(dd));
+ return -ETIMEDOUT;
+ }
read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
(int)ver_major, (int)ver_minor, (int)ver_patch);
dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
+ ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to set host interface version, return 0x%x\n",
+ ret);
+ return -EIO;
+ }
return 0;
}
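With release_and_wait_ready_8051_firmware() gone, load_8051_firmware() performs the start-up steps inline, and the host-version write is additionally repeated at every link bring-up (see the set_local_link_attributes() hunk in chip.c above). Condensed from the hunk above, the sequence is:

/* 1. release the 8051 from reset */
write_csr(dd, DC_DC8051_CFG_RST, 0ull);
/* 2. DC reset step 5: wait for the firmware to accept host requests */
if (wait_fm_ready(dd, TIMEOUT_8051_START))
        return -ETIMEDOUT;
/* 3. record the firmware version */
read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
/* 4. tell the 8051 which host interface version the driver speaks */
if (write_host_interface_version(dd, HOST_INTERFACE_VERSION) != HCMD_SUCCESS)
        return -EIO;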
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 4a9b4d7efe63..b42c22292597 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1131,7 +1131,6 @@ struct hfi1_devdata {
u16 pcie_lnkctl;
u16 pcie_devctl2;
u32 pci_msix0;
- u32 pci_lnkctl3;
u32 pci_tph2;
/*
@@ -1624,7 +1623,7 @@ static int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
* the 'error info' for this failure.
*/
static void ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
- u16 slid)
+ u32 slid)
{
struct hfi1_devdata *dd = ppd->dd;
@@ -1972,8 +1971,6 @@ int get_platform_config_field(struct hfi1_devdata *dd,
table_type, int table_index, int field_index,
u32 *data, u32 len);
-const char *get_unit_name(int unit);
-const char *get_card_name(struct rvt_dev_info *rdi);
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);
/*
@@ -2123,39 +2120,42 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
#define dd_dev_emerg(dd, fmt, ...) \
dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_err(dd, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_err_ratelimited(dd, fmt, ...) \
dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
#define dd_dev_warn(dd, fmt, ...) \
dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_warn_ratelimited(dd, fmt, ...) \
dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
#define dd_dev_info(dd, fmt, ...) \
dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_info_ratelimited(dd, fmt, ...) \
dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
#define dd_dev_dbg(dd, fmt, ...) \
dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define hfi1_dev_porterr(dd, port, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
- get_unit_name((dd)->unit), (port), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
/*
* this is used for formatting hw error messages...
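Every dd_dev_* macro above now derives its unit tag from the rdmavt device name rather than from get_unit_name(), whose static char buffer was not safe for concurrent callers. Assuming rvt_get_ibdev_name() simply returns the name installed by rvt_set_ibdev_name() (see the init.c hunk below), a call such as:

dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
           num_vls, HFI1_MAX_VLS_SUPPORTED);

logs roughly "hfi1 0000:05:00.0: hfi1_0: Invalid num_vls 9, using 8 VLs" (PCI address and values illustrative), with "hfi1_0" coming from the per-device IB name instead of a shared static array.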
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 8e3b3e7d829a..9b128268fb28 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1272,6 +1272,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
"Could not allocate unit ID: error %d\n", -ret);
goto bail;
}
+ rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
+
/*
* Initialize all locks for the device. This needs to be as early as
* possible so locks are usable.
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index cf8dba34fe30..34547a48a445 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -4348,11 +4348,7 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
*/
if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
return 0;
- /*
- * On OPA devices it is okay to lose the upper 16 bits of LID as this
- * information is obtained elsewhere. Mask off the upper 16 bits.
- */
- ingress_pkey_table_fail(ppd, pkey, ib_lid_cpu16(0xFFFF & in_wc->slid));
+ ingress_pkey_table_fail(ppd, pkey, in_wc->slid);
return 1;
}
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index e7b3ce123da6..70aceefe14d5 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -77,6 +77,7 @@ static void do_remove(struct mmu_rb_handler *handler,
static void handle_remove(struct work_struct *work);
static const struct mmu_notifier_ops mn_opts = {
+ .flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
.invalidate_range_start = mmu_notifier_range_start,
};
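The new MMU_INVALIDATE_DOES_NOT_BLOCK flag advertises that this notifier's invalidation callbacks never sleep, allowing the core mm to invoke them from non-blocking contexts. The promise holds only if the handler restricts itself to non-sleeping primitives; a sketch of the pattern, with handler field names assumed from this driver:

static void mmu_notifier_range_start(struct mmu_notifier *mn,
                                     struct mm_struct *mm,
                                     unsigned long start, unsigned long end)
{
        struct mmu_rb_handler *handler =
                container_of(mn, struct mmu_rb_handler, mn);
        unsigned long flags;

        spin_lock_irqsave(&handler->lock, flags);       /* never sleeps */
        /* ... walk the interval tree, queue overlapping nodes for removal ... */
        spin_unlock_irqrestore(&handler->lock, flags);
}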
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 09e50fd2a08f..8c7e7a60b715 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -411,15 +411,12 @@ int restore_pci_variables(struct hfi1_devdata *dd)
if (ret)
goto error;
- ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
- dd->pci_lnkctl3);
- if (ret)
- goto error;
-
- ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2);
- if (ret)
- goto error;
-
+ if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
+ ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2,
+ dd->pci_tph2);
+ if (ret)
+ goto error;
+ }
return 0;
error:
@@ -469,15 +466,12 @@ int save_pci_variables(struct hfi1_devdata *dd)
if (ret)
goto error;
- ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
- &dd->pci_lnkctl3);
- if (ret)
- goto error;
-
- ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2);
- if (ret)
- goto error;
-
+ if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
+ ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2,
+ &dd->pci_tph2);
+ if (ret)
+ goto error;
+ }
return 0;
error:
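save_pci_variables()/restore_pci_variables() stop touching PCIE_CFG_SPCIE1 (the cached pci_lnkctl3 field is removed from hfi1_devdata above) and now access the TPH register only when the device actually advertises a TPH extended capability. The guard, in generic form (saved_tph2 stands in for the driver's cached value):

if (pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_TPH)) {
        ret = pci_read_config_dword(pdev, PCIE_CFG_TPH2, &saved_tph2);
        if (ret)
                return pcibios_err_to_errno(ret);
}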
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 4b01ccd895b4..5507910e8b8a 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -556,6 +556,8 @@ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
struct sdma_engine *sde;
struct send_context *send_context;
struct rvt_ack_entry *e = NULL;
+ struct rvt_srq *srq = qp->ibqp.srq ?
+ ibsrq_to_rvtsrq(qp->ibqp.srq) : NULL;
sde = qp_to_sdma_engine(qp, priv->s_sc);
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
@@ -563,7 +565,7 @@ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
if (qp->s_ack_queue)
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
seq_printf(s,
- "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x\n",
+ "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n",
iter->n,
qp_idle(qp) ? "I" : "B",
qp->ibqp.qp_num,
@@ -610,7 +612,11 @@ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
/* ack queue information */
e ? e->opcode : 0,
e ? e->psn : 0,
- e ? e->lpsn : 0);
+ e ? e->lpsn : 0,
+ qp->r_min_rnr_timer,
+ srq ? "SRQ" : "RQ",
+ srq ? srq->rq.size : qp->r_rq.size
+ );
}
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index af5f7936f7e5..14cc212a21c7 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -302,7 +302,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -346,7 +345,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
newreq = 0;
if (qp->s_cur == qp->s_tail) {
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_tail == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
@@ -843,11 +841,11 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
/* Convert dwords to flits */
len = (*hwords + *nwords) >> 1;
- hfi1_make_16b_hdr(hdr,
- ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr),
+ hfi1_make_16b_hdr(hdr, ppd->lid |
+ (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
+ ((1 << ppd->lmc) - 1)),
opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
- 16B),
- len, pkey, becn, 0, l4, sc5);
+ 16B), len, pkey, becn, 0, l4, sc5);
bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
bth0 |= extra_bytes << 20;
@@ -900,7 +898,6 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd,
}
/* Ensure s_rdma_ack_cnt changes are committed */
- smp_read_barrier_depends();
if (qp->s_rdma_ack_cnt) {
hfi1_queue_rc_ack(qp, is_fecn);
return;
@@ -1562,7 +1559,6 @@ static void rc_rcv_resp(struct hfi1_packet *packet)
trace_hfi1_ack(qp, psn);
/* Ignore invalid responses. */
- smp_read_barrier_depends(); /* see post_one_send */
if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
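The smp_read_barrier_depends() deletions in rc.c (and in ruc.c, sdma.c, uc.c and ud.c below) are possible because READ_ONCE() now implies the read-dependency barrier on every architecture, Alpha included. The consumer side of the pairing with post_one_send() therefore shrinks to (simplified; the producer still publishes s_head with its own write barrier):

/* consumer: dependency ordering is folded into READ_ONCE() */
if (qp->s_last == READ_ONCE(qp->s_head))
        goto bail;                      /* ring empty, nothing to do */
wqe = rvt_get_swqe_ptr(qp, qp->s_last);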
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 2c7fc6e331ea..13b994738f41 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -362,7 +362,6 @@ static void ruc_loopback(struct rvt_qp *sqp)
sqp->s_flags |= RVT_S_BUSY;
again:
- smp_read_barrier_depends(); /* see post_one_send() */
if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 31c8f89b5fc8..61c130dbed10 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -553,7 +553,6 @@ static void sdma_hw_clean_up_task(unsigned long opaque)
static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
{
- smp_read_barrier_depends(); /* see sdma_update_tail() */
return sde->tx_ring[sde->tx_head & sde->sdma_mask];
}
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 991bbee04821..132b63e787d1 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -79,7 +79,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -119,7 +118,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_cur == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index beb5091eccca..deb184574395 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -486,7 +486,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -500,7 +499,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
/* see post_one_send() */
- smp_read_barrier_depends();
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index a38785e224cc..b8776a362a91 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1486,7 +1486,7 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num,
props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
4096 : hfi1_max_mtu), IB_MTU_4096);
props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
- mtu_to_enum(ppd->ibmtu, IB_MTU_2048);
+ mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
/*
* sm_lid of 0xFFFF needs special handling so that it can
@@ -1844,7 +1844,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
struct hfi1_ibport *ibp = &ppd->ibport_data;
unsigned i;
int ret;
- size_t lcpysz = IB_DEVICE_NAME_MAX;
for (i = 0; i < dd->num_pports; i++)
init_ibport(ppd + i);
@@ -1872,8 +1871,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
*/
if (!ib_hfi1_sys_image_guid)
ib_hfi1_sys_image_guid = ibdev->node_guid;
- lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
- strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
ibdev->owner = THIS_MODULE;
ibdev->phys_port_cnt = dd->num_pports;
ibdev->dev.parent = &dd->pcidev->dev;
@@ -1893,7 +1890,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
* Fill in rvt info object.
*/
dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
- dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name;
dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index ff426a625e13..97bf2cd1cacb 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -5,7 +5,7 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
-hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \
+hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o
obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 1085cb249bc1..9ebe839d8b24 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -103,6 +103,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
context->out_param = out_param;
complete(&context->done);
}
+EXPORT_SYMBOL_GPL(hns_roce_cmd_event);
/* this should be called with "use_events" */
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index b1c94223c28b..9549ae51a0dd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -88,6 +88,16 @@ enum {
HNS_ROCE_CMD_DESTROY_SRQC_BT0 = 0x38,
HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
+
+ /* EQC commands */
+ HNS_ROCE_CMD_CREATE_AEQC = 0x80,
+ HNS_ROCE_CMD_MODIFY_AEQC = 0x81,
+ HNS_ROCE_CMD_QUERY_AEQC = 0x82,
+ HNS_ROCE_CMD_DESTROY_AEQC = 0x83,
+ HNS_ROCE_CMD_CREATE_CEQC = 0x90,
+ HNS_ROCE_CMD_MODIFY_CEQC = 0x91,
+ HNS_ROCE_CMD_QUERY_CEQC = 0x92,
+ HNS_ROCE_CMD_DESTROY_CEQC = 0x93,
};
enum {
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 7ecb7a4147a8..dd67fafd0c40 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -376,6 +376,12 @@
#define ROCEE_RX_CMQ_TAIL_REG 0x07024
#define ROCEE_RX_CMQ_HEAD_REG 0x07028
+#define ROCEE_VF_MB_CFG0_REG 0x40
+#define ROCEE_VF_MB_STATUS_REG 0x58
+
+#define ROCEE_VF_EQ_DB_CFG0_REG 0x238
+#define ROCEE_VF_EQ_DB_CFG1_REG 0x23C
+
#define ROCEE_VF_SMAC_CFG0_REG 0x12000
#define ROCEE_VF_SMAC_CFG1_REG 0x12004
@@ -385,4 +391,9 @@
#define ROCEE_VF_SGID_CFG3_REG 0x1000c
#define ROCEE_VF_SGID_CFG4_REG 0x10010
+#define ROCEE_VF_ABN_INT_CFG_REG 0x13000
+#define ROCEE_VF_ABN_INT_ST_REG 0x13004
+#define ROCEE_VF_ABN_INT_EN_REG 0x13008
+#define ROCEE_VF_EVENT_INT_EN_REG 0x1300c
+
#endif /* _HNS_ROCE_COMMON_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 2111b57a3489..bccc9b54c9ce 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -196,15 +196,14 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
if (ret)
dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
- if (hr_dev->eq_table.eq) {
- /* Waiting interrupt process procedure carried out */
- synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
-
- /* wait for all interrupt processed */
- if (atomic_dec_and_test(&hr_cq->refcount))
- complete(&hr_cq->free);
- wait_for_completion(&hr_cq->free);
- }
+
+ /* Wait for any in-flight interrupt handling to finish */
+ synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+
+ /* wait until all interrupts have been processed */
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
+ wait_for_completion(&hr_cq->free);
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, hr_cq->cqn);
@@ -460,6 +459,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
++cq->arm_sn;
cq->comp(cq);
}
+EXPORT_SYMBOL_GPL(hns_roce_cq_completion);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
@@ -482,6 +482,7 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
}
+EXPORT_SYMBOL_GPL(hns_roce_cq_event);
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
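hns_roce_cq_completion() and hns_roce_cq_event() gain EXPORT_SYMBOL_GPL() (as does hns_roce_cmd_event() in hns_roce_cmd.c above) because the EQ interrupt handlers are moving out of the core hns-roce module into the separately built per-generation hw modules. From such a module, a CEQ handler dispatches back into the core roughly as follows (the CQN field accessors are illustrative):

cqn = roce_get_field(ceqe->comp, CEQE_COMP_CQN_M, CEQE_COMP_CQN_S);
hns_roce_cq_completion(hr_dev, cqn);    /* exported by the core above */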
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index b154ce40cded..42c3b5a2d441 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -62,12 +62,16 @@
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
#define HNS_ROCE_MIN_CQE_CNT 16
-#define HNS_ROCE_MAX_IRQ_NUM 34
+#define HNS_ROCE_MAX_IRQ_NUM 128
-#define HNS_ROCE_COMP_VEC_NUM 32
+#define EQ_ENABLE 1
+#define EQ_DISABLE 0
-#define HNS_ROCE_AEQE_VEC_NUM 1
-#define HNS_ROCE_AEQE_OF_VEC_NUM 1
+#define HNS_ROCE_CEQ 0
+#define HNS_ROCE_AEQ 1
+
+#define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
+#define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
/* 4G/4K = 1M */
#define HNS_ROCE_SL_SHIFT 28
@@ -130,6 +134,7 @@ enum hns_roce_event {
HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
HNS_ROCE_EVENT_TYPE_MB = 0x13,
HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW = 0x14,
+ HNS_ROCE_EVENT_TYPE_FLR = 0x15,
};
/* Local Work Queue Catastrophic Error,SUBTYPE 0x5 */
@@ -173,6 +178,7 @@ enum {
enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
+ HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2)
};
enum hns_roce_mtt_type {
@@ -441,6 +447,21 @@ struct hns_roce_cmd_mailbox {
struct hns_roce_dev;
+struct hns_roce_rinl_sge {
+ void *addr;
+ u32 len;
+};
+
+struct hns_roce_rinl_wqe {
+ struct hns_roce_rinl_sge *sg_list;
+ u32 sge_cnt;
+};
+
+struct hns_roce_rinl_buf {
+ struct hns_roce_rinl_wqe *wqe_list;
+ u32 wqe_cnt;
+};
+
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_buf hr_buf;
@@ -462,7 +483,9 @@ struct hns_roce_qp {
u8 resp_depth;
u8 state;
u32 access_flags;
+ u32 atomic_rd_en;
u32 pkey_index;
+ u32 qkey;
void (*event)(struct hns_roce_qp *,
enum hns_roce_event);
unsigned long qpn;
@@ -472,6 +495,8 @@ struct hns_roce_qp {
struct hns_roce_sge sge;
u32 next_sge;
+
+ struct hns_roce_rinl_buf rq_inl_buf;
};
struct hns_roce_sqp {
@@ -485,6 +510,45 @@ struct hns_roce_ib_iboe {
u8 phy_port[HNS_ROCE_MAX_PORTS];
};
+enum {
+ HNS_ROCE_EQ_STAT_INVALID = 0,
+ HNS_ROCE_EQ_STAT_VALID = 2,
+};
+
+struct hns_roce_ceqe {
+ u32 comp;
+};
+
+struct hns_roce_aeqe {
+ u32 asyn;
+ union {
+ struct {
+ u32 qp;
+ u32 rsv0;
+ u32 rsv1;
+ } qp_event;
+
+ struct {
+ u32 cq;
+ u32 rsv0;
+ u32 rsv1;
+ } cq_event;
+
+ struct {
+ u32 ceqe;
+ u32 rsv0;
+ u32 rsv1;
+ } ce_event;
+
+ struct {
+ __le64 out_param;
+ __le16 token;
+ u8 status;
+ u8 rsv0;
+ } __packed cmd;
+ } event;
+};
+
struct hns_roce_eq {
struct hns_roce_dev *hr_dev;
void __iomem *doorbell;
@@ -498,11 +562,31 @@ struct hns_roce_eq {
int log_page_size;
int cons_index;
struct hns_roce_buf_list *buf_list;
+ int over_ignore;
+ int coalesce;
+ int arm_st;
+ u64 eqe_ba;
+ int eqe_ba_pg_sz;
+ int eqe_buf_pg_sz;
+ int hop_num;
+ u64 *bt_l0; /* Base address table for L0 */
+ u64 **bt_l1; /* Base address table for L1 */
+ u64 **buf;
+ dma_addr_t l0_dma;
+ dma_addr_t *l1_dma;
+ dma_addr_t *buf_dma;
+ u32 l0_last_num; /* L0 last chunk num */
+ u32 l1_last_num; /* L1 last chunk num */
+ int eq_max_cnt;
+ int eq_period;
+ int shift;
+ dma_addr_t cur_eqe_ba;
+ dma_addr_t nxt_eqe_ba;
};
struct hns_roce_eq_table {
struct hns_roce_eq *eq;
- void __iomem **eqc_base;
+ void __iomem **eqc_base; /* only for hw v1 */
};
struct hns_roce_caps {
@@ -528,7 +612,7 @@ struct hns_roce_caps {
u32 min_wqes;
int reserved_cqs;
int num_aeq_vectors; /* 1 */
- int num_comp_vectors; /* 32 ceq */
+ int num_comp_vectors;
int num_other_vectors;
int num_mtpts;
u32 num_mtt_segs;
@@ -550,7 +634,7 @@ struct hns_roce_caps {
u32 pbl_buf_pg_sz;
u32 pbl_hop_num;
int aeqe_depth;
- int ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
+ int ceqe_depth;
enum ib_mtu max_mtu;
u32 qpc_bt_num;
u32 srqc_bt_num;
@@ -574,6 +658,9 @@ struct hns_roce_caps {
u32 cqe_ba_pg_sz;
u32 cqe_buf_pg_sz;
u32 cqe_hop_num;
+ u32 eqe_ba_pg_sz;
+ u32 eqe_buf_pg_sz;
+ u32 eqe_hop_num;
u32 chunk_sz; /* chunk size in non multihop mode*/
u64 flags;
};
@@ -623,6 +710,8 @@ struct hns_roce_hw {
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
int (*destroy_cq)(struct ib_cq *ibcq);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+ int (*init_eq)(struct hns_roce_dev *hr_dev);
+ void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
};
struct hns_roce_dev {
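struct hns_roce_hw grows init_eq/cleanup_eq hooks so that each hardware generation brings up its own event queues once the shared hns_roce_eq.c is deleted below; the core no longer needs to know the EQC register layout. A sketch of the dispatch assumed at the core init path:

ret = hr_dev->hw->init_eq(hr_dev);
if (ret) {
        dev_err(dev, "eq init failed (%d)\n", ret);
        goto err_cleanup;
}
/* ... and on teardown: */
hr_dev->hw->cleanup_eq(hr_dev);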
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
deleted file mode 100644
index d184431e2bf5..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ /dev/null
@@ -1,759 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include "hns_roce_common.h"
-#include "hns_roce_device.h"
-#include "hns_roce_eq.h"
-
-static void eq_set_cons_index(struct hns_roce_eq *eq, int req_not)
-{
- roce_raw_write((eq->cons_index & CONS_INDEX_MASK) |
- (req_not << eq->log_entries), eq->doorbell);
- /* Memory barrier */
- mb();
-}
-
-static struct hns_roce_aeqe *get_aeqe(struct hns_roce_eq *eq, u32 entry)
-{
- unsigned long off = (entry & (eq->entries - 1)) *
- HNS_ROCE_AEQ_ENTRY_SIZE;
-
- return (struct hns_roce_aeqe *)((u8 *)
- (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
- off % HNS_ROCE_BA_SIZE);
-}
-
-static struct hns_roce_aeqe *next_aeqe_sw(struct hns_roce_eq *eq)
-{
- struct hns_roce_aeqe *aeqe = get_aeqe(eq, eq->cons_index);
-
- return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
- !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
-}
-
-static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe, int qpn)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_LWQCE_QPC_ERROR:
- dev_warn(dev, "QP %d, QPC error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_MTU_ERROR:
- dev_warn(dev, "QP %d, MTU error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
- dev_warn(dev, "QP %d, WQE shift error\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SL_ERROR:
- dev_warn(dev, "QP %d, SL error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_PORT_ERROR:
- dev_warn(dev, "QP %d, port error.\n", qpn);
- break;
- default:
- break;
- }
-}
-
-static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int qpn)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- dev_warn(dev, "Local Access Violation Work Queue Error.\n");
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
- dev_warn(dev, "QP %d, R_key violation.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_LENGTH_ERROR:
- dev_warn(dev, "QP %d, length error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_VA_ERROR:
- dev_warn(dev, "QP %d, VA error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_PD_ERROR:
- dev_err(dev, "QP %d, PD error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
- dev_warn(dev, "QP %d, rw acc error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
- dev_warn(dev, "QP %d, key state error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
- dev_warn(dev, "QP %d, MR operation error.\n", qpn);
- break;
- default:
- break;
- }
-}
-
-static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type)
-{
- struct device *dev = &hr_dev->pdev->dev;
- int phy_port;
- int qpn;
-
- qpn = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
- phy_port = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
- if (qpn <= 1)
- qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
- "QP %d, phy_port %d.\n", qpn, phy_port);
- break;
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
- break;
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
- break;
- default:
- break;
- }
-
- hns_roce_qp_event(hr_dev, qpn, event_type);
-}
-
-static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type)
-{
- struct device *dev = &hr_dev->pdev->dev;
- u32 cqn;
-
- cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_warn(dev, "CQ 0x%x access err.\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%x overflow\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
- break;
- default:
- break;
- }
-
- hns_roce_cq_event(hr_dev, cqn, event_type);
-}
-
-static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
- dev_warn(dev, "SDB overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
- dev_warn(dev, "SDB almost overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
- dev_warn(dev, "SDB almost empty.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
- dev_warn(dev, "ODB overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
- dev_warn(dev, "ODB almost overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
- dev_warn(dev, "SDB almost empty.\n");
- break;
- default:
- break;
- }
-}
-
-static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
-{
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_aeqe *aeqe;
- int aeqes_found = 0;
- int event_type;
-
- while ((aeqe = next_aeqe_sw(eq))) {
- dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- /* Memory barrier */
- rmb();
-
- event_type = roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- dev_warn(dev, "PATH MIG not supported\n");
- break;
- case HNS_ROCE_EVENT_TYPE_COMM_EST:
- dev_warn(dev, "COMMUNICATION established\n");
- break;
- case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- dev_warn(dev, "SQ DRAINED not supported\n");
- break;
- case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- dev_warn(dev, "PATH MIG failed\n");
- break;
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_qp_err_handle(hr_dev, aeqe, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
- case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
- case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
- dev_warn(dev, "SRQ not support!\n");
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- hns_roce_cq_err_handle(hr_dev, aeqe, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
- dev_warn(dev, "port change.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_MB:
- hns_roce_cmd_event(hr_dev,
- le16_to_cpu(aeqe->event.cmd.token),
- aeqe->event.cmd.status,
- le64_to_cpu(aeqe->event.cmd.out_param
- ));
- break;
- case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
- hns_roce_db_overflow_handle(hr_dev, aeqe);
- break;
- case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
- dev_warn(dev, "CEQ 0x%lx overflow.\n",
- roce_get_field(aeqe->event.ce_event.ceqe,
- HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
- HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
- break;
- default:
- dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
- event_type, eq->eqn, eq->cons_index);
- break;
- }
-
- eq->cons_index++;
- aeqes_found = 1;
-
- if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
- dev_warn(dev, "cons_index overflow, set back to zero\n"
- );
- eq->cons_index = 0;
- }
- }
-
- eq_set_cons_index(eq, 0);
-
- return aeqes_found;
-}
-
-static struct hns_roce_ceqe *get_ceqe(struct hns_roce_eq *eq, u32 entry)
-{
- unsigned long off = (entry & (eq->entries - 1)) *
- HNS_ROCE_CEQ_ENTRY_SIZE;
-
- return (struct hns_roce_ceqe *)((u8 *)
- (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
- off % HNS_ROCE_BA_SIZE);
-}
-
-static struct hns_roce_ceqe *next_ceqe_sw(struct hns_roce_eq *eq)
-{
- struct hns_roce_ceqe *ceqe = get_ceqe(eq, eq->cons_index);
-
- return (!!(roce_get_bit(ceqe->ceqe.comp,
- HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
- (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
-}
-
-static int hns_roce_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
-{
- struct hns_roce_ceqe *ceqe;
- int ceqes_found = 0;
- u32 cqn;
-
- while ((ceqe = next_ceqe_sw(eq))) {
- /* Memory barrier */
- rmb();
- cqn = roce_get_field(ceqe->ceqe.comp,
- HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
- HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
- hns_roce_cq_completion(hr_dev, cqn);
-
- ++eq->cons_index;
- ceqes_found = 1;
-
- if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth[eq->eqn] - 1) {
- dev_warn(&eq->hr_dev->pdev->dev,
- "cons_index overflow, set back to zero\n");
- eq->cons_index = 0;
- }
- }
-
- eq_set_cons_index(eq, 0);
-
- return ceqes_found;
-}
-
-static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- struct device *dev = &eq->hr_dev->pdev->dev;
- int eqovf_found = 0;
- u32 caepaemask_val;
- u32 cealmovf_val;
- u32 caepaest_val;
- u32 aeshift_val;
- u32 ceshift_val;
- u32 cemask_val;
- int i = 0;
-
- /**
- * AEQ overflow ECC mult bit err CEQ overflow alarm
- * must clear interrupt, mask irq, clear irq, cancel mask operation
- */
- aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
-
- if (roce_get_bit(aeshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
- dev_warn(dev, "AEQ overflow!\n");
-
- /* Set mask */
- caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(caepaemask_val,
- ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_ENABLE);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
-
- /* Clear int state(INT_WC : write 1 clear) */
- caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
- roce_set_bit(caepaest_val,
- ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
- roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
-
- /* Clear mask */
- caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(caepaemask_val,
- ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_DISABLE);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
- }
-
- /* CEQ almost overflow */
- for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
- ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
- i * CEQ_REG_OFFSET);
-
- if (roce_get_bit(ceshift_val,
- ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
- dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
- eqovf_found++;
-
- /* Set mask */
- cemask_val = roce_read(hr_dev,
- ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET);
- roce_set_bit(cemask_val,
- ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_ENABLE);
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, cemask_val);
-
- /* Clear int state(INT_WC : write 1 clear) */
- cealmovf_val = roce_read(hr_dev,
- ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
- i * CEQ_REG_OFFSET);
- roce_set_bit(cealmovf_val,
- ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
- 1);
- roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
- i * CEQ_REG_OFFSET, cealmovf_val);
-
- /* Clear mask */
- cemask_val = roce_read(hr_dev,
- ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET);
- roce_set_bit(cemask_val,
- ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_DISABLE);
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, cemask_val);
- }
- }
-
- /* ECC multi-bit error alarm */
- dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
-
- dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
-
- return eqovf_found;
-}
-
-static int hns_roce_eq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
-{
- int eqes_found = 0;
-
- if (likely(eq->type_flag == HNS_ROCE_CEQ))
- /* CEQ irq routine, CEQ is pulse irq, not clear */
- eqes_found = hns_roce_ceq_int(hr_dev, eq);
- else if (likely(eq->type_flag == HNS_ROCE_AEQ))
- /* AEQ irq routine, AEQ is pulse irq, not clear */
- eqes_found = hns_roce_aeq_int(hr_dev, eq);
- else
- /* AEQ queue overflow irq */
- eqes_found = hns_roce_aeq_ovf_int(hr_dev, eq);
-
- return eqes_found;
-}
-
-static irqreturn_t hns_roce_msi_x_interrupt(int irq, void *eq_ptr)
-{
- int int_work = 0;
- struct hns_roce_eq *eq = eq_ptr;
- struct hns_roce_dev *hr_dev = eq->hr_dev;
-
- int_work = hns_roce_eq_int(hr_dev, eq);
-
- return IRQ_RETVAL(int_work);
-}
-
-static void hns_roce_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
- int enable_flag)
-{
- void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
- u32 val;
-
- val = readl(eqc);
-
- if (enable_flag)
- roce_set_field(val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_VALID);
- else
- roce_set_field(val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_INVALID);
- writel(val, eqc);
-}
-
-static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t tmp_dma_addr;
- u32 eqconsindx_val = 0;
- u32 eqcuridx_val = 0;
- u32 eqshift_val = 0;
- int num_bas = 0;
- int ret;
- int i;
-
- num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
- HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
-
- if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
- dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
- (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
- num_bas);
- return -EINVAL;
- }
-
- eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
- if (!eq->buf_list)
- return -ENOMEM;
-
- for (i = 0; i < num_bas; ++i) {
- eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
- &tmp_dma_addr,
- GFP_KERNEL);
- if (!eq->buf_list[i].buf) {
- ret = -ENOMEM;
- goto err_out_free_pages;
- }
-
- eq->buf_list[i].map = tmp_dma_addr;
- memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
- }
- eq->cons_index = 0;
- roce_set_field(eqshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_INVALID);
- roce_set_field(eqshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
- eq->log_entries);
- writel(eqshift_val, eqc);
-
- /* Configure eq extended address 12~44bit */
- writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
-
- /*
- * Configure eq extended address 45~49 bit.
- * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
- * using 4K page, and shift more 32 because of
- * caculating the high 32 bit value evaluated to hardware.
- */
- roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
- eq->buf_list[0].map >> 44);
- roce_set_field(eqcuridx_val,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
- writel(eqcuridx_val, eqc + 8);
-
- /* Configure eq consumer index */
- roce_set_field(eqconsindx_val,
- ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
- ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
- writel(eqconsindx_val, eqc + 0xc);
-
- return 0;
-
-err_out_free_pages:
- for (i = i - 1; i >= 0; i--)
- dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
- eq->buf_list[i].map);
-
- kfree(eq->buf_list);
- return ret;
-}
-
-static void hns_roce_free_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- int i = 0;
- int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
- HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
-
- if (!eq->buf_list)
- return;
-
- for (i = 0; i < npages; ++i)
- dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
- eq->buf_list[i].buf, eq->buf_list[i].map);
-
- kfree(eq->buf_list);
-}
-
-static void hns_roce_int_mask_en(struct hns_roce_dev *hr_dev)
-{
- int i = 0;
- u32 aemask_val;
- int masken = 0;
-
- /* AEQ INT */
- aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- masken);
- roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
-
- /* CEQ INT */
- for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
- /* IRQ mask */
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, masken);
- }
-}
-
-static void hns_roce_ce_int_default_cfg(struct hns_roce_dev *hr_dev)
-{
- /* Configure ce int interval */
- roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
- HNS_ROCE_CEQ_DEFAULT_INTERVAL);
-
- /* Configure ce int burst num */
- roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
- HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
-}
-
-int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_eq *eq = NULL;
- int eq_num = 0;
- int ret = 0;
- int i = 0;
- int j = 0;
-
- eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
- if (!eq_table->eq)
- return -ENOMEM;
-
- eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
- GFP_KERNEL);
- if (!eq_table->eqc_base) {
- ret = -ENOMEM;
- goto err_eqc_base_alloc_fail;
- }
-
- for (i = 0; i < eq_num; i++) {
- eq = &eq_table->eq[i];
- eq->hr_dev = hr_dev;
- eq->eqn = i;
- eq->irq = hr_dev->irq[i];
- eq->log_page_size = PAGE_SHIFT;
-
- if (i < hr_dev->caps.num_comp_vectors) {
- /* CEQ */
- eq_table->eqc_base[i] = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_SHIFT_0_REG +
- HNS_ROCE_CEQC_REG_OFFSET * i;
- eq->type_flag = HNS_ROCE_CEQ;
- eq->doorbell = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
- HNS_ROCE_CEQC_REG_OFFSET * i;
- eq->entries = hr_dev->caps.ceqe_depth[i];
- eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = sizeof(struct hns_roce_ceqe);
- } else {
- /* AEQ */
- eq_table->eqc_base[i] = hr_dev->reg_base +
- ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
- eq->type_flag = HNS_ROCE_AEQ;
- eq->doorbell = hr_dev->reg_base +
- ROCEE_CAEP_AEQE_CONS_IDX_REG;
- eq->entries = hr_dev->caps.aeqe_depth;
- eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = sizeof(struct hns_roce_aeqe);
- }
- }
-
- /* Disable irq */
- hns_roce_int_mask_en(hr_dev);
-
- /* Configure CE irq interval and burst num */
- hns_roce_ce_int_default_cfg(hr_dev);
-
- for (i = 0; i < eq_num; i++) {
- ret = hns_roce_create_eq(hr_dev, &eq_table->eq[i]);
- if (ret) {
- dev_err(dev, "eq create failed\n");
- goto err_create_eq_fail;
- }
- }
-
- for (j = 0; j < eq_num; j++) {
- ret = request_irq(eq_table->eq[j].irq, hns_roce_msi_x_interrupt,
- 0, hr_dev->irq_names[j], eq_table->eq + j);
- if (ret) {
- dev_err(dev, "request irq error!\n");
- goto err_request_irq_fail;
- }
- }
-
- for (i = 0; i < eq_num; i++)
- hns_roce_enable_eq(hr_dev, i, EQ_ENABLE);
-
- return 0;
-
-err_request_irq_fail:
- for (j = j - 1; j >= 0; j--)
- free_irq(eq_table->eq[j].irq, eq_table->eq + j);
-
-err_create_eq_fail:
- for (i = i - 1; i >= 0; i--)
- hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
-
- kfree(eq_table->eqc_base);
-
-err_eqc_base_alloc_fail:
- kfree(eq_table->eq);
-
- return ret;
-}
-
-void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev)
-{
- int i;
- int eq_num;
- struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
-
- eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- for (i = 0; i < eq_num; i++) {
- /* Disable EQ */
- hns_roce_enable_eq(hr_dev, i, EQ_DISABLE);
-
- free_irq(eq_table->eq[i].irq, eq_table->eq + i);
-
- hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
- }
-
- kfree(eq_table->eqc_base);
- kfree(eq_table->eq);
-}
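For reference, the deleted code above (recreated per hardware generation, e.g. set_eq_cons_index_v1() and the v1 handlers added to hns_roce_hw_v1.c below) uses an owner-bit ring: hardware flips each entry's owner bit on every full lap, and because eq->entries is a power of two, the (cons_index & entries) bit likewise toggles once per software lap. An entry belongs to software exactly when the two disagree, which is all the next_aeqe_sw()/next_ceqe_sw() tests compute:

/* sketch: hardware-written owner bit vs. software lap parity */
static bool eqe_is_sw_owned(struct hns_roce_eq *eq, u32 owner_bit)
{
        return owner_bit ^ !!(eq->cons_index & eq->entries);
}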
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.h b/drivers/infiniband/hw/hns/hns_roce_eq.h
deleted file mode 100644
index c6d212d12e03..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_eq.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _HNS_ROCE_EQ_H
-#define _HNS_ROCE_EQ_H
-
-#define HNS_ROCE_CEQ 1
-#define HNS_ROCE_AEQ 2
-
-#define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
-#define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
-#define HNS_ROCE_CEQC_REG_OFFSET 0x18
-
-#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
-#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
-
-#define HNS_ROCE_INT_MASK_DISABLE 0
-#define HNS_ROCE_INT_MASK_ENABLE 1
-
-#define EQ_ENABLE 1
-#define EQ_DISABLE 0
-#define CONS_INDEX_MASK 0xffff
-
-#define CEQ_REG_OFFSET 0x18
-
-enum {
- HNS_ROCE_EQ_STAT_INVALID = 0,
- HNS_ROCE_EQ_STAT_VALID = 2,
-};
-
-struct hns_roce_aeqe {
- u32 asyn;
- union {
- struct {
- u32 qp;
- u32 rsv0;
- u32 rsv1;
- } qp_event;
-
- struct {
- u32 cq;
- u32 rsv0;
- u32 rsv1;
- } cq_event;
-
- struct {
- u32 port;
- u32 rsv0;
- u32 rsv1;
- } port_event;
-
- struct {
- u32 ceqe;
- u32 rsv0;
- u32 rsv1;
- } ce_event;
-
- struct {
- __le64 out_param;
- __le16 token;
- u8 status;
- u8 rsv0;
- } __packed cmd;
- } event;
-};
-
-#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
-#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M \
- (((1UL << 8) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)
-
-#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
-#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M \
- (((1UL << 7) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)
-
-#define HNS_ROCE_AEQE_U32_4_OWNER_S 31
-
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M \
- (((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)
-
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M \
- (((1UL << 3) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S)
-
-#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
-#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M \
- (((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)
-
-#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
-#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M \
- (((1UL << 5) - 1) << HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S)
-
-struct hns_roce_ceqe {
- union {
- int comp;
- } ceqe;
-};
-
-#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0
-
-#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
-#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M \
- (((1UL << 16) - 1) << HNS_ROCE_CEQE_CEQE_COMP_CQN_S)
-
-#endif /* _HNS_ROCE_EQ_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index af27168faf0f..21ca9fa7c9d1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -33,6 +33,7 @@
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
@@ -774,7 +775,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
goto create_lp_qp_failed;
}
- ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
IB_QPS_INIT, IB_QPS_RTR);
if (ret) {
dev_err(dev, "modify qp failed(%d)!\n", ret);
@@ -1492,9 +1493,9 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
caps->num_uars = HNS_ROCE_V1_UAR_NUM;
caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
- caps->num_aeq_vectors = HNS_ROCE_AEQE_VEC_NUM;
- caps->num_comp_vectors = HNS_ROCE_COMP_VEC_NUM;
- caps->num_other_vectors = HNS_ROCE_AEQE_OF_VEC_NUM;
+ caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
+ caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
+ caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
@@ -1529,10 +1530,8 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->num_ports + 1;
}
- for (i = 0; i < caps->num_comp_vectors; i++)
- caps->ceqe_depth[i] = HNS_ROCE_V1_NUM_COMP_EQE;
-
- caps->aeqe_depth = HNS_ROCE_V1_NUM_ASYNC_EQE;
+ caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
+ caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
ROCEE_ACK_DELAY_REG));
caps->max_mtu = IB_MTU_2048;
@@ -2312,15 +2311,16 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(cqe->immediate_data);
+ wc->ex.imm_data =
+ cpu_to_be32(le32_to_cpu(cqe->immediate_data));
break;
case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
if (roce_get_bit(cqe->cqe_byte_4,
CQE_BYTE_4_IMM_INDICATOR_S)) {
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(
- cqe->immediate_data);
+ wc->ex.imm_data = cpu_to_be32(
+ le32_to_cpu(cqe->immediate_data));
} else {
wc->opcode = IB_WC_RECV;
wc->wc_flags = 0;
@@ -3960,6 +3960,732 @@ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
return ret;
}
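+/*
+ * Update the EQ doorbell: the low 16 bits carry the consumer index,
+ * with the req_not flag folded in at bit position log_entries.
+ */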
+static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
+{
+ roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
+ (req_not << eq->log_entries), eq->doorbell);
+}
+
+static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe, int qpn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_LWQCE_QPC_ERROR:
+ dev_warn(dev, "QP %d, QPC error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_MTU_ERROR:
+ dev_warn(dev, "QP %d, MTU error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+		dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SL_ERROR:
+ dev_warn(dev, "QP %d, SL error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_PORT_ERROR:
+ dev_warn(dev, "QP %d, port error.\n", qpn);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int qpn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ dev_warn(dev, "Local Access Violation Work Queue Error.\n");
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+ dev_warn(dev, "QP %d, R_key violation.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+ dev_warn(dev, "QP %d, length error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_VA_ERROR:
+ dev_warn(dev, "QP %d, VA error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_PD_ERROR:
+ dev_err(dev, "QP %d, PD error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+ dev_warn(dev, "QP %d, rw acc error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+ dev_warn(dev, "QP %d, key state error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+ dev_warn(dev, "QP %d, MR operation error.\n", qpn);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ int phy_port;
+ int qpn;
+
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+ phy_port = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
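+	/* QP0/QP1 are per-port special QPs; fold in the physical port */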
+ if (qpn <= 1)
+ qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
+ "QP %d, phy_port %d.\n", qpn, phy_port);
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 cqn;
+
+ cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+ dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_cq_event(hr_dev, cqn, event_type);
+}
+
+static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
+ dev_warn(dev, "SDB overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
+ dev_warn(dev, "SDB almost overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
+ dev_warn(dev, "SDB almost empty.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
+ dev_warn(dev, "ODB overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
+ dev_warn(dev, "ODB almost overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
+		dev_warn(dev, "ODB almost empty.\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
+{
+ unsigned long off = (entry & (eq->entries - 1)) *
+ HNS_ROCE_AEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_aeqe *)((u8 *)
+ (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
+ off % HNS_ROCE_BA_SIZE);
+}
+
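+/*
+ * An AEQE is owned by software when its owner bit differs from the
+ * "pass" bit of the consumer index (cons_index & entries), which
+ * flips every time the ring wraps.
+ */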
+static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
+{
+ struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
+
+ return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
+ !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
+}
+
+static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_aeqe *aeqe;
+ int aeqes_found = 0;
+ int event_type;
+
+ while ((aeqe = next_aeqe_sw_v1(eq))) {
+
+ /* Make sure we read the AEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+		event_type = roce_get_field(aeqe->asyn,
+					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
+		dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%x\n",
+			aeqe, event_type);
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ dev_warn(dev, "PATH MIG not supported\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ dev_warn(dev, "COMMUNICATION established\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ dev_warn(dev, "SQ DRAINED not supported\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ dev_warn(dev, "PATH MIG failed\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+			dev_warn(dev, "SRQ not supported!\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+ hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
+ dev_warn(dev, "port change.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_MB:
+ hns_roce_cmd_event(hr_dev,
+ le16_to_cpu(aeqe->event.cmd.token),
+ aeqe->event.cmd.status,
+ le64_to_cpu(aeqe->event.cmd.out_param
+ ));
+ break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
+ dev_warn(dev, "CEQ 0x%lx overflow.\n",
+ roce_get_field(aeqe->event.ce_event.ceqe,
+ HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
+ HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
+ break;
+ default:
+ dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+ event_type, eq->eqn, eq->cons_index);
+ break;
+ }
+
+ eq->cons_index++;
+ aeqes_found = 1;
+
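+		/*
+		 * cons_index spans two passes of the ring so the ownership
+		 * test stays valid; wrap it once it exceeds 2 * depth - 1.
+		 */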
+ if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
+ dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v1(eq, 0);
+
+ return aeqes_found;
+}
+
+static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
+{
+ unsigned long off = (entry & (eq->entries - 1)) *
+ HNS_ROCE_CEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_ceqe *)((u8 *)
+ (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
+ off % HNS_ROCE_BA_SIZE);
+}
+
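+/* Same ownership test as the AEQ, using the CEQE owner bit */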
+static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
+
+ return (!!(roce_get_bit(ceqe->comp,
+ HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
+ (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
+}
+
+static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe;
+ int ceqes_found = 0;
+ u32 cqn;
+
+ while ((ceqe = next_ceqe_sw_v1(eq))) {
+
+ /* Make sure we read CEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ cqn = roce_get_field(ceqe->comp,
+ HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
+ HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
+ hns_roce_cq_completion(hr_dev, cqn);
+
+ ++eq->cons_index;
+ ceqes_found = 1;
+
+ if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) {
+ dev_warn(&eq->hr_dev->pdev->dev,
+ "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v1(eq, 0);
+
+ return ceqes_found;
+}
+
+static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
+{
+ struct hns_roce_eq *eq = eq_ptr;
+ struct hns_roce_dev *hr_dev = eq->hr_dev;
+ int int_work = 0;
+
+ if (eq->type_flag == HNS_ROCE_CEQ)
+		/* CEQ is a pulse irq and needs no explicit clearing */
+ int_work = hns_roce_v1_ceq_int(hr_dev, eq);
+ else
+		/* AEQ is a pulse irq and needs no explicit clearing */
+ int_work = hns_roce_v1_aeq_int(hr_dev, eq);
+
+ return IRQ_RETVAL(int_work);
+}
+
+static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
+{
+ struct hns_roce_dev *hr_dev = dev_id;
+ struct device *dev = &hr_dev->pdev->dev;
+ int int_work = 0;
+ u32 caepaemask_val;
+ u32 cealmovf_val;
+ u32 caepaest_val;
+ u32 aeshift_val;
+ u32 ceshift_val;
+ u32 cemask_val;
+ int i;
+
+	/*
+	 * Abnormal interrupts (AEQ overflow, ECC multi-bit error, CEQ
+	 * overflow) must be cleared explicitly: mask the irq, clear its
+	 * status, then unmask it again.
+	 */
+ aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
+
+ /* AEQE overflow */
+ if (roce_get_bit(aeshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
+ dev_warn(dev, "AEQ overflow!\n");
+
+ /* Set mask */
+ caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(caepaemask_val,
+ ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_ENABLE);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
+
+ /* Clear int state(INT_WC : write 1 clear) */
+ caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
+ roce_set_bit(caepaest_val,
+ ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
+ roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
+
+ /* Clear mask */
+ caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(caepaemask_val,
+ ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_DISABLE);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
+ }
+
+ /* CEQ almost overflow */
+ for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
+ ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
+ i * CEQ_REG_OFFSET);
+
+ if (roce_get_bit(ceshift_val,
+ ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
+ dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
+ int_work++;
+
+ /* Set mask */
+ cemask_val = roce_read(hr_dev,
+ ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cemask_val,
+ ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_ENABLE);
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, cemask_val);
+
+ /* Clear int state(INT_WC : write 1 clear) */
+ cealmovf_val = roce_read(hr_dev,
+ ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cealmovf_val,
+ ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
+ 1);
+ roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
+ i * CEQ_REG_OFFSET, cealmovf_val);
+
+ /* Clear mask */
+ cemask_val = roce_read(hr_dev,
+ ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cemask_val,
+ ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_DISABLE);
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, cemask_val);
+ }
+ }
+
+ /* ECC multi-bit error alarm */
+ dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
+
+ dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
+
+ return IRQ_RETVAL(int_work);
+}
+
+static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
+{
+ u32 aemask_val;
+ int masken = 0;
+ int i;
+
+ /* AEQ INT */
+ aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ masken);
+ roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
+
+ /* CEQ INT */
+ for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
+ /* IRQ mask */
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, masken);
+ }
+}
+
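+/* A v1 EQ buffer is carved into HNS_ROCE_BA_SIZE coherent chunks */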
+static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
+ HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
+ int i;
+
+ if (!eq->buf_list)
+ return;
+
+ for (i = 0; i < npages; ++i)
+ dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
+ eq->buf_list[i].buf, eq->buf_list[i].map);
+
+ kfree(eq->buf_list);
+}
+
+static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
+ int enable_flag)
+{
+ void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
+ u32 val;
+
+ val = readl(eqc);
+
+ if (enable_flag)
+ roce_set_field(val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_VALID);
+ else
+ roce_set_field(val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_INVALID);
+ writel(val, eqc);
+}
+
+static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
+ struct device *dev = &hr_dev->pdev->dev;
+ dma_addr_t tmp_dma_addr;
+ u32 eqconsindx_val = 0;
+ u32 eqcuridx_val = 0;
+ u32 eqshift_val = 0;
+ int num_bas;
+ int ret;
+ int i;
+
+ num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
+ HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
+
+ if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
+		dev_err(dev, "eq buf size %d exceeds ba size %d, bas needed: %d\n",
+ (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
+ num_bas);
+ return -EINVAL;
+ }
+
+ eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
+ if (!eq->buf_list)
+ return -ENOMEM;
+
+ for (i = 0; i < num_bas; ++i) {
+ eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
+ &tmp_dma_addr,
+ GFP_KERNEL);
+ if (!eq->buf_list[i].buf) {
+ ret = -ENOMEM;
+ goto err_out_free_pages;
+ }
+
+ eq->buf_list[i].map = tmp_dma_addr;
+ memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
+ }
+ eq->cons_index = 0;
+ roce_set_field(eqshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_INVALID);
+ roce_set_field(eqshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
+ eq->log_entries);
+ writel(eqshift_val, eqc);
+
+	/* Configure eq extended address bits 12~44 */
+ writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
+
+	/*
+	 * Configure eq extended address bits 45~49.
+	 * 44 = 32 + 12: the address is shifted right by 12 because of the
+	 * 4K page size, and by another 32 to obtain the high 32 bits
+	 * written to hardware.
+	 */
+ roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
+ eq->buf_list[0].map >> 44);
+ roce_set_field(eqcuridx_val,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
+ writel(eqcuridx_val, eqc + 8);
+
+ /* Configure eq consumer index */
+ roce_set_field(eqconsindx_val,
+ ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
+ ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
+ writel(eqconsindx_val, eqc + 0xc);
+
+ return 0;
+
+err_out_free_pages:
+ for (i -= 1; i >= 0; i--)
+ dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
+ eq->buf_list[i].map);
+
+ kfree(eq->buf_list);
+ return ret;
+}
+
+static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_eq *eq;
+ int irq_num;
+ int eq_num;
+ int ret;
+ int i, j;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ irq_num = eq_num + hr_dev->caps.num_other_vectors;
+
+ eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
+ if (!eq_table->eq)
+ return -ENOMEM;
+
+ eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
+ GFP_KERNEL);
+ if (!eq_table->eqc_base) {
+ ret = -ENOMEM;
+ goto err_eqc_base_alloc_fail;
+ }
+
+ for (i = 0; i < eq_num; i++) {
+ eq = &eq_table->eq[i];
+ eq->hr_dev = hr_dev;
+ eq->eqn = i;
+ eq->irq = hr_dev->irq[i];
+ eq->log_page_size = PAGE_SHIFT;
+
+ if (i < hr_dev->caps.num_comp_vectors) {
+ /* CEQ */
+ eq_table->eqc_base[i] = hr_dev->reg_base +
+ ROCEE_CAEP_CEQC_SHIFT_0_REG +
+ CEQ_REG_OFFSET * i;
+ eq->type_flag = HNS_ROCE_CEQ;
+ eq->doorbell = hr_dev->reg_base +
+ ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
+ CEQ_REG_OFFSET * i;
+ eq->entries = hr_dev->caps.ceqe_depth;
+ eq->log_entries = ilog2(eq->entries);
+ eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
+ } else {
+ /* AEQ */
+ eq_table->eqc_base[i] = hr_dev->reg_base +
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
+ eq->type_flag = HNS_ROCE_AEQ;
+ eq->doorbell = hr_dev->reg_base +
+ ROCEE_CAEP_AEQE_CONS_IDX_REG;
+ eq->entries = hr_dev->caps.aeqe_depth;
+ eq->log_entries = ilog2(eq->entries);
+ eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
+ }
+ }
+
+ /* Disable irq */
+ hns_roce_v1_int_mask_enable(hr_dev);
+
+ /* Configure ce int interval */
+ roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
+ HNS_ROCE_CEQ_DEFAULT_INTERVAL);
+
+ /* Configure ce int burst num */
+ roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
+ HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
+
+ for (i = 0; i < eq_num; i++) {
+ ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
+ if (ret) {
+ dev_err(dev, "eq create failed\n");
+ goto err_create_eq_fail;
+ }
+ }
+
+ for (j = 0; j < irq_num; j++) {
+ if (j < eq_num)
+ ret = request_irq(hr_dev->irq[j],
+ hns_roce_v1_msix_interrupt_eq, 0,
+ hr_dev->irq_names[j],
+ &eq_table->eq[j]);
+ else
+ ret = request_irq(hr_dev->irq[j],
+ hns_roce_v1_msix_interrupt_abn, 0,
+ hr_dev->irq_names[j], hr_dev);
+
+ if (ret) {
+ dev_err(dev, "request irq error!\n");
+ goto err_request_irq_fail;
+ }
+ }
+
+ for (i = 0; i < eq_num; i++)
+ hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);
+
+ return 0;
+
+err_request_irq_fail:
+ for (j -= 1; j >= 0; j--)
+ free_irq(hr_dev->irq[j], &eq_table->eq[j]);
+
+err_create_eq_fail:
+ for (i -= 1; i >= 0; i--)
+ hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
+
+ kfree(eq_table->eqc_base);
+
+err_eqc_base_alloc_fail:
+ kfree(eq_table->eq);
+
+ return ret;
+}
+
+static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ int irq_num;
+ int eq_num;
+ int i;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ irq_num = eq_num + hr_dev->caps.num_other_vectors;
+ for (i = 0; i < eq_num; i++) {
+ /* Disable EQ */
+ hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);
+
+ free_irq(hr_dev->irq[i], &eq_table->eq[i]);
+
+ hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
+ }
+ for (i = eq_num; i < irq_num; i++)
+ free_irq(hr_dev->irq[i], hr_dev);
+
+ kfree(eq_table->eqc_base);
+ kfree(eq_table->eq);
+}
+
static const struct hns_roce_hw hns_roce_hw_v1 = {
.reset = hns_roce_v1_reset,
.hw_profile = hns_roce_v1_profile,
@@ -3983,6 +4709,8 @@ static const struct hns_roce_hw hns_roce_hw_v1 = {
.poll_cq = hns_roce_v1_poll_cq,
.dereg_mr = hns_roce_v1_dereg_mr,
.destroy_cq = hns_roce_v1_destroy_cq,
+ .init_eq = hns_roce_v1_init_eq_table,
+ .cleanup_eq = hns_roce_v1_cleanup_eq_table,
};
static const struct of_device_id hns_roce_of_match[] = {
@@ -4060,10 +4788,6 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
/* get the mapped register base address */
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "memory resource not found!\n");
- return -EINVAL;
- }
hr_dev->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(hr_dev->reg_base))
return PTR_ERR(hr_dev->reg_base);
@@ -4132,14 +4856,14 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
/* read the interrupt names from the DT or ACPI */
ret = device_property_read_string_array(dev, "interrupt-names",
hr_dev->irq_names,
- HNS_ROCE_MAX_IRQ_NUM);
+ HNS_ROCE_V1_MAX_IRQ_NUM);
if (ret < 0) {
dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
return ret;
}
/* fetch the interrupt numbers */
- for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
+ for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
if (hr_dev->irq[i] <= 0) {
dev_err(dev, "platform get of irq[=%d] failed!\n", i);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 21a07ef0afc9..b44ddd239060 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -60,8 +60,13 @@
#define HNS_ROCE_V1_GID_NUM 16
#define HNS_ROCE_V1_RESV_QP 8
-#define HNS_ROCE_V1_NUM_COMP_EQE 0x8000
-#define HNS_ROCE_V1_NUM_ASYNC_EQE 0x400
+#define HNS_ROCE_V1_MAX_IRQ_NUM 34
+#define HNS_ROCE_V1_COMP_VEC_NUM 32
+#define HNS_ROCE_V1_AEQE_VEC_NUM 1
+#define HNS_ROCE_V1_ABNORMAL_VEC_NUM 1
+
+#define HNS_ROCE_V1_COMP_EQE_NUM 0x8000
+#define HNS_ROCE_V1_ASYNC_EQE_NUM 0x400
#define HNS_ROCE_V1_QPC_ENTRY_SIZE 256
#define HNS_ROCE_V1_IRRL_ENTRY_SIZE 8
@@ -159,6 +164,41 @@
#define SDB_INV_CNT_OFFSET 8
#define SDB_ST_CMP_VAL 8
+#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
+#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
+
+#define HNS_ROCE_INT_MASK_DISABLE 0
+#define HNS_ROCE_INT_MASK_ENABLE 1
+
+#define CEQ_REG_OFFSET 0x18
+
+#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0
+
+#define HNS_ROCE_V1_CONS_IDX_M GENMASK(15, 0)
+
+#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
+#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M GENMASK(31, 16)
+
+#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
+#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M GENMASK(23, 16)
+
+#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
+#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M GENMASK(30, 24)
+
+#define HNS_ROCE_AEQE_U32_4_OWNER_S 31
+
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M GENMASK(23, 0)
+
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M GENMASK(27, 25)
+
+#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
+#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M GENMASK(15, 0)
+
+#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
+#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0)
+
struct hns_roce_cq_context {
u32 cqc_byte_4;
u32 cq_bt_l;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 8e18445714a9..256fe110107a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -34,6 +34,7 @@
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <net/addrconf.h>
#include <rdma/ib_umem.h>
#include "hnae3.h"
@@ -51,32 +52,106 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length);
}
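+/*
+ * Fill the data segments of an RC send WQE: inline sends copy the
+ * payload straight into the WQE; otherwise up to two SGEs live in the
+ * WQE itself and any remainder spills into the extended SGE area.
+ */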
+static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ void *wqe, unsigned int *sge_ind,
+ struct ib_send_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_v2_wqe_data_seg *dseg = wqe;
+ struct hns_roce_qp *qp = to_hr_qp(ibqp);
+ int i;
+
+ if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
+ if (rc_sq_wqe->msg_len > hr_dev->caps.max_sq_inline) {
+ *bad_wr = wr;
+			dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal\n",
+				hr_dev->caps.max_sq_inline,
+				rc_sq_wqe->msg_len);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(wqe, ((void *)wr->sg_list[i].addr),
+ wr->sg_list[i].length);
+ wqe += wr->sg_list[i].length;
+ }
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
+ 1);
+ } else {
+ if (wr->num_sge <= 2) {
+ for (i = 0; i < wr->num_sge; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ set_data_seg_v2(dseg, wr->sg_list + i);
+ dseg++;
+ }
+ }
+ } else {
+ roce_set_field(rc_sq_wqe->byte_20,
+ V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+ V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+ (*sge_ind) & (qp->sge.sge_cnt - 1));
+
+ for (i = 0; i < 2; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ set_data_seg_v2(dseg, wr->sg_list + i);
+ dseg++;
+ }
+ }
+
+ dseg = get_send_extend_sge(qp,
+ (*sge_ind) & (qp->sge.sge_cnt - 1));
+
+ for (i = 0; i < wr->num_sge - 2; i++) {
+ if (likely(wr->sg_list[i + 2].length)) {
+ set_data_seg_v2(dseg,
+ wr->sg_list + 2 + i);
+ dseg++;
+ (*sge_ind)++;
+ }
+ }
+ }
+
+ roce_set_field(rc_sq_wqe->byte_16,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
+ }
+
+ return 0;
+}
+
static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
+ struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
struct hns_roce_v2_wqe_data_seg *dseg;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db;
unsigned int sge_ind = 0;
- unsigned int wqe_sz = 0;
unsigned int owner_bit;
unsigned long flags;
unsigned int ind;
void *wqe = NULL;
+ bool loopback;
int ret = 0;
+ u8 *smac;
int nreq;
int i;
- if (unlikely(ibqp->qp_type != IB_QPT_RC)) {
+ if (unlikely(ibqp->qp_type != IB_QPT_RC &&
+ ibqp->qp_type != IB_QPT_GSI &&
+ ibqp->qp_type != IB_QPT_UD)) {
dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
*bad_wr = NULL;
return -EOPNOTSUPP;
}
- if (unlikely(qp->state != IB_QPS_RTS && qp->state != IB_QPS_SQD)) {
+ if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
+ qp->state == IB_QPS_RTR)) {
dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
*bad_wr = wr;
return -EINVAL;
@@ -106,161 +181,255 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
wr->wr_id;
owner_bit = ~(qp->sq.head >> ilog2(qp->sq.wqe_cnt)) & 0x1;
- rc_sq_wqe = wqe;
- memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
- for (i = 0; i < wr->num_sge; i++)
- rc_sq_wqe->msg_len += wr->sg_list[i].length;
- rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
+		/* Build the WQE according to the QP type */
+ if (ibqp->qp_type == IB_QPT_GSI) {
+ ud_sq_wqe = wqe;
+ memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
+
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
+ V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
+ V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
+ V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
+ V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
+ roce_set_field(ud_sq_wqe->byte_48,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
+ ah->av.mac[4]);
+ roce_set_field(ud_sq_wqe->byte_48,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
+ ah->av.mac[5]);
+
+ /* MAC loopback */
+ smac = (u8 *)hr_dev->dev_addr[qp->port];
+ loopback = ether_addr_equal_unaligned(ah->av.mac,
+ smac) ? 1 : 0;
+
+ roce_set_bit(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
+
+ roce_set_field(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
- (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+ for (i = 0; i < wr->num_sge; i++)
+ ud_sq_wqe->msg_len += wr->sg_list[i].length;
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
- (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+ ud_sq_wqe->immtdata = send_ieth(wr);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
- (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+ /* Set sig attr */
+ roce_set_bit(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_CQE_S,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
- owner_bit);
+ /* Set se attr */
+ roce_set_bit(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_SE_S,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
- switch (wr->opcode) {
- case IB_WR_RDMA_READ:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_READ);
- rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_RDMA_WRITE:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
- rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_RDMA_WRITE_WITH_IMM:
- roce_set_field(rc_sq_wqe->byte_4,
+ roce_set_bit(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
+
+ roce_set_field(ud_sq_wqe->byte_16,
+ V2_UD_SEND_WQE_BYTE_16_PD_M,
+ V2_UD_SEND_WQE_BYTE_16_PD_S,
+ to_hr_pd(ibqp->pd)->pdn);
+
+ roce_set_field(ud_sq_wqe->byte_16,
+ V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
+ wr->num_sge);
+
+ roce_set_field(ud_sq_wqe->byte_20,
+ V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+ V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+ sge_ind & (qp->sge.sge_cnt - 1));
+
+ roce_set_field(ud_sq_wqe->byte_24,
+ V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
+ V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
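+			/*
+			 * A remote qkey with the high bit set means: use
+			 * the qkey from the QP context (per the IB spec).
+			 */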
+			ud_sq_wqe->qkey =
+			     (ud_wr(wr)->remote_qkey & 0x80000000) ?
+			     qp->qkey : ud_wr(wr)->remote_qkey;
+ roce_set_field(ud_sq_wqe->byte_32,
+ V2_UD_SEND_WQE_BYTE_32_DQPN_M,
+ V2_UD_SEND_WQE_BYTE_32_DQPN_S,
+ ud_wr(wr)->remote_qpn);
+
+ roce_set_field(ud_sq_wqe->byte_36,
+ V2_UD_SEND_WQE_BYTE_36_VLAN_M,
+ V2_UD_SEND_WQE_BYTE_36_VLAN_S,
+ ah->av.vlan);
+ roce_set_field(ud_sq_wqe->byte_36,
+ V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
+ V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
+ ah->av.hop_limit);
+ roce_set_field(ud_sq_wqe->byte_36,
+ V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
+ V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
+ 0);
+ roce_set_field(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
+ V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, 0);
+ roce_set_field(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_SL_M,
+ V2_UD_SEND_WQE_BYTE_40_SL_S,
+ ah->av.sl_tclass_flowlabel >>
+ HNS_ROCE_SL_SHIFT);
+ roce_set_field(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_PORTN_M,
+ V2_UD_SEND_WQE_BYTE_40_PORTN_S,
+ qp->port);
+
+ roce_set_field(ud_sq_wqe->byte_48,
+ V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
+ V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
+ hns_get_gid_index(hr_dev, qp->phy_port,
+ ah->av.gid_index));
+
+ memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
+ GID_LEN_V2);
+
+ dseg = get_send_extend_sge(qp,
+ sge_ind & (qp->sge.sge_cnt - 1));
+ for (i = 0; i < wr->num_sge; i++) {
+ set_data_seg_v2(dseg + i, wr->sg_list + i);
+ sge_ind++;
+ }
+
+ ind++;
+ } else if (ibqp->qp_type == IB_QPT_RC) {
+ rc_sq_wqe = wqe;
+ memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
+ for (i = 0; i < wr->num_sge; i++)
+ rc_sq_wqe->msg_len += wr->sg_list[i].length;
+
+ rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_FENCE_S,
+ (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_SE_S,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_CQE_S,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_RDMA_READ);
+ rc_sq_wqe->rkey =
+ cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_RDMA_WRITE:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
+ rc_sq_wqe->rkey =
+ cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
- rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_SEND:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND);
- break;
- case IB_WR_SEND_WITH_INV:
- roce_set_field(rc_sq_wqe->byte_4,
+ rc_sq_wqe->rkey =
+ cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_SEND:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND);
+ break;
+ case IB_WR_SEND_WITH_INV:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
- break;
- case IB_WR_SEND_WITH_IMM:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
- break;
- case IB_WR_LOCAL_INV:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_LOCAL_INV);
- break;
- case IB_WR_ATOMIC_CMP_AND_SWP:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
- break;
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
- break;
- case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- roce_set_field(rc_sq_wqe->byte_4,
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
+ break;
+ case IB_WR_LOCAL_INV:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_LOCAL_INV);
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
+ break;
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
- break;
- case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
- roce_set_field(rc_sq_wqe->byte_4,
+ break;
+ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
- break;
- default:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_MASK);
- break;
- }
-
- wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
- dseg = wqe;
- if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
- if (rc_sq_wqe->msg_len >
- hr_dev->caps.max_sq_inline) {
- ret = -EINVAL;
- *bad_wr = wr;
- dev_err(dev, "inline len(1-%d)=%d, illegal",
- rc_sq_wqe->msg_len,
- hr_dev->caps.max_sq_inline);
- goto out;
+ break;
+ default:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_MASK);
+ break;
}
- for (i = 0; i < wr->num_sge; i++) {
- memcpy(wqe, ((void *)wr->sg_list[i].addr),
- wr->sg_list[i].length);
- wqe += wr->sg_list[i].length;
- wqe_sz += wr->sg_list[i].length;
- }
+ wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
+ dseg = wqe;
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
+ ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
+ &sge_ind, bad_wr);
+ if (ret)
+ goto out;
+ ind++;
} else {
- if (wr->num_sge <= 2) {
- for (i = 0; i < wr->num_sge; i++)
- set_data_seg_v2(dseg + i,
- wr->sg_list + i);
- } else {
- roce_set_field(rc_sq_wqe->byte_20,
- V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
- V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
- sge_ind & (qp->sge.sge_cnt - 1));
-
- for (i = 0; i < 2; i++)
- set_data_seg_v2(dseg + i,
- wr->sg_list + i);
-
- dseg = get_send_extend_sge(qp,
- sge_ind & (qp->sge.sge_cnt - 1));
-
- for (i = 0; i < wr->num_sge - 2; i++) {
- set_data_seg_v2(dseg + i,
- wr->sg_list + 2 + i);
- sge_ind++;
- }
- }
-
- roce_set_field(rc_sq_wqe->byte_16,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
- wr->num_sge);
- wqe_sz += wr->num_sge *
- sizeof(struct hns_roce_v2_wqe_data_seg);
+ dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+ return -EOPNOTSUPP;
}
- ind++;
}
out:
@@ -299,6 +468,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_wqe_data_seg *dseg;
+ struct hns_roce_rinl_sge *sge_list;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db rq_db;
unsigned long flags;
@@ -347,6 +517,14 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
dseg[i].addr = 0;
}
+		/* Record the posted SGEs so RQ inline data can be copied out */
+ sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
+ hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++) {
+ sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
+ sge_list[i].len = wr->sg_list[i].length;
+ }
+
hr_qp->rq.wrid[ind] = wr->wr_id;
ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
@@ -908,9 +1086,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
- caps->num_aeq_vectors = 1;
- caps->num_comp_vectors = 63;
- caps->num_other_vectors = 0;
+ caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
+ caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
+ caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
@@ -955,12 +1133,18 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->cqe_ba_pg_sz = 0;
caps->cqe_buf_pg_sz = 0;
caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
+ caps->eqe_ba_pg_sz = 0;
+ caps->eqe_buf_pg_sz = 0;
+ caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
- HNS_ROCE_CAP_FLAG_ROCE_V1_V2;
+ HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
+ HNS_ROCE_CAP_FLAG_RQ_INLINE;
caps->pkey_table_len[0] = 1;
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
+ caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
+ caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
caps->local_ca_ack_delay = 0;
caps->max_mtu = IB_MTU_4096;
@@ -1382,6 +1566,8 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
+ roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
+ V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
@@ -1422,6 +1608,15 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
+
+ roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_MAX_CNT_M,
+ V2_CQC_BYTE_56_CQ_MAX_CNT_S,
+ HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
+ roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_PERIOD_M,
+ V2_CQC_BYTE_56_CQ_PERIOD_S,
+ HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
}
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
@@ -1457,6 +1652,40 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
return 0;
}
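+/*
+ * RQ inline delivery: the payload landed in the receive WQE buffer,
+ * so copy it back into the SGEs that were posted with the WR and
+ * report a local length error if they cannot hold it all.
+ */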
+static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
+ struct hns_roce_qp **cur_qp,
+ struct ib_wc *wc)
+{
+ struct hns_roce_rinl_sge *sge_list;
+ u32 wr_num, wr_cnt, sge_num;
+ u32 sge_cnt, data_len, size;
+ void *wqe_buf;
+
+ wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
+ V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
+ wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
+
+ sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
+ sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
+ wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
+ data_len = wc->byte_len;
+
+ for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
+ size = min(sge_list[sge_cnt].len, data_len);
+ memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
+
+ data_len -= size;
+ wqe_buf += size;
+ }
+
+ if (data_len) {
+ wc->status = IB_WC_LOC_LEN_ERR;
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
@@ -1469,6 +1698,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
u32 opcode;
u32 status;
int qpn;
+ int ret;
/* Find cqe according to consumer index */
cqe = next_cqe_sw_v2(hr_cq);
@@ -1636,7 +1866,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+ wc->ex.imm_data = cqe->immtdata;
break;
case HNS_ROCE_V2_OPCODE_SEND:
wc->opcode = IB_WC_RECV;
@@ -1645,18 +1875,29 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+ wc->ex.imm_data = cqe->immtdata;
break;
case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_INVALIDATE;
- wc->ex.invalidate_rkey = cqe->rkey_immtdata;
+ wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
break;
default:
wc->status = IB_WC_GENERAL_ERR;
break;
}
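+		/* Copy out data that was received inline in the RQ WQE */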
+ if ((wc->qp->qp_type == IB_QPT_RC ||
+ wc->qp->qp_type == IB_QPT_UC) &&
+ (opcode == HNS_ROCE_V2_OPCODE_SEND ||
+ opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
+ opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
+ (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
+ ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
+ if (ret)
+ return -EAGAIN;
+ }
+
/* Update tail pointer, record wr_id */
wq = &(*cur_qp)->rq;
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
@@ -1670,6 +1911,21 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->wc_flags |= (roce_get_bit(cqe->byte_32,
V2_CQE_BYTE_32_GRH_S) ?
IB_WC_GRH : 0);
+ wc->port_num = roce_get_field(cqe->byte_32,
+ V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
+ wc->pkey_index = 0;
+ memcpy(wc->smac, cqe->smac, 4);
+ wc->smac[4] = roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_SMAC_4_M,
+ V2_CQE_BYTE_28_SMAC_4_S);
+ wc->smac[5] = roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_SMAC_5_M,
+ V2_CQE_BYTE_28_SMAC_5_S);
+ wc->vlan_id = 0xffff;
+ wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
+ wc->network_hdr_type = roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_PORT_TYPE_M,
+ V2_CQE_BYTE_28_PORT_TYPE_S);
}
return 0;
@@ -1859,8 +2115,39 @@ static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
return ret;
}
+static void set_access_flags(struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask,
+ const struct ib_qp_attr *attr, int attr_mask)
+{
+ u8 dest_rd_atomic;
+ u32 access_flags;
+
+ dest_rd_atomic = !!(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
+ attr->max_dest_rd_atomic : hr_qp->resp_depth;
+
+ access_flags = !!(attr_mask & IB_QP_ACCESS_FLAGS) ?
+ attr->qp_access_flags : hr_qp->atomic_rd_en;
+
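+	/*
+	 * Without responder resources the QP cannot serve RDMA reads or
+	 * atomics, so keep at most the remote-write permission.
+	 */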
+ if (!dest_rd_atomic)
+ access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+}
+
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
+ int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -1877,9 +2164,18 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
- roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ if (ibqp->qp_type == IB_QPT_GSI)
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sge.sge_cnt));
+ else
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S,
+ hr_qp->sq.max_gs > 2 ?
+ ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
@@ -1944,18 +2240,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
-
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
-
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+ if (attr_mask & IB_QP_QKEY) {
+ context->qkey_xrcd = attr->qkey;
+ qpc_mask->qkey_xrcd = 0;
+ hr_qp->qkey = attr->qkey;
+ }
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
@@ -2176,9 +2467,17 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
- roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ if (ibqp->qp_type == IB_QPT_GSI)
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sge.sge_cnt));
+ else
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
+ ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
@@ -2239,7 +2538,7 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_80_RX_CQN_S, 0);
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
- V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+ V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, 0);
@@ -2255,10 +2554,10 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
}
- if (attr_mask & IB_QP_PKEY_INDEX)
- context->qkey_xrcd = attr->pkey_index;
- else
- context->qkey_xrcd = hr_qp->pkey_index;
+ if (attr_mask & IB_QP_QKEY) {
+ context->qkey_xrcd = attr->qkey;
+ qpc_mask->qkey_xrcd = 0;
+ }
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
@@ -2354,7 +2653,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S,
- hr_qp->sq.max_gs > 2 ? hr_dev->caps.mtt_hop_num : 0);
+ ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
+ hr_dev->caps.mtt_hop_num : 0);
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
@@ -2463,11 +2763,14 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
}
- roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S,
- ilog2((unsigned int)attr->max_dest_rd_atomic));
- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S, 0);
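+	/* RR_MAX holds log2 of the responder resources, rounded up */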
+ if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
+ attr->max_dest_rd_atomic) {
+ roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S,
+ fls(attr->max_dest_rd_atomic - 1));
+ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S, 0);
+ }
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
@@ -2511,8 +2814,13 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, 0);
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
+ else
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
+
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, 0);
@@ -2557,12 +2865,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_168_LP_SGEN_INI_M,
V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
- roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S,
- ilog2((unsigned int)attr->max_rd_atomic));
- roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S, 0);
-
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
@@ -2625,13 +2927,14 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
- context->sq_cur_sge_blk_addr = hr_qp->sq.max_gs > 2 ?
+ context->sq_cur_sge_blk_addr =
+ ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
((u32)(mtts[hr_qp->sge.offset / page_size]
>> PAGE_ADDR_SHIFT)) : 0;
roce_set_field(context->byte_184_irrl_idx,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
- hr_qp->sq.max_gs > 2 ?
+ ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
(mtts[hr_qp->sge.offset / page_size] >>
(32 + PAGE_ADDR_SHIFT)) : 0);
qpc_mask->sq_cur_sge_blk_addr = 0;
@@ -2766,6 +3069,14 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
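+	/* SR_MAX holds log2 of the initiator depth, rounded up */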
+ if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S,
+ fls(attr->max_rd_atomic - 1));
+ roce_set_field(qpc_mask->byte_208_irrl,
+ V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S, 0);
+ }
return 0;
}
@@ -2794,7 +3105,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
*/
memset(qpc_mask, 0xff, sizeof(*qpc_mask));
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
- modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
+ modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
+ qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, attr, attr_mask, context,
qpc_mask);
@@ -2829,6 +3141,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
goto out;
}
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
/* Every status migrate must change state */
roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, new_state);
@@ -2845,6 +3160,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
hr_qp->state = new_state;
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ hr_qp->atomic_rd_en = attr->qp_access_flags;
+
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
hr_qp->resp_depth = attr->max_dest_rd_atomic;
if (attr_mask & IB_QP_PORT) {
@@ -3098,6 +3416,11 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+ kfree(hr_qp->rq_inl_buf.wqe_list);
+ }
+
return 0;
}
@@ -3162,6 +3485,1146 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
return ret;
}
+static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
+{
+ u32 doorbell[2];
+
+ doorbell[0] = 0;
+ doorbell[1] = 0;
+
+ if (eq->type_flag == HNS_ROCE_AEQ) {
+ roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
+ HNS_ROCE_V2_EQ_DB_CMD_S,
+ eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
+ HNS_ROCE_EQ_DB_CMD_AEQ :
+ HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
+ } else {
+ roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
+ HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
+
+ roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
+ HNS_ROCE_V2_EQ_DB_CMD_S,
+ eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
+ HNS_ROCE_EQ_DB_CMD_CEQ :
+ HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
+ }
+
+ roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
+ HNS_ROCE_V2_EQ_DB_PARA_S,
+ (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
+
+ hns_roce_write64_k(doorbell, eq->doorbell);
+}
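
For reference, the two doorbell words written above pack their fields as
follows -- a minimal standalone sketch (set_field() is an illustrative
helper, not the driver's roce_set_field(); field positions follow the
HNS_ROCE_V2_EQ_DB_* masks defined later in this patch, CEQ case shown):

    #include <stdint.h>

    static inline uint32_t set_field(uint32_t w, uint32_t mask, int shift,
                                     uint32_t val)
    {
            return (w & ~mask) | ((val << shift) & mask);
    }

    static void build_ceq_db(uint32_t db[2], uint8_t eqn, uint8_t cmd,
                             uint32_t cons_index)
    {
            db[0] = 0;
            db[0] = set_field(db[0], 0x000000ff, 0, eqn);    /* TAG,  bits 7:0   */
            db[0] = set_field(db[0], 0x00030000, 16, cmd);   /* CMD,  bits 17:16 */
            db[1] = set_field(0, 0x00ffffff, 0, cons_index); /* PARA, bits 23:0  */
    }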
+
+static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ u32 qpn)
+{
+ struct device *dev = hr_dev->dev;
+ int sub_type;
+
+ dev_warn(dev, "Local work queue catastrophic error.\n");
+ sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
+ HNS_ROCE_V2_AEQE_SUB_TYPE_S);
+ switch (sub_type) {
+ case HNS_ROCE_LWQCE_QPC_ERROR:
+ dev_warn(dev, "QP %d, QPC error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_MTU_ERROR:
+ dev_warn(dev, "QP %d, MTU error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+ dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
+ break;
+ default:
+ dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
+ break;
+ }
+}
+
+static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe, u32 qpn)
+{
+ struct device *dev = hr_dev->dev;
+ int sub_type;
+
+ dev_warn(dev, "Local access violation work queue error.\n");
+ sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
+ HNS_ROCE_V2_AEQE_SUB_TYPE_S);
+ switch (sub_type) {
+ case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+ dev_warn(dev, "QP %d, R_key violation.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+ dev_warn(dev, "QP %d, length error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_VA_ERROR:
+ dev_warn(dev, "QP %d, VA error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_PD_ERROR:
+ dev_err(dev, "QP %d, PD error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+ dev_warn(dev, "QP %d, rw acc error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+ dev_warn(dev, "QP %d, key state error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+ dev_warn(dev, "QP %d, MR operation error.\n", qpn);
+ break;
+ default:
+ dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
+ break;
+ }
+}
+
+static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = hr_dev->dev;
+ u32 qpn;
+
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ dev_warn(dev, "Communication established.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ dev_warn(dev, "Send queue drained.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_warn(dev, "Invalid request local work queue error.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = hr_dev->dev;
+ u32 cqn;
+
+ cqn = roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_cq_event(hr_dev, cqn, event_type);
+}
+
+static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
+ off % buf_chk_sz);
+}
+
+static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
+
+ if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
+ return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
+ off % buf_chk_sz);
+ else
+ return (struct hns_roce_aeqe *)((u8 *)
+ (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
+}
+
+static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
+{
+ struct hns_roce_aeqe *aeqe;
+
+ if (!eq->hop_num)
+ aeqe = get_aeqe_v2(eq, eq->cons_index);
+ else
+ aeqe = mhop_get_aeqe(eq, eq->cons_index);
+
+ return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
+ !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
+}
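
next_aeqe_sw_v2() (and next_ceqe_sw_v2() below) implement the usual
ownership/phase-bit scheme: cons_index counts from 0 to 2 * entries - 1
before wrapping, so the bit (cons_index & entries) flips once per lap of
the ring, and an entry is consumable exactly when the hardware-written
owner bit XORed with that lap parity is set. A generic sketch of the
check (owner_bit stands in for reading HNS_ROCE_V2_AEQ_AEQE_OWNER_S from
the entry):

    struct ring {
            unsigned int entries;    /* power of two */
            unsigned int cons_index; /* runs 0 .. 2 * entries - 1, then wraps */
    };

    /* hardware toggles the owner bit each time it laps the ring */
    static int entry_is_new(const struct ring *r, int owner_bit)
    {
            return owner_bit ^ !!(r->cons_index & r->entries);
    }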
+
+static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_aeqe *aeqe;
+ int aeqe_found = 0;
+ int event_type;
+
+ while ((aeqe = next_aeqe_sw_v2(eq))) {
+
+ /* Make sure we read AEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ event_type = roce_get_field(aeqe->asyn,
+ HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
+ HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ dev_warn(dev, "Path migrated succeeded.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ dev_warn(dev, "Path migration failed.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ dev_warn(dev, "SRQ not support.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ dev_warn(dev, "DB overflow.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_MB:
+ hns_roce_cmd_event(hr_dev,
+ le16_to_cpu(aeqe->event.cmd.token),
+ aeqe->event.cmd.status,
+ le64_to_cpu(aeqe->event.cmd.out_param));
+ break;
+ case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
+ dev_warn(dev, "CEQ overflow.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_FLR:
+ dev_warn(dev, "Function level reset.\n");
+ break;
+ default:
+ dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+ event_type, eq->eqn, eq->cons_index);
+ break;
+ }
+
+ ++eq->cons_index;
+ aeqe_found = 1;
+
+ if (eq->cons_index > (2 * eq->entries - 1)) {
+ dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v2(eq);
+ return aeqe_found;
+}
+
+static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
+ off % buf_chk_sz);
+}
+
+static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
+
+ if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
+ return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
+ off % buf_chk_sz);
+ else
+ return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
+ buf_chk_sz]) + off % buf_chk_sz);
+}
+
+static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe;
+
+ if (!eq->hop_num)
+ ceqe = get_ceqe_v2(eq, eq->cons_index);
+ else
+ ceqe = mhop_get_ceqe(eq, eq->cons_index);
+
+ return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
+ (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
+}
+
+static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_ceqe *ceqe;
+ int ceqe_found = 0;
+ u32 cqn;
+
+ while ((ceqe = next_ceqe_sw_v2(eq))) {
+
+ /* Make sure we read CEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ cqn = roce_get_field(ceqe->comp,
+ HNS_ROCE_V2_CEQE_COMP_CQN_M,
+ HNS_ROCE_V2_CEQE_COMP_CQN_S);
+
+ hns_roce_cq_completion(hr_dev, cqn);
+
+ ++eq->cons_index;
+ ceqe_found = 1;
+
+ if (eq->cons_index > (2 * eq->entries - 1)) {
+ dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v2(eq);
+
+ return ceqe_found;
+}
+
+static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
+{
+ struct hns_roce_eq *eq = eq_ptr;
+ struct hns_roce_dev *hr_dev = eq->hr_dev;
+ int int_work = 0;
+
+ if (eq->type_flag == HNS_ROCE_CEQ)
+ /* Completion event interrupt */
+ int_work = hns_roce_v2_ceq_int(hr_dev, eq);
+ else
+ /* Asynchronous event interrupt */
+ int_work = hns_roce_v2_aeq_int(hr_dev, eq);
+
+ return IRQ_RETVAL(int_work);
+}
+
+static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
+{
+ struct hns_roce_dev *hr_dev = dev_id;
+ struct device *dev = hr_dev->dev;
+ int int_work = 0;
+ u32 int_st;
+ u32 int_en;
+
+ /* Abnormal interrupt */
+ int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
+ int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
+
+ if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
+ dev_err(dev, "AEQ overflow!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+ int_work = 1;
+ } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
+ dev_err(dev, "BUS ERR!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+ int_work = 1;
+ } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
+ dev_err(dev, "OTHER ERR!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+ int_work = 1;
+ } else {
+ dev_err(dev, "No abnormal irq found!\n");
+ }
+
+ return IRQ_RETVAL(int_work);
+}
+
+static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
+ int eq_num, int enable_flag)
+{
+ int i;
+
+ if (enable_flag == EQ_ENABLE) {
+ for (i = 0; i < eq_num; i++)
+ roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
+ i * EQ_REG_OFFSET,
+ HNS_ROCE_V2_VF_EVENT_INT_EN_M);
+
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
+ HNS_ROCE_V2_VF_ABN_INT_EN_M);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
+ HNS_ROCE_V2_VF_ABN_INT_CFG_M);
+ } else {
+ for (i = 0; i < eq_num; i++)
+ roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
+ i * EQ_REG_OFFSET,
+ HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
+
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
+ HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
+ HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
+ }
+}
+
+static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ if (eqn < hr_dev->caps.num_comp_vectors)
+ ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
+ 0, HNS_ROCE_CMD_DESTROY_CEQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ else
+ ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
+ 0, HNS_ROCE_CMD_DESTROY_AEQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret)
+ dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
+}
+
+static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ u64 idx;
+ u64 size;
+ u32 buf_chk_sz;
+ u32 bt_chk_sz;
+ u32 mhop_num;
+ int eqe_alloc;
+ int ba_num;
+ int i = 0;
+ int j = 0;
+
+ mhop_num = hr_dev->caps.eqe_hop_num;
+ buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
+ bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
+ ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1) /
+ buf_chk_sz;
+
+ /* hop_num = 0 */
+ if (mhop_num == HNS_ROCE_HOP_NUM_0) {
+ dma_free_coherent(dev, (unsigned int)(eq->entries *
+ eq->eqe_size), eq->bt_l0, eq->l0_dma);
+ return;
+ }
+
+ /* hop_num = 1 or hop_num = 2 */
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
+ if (mhop_num == 1) {
+ for (i = 0; i < eq->l0_last_num; i++) {
+ if (i == eq->l0_last_num - 1) {
+ eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
+ size = (eq->entries - eqe_alloc) * eq->eqe_size;
+ dma_free_coherent(dev, size, eq->buf[i],
+ eq->buf_dma[i]);
+ break;
+ }
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
+ eq->buf_dma[i]);
+ }
+ } else if (mhop_num == 2) {
+ for (i = 0; i < eq->l0_last_num; i++) {
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
+ eq->l1_dma[i]);
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ idx = i * (bt_chk_sz / 8) + j;
+ if ((i == eq->l0_last_num - 1)
+ && j == eq->l1_last_num - 1) {
+ eqe_alloc = (buf_chk_sz / eq->eqe_size)
+ * idx;
+ size = (eq->entries - eqe_alloc)
+ * eq->eqe_size;
+ dma_free_coherent(dev, size,
+ eq->buf[idx],
+ eq->buf_dma[idx]);
+ break;
+ }
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
+ eq->buf_dma[idx]);
+ }
+ }
+ }
+ kfree(eq->buf_dma);
+ kfree(eq->buf);
+ kfree(eq->l1_dma);
+ kfree(eq->bt_l1);
+ eq->buf_dma = NULL;
+ eq->buf = NULL;
+ eq->l1_dma = NULL;
+ eq->bt_l1 = NULL;
+}
+
+static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ u32 buf_chk_sz;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+ if (hr_dev->caps.eqe_hop_num) {
+ hns_roce_mhop_free_eq(hr_dev, eq);
+ return;
+ }
+
+ if (eq->buf_list)
+ dma_free_coherent(hr_dev->dev, buf_chk_sz,
+ eq->buf_list->buf, eq->buf_list->map);
+}
+
+static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq,
+ void *mb_buf)
+{
+ struct hns_roce_eq_context *eqc;
+
+ eqc = mb_buf;
+ memset(eqc, 0, sizeof(struct hns_roce_eq_context));
+
+ /* init eqc */
+ eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
+ eq->hop_num = hr_dev->caps.eqe_hop_num;
+ eq->cons_index = 0;
+ eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
+ eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
+ eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
+ eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
+ eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
+ eq->shift = ilog2((unsigned int)eq->entries);
+
+ if (!eq->hop_num)
+ eq->eqe_ba = eq->buf_list->map;
+ else
+ eq->eqe_ba = eq->l0_dma;
+
+ /* set eqc state */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_EQ_ST_M,
+ HNS_ROCE_EQC_EQ_ST_S,
+ HNS_ROCE_V2_EQ_STATE_VALID);
+
+ /* set eqe hop num */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_HOP_NUM_M,
+ HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
+
+ /* set eqc over_ignore */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_OVER_IGNORE_M,
+ HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
+
+ /* set eqc coalesce */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_COALESCE_M,
+ HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
+
+ /* set eqc arm_state */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_ARM_ST_M,
+ HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
+
+ /* set eqn */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_EQN_M,
+ HNS_ROCE_EQC_EQN_S, eq->eqn);
+
+ /* set eqe_cnt */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_EQE_CNT_M,
+ HNS_ROCE_EQC_EQE_CNT_S,
+ HNS_ROCE_EQ_INIT_EQE_CNT);
+
+ /* set eqe_ba_pg_sz */
+ roce_set_field(eqc->byte_8,
+ HNS_ROCE_EQC_BA_PG_SZ_M,
+ HNS_ROCE_EQC_BA_PG_SZ_S, eq->eqe_ba_pg_sz);
+
+ /* set eqe_buf_pg_sz */
+ roce_set_field(eqc->byte_8,
+ HNS_ROCE_EQC_BUF_PG_SZ_M,
+ HNS_ROCE_EQC_BUF_PG_SZ_S, eq->eqe_buf_pg_sz);
+
+ /* set eq_producer_idx */
+ roce_set_field(eqc->byte_8,
+ HNS_ROCE_EQC_PROD_INDX_M,
+ HNS_ROCE_EQC_PROD_INDX_S,
+ HNS_ROCE_EQ_INIT_PROD_IDX);
+
+ /* set eq_max_cnt */
+ roce_set_field(eqc->byte_12,
+ HNS_ROCE_EQC_MAX_CNT_M,
+ HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
+
+ /* set eq_period */
+ roce_set_field(eqc->byte_12,
+ HNS_ROCE_EQC_PERIOD_M,
+ HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
+
+ /* set eqe_report_timer */
+ roce_set_field(eqc->eqe_report_timer,
+ HNS_ROCE_EQC_REPORT_TIMER_M,
+ HNS_ROCE_EQC_REPORT_TIMER_S,
+ HNS_ROCE_EQ_INIT_REPORT_TIMER);
+
+ /* set eqe_ba [34:3] */
+ roce_set_field(eqc->eqe_ba0,
+ HNS_ROCE_EQC_EQE_BA_L_M,
+ HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
+
+ /* set eqe_ba [63:35] */
+ roce_set_field(eqc->eqe_ba1,
+ HNS_ROCE_EQC_EQE_BA_H_M,
+ HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
+
+ /* set eq shift */
+ roce_set_field(eqc->byte_28,
+ HNS_ROCE_EQC_SHIFT_M,
+ HNS_ROCE_EQC_SHIFT_S, eq->shift);
+
+ /* set eq MSI_IDX */
+ roce_set_field(eqc->byte_28,
+ HNS_ROCE_EQC_MSI_INDX_M,
+ HNS_ROCE_EQC_MSI_INDX_S,
+ HNS_ROCE_EQ_INIT_MSI_IDX);
+
+ /* set cur_eqe_ba [27:12] */
+ roce_set_field(eqc->byte_28,
+ HNS_ROCE_EQC_CUR_EQE_BA_L_M,
+ HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
+
+ /* set cur_eqe_ba [59:28] */
+ roce_set_field(eqc->byte_32,
+ HNS_ROCE_EQC_CUR_EQE_BA_M_M,
+ HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
+
+ /* set cur_eqe_ba [63:60] */
+ roce_set_field(eqc->byte_36,
+ HNS_ROCE_EQC_CUR_EQE_BA_H_M,
+ HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
+
+ /* set eq consumer idx */
+ roce_set_field(eqc->byte_36,
+ HNS_ROCE_EQC_CONS_INDX_M,
+ HNS_ROCE_EQC_CONS_INDX_S,
+ HNS_ROCE_EQ_INIT_CONS_IDX);
+
+ /* set nxt_eqe_ba[43:12] */
+ roce_set_field(eqc->nxt_eqe_ba0,
+ HNS_ROCE_EQC_NXT_EQE_BA_L_M,
+ HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
+
+ /* set nxt_eqe_ba[63:44] */
+ roce_set_field(eqc->nxt_eqe_ba1,
+ HNS_ROCE_EQC_NXT_EQE_BA_H_M,
+ HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
+}
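
hns_roce_config_eqc() splits the 64-bit eqe_ba across two context words,
BA_L = address bits [34:3] and BA_H = bits [63:35]. A worked sketch of the
split and its inverse (assumed helpers, not driver code; the address is
8-byte aligned, so bits 2:0 are zero and nothing is lost):

    #include <stdint.h>

    static void split_eqe_ba(uint64_t ba, uint32_t *lo, uint32_t *hi)
    {
            *lo = (uint32_t)(ba >> 3);  /* BA_L: address bits 34:3  */
            *hi = (uint32_t)(ba >> 35); /* BA_H: address bits 63:35 */
    }

    static uint64_t join_eqe_ba(uint32_t lo, uint32_t hi)
    {
            return ((uint64_t)hi << 35) | ((uint64_t)lo << 3);
    }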
+
+static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ int eq_alloc_done = 0;
+ int eq_buf_cnt = 0;
+ int eqe_alloc;
+ u32 buf_chk_sz;
+ u32 bt_chk_sz;
+ u32 mhop_num;
+ u64 size;
+ u64 idx;
+ int ba_num;
+ int bt_num;
+ int record_i;
+ int record_j;
+ int i = 0;
+ int j = 0;
+
+ mhop_num = hr_dev->caps.eqe_hop_num;
+ buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
+ bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
+
+ ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
+ / buf_chk_sz;
+ bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
+
+ /* hop_num = 0 */
+ if (mhop_num == HNS_ROCE_HOP_NUM_0) {
+ if (eq->entries > buf_chk_sz / eq->eqe_size) {
+ dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
+ eq->entries);
+ return -EINVAL;
+ }
+ eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
+ &(eq->l0_dma), GFP_KERNEL);
+ if (!eq->bt_l0)
+ return -ENOMEM;
+
+ eq->cur_eqe_ba = eq->l0_dma;
+ eq->nxt_eqe_ba = 0;
+
+ memset(eq->bt_l0, 0, eq->entries * eq->eqe_size);
+
+ return 0;
+ }
+
+ eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
+ if (!eq->buf_dma)
+ return -ENOMEM;
+ eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
+ if (!eq->buf)
+ goto err_kcalloc_buf;
+
+ if (mhop_num == 2) {
+ eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
+ if (!eq->l1_dma)
+ goto err_kcalloc_l1_dma;
+
+ eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
+ if (!eq->bt_l1)
+ goto err_kcalloc_bt_l1;
+ }
+
+ /* alloc L0 BT */
+ eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
+ if (!eq->bt_l0)
+ goto err_dma_alloc_l0;
+
+ if (mhop_num == 1) {
+ if (ba_num > (bt_chk_sz / 8))
+ dev_err(dev, "ba_num %d is too large for 1 hop\n",
+ ba_num);
+
+ /* alloc buf */
+ for (i = 0; i < bt_chk_sz / 8; i++) {
+ if (eq_buf_cnt + 1 < ba_num) {
+ size = buf_chk_sz;
+ } else {
+ eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
+ size = (eq->entries - eqe_alloc) * eq->eqe_size;
+ }
+ eq->buf[i] = dma_alloc_coherent(dev, size,
+ &(eq->buf_dma[i]),
+ GFP_KERNEL);
+ if (!eq->buf[i])
+ goto err_dma_alloc_buf;
+
+ memset(eq->buf[i], 0, size);
+ *(eq->bt_l0 + i) = eq->buf_dma[i];
+
+ eq_buf_cnt++;
+ if (eq_buf_cnt >= ba_num)
+ break;
+ }
+ eq->cur_eqe_ba = eq->buf_dma[0];
+ eq->nxt_eqe_ba = eq->buf_dma[1];
+
+ } else if (mhop_num == 2) {
+ /* alloc L1 BT and buf */
+ for (i = 0; i < bt_chk_sz / 8; i++) {
+ eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
+ &(eq->l1_dma[i]),
+ GFP_KERNEL);
+ if (!eq->bt_l1[i])
+ goto err_dma_alloc_l1;
+ *(eq->bt_l0 + i) = eq->l1_dma[i];
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ idx = i * bt_chk_sz / 8 + j;
+ if (eq_buf_cnt + 1 < ba_num) {
+ size = buf_chk_sz;
+ } else {
+ eqe_alloc = (buf_chk_sz / eq->eqe_size)
+ * idx;
+ size = (eq->entries - eqe_alloc)
+ * eq->eqe_size;
+ }
+ eq->buf[idx] = dma_alloc_coherent(dev, size,
+ &(eq->buf_dma[idx]),
+ GFP_KERNEL);
+ if (!eq->buf[idx])
+ goto err_dma_alloc_buf;
+
+ memset(eq->buf[idx], 0, size);
+ *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
+
+ eq_buf_cnt++;
+ if (eq_buf_cnt >= ba_num) {
+ eq_alloc_done = 1;
+ break;
+ }
+ }
+
+ if (eq_alloc_done)
+ break;
+ }
+ eq->cur_eqe_ba = eq->buf_dma[0];
+ eq->nxt_eqe_ba = eq->buf_dma[1];
+ }
+
+ eq->l0_last_num = i + 1;
+ if (mhop_num == 2)
+ eq->l1_last_num = j + 1;
+
+ return 0;
+
+err_dma_alloc_l1:
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
+ eq->bt_l0 = NULL;
+ eq->l0_dma = 0;
+ for (i -= 1; i >= 0; i--) {
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
+ eq->l1_dma[i]);
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ idx = i * bt_chk_sz / 8 + j;
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
+ eq->buf_dma[idx]);
+ }
+ }
+ goto err_dma_alloc_l0;
+
+err_dma_alloc_buf:
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
+ eq->bt_l0 = NULL;
+ eq->l0_dma = 0;
+
+ if (mhop_num == 1)
+ for (i -= 1; i >= 0; i--)
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
+ eq->buf_dma[i]);
+ else if (mhop_num == 2) {
+ record_i = i;
+ record_j = j;
+ for (; i >= 0; i--) {
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
+ eq->l1_dma[i]);
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ if (i == record_i && j >= record_j)
+ break;
+
+ idx = i * bt_chk_sz / 8 + j;
+ dma_free_coherent(dev, buf_chk_sz,
+ eq->buf[idx],
+ eq->buf_dma[idx]);
+ }
+ }
+ }
+
+err_dma_alloc_l0:
+ kfree(eq->bt_l1);
+ eq->bt_l1 = NULL;
+
+err_kcalloc_bt_l1:
+ kfree(eq->l1_dma);
+ eq->l1_dma = NULL;
+
+err_kcalloc_l1_dma:
+ kfree(eq->buf);
+ eq->buf = NULL;
+
+err_kcalloc_buf:
+ kfree(eq->buf_dma);
+ eq->buf_dma = NULL;
+
+ return -ENOMEM;
+}
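
The sizing arithmetic at the top of hns_roce_mhop_alloc_eq() reduces to two
ceiling divisions -- a standalone sketch (the driver's PAGE_ALIGN() is
absorbed by the ceiling division because buf_chk_sz is a whole number of
pages):

    /* ba_num: buffer pages needed for the EQE ring;
     * bt_num: L1 tables, each holding bt_chk_sz / 8 64-bit addresses.
     */
    static void eq_mhop_sizes(unsigned long entries, unsigned long eqe_size,
                              unsigned long buf_chk_sz, unsigned long bt_chk_sz,
                              unsigned long *ba_num, unsigned long *bt_num)
    {
            unsigned long bytes = entries * eqe_size;

            *ba_num = (bytes + buf_chk_sz - 1) / buf_chk_sz;
            *bt_num = (*ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
    }

    /* e.g. 4096 AEQEs of 64 B with 4 KiB chunks: ba_num = 64, bt_num = 1 */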
+
+static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq,
+ unsigned int eq_cmd)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ u32 buf_chk_sz = 0;
+ int ret;
+
+ /* Allocate mailbox memory */
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ if (!hr_dev->caps.eqe_hop_num) {
+ buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
+
+ eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
+ GFP_KERNEL);
+ if (!eq->buf_list) {
+ ret = -ENOMEM;
+ goto free_cmd_mbox;
+ }
+
+ eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
+ &(eq->buf_list->map),
+ GFP_KERNEL);
+ if (!eq->buf_list->buf) {
+ ret = -ENOMEM;
+ goto err_alloc_buf;
+ }
+
+ memset(eq->buf_list->buf, 0, buf_chk_sz);
+ } else {
+ ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
+ if (ret)
+ goto free_cmd_mbox;
+ }
+
+ hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
+ eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret) {
+ dev_err(dev, "[mailbox cmd] creat eqc failed.\n");
+ goto err_cmd_mbox;
+ }
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return 0;
+
+err_cmd_mbox:
+ if (!hr_dev->caps.eqe_hop_num)
+ dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
+ eq->buf_list->map);
+ else {
+ hns_roce_mhop_free_eq(hr_dev, eq);
+ goto free_cmd_mbox;
+ }
+
+err_alloc_buf:
+ kfree(eq->buf_list);
+
+free_cmd_mbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_eq *eq;
+ unsigned int eq_cmd;
+ int irq_num;
+ int eq_num;
+ int other_num;
+ int comp_num;
+ int aeq_num;
+ int i, j, k;
+ int ret;
+
+ other_num = hr_dev->caps.num_other_vectors;
+ comp_num = hr_dev->caps.num_comp_vectors;
+ aeq_num = hr_dev->caps.num_aeq_vectors;
+
+ eq_num = comp_num + aeq_num;
+ irq_num = eq_num + other_num;
+
+ eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
+ if (!eq_table->eq)
+ return -ENOMEM;
+
+ for (i = 0; i < irq_num; i++) {
+ hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
+ GFP_KERNEL);
+ if (!hr_dev->irq_names[i]) {
+ ret = -ENOMEM;
+ goto err_failed_kzalloc;
+ }
+ }
+
+ /* create eq */
+ for (j = 0; j < eq_num; j++) {
+ eq = &eq_table->eq[j];
+ eq->hr_dev = hr_dev;
+ eq->eqn = j;
+ if (j < comp_num) {
+ /* CEQ */
+ eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
+ eq->type_flag = HNS_ROCE_CEQ;
+ eq->entries = hr_dev->caps.ceqe_depth;
+ eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
+ eq->irq = hr_dev->irq[j + other_num + aeq_num];
+ eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
+ eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
+ } else {
+ /* AEQ */
+ eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
+ eq->type_flag = HNS_ROCE_AEQ;
+ eq->entries = hr_dev->caps.aeqe_depth;
+ eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
+ eq->irq = hr_dev->irq[j - comp_num + other_num];
+ eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
+ eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
+ }
+
+ ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
+ if (ret) {
+ dev_err(dev, "eq create failed.\n");
+ goto err_create_eq_fail;
+ }
+ }
+
+ /* enable irq */
+ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
+
+ /* irq contains: abnormal + AEQ + CEQ */
+ for (k = 0; k < irq_num; k++)
+ if (k < other_num)
+ snprintf((char *)hr_dev->irq_names[k],
+ HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
+ else if (k < (other_num + aeq_num))
+ snprintf((char *)hr_dev->irq_names[k],
+ HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
+ k - other_num);
+ else
+ snprintf((char *)hr_dev->irq_names[k],
+ HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
+ k - other_num - aeq_num);
+
+ for (k = 0; k < irq_num; k++) {
+ if (k < other_num)
+ ret = request_irq(hr_dev->irq[k],
+ hns_roce_v2_msix_interrupt_abn,
+ 0, hr_dev->irq_names[k], hr_dev);
+
+ else if (k < (other_num + comp_num))
+ ret = request_irq(eq_table->eq[k - other_num].irq,
+ hns_roce_v2_msix_interrupt_eq,
+ 0, hr_dev->irq_names[k + aeq_num],
+ &eq_table->eq[k - other_num]);
+ else
+ ret = request_irq(eq_table->eq[k - other_num].irq,
+ hns_roce_v2_msix_interrupt_eq,
+ 0, hr_dev->irq_names[k - comp_num],
+ &eq_table->eq[k - other_num]);
+ if (ret) {
+ dev_err(dev, "Request irq error!\n");
+ goto err_request_irq_fail;
+ }
+ }
+
+ return 0;
+
+err_request_irq_fail:
+ for (k -= 1; k >= 0; k--)
+ if (k < other_num)
+ free_irq(hr_dev->irq[k], hr_dev);
+ else
+ free_irq(eq_table->eq[k - other_num].irq,
+ &eq_table->eq[k - other_num]);
+
+err_create_eq_fail:
+ for (j -= 1; j >= 0; j--)
+ hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
+
+err_failed_kzalloc:
+ for (i -= 1; i >= 0; i--)
+ kfree(hr_dev->irq_names[i]);
+ kfree(eq_table->eq);
+
+ return ret;
+}
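
The request_irq() loop above has to translate between two layouts: the irq
name array is ordered [abn | aeq | ceq], while eq_table->eq[] is ordered
[ceq | aeq], which is where the k + aeq_num and k - comp_num corrections
come from. A hypothetical walk-through with assumed vector counts
(illustration only, not driver code):

    #include <stdio.h>

    int main(void)
    {
            int other = 1, aeq = 1, comp = 2;   /* assumed counts */

            for (int k = 0; k < other + aeq + comp; k++) {
                    if (k < other)
                            printf("irq %d -> abn, name slot %d\n", k, k);
                    else if (k < other + comp)  /* CEQ vectors come first in eq[] */
                            printf("irq %d -> eq[%d], name slot %d (ceq)\n",
                                   k, k - other, k + aeq);
                    else                        /* AEQ vectors */
                            printf("irq %d -> eq[%d], name slot %d (aeq)\n",
                                   k, k - other, k - comp);
            }
            return 0;
    }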
+
+static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ int irq_num;
+ int eq_num;
+ int i;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ irq_num = eq_num + hr_dev->caps.num_other_vectors;
+
+ /* Disable irq */
+ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
+
+ for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
+ free_irq(hr_dev->irq[i], hr_dev);
+
+ for (i = 0; i < eq_num; i++) {
+ hns_roce_v2_destroy_eqc(hr_dev, i);
+
+ free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
+
+ hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
+ }
+
+ for (i = 0; i < irq_num; i++)
+ kfree(hr_dev->irq_names[i]);
+
+ kfree(eq_table->eq);
+}
+
static const struct hns_roce_hw hns_roce_hw_v2 = {
.cmq_init = hns_roce_v2_cmq_init,
.cmq_exit = hns_roce_v2_cmq_exit,
@@ -3183,6 +4646,8 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.post_recv = hns_roce_v2_post_recv,
.req_notify_cq = hns_roce_v2_req_notify_cq,
.poll_cq = hns_roce_v2_poll_cq,
+ .init_eq = hns_roce_v2_init_eq_table,
+ .cleanup_eq = hns_roce_v2_cleanup_eq_table,
};
static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
@@ -3197,6 +4662,7 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
const struct pci_device_id *id;
+ int i;
id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
if (!id) {
@@ -3214,8 +4680,15 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
hr_dev->iboe.phy_port[0] = 0;
+ addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
+ hr_dev->iboe.netdevs[0]->dev_addr);
+
+ for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
+ hr_dev->irq[i] = pci_irq_vector(handle->pdev,
+ i + handle->rinfo.base_vector);
+
/* cmd issue mode: 0 is poll, 1 is event */
- hr_dev->cmd_mod = 0;
+ hr_dev->cmd_mod = 1;
hr_dev->loop_idc = 0;
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 04b7a51b8efb..960df095392a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -53,6 +53,10 @@
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
+#define HNS_ROCE_V2_MAX_IRQ_NUM 65
+#define HNS_ROCE_V2_COMP_VEC_NUM 63
+#define HNS_ROCE_V2_AEQE_VEC_NUM 1
+#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
#define HNS_ROCE_V2_MAX_MTPT_NUM 0x8000
#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
@@ -78,6 +82,8 @@
#define HNS_ROCE_MTT_HOP_NUM 1
#define HNS_ROCE_CQE_HOP_NUM 1
#define HNS_ROCE_PBL_HOP_NUM 2
+#define HNS_ROCE_EQE_HOP_NUM 2
+
#define HNS_ROCE_V2_GID_INDEX_NUM 256
#define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18)
@@ -105,6 +111,12 @@
(step_idx == 1 && hop_num == 1) || \
(step_idx == 2 && hop_num == 2))
+enum {
+ NO_ARMED = 0x0,
+ REG_NXT_CEQE = 0x2,
+ REG_NXT_SE_CEQE = 0x3
+};
+
#define V2_CQ_DB_REQ_NOT_SOL 0
#define V2_CQ_DB_REQ_NOT 1
@@ -229,6 +241,9 @@ struct hns_roce_v2_cq_context {
u32 cqe_report_timer;
u32 byte_64_se_cqe_idx;
};
+#define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0
+
#define V2_CQC_BYTE_4_CQ_ST_S 0
#define V2_CQC_BYTE_4_CQ_ST_M GENMASK(1, 0)
@@ -747,11 +762,14 @@ struct hns_roce_v2_qp_context {
struct hns_roce_v2_cqe {
u32 byte_4;
- u32 rkey_immtdata;
+ union {
+ __le32 rkey;
+ __be32 immtdata;
+ };
u32 byte_12;
u32 byte_16;
u32 byte_cnt;
- u32 smac;
+ u8 smac[4];
u32 byte_28;
u32 byte_32;
};
@@ -901,6 +919,90 @@ struct hns_roce_v2_cq_db {
#define V2_CQ_DB_PARAMETER_NOTIFY_S 24
+struct hns_roce_v2_ud_send_wqe {
+ u32 byte_4;
+ u32 msg_len;
+ u32 immtdata;
+ u32 byte_16;
+ u32 byte_20;
+ u32 byte_24;
+ u32 qkey;
+ u32 byte_32;
+ u32 byte_36;
+ u32 byte_40;
+ u32 dmac;
+ u32 byte_48;
+ u8 dgid[GID_LEN_V2];
+};
+#define V2_UD_SEND_WQE_BYTE_4_OPCODE_S 0
+#define V2_UD_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
+
+#define V2_UD_SEND_WQE_BYTE_4_OWNER_S 7
+
+#define V2_UD_SEND_WQE_BYTE_4_CQE_S 8
+
+#define V2_UD_SEND_WQE_BYTE_4_SE_S 11
+
+#define V2_UD_SEND_WQE_BYTE_16_PD_S 0
+#define V2_UD_SEND_WQE_BYTE_16_PD_M GENMASK(23, 0)
+
+#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S 24
+#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24)
+
+#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
+#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
+
+#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_S 16
+#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_M GENMASK(31, 16)
+
+#define V2_UD_SEND_WQE_BYTE_32_DQPN_S 0
+#define V2_UD_SEND_WQE_BYTE_32_DQPN_M GENMASK(23, 0)
+
+#define V2_UD_SEND_WQE_BYTE_36_VLAN_S 0
+#define V2_UD_SEND_WQE_BYTE_36_VLAN_M GENMASK(15, 0)
+
+#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S 16
+#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M GENMASK(23, 16)
+
+#define V2_UD_SEND_WQE_BYTE_36_TCLASS_S 24
+#define V2_UD_SEND_WQE_BYTE_36_TCLASS_M GENMASK(31, 24)
+
+#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S 0
+#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M GENMASK(19, 0)
+
+#define V2_UD_SEND_WQE_BYTE_40_SL_S 20
+#define V2_UD_SEND_WQE_BYTE_40_SL_M GENMASK(23, 20)
+
+#define V2_UD_SEND_WQE_BYTE_40_PORTN_S 24
+#define V2_UD_SEND_WQE_BYTE_40_PORTN_M GENMASK(26, 24)
+
+#define V2_UD_SEND_WQE_BYTE_40_LBI_S 31
+
+#define V2_UD_SEND_WQE_DMAC_0_S 0
+#define V2_UD_SEND_WQE_DMAC_0_M GENMASK(7, 0)
+
+#define V2_UD_SEND_WQE_DMAC_1_S 8
+#define V2_UD_SEND_WQE_DMAC_1_M GENMASK(15, 8)
+
+#define V2_UD_SEND_WQE_DMAC_2_S 16
+#define V2_UD_SEND_WQE_DMAC_2_M GENMASK(23, 16)
+
+#define V2_UD_SEND_WQE_DMAC_3_S 24
+#define V2_UD_SEND_WQE_DMAC_3_M GENMASK(31, 24)
+
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_4_S 0
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_4_M GENMASK(7, 0)
+
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_5_S 8
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_5_M GENMASK(15, 8)
+
+#define V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S 16
+#define V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M GENMASK(23, 16)
+
+#define V2_UD_SEND_WQE_BYTE_48_SMAC_INDX_S 24
+#define V2_UD_SEND_WQE_BYTE_48_SMAC_INDX_M GENMASK(31, 24)
+
struct hns_roce_v2_rc_send_wqe {
u32 byte_4;
u32 msg_len;
@@ -1129,9 +1231,6 @@ struct hns_roce_cmq_desc {
u32 data[6];
};
-#define ROCEE_VF_MB_CFG0_REG 0x40
-#define ROCEE_VF_MB_STATUS_REG 0x58
-
#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
#define HNS_ROCE_HW_RUN_BIT_SHIFT 31
@@ -1174,4 +1273,178 @@ struct hns_roce_v2_priv {
struct hns_roce_v2_cmq cmq;
};
+struct hns_roce_eq_context {
+ u32 byte_4;
+ u32 byte_8;
+ u32 byte_12;
+ u32 eqe_report_timer;
+ u32 eqe_ba0;
+ u32 eqe_ba1;
+ u32 byte_28;
+ u32 byte_32;
+ u32 byte_36;
+ u32 nxt_eqe_ba0;
+ u32 nxt_eqe_ba1;
+ u32 rsv[5];
+};
+
+#define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_AEQ_DEFAULT_INTERVAL 0x0
+#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x0
+
+#define HNS_ROCE_V2_EQ_STATE_INVALID 0
+#define HNS_ROCE_V2_EQ_STATE_VALID 1
+#define HNS_ROCE_V2_EQ_STATE_OVERFLOW 2
+#define HNS_ROCE_V2_EQ_STATE_FAILURE 3
+
+#define HNS_ROCE_V2_EQ_OVER_IGNORE_0 0
+#define HNS_ROCE_V2_EQ_OVER_IGNORE_1 1
+
+#define HNS_ROCE_V2_EQ_COALESCE_0 0
+#define HNS_ROCE_V2_EQ_COALESCE_1 1
+
+#define HNS_ROCE_V2_EQ_FIRED 0
+#define HNS_ROCE_V2_EQ_ARMED 1
+#define HNS_ROCE_V2_EQ_ALWAYS_ARMED 3
+
+#define HNS_ROCE_EQ_INIT_EQE_CNT 0
+#define HNS_ROCE_EQ_INIT_PROD_IDX 0
+#define HNS_ROCE_EQ_INIT_REPORT_TIMER 0
+#define HNS_ROCE_EQ_INIT_MSI_IDX 0
+#define HNS_ROCE_EQ_INIT_CONS_IDX 0
+#define HNS_ROCE_EQ_INIT_NXT_EQE_BA 0
+
+#define HNS_ROCE_V2_CEQ_CEQE_OWNER_S 31
+#define HNS_ROCE_V2_AEQ_AEQE_OWNER_S 31
+
+#define HNS_ROCE_V2_COMP_EQE_NUM 0x1000
+#define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000
+
+#define HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S 0
+#define HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S 1
+#define HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S 2
+
+#define HNS_ROCE_EQ_DB_CMD_AEQ 0x0
+#define HNS_ROCE_EQ_DB_CMD_AEQ_ARMED 0x1
+#define HNS_ROCE_EQ_DB_CMD_CEQ 0x2
+#define HNS_ROCE_EQ_DB_CMD_CEQ_ARMED 0x3
+
+#define EQ_ENABLE 1
+#define EQ_DISABLE 0
+
+#define EQ_REG_OFFSET 0x4
+
+#define HNS_ROCE_INT_NAME_LEN 32
+#define HNS_ROCE_V2_EQN_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_CONS_IDX_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_VF_ABN_INT_EN_S 0
+#define HNS_ROCE_V2_VF_ABN_INT_EN_M GENMASK(0, 0)
+#define HNS_ROCE_V2_VF_ABN_INT_ST_M GENMASK(2, 0)
+#define HNS_ROCE_V2_VF_ABN_INT_CFG_M GENMASK(2, 0)
+#define HNS_ROCE_V2_VF_EVENT_INT_EN_M GENMASK(0, 0)
+
+/* WORD0 */
+#define HNS_ROCE_EQC_EQ_ST_S 0
+#define HNS_ROCE_EQC_EQ_ST_M GENMASK(1, 0)
+
+#define HNS_ROCE_EQC_HOP_NUM_S 2
+#define HNS_ROCE_EQC_HOP_NUM_M GENMASK(3, 2)
+
+#define HNS_ROCE_EQC_OVER_IGNORE_S 4
+#define HNS_ROCE_EQC_OVER_IGNORE_M GENMASK(4, 4)
+
+#define HNS_ROCE_EQC_COALESCE_S 5
+#define HNS_ROCE_EQC_COALESCE_M GENMASK(5, 5)
+
+#define HNS_ROCE_EQC_ARM_ST_S 6
+#define HNS_ROCE_EQC_ARM_ST_M GENMASK(7, 6)
+
+#define HNS_ROCE_EQC_EQN_S 8
+#define HNS_ROCE_EQC_EQN_M GENMASK(15, 8)
+
+#define HNS_ROCE_EQC_EQE_CNT_S 16
+#define HNS_ROCE_EQC_EQE_CNT_M GENMASK(31, 16)
+
+/* WORD1 */
+#define HNS_ROCE_EQC_BA_PG_SZ_S 0
+#define HNS_ROCE_EQC_BA_PG_SZ_M GENMASK(3, 0)
+
+#define HNS_ROCE_EQC_BUF_PG_SZ_S 4
+#define HNS_ROCE_EQC_BUF_PG_SZ_M GENMASK(7, 4)
+
+#define HNS_ROCE_EQC_PROD_INDX_S 8
+#define HNS_ROCE_EQC_PROD_INDX_M GENMASK(31, 8)
+
+/* WORD2 */
+#define HNS_ROCE_EQC_MAX_CNT_S 0
+#define HNS_ROCE_EQC_MAX_CNT_M GENMASK(15, 0)
+
+#define HNS_ROCE_EQC_PERIOD_S 16
+#define HNS_ROCE_EQC_PERIOD_M GENMASK(31, 16)
+
+/* WORD3 */
+#define HNS_ROCE_EQC_REPORT_TIMER_S 0
+#define HNS_ROCE_EQC_REPORT_TIMER_M GENMASK(31, 0)
+
+/* WORD4 */
+#define HNS_ROCE_EQC_EQE_BA_L_S 0
+#define HNS_ROCE_EQC_EQE_BA_L_M GENMASK(31, 0)
+
+/* WORD5 */
+#define HNS_ROCE_EQC_EQE_BA_H_S 0
+#define HNS_ROCE_EQC_EQE_BA_H_M GENMASK(28, 0)
+
+/* WORD6 */
+#define HNS_ROCE_EQC_SHIFT_S 0
+#define HNS_ROCE_EQC_SHIFT_M GENMASK(7, 0)
+
+#define HNS_ROCE_EQC_MSI_INDX_S 8
+#define HNS_ROCE_EQC_MSI_INDX_M GENMASK(15, 8)
+
+#define HNS_ROCE_EQC_CUR_EQE_BA_L_S 16
+#define HNS_ROCE_EQC_CUR_EQE_BA_L_M GENMASK(31, 16)
+
+/* WORD7 */
+#define HNS_ROCE_EQC_CUR_EQE_BA_M_S 0
+#define HNS_ROCE_EQC_CUR_EQE_BA_M_M GENMASK(31, 0)
+
+/* WORD8 */
+#define HNS_ROCE_EQC_CUR_EQE_BA_H_S 0
+#define HNS_ROCE_EQC_CUR_EQE_BA_H_M GENMASK(3, 0)
+
+#define HNS_ROCE_EQC_CONS_INDX_S 8
+#define HNS_ROCE_EQC_CONS_INDX_M GENMASK(31, 8)
+
+/* WORD9 */
+#define HNS_ROCE_EQC_NXT_EQE_BA_L_S 0
+#define HNS_ROCE_EQC_NXT_EQE_BA_L_M GENMASK(31, 0)
+
+/* WORD10 */
+#define HNS_ROCE_EQC_NXT_EQE_BA_H_S 0
+#define HNS_ROCE_EQC_NXT_EQE_BA_H_M GENMASK(19, 0)
+
+#define HNS_ROCE_V2_CEQE_COMP_CQN_S 0
+#define HNS_ROCE_V2_CEQE_COMP_CQN_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_AEQE_EVENT_TYPE_S 0
+#define HNS_ROCE_V2_AEQE_EVENT_TYPE_M GENMASK(7, 0)
+
+#define HNS_ROCE_V2_AEQE_SUB_TYPE_S 8
+#define HNS_ROCE_V2_AEQE_SUB_TYPE_M GENMASK(15, 8)
+
+#define HNS_ROCE_V2_EQ_DB_CMD_S 16
+#define HNS_ROCE_V2_EQ_DB_CMD_M GENMASK(17, 16)
+
+#define HNS_ROCE_V2_EQ_DB_TAG_S 0
+#define HNS_ROCE_V2_EQ_DB_TAG_M GENMASK(7, 0)
+
+#define HNS_ROCE_V2_EQ_DB_PARA_S 0
+#define HNS_ROCE_V2_EQ_DB_PARA_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
+#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
+
#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index cf02ac2d3596..aa0c242ddc50 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -748,12 +748,10 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
goto error_failed_cmd_init;
}
- if (hr_dev->cmd_mod) {
- ret = hns_roce_init_eq_table(hr_dev);
- if (ret) {
- dev_err(dev, "eq init failed!\n");
- goto error_failed_eq_table;
- }
+ ret = hr_dev->hw->init_eq(hr_dev);
+ if (ret) {
+ dev_err(dev, "eq init failed!\n");
+ goto error_failed_eq_table;
}
if (hr_dev->cmd_mod) {
@@ -805,8 +803,7 @@ error_failed_init_hem:
hns_roce_cmd_use_polling(hr_dev);
error_failed_use_event:
- if (hr_dev->cmd_mod)
- hns_roce_cleanup_eq_table(hr_dev);
+ hr_dev->hw->cleanup_eq(hr_dev);
error_failed_eq_table:
hns_roce_cmd_cleanup(hr_dev);
@@ -837,8 +834,7 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
if (hr_dev->cmd_mod)
hns_roce_cmd_use_polling(hr_dev);
- if (hr_dev->cmd_mod)
- hns_roce_cleanup_eq_table(hr_dev);
+ hr_dev->hw->cleanup_eq(hr_dev);
hns_roce_cmd_cleanup(hr_dev);
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 49586ec8126a..4414cea9ef56 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -65,6 +65,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
if (atomic_dec_and_test(&qp->refcount))
complete(&qp->free);
}
+EXPORT_SYMBOL_GPL(hns_roce_qp_event);
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
enum hns_roce_event type)
@@ -454,6 +455,13 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sge.sge_shift = 4;
}
+ /* The UD SQ WQE uses the extended sge area */
+ if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+ hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+ hr_qp->sq.max_gs);
+ hr_qp->sge.sge_shift = 4;
+ }
+
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
@@ -493,6 +501,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
int ret = 0;
u32 page_shift;
u32 npages;
+ int i;
mutex_init(&hr_qp->mutex);
spin_lock_init(&hr_qp->sq.lock);
@@ -500,6 +509,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->state = IB_QPS_RESET;
+ hr_qp->ibqp.qp_type = init_attr->qp_type;
+
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
else
@@ -512,18 +523,48 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_out;
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ /* allocate recv inline buf */
+ hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
+ sizeof(struct hns_roce_rinl_wqe),
+ GFP_KERNEL);
+ if (!hr_qp->rq_inl_buf.wqe_list) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;
+
+ /* First, allocate one contiguous sge buffer shared by all WQEs */
+ hr_qp->rq_inl_buf.wqe_list[0].sg_list =
+ kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
+ init_attr->cap.max_recv_sge *
+ sizeof(struct hns_roce_rinl_sge),
+ GFP_KERNEL);
+ if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
+ ret = -ENOMEM;
+ goto err_wqe_list;
+ }
+
+ for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
+ /* Then point each WQE's sg_list at its slice of that buffer */
+ hr_qp->rq_inl_buf.wqe_list[i].sg_list =
+ &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
+ init_attr->cap.max_recv_sge];
+ }
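
The receive-inline setup above allocates a single contiguous sge pool and
slices it per WQE, so only wqe_list[0].sg_list owns memory and one pair of
kfree() calls (used in the error path below and in the destroy path earlier
in this patch) tears everything down. Shape of the slicing, with
illustrative types standing in for hns_roce_rinl_wqe/hns_roce_rinl_sge:

    struct sge  { unsigned long addr; unsigned int len; };
    struct rwqe { struct sge *sg_list; };

    static void slice_sg_lists(struct rwqe *wqes, struct sge *pool,
                               int wqe_cnt, int max_sge)
    {
            for (int i = 0; i < wqe_cnt; i++)
                    wqes[i].sg_list = &pool[i * max_sge]; /* slice, no alloc */
    }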
+
if (ib_pd->uobject) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "ib_copy_from_udata error for create qp\n");
ret = -EFAULT;
- goto err_out;
+ goto err_rq_sge_list;
}
ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
&ucmd);
if (ret) {
dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
@@ -532,7 +573,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
ret = PTR_ERR(hr_qp->umem);
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
@@ -566,13 +607,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
dev_err(dev, "init_attr->create_flags error!\n");
ret = -EINVAL;
- goto err_out;
+ goto err_rq_sge_list;
}
if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
dev_err(dev, "init_attr->create_flags error!\n");
ret = -EINVAL;
- goto err_out;
+ goto err_rq_sge_list;
}
/* Set SQ size */
@@ -580,7 +621,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp);
if (ret) {
dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
- goto err_out;
+ goto err_rq_sge_list;
}
/* QP doorbell register address */
@@ -596,7 +637,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
&hr_qp->hr_buf, page_shift)) {
dev_err(dev, "hns_roce_buf_alloc error!\n");
ret = -ENOMEM;
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
@@ -678,6 +719,14 @@ err_buf:
else
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+err_rq_sge_list:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+
+err_wqe_list:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ kfree(hr_qp->rq_inl_buf.wqe_list);
+
err_out:
return ret;
}
@@ -724,8 +773,13 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
hr_qp = &hr_sqp->hr_qp;
hr_qp->port = init_attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
- hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
- hr_dev->iboe.phy_port[hr_qp->port];
+
+ /* when hw version is v1, the sqpn is allocated */
+ if (hr_dev->caps.max_sq_sg <= 2)
+ hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
+ hr_dev->iboe.phy_port[hr_qp->port];
+ else
+ hr_qp->ibqp.qp_num = 1;
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
hr_qp->ibqp.qp_num, hr_qp);
diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig
index f6d20ba88c03..2962979c06e9 100644
--- a/drivers/infiniband/hw/i40iw/Kconfig
+++ b/drivers/infiniband/hw/i40iw/Kconfig
@@ -5,4 +5,3 @@ config INFINIBAND_I40IW
select GENERIC_ALLOCATOR
---help---
Intel(R) Ethernet X722 iWARP Driver
- INET && I40IW && INFINIBAND && I40E
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 4ae9131b6350..bcddd7061fc0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -587,5 +587,8 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
int i40iw_net_event(struct notifier_block *notifier,
unsigned long event,
void *ptr);
+int i40iw_netdevice_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 77870f9e1736..abf4cd897849 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -92,14 +92,9 @@ void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
{
u8 encoded_ird_size;
- u8 pof2_cm_ird = 1;
-
- /* round-off to next powerof2 */
- while (pof2_cm_ird < cm_ird)
- pof2_cm_ird *= 2;
/* ird_size field is encoded in qp_ctx */
- switch (pof2_cm_ird) {
+ switch (cm_ird ? roundup_pow_of_two(cm_ird) : 0) {
case I40IW_HW_IRD_SETTING_64:
encoded_ird_size = 3;
break;
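
The ternary guard matters because the kernel's roundup_pow_of_two() is
undefined for 0; the expression replaces the open-coded doubling loop
removed above. A portable stand-in showing the equivalence (rup_pow2() is
an assumption for illustration, not a kernel API):

    #include <stdint.h>

    /* portable stand-in for the kernel's roundup_pow_of_two() */
    static uint32_t rup_pow2(uint32_t n)
    {
            uint32_t p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    /* guarded as in the switch above: 0 -> 0, 33 -> 64, 64 -> 64 */
    static uint32_t round_ird(uint32_t cm_ird)
    {
            return cm_ird ? rup_pow2(cm_ird) : 0;
    }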
@@ -125,13 +120,16 @@ static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
* @conn_ird: connection IRD
* @conn_ord: connection ORD
*/
-static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord)
+static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u32 conn_ird,
+ u32 conn_ord)
{
if (conn_ird > I40IW_MAX_IRD_SIZE)
conn_ird = I40IW_MAX_IRD_SIZE;
if (conn_ord > I40IW_MAX_ORD_SIZE)
conn_ord = I40IW_MAX_ORD_SIZE;
+ else if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO)
+ conn_ord = 1;
cm_node->ird_size = conn_ird;
cm_node->ord_size = conn_ord;
@@ -2878,15 +2876,13 @@ static struct i40iw_cm_listener *i40iw_make_listen_node(
* i40iw_create_cm_node - make a connection node with params
* @cm_core: cm's core
* @iwdev: iwarp device structure
- * @private_data_len: len to provate data for mpa request
- * @private_data: pointer to private data for connection
+ * @conn_param: upper layer connection parameters
* @cm_info: quad info for connection
*/
static struct i40iw_cm_node *i40iw_create_cm_node(
struct i40iw_cm_core *cm_core,
struct i40iw_device *iwdev,
- u16 private_data_len,
- void *private_data,
+ struct iw_cm_conn_param *conn_param,
struct i40iw_cm_info *cm_info)
{
struct i40iw_cm_node *cm_node;
@@ -2894,6 +2890,9 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
struct i40iw_cm_node *loopback_remotenode;
struct i40iw_cm_info loopback_cm_info;
+ u16 private_data_len = conn_param->private_data_len;
+ const void *private_data = conn_param->private_data;
+
/* create a CM connection node */
cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
if (!cm_node)
@@ -2902,6 +2901,8 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
cm_node->tcp_cntxt.client = 1;
cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
+ i40iw_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);
+
if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
loopback_remotelistener = i40iw_find_listener(
cm_core,
@@ -2935,6 +2936,10 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
private_data_len);
loopback_remotenode->pdata.size = private_data_len;
+ if (loopback_remotenode->ord_size > cm_node->ird_size)
+ loopback_remotenode->ord_size =
+ cm_node->ird_size;
+
cm_node->state = I40IW_CM_STATE_OFFLOADED;
cm_node->tcp_cntxt.rcv_nxt =
loopback_remotenode->tcp_cntxt.loc_seq_num;
@@ -3691,7 +3696,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->qhash_set = false;
i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
- cm_node->accelerated = 1;
+ cm_node->accelerated = true;
status =
i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
if (status)
@@ -3815,9 +3820,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
__func__, cm_id->tos, cm_info.user_pri);
cm_id->add_ref(cm_id);
cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
- conn_param->private_data_len,
- (void *)conn_param->private_data,
- &cm_info);
+ conn_param, &cm_info);
if (IS_ERR(cm_node)) {
ret = PTR_ERR(cm_node);
@@ -3849,11 +3852,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
cm_node->apbvt_set = true;
- i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
- if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
- !cm_node->ord_size)
- cm_node->ord_size = 1;
-
iwqp->cm_node = cm_node;
cm_node->iwqp = iwqp;
iwqp->cm_id = cm_id;
@@ -4058,7 +4056,7 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
cm_node->qhash_set = false;
i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
- cm_node->accelerated = 1;
+ cm_node->accelerated = true;
status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
0);
if (status)
@@ -4242,10 +4240,16 @@ set_qhash:
}
/**
- * i40iw_cm_disconnect_all - disconnect all connected qp's
+ * i40iw_cm_teardown_connections - teardown QPs
* @iwdev: device pointer
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @nfo: connection info, carrying the ipv4 flag and vlan id
+ * @disconnect_all: flag indicating disconnect all QPs
+ * Teardown QPs whose source or destination address matches ipaddr.
*/
-void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
+void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
+ struct i40iw_cm_info *nfo,
+ bool disconnect_all)
{
struct i40iw_cm_core *cm_core = &iwdev->cm_core;
struct list_head *list_core_temp;
@@ -4259,8 +4263,13 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
spin_lock_irqsave(&cm_core->ht_lock, flags);
list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
cm_node = container_of(list_node, struct i40iw_cm_node, list);
- atomic_inc(&cm_node->ref_count);
- list_add(&cm_node->connected_entry, &connected_list);
+ if (disconnect_all ||
+ (nfo->vlan_id == cm_node->vlan_id &&
+ (!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16) ||
+ !memcmp(cm_node->rem_addr, ipaddr, nfo->ipv4 ? 4 : 16)))) {
+ atomic_inc(&cm_node->ref_count);
+ list_add(&cm_node->connected_entry, &connected_list);
+ }
}
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
@@ -4294,6 +4303,9 @@ void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
enum i40iw_quad_hash_manage_type op =
ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
+ nfo.vlan_id = vlan_id;
+ nfo.ipv4 = ipv4;
+
/* Disable or enable qhash for listeners */
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
@@ -4303,8 +4315,6 @@ void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
memcpy(nfo.loc_addr, listen_node->loc_addr,
sizeof(nfo.loc_addr));
nfo.loc_port = listen_node->loc_port;
- nfo.ipv4 = listen_node->ipv4;
- nfo.vlan_id = listen_node->vlan_id;
nfo.user_pri = listen_node->user_pri;
if (!list_empty(&listen_node->child_listen_list)) {
i40iw_qhash_ctrl(iwdev,
@@ -4326,7 +4336,7 @@ void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
}
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- /* disconnect any connected qp's on ifdown */
+ /* teardown connected qp's on ifdown */
if (!ifup)
- i40iw_cm_disconnect_all(iwdev);
+ i40iw_cm_teardown_connections(iwdev, ipaddr, &nfo, false);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 0d5840d2c4fc..cf60c451e071 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -276,8 +276,6 @@ struct i40iw_cm_tcp_context {
u32 mss;
u8 snd_wscale;
u8 rcv_wscale;
-
- struct timeval sent_ts;
};
enum i40iw_cm_listener_state {
@@ -337,7 +335,7 @@ struct i40iw_cm_node {
u16 mpav2_ird_ord;
struct iw_cm_id *cm_id;
struct list_head list;
- int accelerated;
+ bool accelerated;
struct i40iw_cm_listener *listener;
int apbvt_set;
int accept_pend;
@@ -455,5 +453,7 @@ int i40iw_arp_table(struct i40iw_device *iwdev,
void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
u32 *ipaddr, bool ipv4, bool ifup);
-void i40iw_cm_disconnect_all(struct i40iw_device *iwdev);
+void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
+ struct i40iw_cm_info *nfo,
+ bool disconnect_all);
#endif /* I40IW_CM_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index da9821a10e0d..c74fd3309b93 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -1893,8 +1893,6 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
u32 count)
{
- if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
- return I40IW_ERR_INVALID_SIZE;
if (dev->is_pf)
i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
@@ -3872,7 +3870,6 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
struct i40iw_virt_mem virt_mem;
u32 i, mem_size;
u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
- u32 powerof2;
u64 sd_needed;
u32 loop_count = 0;
@@ -3928,8 +3925,10 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
- hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
- hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
+ hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt =
+ roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted);
+ hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt =
+ roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted);
hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
@@ -3945,16 +3944,10 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
if ((loop_count > 1000) ||
((!(loop_count % 10)) &&
(qpwanted > qpwantedoriginal * 2 / 3))) {
- if (qpwanted > FPM_MULTIPLIER) {
- qpwanted -= FPM_MULTIPLIER;
- powerof2 = 1;
- while (powerof2 < qpwanted)
- powerof2 *= 2;
- powerof2 /= 2;
- qpwanted = powerof2;
- } else {
- qpwanted /= 2;
- }
+ if (qpwanted > FPM_MULTIPLIER)
+ qpwanted = roundup_pow_of_two(qpwanted -
+ FPM_MULTIPLIER);
+ qpwanted >>= 1;
}
if (mrwanted > FPM_MULTIPLIER * 10)
mrwanted -= FPM_MULTIPLIER * 10;
@@ -3962,8 +3955,6 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
pblewanted -= FPM_MULTIPLIER * 1000;
} while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);
- sd_needed = i40iw_est_sd(dev, hmc_info);
-
i40iw_debug(dev, I40IW_DEBUG_HMC,
"loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
loop_count, sd_needed,
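
The open-coded shrink loop above is replaced with roundup_pow_of_two(); assuming the generic kernel semantics (round x up to the next power of two, returning x unchanged when it already is one), roundup_pow_of_two(x) >> 1 yields exactly what the old loop computed, and the unconditional `qpwanted >>= 1` covers the old else branch's `qpwanted /= 2`. A user-space sketch that spot-checks the equivalence (rup_pow2 is a hypothetical stand-in for the kernel helper):

#include <assert.h>
#include <stdint.h>

/* hypothetical stand-in for the kernel's roundup_pow_of_two() */
static uint32_t rup_pow2(uint32_t x)
{
	uint32_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t qpwanted;

	for (qpwanted = 1; qpwanted < (1u << 20); qpwanted++) {
		/* the removed loop: grow past qpwanted, then halve */
		uint32_t powerof2 = 1;

		while (powerof2 < qpwanted)
			powerof2 *= 2;
		powerof2 /= 2;

		assert((rup_pow2(qpwanted) >> 1) == powerof2);
	}
	return 0;
}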
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index 029083cb81d5..4b65e4140bd7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -97,6 +97,7 @@
#define RDMA_OPCODE_MASK 0x0f
#define RDMA_READ_REQ_OPCODE 1
#define Q2_BAD_FRAME_OFFSET 72
+#define Q2_FPSN_OFFSET 64
#define CQE_MAJOR_DRV 0x8000
#define I40IW_TERM_SENT 0x01
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index e96bdafbcbb3..61540e14e4b9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -385,6 +385,8 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
}
break;
+ case I40IW_AE_LLP_DOUBT_REACHABILITY:
+ break;
case I40IW_AE_PRIV_OPERATION_DENIED:
case I40IW_AE_STAG_ZERO_INVALID:
case I40IW_AE_IB_RREQ_AND_Q1_FULL:
@@ -403,7 +405,6 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
case I40IW_AE_LLP_SYN_RECEIVED:
case I40IW_AE_LLP_TOO_MANY_RETRIES:
- case I40IW_AE_LLP_DOUBT_REACHABILITY:
case I40IW_AE_LCE_QP_CATASTROPHIC:
case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
case I40IW_AE_LCE_CQ_CATASTROPHIC:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index e824296713e2..b08862978de8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -99,6 +99,10 @@ static struct notifier_block i40iw_net_notifier = {
.notifier_call = i40iw_net_event
};
+static struct notifier_block i40iw_netdevice_notifier = {
+ .notifier_call = i40iw_netdevice_event
+};
+
/**
* i40iw_find_i40e_handler - find a handler given a client info
* @ldev: pointer to a client info
@@ -483,6 +487,7 @@ static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
info.rsrc_type = iw_hmc_obj_types[i];
info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
+ info.add_sd_cnt = 0;
status = i40iw_create_hmc_obj_type(dev, &info);
if (status) {
i40iw_pr_err("create obj type %d status = %d\n",
@@ -607,7 +612,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
/* init the waitq of the cqp_requests and add them to the list */
- for (i = 0; i < I40IW_CQP_SW_SQSIZE_2048; i++) {
+ for (i = 0; i < sqsize; i++) {
init_waitqueue_head(&cqp->cqp_requests[i].waitq);
list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
}
@@ -1285,7 +1290,7 @@ static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
__LINE__, statuscpu2);
if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
break; /* SUCCESS */
- mdelay(1000);
+ msleep(1000);
retrycount++;
} while (retrycount < 14);
i40iw_wr32(hw, 0xb4040, 0x4C104C5);
@@ -1393,6 +1398,7 @@ static void i40iw_register_notifiers(void)
register_inetaddr_notifier(&i40iw_inetaddr_notifier);
register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
register_netevent_notifier(&i40iw_net_notifier);
+ register_netdevice_notifier(&i40iw_netdevice_notifier);
}
/**
@@ -1404,6 +1410,7 @@ static void i40iw_unregister_notifiers(void)
unregister_netevent_notifier(&i40iw_net_notifier);
unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
+ unregister_netdevice_notifier(&i40iw_netdevice_notifier);
}
/**
@@ -1793,7 +1800,7 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
if (reset)
iwdev->reset = true;
- i40iw_cm_disconnect_all(iwdev);
+ i40iw_cm_teardown_connections(iwdev, NULL, NULL, true);
destroy_workqueue(iwdev->virtchnl_wq);
i40iw_deinit_device(iwdev);
}
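
Among the changes above, mdelay(1000) becomes msleep(1000): in a retry loop that can wait up to 14 seconds, sleeping rather than busy-waiting is the right call whenever the code runs in process context where sleeping is allowed. A minimal sketch of the pattern (names hypothetical):

#include <linux/delay.h>
#include <linux/types.h>

/* poll a hypothetical ready() predicate, sleeping between attempts */
static bool wait_until_ready(bool (*ready)(void))
{
	int retry;

	for (retry = 0; retry < 14; retry++) {
		if (ready())
			return true;
		msleep(1000);	/* yields the CPU; mdelay() would busy-wait */
	}
	return false;
}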
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index 796a815b53fd..4c21197830b3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -48,7 +48,6 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
*rsrc, bool initial);
-static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
/**
* i40iw_puda_get_listbuf - get buffer from puda list
* @list: list to use for buffers (ILQ or IEQ)
@@ -1378,7 +1377,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
u32 rcv_wnd = hw_host_ctx[23];
/* first partial seq # in q2 */
- u32 fps = qp->q2_buf[16];
+ u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
struct list_head *rxlist = &pfpdu->rxlist;
struct list_head *plist;
@@ -1483,7 +1482,7 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
* @ieq: ieq resource
* @qp: all pending fpdu buffers
*/
-static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
+void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
{
struct i40iw_puda_buf *buf;
struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
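
The fps fix above suggests q2_buf is a byte pointer: indexing it with [16] read a single byte at the wrong offset, while the new expression reads the full 32-bit first-partial sequence number at byte offset Q2_FPSN_OFFSET (64). A stand-alone sketch of the difference (buffer contents are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define Q2_FPSN_OFFSET 64

int main(void)
{
	uint32_t q2_words[32] = { 0 };		/* 128 bytes, u32-aligned */
	uint8_t *q2_buf = (uint8_t *)q2_words;
	uint32_t fpsn = 0x12345678;

	memcpy(q2_buf + Q2_FPSN_OFFSET, &fpsn, sizeof(fpsn));

	/* byte-pointer indexing reads one byte at offset 16 */
	uint32_t old_fps = q2_buf[16];
	/* the fixed form reads the whole u32 at byte offset 64 */
	uint32_t new_fps = *(uint32_t *)(q2_buf + Q2_FPSN_OFFSET);

	printf("old=0x%x new=0x%x\n", old_fps, new_fps);
	return 0;
}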
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
index 660aa3edae56..53a7d58c84b5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
@@ -184,4 +184,5 @@ enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct
enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
+void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index 3ec5389a81a1..8afa5a67a86b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -894,20 +894,6 @@ exit:
}
/**
- * i40iw_qp_roundup - return round up QP WQ depth
- * @wqdepth: WQ depth in quantas to round up
- */
-static int i40iw_qp_round_up(u32 wqdepth)
-{
- int scount = 1;
-
- for (wqdepth--; scount <= 16; scount *= 2)
- wqdepth |= wqdepth >> scount;
-
- return ++wqdepth;
-}
-
-/**
* i40iw_get_wqe_shift - get shift count for maximum wqe size
* @sge: Maximum Scatter Gather Elements wqe
* @inline_data: Maximum inline data size
@@ -934,7 +920,7 @@ void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift)
*/
enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
{
- *sqdepth = i40iw_qp_round_up((sq_size << shift) + I40IW_SQ_RSVD);
+ *sqdepth = roundup_pow_of_two((sq_size << shift) + I40IW_SQ_RSVD);
if (*sqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
*sqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
@@ -953,7 +939,7 @@ enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
*/
enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth)
{
- *rqdepth = i40iw_qp_round_up((rq_size << shift) + I40IW_RQ_RSVD);
+ *rqdepth = roundup_pow_of_two((rq_size << shift) + I40IW_RQ_RSVD);
if (*rqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
*rqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
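
The deleted i40iw_qp_round_up() rounded up with the classic decrement/bit-smear/increment trick; roundup_pow_of_two() is the generic equivalent. A user-space sketch checking that the two agree for 32-bit depths (rup_pow2 again stands in for the kernel helper):

#include <assert.h>
#include <stdint.h>

/* the removed helper, reproduced for comparison */
static uint32_t qp_round_up(uint32_t wqdepth)
{
	int scount = 1;

	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;
	return ++wqdepth;
}

/* hypothetical stand-in for roundup_pow_of_two() */
static uint32_t rup_pow2(uint32_t x)
{
	uint32_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t d;

	for (d = 1; d < (1u << 20); d++)
		assert(qp_round_up(d) == rup_pow2(d));
	return 0;
}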
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index e73efc59a0ab..b125925641e0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -59,7 +59,6 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_CEQ_ENTRIES = 131071,
I40IW_MIN_CQ_SIZE = 1,
I40IW_MAX_CQ_SIZE = 1048575,
- I40IW_MAX_AEQ_ALLOCATE_COUNT = 255,
I40IW_DB_ID_ZERO = 0,
I40IW_MAX_WQ_FRAGMENT_COUNT = 3,
I40IW_MAX_SGE_RD = 1,
@@ -72,7 +71,7 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
I40IW_MAX_INLINE_DATA_SIZE = 48,
I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 48,
- I40IW_MAX_IRD_SIZE = 63,
+ I40IW_MAX_IRD_SIZE = 64,
I40IW_MAX_ORD_SIZE = 127,
I40IW_MAX_WQ_ENTRIES = 2048,
I40IW_Q2_BUFFER_SIZE = (248 + 100),
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 8845dba7c438..ddc1056b0b4e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -137,7 +137,7 @@ inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
}
/**
- * i40iw_inetaddr_event - system notifier for netdev events
+ * i40iw_inetaddr_event - system notifier for ipv4 addr events
* @notifier: not used
* @event: event for notifier
* @ptr: if address
@@ -200,7 +200,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
}
/**
- * i40iw_inet6addr_event - system notifier for ipv6 netdev events
+ * i40iw_inet6addr_event - system notifier for ipv6 addr events
* @notifier: not used
* @event: event for notifier
* @ptr: if address
@@ -252,7 +252,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
}
/**
- * i40iw_net_event - system notifier for net events
+ * i40iw_net_event - system notifier for netevents
* @notifier: not used
* @event: event for notifier
* @ptr: neighbor
@@ -297,6 +297,50 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
}
/**
+ * i40iw_netdevice_event - system notifier for netdev events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: netdev
+ */
+int i40iw_netdevice_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *event_netdev;
+ struct net_device *netdev;
+ struct i40iw_device *iwdev;
+ struct i40iw_handler *hdl;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+
+ hdl = i40iw_find_netdev(event_netdev);
+ if (!hdl)
+ return NOTIFY_DONE;
+
+ iwdev = &hdl->device;
+ if (iwdev->init_state < RDMA_DEV_REGISTERED || iwdev->closing)
+ return NOTIFY_DONE;
+
+ netdev = iwdev->ldev->netdev;
+ if (netdev != event_netdev)
+ return NOTIFY_DONE;
+
+ iwdev->iw_status = 1;
+
+ switch (event) {
+ case NETDEV_DOWN:
+ iwdev->iw_status = 0;
+ /* Fall through */
+ case NETDEV_UP:
+ i40iw_port_ibevent(iwdev);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
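
i40iw_netdevice_event() above follows the standard netdevice-notifier shape: decode the device with netdev_notifier_info_to_dev(), ignore events for devices it does not own, and return NOTIFY_DONE. A minimal self-contained sketch of the same pattern (module and names hypothetical):

#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_netdevice_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
	case NETDEV_DOWN:
		pr_info("demo: %s is %s\n", ndev->name,
			event == NETDEV_UP ? "up" : "down");
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_netdevice_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_nb);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");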
+/**
* i40iw_get_cqp_request - get cqp struct
* @cqp: device cqp ptr
* @wait: cqp to be used in wait mode
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 3c6f3ce88f89..70024e8e2692 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -412,6 +412,7 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
{
struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
+ i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
if (qp_num)
i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
@@ -1637,6 +1638,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
err_code = -EOVERFLOW;
goto err;
}
+ stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
iwmr->stag = stag;
iwmr->ibmr.rkey = stag;
iwmr->ibmr.lkey = stag;
@@ -2242,14 +2244,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;
ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
} else {
info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length;
ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
}
@@ -2271,7 +2271,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
info.op_type = I40IW_OP_TYPE_RDMA_READ;
info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length;
info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index bf4f14a1b4fc..9a566ee3ceff 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -170,7 +170,7 @@ err_buf:
return err;
}
-#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_FLAGS_TIMESTAMP_COMPLETION
+#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
@@ -246,7 +246,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
cq->db.dma, &cq->mcq, vector, 0,
- !!(cq->create_flags & IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
+ !!(cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
if (err)
goto err_dbmap;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8c8a16791a3f..8d2ee9322f2e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -589,6 +589,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
if (props->rss_caps.supported_qpts) {
resp.rss_caps.rx_hash_function =
MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
+
resp.rss_caps.rx_hash_fields_mask =
MLX4_IB_RX_HASH_SRC_IPV4 |
MLX4_IB_RX_HASH_DST_IPV4 |
@@ -598,6 +599,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
MLX4_IB_RX_HASH_DST_PORT_TCP |
MLX4_IB_RX_HASH_SRC_PORT_UDP |
MLX4_IB_RX_HASH_DST_PORT_UDP;
+
+ if (dev->dev->caps.tunnel_offload_mode ==
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ resp.rss_caps.rx_hash_fields_mask |=
+ MLX4_IB_RX_HASH_INNER;
}
}
@@ -2995,9 +3001,8 @@ err_steer_free_bitmap:
kfree(ibdev->ib_uc_qpns_bitmap);
err_steer_qp_release:
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_count);
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
err_counter:
for (i = 0; i < ibdev->num_ports; ++i)
mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
@@ -3102,11 +3107,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
ibdev->iboe.nb.notifier_call = NULL;
}
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_count);
- kfree(ibdev->ib_uc_qpns_bitmap);
- }
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
+ kfree(ibdev->ib_uc_qpns_bitmap);
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 313bfb9ccb71..4975f3e6596e 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -642,7 +642,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
goto err_free_mr;
mr->max_pages = max_num_sg;
-
err = mlx4_mr_enable(dev->dev, &mr->mmr);
if (err)
goto err_free_pl;
@@ -653,6 +652,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
return &mr->ibmr;
err_free_pl:
+ mr->ibmr.device = pd->device;
mlx4_free_priv_pages(mr);
err_free_mr:
(void) mlx4_mr_free(dev->dev, &mr->mmr);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 013049bcdb53..f045491f2c14 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}
+ if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
+ MLX4_IB_RX_HASH_DST_IPV4 |
+ MLX4_IB_RX_HASH_SRC_IPV6 |
+ MLX4_IB_RX_HASH_DST_IPV6 |
+ MLX4_IB_RX_HASH_SRC_PORT_TCP |
+ MLX4_IB_RX_HASH_DST_PORT_TCP |
+ MLX4_IB_RX_HASH_SRC_PORT_UDP |
+ MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+ pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
+ ucmd->rx_hash_fields_mask);
+ return (-EOPNOTSUPP);
+ }
+
if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
rss_ctx->flags = MLX4_RSS_IPV4;
@@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}
- if (rss_ctx->flags & MLX4_RSS_IPV4) {
+ if (rss_ctx->flags & MLX4_RSS_IPV4)
rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
- } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+ if (rss_ctx->flags & MLX4_RSS_IPV6)
rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
- } else {
+ if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
return (-EOPNOTSUPP);
}
@@ -707,25 +720,38 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
- if (rss_ctx->flags & MLX4_RSS_IPV4) {
+ if (rss_ctx->flags & MLX4_RSS_IPV4)
rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
- } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+ if (rss_ctx->flags & MLX4_RSS_IPV6)
rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
- } else {
+ if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
return (-EOPNOTSUPP);
}
-
} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
return (-EOPNOTSUPP);
}
+ if (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_INNER) {
+ if (dev->dev->caps.tunnel_offload_mode ==
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ /*
+ * Hash according to inner headers if they exist, otherwise
+ * according to outer headers.
+ */
+ rss_ctx->flags |= MLX4_RSS_BY_INNER_HEADERS_IPONLY;
+ } else {
+ pr_debug("RSS Hash for inner headers isn't supported\n");
+ return (-EOPNOTSUPP);
+ }
+ }
+
return 0;
}
-static int create_qp_rss(struct mlx4_ib_dev *dev, struct ib_pd *ibpd,
+static int create_qp_rss(struct mlx4_ib_dev *dev,
struct ib_qp_init_attr *init_attr,
struct mlx4_ib_create_qp_rss *ucmd,
struct mlx4_ib_qp *qp)
@@ -848,7 +874,7 @@ static struct ib_qp *_mlx4_ib_create_qp_rss(struct ib_pd *pd,
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
- err = create_qp_rss(to_mdev(pd->device), pd, init_attr, &ucmd, qp);
+ err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
if (err) {
kfree(qp);
return ERR_PTR(err);
@@ -1824,6 +1850,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev,
mlx4_ib_gid_index_to_real_index(dev, port,
grh->sgid_index);
+ if (real_sgid_index < 0)
+ return real_sgid_index;
if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
pr_err("sgid_index (%u) too large. max is %d\n",
real_sgid_index, dev->dev->caps.gid_table_len[port] - 1);
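
The new fields_mask check rejects any requested bit outside the supported set up front, so the per-field logic below it only ever sees known bits; this is the usual mask & ~SUPPORTED idiom. A hedged sketch (flag names hypothetical):

#include <errno.h>
#include <stdint.h>

#define HASH_SRC_IPV4 (1ULL << 0)
#define HASH_DST_IPV4 (1ULL << 1)
#define HASH_SRC_PORT (1ULL << 2)
#define HASH_DST_PORT (1ULL << 3)

#define HASH_SUPPORTED (HASH_SRC_IPV4 | HASH_DST_IPV4 | \
			HASH_SRC_PORT | HASH_DST_PORT)

static int validate_hash_mask(uint64_t fields_mask)
{
	/* any bit outside the supported set is an error */
	if (fields_mask & ~HASH_SUPPORTED)
		return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	/* a request with an unknown bit must be rejected */
	return validate_hash_mask(HASH_SRC_IPV4 | (1ULL << 40)) ==
	       -EOPNOTSUPP ? 0 : 1;
}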
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 470995fa38d2..6f6712f87a73 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -47,17 +47,6 @@ int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
return err;
}
-int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
- bool reset, void *out, int out_size)
-{
- u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };
-
- MLX5_SET(query_cong_statistics_in, in, opcode,
- MLX5_CMD_OP_QUERY_CONG_STATISTICS);
- MLX5_SET(query_cong_statistics_in, in, clear, reset);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out, int out_size)
{
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index af4c24596274..78ffded7cc2c 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -37,8 +37,6 @@
#include <linux/mlx5/driver.h>
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
-int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
- bool reset, void *out, int out_size);
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out, int out_size);
int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index 2d32b519bb61..985fa2637390 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -247,21 +247,30 @@ static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
}
}
-static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, int offset, u32 *var)
+static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
+ int offset, u32 *var)
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_params_out);
void *out;
void *field;
int err;
enum mlx5_ib_cong_node_type node;
+ struct mlx5_core_dev *mdev;
+
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
+ return -ENODEV;
out = kvzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
+ if (!out) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
node = mlx5_ib_param_to_node(offset);
- err = mlx5_cmd_query_cong_params(dev->mdev, node, out, outlen);
+ err = mlx5_cmd_query_cong_params(mdev, node, out, outlen);
if (err)
goto free;
@@ -270,21 +279,32 @@ static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, int offset, u32 *var)
free:
kvfree(out);
+alloc_err:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
return err;
}
-static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, int offset, u32 var)
+static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
+ int offset, u32 var)
{
int inlen = MLX5_ST_SZ_BYTES(modify_cong_params_in);
void *in;
void *field;
enum mlx5_ib_cong_node_type node;
+ struct mlx5_core_dev *mdev;
u32 attr_mask = 0;
int err;
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
+ return -ENODEV;
+
in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
+ if (!in) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
MLX5_SET(modify_cong_params_in, in, opcode,
MLX5_CMD_OP_MODIFY_CONG_PARAMS);
@@ -299,8 +319,10 @@ static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, int offset, u32 var)
MLX5_SET(field_select_r_roce_rp, field, field_select_r_roce_rp,
attr_mask);
- err = mlx5_cmd_modify_cong_params(dev->mdev, in, inlen);
+ err = mlx5_cmd_modify_cong_params(mdev, in, inlen);
kvfree(in);
+alloc_err:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
return err;
}
@@ -324,7 +346,7 @@ static ssize_t set_param(struct file *filp, const char __user *buf,
if (kstrtou32(lbuf, 0, &var))
return -EINVAL;
- ret = mlx5_ib_set_cc_params(param->dev, offset, var);
+ ret = mlx5_ib_set_cc_params(param->dev, param->port_num, offset, var);
return ret ? ret : count;
}
@@ -340,7 +362,7 @@ static ssize_t get_param(struct file *filp, char __user *buf, size_t count,
if (*pos)
return 0;
- ret = mlx5_ib_get_cc_params(param->dev, offset, &var);
+ ret = mlx5_ib_get_cc_params(param->dev, param->port_num, offset, &var);
if (ret)
return ret;
@@ -362,44 +384,51 @@ static const struct file_operations dbg_cc_fops = {
.read = get_param,
};
-void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev)
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
{
if (!mlx5_debugfs_root ||
- !dev->dbg_cc_params ||
- !dev->dbg_cc_params->root)
+ !dev->port[port_num].dbg_cc_params ||
+ !dev->port[port_num].dbg_cc_params->root)
return;
- debugfs_remove_recursive(dev->dbg_cc_params->root);
- kfree(dev->dbg_cc_params);
- dev->dbg_cc_params = NULL;
+ debugfs_remove_recursive(dev->port[port_num].dbg_cc_params->root);
+ kfree(dev->port[port_num].dbg_cc_params);
+ dev->port[port_num].dbg_cc_params = NULL;
}
-int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev)
+int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
{
struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ struct mlx5_core_dev *mdev;
int i;
if (!mlx5_debugfs_root)
goto out;
- if (!MLX5_CAP_GEN(dev->mdev, cc_query_allowed) ||
- !MLX5_CAP_GEN(dev->mdev, cc_modify_allowed))
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
goto out;
+ if (!MLX5_CAP_GEN(mdev, cc_query_allowed) ||
+ !MLX5_CAP_GEN(mdev, cc_modify_allowed))
+ goto put_mdev;
+
dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL);
if (!dbg_cc_params)
- goto out;
+ goto err;
- dev->dbg_cc_params = dbg_cc_params;
+ dev->port[port_num].dbg_cc_params = dbg_cc_params;
dbg_cc_params->root = debugfs_create_dir("cc_params",
- dev->mdev->priv.dbg_root);
+ mdev->priv.dbg_root);
if (!dbg_cc_params->root)
goto err;
for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
dbg_cc_params->params[i].offset = i;
dbg_cc_params->params[i].dev = dev;
+ dbg_cc_params->params[i].port_num = port_num;
dbg_cc_params->params[i].dentry =
debugfs_create_file(mlx5_ib_dbg_cc_name[i],
0600, dbg_cc_params->root,
@@ -408,11 +437,17 @@ int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev)
if (!dbg_cc_params->params[i].dentry)
goto err;
}
-out: return 0;
+
+put_mdev:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+out:
+ return 0;
err:
mlx5_ib_warn(dev, "cong debugfs failure\n");
- mlx5_ib_cleanup_cong_debugfs(dev);
+ mlx5_ib_cleanup_cong_debugfs(dev, port_num);
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+
/*
* We don't want to fail the driver if debugfs fails to initialize,
* so we are not forwarding the error to the user.
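
Both the get and set paths now bracket hardware access with a get/put of the native-port mdev, and the error labels unwind in reverse order of acquisition. A generic user-space sketch of that unwind shape (every name here is a hypothetical stand-in):

#include <errno.h>
#include <stdlib.h>

struct mdev;
struct mdev *get_port_mdev(int port);
void put_port_mdev(int port);
int query_params(struct mdev *m, void *out, size_t len);

static int do_query(int port)
{
	struct mdev *m;
	void *out;
	int err;

	m = get_port_mdev(port);	/* acquire first */
	if (!m)
		return -ENODEV;

	out = calloc(1, 256);
	if (!out) {
		err = -ENOMEM;
		goto put;		/* nothing else to undo yet */
	}

	err = query_params(m, out, 256);
	free(out);
put:
	put_port_mdev(port);		/* always balances the get */
	return err;
}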
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 18705cbcdc8c..5b974fb97611 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -1010,7 +1010,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
MLX5_SET(cqc, cqc, uar_page, index);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
- if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
+ if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 1003b0133a49..32a9e9228b13 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -197,10 +197,9 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
vl_15_dropped);
}
-static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
+static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
- struct mlx5_ib_dev *dev = to_mdev(ibdev);
int err;
void *out_cnt;
@@ -222,7 +221,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
if (!out_cnt)
return IB_MAD_RESULT_FAILURE;
- err = mlx5_core_query_vport_counter(dev->mdev, 0, 0,
+ err = mlx5_core_query_vport_counter(mdev, 0, 0,
port_num, out_cnt, sz);
if (!err)
pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
@@ -235,7 +234,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
if (!out_cnt)
return IB_MAD_RESULT_FAILURE;
- err = mlx5_core_query_ib_ppcnt(dev->mdev, port_num,
+ err = mlx5_core_query_ib_ppcnt(mdev, port_num,
out_cnt, sz);
if (!err)
pma_cnt_assign(pma_cnt, out_cnt);
@@ -255,9 +254,11 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
u16 *out_mad_pkey_index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_core_dev *mdev = dev->mdev;
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
+ struct mlx5_core_dev *mdev;
+ u8 mdev_port_num;
+ int ret;
if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
*out_mad_size != sizeof(*out_mad)))
@@ -265,14 +266,20 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
memset(out_mad->data, 0, sizeof(out_mad->data));
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev)
+ return IB_MAD_RESULT_FAILURE;
+
if (MLX5_CAP_GEN(mdev, vport_counters) &&
in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
- return process_pma_cmd(ibdev, port_num, in_mad, out_mad);
+ ret = process_pma_cmd(mdev, mdev_port_num, in_mad, out_mad);
} else {
- return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+ ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
in_mad, out_mad);
}
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ return ret;
}
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
@@ -519,7 +526,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
int ext_active_speed;
int err = -ENOMEM;
- if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
+ if (port < 1 || port > dev->num_ports) {
mlx5_ib_warn(dev, "invalid port number %d\n", port);
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 543d0a4c8bf3..4236c8086820 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -50,16 +50,14 @@
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
-#include <linux/mlx5/fs.h>
-#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
#include "cmd.h"
-#include <linux/mlx5/vport.h>
#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"
@@ -72,10 +70,36 @@ static char mlx5_version[] =
DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
DRIVER_VERSION "\n";
+struct mlx5_ib_event_work {
+ struct work_struct work;
+ struct mlx5_core_dev *dev;
+ void *context;
+ enum mlx5_dev_event event;
+ unsigned long param;
+};
+
enum {
MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};
+static struct workqueue_struct *mlx5_ib_event_wq;
+static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
+static LIST_HEAD(mlx5_ib_dev_list);
+/*
+ * This mutex should be held when accessing either of the above lists
+ */
+static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
+
+struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
+{
+ struct mlx5_ib_dev *dev;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ dev = mpi->ibdev;
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return dev;
+}
+
static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
@@ -115,24 +139,32 @@ static int get_port_state(struct ib_device *ibdev,
static int mlx5_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
+ struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
- roce.nb);
+ u8 port_num = roce->native_port_num;
+ struct mlx5_core_dev *mdev;
+ struct mlx5_ib_dev *ibdev;
+
+ ibdev = roce->dev;
+ mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
+ if (!mdev)
+ return NOTIFY_DONE;
switch (event) {
case NETDEV_REGISTER:
case NETDEV_UNREGISTER:
- write_lock(&ibdev->roce.netdev_lock);
- if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
- ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
- NULL : ndev;
- write_unlock(&ibdev->roce.netdev_lock);
+ write_lock(&roce->netdev_lock);
+
+ if (ndev->dev.parent == &mdev->pdev->dev)
+ roce->netdev = (event == NETDEV_UNREGISTER) ?
+ NULL : ndev;
+ write_unlock(&roce->netdev_lock);
break;
case NETDEV_CHANGE:
case NETDEV_UP:
case NETDEV_DOWN: {
- struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
+ struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
struct net_device *upper = NULL;
if (lag_ndev) {
@@ -140,27 +172,28 @@ static int mlx5_netdev_event(struct notifier_block *this,
dev_put(lag_ndev);
}
- if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
+ if ((upper == ndev || (!upper && ndev == roce->netdev))
&& ibdev->ib_active) {
struct ib_event ibev = { };
enum ib_port_state port_state;
- if (get_port_state(&ibdev->ib_dev, 1, &port_state))
- return NOTIFY_DONE;
+ if (get_port_state(&ibdev->ib_dev, port_num,
+ &port_state))
+ goto done;
- if (ibdev->roce.last_port_state == port_state)
- return NOTIFY_DONE;
+ if (roce->last_port_state == port_state)
+ goto done;
- ibdev->roce.last_port_state = port_state;
+ roce->last_port_state = port_state;
ibev.device = &ibdev->ib_dev;
if (port_state == IB_PORT_DOWN)
ibev.event = IB_EVENT_PORT_ERR;
else if (port_state == IB_PORT_ACTIVE)
ibev.event = IB_EVENT_PORT_ACTIVE;
else
- return NOTIFY_DONE;
+ goto done;
- ibev.element.port_num = 1;
+ ibev.element.port_num = port_num;
ib_dispatch_event(&ibev);
}
break;
@@ -169,7 +202,8 @@ static int mlx5_netdev_event(struct notifier_block *this,
default:
break;
}
-
+done:
+ mlx5_ib_put_native_port_mdev(ibdev, port_num);
return NOTIFY_DONE;
}
@@ -178,22 +212,88 @@ static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
{
struct mlx5_ib_dev *ibdev = to_mdev(device);
struct net_device *ndev;
+ struct mlx5_core_dev *mdev;
- ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
+ mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
+ if (!mdev)
+ return NULL;
+
+ ndev = mlx5_lag_get_roce_netdev(mdev);
if (ndev)
- return ndev;
+ goto out;
/* Ensure ndev does not disappear before we invoke dev_hold()
*/
- read_lock(&ibdev->roce.netdev_lock);
- ndev = ibdev->roce.netdev;
+ read_lock(&ibdev->roce[port_num - 1].netdev_lock);
+ ndev = ibdev->roce[port_num - 1].netdev;
if (ndev)
dev_hold(ndev);
- read_unlock(&ibdev->roce.netdev_lock);
+ read_unlock(&ibdev->roce[port_num - 1].netdev_lock);
+out:
+ mlx5_ib_put_native_port_mdev(ibdev, port_num);
return ndev;
}
+struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
+ u8 ib_port_num,
+ u8 *native_port_num)
+{
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
+ ib_port_num);
+ struct mlx5_core_dev *mdev = NULL;
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_port *port;
+
+ if (native_port_num)
+ *native_port_num = 1;
+
+ if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return ibdev->mdev;
+
+ port = &ibdev->port[ib_port_num - 1];
+ if (!port)
+ return NULL;
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi = ibdev->port[ib_port_num - 1].mp.mpi;
+ if (mpi && !mpi->unaffiliate) {
+ mdev = mpi->mdev;
+ /* If it's the master there is no need to refcount; it'll exist
+ * as long as the ib_dev exists.
+ */
+ if (!mpi->is_master)
+ mpi->mdev_refcnt++;
+ }
+ spin_unlock(&port->mp.mpi_lock);
+
+ return mdev;
+}
+
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
+{
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
+ port_num);
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_port *port;
+
+ if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return;
+
+ port = &ibdev->port[port_num - 1];
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi = ibdev->port[port_num - 1].mp.mpi;
+ if (mpi->is_master)
+ goto out;
+
+ mpi->mdev_refcnt--;
+ if (mpi->unaffiliate)
+ complete(&mpi->unref_comp);
+out:
+ spin_unlock(&port->mp.mpi_lock);
+}
+
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
u8 *active_width)
{
@@ -256,19 +356,33 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(device);
- struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_core_dev *mdev;
struct net_device *ndev, *upper;
enum ib_mtu ndev_ib_mtu;
+ bool put_mdev = true;
u16 qkey_viol_cntr;
u32 eth_prot_oper;
+ u8 mdev_port_num;
int err;
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev) {
+ /* This means the port isn't affiliated yet. Get the
+ * info for the master port instead.
+ */
+ put_mdev = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ port_num = 1;
+ }
+
/* Possible bad flows are checked before filling out props so in case
* of an error it will still be zeroed out.
*/
- err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num);
+ err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper,
+ mdev_port_num);
if (err)
- return err;
+ goto out;
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width);
@@ -284,12 +398,16 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
props->state = IB_PORT_DOWN;
props->phys_state = 3;
- mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
+ mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
props->qkey_viol_cntr = qkey_viol_cntr;
+ /* If this is a stub query for an unaffiliated port, stop here */
+ if (!put_mdev)
+ goto out;
+
ndev = mlx5_ib_get_netdev(device, port_num);
if (!ndev)
- return 0;
+ goto out;
if (mlx5_lag_is_active(dev->mdev)) {
rcu_read_lock();
@@ -312,7 +430,10 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
dev_put(ndev);
props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
- return 0;
+out:
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ return err;
}
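
mlx5_query_port_roce() above shows the calling convention for the new helpers: a NULL return means the port has no affiliated mdev yet, and every non-NULL return must be balanced by a put. A hedged in-kernel sketch using the helpers added in this patch (demo function name hypothetical):

/* sketch: act on the native port, balancing the get with a put */
static int demo_on_native_port(struct mlx5_ib_dev *ibdev, u8 port_num)
{
	struct mlx5_core_dev *mdev;
	u8 native_port;

	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, &native_port);
	if (!mdev)
		return -ENODEV;		/* port not affiliated yet */

	/* ... access mdev, addressing it by native_port ... */

	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return 0;
}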
static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
@@ -354,7 +475,7 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
roce_l3_type, gid->raw, mac, vlan,
- vlan_id);
+ vlan_id, port_num);
}
static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
@@ -438,11 +559,11 @@ static int mlx5_get_vport_access_method(struct ib_device *ibdev)
}
static void get_atomic_caps(struct mlx5_ib_dev *dev,
+ u8 atomic_size_qp,
struct ib_device_attr *props)
{
u8 tmp;
u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
- u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
u8 atomic_req_8B_endianness_mode =
MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
@@ -459,6 +580,29 @@ static void get_atomic_caps(struct mlx5_ib_dev *dev,
}
}
+static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
+ struct ib_device_attr *props)
+{
+ u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
+
+ get_atomic_caps(dev, atomic_size_qp, props);
+}
+
+static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
+ struct ib_device_attr *props)
+{
+ u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
+
+ get_atomic_caps(dev, atomic_size_qp, props);
+}
+
+bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
+{
+ struct ib_device_attr props = {};
+
+ get_atomic_caps_dc(dev, &props);
+ return (props.atomic_cap == IB_ATOMIC_HCA) ? true : false;
+}
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
@@ -587,6 +731,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
int max_rq_sg;
int max_sq_sg;
u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+ bool raw_support = !mlx5_core_mp_enabled(mdev);
struct mlx5_ib_query_device_resp resp = {};
size_t resp_len;
u64 max_tso;
@@ -650,7 +795,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, block_lb_mc))
props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
- if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
if (MLX5_CAP_ETH(mdev, csum_cap)) {
/* Legacy bit to support old userspace libraries */
props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
@@ -682,7 +827,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_RX_HASH_SRC_PORT_TCP |
MLX5_RX_HASH_DST_PORT_TCP |
MLX5_RX_HASH_SRC_PORT_UDP |
- MLX5_RX_HASH_DST_PORT_UDP;
+ MLX5_RX_HASH_DST_PORT_UDP |
+ MLX5_RX_HASH_INNER;
resp.response_length += sizeof(resp.rss_caps);
}
} else {
@@ -698,7 +844,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
- MLX5_CAP_GEN(dev->mdev, general_notification_event))
+ MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
+ raw_support)
props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
@@ -706,7 +853,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
- MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
+ MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
+ raw_support) {
/* Legacy bit to support old userspace libraries */
props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
@@ -746,7 +894,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_srq_sge = max_rq_sg - 1;
props->max_fast_reg_page_list_len =
1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
- get_atomic_caps(dev, props);
+ get_atomic_caps_qp(dev, props);
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
@@ -770,7 +918,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
if (mlx5_ib_port_link_layer(ibdev, 1) ==
- IB_LINK_LAYER_ETHERNET) {
+ IB_LINK_LAYER_ETHERNET && raw_support) {
props->rss_caps.max_rwq_indirection_tables =
1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
props->rss_caps.max_rwq_indirection_table_size =
@@ -807,7 +955,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.response_length += sizeof(resp.cqe_comp_caps);
}
- if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
+ raw_support) {
if (MLX5_CAP_QOS(mdev, packet_pacing) &&
MLX5_CAP_GEN(mdev, qos)) {
resp.packet_pacing_caps.qp_rate_limit_max =
@@ -866,7 +1015,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
+ raw_support) {
resp.response_length += sizeof(resp.striding_rq_caps);
if (MLX5_CAP_GEN(mdev, striding_rq)) {
resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
@@ -1097,7 +1247,22 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
}
if (!ret && props) {
- count = mlx5_core_reserved_gids_count(to_mdev(ibdev)->mdev);
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev;
+ bool put_mdev = true;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
+ if (!mdev) {
+ /* If the port isn't affiliated yet, query the master.
+ * The master and slave will have the same values.
+ */
+ mdev = dev->mdev;
+ port = 1;
+ put_mdev = false;
+ }
+ count = mlx5_core_reserved_gids_count(mdev);
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port);
props->gid_tbl_len -= count;
}
return ret;
@@ -1122,20 +1287,43 @@ static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
}
-static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
+static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
+ u16 index, u16 *pkey)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_core_dev *mdev;
+ bool put_mdev = true;
+ u8 mdev_port_num;
+ int err;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
+ if (!mdev) {
+ /* The port isn't affiliated yet; get the PKey from the master
+ * port. For RoCE the PKey tables will be the same.
+ */
+ put_mdev = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
+ err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
+ index, pkey);
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port);
+
+ return err;
+}
+static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
switch (mlx5_get_vport_access_method(ibdev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD:
return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
case MLX5_VPORT_ACCESS_METHOD_HCA:
case MLX5_VPORT_ACCESS_METHOD_NIC:
- return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
- pkey);
+ return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
default:
return -EINVAL;
}
@@ -1174,23 +1362,32 @@ static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
u32 value)
{
struct mlx5_hca_vport_context ctx = {};
+ struct mlx5_core_dev *mdev;
+ u8 mdev_port_num;
int err;
- err = mlx5_query_hca_vport_context(dev->mdev, 0,
- port_num, 0, &ctx);
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev)
+ return -ENODEV;
+
+ err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
if (err)
- return err;
+ goto out;
if (~ctx.cap_mask1_perm & mask) {
mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
mask, ctx.cap_mask1_perm);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
ctx.cap_mask1 = value;
ctx.cap_mask1_perm = mask;
- err = mlx5_core_modify_hca_vport_context(dev->mdev, 0,
- port_num, 0, &ctx);
+ err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
+ 0, &ctx);
+
+out:
+ mlx5_ib_put_native_port_mdev(dev, port_num);
return err;
}
@@ -1241,9 +1438,18 @@ static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
}
+static u16 calc_dynamic_bfregs(int uars_per_sys_page)
+{
+ /* A large page with non-4k UAR support might limit the dynamic size */
+ if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
+ return MLX5_MIN_DYN_BFREGS;
+
+ return MLX5_MAX_DYN_BFREGS;
+}
+
static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
struct mlx5_ib_alloc_ucontext_req_v2 *req,
- u32 *num_sys_pages)
+ struct mlx5_bfreg_info *bfregi)
{
int uars_per_sys_page;
int bfregs_per_sys_page;
@@ -1260,16 +1466,21 @@ static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
+ /* This holds the static allocation requested by the user */
req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
- *num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
-
if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
return -EINVAL;
- mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
+ bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
+ bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
+ bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
+ bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
+
+ mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
lib_uar_4k ? "yes" : "no", ref_bfregs,
- req->total_num_bfregs, *num_sys_pages);
+ req->total_num_bfregs, bfregi->total_num_bfregs,
+ bfregi->num_sys_pages);
return 0;
}
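
With the new split, calc_total_bfregs() sizes the static region from the user request and appends an aligned dynamic region on top. A worked user-space sketch with illustrative numbers (the real per-page and dynamic limits come from device capabilities, not these constants):

#include <stdio.h>

/* round x up to a multiple of a (works for any positive a) */
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	/* illustrative numbers only, not the real capability values */
	int bfregs_per_sys_page = 4;
	int requested = 10;		/* req->total_num_bfregs */
	int max_dyn = 1024;		/* calc_dynamic_bfregs() result */

	int stat_bfregs = ALIGN_UP(requested, bfregs_per_sys_page);	/* 12 */
	int num_static_sys_pages = stat_bfregs / bfregs_per_sys_page;	/* 3 */
	int num_dyn = ALIGN_UP(max_dyn, bfregs_per_sys_page);		/* 1024 */
	int total = stat_bfregs + num_dyn;				/* 1036 */
	int num_sys_pages = total / bfregs_per_sys_page;		/* 259 */

	printf("static pages %d, total bfregs %d, sys pages %d\n",
	       num_static_sys_pages, total, num_sys_pages);
	return 0;
}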
@@ -1281,13 +1492,17 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte
int i;
bfregi = &context->bfregi;
- for (i = 0; i < bfregi->num_sys_pages; i++) {
+ for (i = 0; i < bfregi->num_static_sys_pages; i++) {
err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
if (err)
goto error;
mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
}
+
+ for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
+ bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
+
return 0;
error:
@@ -1306,12 +1521,16 @@ static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *con
bfregi = &context->bfregi;
for (i = 0; i < bfregi->num_sys_pages; i++) {
- err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
- if (err) {
- mlx5_ib_warn(dev, "failed to free uar %d\n", i);
- return err;
+ if (i < bfregi->num_static_sys_pages ||
+ bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) {
+ err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to free uar %d, err=%d\n", i, err);
+ return err;
+ }
}
}
+
return 0;
}
@@ -1324,7 +1543,8 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
return err;
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
- !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+ (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
+ !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
return err;
mutex_lock(&dev->lb_mutex);
@@ -1342,7 +1562,8 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
- !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+ (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
+ !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
return;
mutex_lock(&dev->lb_mutex);
@@ -1360,6 +1581,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_alloc_ucontext_req_v2 req = {};
struct mlx5_ib_alloc_ucontext_resp resp = {};
+ struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_ucontext *context;
struct mlx5_bfreg_info *bfregi;
int ver;
@@ -1420,13 +1642,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
bfregi = &context->bfregi;
/* updates req->total_num_bfregs */
- err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages);
+ err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
if (err)
goto out_ctx;
mutex_init(&bfregi->lock);
bfregi->lib_uar_4k = lib_uar_4k;
- bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count),
+ bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
GFP_KERNEL);
if (!bfregi->count) {
err = -ENOMEM;
@@ -1463,11 +1685,12 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
}
INIT_LIST_HEAD(&context->vma_private_list);
+ mutex_init(&context->vma_private_list_mutex);
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
resp.tot_bfregs = req.total_num_bfregs;
- resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
+ resp.num_ports = dev->num_ports;
if (field_avail(typeof(resp), cqe_version, udata->outlen))
resp.response_length += sizeof(resp.cqe_version);
@@ -1486,6 +1709,12 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.response_length += sizeof(resp.eth_min_inline);
}
+ if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
+ if (mdev->clock_info)
+ resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
+ resp.response_length += sizeof(resp.clock_info_versions);
+ }
+
/*
* We don't want to expose information from the PCI bar that is located
* after 4096 bytes, so if the arch only supports larger pages, let's
@@ -1499,8 +1728,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.hca_core_clock_offset =
offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
}
- resp.response_length += sizeof(resp.hca_core_clock_offset) +
- sizeof(resp.reserved2);
+ resp.response_length += sizeof(resp.hca_core_clock_offset);
}
if (field_avail(typeof(resp), log_uar_size, udata->outlen))
@@ -1509,6 +1737,11 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
resp.response_length += sizeof(resp.num_uars_per_page);
+ if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
+ resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
+ resp.response_length += sizeof(resp.num_dyn_bfregs);
+ }
+
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
goto out_td;
@@ -1563,15 +1796,13 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
}
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi,
- int idx)
+ int uar_idx)
{
int fw_uars_per_page;
fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
- return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
- bfregi->sys_pages[idx] / fw_uars_per_page;
+ return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
}
static int get_command(unsigned long offset)
@@ -1589,6 +1820,12 @@ static int get_index(unsigned long offset)
return get_arg(offset);
}
+/* Index resides in an extra byte to enable values larger than 255 */
+static int get_extended_index(unsigned long offset)
+{
+ return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
+}
+
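
get_extended_index() widens the mmap index to 16 bits by borrowing a second byte from the page offset; assuming the driver's usual encoding (argument in bits 0-7, command in bits 8-15), the high byte of the index lands in bits 16-23. A small sketch that round-trips one value:

#include <assert.h>

/* assumed layout: arg in bits 0-7, command in bits 8-15,
 * extended-index high byte in bits 16-23
 */
static int get_arg(unsigned long offset)
{
	return offset & 0xff;
}

static int get_extended_index(unsigned long offset)
{
	return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
}

int main(void)
{
	/* encode index 0x123 around a command byte of 0x05 */
	unsigned long offset = 0x23 | (0x05 << 8) | (0x01 << 16);

	assert(get_extended_index(offset) == 0x123);
	return 0;
}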
static void mlx5_ib_vma_open(struct vm_area_struct *area)
{
/* vma_open is called when a new VMA is created on top of our VMA. This
@@ -1624,7 +1861,9 @@ static void mlx5_ib_vma_close(struct vm_area_struct *area)
* mlx5_ib_disassociate_ucontext().
*/
mlx5_ib_vma_priv_data->vma = NULL;
+ mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
list_del(&mlx5_ib_vma_priv_data->list);
+ mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
kfree(mlx5_ib_vma_priv_data);
}
@@ -1644,10 +1883,13 @@ static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
return -ENOMEM;
vma_prv->vma = vma;
+ vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
vma->vm_private_data = vma_prv;
vma->vm_ops = &mlx5_ib_vm_ops;
+ mutex_lock(&ctx->vma_private_list_mutex);
list_add(&vma_prv->list, vma_head);
+ mutex_unlock(&ctx->vma_private_list_mutex);
return 0;
}
@@ -1690,6 +1932,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
* mlx5_ib_vma_close.
*/
down_write(&owning_mm->mmap_sem);
+ mutex_lock(&context->vma_private_list_mutex);
list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
list) {
vma = vma_private->vma;
@@ -1704,6 +1947,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
list_del(&vma_private->list);
kfree(vma_private);
}
+ mutex_unlock(&context->vma_private_list_mutex);
up_write(&owning_mm->mmap_sem);
mmput(owning_mm);
put_task_struct(owning_process);
@@ -1723,6 +1967,38 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
}
}
+static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
+ struct vm_area_struct *vma,
+ struct mlx5_ib_ucontext *context)
+{
+ phys_addr_t pfn;
+ int err;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
+ return -EOPNOTSUPP;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ if (!dev->mdev->clock_info_page)
+ return -EOPNOTSUPP;
+
+ pfn = page_to_pfn(dev->mdev->clock_info_page);
+ err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
+ vma->vm_page_prot);
+ if (err)
+ return err;
+
+ mlx5_ib_dbg(dev, "mapped clock info at 0x%lx, PA 0x%llx\n",
+ vma->vm_start,
+ (unsigned long long)pfn << PAGE_SHIFT);
+
+ return mlx5_ib_set_vma_data(vma, context);
+}
+
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
struct vm_area_struct *vma,
struct mlx5_ib_ucontext *context)
@@ -1732,21 +2008,29 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
unsigned long idx;
phys_addr_t pfn, pa;
pgprot_t prot;
- int uars_per_page;
+ u32 bfreg_dyn_idx = 0;
+ u32 uar_index;
+ int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
+ int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
+ bfregi->num_static_sys_pages;
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
- uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
- idx = get_index(vma->vm_pgoff);
- if (idx % uars_per_page ||
- idx * uars_per_page >= bfregi->num_sys_pages) {
- mlx5_ib_warn(dev, "invalid uar index %lu\n", idx);
+ if (dyn_uar)
+ idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
+ else
+ idx = get_index(vma->vm_pgoff);
+
+ if (idx >= max_valid_idx) {
+ mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
+ idx, max_valid_idx);
return -EINVAL;
}
switch (cmd) {
case MLX5_IB_MMAP_WC_PAGE:
+ case MLX5_IB_MMAP_ALLOC_WC:
/* Some architectures don't support WC memory */
#if defined(CONFIG_X86)
if (!pat_enabled())
@@ -1766,7 +2050,40 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
return -EINVAL;
}
- pfn = uar_index2pfn(dev, bfregi, idx);
+ if (dyn_uar) {
+ int uars_per_page;
+
+ uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
+ bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
+ if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
+ mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
+ bfreg_dyn_idx, bfregi->total_num_bfregs);
+ return -EINVAL;
+ }
+
+ mutex_lock(&bfregi->lock);
+ /* Fail if the UAR is already allocated; the first bfreg index of
+ * each page holds its count.
+ */
+ if (bfregi->count[bfreg_dyn_idx]) {
+ mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
+ mutex_unlock(&bfregi->lock);
+ return -EINVAL;
+ }
+
+ bfregi->count[bfreg_dyn_idx]++;
+ mutex_unlock(&bfregi->lock);
+
+ err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
+ if (err) {
+ mlx5_ib_warn(dev, "UAR alloc failed\n");
+ goto free_bfreg;
+ }
+ } else {
+ uar_index = bfregi->sys_pages[idx];
+ }
+
+ pfn = uar_index2pfn(dev, uar_index);
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
vma->vm_page_prot = prot;
@@ -1775,14 +2092,32 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
if (err) {
mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
- return -EAGAIN;
+ err = -EAGAIN;
+ goto err;
}
pa = pfn << PAGE_SHIFT;
mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
vma->vm_start, &pa);
- return mlx5_ib_set_vma_data(vma, context);
+ err = mlx5_ib_set_vma_data(vma, context);
+ if (err)
+ goto err;
+
+ if (dyn_uar)
+ bfregi->sys_pages[idx] = uar_index;
+ return 0;
+
+err:
+ if (!dyn_uar)
+ return err;
+
+	mlx5_cmd_free_uar(dev->mdev, uar_index);
+
+free_bfreg:
+ mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
+
+ return err;
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -1797,6 +2132,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
case MLX5_IB_MMAP_WC_PAGE:
case MLX5_IB_MMAP_NC_PAGE:
case MLX5_IB_MMAP_REGULAR_PAGE:
+ case MLX5_IB_MMAP_ALLOC_WC:
return uar_mmap(dev, command, vma, context);
case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
@@ -1825,6 +2161,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
vma->vm_start,
(unsigned long long)pfn << PAGE_SHIFT);
break;
+ case MLX5_IB_MMAP_CLOCK_INFO:
+ return mlx5_ib_mmap_clock_info_page(dev, vma, context);
default:
return -EINVAL;
@@ -2653,7 +2991,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
return ERR_PTR(-ENOMEM);
if (domain != IB_FLOW_DOMAIN_USER ||
- flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
+ flow_attr->port > dev->num_ports ||
(flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
return ERR_PTR(-EINVAL);
@@ -2918,15 +3256,24 @@ static void delay_drop_handler(struct work_struct *work)
mutex_unlock(&delay_drop->lock);
}
-static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
- enum mlx5_dev_event event, unsigned long param)
+static void mlx5_ib_handle_event(struct work_struct *_work)
{
- struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
+ struct mlx5_ib_event_work *work =
+ container_of(_work, struct mlx5_ib_event_work, work);
+ struct mlx5_ib_dev *ibdev;
struct ib_event ibev;
bool fatal = false;
u8 port = 0;
- switch (event) {
+ if (mlx5_core_is_mp_slave(work->dev)) {
+ ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
+ if (!ibdev)
+ goto out;
+ } else {
+ ibdev = work->context;
+ }
+
+ switch (work->event) {
case MLX5_DEV_EVENT_SYS_ERROR:
ibev.event = IB_EVENT_DEVICE_FATAL;
mlx5_ib_handle_internal_error(ibdev);
@@ -2936,39 +3283,39 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
case MLX5_DEV_EVENT_PORT_UP:
case MLX5_DEV_EVENT_PORT_DOWN:
case MLX5_DEV_EVENT_PORT_INITIALIZED:
- port = (u8)param;
+ port = (u8)work->param;
/* In RoCE, port up/down events are handled in
* mlx5_netdev_event().
*/
if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
IB_LINK_LAYER_ETHERNET)
- return;
+ goto out;
- ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
+ ibev.event = (work->event == MLX5_DEV_EVENT_PORT_UP) ?
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
break;
case MLX5_DEV_EVENT_LID_CHANGE:
ibev.event = IB_EVENT_LID_CHANGE;
- port = (u8)param;
+ port = (u8)work->param;
break;
case MLX5_DEV_EVENT_PKEY_CHANGE:
ibev.event = IB_EVENT_PKEY_CHANGE;
- port = (u8)param;
+ port = (u8)work->param;
schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
break;
case MLX5_DEV_EVENT_GUID_CHANGE:
ibev.event = IB_EVENT_GID_CHANGE;
- port = (u8)param;
+ port = (u8)work->param;
break;
case MLX5_DEV_EVENT_CLIENT_REREG:
ibev.event = IB_EVENT_CLIENT_REREGISTER;
- port = (u8)param;
+ port = (u8)work->param;
break;
case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -2990,9 +3337,26 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
if (fatal)
ibdev->ib_active = false;
-
out:
- return;
+ kfree(work);
+}
+
+static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+ enum mlx5_dev_event event, unsigned long param)
+{
+ struct mlx5_ib_event_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, mlx5_ib_handle_event);
+ work->dev = dev;
+ work->param = param;
+ work->context = context;
+ work->event = event;
+
+ queue_work(mlx5_ib_event_wq, &work->work);
}
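The split above is the standard way to take events that may arrive in atomic (IRQ) context and process them where sleeping is allowed: mlx5_ib_event() only snapshots the event into a GFP_ATOMIC allocation and queues it, and the ordered workqueue preserves event order. A stripped-down, self-contained sketch of the same pattern, with hypothetical names:

/* Minimal deferred-event sketch (hypothetical names) mirroring the
 * mlx5_ib_event()/mlx5_ib_handle_event() split above; ev_wq would be
 * created with alloc_ordered_workqueue() at module init.
 */
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *ev_wq;

struct ev_work {
	struct work_struct work;
	unsigned long param;		/* snapshot of the event payload */
};

static void ev_handle(struct work_struct *_work)
{
	struct ev_work *w = container_of(_work, struct ev_work, work);

	/* ... process w->param in sleepable context ... */
	kfree(w);
}

/* May be called from atomic context; must not sleep. */
static void ev_notify(unsigned long param)
{
	struct ev_work *w = kmalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;			/* best effort, as in the driver above */
	INIT_WORK(&w->work, ev_handle);
	w->param = param;
	queue_work(ev_wq, &w->work);
}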
static int set_has_smi_cap(struct mlx5_ib_dev *dev)
@@ -3001,7 +3365,7 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
int err;
int port;
- for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
+ for (port = 1; port <= dev->num_ports; port++) {
dev->mdev->port_caps[port - 1].has_smi = false;
if (MLX5_CAP_GEN(dev->mdev, port_type) ==
MLX5_CAP_PORT_TYPE_IB) {
@@ -3028,16 +3392,15 @@ static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
int port;
- for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
+ for (port = 1; port <= dev->num_ports; port++)
mlx5_query_ext_port_caps(dev, port);
}
-static int get_port_caps(struct mlx5_ib_dev *dev)
+static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
struct ib_device_attr *dprops = NULL;
struct ib_port_attr *pprops = NULL;
int err = -ENOMEM;
- int port;
struct ib_udata uhw = {.inlen = 0, .outlen = 0};
pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
@@ -3058,22 +3421,21 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
goto out;
}
- for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
- memset(pprops, 0, sizeof(*pprops));
- err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
- if (err) {
- mlx5_ib_warn(dev, "query_port %d failed %d\n",
- port, err);
- break;
- }
- dev->mdev->port_caps[port - 1].pkey_table_len =
- dprops->max_pkeys;
- dev->mdev->port_caps[port - 1].gid_table_len =
- pprops->gid_tbl_len;
- mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
- dprops->max_pkeys, pprops->gid_tbl_len);
+ memset(pprops, 0, sizeof(*pprops));
+ err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
+ if (err) {
+ mlx5_ib_warn(dev, "query_port %d failed %d\n",
+ port, err);
+ goto out;
}
+ dev->mdev->port_caps[port - 1].pkey_table_len =
+ dprops->max_pkeys;
+ dev->mdev->port_caps[port - 1].gid_table_len =
+ pprops->gid_tbl_len;
+ mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
+ port, dprops->max_pkeys, pprops->gid_tbl_len);
+
out:
kfree(pprops);
kfree(dprops);
@@ -3363,12 +3725,14 @@ static u32 get_core_cap_flags(struct ib_device *ibdev)
enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
+ bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
u32 ret = 0;
if (ll == IB_LINK_LAYER_INFINIBAND)
return RDMA_CORE_PORT_IBA_IB;
- ret = RDMA_CORE_PORT_RAW_PACKET;
+ if (raw_support)
+ ret = RDMA_CORE_PORT_RAW_PACKET;
if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
return ret;
@@ -3458,33 +3822,33 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
}
}
-static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
+static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
int err;
- dev->roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier(&dev->roce.nb);
+ dev->roce[port_num].nb.notifier_call = mlx5_netdev_event;
+ err = register_netdevice_notifier(&dev->roce[port_num].nb);
if (err) {
- dev->roce.nb.notifier_call = NULL;
+ dev->roce[port_num].nb.notifier_call = NULL;
return err;
}
return 0;
}
-static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
+static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
- if (dev->roce.nb.notifier_call) {
- unregister_netdevice_notifier(&dev->roce.nb);
- dev->roce.nb.notifier_call = NULL;
+ if (dev->roce[port_num].nb.notifier_call) {
+ unregister_netdevice_notifier(&dev->roce[port_num].nb);
+ dev->roce[port_num].nb.notifier_call = NULL;
}
}
-static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
+static int mlx5_enable_eth(struct mlx5_ib_dev *dev, u8 port_num)
{
int err;
- err = mlx5_add_netdev_notifier(dev);
+ err = mlx5_add_netdev_notifier(dev, port_num);
if (err)
return err;
@@ -3505,7 +3869,7 @@ err_disable_roce:
mlx5_nic_vport_disable_roce(dev->mdev);
err_unregister_netdevice_notifier:
- mlx5_remove_netdev_notifier(dev);
+ mlx5_remove_netdev_notifier(dev, port_num);
return err;
}
@@ -3567,11 +3931,12 @@ static const struct mlx5_ib_counter extended_err_cnts[] = {
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
- unsigned int i;
+ int i;
for (i = 0; i < dev->num_ports; i++) {
- mlx5_core_dealloc_q_counter(dev->mdev,
- dev->port[i].cnts.set_id);
+ if (dev->port[i].cnts.set_id)
+ mlx5_core_dealloc_q_counter(dev->mdev,
+ dev->port[i].cnts.set_id);
kfree(dev->port[i].cnts.names);
kfree(dev->port[i].cnts.offsets);
}
@@ -3613,6 +3978,7 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
err_names:
kfree(cnts->names);
+ cnts->names = NULL;
return -ENOMEM;
}
@@ -3659,37 +4025,33 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
{
+ int err = 0;
int i;
- int ret;
for (i = 0; i < dev->num_ports; i++) {
- struct mlx5_ib_port *port = &dev->port[i];
+ err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
+ if (err)
+ goto err_alloc;
- ret = mlx5_core_alloc_q_counter(dev->mdev,
- &port->cnts.set_id);
- if (ret) {
+ mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
+ dev->port[i].cnts.offsets);
+
+ err = mlx5_core_alloc_q_counter(dev->mdev,
+ &dev->port[i].cnts.set_id);
+ if (err) {
mlx5_ib_warn(dev,
"couldn't allocate queue counter for port %d, err %d\n",
- i + 1, ret);
- goto dealloc_counters;
+ i + 1, err);
+ goto err_alloc;
}
-
- ret = __mlx5_ib_alloc_counters(dev, &port->cnts);
- if (ret)
- goto dealloc_counters;
-
- mlx5_ib_fill_counters(dev, port->cnts.names,
- port->cnts.offsets);
+ dev->port[i].cnts.set_id_valid = true;
}
return 0;
-dealloc_counters:
- while (--i >= 0)
- mlx5_core_dealloc_q_counter(dev->mdev,
- dev->port[i].cnts.set_id);
-
- return ret;
+err_alloc:
+ mlx5_ib_dealloc_counters(dev);
+ return err;
}
static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
@@ -3708,7 +4070,7 @@ static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
-static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev,
+static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
struct mlx5_ib_port *port,
struct rdma_hw_stats *stats)
{
@@ -3721,7 +4083,7 @@ static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev,
if (!out)
return -ENOMEM;
- ret = mlx5_core_query_q_counter(dev->mdev,
+ ret = mlx5_core_query_q_counter(mdev,
port->cnts.set_id, 0,
out, outlen);
if (ret)
@@ -3737,57 +4099,49 @@ free:
return ret;
}
-static int mlx5_ib_query_cong_counters(struct mlx5_ib_dev *dev,
- struct mlx5_ib_port *port,
- struct rdma_hw_stats *stats)
-{
- int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
- void *out;
- int ret, i;
- int offset = port->cnts.num_q_counters;
-
- out = kvzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- ret = mlx5_cmd_query_cong_counter(dev->mdev, false, out, outlen);
- if (ret)
- goto free;
-
- for (i = 0; i < port->cnts.num_cong_counters; i++) {
- stats->value[i + offset] =
- be64_to_cpup((__be64 *)(out +
- port->cnts.offsets[i + offset]));
- }
-
-free:
- kvfree(out);
- return ret;
-}
-
static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u8 port_num, int index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_port *port = &dev->port[port_num - 1];
+ struct mlx5_core_dev *mdev;
int ret, num_counters;
+ u8 mdev_port_num;
if (!stats)
return -EINVAL;
- ret = mlx5_ib_query_q_counters(dev, port, stats);
+ num_counters = port->cnts.num_q_counters + port->cnts.num_cong_counters;
+
+ /* q_counters are per IB device, query the master mdev */
+ ret = mlx5_ib_query_q_counters(dev->mdev, port, stats);
if (ret)
return ret;
- num_counters = port->cnts.num_q_counters;
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
- ret = mlx5_ib_query_cong_counters(dev, port, stats);
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
+ &mdev_port_num);
+ if (!mdev) {
+			/* If the port is not affiliated yet, it is in the
+			 * down state and has no counters, so they would
+			 * read as zero; no need to query the HCA.
+			 */
+ goto done;
+ }
+ ret = mlx5_lag_query_cong_counters(dev->mdev,
+ stats->value +
+ port->cnts.num_q_counters,
+ port->cnts.num_cong_counters,
+ port->cnts.offsets +
+ port->cnts.num_q_counters);
+
+ mlx5_ib_put_native_port_mdev(dev, port_num);
if (ret)
return ret;
- num_counters += port->cnts.num_cong_counters;
}
+done:
return num_counters;
}
@@ -3949,36 +4303,250 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
return mlx5_get_vector_affinity(dev->mdev, comp_vector);
}
-static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+/* The mlx5_ib_multiport_mutex should be held when calling this function */
+static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
+ struct mlx5_ib_multiport_info *mpi)
{
- struct mlx5_ib_dev *dev;
- enum rdma_link_layer ll;
- int port_type_cap;
- const char *name;
+ u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ struct mlx5_ib_port *port = &ibdev->port[port_num];
+ int comps;
int err;
int i;
- port_type_cap = MLX5_CAP_GEN(mdev, port_type);
- ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+ mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
- printk_once(KERN_INFO "%s", mlx5_version);
+ spin_lock(&port->mp.mpi_lock);
+ if (!mpi->ibdev) {
+ spin_unlock(&port->mp.mpi_lock);
+ return;
+ }
+ mpi->ibdev = NULL;
- dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
- if (!dev)
- return NULL;
+ spin_unlock(&port->mp.mpi_lock);
+ mlx5_remove_netdev_notifier(ibdev, port_num);
+ spin_lock(&port->mp.mpi_lock);
- dev->mdev = mdev;
+ comps = mpi->mdev_refcnt;
+ if (comps) {
+ mpi->unaffiliate = true;
+ init_completion(&mpi->unref_comp);
+ spin_unlock(&port->mp.mpi_lock);
+
+ for (i = 0; i < comps; i++)
+ wait_for_completion(&mpi->unref_comp);
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi->unaffiliate = false;
+ }
+
+ port->mp.mpi = NULL;
+
+ list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
+
+ spin_unlock(&port->mp.mpi_lock);
+
+ err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
+
+ mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
+	/* On failure, only log an error; the pointers were already
+	 * cleaned up above and the mpi was returned to the
+	 * unaffiliated list.
+	 */
+ if (err)
+ mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
+ port_num + 1);
+
+ ibdev->roce[port_num].last_port_state = IB_PORT_DOWN;
+}
+
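The mdev_refcnt/unref_comp handshake above is a common teardown idiom: snapshot the number of outstanding users under the lock, flag the object as going away, then wait for exactly that many completions before the final cleanup. A generic, self-contained sketch of the idiom with hypothetical names:

/* Generic "wait for outstanding users" sketch (hypothetical names).
 * Users pair obj_get()/obj_put(); teardown waits for the user count
 * snapshotted under the lock, as mlx5_ib_unbind_slave_port() does.
 */
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct shared_obj {
	spinlock_t lock;
	int refcnt;				/* outstanding users */
	bool dying;
	struct completion unref_comp;
};

static void obj_get(struct shared_obj *obj)
{
	spin_lock(&obj->lock);
	obj->refcnt++;
	spin_unlock(&obj->lock);
}

static void obj_put(struct shared_obj *obj)
{
	spin_lock(&obj->lock);
	obj->refcnt--;
	if (obj->dying)
		complete(&obj->unref_comp);	/* one wake-up per put */
	spin_unlock(&obj->lock);
}

static void obj_teardown(struct shared_obj *obj)
{
	int users;

	spin_lock(&obj->lock);
	users = obj->refcnt;
	if (users) {
		obj->dying = true;
		init_completion(&obj->unref_comp);
		spin_unlock(&obj->lock);

		while (users--)
			wait_for_completion(&obj->unref_comp);

		spin_lock(&obj->lock);
		obj->dying = false;
	}
	spin_unlock(&obj->lock);
}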
+/* The mlx5_ib_multiport_mutex should be held when calling this function */
+static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
+ struct mlx5_ib_multiport_info *mpi)
+{
+ u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ int err;
+
+ spin_lock(&ibdev->port[port_num].mp.mpi_lock);
+ if (ibdev->port[port_num].mp.mpi) {
+ mlx5_ib_warn(ibdev, "port %d already affiliated.\n",
+ port_num + 1);
+ spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
+ return false;
+ }
+
+ ibdev->port[port_num].mp.mpi = mpi;
+ mpi->ibdev = ibdev;
+ spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
+
+ err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
+ if (err)
+ goto unbind;
+
+ err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
+ if (err)
+ goto unbind;
+
+ err = mlx5_add_netdev_notifier(ibdev, port_num);
+ if (err) {
+ mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
+ port_num + 1);
+ goto unbind;
+ }
+
+ err = mlx5_ib_init_cong_debugfs(ibdev, port_num);
+ if (err)
+ goto unbind;
+
+ return true;
+
+unbind:
+ mlx5_ib_unbind_slave_port(ibdev, mpi);
+ return false;
+}
+
+static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
+{
+ int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
+ port_num + 1);
+ struct mlx5_ib_multiport_info *mpi;
+ int err;
+ int i;
+
+ if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return 0;
+
+ err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
+ &dev->sys_image_guid);
+ if (err)
+ return err;
+
+ err = mlx5_nic_vport_enable_roce(dev->mdev);
+ if (err)
+ return err;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ for (i = 0; i < dev->num_ports; i++) {
+ bool bound = false;
+
+ /* build a stub multiport info struct for the native port. */
+ if (i == port_num) {
+ mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
+ if (!mpi) {
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ mlx5_nic_vport_disable_roce(dev->mdev);
+ return -ENOMEM;
+ }
+
+ mpi->is_master = true;
+ mpi->mdev = dev->mdev;
+ mpi->sys_image_guid = dev->sys_image_guid;
+ dev->port[i].mp.mpi = mpi;
+ mpi->ibdev = dev;
+ mpi = NULL;
+ continue;
+ }
- dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
+ list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
+ list) {
+ if (dev->sys_image_guid == mpi->sys_image_guid &&
+ (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
+ }
+
+ if (bound) {
+ dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
+ mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
+ list_del(&mpi->list);
+ break;
+ }
+ }
+ if (!bound) {
+ get_port_caps(dev, i + 1);
+ mlx5_ib_dbg(dev, "no free port found for port %d\n",
+ i + 1);
+ }
+ }
+
+ list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return err;
+}
+
+static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
+{
+ int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
+ port_num + 1);
+ int i;
+
+ if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ for (i = 0; i < dev->num_ports; i++) {
+ if (dev->port[i].mp.mpi) {
+ /* Destroy the native port stub */
+ if (i == port_num) {
+ kfree(dev->port[i].mp.mpi);
+ dev->port[i].mp.mpi = NULL;
+ } else {
+ mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
+ mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
+ }
+ }
+ }
+
+ mlx5_ib_dbg(dev, "removing from devlist\n");
+ list_del(&dev->ib_dev_list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+
+ mlx5_nic_vport_disable_roce(dev->mdev);
+}
+
+static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_cleanup_multiport_master(dev);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ cleanup_srcu_struct(&dev->mr_srcu);
+#endif
+ kfree(dev->port);
+}
+
+static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ const char *name;
+ int err;
+ int i;
+
+ dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
GFP_KERNEL);
if (!dev->port)
- goto err_dealloc;
+ return -ENOMEM;
- rwlock_init(&dev->roce.netdev_lock);
- err = get_port_caps(dev);
+ for (i = 0; i < dev->num_ports; i++) {
+ spin_lock_init(&dev->port[i].mp.mpi_lock);
+ rwlock_init(&dev->roce[i].netdev_lock);
+ }
+
+ err = mlx5_ib_init_multiport_master(dev);
if (err)
goto err_free_port;
+	if (!mlx5_core_mp_enabled(mdev)) {
+		for (i = 1; i <= dev->num_ports; i++) {
+ err = get_port_caps(dev, i);
+ if (err)
+ break;
+ }
+ } else {
+ err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
+ }
+ if (err)
+ goto err_mp;
+
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
@@ -3991,12 +4559,37 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
- dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
- dev->ib_dev.phys_port_cnt = dev->num_ports;
+ dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors =
dev->mdev->priv.eq_table.num_comp_vectors;
dev->ib_dev.dev.parent = &mdev->pdev->dev;
+ mutex_init(&dev->flow_db.lock);
+ mutex_init(&dev->cap_mask_mutex);
+ INIT_LIST_HEAD(&dev->qp_list);
+ spin_lock_init(&dev->reset_flow_resource_lock);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ err = init_srcu_struct(&dev->mr_srcu);
+ if (err)
+		goto err_mp;
+#endif
+
+ return 0;
+err_mp:
+ mlx5_ib_cleanup_multiport_master(dev);
+
+err_free_port:
+ kfree(dev->port);
+
+	return err;
+}
+
+static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int err;
+
dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
dev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -4035,8 +4628,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
- if (ll == IB_LINK_LAYER_ETHERNET)
- dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
dev->ib_dev.query_gid = mlx5_ib_query_gid;
dev->ib_dev.add_gid = mlx5_ib_add_gid;
dev->ib_dev.del_gid = mlx5_ib_del_gid;
@@ -4093,8 +4684,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
- mlx5_ib_internal_fill_odp_caps(dev);
-
dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
if (MLX5_CAP_GEN(mdev, imaicl)) {
@@ -4105,11 +4694,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
}
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
- dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
- dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
- }
-
if (MLX5_CAP_GEN(mdev, xrc)) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
@@ -4124,8 +4708,39 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
- if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
- IB_LINK_LAYER_ETHERNET) {
+ err = init_node_data(dev);
+ if (err)
+ return err;
+
+ if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+ (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
+ MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
+ mutex_init(&dev->lb_mutex);
+
+ return 0;
+}
+
+static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ u8 port_num;
+ int err;
+ int i;
+
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ for (i = 0; i < dev->num_ports; i++) {
+ dev->roce[i].dev = dev;
+ dev->roce[i].native_port_num = i + 1;
+ dev->roce[i].last_port_state = IB_PORT_DOWN;
+ }
+
+ dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
dev->ib_dev.create_wq = mlx5_ib_create_wq;
dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
@@ -4137,142 +4752,329 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
+ err = mlx5_enable_eth(dev, port_num);
+ if (err)
+ return err;
}
- err = init_node_data(dev);
- if (err)
- goto err_free_port;
- mutex_init(&dev->flow_db.lock);
- mutex_init(&dev->cap_mask_mutex);
- INIT_LIST_HEAD(&dev->qp_list);
- spin_lock_init(&dev->reset_flow_resource_lock);
+ return 0;
+}
+
+static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ u8 port_num;
+
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET) {
- err = mlx5_enable_eth(dev);
- if (err)
- goto err_free_port;
- dev->roce.last_port_state = IB_PORT_DOWN;
+ mlx5_disable_eth(dev);
+ mlx5_remove_netdev_notifier(dev, port_num);
}
+}
- err = create_dev_resources(&dev->devr);
- if (err)
- goto err_disable_eth;
+static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
+{
+ return create_dev_resources(&dev->devr);
+}
- err = mlx5_ib_odp_init_one(dev);
- if (err)
- goto err_rsrc;
+static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
+{
+ destroy_dev_resources(&dev->devr);
+}
+
+static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_internal_fill_odp_caps(dev);
+
+ return mlx5_ib_odp_init_one(dev);
+}
+static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
+{
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
- err = mlx5_ib_alloc_counters(dev);
- if (err)
- goto err_odp;
+ dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
+ dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
+
+ return mlx5_ib_alloc_counters(dev);
}
- err = mlx5_ib_init_cong_debugfs(dev);
- if (err)
- goto err_cnt;
+ return 0;
+}
+static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+ mlx5_ib_dealloc_counters(dev);
+}
+
+static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
+{
+ return mlx5_ib_init_cong_debugfs(dev,
+ mlx5_core_native_port_num(dev->mdev) - 1);
+}
+
+static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_cleanup_cong_debugfs(dev,
+ mlx5_core_native_port_num(dev->mdev) - 1);
+}
+
+static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
+{
dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
if (!dev->mdev->priv.uar)
- goto err_cong;
+ return -ENOMEM;
+ return 0;
+}
+
+static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+}
+
+static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
+{
+ int err;
err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
if (err)
- goto err_uar_page;
+ return err;
err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
if (err)
- goto err_bfreg;
+		mlx5_free_bfreg(dev->mdev, &dev->bfreg); /* undo the first alloc */
- err = ib_register_device(&dev->ib_dev, NULL);
- if (err)
- goto err_fp_bfreg;
+ return err;
+}
- err = create_umr_res(dev);
- if (err)
- goto err_dev;
+static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+}
+
+static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
+{
+ return ib_register_device(&dev->ib_dev, NULL);
+}
+
+static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+{
+ ib_unregister_device(&dev->ib_dev);
+}
+
+static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
+{
+ return create_umr_res(dev);
+}
+
+static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
+{
+ destroy_umrc_res(dev);
+}
+static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
+{
init_delay_drop(dev);
+ return 0;
+}
+
+static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
+{
+ cancel_delay_drop(dev);
+}
+
+static int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
+{
+ int err;
+ int i;
+
for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
err = device_create_file(&dev->ib_dev.dev,
mlx5_class_attributes[i]);
if (err)
- goto err_delay_drop;
+ return err;
}
- if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
- MLX5_CAP_GEN(mdev, disable_local_lb))
- mutex_init(&dev->lb_mutex);
+ return 0;
+}
+
+static void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile,
+ int stage)
+{
+	/* Unwind only the stages that completed, in reverse order */
+ while (stage) {
+ stage--;
+ if (profile->stage[stage].cleanup)
+ profile->stage[stage].cleanup(dev);
+ }
+
+ ib_dealloc_device((struct ib_device *)dev);
+}
+
+static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num);
+
+static void *__mlx5_ib_add(struct mlx5_core_dev *mdev,
+ const struct mlx5_ib_profile *profile)
+{
+ struct mlx5_ib_dev *dev;
+ int err;
+ int i;
+
+ printk_once(KERN_INFO "%s", mlx5_version);
+
+ dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
+ if (!dev)
+ return NULL;
+ dev->mdev = mdev;
+ dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
+ MLX5_CAP_GEN(mdev, num_vhca_ports));
+
+ for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
+ if (profile->stage[i].init) {
+ err = profile->stage[i].init(dev);
+ if (err)
+ goto err_out;
+ }
+ }
+
+ dev->profile = profile;
dev->ib_active = true;
return dev;
-err_delay_drop:
- cancel_delay_drop(dev);
- destroy_umrc_res(dev);
+err_out:
+ __mlx5_ib_remove(dev, profile, i);
-err_dev:
- ib_unregister_device(&dev->ib_dev);
+ return NULL;
+}
-err_fp_bfreg:
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+static const struct mlx5_ib_profile pf_profile = {
+ STAGE_CREATE(MLX5_IB_STAGE_INIT,
+ mlx5_ib_stage_init_init,
+ mlx5_ib_stage_init_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+ mlx5_ib_stage_caps_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_ROCE,
+ mlx5_ib_stage_roce_init,
+ mlx5_ib_stage_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ mlx5_ib_stage_dev_res_init,
+ mlx5_ib_stage_dev_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_ODP,
+ mlx5_ib_stage_odp_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
+ mlx5_ib_stage_counters_init,
+ mlx5_ib_stage_counters_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
+ mlx5_ib_stage_cong_debugfs_init,
+ mlx5_ib_stage_cong_debugfs_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UAR,
+ mlx5_ib_stage_uar_init,
+ mlx5_ib_stage_uar_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+ mlx5_ib_stage_bfrag_init,
+ mlx5_ib_stage_bfrag_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ mlx5_ib_stage_ib_reg_init,
+ mlx5_ib_stage_ib_reg_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
+ mlx5_ib_stage_umr_res_init,
+ mlx5_ib_stage_umr_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
+ mlx5_ib_stage_delay_drop_init,
+ mlx5_ib_stage_delay_drop_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
+ mlx5_ib_stage_class_attr_init,
+ NULL),
+};
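STAGE_CREATE (defined in mlx5_ib.h below) is a designated array initializer, so a stage may leave its cleanup hook NULL, as MLX5_IB_STAGE_CAPS does here; __mlx5_ib_remove() skips NULL entries and __mlx5_ib_add() unwinds only the stages that actually ran. A toy standalone sketch of the same table-driven shape, with hypothetical names:

/* Toy profile/stage machinery (hypothetical names): run init hooks
 * forward, unwind completed stages in reverse on failure, and skip
 * NULL cleanup hooks.
 */
#include <stddef.h>

enum { STAGE_A, STAGE_B, STAGE_MAX };

struct stage {
	int (*init)(void *ctx);
	void (*cleanup)(void *ctx);
};

#define STAGE_CREATE(_s, _init, _cleanup) \
	[_s] = { .init = _init, .cleanup = _cleanup }

static int stage_a_init(void *ctx) { return 0; }
static void stage_a_cleanup(void *ctx) { }
static int stage_b_init(void *ctx) { return 0; }

static const struct stage profile[STAGE_MAX] = {
	STAGE_CREATE(STAGE_A, stage_a_init, stage_a_cleanup),
	STAGE_CREATE(STAGE_B, stage_b_init, NULL),	/* nothing to undo */
};

static int run_profile(void *ctx)
{
	int i, err;

	for (i = 0; i < STAGE_MAX; i++) {
		if (profile[i].init) {
			err = profile[i].init(ctx);
			if (err)
				goto unwind;
		}
	}
	return 0;

unwind:
	while (i--)
		if (profile[i].cleanup)
			profile[i].cleanup(ctx);
	return err;
}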
-err_bfreg:
- mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
+{
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_dev *dev;
+ bool bound = false;
+ int err;
-err_uar_page:
- mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+ mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
+ if (!mpi)
+ return NULL;
-err_cong:
- mlx5_ib_cleanup_cong_debugfs(dev);
-err_cnt:
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
- mlx5_ib_dealloc_counters(dev);
+ mpi->mdev = mdev;
-err_odp:
- mlx5_ib_odp_remove_one(dev);
+ err = mlx5_query_nic_vport_system_image_guid(mdev,
+ &mpi->sys_image_guid);
+ if (err) {
+ kfree(mpi);
+ return NULL;
+ }
-err_rsrc:
- destroy_dev_resources(&dev->devr);
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
+ if (dev->sys_image_guid == mpi->sys_image_guid)
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
-err_disable_eth:
- if (ll == IB_LINK_LAYER_ETHERNET) {
- mlx5_disable_eth(dev);
- mlx5_remove_netdev_notifier(dev);
+ if (bound) {
+ rdma_roce_rescan_device(&dev->ib_dev);
+ break;
+ }
}
-err_free_port:
- kfree(dev->port);
+ if (!bound) {
+ list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
+ dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
+ } else {
+ mlx5_ib_dbg(dev, "bound port %u\n", port_num + 1);
+ }
+ mutex_unlock(&mlx5_ib_multiport_mutex);
-err_dealloc:
- ib_dealloc_device((struct ib_device *)dev);
+ return mpi;
+}
- return NULL;
+static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+{
+ enum rdma_link_layer ll;
+ int port_type_cap;
+
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET) {
+ u8 port_num = mlx5_core_native_port_num(mdev) - 1;
+
+ return mlx5_ib_add_slave_port(mdev, port_num);
+ }
+
+ return __mlx5_ib_add(mdev, &pf_profile);
}
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
- struct mlx5_ib_dev *dev = context;
- enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_dev *dev;
- cancel_delay_drop(dev);
- mlx5_remove_netdev_notifier(dev);
- ib_unregister_device(&dev->ib_dev);
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
- mlx5_free_bfreg(dev->mdev, &dev->bfreg);
- mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
- mlx5_ib_cleanup_cong_debugfs(dev);
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
- mlx5_ib_dealloc_counters(dev);
- destroy_umrc_res(dev);
- mlx5_ib_odp_remove_one(dev);
- destroy_dev_resources(&dev->devr);
- if (ll == IB_LINK_LAYER_ETHERNET)
- mlx5_disable_eth(dev);
- kfree(dev->port);
- ib_dealloc_device(&dev->ib_dev);
+ if (mlx5_core_is_mp_slave(mdev)) {
+ mpi = context;
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ if (mpi->ibdev)
+ mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
+ list_del(&mpi->list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return;
+ }
+
+ dev = context;
+ __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}
static struct mlx5_interface mlx5_ib_interface = {
@@ -4289,6 +5091,10 @@ static int __init mlx5_ib_init(void)
{
int err;
+ mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
+ if (!mlx5_ib_event_wq)
+ return -ENOMEM;
+
mlx5_ib_odp_init();
err = mlx5_register_interface(&mlx5_ib_interface);
@@ -4299,6 +5105,7 @@ static int __init mlx5_ib_init(void)
static void __exit mlx5_ib_cleanup(void)
{
mlx5_unregister_interface(&mlx5_ib_interface);
+ destroy_workqueue(mlx5_ib_event_wq);
}
module_init(mlx5_ib_init);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 6dd8cac78de2..139385129973 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -70,15 +70,6 @@ enum {
MLX5_IB_MMAP_CMD_MASK = 0xff,
};
-enum mlx5_ib_mmap_cmd {
- MLX5_IB_MMAP_REGULAR_PAGE = 0,
- MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
- MLX5_IB_MMAP_WC_PAGE = 2,
- MLX5_IB_MMAP_NC_PAGE = 3,
- /* 5 is chosen in order to be compatible with old versions of libmlx5 */
- MLX5_IB_MMAP_CORE_CLOCK = 5,
-};
-
enum {
MLX5_RES_SCAT_DATA32_CQE = 0x1,
MLX5_RES_SCAT_DATA64_CQE = 0x2,
@@ -112,9 +103,16 @@ enum {
MLX5_TM_MAX_SGE = 1,
};
+enum {
+ MLX5_IB_INVALID_UAR_INDEX = BIT(31),
+ MLX5_IB_INVALID_BFREG = BIT(31),
+};
+
struct mlx5_ib_vma_private_data {
struct list_head list;
struct vm_area_struct *vma;
+ /* protect vma_private_list add/del */
+ struct mutex *vma_private_list_mutex;
};
struct mlx5_ib_ucontext {
@@ -129,6 +127,8 @@ struct mlx5_ib_ucontext {
/* Transport Domain number */
u32 tdn;
struct list_head vma_private_list;
+ /* protect vma_private_list add/del */
+ struct mutex vma_private_list_mutex;
unsigned long upd_xlt_page;
/* protect ODP/KSM */
@@ -196,6 +196,8 @@ struct mlx5_ib_flow_db {
* creates the actual hardware QP.
*/
#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
+#define MLX5_IB_QPT_DCI IB_QPT_RESERVED3
+#define MLX5_IB_QPT_DCT IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR IB_WR_RESERVED1
#define MLX5_IB_UMR_OCTOWORD 16
@@ -356,12 +358,18 @@ struct mlx5_bf {
struct mlx5_sq_bfreg *bfreg;
};
+struct mlx5_ib_dct {
+ struct mlx5_core_dct mdct;
+ u32 *in;
+};
+
struct mlx5_ib_qp {
struct ib_qp ibqp;
union {
struct mlx5_ib_qp_trans trans_qp;
struct mlx5_ib_raw_packet_qp raw_packet_qp;
struct mlx5_ib_rss_qp rss_qp;
+ struct mlx5_ib_dct dct;
};
struct mlx5_buf buf;
@@ -400,6 +408,8 @@ struct mlx5_ib_qp {
u32 rate_limit;
u32 underlay_qpn;
bool tunnel_offload_en;
+ /* storage for qp sub type when core qp type is IB_QPT_DRIVER */
+ enum ib_qp_type qp_sub_type;
};
struct mlx5_ib_cq_buf {
@@ -632,10 +642,21 @@ struct mlx5_ib_counters {
u32 num_q_counters;
u32 num_cong_counters;
u16 set_id;
+ bool set_id_valid;
+};
+
+struct mlx5_ib_multiport_info;
+
+struct mlx5_ib_multiport {
+ struct mlx5_ib_multiport_info *mpi;
+ /* To be held when accessing the multiport info */
+ spinlock_t mpi_lock;
};
struct mlx5_ib_port {
struct mlx5_ib_counters cnts;
+ struct mlx5_ib_multiport mp;
+ struct mlx5_ib_dbg_cc_params *dbg_cc_params;
};
struct mlx5_roce {
@@ -647,12 +668,15 @@ struct mlx5_roce {
struct notifier_block nb;
atomic_t next_port;
enum ib_port_state last_port_state;
+ struct mlx5_ib_dev *dev;
+ u8 native_port_num;
};
struct mlx5_ib_dbg_param {
int offset;
struct mlx5_ib_dev *dev;
struct dentry *dentry;
+ u8 port_num;
};
enum mlx5_ib_dbg_cc_types {
@@ -705,10 +729,50 @@ struct mlx5_ib_delay_drop {
struct mlx5_ib_dbg_delay_drop *dbg;
};
+enum mlx5_ib_stages {
+ MLX5_IB_STAGE_INIT,
+ MLX5_IB_STAGE_CAPS,
+ MLX5_IB_STAGE_ROCE,
+ MLX5_IB_STAGE_DEVICE_RESOURCES,
+ MLX5_IB_STAGE_ODP,
+ MLX5_IB_STAGE_COUNTERS,
+ MLX5_IB_STAGE_CONG_DEBUGFS,
+ MLX5_IB_STAGE_UAR,
+ MLX5_IB_STAGE_BFREG,
+ MLX5_IB_STAGE_IB_REG,
+ MLX5_IB_STAGE_UMR_RESOURCES,
+ MLX5_IB_STAGE_DELAY_DROP,
+ MLX5_IB_STAGE_CLASS_ATTR,
+ MLX5_IB_STAGE_MAX,
+};
+
+struct mlx5_ib_stage {
+ int (*init)(struct mlx5_ib_dev *dev);
+ void (*cleanup)(struct mlx5_ib_dev *dev);
+};
+
+#define STAGE_CREATE(_stage, _init, _cleanup) \
+ .stage[_stage] = {.init = _init, .cleanup = _cleanup}
+
+struct mlx5_ib_profile {
+ struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
+};
+
+struct mlx5_ib_multiport_info {
+ struct list_head list;
+ struct mlx5_ib_dev *ibdev;
+ struct mlx5_core_dev *mdev;
+ struct completion unref_comp;
+ u64 sys_image_guid;
+ u32 mdev_refcnt;
+ bool is_master;
+ bool unaffiliate;
+};
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
- struct mlx5_roce roce;
+ struct mlx5_roce roce[MLX5_MAX_PORTS];
int num_ports;
/* serialize update of capability mask
*/
@@ -742,12 +806,14 @@ struct mlx5_ib_dev {
struct mlx5_sq_bfreg bfreg;
struct mlx5_sq_bfreg fp_bfreg;
struct mlx5_ib_delay_drop delay_drop;
- struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ const struct mlx5_ib_profile *profile;
/* protect the user_td */
struct mutex lb_mutex;
u32 user_td;
u8 umr_fence;
+ struct list_head ib_dev_list;
+ u64 sys_image_guid;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -952,13 +1018,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
+
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
-void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
@@ -973,7 +1040,6 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
-static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
@@ -997,8 +1063,8 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
int index, enum ib_gid_type *gid_type);
-void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev);
-int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev);
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
+int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
@@ -1017,6 +1083,15 @@ void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
+void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
+ int bfregn);
+struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
+struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
+ u8 ib_port_num,
+ u8 *native_port_num);
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
+ u8 port_num);
+
static inline void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
@@ -1048,8 +1123,8 @@ static inline u32 check_cq_create_flags(u32 flags)
* It returns non-zero value for unsupported CQ
* create flags, otherwise it returns zero.
*/
- return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
- IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
+ return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
@@ -1109,10 +1184,10 @@ static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_suppor
MLX5_UARS_IN_PAGE : 1;
}
-static inline int get_num_uars(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi)
+static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
{
- return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages;
+ return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ee0ee1f9994b..556e015678de 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1206,6 +1206,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int err;
bool use_umr = true;
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
+ return ERR_PTR(-EINVAL);
+
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
@@ -1637,6 +1640,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
MLX5_SET(mkc, mkc, umr_en, 1);
+ mr->ibmr.device = pd->device;
err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
if (err)
goto err_destroy_psv;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index e2197bdda89c..f1a87a690a4c 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1207,10 +1207,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
int ret;
- ret = init_srcu_struct(&dev->mr_srcu);
- if (ret)
- return ret;
-
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
if (ret) {
@@ -1222,11 +1218,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
return 0;
}
-void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *dev)
-{
- cleanup_srcu_struct(&dev->mr_srcu);
-}
-
int mlx5_ib_odp_init(void)
{
mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 31ad28853efa..39d24bf694a8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -493,7 +493,7 @@ enum {
static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
- return get_num_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
+ return get_num_static_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
}
static int num_med_bfreg(struct mlx5_ib_dev *dev,
@@ -581,7 +581,7 @@ static int alloc_bfreg(struct mlx5_ib_dev *dev,
return bfregn;
}
-static void free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
+void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{
mutex_lock(&bfregi->lock);
bfregi->count[bfregn]--;
@@ -613,6 +613,7 @@ static int to_mlx5_st(enum ib_qp_type type)
case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
case IB_QPT_SMI: return MLX5_QP_ST_QP0;
case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1;
+ case MLX5_IB_QPT_DCI: return MLX5_QP_ST_DCI;
case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
case IB_QPT_RAW_PACKET:
case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
@@ -627,7 +628,8 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq);
static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi, int bfregn)
+ struct mlx5_bfreg_info *bfregi, int bfregn,
+ bool dyn_bfreg)
{
int bfregs_per_sys_page;
int index_of_sys_page;
@@ -637,8 +639,16 @@ static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
MLX5_NON_FP_BFREGS_PER_UAR;
index_of_sys_page = bfregn / bfregs_per_sys_page;
- offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
+ if (dyn_bfreg) {
+ index_of_sys_page += bfregi->num_static_sys_pages;
+ if (bfregn > bfregi->num_dyn_bfregs ||
+ bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
+ mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
+ return -EINVAL;
+ }
+ }
+ offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
return bfregi->sys_pages[index_of_sys_page] + offset;
}
@@ -764,7 +774,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
int page_shift = 0;
- int uar_index;
+ int uar_index = 0;
int npages;
u32 offset = 0;
int bfregn;
@@ -780,12 +790,20 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
context = to_mucontext(pd->uobject->context);
- /*
- * TBD: should come from the verbs when we have the API
- */
- if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+ if (ucmd.flags & MLX5_QP_FLAG_BFREG_INDEX) {
+ uar_index = bfregn_to_uar_index(dev, &context->bfregi,
+ ucmd.bfreg_index, true);
+ if (uar_index < 0)
+ return uar_index;
+
+ bfregn = MLX5_IB_INVALID_BFREG;
+ } else if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) {
+ /*
+ * TBD: should come from the verbs when we have the API
+ */
/* In CROSS_CHANNEL CQ and QP must use the same UAR */
bfregn = MLX5_CROSS_CHANNEL_BFREG;
+ }
else {
bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
if (bfregn < 0) {
@@ -804,8 +822,10 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
}
- uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn);
mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn,
+ false);
qp->rq.offset = 0;
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -845,7 +865,10 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_SET(qpc, qpc, page_offset, offset);
MLX5_SET(qpc, qpc, uar_page, uar_index);
- resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
+ else
+ resp->bfreg_index = MLX5_IB_INVALID_BFREG;
qp->bfregn = bfregn;
err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
@@ -874,7 +897,8 @@ err_umem:
ib_umem_release(ubuffer->umem);
err_bfreg:
- free_bfreg(dev, &context->bfregi, bfregn);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
return err;
}
@@ -887,7 +911,13 @@ static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
mlx5_ib_db_unmap_user(context, &qp->db);
if (base->ubuffer.umem)
ib_umem_release(base->ubuffer.umem);
- free_bfreg(dev, &context->bfregi, qp->bfregn);
+
+ /*
+ * Free only the BFREGs which are handled by the kernel.
+	 * BFREGs of dynamically allocated UARs are handled by userspace.
+ */
+ if (qp->bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
}
static int create_kernel_qp(struct mlx5_ib_dev *dev,
@@ -1015,6 +1045,7 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
+ (attr->qp_type == MLX5_IB_QPT_DCI) ||
(attr->qp_type == IB_QPT_XRC_INI))
return MLX5_SRQ_RQ;
else if (!qp->has_rq)
@@ -2086,20 +2117,108 @@ static const char *ib_qp_type_str(enum ib_qp_type type)
return "IB_QPT_RAW_PACKET";
case MLX5_IB_QPT_REG_UMR:
return "MLX5_IB_QPT_REG_UMR";
+ case IB_QPT_DRIVER:
+ return "IB_QPT_DRIVER";
case IB_QPT_MAX:
default:
return "Invalid QP type";
}
}
+static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct mlx5_ib_create_qp *ucmd)
+{
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_qp *qp;
+ int err = 0;
+ u32 uidx = MLX5_IB_DEFAULT_UIDX;
+ void *dctc;
+
+ if (!attr->srq || !attr->recv_cq)
+ return ERR_PTR(-EINVAL);
+
+ dev = to_mdev(pd->device);
+
+ err = get_qp_user_index(to_mucontext(pd->uobject->context),
+ ucmd, sizeof(*ucmd), &uidx);
+ if (err)
+ return ERR_PTR(err);
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
+ if (!qp->dct.in) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ qp->qp_sub_type = MLX5_IB_QPT_DCT;
+ MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
+ MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
+ MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
+ MLX5_SET(dctc, dctc, user_index, uidx);
+
+ qp->state = IB_QPS_RESET;
+
+ return &qp->ibqp;
+err_free:
+ kfree(qp);
+ return ERR_PTR(err);
+}
+
+static int set_mlx_qp_type(struct mlx5_ib_dev *dev,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx5_ib_create_qp *ucmd,
+ struct ib_udata *udata)
+{
+ enum { MLX_QP_FLAGS = MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI };
+ int err;
+
+ if (!udata)
+ return -EINVAL;
+
+ if (udata->inlen < sizeof(*ucmd)) {
+ mlx5_ib_dbg(dev, "create_qp user command is smaller than expected\n");
+ return -EINVAL;
+ }
+ err = ib_copy_from_udata(ucmd, udata, sizeof(*ucmd));
+ if (err)
+ return err;
+
+	if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCI) {
+		init_attr->qp_type = MLX5_IB_QPT_DCI;
+	} else if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCT) {
+		init_attr->qp_type = MLX5_IB_QPT_DCT;
+	} else {
+		mlx5_ib_dbg(dev, "Invalid QP flags\n");
+		return -EINVAL;
+	}
+
+ if (!MLX5_CAP_GEN(dev->mdev, dct)) {
+ mlx5_ib_dbg(dev, "DC transport is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
+ struct ib_qp_init_attr *verbs_init_attr,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev;
struct mlx5_ib_qp *qp;
u16 xrcdn = 0;
int err;
+ struct ib_qp_init_attr mlx_init_attr;
+ struct ib_qp_init_attr *init_attr = verbs_init_attr;
if (pd) {
dev = to_mdev(pd->device);
@@ -2124,6 +2243,26 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
}
+ if (init_attr->qp_type == IB_QPT_DRIVER) {
+ struct mlx5_ib_create_qp ucmd;
+
+ init_attr = &mlx_init_attr;
+ memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr));
+ err = set_mlx_qp_type(dev, init_attr, &ucmd, udata);
+ if (err)
+ return ERR_PTR(err);
+
+ if (init_attr->qp_type == MLX5_IB_QPT_DCI) {
+ if (init_attr->cap.max_recv_wr ||
+ init_attr->cap.max_recv_sge) {
+ mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n");
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ return mlx5_ib_create_dct(pd, init_attr, &ucmd);
+ }
+ }
+
switch (init_attr->qp_type) {
case IB_QPT_XRC_TGT:
case IB_QPT_XRC_INI:
@@ -2145,6 +2284,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
case IB_QPT_SMI:
case MLX5_IB_QPT_HW_GSI:
case MLX5_IB_QPT_REG_UMR:
+ case MLX5_IB_QPT_DCI:
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
@@ -2185,9 +2325,31 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
+ if (verbs_init_attr->qp_type == IB_QPT_DRIVER)
+ qp->qp_sub_type = init_attr->qp_type;
+
return &qp->ibqp;
}
+static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
+
+ if (mqp->state == IB_QPS_RTR) {
+ int err;
+
+ err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
+ return err;
+ }
+ }
+
+ kfree(mqp->dct.in);
+ kfree(mqp);
+ return 0;
+}
+
int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
@@ -2196,6 +2358,9 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp)
if (unlikely(qp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_destroy_qp(qp);
+ if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
+ return mlx5_ib_destroy_dct(mqp);
+
destroy_qp_common(dev, mqp);
kfree(mqp);
@@ -2763,7 +2928,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (!context)
return -ENOMEM;
- err = to_mlx5_st(ibqp->qp_type);
+ err = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
+ qp->qp_sub_type : ibqp->qp_type);
if (err < 0) {
mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type);
goto out;
@@ -2796,8 +2962,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
(ibqp->qp_type == IB_QPT_XRC_INI) ||
(ibqp->qp_type == IB_QPT_XRC_TGT)) {
if (mlx5_lag_is_active(dev->mdev)) {
+			u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
tx_affinity = (unsigned int)atomic_add_return(1,
- &dev->roce.next_port) %
+ &dev->roce[p].next_port) %
MLX5_MAX_PORTS + 1;
context->flags |= cpu_to_be32(tx_affinity << 24);
}
@@ -2922,7 +3089,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
mlx5_cur = to_mlx5_state(cur_state);
mlx5_new = to_mlx5_state(new_state);
- mlx5_st = to_mlx5_st(ibqp->qp_type);
+ mlx5_st = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
+ qp->qp_sub_type : ibqp->qp_type);
if (mlx5_st < 0)
goto out;
@@ -2994,6 +3162,139 @@ out:
return err;
}
+static inline bool is_valid_mask(int mask, int req, int opt)
+{
+ if ((mask & req) != req)
+ return false;
+
+ if (mask & ~(req | opt))
+ return false;
+
+ return true;
+}
+
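is_valid_mask() treats req as bits that must all be set and opt as the only additional bits tolerated. A few assert-style examples of its behavior (plain C, assuming the definition above; the mask values are illustrative):

/* Behavior of is_valid_mask() by example (illustrative values). */
#include <assert.h>

#define REQ 0x3		/* both low bits mandatory */
#define OPT 0x4		/* the only extra bit allowed */

static void is_valid_mask_examples(void)
{
	assert(is_valid_mask(0x3, REQ, OPT));	/* exactly the required bits */
	assert(is_valid_mask(0x7, REQ, OPT));	/* required plus optional */
	assert(!is_valid_mask(0x1, REQ, OPT));	/* a required bit is missing */
	assert(!is_valid_mask(0xb, REQ, OPT));	/* 0x8 is neither req nor opt */
}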
+/* Check for a valid state transition of a driver QP type.
+ * For now, DCI is the only driver QP type this function supports.
+ */
+static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state,
+ enum ib_qp_attr_mask attr_mask)
+{
+ int req = IB_QP_STATE;
+ int opt = 0;
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+ opt = IB_QP_PKEY_INDEX | IB_QP_PORT;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ req |= IB_QP_PATH_MTU;
+ opt = IB_QP_PKEY_INDEX;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
+ req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
+ IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN;
+ opt = IB_QP_MIN_RNR_TIMER;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) {
+ opt = IB_QP_MIN_RNR_TIMER;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) {
+ return is_valid_mask(attr_mask, req, opt);
+ }
+ return false;
+}
+
+/* mlx5_ib_modify_dct: modify a DCT QP
+ * Valid transitions are:
+ * RESET to INIT: must set access_flags, pkey_index and port
+ * INIT to RTR:   must set min_rnr_timer, tclass, flow_label,
+ *                mtu, gid_index and hop_limit
+ * Other transitions and attributes are illegal.
+ */
+static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ enum ib_qp_state cur_state, new_state;
+ int err = 0;
+ int required = IB_QP_STATE;
+ void *dctc;
+
+ if (!(attr_mask & IB_QP_STATE))
+ return -EINVAL;
+
+ cur_state = qp->state;
+ new_state = attr->qp_state;
+
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
+ if (!is_valid_mask(attr_mask, required, 0))
+ return -EINVAL;
+
+ if (attr->port_num == 0 ||
+		    attr->port_num > dev->num_ports) {
+ mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
+ attr->port_num, dev->num_ports);
+ return -EINVAL;
+ }
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ MLX5_SET(dctc, dctc, rre, 1);
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ MLX5_SET(dctc, dctc, rwe, 1);
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
+ if (!mlx5_ib_dc_atomic_is_supported(dev))
+ return -EOPNOTSUPP;
+ MLX5_SET(dctc, dctc, rae, 1);
+ MLX5_SET(dctc, dctc, atomic_mode, MLX5_ATOMIC_MODE_DCT_CX);
+ }
+ MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
+ MLX5_SET(dctc, dctc, port, attr->port_num);
+ MLX5_SET(dctc, dctc, counter_set_id, dev->port[attr->port_num - 1].cnts.set_id);
+
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ struct mlx5_ib_modify_qp_resp resp = {};
+ u32 min_resp_len = offsetof(typeof(resp), dctn) +
+ sizeof(resp.dctn);
+
+ if (udata->outlen < min_resp_len)
+ return -EINVAL;
+ resp.response_length = min_resp_len;
+
+ required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
+ if (!is_valid_mask(attr_mask, required, 0))
+ return -EINVAL;
+ MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
+ MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
+ MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
+ MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
+ MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
+ MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
+
+ err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
+ MLX5_ST_SZ_BYTES(create_dct_in));
+ if (err)
+ return err;
+ resp.dctn = qp->dct.mdct.mqp.qpn;
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err) {
+ mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct);
+ return err;
+ }
+ } else {
+ mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state);
+ return -EINVAL;
+ }
+ if (err)
+ qp->state = IB_QPS_ERR;
+ else
+ qp->state = new_state;
+ return err;
+}
+
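Aside: mlx5_ib_modify_dct() accepts exactly two transitions — RESET to INIT, which only stages attributes into the create_dct mailbox, and INIT to RTR, where the DCT object is actually created in firmware and its number (dctn) is copied back to user space. A compact sketch of that gate, with illustrative enum names standing in for the ib_qp_state constants:

    #include <assert.h>

    enum state { RESET, INIT, RTR };

    static int dct_transition_ok(enum state cur, enum state new)
    {
        if (cur == RESET && new == INIT)
            return 1;   /* attributes staged into the create_dct mailbox */
        if (cur == INIT && new == RTR)
            return 1;   /* mlx5_core_create_dct() fires, dctn returned */
        return 0;       /* anything else is rejected with -EINVAL */
    }

    int main(void)
    {
        assert(dct_transition_ok(RESET, INIT));
        assert(dct_transition_ok(INIT, RTR));
        assert(!dct_transition_ok(RESET, RTR));
        assert(!dct_transition_ok(RTR, INIT));
        return 0;
    }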
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
@@ -3011,8 +3312,14 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
- qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
- IB_QPT_GSI : ibqp->qp_type;
+ if (ibqp->qp_type == IB_QPT_DRIVER)
+ qp_type = qp->qp_sub_type;
+ else
+ qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
+ IB_QPT_GSI : ibqp->qp_type;
+
+ if (qp_type == MLX5_IB_QPT_DCT)
+ return mlx5_ib_modify_dct(ibqp, attr, attr_mask, udata);
mutex_lock(&qp->mutex);
@@ -3031,15 +3338,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
- !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
+ qp_type != MLX5_IB_QPT_DCI &&
+ !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
cur_state, new_state, ibqp->qp_type, attr_mask);
goto out;
+ } else if (qp_type == MLX5_IB_QPT_DCI &&
+ !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
+ mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
+ cur_state, new_state, qp_type, attr_mask);
+ goto out;
}
if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 ||
- attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports))) {
+ attr->port_num > dev->num_ports)) {
mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
attr->port_num, dev->num_ports);
goto out;
@@ -4358,16 +4671,14 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
struct rdma_ah_attr *ah_attr,
struct mlx5_qp_path *path)
{
- struct mlx5_core_dev *dev = ibdev->mdev;
memset(ah_attr, 0, sizeof(*ah_attr));
- ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
- rdma_ah_set_port_num(ah_attr, path->port);
- if (rdma_ah_get_port_num(ah_attr) == 0 ||
- rdma_ah_get_port_num(ah_attr) > MLX5_CAP_GEN(dev, num_ports))
+ if (!path->port || path->port > ibdev->num_ports)
return;
+ ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
+
rdma_ah_set_port_num(ah_attr, path->port);
rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
@@ -4578,6 +4889,71 @@ out:
return err;
}
+static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
+ struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct mlx5_core_dct *dct = &mqp->dct.mdct;
+ u32 *out;
+ u32 access_flags = 0;
+ int outlen = MLX5_ST_SZ_BYTES(query_dct_out);
+ void *dctc;
+ int err;
+ int supported_mask = IB_QP_STATE |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PORT |
+ IB_QP_MIN_RNR_TIMER |
+ IB_QP_AV |
+ IB_QP_PATH_MTU |
+ IB_QP_PKEY_INDEX;
+
+ if (qp_attr_mask & ~supported_mask)
+ return -EINVAL;
+ if (mqp->state != IB_QPS_RTR)
+ return -EINVAL;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_dct_query(dev->mdev, dct, out, outlen);
+ if (err)
+ goto out;
+
+ dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry);
+
+ if (qp_attr_mask & IB_QP_STATE)
+ qp_attr->qp_state = IB_QPS_RTR;
+
+ if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
+ if (MLX5_GET(dctc, dctc, rre))
+ access_flags |= IB_ACCESS_REMOTE_READ;
+ if (MLX5_GET(dctc, dctc, rwe))
+ access_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (MLX5_GET(dctc, dctc, rae))
+ access_flags |= IB_ACCESS_REMOTE_ATOMIC;
+ qp_attr->qp_access_flags = access_flags;
+ }
+
+ if (qp_attr_mask & IB_QP_PORT)
+ qp_attr->port_num = MLX5_GET(dctc, dctc, port);
+ if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
+ qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
+ if (qp_attr_mask & IB_QP_AV) {
+ qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass);
+ qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label);
+ qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index);
+ qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit);
+ }
+ if (qp_attr_mask & IB_QP_PATH_MTU)
+ qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu);
+ if (qp_attr_mask & IB_QP_PKEY_INDEX)
+ qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index);
+out:
+ kfree(out);
+ return err;
+}
+
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
@@ -4597,6 +4973,10 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
memset(qp_init_attr, 0, sizeof(*qp_init_attr));
memset(qp_attr, 0, sizeof(*qp_attr));
+ if (unlikely(qp->qp_sub_type == MLX5_IB_QPT_DCT))
+ return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
+ qp_attr_mask, qp_init_attr);
+
mutex_lock(&qp->mutex);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
@@ -4686,13 +5066,10 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
int err;
err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
- if (err) {
+ if (err)
mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
- return err;
- }
kfree(xrcd);
-
return 0;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index c6fe89d79248..2fe503e86c1d 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
+ ret = get_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages);
if (ret < 0)
goto out;
@@ -623,13 +623,12 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
page = dev->db_tab->page + end;
alloc:
- page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
- &page->mapping, GFP_KERNEL);
+ page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
+ &page->mapping, GFP_KERNEL);
if (!page->db_rec) {
ret = -ENOMEM;
goto out;
}
- memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);
ret = mthca_MAP_ICM_page(dev, page->mapping,
mthca_uarc_virt(dev, &dev->driver_uar, i));
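Aside: this hunk and several later ones in the series replace dma_alloc_coherent() followed by memset() with dma_zalloc_coherent(), which hands back zeroed DMA-coherent memory in one call. The userspace analogue is swapping malloc+memset for calloc — a minimal sketch, not driver code:

    #include <stdlib.h>
    #include <string.h>

    static void *alloc_ring_old(size_t sz)
    {
        void *va = malloc(sz);

        if (va)
            memset(va, 0, sz);   /* separate zeroing step */
        return va;
    }

    static void *alloc_ring_new(size_t sz)
    {
        return calloc(1, sz);    /* zeroed in a single call */
    }

    int main(void)
    {
        void *a = alloc_ring_old(4096);
        void *b = alloc_ring_new(4096);

        free(a);
        free(b);
        return 0;
    }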
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
deleted file mode 100644
index 5fe56e810739..000000000000
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MTHCA_USER_H
-#define MTHCA_USER_H
-
-#include <linux/types.h>
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define MTHCA_UVERBS_ABI_VERSION 1
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct mthca_alloc_ucontext_resp {
- __u32 qp_tab_size;
- __u32 uarc_size;
-};
-
-struct mthca_alloc_pd_resp {
- __u32 pdn;
- __u32 reserved;
-};
-
-struct mthca_reg_mr {
-/*
- * Mark the memory region with a DMA attribute that causes
- * in-flight DMA to be flushed when the region is written to:
- */
-#define MTHCA_MR_DMASYNC 0x1
- __u32 mr_attrs;
- __u32 reserved;
-};
-
-struct mthca_create_cq {
- __u32 lkey;
- __u32 pdn;
- __u64 arm_db_page;
- __u64 set_db_page;
- __u32 arm_db_index;
- __u32 set_db_index;
-};
-
-struct mthca_create_cq_resp {
- __u32 cqn;
- __u32 reserved;
-};
-
-struct mthca_resize_cq {
- __u32 lkey;
- __u32 reserved;
-};
-
-struct mthca_create_srq {
- __u32 lkey;
- __u32 db_index;
- __u64 db_page;
-};
-
-struct mthca_create_srq_resp {
- __u32 srqn;
- __u32 reserved;
-};
-
-struct mthca_create_qp {
- __u32 lkey;
- __u32 reserved;
- __u64 sq_db_page;
- __u64 rq_db_page;
- __u32 sq_db_index;
- __u32 rq_db_index;
-};
-
-#endif /* MTHCA_USER_H */
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index c56ca2a74df5..6cdfbf8c5674 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1365,7 +1365,7 @@ static int mini_cm_del_listen(struct nes_cm_core *cm_core,
static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
struct nes_cm_node *cm_node)
{
- cm_node->accelerated = 1;
+ cm_node->accelerated = true;
if (cm_node->accept_pend) {
BUG_ON(!cm_node->listener);
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index d827d03e3941..b9cc02b4e8d5 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -279,7 +279,6 @@ struct nes_cm_tcp_context {
u8 rcv_wscale;
struct nes_cm_tsa_context tsa_cntxt;
- struct timeval sent_ts;
};
@@ -341,7 +340,7 @@ struct nes_cm_node {
u16 mpa_frame_size;
struct iw_cm_id *cm_id;
struct list_head list;
- int accelerated;
+ bool accelerated;
struct nes_cm_listener *listener;
enum nes_cm_conn_type conn_type;
struct nes_vnic *nesvnic;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 0ba695a88b62..9904918589a4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -380,11 +380,10 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
q->len = len;
q->entry_size = entry_size;
q->size = len * entry_size;
- q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
- &q->dma, GFP_KERNEL);
+ q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
+ &q->dma, GFP_KERNEL);
if (!q->va)
return -ENOMEM;
- memset(q->va, 0, q->size);
return 0;
}
@@ -1819,12 +1818,11 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
return -ENOMEM;
ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
- cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+ cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
if (!cq->va) {
status = -ENOMEM;
goto mem_err;
}
- memset(cq->va, 0, cq->len);
page_size = cq->len / hw_pages;
cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
@@ -2212,10 +2210,9 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
qp->sq.max_cnt = max_wqe_allocated;
len = (hw_pages * hw_page_size);
- qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->sq.va)
return -EINVAL;
- memset(qp->sq.va, 0, len);
qp->sq.len = len;
qp->sq.pa = pa;
qp->sq.entry_size = dev->attr.wqe_size;
@@ -2263,10 +2260,9 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
qp->rq.max_cnt = max_rqe_allocated;
len = (hw_pages * hw_page_size);
- qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->rq.va)
return -ENOMEM;
- memset(qp->rq.va, 0, len);
qp->rq.pa = pa;
qp->rq.len = len;
qp->rq.entry_size = dev->attr.rqe_size;
@@ -2320,11 +2316,10 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
if (dev->attr.ird == 0)
return 0;
- qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
- &pa, GFP_KERNEL);
+ qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
+ GFP_KERNEL);
if (!qp->ird_q_va)
return -ENOMEM;
- memset(qp->ird_q_va, 0, ird_q_len);
ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
pa, ird_page_size);
for (; i < ird_q_len / dev->attr.rqe_size; i++) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index e528d7acb7f6..24d20a4aa262 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -73,15 +73,13 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
sizeof(struct ocrdma_rdma_stats_resp));
- mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
- &mem->pa, GFP_KERNEL);
+ mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
if (!mem->va) {
pr_err("%s: stats mbox allocation failed\n", __func__);
return false;
}
- memset(mem->va, 0, mem->size);
-
/* Alloc debugfs mem */
mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
if (!mem->debugfs_mem)
@@ -834,7 +832,7 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
dev->reset_stats.type = OCRDMA_RESET_STATS;
dev->reset_stats.dev = dev;
- if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
+ if (!debugfs_create_file("reset_stats", 0200, dev->dir,
&dev->reset_stats, &ocrdma_dbg_ops))
goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 7866fd8051f6..8009bdad4e5b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -461,7 +461,7 @@ retry:
static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
struct ocrdma_pd *pd)
{
- return (uctx->cntxt_pd == pd ? true : false);
+ return (uctx->cntxt_pd == pd);
}
static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
@@ -550,13 +550,12 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&ctx->mm_head);
mutex_init(&ctx->mm_list_lock);
- ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
- &ctx->ah_tbl.pa, GFP_KERNEL);
+ ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
+ &ctx->ah_tbl.pa, GFP_KERNEL);
if (!ctx->ah_tbl.va) {
kfree(ctx);
return ERR_PTR(-ENOMEM);
}
- memset(ctx->ah_tbl.va, 0, map_len);
ctx->ah_tbl.len = map_len;
memset(&resp, 0, sizeof(resp));
@@ -885,13 +884,12 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
return -ENOMEM;
for (i = 0; i < mr->num_pbls; i++) {
- va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+ va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
if (!va) {
ocrdma_free_mr_pbl_tbl(dev, mr);
status = -ENOMEM;
break;
}
- memset(va, 0, dma_len);
mr->pbl_table[i].va = va;
mr->pbl_table[i].pa = pa;
}
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 50812b33291b..a9c3378bca38 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -264,7 +264,7 @@ static int qedr_register_device(struct qedr_dev *dev)
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int rc;
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
index b7587f10e7de..78b49002fbd2 100644
--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -164,6 +164,13 @@ struct rdma_srq_sge {
__le32 l_key;
};
+/* Rdma doorbell data for flags update */
+struct rdma_pwm_flags_data {
+ __le16 icid; /* internal CID */
+ u8 agg_flags; /* aggregative flags */
+ u8 reserved;
+};
+
/* Rdma doorbell data for SQ and RQ */
struct rdma_pwm_val16_data {
__le16 icid;
@@ -180,12 +187,16 @@ struct rdma_pwm_val32_data {
__le16 icid;
u8 agg_flags;
u8 params;
-#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3
-#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
-#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
-#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
-#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x1F
-#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT 4
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x7
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 5
__le32 value;
};
@@ -478,23 +489,25 @@ struct rdma_sq_fmr_wqe {
__le16 dif_app_tag_mask;
__le16 dif_runt_crc_value;
__le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
-#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF
-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7
- __le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
+ __le32 reserved5;
};
/* First element (16 bytes) of fmr wqe */
@@ -558,23 +571,25 @@ struct rdma_sq_fmr_wqe_3rd {
__le16 dif_app_tag_mask;
__le16 dif_runt_crc_value;
__le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7
- __le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT 7
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0xFF
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 8
+ __le32 reserved5;
};
struct rdma_sq_local_inv_wqe {
@@ -606,20 +621,22 @@ struct rdma_sq_rdma_wqe {
__le32 xrc_srq;
u8 req_type;
u8 flags;
-#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0
-#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1
-#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2
-#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3
-#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
-#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
-#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x3
-#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 6
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7
u8 wqe_size;
u8 prev_wqe_size;
struct regpair remote_va;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index b26aa88dab48..53f00dbf313f 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -604,12 +604,11 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
return ERR_PTR(-ENOMEM);
for (i = 0; i < pbl_info->num_pbls; i++) {
- va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
- &pa, flags);
+ va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
+ &pa, flags);
if (!va)
goto err;
- memset(va, 0, pbl_info->pbl_size);
pbl_table[i].va = va;
pbl_table[i].pa = pa;
}
@@ -3040,7 +3039,7 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swqe->wqe_size = 2;
swqe2 = qed_chain_produce(&qp->sq.pbl);
- swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
+ swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
wr, bad_wr);
swqe->length = cpu_to_le32(length);
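Aside: immediate data in the IB verbs API is big-endian (__be32), while the qedr hardware ring stores little-endian words, so the fix above converts through CPU byte order on the send path; the mirror-image fix for the poll path appears in a later hunk of this file. A minimal round-trip demonstration, assuming glibc's endian.h:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t imm_be  = htobe32(0x11223344);        /* as user space hands it in */
        uint32_t ring_le = htole32(be32toh(imm_be));   /* send path: BE -> CPU -> LE */
        uint32_t back_be = htobe32(le32toh(ring_le));  /* poll path: LE -> CPU -> BE */

        printf("be=%#x le=%#x back=%#x\n", imm_be, ring_le, back_be);
        return back_be == imm_be ? 0 : 1;
    }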
@@ -3471,9 +3470,9 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
break;
case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
if (qp->state != QED_ROCE_QP_STATE_ERR)
- DP_ERR(dev,
- "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
- cq->icid, qp->icid);
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
IB_WC_WR_FLUSH_ERR, 1);
break;
@@ -3591,7 +3590,7 @@ static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
wc->byte_len = le32_to_cpu(resp->length);
if (resp->flags & QEDR_RESP_IMM) {
- wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
+ wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
wc->wc_flags |= IB_WC_WITH_IMM;
if (resp->flags & QEDR_RESP_RDMA)
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 092ed8103842..0235f76bbc72 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1428,8 +1428,6 @@ u64 qib_sps_ints(void);
*/
dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
size_t, int);
-const char *qib_get_unit_name(int unit);
-const char *qib_get_card_name(struct rvt_dev_info *rdi);
struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
/*
@@ -1488,15 +1486,15 @@ extern struct mutex qib_mutex;
#define qib_dev_err(dd, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
- qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define qib_dev_warn(dd, fmt, ...) \
dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
- qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define qib_dev_porterr(dd, port, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
- qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (dd)->unit, (port), \
##__VA_ARGS__)
#define qib_devinfo(pcidev, fmt, ...) \
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 33d3335385e8..3117cc5f2a9a 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -81,22 +81,6 @@ MODULE_DESCRIPTION("Intel IB driver");
struct qlogic_ib_stats qib_stats;
-const char *qib_get_unit_name(int unit)
-{
- static char iname[16];
-
- snprintf(iname, sizeof(iname), "infinipath%u", unit);
- return iname;
-}
-
-const char *qib_get_card_name(struct rvt_dev_info *rdi)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(ibdev,
- struct qib_devdata, verbs_dev);
- return qib_get_unit_name(dd->unit);
-}
-
struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi)
{
struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 33a2e74c8495..5838b3bf34b9 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -163,8 +163,7 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
if (bguid[6] == 0xff) {
if (bguid[5] == 0xff) {
qib_dev_err(dd,
- "Can't set %s GUID from base, wraps to OUI!\n",
- qib_get_unit_name(t));
+ "Can't set GUID from base, wraps to OUI!\n");
dd->base_guid = 0;
goto bail;
}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 2d6a191afec0..f7593b5e2b76 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -58,7 +58,7 @@ static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
-static unsigned int qib_poll(struct file *, struct poll_table_struct *);
+static __poll_t qib_poll(struct file *, struct poll_table_struct *);
static int qib_mmapf(struct file *, struct vm_area_struct *);
/*
@@ -568,20 +568,16 @@ done:
static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
{
struct qib_pportdata *ppd = rcd->ppd;
- int i, any = 0, pidx = -1;
+ int i, pidx = -1;
+ bool any = false;
u16 lkey = key & 0x7FFF;
- int ret;
- if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
+ if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF))
/* nothing to do; this key always valid */
- ret = 0;
- goto bail;
- }
+ return 0;
- if (!lkey) {
- ret = -EINVAL;
- goto bail;
- }
+ if (!lkey)
+ return -EINVAL;
/*
* Set the full membership bit, because it has to be
@@ -594,18 +590,14 @@ static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
if (!rcd->pkeys[i] && pidx == -1)
pidx = i;
- if (rcd->pkeys[i] == key) {
- ret = -EEXIST;
- goto bail;
- }
+ if (rcd->pkeys[i] == key)
+ return -EEXIST;
}
- if (pidx == -1) {
- ret = -EBUSY;
- goto bail;
- }
- for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (pidx == -1)
+ return -EBUSY;
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
if (!ppd->pkeys[i]) {
- any++;
+ any = true;
continue;
}
if (ppd->pkeys[i] == key) {
@@ -613,44 +605,34 @@ static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
if (atomic_inc_return(pkrefs) > 1) {
rcd->pkeys[pidx] = key;
- ret = 0;
- goto bail;
- } else {
- /*
- * lost race, decrement count, catch below
- */
- atomic_dec(pkrefs);
- any++;
+ return 0;
}
+ /*
+ * lost race, decrement count, catch below
+ */
+ atomic_dec(pkrefs);
+ any = true;
}
- if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
+ if ((ppd->pkeys[i] & 0x7FFF) == lkey)
/*
* It makes no sense to have both the limited and
* full membership PKEY set at the same time since
* the unlimited one will disable the limited one.
*/
- ret = -EEXIST;
- goto bail;
- }
- }
- if (!any) {
- ret = -EBUSY;
- goto bail;
+ return -EEXIST;
}
- for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!any)
+ return -EBUSY;
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
if (!ppd->pkeys[i] &&
atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
rcd->pkeys[pidx] = key;
ppd->pkeys[i] = key;
(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
- ret = 0;
- goto bail;
+ return 0;
}
}
- ret = -EBUSY;
-
-bail:
- return ret;
+ return -EBUSY;
}
/**
@@ -1092,12 +1074,12 @@ bail:
return ret;
}
-static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
+static __poll_t qib_poll_urgent(struct qib_ctxtdata *rcd,
struct file *fp,
struct poll_table_struct *pt)
{
struct qib_devdata *dd = rcd->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &rcd->wait, pt);
@@ -1114,12 +1096,12 @@ static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
return pollflag;
}
-static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
+static __poll_t qib_poll_next(struct qib_ctxtdata *rcd,
struct file *fp,
struct poll_table_struct *pt)
{
struct qib_devdata *dd = rcd->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &rcd->wait, pt);
@@ -1135,10 +1117,10 @@ static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
return pollflag;
}
-static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
+static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
{
struct qib_ctxtdata *rcd;
- unsigned pollflag;
+ __poll_t pollflag;
rcd = ctxt_fp(fp);
if (!rcd)
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 85dfbba427f6..3990f386aa32 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1119,6 +1119,8 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
"Could not allocate unit ID: error %d\n", -ret);
goto bail;
}
+ rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s%d", "qib", dd->unit);
+
dd->int_counter = alloc_percpu(u64);
if (!dd->int_counter) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
deleted file mode 100644
index 8fdf79f8d4e4..000000000000
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "qib.h"
-
-/**
- * qib_alloc_lkey - allocate an lkey
- * @mr: memory region that this lkey protects
- * @dma_region: 0->normal key, 1->restricted DMA key
- *
- * Returns 0 if successful, otherwise returns -errno.
- *
- * Increments mr reference count as required.
- *
- * Sets the lkey field mr for non-dma regions.
- *
- */
-
-int qib_alloc_lkey(struct rvt_mregion *mr, int dma_region)
-{
- unsigned long flags;
- u32 r;
- u32 n;
- int ret = 0;
- struct qib_ibdev *dev = to_idev(mr->pd->device);
- struct rvt_lkey_table *rkt = &dev->lk_table;
-
- spin_lock_irqsave(&rkt->lock, flags);
-
- /* special case for dma_mr lkey == 0 */
- if (dma_region) {
- struct rvt_mregion *tmr;
-
- tmr = rcu_access_pointer(dev->dma_mr);
- if (!tmr) {
- qib_get_mr(mr);
- rcu_assign_pointer(dev->dma_mr, mr);
- mr->lkey_published = 1;
- }
- goto success;
- }
-
- /* Find the next available LKEY */
- r = rkt->next;
- n = r;
- for (;;) {
- if (rkt->table[r] == NULL)
- break;
- r = (r + 1) & (rkt->max - 1);
- if (r == n)
- goto bail;
- }
- rkt->next = (r + 1) & (rkt->max - 1);
- /*
- * Make sure lkey is never zero which is reserved to indicate an
- * unrestricted LKEY.
- */
- rkt->gen++;
- /*
- * bits are capped in qib_verbs.c to insure enough bits
- * for generation number
- */
- mr->lkey = (r << (32 - ib_rvt_lkey_table_size)) |
- ((((1 << (24 - ib_rvt_lkey_table_size)) - 1) & rkt->gen)
- << 8);
- if (mr->lkey == 0) {
- mr->lkey |= 1 << 8;
- rkt->gen++;
- }
- qib_get_mr(mr);
- rcu_assign_pointer(rkt->table[r], mr);
- mr->lkey_published = 1;
-success:
- spin_unlock_irqrestore(&rkt->lock, flags);
-out:
- return ret;
-bail:
- spin_unlock_irqrestore(&rkt->lock, flags);
- ret = -ENOMEM;
- goto out;
-}
-
-/**
- * qib_free_lkey - free an lkey
- * @mr: mr to free from tables
- */
-void qib_free_lkey(struct rvt_mregion *mr)
-{
- unsigned long flags;
- u32 lkey = mr->lkey;
- u32 r;
- struct qib_ibdev *dev = to_idev(mr->pd->device);
- struct rvt_lkey_table *rkt = &dev->lk_table;
-
- spin_lock_irqsave(&rkt->lock, flags);
- if (!mr->lkey_published)
- goto out;
- if (lkey == 0)
- RCU_INIT_POINTER(dev->dma_mr, NULL);
- else {
- r = lkey >> (32 - ib_rvt_lkey_table_size);
- RCU_INIT_POINTER(rkt->table[r], NULL);
- }
- qib_put_mr(mr);
- mr->lkey_published = 0;
-out:
- spin_unlock_irqrestore(&rkt->lock, flags);
-}
-
-/**
- * qib_rkey_ok - check the IB virtual address, length, and RKEY
- * @qp: qp for validation
- * @sge: SGE state
- * @len: length of data
- * @vaddr: virtual address to place data
- * @rkey: rkey to check
- * @acc: access flags
- *
- * Return 1 if successful, otherwise 0.
- *
- * increments the reference count upon success
- */
-int qib_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
- u32 len, u64 vaddr, u32 rkey, int acc)
-{
- struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
- struct rvt_mregion *mr;
- unsigned n, m;
- size_t off;
-
- /* We use RKEY == zero for kernel virtual addresses */
- rcu_read_lock();
- if (rkey == 0) {
- struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
- struct qib_ibdev *dev = to_idev(pd->ibpd.device);
-
- if (pd->user)
- goto bail;
- mr = rcu_dereference(dev->dma_mr);
- if (!mr)
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- sge->mr = mr;
- sge->vaddr = (void *) vaddr;
- sge->length = len;
- sge->sge_length = len;
- sge->m = 0;
- sge->n = 0;
- goto ok;
- }
-
- mr = rcu_dereference(
- rkt->table[(rkey >> (32 - ib_rvt_lkey_table_size))]);
- if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
- goto bail;
-
- off = vaddr - mr->iova;
- if (unlikely(vaddr < mr->iova || off + len > mr->length ||
- (mr->access_flags & acc) == 0))
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- off += mr->offset;
- if (mr->page_shift) {
- /*
- page sizes are uniform power of 2 so no loop is necessary
- entries_spanned_by_off is the number of times the loop below
- would have executed.
- */
- size_t entries_spanned_by_off;
-
- entries_spanned_by_off = off >> mr->page_shift;
- off -= (entries_spanned_by_off << mr->page_shift);
- m = entries_spanned_by_off / RVT_SEGSZ;
- n = entries_spanned_by_off % RVT_SEGSZ;
- } else {
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= RVT_SEGSZ) {
- m++;
- n = 0;
- }
- }
- }
- sge->mr = mr;
- sge->vaddr = mr->map[m]->segs[n].vaddr + off;
- sge->length = mr->map[m]->segs[n].length - off;
- sge->sge_length = len;
- sge->m = m;
- sge->n = n;
-ok:
- return 1;
-bail:
- rcu_read_unlock();
- return 0;
-}
-
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8f5754fb8579..cfddff45413f 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -246,7 +246,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -293,7 +292,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
newreq = 0;
if (qp->s_cur == qp->s_tail) {
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_tail == READ_ONCE(qp->s_head))
goto bail;
/*
@@ -434,13 +432,13 @@ no_flow_control:
qp->s_state = OP(COMPARE_SWAP);
put_ib_ateth_swap(wqe->atomic_wr.swap,
&ohdr->u.atomic_eth);
- put_ib_ateth_swap(wqe->atomic_wr.compare_add,
- &ohdr->u.atomic_eth);
+ put_ib_ateth_compare(wqe->atomic_wr.compare_add,
+ &ohdr->u.atomic_eth);
} else {
qp->s_state = OP(FETCH_ADD);
put_ib_ateth_swap(wqe->atomic_wr.compare_add,
&ohdr->u.atomic_eth);
- put_ib_ateth_swap(0, &ohdr->u.atomic_eth);
+ put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
}
put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
&ohdr->u.atomic_eth);
@@ -1340,7 +1338,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
goto ack_done;
/* Ignore invalid responses. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 9a37e844d4c8..4662cc7bde92 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -367,7 +367,6 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
sqp->s_flags |= RVT_S_BUSY;
again:
- smp_read_barrier_depends(); /* see post_one_send() */
if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index bddcc37ace44..70c58b88192c 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -60,7 +60,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -90,7 +89,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
/*
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 15962ed193ce..386c3c4da0c7 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -252,7 +252,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -266,7 +265,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
}
/* see post_one_send() */
- smp_read_barrier_depends();
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index c55000501582..fabee760407e 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1571,7 +1571,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
if (!ib_qib_sys_image_guid)
ib_qib_sys_image_guid = ppd->guid;
- strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
ibdev->owner = THIS_MODULE;
ibdev->node_guid = ppd->guid;
ibdev->phys_port_cnt = dd->num_pports;
@@ -1586,7 +1585,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
* Fill in rvt info object.
*/
dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
- dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 685ef2293cb8..4210ca14014d 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -45,7 +45,6 @@
#include "usnic_ib_verbs.h"
#include "usnic_ib_sysfs.h"
#include "usnic_log.h"
-#include "usnic_ib_sysfs.h"
static ssize_t usnic_ib_show_board(struct device *device,
struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index aa2456a4f9bd..a688a5669168 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -47,7 +47,6 @@
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"
-#include "usnic_ib_verbs.h"
#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 63bc2efc34eb..44cb1cfba417 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -93,8 +93,8 @@ struct pvrdma_cq {
struct pvrdma_page_dir pdir;
u32 cq_handle;
bool is_kernel;
- atomic_t refcnt;
- wait_queue_head_t wait;
+ refcount_t refcnt;
+ struct completion free;
};
struct pvrdma_id_table {
@@ -175,7 +175,7 @@ struct pvrdma_srq {
u32 srq_handle;
int npages;
refcount_t refcnt;
- wait_queue_head_t wait;
+ struct completion free;
};
struct pvrdma_qp {
@@ -196,8 +196,8 @@ struct pvrdma_qp {
u8 state;
bool is_kernel;
struct mutex mutex; /* QP state mutex. */
- atomic_t refcnt;
- wait_queue_head_t wait;
+ refcount_t refcnt;
+ struct completion free;
};
struct pvrdma_dev {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 3562c0c30492..faa9478c14a6 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -132,8 +132,9 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
}
cq->ibcq.cqe = entries;
+ cq->is_kernel = !context;
- if (context) {
+ if (!cq->is_kernel) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
ret = -EFAULT;
goto err_cq;
@@ -148,8 +149,6 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
npages = ib_umem_page_count(cq->umem);
} else {
- cq->is_kernel = true;
-
/* One extra page for shared ring state */
npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
PAGE_SIZE - 1) / PAGE_SIZE;
@@ -178,8 +177,8 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
else
pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
- atomic_set(&cq->refcnt, 1);
- init_waitqueue_head(&cq->wait);
+ refcount_set(&cq->refcnt, 1);
+ init_completion(&cq->free);
spin_lock_init(&cq->cq_lock);
memset(cmd, 0, sizeof(*cmd));
@@ -202,7 +201,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
- if (context) {
+ if (!cq->is_kernel) {
cq->uar = &(to_vucontext(context)->uar);
/* Copy udata back. */
@@ -219,7 +218,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
err_page_dir:
pvrdma_page_dir_cleanup(dev, &cq->pdir);
err_umem:
- if (context)
+ if (!cq->is_kernel)
ib_umem_release(cq->umem);
err_cq:
atomic_dec(&dev->num_cqs);
@@ -230,8 +229,9 @@ err_cq:
static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
{
- atomic_dec(&cq->refcnt);
- wait_event(cq->wait, !atomic_read(&cq->refcnt));
+ if (refcount_dec_and_test(&cq->refcnt))
+ complete(&cq->free);
+ wait_for_completion(&cq->free);
if (!cq->is_kernel)
ib_umem_release(cq->umem);
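Aside: the pvrdma conversion replaces the open-coded atomic_t plus wait_event() teardown with refcount_t and a completion — whoever drops the last reference signals the completion, and the destroy path blocks until it fires, so the old racy "read the counter after decrementing" pattern goes away. A userspace sketch of the shape using C11 atomics and a POSIX semaphore (sem_t stands in for the kernel completion; names are illustrative, build with -pthread):

    #include <semaphore.h>
    #include <stdatomic.h>

    struct obj {
        atomic_int refcnt;
        sem_t free;         /* signalled when the last reference drops */
    };

    static void obj_init(struct obj *o)
    {
        atomic_init(&o->refcnt, 1);   /* creation reference */
        sem_init(&o->free, 0, 0);
    }

    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
            sem_post(&o->free);       /* we dropped the last reference */
    }

    static void obj_destroy(struct obj *o)
    {
        obj_put(o);                   /* drop the creation reference */
        sem_wait(&o->free);           /* block until all users are gone */
    }

    int main(void)
    {
        struct obj o;

        obj_init(&o);
        obj_destroy(&o);              /* single owner: completes immediately */
        return 0;
    }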
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 1f4e18717a00..d650a9fcde24 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -243,13 +243,13 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
mutex_init(&dev->port_mutex);
spin_lock_init(&dev->desc_lock);
- dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(void *),
+ dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
GFP_KERNEL);
if (!dev->cq_tbl)
return ret;
spin_lock_init(&dev->cq_tbl_lock);
- dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(void *),
+ dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
GFP_KERNEL);
if (!dev->qp_tbl)
goto err_cq_free;
@@ -333,7 +333,7 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
spin_lock_irqsave(&dev->qp_tbl_lock, flags);
qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
if (qp)
- atomic_inc(&qp->refcnt);
+ refcount_inc(&qp->refcnt);
spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
if (qp && qp->ibqp.event_handler) {
@@ -346,9 +346,8 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
ibqp->event_handler(&e, ibqp->qp_context);
}
if (qp) {
- atomic_dec(&qp->refcnt);
- if (atomic_read(&qp->refcnt) == 0)
- wake_up(&qp->wait);
+ if (refcount_dec_and_test(&qp->refcnt))
+ complete(&qp->free);
}
}
@@ -360,7 +359,7 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
spin_lock_irqsave(&dev->cq_tbl_lock, flags);
cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
if (cq)
- atomic_inc(&cq->refcnt);
+ refcount_inc(&cq->refcnt);
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
if (cq && cq->ibcq.event_handler) {
@@ -373,9 +372,8 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
ibcq->event_handler(&e, ibcq->cq_context);
}
if (cq) {
- atomic_dec(&cq->refcnt);
- if (atomic_read(&cq->refcnt) == 0)
- wake_up(&cq->wait);
+ if (refcount_dec_and_test(&cq->refcnt))
+ complete(&cq->free);
}
}
@@ -404,7 +402,7 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
}
if (srq) {
if (refcount_dec_and_test(&srq->refcnt))
- wake_up(&srq->wait);
+ complete(&srq->free);
}
}
@@ -533,15 +531,14 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
spin_lock_irqsave(&dev->cq_tbl_lock, flags);
cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
if (cq)
- atomic_inc(&cq->refcnt);
+ refcount_inc(&cq->refcnt);
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
if (cq && cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
if (cq) {
- atomic_dec(&cq->refcnt);
- if (atomic_read(&cq->refcnt))
- wake_up(&cq->wait);
+ if (refcount_dec_and_test(&cq->refcnt))
+ complete(&cq->free);
}
pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
}
@@ -885,8 +882,8 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "device version %d, driver version %d\n",
dev->dsr_version, PVRDMA_VERSION);
- dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
- &dev->dsrbase, GFP_KERNEL);
+ dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+ &dev->dsrbase, GFP_KERNEL);
if (!dev->dsr) {
dev_err(&pdev->dev, "failed to allocate shared region\n");
ret = -ENOMEM;
@@ -894,7 +891,6 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
}
/* Setup the shared region */
- memset(dev->dsr, 0, sizeof(*dev->dsr));
dev->dsr->driver_version = PVRDMA_VERSION;
dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
PVRDMA_GOS_BITS_32 :
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index 8519f3212e52..fa96fa4fb829 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -119,10 +119,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
- int nchunks;
int ret;
- int entry;
- struct scatterlist *sg;
if (length == 0 || length > dev->dsr->caps.max_mr_size) {
dev_warn(&dev->pdev->dev, "invalid mem region length\n");
@@ -137,13 +134,9 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_CAST(umem);
}
- nchunks = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
- nchunks += sg_dma_len(sg) >> PAGE_SHIFT;
-
- if (nchunks < 0 || nchunks > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ if (umem->npages < 0 || umem->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
- nchunks);
+ umem->npages);
ret = -EINVAL;
goto err_umem;
}
@@ -158,7 +151,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mmr.size = length;
mr->umem = umem;
- ret = pvrdma_page_dir_init(dev, &mr->pdir, nchunks, false);
+ ret = pvrdma_page_dir_init(dev, &mr->pdir, umem->npages, false);
if (ret) {
dev_warn(&dev->pdev->dev,
"could not allocate page directory\n");
@@ -175,7 +168,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
cmd->length = length;
cmd->pd_handle = to_vpd(pd)->pd_handle;
cmd->access_flags = access_flags;
- cmd->nchunks = nchunks;
+ cmd->nchunks = umem->npages;
cmd->pdir_dma = mr->pdir.dir_dma;
ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 10420a18d02f..7bf518bdbf21 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -245,12 +245,13 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
mutex_init(&qp->mutex);
- atomic_set(&qp->refcnt, 1);
- init_waitqueue_head(&qp->wait);
+ refcount_set(&qp->refcnt, 1);
+ init_completion(&qp->free);
qp->state = IB_QPS_RESET;
+ qp->is_kernel = !(pd->uobject && udata);
- if (pd->uobject && udata) {
+ if (!qp->is_kernel) {
dev_dbg(&dev->pdev->dev,
"create queuepair from user space\n");
@@ -291,8 +292,6 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->npages_recv = 0;
qp->npages = qp->npages_send + qp->npages_recv;
} else {
- qp->is_kernel = true;
-
ret = pvrdma_set_sq_size(to_vdev(pd->device),
&init_attr->cap, qp);
if (ret)
@@ -394,7 +393,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
err_pdir:
pvrdma_page_dir_cleanup(dev, &qp->pdir);
err_umem:
- if (pd->uobject && udata) {
+ if (!qp->is_kernel) {
if (qp->rumem)
ib_umem_release(qp->rumem);
if (qp->sumem)
@@ -428,8 +427,16 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
- atomic_dec(&qp->refcnt);
- wait_event(qp->wait, !atomic_read(&qp->refcnt));
+ if (refcount_dec_and_test(&qp->refcnt))
+ complete(&qp->free);
+ wait_for_completion(&qp->free);
+
+ if (!qp->is_kernel) {
+ if (qp->rumem)
+ ib_umem_release(qp->rumem);
+ if (qp->sumem)
+ ib_umem_release(qp->sumem);
+ }
pvrdma_page_dir_cleanup(dev, &qp->pdir);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 826ccb864596..5acebb1ef631 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -149,7 +149,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
spin_lock_init(&srq->lock);
refcount_set(&srq->refcnt, 1);
- init_waitqueue_head(&srq->wait);
+ init_completion(&srq->free);
dev_dbg(&dev->pdev->dev,
"create shared receive queue from user space\n");
@@ -236,8 +236,9 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
dev->srq_tbl[srq->srq_handle] = NULL;
spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
- refcount_dec(&srq->refcnt);
- wait_event(srq->wait, !refcount_read(&srq->refcnt));
+ if (refcount_dec_and_test(&srq->refcnt))
+ complete(&srq->free);
+ wait_for_completion(&srq->free);
/* There is no support for kernel clients, so this is safe. */
ib_umem_release(srq->umem);
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 97d71e49c092..fb52b669bfce 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -56,7 +56,7 @@
* rvt_cq_enter - add a new entry to the completion queue
* @cq: completion queue
* @entry: work completion entry to add
- * @sig: true if @entry is solicited
+ * @solicited: true if @entry is solicited
*
* This may be called with qp->s_lock held.
*/
@@ -101,8 +101,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
wc->uqueue[head].opcode = entry->opcode;
wc->uqueue[head].vendor_err = entry->vendor_err;
wc->uqueue[head].byte_len = entry->byte_len;
- wc->uqueue[head].ex.imm_data =
- (__u32 __force)entry->ex.imm_data;
+ wc->uqueue[head].ex.imm_data = entry->ex.imm_data;
wc->uqueue[head].qp_num = entry->qp->qp_num;
wc->uqueue[head].src_qp = entry->src_qp;
wc->uqueue[head].wc_flags = entry->wc_flags;
@@ -198,7 +197,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
return ERR_PTR(-EINVAL);
/* Allocate the completion queue structure. */
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node);
if (!cq)
return ERR_PTR(-ENOMEM);
@@ -214,7 +213,9 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
else
sz += sizeof(struct ib_wc) * (entries + 1);
- wc = vmalloc_user(sz);
+ wc = udata ?
+ vmalloc_user(sz) :
+ vzalloc_node(sz, rdi->dparms.node);
if (!wc) {
ret = ERR_PTR(-ENOMEM);
goto bail_cq;
@@ -369,7 +370,9 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
else
sz += sizeof(struct ib_wc) * (cqe + 1);
- wc = vmalloc_user(sz);
+ wc = udata ?
+ vmalloc_user(sz) :
+ vzalloc_node(sz, rdi->dparms.node);
if (!wc)
return -ENOMEM;
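
[Editor's note] The rvt_create_cq()/rvt_resize_cq() hunks pick the allocator by consumer: a user-mapped CQ still needs vmalloc_user() (zeroed pages suitable for mmap), while a kernel-only CQ can be placed on the device's NUMA node with vzalloc_node() so the ring sits close to the CPU that polls it. A hedged sketch of the same split, with hypothetical names (alloc_wc_buffer):

#include <linux/vmalloc.h>

/* illustrative only: select the allocator by who will touch the buffer */
static void *alloc_wc_buffer(size_t sz, bool for_user, int node)
{
	return for_user ? vmalloc_user(sz)	/* mmap()able by user space */
			: vzalloc_node(sz, node); /* node-local, kernel only */
}
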
diff --git a/drivers/infiniband/sw/rdmavt/mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index b3a38c5e4cad..dd11c6fcd060 100644
--- a/drivers/infiniband/sw/rdmavt/mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -272,7 +272,7 @@ bail:
/**
* rvt_attach_mcast - attach a qp to a multicast group
* @ibqp: Infiniband qp
- * @igd: multicast guid
+ * @gid: multicast guid
* @lid: multicast lid
*
* Return: 0 on success
@@ -335,7 +335,7 @@ bail_mcast:
/**
* rvt_detach_mcast - remove a qp from a multicast group
* @ibqp: Infiniband qp
- * @igd: multicast guid
+ * @gid: multicast guid
* @lid: multicast lid
*
* Return: 0 on success
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 42713511b53b..1b2e5362a3ff 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -768,7 +768,7 @@ bail:
/**
* rvt_map_phys_fmr - set up a fast memory region
- * @ibmfr: the fast memory region to set up
+ * @ibfmr: the fast memory region to set up
* @page_list: the list of pages to associate with the fast memory region
* @list_len: the number of pages to associate with the fast memory region
* @iova: the virtual address of the start of the fast memory region
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 9177df60742a..c82e6bb3d77c 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -269,7 +269,7 @@ no_qp_table:
/**
* free_all_qps - check for QPs still in use
- * @qpt: the QP table to empty
+ * @rdi: rvt device info structure
*
* There should not be any QPs still in use.
* Free memory for table.
@@ -335,9 +335,9 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
/**
* alloc_qpn - Allocate the next available qpn or zero/one for QP type
* IB_QPT_SMI/IB_QPT_GSI
- *@rdi: rvt device info structure
- *@qpt: queue pair number table pointer
- *@port_num: IB port number, 1 based, comes from core
+ * @rdi: rvt device info structure
+ * @qpt: queue pair number table pointer
+ * @port_num: IB port number, 1 based, comes from core
*
* Return: The queue pair number
*/
@@ -1650,9 +1650,9 @@ static inline int rvt_qp_valid_operation(
/**
* rvt_qp_is_avail - determine queue capacity
- * @qp - the qp
- * @rdi - the rdmavt device
- * @reserved_op - is reserved operation
+ * @qp: the qp
+ * @rdi: the rdmavt device
+ * @reserved_op: is reserved operation
*
* This assumes the s_hlock is held but the s_last
* qp variable is uncontrolled.
@@ -1684,7 +1684,6 @@ static inline int rvt_qp_is_avail(
/* non-reserved operations */
if (likely(qp->s_avail))
return 0;
- smp_read_barrier_depends(); /* see rc.c */
slast = READ_ONCE(qp->s_last);
if (qp->s_head >= slast)
avail = qp->s_size - (qp->s_head - slast);
@@ -2075,6 +2074,7 @@ void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
lockdep_assert_held(&qp->s_lock);
qp->s_flags |= RVT_S_WAIT_RNR;
to = rvt_aeth_to_usec(aeth);
+ trace_rvt_rnrnak_add(qp, to);
hrtimer_start(&qp->s_rnr_timer,
ns_to_ktime(1000 * to), HRTIMER_MODE_REL);
}
@@ -2104,17 +2104,14 @@ EXPORT_SYMBOL(rvt_stop_rc_timers);
* stop an rnr timer and return if the timer
* had been pending.
*/
-static int rvt_stop_rnr_timer(struct rvt_qp *qp)
+static void rvt_stop_rnr_timer(struct rvt_qp *qp)
{
- int rval = 0;
-
lockdep_assert_held(&qp->s_lock);
/* Remove QP from rnr timer */
if (qp->s_flags & RVT_S_WAIT_RNR) {
qp->s_flags &= ~RVT_S_WAIT_RNR;
- rval = hrtimer_try_to_cancel(&qp->s_rnr_timer);
+ trace_rvt_rnrnak_stop(qp, 0);
}
- return rval;
}
/**
@@ -2167,6 +2164,7 @@ enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
spin_lock_irqsave(&qp->s_lock, flags);
rvt_stop_rnr_timer(qp);
+ trace_rvt_rnrnak_timeout(qp, 0);
rdi->driver_f.schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
return HRTIMER_NORESTART;
@@ -2175,8 +2173,8 @@ EXPORT_SYMBOL(rvt_rc_rnr_retry);
/**
* rvt_qp_iter_init - initial for QP iteration
- * @rdi - rvt devinfo
- * @v - u64 value
+ * @rdi: rvt devinfo
+ * @v: u64 value
*
* This returns an iterator suitable for iterating QPs
* in the system.
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index f7c48e9023de..3707952b4364 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -90,7 +90,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
return ERR_PTR(-EINVAL);
- srq = kmalloc(sizeof(*srq), GFP_KERNEL);
+ srq = kzalloc_node(sizeof(*srq), GFP_KERNEL, dev->dparms.node);
if (!srq)
return ERR_PTR(-ENOMEM);
@@ -101,7 +101,10 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
srq->rq.max_sge = srq_init_attr->attr.max_sge;
sz = sizeof(struct ib_sge) * srq->rq.max_sge +
sizeof(struct rvt_rwqe);
- srq->rq.wq = vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz);
+ srq->rq.wq = udata ?
+ vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz) :
+ vzalloc_node(sizeof(struct rvt_rwq) + srq->rq.size * sz,
+ dev->dparms.node);
if (!srq->rq.wq) {
ret = ERR_PTR(-ENOMEM);
goto bail_srq;
@@ -129,16 +132,12 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
ret = ERR_PTR(err);
goto bail_ip;
}
- } else {
- srq->ip = NULL;
}
/*
* ib_create_srq() will initialize srq->ibsrq.
*/
spin_lock_init(&srq->rq.lock);
- srq->rq.wq->head = 0;
- srq->rq.wq->tail = 0;
srq->limit = srq_init_attr->attr.srq_limit;
spin_lock(&dev->n_srqs_lock);
@@ -200,7 +199,10 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
sz = sizeof(struct rvt_rwqe) +
srq->rq.max_sge * sizeof(struct ib_sge);
size = attr->max_wr + 1;
- wq = vmalloc_user(sizeof(struct rvt_rwq) + size * sz);
+ wq = udata ?
+ vmalloc_user(sizeof(struct rvt_rwq) + size * sz) :
+ vzalloc_node(sizeof(struct rvt_rwq) + size * sz,
+ dev->dparms.node);
if (!wq)
return -ENOMEM;
diff --git a/drivers/infiniband/sw/rdmavt/trace.h b/drivers/infiniband/sw/rdmavt/trace.h
index bb4b1e710f22..36ddbd291ee0 100644
--- a/drivers/infiniband/sw/rdmavt/trace.h
+++ b/drivers/infiniband/sw/rdmavt/trace.h
@@ -45,8 +45,8 @@
*
*/
-#define RDI_DEV_ENTRY(rdi) __string(dev, rdi->driver_f.get_card_name(rdi))
-#define RDI_DEV_ASSIGN(rdi) __assign_str(dev, rdi->driver_f.get_card_name(rdi))
+#define RDI_DEV_ENTRY(rdi) __string(dev, rvt_get_ibdev_name(rdi))
+#define RDI_DEV_ASSIGN(rdi) __assign_str(dev, rvt_get_ibdev_name(rdi))
#include "trace_rvt.h"
#include "trace_qp.h"
diff --git a/drivers/infiniband/sw/rdmavt/trace_qp.h b/drivers/infiniband/sw/rdmavt/trace_qp.h
index 4c77a3119bda..efc9d814b032 100644
--- a/drivers/infiniband/sw/rdmavt/trace_qp.h
+++ b/drivers/infiniband/sw/rdmavt/trace_qp.h
@@ -85,6 +85,48 @@ DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
TP_PROTO(struct rvt_qp *qp, u32 bucket),
TP_ARGS(qp, bucket));
+DECLARE_EVENT_CLASS(
+ rvt_rnrnak_template,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(void *, hrtimer)
+ __field(u32, s_flags)
+ __field(u32, to)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->hrtimer = &qp->s_rnr_timer;
+ __entry->s_flags = qp->s_flags;
+ __entry->to = to;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x hrtimer 0x%p s_flags 0x%x timeout %u us",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->hrtimer,
+ __entry->s_flags,
+ __entry->to
+ )
+);
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_add,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_timeout,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_stop,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
#endif /* __RVT_TRACE_QP_H */
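
[Editor's note] The three rvt_rnrnak_* events share one DECLARE_EVENT_CLASS, so the record layout and printk format are defined once and each DEFINE_EVENT only contributes a name. From C code they are emitted through the generated trace_<name>() stubs, exactly as the qp.c hunks above do; a fragment, assuming a qp pointer and a timeout in microseconds:

/* each call compiles to a no-op unless the tracepoint is enabled */
trace_rvt_rnrnak_add(qp, to);		/* RNR NAK timer armed for 'to' us */
trace_rvt_rnrnak_stop(qp, 0);		/* timer cancelled before expiry */
trace_rvt_rnrnak_timeout(qp, 0);	/* timer fired */
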
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 64bdd442078a..a4553b2b3696 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -224,7 +224,8 @@ static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
* rvt_query_pkey - Return a pkey from the table at a given index
* @ibdev: Verbs IB dev
* @port_num: Port number, 1 based from ib core
- * @intex: Index into pkey table
+ * @index: Index into pkey table
+ * @pkey: returned pkey from the port pkey table
*
* Return: 0 on failure pkey otherwise
*/
@@ -255,7 +256,7 @@ static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
* rvt_query_gid - Return a gid from the table
* @ibdev: Verbs IB dev
* @port_num: Port number, 1 based from ib core
- * @index: = Index in table
+ * @guid_index: Index in table
* @gid: Gid to return
*
* Return: 0 on success
@@ -297,8 +298,8 @@ static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
/**
* rvt_alloc_ucontext - Allocate a user context
- * @ibdev: Vers IB dev
- * @data: User data allocated
+ * @ibdev: Verbs IB dev
+ * @udata: User data allocated
*/
static struct ib_ucontext *rvt_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
@@ -413,7 +414,6 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
* required for rdmavt to function.
*/
if ((!rdi->driver_f.port_callback) ||
- (!rdi->driver_f.get_card_name) ||
(!rdi->driver_f.get_pci_dev))
return -EINVAL;
break;
diff --git a/drivers/infiniband/sw/rdmavt/vt.h b/drivers/infiniband/sw/rdmavt/vt.h
index f363505312be..8823b2e7aac6 100644
--- a/drivers/infiniband/sw/rdmavt/vt.h
+++ b/drivers/infiniband/sw/rdmavt/vt.h
@@ -63,19 +63,19 @@
#define rvt_pr_info(rdi, fmt, ...) \
__rvt_pr_info(rdi->driver_f.get_pci_dev(rdi), \
- rdi->driver_f.get_card_name(rdi), \
+ rvt_get_ibdev_name(rdi), \
fmt, \
##__VA_ARGS__)
#define rvt_pr_warn(rdi, fmt, ...) \
__rvt_pr_warn(rdi->driver_f.get_pci_dev(rdi), \
- rdi->driver_f.get_card_name(rdi), \
+ rvt_get_ibdev_name(rdi), \
fmt, \
##__VA_ARGS__)
#define rvt_pr_err(rdi, fmt, ...) \
__rvt_pr_err(rdi->driver_f.get_pci_dev(rdi), \
- rdi->driver_f.get_card_name(rdi), \
+ rvt_get_ibdev_name(rdi), \
fmt, \
##__VA_ARGS__)
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 320bffc980d8..bad4a576d7cf 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -1,8 +1,8 @@
config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
depends on INET && PCI && INFINIBAND
- depends on NET_UDP_TUNNEL
- depends on CRYPTO_CRC32
+ select NET_UDP_TUNNEL
+ select CRYPTO_CRC32
select DMA_VIRT_OPS
---help---
This driver implements the InfiniBand RDMA transport over
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 8c3d30b3092d..b7debb6f2eac 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -77,12 +77,6 @@ void rxe_release(struct kref *kref)
ib_dealloc_device(&rxe->ib_dev);
}
-void rxe_dev_put(struct rxe_dev *rxe)
-{
- kref_put(&rxe->ref_cnt, rxe_release);
-}
-EXPORT_SYMBOL_GPL(rxe_dev_put);
-
/* initialize rxe device parameters */
static int rxe_init_device_param(struct rxe_dev *rxe)
{
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 6447d736d5a4..7d232611303f 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -57,6 +57,7 @@
#include "rxe_hdr.h"
#include "rxe_param.h"
#include "rxe_verbs.h"
+#include "rxe_loc.h"
#define RXE_UVERBS_ABI_VERSION (1)
@@ -95,7 +96,10 @@ void rxe_remove_all(void);
int rxe_rcv(struct sk_buff *skb);
-void rxe_dev_put(struct rxe_dev *rxe);
+static inline void rxe_dev_put(struct rxe_dev *rxe)
+{
+ kref_put(&rxe->ref_cnt, rxe_release);
+}
struct rxe_dev *net_to_rxe(struct net_device *ndev);
struct rxe_dev *get_rxe_by_name(const char *name);
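
[Editor's note] Moving rxe_dev_put() into the header as a static inline removes an exported symbol while keeping the standard kref idiom: the caller passes a release callback to kref_put(), and the callback runs exactly once, when the count reaches zero. A minimal self-contained sketch with hypothetical names (struct dev_obj, dev_obj_release()):

#include <linux/kref.h>
#include <linux/slab.h>

struct dev_obj {
	struct kref ref_cnt;
	/* ... device state ... */
};

static void dev_obj_release(struct kref *kref)
{
	struct dev_obj *d = container_of(kref, struct dev_obj, ref_cnt);

	kfree(d);	/* runs exactly once, on the final put */
}

static inline void dev_obj_put(struct dev_obj *d)
{
	kref_put(&d->ref_cnt, dev_obj_release);
}
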
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index d7472a442a2c..96c3a6c5c4b5 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -237,7 +237,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
void rxe_release(struct kref *kref);
-void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify);
int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 59dee10bebcb..159246b03867 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -82,7 +82,7 @@ struct rxe_dev *get_rxe_by_name(const char *name)
}
-struct rxe_recv_sockets recv_sockets;
+static struct rxe_recv_sockets recv_sockets;
struct device *rxe_dma_device(struct rxe_dev *rxe)
{
@@ -452,31 +452,26 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
- struct sk_buff *nskb;
struct rxe_av *av;
int err;
av = rxe_get_av(pkt);
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb)
- return -ENOMEM;
-
- nskb->destructor = rxe_skb_tx_dtor;
- nskb->sk = pkt->qp->sk->sk;
+ skb->destructor = rxe_skb_tx_dtor;
+ skb->sk = pkt->qp->sk->sk;
rxe_add_ref(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
if (av->network_type == RDMA_NETWORK_IPV4) {
- err = ip_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
+ err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
} else if (av->network_type == RDMA_NETWORK_IPV6) {
- err = ip6_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
+ err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
} else {
pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
atomic_dec(&pkt->qp->skb_out);
rxe_drop_ref(pkt->qp);
- kfree_skb(nskb);
+ kfree_skb(skb);
return -EINVAL;
}
@@ -485,7 +480,6 @@ int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
return -EAGAIN;
}
- kfree_skb(skb);
return 0;
}
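
[Editor's note] rxe_send() used to clone every outgoing skb just so the clone could carry a destructor and a socket pointer; the hunk sets both on the original skb instead, so the QP reference and the skb_out counter are released only when the stack truly frees the packet. A minimal sketch of destructor-based in-flight accounting; struct my_qp, my_qp_get() and my_qp_put() are assumed helpers, and the sk_user_data back-pointer is an assumption of this sketch:

#include <linux/skbuff.h>
#include <net/sock.h>

struct my_qp {
	atomic_t skb_out;	/* packets handed to the stack, not yet freed */
	struct socket *sk;	/* per-QP socket; sk_user_data points back */
};

static void my_skb_tx_dtor(struct sk_buff *skb)
{
	struct my_qp *qp = skb->sk->sk_user_data;	/* assumed back-pointer */

	atomic_dec(&qp->skb_out);	/* packet really freed: undo accounting */
	my_qp_put(qp);			/* pairs with the get at transmit time */
}

static void my_send(struct my_qp *qp, struct sk_buff *skb)
{
	skb->destructor = my_skb_tx_dtor;
	skb->sk = qp->sk->sk;
	my_qp_get(qp);			/* keep the QP alive until the dtor runs */
	atomic_inc(&qp->skb_out);
	/* ... hand skb to ip_local_out()/ip6_local_out(); on the error
	 * path, kfree_skb(skb) runs the destructor and undoes both. */
}
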
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
index 1c06b3bfe1b6..728d8c71b36a 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.h
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -43,7 +43,6 @@ struct rxe_recv_sockets {
struct socket *sk6;
};
-extern struct rxe_recv_sockets recv_sockets;
extern struct notifier_block rxe_net_notifier;
void rxe_release_udp_tunnel(struct socket *sk);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 4469592b839d..137d6c0c49d4 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -824,9 +824,9 @@ void rxe_qp_destroy(struct rxe_qp *qp)
}
/* called when the last reference to the qp is dropped */
-void rxe_qp_cleanup(struct rxe_pool_entry *arg)
+static void rxe_qp_do_cleanup(struct work_struct *work)
{
- struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
+ struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
rxe_drop_all_mcast_groups(qp);
@@ -859,3 +859,11 @@ void rxe_qp_cleanup(struct rxe_pool_entry *arg)
kernel_sock_shutdown(qp->sk, SHUT_RDWR);
sock_release(qp->sk);
}
+
+/* called when the last reference to the qp is dropped */
+void rxe_qp_cleanup(struct rxe_pool_entry *arg)
+{
+ struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
+
+ execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
+}
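
[Editor's note] With the destructor change above, the last rxe QP reference can now be dropped from atomic context, but the teardown calls sock_release() and may sleep. The patch therefore defers it: execute_in_process_context() runs the callback inline when already in process context and otherwise queues it as a work item. A minimal sketch with hypothetical names:

#include <linux/workqueue.h>

struct my_obj {
	struct execute_work cleanup_work;
	/* ... */
};

static void my_obj_do_cleanup(struct work_struct *work)
{
	struct my_obj *o = container_of(work, struct my_obj,
					cleanup_work.work);

	/* may sleep here: release sockets, vfree buffers, etc. */
}

static void my_obj_cleanup(struct my_obj *o)
{
	/* runs now if in process context, otherwise via a work item */
	execute_in_process_context(my_obj_do_cleanup, &o->cleanup_work);
}
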
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index fb8c83e055e1..4c3f899241d4 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -336,7 +336,6 @@ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
union ib_gid dgid;
union ib_gid *pdgid;
- u16 index;
if (skb->protocol == htons(ETH_P_IP)) {
ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
@@ -348,7 +347,7 @@ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
return ib_find_cached_gid_by_port(&rxe->ib_dev, pdgid,
IB_GID_TYPE_ROCE_UDP_ENCAP,
- 1, rxe->ndev, &index);
+ 1, rxe->ndev, NULL);
}
/* rxe_rcv is called from the interface driver */
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 26a7f923045b..7bdaf71b8221 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -594,15 +594,8 @@ int rxe_requester(void *arg)
rxe_add_ref(qp);
next_wqe:
- if (unlikely(!qp->valid)) {
- rxe_drain_req_pkts(qp, true);
+ if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
goto exit;
- }
-
- if (unlikely(qp->req.state == QP_STATE_ERROR)) {
- rxe_drain_req_pkts(qp, true);
- goto exit;
- }
if (unlikely(qp->req.state == QP_STATE_RESET)) {
qp->req.wqe_index = consumer_index(qp->sq.queue);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 4240866a5331..d37bb9b97569 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -863,8 +863,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
if (pkt->mask & RXE_IMMDT_MASK) {
uwc->wc_flags |= IB_WC_WITH_IMM;
- uwc->ex.imm_data =
- (__u32 __force)immdt_imm(pkt);
+ uwc->ex.imm_data = immdt_imm(pkt);
}
if (pkt->mask & RXE_IETH_MASK) {
@@ -1210,7 +1209,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
}
}
-void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
+static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
struct sk_buff *skb;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index d03002b9d84d..7210a784abb4 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -814,6 +814,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
(queue_count(qp->sq.queue) > 1);
rxe_run_task(&qp->req.task, must_sched);
+ if (unlikely(qp->req.state == QP_STATE_ERROR))
+ rxe_run_task(&qp->comp.task, 1);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 0c2dbe45c729..1019f5e7dbdd 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -35,6 +35,7 @@
#define RXE_VERBS_H
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <rdma/rdma_user_rxe.h>
#include "rxe_pool.h"
#include "rxe_task.h"
@@ -281,6 +282,8 @@ struct rxe_qp {
struct timer_list rnr_nak_timer;
spinlock_t state_lock; /* guard requester and completer */
+
+ struct execute_work cleanup_work;
};
enum rxe_mem_state {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 87f4bd99cdf7..962fbcb57dc7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -766,12 +766,14 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
skb_orphan(skb);
skb_dst_drop(skb);
- if (netif_queue_stopped(dev))
- if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
- IB_CQ_REPORT_MISSED_EVENTS)) {
+ if (netif_queue_stopped(dev)) {
+ rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS);
+ if (unlikely(rc < 0))
ipoib_warn(priv, "IPoIB/CM:request notify on send CQ failed\n");
+ else if (rc)
napi_schedule(&priv->send_napi);
- }
+ }
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
if (unlikely(rc)) {
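
[Editor's note] With IB_CQ_REPORT_MISSED_EVENTS, ib_req_notify_cq() has three outcomes: negative on error, zero when the CQ has been re-armed and the next completion will interrupt, and positive when completions were already pending and would otherwise be missed. The hunk above distinguishes all three instead of treating any non-zero value as a failure. A fragment, assuming a cq and a napi context are in scope:

int rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);

if (unlikely(rc < 0))
	pr_warn("request notify on send CQ failed\n");	/* real error */
else if (rc)
	napi_schedule(&napi);	/* events already pending: poll them now */
/* rc == 0: CQ armed, nothing to do until the next interrupt */
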
@@ -876,7 +878,7 @@ int ipoib_cm_dev_open(struct net_device *dev)
priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
if (IS_ERR(priv->cm.id)) {
- printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
+ pr_warn("%s: failed to create CM ID\n", priv->ca->name);
ret = PTR_ERR(priv->cm.id);
goto err_cm;
}
@@ -884,8 +886,8 @@ int ipoib_cm_dev_open(struct net_device *dev)
ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
0);
if (ret) {
- printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
- IPOIB_CM_IETF_ID | priv->qp->qp_num);
+ pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name,
+ IPOIB_CM_IETF_ID | priv->qp->qp_num);
goto err_listen;
}
@@ -1145,6 +1147,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
noio_flag = memalloc_noio_save();
p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
if (!p->tx_ring) {
+ memalloc_noio_restore(noio_flag);
ret = -ENOMEM;
goto err_tx;
}
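
[Editor's note] memalloc_noio_save() puts the task into a scope where allocations implicitly drop __GFP_IO, and every save must be paired with a restore on every exit path; the hunk adds the memalloc_noio_restore() that was missing on the vzalloc() failure branch. A sketch of the required pairing:

#include <linux/sched/mm.h>
#include <linux/vmalloc.h>

static int alloc_ring_noio(void **ring, size_t sz)
{
	unsigned int noio_flag = memalloc_noio_save();
	void *p = vzalloc(sz);

	/* restore on *every* path out of the NOIO scope, errors included */
	memalloc_noio_restore(noio_flag);
	if (!p)
		return -ENOMEM;
	*ring = p;
	return 0;
}
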
@@ -1455,8 +1458,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int e = skb_queue_empty(&priv->cm.skb_queue);
- if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+ skb_dst_update_pmtu(skb, mtu);
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
@@ -1562,7 +1564,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
if (IS_ERR(priv->cm.srq)) {
if (PTR_ERR(priv->cm.srq) != -ENOSYS)
- printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
+ pr_warn("%s: failed to allocate SRQ, error %ld\n",
priv->ca->name, PTR_ERR(priv->cm.srq));
priv->cm.srq = NULL;
return;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 3b96cdaf9a83..10384ea50bed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -644,7 +644,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
if (netif_queue_stopped(dev))
if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
- IB_CQ_REPORT_MISSED_EVENTS))
+ IB_CQ_REPORT_MISSED_EVENTS) < 0)
ipoib_warn(priv, "request notify on send CQ failed\n");
rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
@@ -1085,8 +1085,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
netif_addr_unlock_bh(priv->dev);
- err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
- priv->dev, &port, &index);
+ err = ib_find_gid(priv->ca, &search_gid, priv->dev, &port, &index);
netif_addr_lock_bh(priv->dev);
@@ -1236,13 +1235,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
ipoib_ib_dev_down(dev);
if (level == IPOIB_FLUSH_HEAVY) {
- rtnl_lock();
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
ipoib_ib_dev_stop(dev);
- result = ipoib_ib_dev_open(dev);
- rtnl_unlock();
- if (result)
+ if (ipoib_ib_dev_open(dev))
return;
if (netif_queue_stopped(dev))
@@ -1282,7 +1278,9 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_heavy);
+ rtnl_lock();
__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
+ rtnl_unlock();
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 12b7f911f0e5..5930c7d9a8fb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -768,13 +768,30 @@ static void path_rec_completion(int status,
if (!status) {
struct rdma_ah_attr av;
- if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
+ if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
+ pathrec, &av))
ah = ipoib_create_ah(dev, priv->pd, &av);
}
spin_lock_irqsave(&priv->lock, flags);
if (!IS_ERR_OR_NULL(ah)) {
+ /*
+ * pathrec.dgid is used as the database key from the LLADDR,
+ * pathrec.dgid is used as the database key from the LLADDR,
+ * so it must remain unchanged even if the SA returns a different
+ */
+ if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid))) {
+ ipoib_dbg(
+ priv,
+ "%s got PathRec for gid %pI6 while asked for %pI6\n",
+ dev->name, pathrec->dgid.raw,
+ path->pathrec.dgid.raw);
+ memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid));
+ }
+
path->pathrec = *pathrec;
old_ah = path->ah;
@@ -840,6 +857,23 @@ static void path_rec_completion(int status,
}
}
+static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
+ void *gid)
+{
+ path->dev = priv->dev;
+
+ if (rdma_cap_opa_ah(priv->ca, priv->port))
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
+ else
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
+
+ memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
+ path->pathrec.sgid = priv->local_gid;
+ path->pathrec.pkey = cpu_to_be16(priv->pkey);
+ path->pathrec.numb_path = 1;
+ path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
+}
+
static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -852,21 +886,11 @@ static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
if (!path)
return NULL;
- path->dev = dev;
-
skb_queue_head_init(&path->queue);
INIT_LIST_HEAD(&path->neigh_list);
- if (rdma_cap_opa_ah(priv->ca, priv->port))
- path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
- else
- path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
- memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
- path->pathrec.sgid = priv->local_gid;
- path->pathrec.pkey = cpu_to_be16(priv->pkey);
- path->pathrec.numb_path = 1;
- path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
+ init_path_rec(priv, path, gid);
return path;
}
@@ -902,8 +926,8 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
-static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
- struct net_device *dev)
+static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
+ struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rdma_netdev *rn = netdev_priv(dev);
@@ -917,7 +941,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
- return;
+ return NULL;
+ }
+
+ /* To avoid a race condition, make sure that the
+ * neigh is added only once.
+ */
+ if (unlikely(!list_empty(&neigh->list))) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return neigh;
}
path = __path_find(dev, daddr + 4);
@@ -956,7 +988,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
path->ah->last_send = rn->send(dev, skb, path->ah->ah,
IPOIB_QPN(daddr));
ipoib_neigh_put(neigh);
- return;
+ return NULL;
}
} else {
neigh->ah = NULL;
@@ -973,7 +1005,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
- return;
+ return NULL;
err_path:
ipoib_neigh_free(neigh);
@@ -983,6 +1015,8 @@ err_drop:
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
+
+ return NULL;
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -995,6 +1029,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->lock, flags);
+ /* no broadcast means that all paths are (or will become) invalid */
+ if (!priv->broadcast)
+ goto drop_and_unlock;
+
path = __path_find(dev, phdr->hwaddr + 4);
if (!path || !path->valid) {
int new_path = 0;
@@ -1004,6 +1042,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
new_path = 1;
}
if (path) {
+ if (!new_path)
+ /* make sure there are no changes in the existing path record */
+ init_path_rec(priv, path, phdr->hwaddr + 4);
+
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
push_pseudo_header(skb, phdr->hwaddr);
__skb_queue_tail(&path->queue, skb);
@@ -1020,8 +1062,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
} else
__path_add(dev, path);
} else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
+ goto drop_and_unlock;
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1041,11 +1082,16 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
push_pseudo_header(skb, phdr->hwaddr);
__skb_queue_tail(&path->queue, skb);
} else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
+ goto drop_and_unlock;
}
spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+
+drop_and_unlock:
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1091,8 +1137,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
case htons(ETH_P_TIPC):
neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
- neigh_add_path(skb, phdr->hwaddr, dev);
- return NETDEV_TX_OK;
+ neigh = neigh_add_path(skb, phdr->hwaddr, dev);
+ if (likely(!neigh))
+ return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
@@ -1663,8 +1710,8 @@ static int ipoib_dev_init_default(struct net_device *dev)
priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
if (!priv->tx_ring) {
- printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
- priv->ca->name, ipoib_sendq_size);
+ pr_warn("%s: failed to allocate TX ring (%d entries)\n",
+ priv->ca->name, ipoib_sendq_size);
goto out_rx_ring_cleanup;
}
@@ -2196,16 +2243,17 @@ static struct net_device *ipoib_add_port(const char *format,
int result = -ENOMEM;
priv = ipoib_intf_alloc(hca, port, format);
- if (!priv)
+ if (!priv) {
+ pr_warn("%s, %d: ipoib_intf_alloc failed\n", hca->name, port);
goto alloc_mem_failed;
+ }
SET_NETDEV_DEV(priv->dev, hca->dev.parent);
priv->dev->dev_id = port - 1;
result = ib_query_port(hca, port, &attr);
if (result) {
- printk(KERN_WARNING "%s: ib_query_port %d failed\n",
- hca->name, port);
+ pr_warn("%s: ib_query_port %d failed\n", hca->name, port);
goto device_init_failed;
}
@@ -2220,8 +2268,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = ib_query_pkey(hca, port, 0, &priv->pkey);
if (result) {
- printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
- hca->name, port, result);
+ pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
+ hca->name, port, result);
goto device_init_failed;
}
@@ -2238,8 +2286,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
if (result) {
- printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
- hca->name, port, result);
+ pr_warn("%s: ib_query_gid port %d failed (ret = %d)\n",
+ hca->name, port, result);
goto device_init_failed;
}
@@ -2249,8 +2297,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = ipoib_dev_init(priv->dev, hca, port);
if (result) {
- printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
- hca->name, port, result);
+ pr_warn("%s: failed to initialize port %d (ret = %d)\n",
+ hca->name, port, result);
goto device_init_failed;
}
@@ -2260,8 +2308,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = register_netdev(priv->dev);
if (result) {
- printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
- hca->name, port, result);
+ pr_warn("%s: couldn't register ipoib port %d; error %d\n",
+ hca->name, port, result);
goto register_failed;
}
@@ -2326,8 +2374,7 @@ static void ipoib_add_one(struct ib_device *device)
}
if (!count) {
- pr_err("Failed to init port, removing it\n");
- ipoib_remove_one(device, dev_list);
+ kfree(dev_list);
return;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 93e149efc1f5..9b3f47ae2016 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
spin_lock_irqsave(&priv->lock, flags);
if (!neigh) {
neigh = ipoib_neigh_alloc(daddr, dev);
- if (neigh) {
+ /* Make sure that the neigh is added only
+ * once to the mcast list.
+ */
+ if (neigh && list_empty(&neigh->list)) {
kref_get(&mcast->ah->ref);
neigh->ah = mcast->ah;
list_add_tail(&neigh->list, &mcast->neigh_list);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index a1ed25422b72..984a88096f39 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -178,7 +178,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,
priv, &cq_attr);
if (IS_ERR(priv->recv_cq)) {
- printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name);
+ pr_warn("%s: failed to create receive CQ\n", ca->name);
goto out_cm_dev_cleanup;
}
@@ -187,7 +187,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,
priv, &cq_attr);
if (IS_ERR(priv->send_cq)) {
- printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name);
+ pr_warn("%s: failed to create send CQ\n", ca->name);
goto out_free_recv_cq;
}
@@ -208,7 +208,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
- printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
+ pr_warn("%s: failed to create QP\n", ca->name);
goto out_free_send_cq;
}
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 2a07692007bd..df49c4eb67f7 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -142,8 +142,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
}
- iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
- "VA:%#llX + unsol:%d\n",
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n",
task->itt, mem_reg->rkey,
(unsigned long long)mem_reg->sge.addr, unsol_sz);
}
@@ -436,7 +435,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_tx_desc *tx_desc = NULL;
+ struct iser_tx_desc *tx_desc;
struct iser_mem_reg *mem_reg;
unsigned long buf_offset;
unsigned long data_seg_len;
@@ -452,10 +451,8 @@ int iser_send_data_out(struct iscsi_conn *conn,
__func__,(int)itt,(int)data_seg_len,(int)buf_offset);
tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
- if (tx_desc == NULL) {
- iser_err("Failed to alloc desc for post dataout\n");
+ if (!tx_desc)
return -ENOMEM;
- }
tx_desc->type = ISCSI_TX_DATAOUT;
tx_desc->cqe.done = iser_dataout_comp;
@@ -475,8 +472,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
tx_desc->num_sge = 2;
if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
- iser_err("Offset:%ld & DSL:%ld in Data-Out "
- "inconsistent with total len:%ld, itt:%d\n",
+ iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n",
buf_offset, data_seg_len,
iser_task->data[ISER_DIR_OUT].data_len, itt);
err = -EINVAL;
@@ -614,8 +610,8 @@ iser_check_remote_inv(struct iser_conn *iser_conn,
iser_conn, rkey);
if (unlikely(!iser_conn->snd_w_inv)) {
- iser_err("conn %p: unexpected remote invalidation, "
- "terminating connection\n", iser_conn);
+ iser_err("conn %p: unexpected remote invalidation, terminating connection\n",
+ iser_conn);
return -EPROTO;
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 720dfb3a1ac2..fff40b097947 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -741,6 +741,7 @@ isert_connect_error(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ ib_drain_qp(isert_conn->qp);
list_del_init(&isert_conn->node);
isert_conn->cm_id = NULL;
isert_put_conn(isert_conn);
@@ -2123,6 +2124,9 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
u32 rkey, offset;
int ret;
+ if (cmd->ctx_init_done)
+ goto rdma_ctx_post;
+
if (dir == DMA_FROM_DEVICE) {
addr = cmd->write_va;
rkey = cmd->write_stag;
@@ -2150,11 +2154,15 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
se_cmd->t_data_sg, se_cmd->t_data_nents,
offset, addr, rkey, dir);
}
+
if (ret < 0) {
isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
return ret;
}
+ cmd->ctx_init_done = true;
+
+rdma_ctx_post:
ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
if (ret < 0)
isert_err("Cmd: %p failed to post RDMA res\n", cmd);
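
[Editor's note] Reposting RDMA read/write work after an error must not run the context initialization a second time on the same struct, so the patch records one-time setup in the new ctx_init_done flag and jumps straight to the post step on retries. The guard pattern in isolation, with hypothetical stand-ins (my_ctx_init(), my_ctx_post()):

/* sketch: initialize a reusable context exactly once, repost freely */
if (!cmd->ctx_init_done) {
	ret = my_ctx_init(cmd);	/* stand-in for the rdma_rw_ctx_init() step */
	if (ret < 0)
		return ret;
	cmd->ctx_init_done = true;
}
return my_ctx_post(cmd);	/* safe to call again on retry */
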
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index d6fd248320ae..3b296bac4f60 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -126,6 +126,7 @@ struct isert_cmd {
struct rdma_rw_ctx rw;
struct work_struct comp_work;
struct scatterlist sg;
+ bool ctx_init_done;
};
static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc)
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 4b615c1451e7..15711dcc6f58 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -710,7 +710,7 @@ vema_get_port(struct opa_vnic_ctrl_port *cport, u8 port_num)
/**
* opa_vnic_vema_send_trap -- This function sends a trap to the EM
- * @cport: pointer to vnic control port
+ * @adapter: pointer to vnic adapter
* @data: pointer to trap data filled by calling function
* @lid: issuers lid (encap_slid from vesw_port_info)
*
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 972d4b3c5223..b48843833d69 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -41,6 +41,7 @@
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
+#include <linux/inet.h>
#include <rdma/ib_cache.h>
#include <linux/atomic.h>
@@ -144,7 +145,9 @@ static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
const char *opname);
-static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+static int srp_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event);
static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;
@@ -265,8 +268,8 @@ static void srp_qp_event(struct ib_event *event, void *context)
ib_event_msg(event->event), event->event);
}
-static int srp_init_qp(struct srp_target_port *target,
- struct ib_qp *qp)
+static int srp_init_ib_qp(struct srp_target_port *target,
+ struct ib_qp *qp)
{
struct ib_qp_attr *attr;
int ret;
@@ -277,7 +280,7 @@ static int srp_init_qp(struct srp_target_port *target,
ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
target->srp_host->port,
- be16_to_cpu(target->pkey),
+ be16_to_cpu(target->ib_cm.pkey),
&attr->pkey_index);
if (ret)
goto out;
@@ -298,32 +301,110 @@ out:
return ret;
}
-static int srp_new_cm_id(struct srp_rdma_ch *ch)
+static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct ib_cm_id *new_cm_id;
new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
- srp_cm_handler, ch);
+ srp_ib_cm_handler, ch);
if (IS_ERR(new_cm_id))
return PTR_ERR(new_cm_id);
- if (ch->cm_id)
- ib_destroy_cm_id(ch->cm_id);
- ch->cm_id = new_cm_id;
+ if (ch->ib_cm.cm_id)
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
+ ch->ib_cm.cm_id = new_cm_id;
if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
target->srp_host->port))
- ch->path.rec_type = SA_PATH_REC_TYPE_OPA;
+ ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
else
- ch->path.rec_type = SA_PATH_REC_TYPE_IB;
- ch->path.sgid = target->sgid;
- ch->path.dgid = target->orig_dgid;
- ch->path.pkey = target->pkey;
- ch->path.service_id = target->service_id;
+ ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
+ ch->ib_cm.path.sgid = target->sgid;
+ ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
+ ch->ib_cm.path.pkey = target->ib_cm.pkey;
+ ch->ib_cm.path.service_id = target->ib_cm.service_id;
return 0;
}
+static const char *inet_ntop(const void *sa, char *dst, unsigned int size)
+{
+ switch (((struct sockaddr *)sa)->sa_family) {
+ case AF_INET:
+ snprintf(dst, size, "%pI4",
+ &((struct sockaddr_in *)sa)->sin_addr);
+ break;
+ case AF_INET6:
+ snprintf(dst, size, "%pI6",
+ &((struct sockaddr_in6 *)sa)->sin6_addr);
+ break;
+ default:
+ snprintf(dst, size, "???");
+ break;
+ }
+ return dst;
+}
+
+static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ struct rdma_cm_id *new_cm_id;
+ char src_addr[64], dst_addr[64];
+ int ret;
+
+ new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(new_cm_id)) {
+ ret = PTR_ERR(new_cm_id);
+ new_cm_id = NULL;
+ goto out;
+ }
+
+ init_completion(&ch->done);
+ ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
+ (struct sockaddr *)&target->rdma_cm.src : NULL,
+ (struct sockaddr *)&target->rdma_cm.dst,
+ SRP_PATH_REC_TIMEOUT_MS);
+ if (ret) {
+ pr_err("No route available from %s to %s (%d)\n",
+ target->rdma_cm.src_specified ?
+ inet_ntop(&target->rdma_cm.src, src_addr,
+ sizeof(src_addr)) : "(any)",
+ inet_ntop(&target->rdma_cm.dst, dst_addr,
+ sizeof(dst_addr)),
+ ret);
+ goto out;
+ }
+ ret = wait_for_completion_interruptible(&ch->done);
+ if (ret < 0)
+ goto out;
+
+ ret = ch->status;
+ if (ret) {
+ pr_err("Resolving address %s failed (%d)\n",
+ inet_ntop(&target->rdma_cm.dst, dst_addr,
+ sizeof(dst_addr)),
+ ret);
+ goto out;
+ }
+
+ swap(ch->rdma_cm.cm_id, new_cm_id);
+
+out:
+ if (new_cm_id)
+ rdma_destroy_id(new_cm_id);
+
+ return ret;
+}
+
+static int srp_new_cm_id(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+
+ return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
+ srp_new_ib_cm_id(ch);
+}
+
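
[Editor's note] srp_new_rdma_cm_id() above follows the standard active-side RDMA CM bring-up: create an ID bound to an event handler, start asynchronous address resolution, then block on a completion that the handler fires once RDMA_CM_EVENT_ADDR_RESOLVED (or an error event) arrives. A condensed, hedged sketch of that handshake; struct my_ctx and my_cm_handler are illustrative stand-ins, and only the two resolution outcomes are handled:

#include <linux/completion.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>

struct my_ctx {
	struct rdma_cm_id *cm_id;
	struct completion done;
	int status;
};

static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
{
	struct my_ctx *ctx = id->context;

	ctx->status = ev->event == RDMA_CM_EVENT_ADDR_RESOLVED ? 0 : -ENXIO;
	complete(&ctx->done);	/* wake the waiter below */
	return 0;
}

static int resolve_addr_sync(struct my_ctx *ctx, struct sockaddr *dst,
			     int timeout_ms)
{
	int ret;

	ctx->cm_id = rdma_create_id(&init_net, my_cm_handler, ctx,
				    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ctx->cm_id))
		return PTR_ERR(ctx->cm_id);

	init_completion(&ctx->done);
	ret = rdma_resolve_addr(ctx->cm_id, NULL, dst, timeout_ms);
	if (ret)
		return ret;
	wait_for_completion(&ctx->done);
	return ctx->status;	/* 0 only after ADDR_RESOLVED */
}
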
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
struct srp_device *dev = target->srp_host->srp_dev;
@@ -521,16 +602,25 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
init_attr->send_cq = send_cq;
init_attr->recv_cq = recv_cq;
- qp = ib_create_qp(dev->pd, init_attr);
- if (IS_ERR(qp)) {
- ret = PTR_ERR(qp);
+ if (target->using_rdma_cm) {
+ ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
+ qp = ch->rdma_cm.cm_id->qp;
+ } else {
+ qp = ib_create_qp(dev->pd, init_attr);
+ if (!IS_ERR(qp)) {
+ ret = srp_init_ib_qp(target, qp);
+ if (ret)
+ ib_destroy_qp(qp);
+ } else {
+ ret = PTR_ERR(qp);
+ }
+ }
+ if (ret) {
+ pr_err("QP creation failed for dev %s: %d\n",
+ dev_name(&dev->dev->dev), ret);
goto err_send_cq;
}
- ret = srp_init_qp(target, qp);
- if (ret)
- goto err_qp;
-
if (dev->use_fast_reg) {
fr_pool = srp_alloc_fr_pool(target);
if (IS_ERR(fr_pool)) {
@@ -574,7 +664,10 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
return 0;
err_qp:
- ib_destroy_qp(qp);
+ if (target->using_rdma_cm)
+ rdma_destroy_qp(ch->rdma_cm.cm_id);
+ else
+ ib_destroy_qp(qp);
err_send_cq:
ib_free_cq(send_cq);
@@ -600,9 +693,16 @@ static void srp_free_ch_ib(struct srp_target_port *target,
if (!ch->target)
return;
- if (ch->cm_id) {
- ib_destroy_cm_id(ch->cm_id);
- ch->cm_id = NULL;
+ if (target->using_rdma_cm) {
+ if (ch->rdma_cm.cm_id) {
+ rdma_destroy_id(ch->rdma_cm.cm_id);
+ ch->rdma_cm.cm_id = NULL;
+ }
+ } else {
+ if (ch->ib_cm.cm_id) {
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
+ ch->ib_cm.cm_id = NULL;
+ }
}
/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
@@ -658,16 +758,16 @@ static void srp_path_rec_completion(int status,
shost_printk(KERN_ERR, target->scsi_host,
PFX "Got failed path rec status %d\n", status);
else
- ch->path = *pathrec;
+ ch->ib_cm.path = *pathrec;
complete(&ch->done);
}
-static int srp_lookup_path(struct srp_rdma_ch *ch)
+static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
int ret = -ENODEV;
- ch->path.numb_path = 1;
+ ch->ib_cm.path.numb_path = 1;
init_completion(&ch->done);
@@ -678,10 +778,10 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
if (!scsi_host_get(target->scsi_host))
goto out;
- ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
+ ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
target->srp_host->srp_dev->dev,
target->srp_host->port,
- &ch->path,
+ &ch->ib_cm.path,
IB_SA_PATH_REC_SERVICE_ID |
IB_SA_PATH_REC_DGID |
IB_SA_PATH_REC_SGID |
@@ -690,8 +790,8 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
SRP_PATH_REC_TIMEOUT_MS,
GFP_KERNEL,
srp_path_rec_completion,
- ch, &ch->path_query);
- ret = ch->path_query_id;
+ ch, &ch->ib_cm.path_query);
+ ret = ch->ib_cm.path_query_id;
if (ret < 0)
goto put;
@@ -702,7 +802,10 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
ret = ch->status;
if (ret < 0)
shost_printk(KERN_WARNING, target->scsi_host,
- PFX "Path record query failed\n");
+ PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
+ ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
+ be16_to_cpu(target->ib_cm.pkey),
+ be64_to_cpu(target->ib_cm.service_id));
put:
scsi_host_put(target->scsi_host);
@@ -711,6 +814,34 @@ out:
return ret;
}
+static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ int ret;
+
+ init_completion(&ch->done);
+
+ ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
+ if (ret)
+ return ret;
+
+ wait_for_completion_interruptible(&ch->done);
+
+ if (ch->status != 0)
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Path resolution failed\n");
+
+ return ch->status;
+}
+
+static int srp_lookup_path(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+
+ return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
+ srp_ib_lookup_path(ch);
+}
+
static u8 srp_get_subnet_timeout(struct srp_host *host)
{
struct ib_port_attr attr;
@@ -732,48 +863,76 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
struct srp_target_port *target = ch->target;
struct {
- struct ib_cm_req_param param;
- struct srp_login_req priv;
+ struct rdma_conn_param rdma_param;
+ struct srp_login_req_rdma rdma_req;
+ struct ib_cm_req_param ib_param;
+ struct srp_login_req ib_req;
} *req = NULL;
+ char *ipi, *tpi;
int status;
- u8 subnet_timeout;
-
- subnet_timeout = srp_get_subnet_timeout(target->srp_host);
req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->param.primary_path = &ch->path;
- req->param.alternate_path = NULL;
- req->param.service_id = target->service_id;
- req->param.qp_num = ch->qp->qp_num;
- req->param.qp_type = ch->qp->qp_type;
- req->param.private_data = &req->priv;
- req->param.private_data_len = sizeof req->priv;
- req->param.flow_control = 1;
-
- get_random_bytes(&req->param.starting_psn, 4);
- req->param.starting_psn &= 0xffffff;
+ req->ib_param.flow_control = 1;
+ req->ib_param.retry_count = target->tl_retry_count;
/*
* Pick some arbitrary defaults here; we could make these
* module parameters if anyone cared about setting them.
*/
- req->param.responder_resources = 4;
- req->param.remote_cm_response_timeout = subnet_timeout + 2;
- req->param.local_cm_response_timeout = subnet_timeout + 2;
- req->param.retry_count = target->tl_retry_count;
- req->param.rnr_retry_count = 7;
- req->param.max_cm_retries = 15;
-
- req->priv.opcode = SRP_LOGIN_REQ;
- req->priv.tag = 0;
- req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
- req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ req->ib_param.responder_resources = 4;
+ req->ib_param.rnr_retry_count = 7;
+ req->ib_param.max_cm_retries = 15;
+
+ req->ib_req.opcode = SRP_LOGIN_REQ;
+ req->ib_req.tag = 0;
+ req->ib_req.req_it_iu_len = cpu_to_be32(target->max_iu_len);
+ req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
SRP_BUF_FORMAT_INDIRECT);
- req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
- SRP_MULTICHAN_SINGLE);
+ req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
+ SRP_MULTICHAN_SINGLE);
+
+ if (target->using_rdma_cm) {
+ req->rdma_param.flow_control = req->ib_param.flow_control;
+ req->rdma_param.responder_resources =
+ req->ib_param.responder_resources;
+ req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
+ req->rdma_param.retry_count = req->ib_param.retry_count;
+ req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
+ req->rdma_param.private_data = &req->rdma_req;
+ req->rdma_param.private_data_len = sizeof(req->rdma_req);
+
+ req->rdma_req.opcode = req->ib_req.opcode;
+ req->rdma_req.tag = req->ib_req.tag;
+ req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
+ req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
+ req->rdma_req.req_flags = req->ib_req.req_flags;
+
+ ipi = req->rdma_req.initiator_port_id;
+ tpi = req->rdma_req.target_port_id;
+ } else {
+ u8 subnet_timeout;
+
+ subnet_timeout = srp_get_subnet_timeout(target->srp_host);
+
+ req->ib_param.primary_path = &ch->ib_cm.path;
+ req->ib_param.alternate_path = NULL;
+ req->ib_param.service_id = target->ib_cm.service_id;
+ get_random_bytes(&req->ib_param.starting_psn, 4);
+ req->ib_param.starting_psn &= 0xffffff;
+ req->ib_param.qp_num = ch->qp->qp_num;
+ req->ib_param.qp_type = ch->qp->qp_type;
+ req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
+ req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
+ req->ib_param.private_data = &req->ib_req;
+ req->ib_param.private_data_len = sizeof(req->ib_req);
+
+ ipi = req->ib_req.initiator_port_id;
+ tpi = req->ib_req.target_port_id;
+ }
+
/*
* In the published SRP specification (draft rev. 16a), the
* port identifier format is 8 bytes of ID extension followed
@@ -784,19 +943,15 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
* recognized by the I/O Class they report.
*/
if (target->io_class == SRP_REV10_IB_IO_CLASS) {
- memcpy(req->priv.initiator_port_id,
- &target->sgid.global.interface_id, 8);
- memcpy(req->priv.initiator_port_id + 8,
- &target->initiator_ext, 8);
- memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
- memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
+ memcpy(ipi, &target->sgid.global.interface_id, 8);
+ memcpy(ipi + 8, &target->initiator_ext, 8);
+ memcpy(tpi, &target->ioc_guid, 8);
+ memcpy(tpi + 8, &target->id_ext, 8);
} else {
- memcpy(req->priv.initiator_port_id,
- &target->initiator_ext, 8);
- memcpy(req->priv.initiator_port_id + 8,
- &target->sgid.global.interface_id, 8);
- memcpy(req->priv.target_port_id, &target->id_ext, 8);
- memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
+ memcpy(ipi, &target->initiator_ext, 8);
+ memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
+ memcpy(tpi, &target->id_ext, 8);
+ memcpy(tpi + 8, &target->ioc_guid, 8);
}
/*
@@ -809,12 +964,14 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
PFX "Topspin/Cisco initiator port ID workaround "
"activated for target GUID %016llx\n",
be64_to_cpu(target->ioc_guid));
- memset(req->priv.initiator_port_id, 0, 8);
- memcpy(req->priv.initiator_port_id + 8,
- &target->srp_host->srp_dev->dev->node_guid, 8);
+ memset(ipi, 0, 8);
+ memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
}
- status = ib_send_cm_req(ch->cm_id, &req->param);
+ if (target->using_rdma_cm)
+ status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
+ else
+ status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
kfree(req);
@@ -841,14 +998,23 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
static void srp_disconnect_target(struct srp_target_port *target)
{
struct srp_rdma_ch *ch;
- int i;
+ int i, ret;
/* XXX should send SRP_I_LOGOUT request */
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
ch->connected = false;
- if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
+ ret = 0;
+ if (target->using_rdma_cm) {
+ if (ch->rdma_cm.cm_id)
+ rdma_disconnect(ch->rdma_cm.cm_id);
+ } else {
+ if (ch->ib_cm.cm_id)
+ ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
+ NULL, 0);
+ }
+ if (ret < 0) {
shost_printk(KERN_DEBUG, target->scsi_host,
PFX "Sending CM DREQ failed\n");
}
@@ -962,6 +1128,7 @@ static void srp_remove_target(struct srp_target_port *target)
scsi_remove_host(target->scsi_host);
srp_stop_rport_timers(target->rport);
srp_disconnect_target(target);
+ kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
srp_free_ch_ib(target, ch);
@@ -2349,7 +2516,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
struct srp_target_port *target = ch->target;
struct ib_qp_attr *qp_attr = NULL;
int attr_mask = 0;
- int ret;
+ int ret = 0;
int i;
if (lrsp->opcode == SRP_LOGIN_RSP) {
@@ -2379,40 +2546,42 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
goto error;
}
- ret = -ENOMEM;
- qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
- if (!qp_attr)
- goto error;
-
- qp_attr->qp_state = IB_QPS_RTR;
- ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
- if (ret)
- goto error_free;
-
- ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
- if (ret)
- goto error_free;
-
for (i = 0; i < target->queue_size; i++) {
struct srp_iu *iu = ch->rx_ring[i];
ret = srp_post_recv(ch, iu);
if (ret)
- goto error_free;
+ goto error;
}
- qp_attr->qp_state = IB_QPS_RTS;
- ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
- if (ret)
- goto error_free;
+ if (!target->using_rdma_cm) {
+ ret = -ENOMEM;
+ qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
+ if (!qp_attr)
+ goto error;
- target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
+ qp_attr->qp_state = IB_QPS_RTR;
+ ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+ if (ret)
+ goto error_free;
- ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
- if (ret)
- goto error_free;
+ ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
+ if (ret)
+ goto error_free;
- ret = ib_send_cm_rtu(cm_id, NULL, 0);
+ qp_attr->qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+ if (ret)
+ goto error_free;
+
+ target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
+
+ ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
+ if (ret)
+ goto error_free;
+
+ ret = ib_send_cm_rtu(cm_id, NULL, 0);
+ }
error_free:
kfree(qp_attr);
@@ -2421,41 +2590,43 @@ error:
ch->status = ret;
}
-static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event,
- struct srp_rdma_ch *ch)
+static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
+ struct ib_cm_event *event,
+ struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct Scsi_Host *shost = target->scsi_host;
struct ib_class_port_info *cpi;
int opcode;
+ u16 dlid;
switch (event->param.rej_rcvd.reason) {
case IB_CM_REJ_PORT_CM_REDIRECT:
cpi = event->param.rej_rcvd.ari;
- sa_path_set_dlid(&ch->path, ntohs(cpi->redirect_lid));
- ch->path.pkey = cpi->redirect_pkey;
+ dlid = be16_to_cpu(cpi->redirect_lid);
+ sa_path_set_dlid(&ch->ib_cm.path, dlid);
+ ch->ib_cm.path.pkey = cpi->redirect_pkey;
cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
- memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
+ memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
- ch->status = sa_path_get_dlid(&ch->path) ?
- SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
+ ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
break;
case IB_CM_REJ_PORT_REDIRECT:
if (srp_target_is_topspin(target)) {
+ union ib_gid *dgid = &ch->ib_cm.path.dgid;
+
/*
* Topspin/Cisco SRP gateways incorrectly send
* reject reason code 25 when they mean 24
* (port redirect).
*/
- memcpy(ch->path.dgid.raw,
- event->param.rej_rcvd.ari, 16);
+ memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
shost_printk(KERN_DEBUG, shost,
PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
- be64_to_cpu(ch->path.dgid.global.subnet_prefix),
- be64_to_cpu(ch->path.dgid.global.interface_id));
+ be64_to_cpu(dgid->global.subnet_prefix),
+ be64_to_cpu(dgid->global.interface_id));
ch->status = SRP_PORT_REDIRECT;
} else {
@@ -2484,7 +2655,8 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
shost_printk(KERN_WARNING, shost, PFX
"SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
target->sgid.raw,
- target->orig_dgid.raw, reason);
+ target->ib_cm.orig_dgid.raw,
+ reason);
} else
shost_printk(KERN_WARNING, shost,
" REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
@@ -2504,7 +2676,7 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
}
}
-static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int srp_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
struct srp_rdma_ch *ch = cm_id->context;
struct srp_target_port *target = ch->target;
@@ -2527,7 +2699,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
comp = 1;
- srp_cm_rej_handler(cm_id, event, ch);
+ srp_ib_cm_rej_handler(cm_id, event, ch);
break;
case IB_CM_DREQ_RECEIVED:
@@ -2565,6 +2737,135 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
return 0;
}
+static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
+ struct rdma_cm_event *event)
+{
+ struct srp_target_port *target = ch->target;
+ struct Scsi_Host *shost = target->scsi_host;
+ int opcode;
+
+ switch (event->status) {
+ case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
+ ch->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REJ_CONSUMER_DEFINED:
+ opcode = *(u8 *) event->param.conn.private_data;
+ if (opcode == SRP_LOGIN_REJ) {
+ struct srp_login_rej *rej =
+ (struct srp_login_rej *)
+ event->param.conn.private_data;
+ u32 reason = be32_to_cpu(rej->reason);
+
+ if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
+ shost_printk(KERN_WARNING, shost,
+ PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
+ else
+ shost_printk(KERN_WARNING, shost,
+ PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
+ } else {
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
+ opcode);
+ }
+ ch->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REJ_STALE_CONN:
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: stale connection\n");
+ ch->status = SRP_STALE_CONN;
+ break;
+
+ default:
+ shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
+ event->status);
+ ch->status = -ECONNRESET;
+ break;
+ }
+}
+
+static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct srp_rdma_ch *ch = cm_id->context;
+ struct srp_target_port *target = ch->target;
+ int comp = 0;
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ ch->status = 0;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ ch->status = -ENXIO;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ ch->status = 0;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ ch->status = -EHOSTUNREACH;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ shost_printk(KERN_DEBUG, target->scsi_host,
+ PFX "Sending CM REQ failed\n");
+ comp = 1;
+ ch->status = -ECONNRESET;
+ break;
+
+ case RDMA_CM_EVENT_ESTABLISHED:
+ comp = 1;
+ srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
+ break;
+
+ case RDMA_CM_EVENT_REJECTED:
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
+ comp = 1;
+
+ srp_rdma_cm_rej_handler(ch, event);
+ break;
+
+ case RDMA_CM_EVENT_DISCONNECTED:
+ if (ch->connected) {
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "received DREQ\n");
+ rdma_disconnect(ch->rdma_cm.cm_id);
+ comp = 1;
+ ch->status = 0;
+ queue_work(system_long_wq, &target->tl_err_work);
+ }
+ break;
+
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "connection closed\n");
+
+ comp = 1;
+ ch->status = 0;
+ break;
+
+ default:
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Unhandled CM event %d\n", event->event);
+ break;
+ }
+
+ if (comp)
+ complete(&ch->done);
+
+ return 0;
+}
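+
For context, the events this handler completes on are produced by the standard initiator-side RDMA/CM sequence. A minimal sketch of the address- and route-resolution steps, reusing ch->done the same way the handler above does (illustrative only; srp_rdma_connect and the 5000 ms timeout are hypothetical, not the driver's code):

	static int srp_rdma_connect(struct srp_rdma_ch *ch,
				    struct sockaddr *src, struct sockaddr *dst)
	{
		int ret;

		ch->rdma_cm.cm_id = rdma_create_id(ch->target->net,
						   srp_rdma_cm_handler, ch,
						   RDMA_PS_TCP, IB_QPT_RC);
		if (IS_ERR(ch->rdma_cm.cm_id))
			return PTR_ERR(ch->rdma_cm.cm_id);

		init_completion(&ch->done);
		ret = rdma_resolve_addr(ch->rdma_cm.cm_id, src, dst, 5000);
		if (ret)
			return ret;
		wait_for_completion(&ch->done);	/* ADDR_RESOLVED / ADDR_ERROR */
		if (ch->status)
			return ch->status;

		init_completion(&ch->done);
		ret = rdma_resolve_route(ch->rdma_cm.cm_id, 5000);
		if (ret)
			return ret;
		wait_for_completion(&ch->done);	/* ROUTE_RESOLVED / ROUTE_ERROR */
		/* on success: create the QP, then rdma_connect(); the
		 * ESTABLISHED event above then runs srp_cm_rep_handler(). */
		return ch->status;
	}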
+
/**
* srp_change_queue_depth - setting device queue depth
* @sdev: scsi device struct
@@ -2717,6 +3018,16 @@ static int srp_reset_host(struct scsi_cmnd *scmnd)
return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}
+static int srp_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct srp_target_port *target = host_to_target(shost);
+
+ if (target->target_can_queue)
+ starget->can_queue = target->target_can_queue;
+ return 0;
+}
+
static int srp_slave_alloc(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
@@ -2766,7 +3077,10 @@ static ssize_t show_service_id(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "0x%016llx\n",
+ be64_to_cpu(target->ib_cm.service_id));
}
static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
@@ -2774,7 +3088,9 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
}
static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
@@ -2791,7 +3107,9 @@ static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
struct srp_target_port *target = host_to_target(class_to_shost(dev));
struct srp_rdma_ch *ch = &target->ch[0];
- return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
}
static ssize_t show_orig_dgid(struct device *dev,
@@ -2799,7 +3117,9 @@ static ssize_t show_orig_dgid(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
}
static ssize_t show_req_lim(struct device *dev,
@@ -2921,6 +3241,7 @@ static struct scsi_host_template srp_template = {
.module = THIS_MODULE,
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
+ .target_alloc = srp_target_alloc,
.slave_alloc = srp_slave_alloc,
.slave_configure = srp_slave_configure,
.info = srp_target_info,
@@ -3044,6 +3365,9 @@ static bool srp_conn_unique(struct srp_host *host,
if (t != target &&
target->id_ext == t->id_ext &&
target->ioc_guid == t->ioc_guid &&
+ (!target->using_rdma_cm ||
+ memcmp(&target->rdma_cm.dst, &t->rdma_cm.dst,
+ sizeof(target->rdma_cm.dst)) == 0) &&
target->initiator_ext == t->initiator_ext) {
ret = false;
break;
@@ -3060,6 +3384,9 @@ out:
*
* id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
* pkey=<P_Key>,service_id=<service ID>
+ * or
+ * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
+ * [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
*
* to the add_target sysfs attribute.
*/
@@ -3080,11 +3407,20 @@ enum {
SRP_OPT_COMP_VECTOR = 1 << 12,
SRP_OPT_TL_RETRY_COUNT = 1 << 13,
SRP_OPT_QUEUE_SIZE = 1 << 14,
- SRP_OPT_ALL = (SRP_OPT_ID_EXT |
- SRP_OPT_IOC_GUID |
- SRP_OPT_DGID |
- SRP_OPT_PKEY |
- SRP_OPT_SERVICE_ID),
+ SRP_OPT_IP_SRC = 1 << 15,
+ SRP_OPT_IP_DEST = 1 << 16,
+ SRP_OPT_TARGET_CAN_QUEUE = 1 << 17,
+};
+
+static unsigned int srp_opt_mandatory[] = {
+ SRP_OPT_ID_EXT |
+ SRP_OPT_IOC_GUID |
+ SRP_OPT_DGID |
+ SRP_OPT_PKEY |
+ SRP_OPT_SERVICE_ID,
+ SRP_OPT_ID_EXT |
+ SRP_OPT_IOC_GUID |
+ SRP_OPT_IP_DEST,
};
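
Each element of srp_opt_mandatory[] is one complete parameter set: the first describes an IB/CM login, the second an RDMA/CM login. The option mask collected while parsing is accepted if it covers at least one entry in full. A worked example of that check (mirroring the loop near the end of srp_parse_options below):

	/* opt_mask for "id_ext=...,ioc_guid=...,dest=10.0.0.2:5555" */
	int opt_mask = SRP_OPT_ID_EXT | SRP_OPT_IOC_GUID | SRP_OPT_IP_DEST;

	/* entry 0 (IB/CM set) fails: DGID, PKEY and SERVICE_ID are missing;
	 * entry 1 (RDMA/CM set) matches: */
	bool ok = (opt_mask & srp_opt_mandatory[1]) == srp_opt_mandatory[1];
	/* ok == true, so target creation proceeds */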
static const match_table_t srp_opt_tokens = {
@@ -3095,6 +3431,7 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_SERVICE_ID, "service_id=%s" },
{ SRP_OPT_MAX_SECT, "max_sect=%d" },
{ SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
+ { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" },
{ SRP_OPT_IO_CLASS, "io_class=%x" },
{ SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
{ SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
@@ -3103,15 +3440,33 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
{ SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
{ SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
+ { SRP_OPT_IP_SRC, "src=%s" },
+ { SRP_OPT_IP_DEST, "dest=%s" },
{ SRP_OPT_ERR, NULL }
};
-static int srp_parse_options(const char *buf, struct srp_target_port *target)
+static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
+ const char *addr_port_str)
+{
+ char *addr = kstrdup(addr_port_str, GFP_KERNEL);
+ char *port_str = addr;
+ int ret;
+
+ if (!addr)
+ return -ENOMEM;
+ strsep(&port_str, ":");
+ ret = inet_pton_with_scope(net, AF_UNSPEC, addr, port_str, sa);
+ kfree(addr);
+ return ret;
+}
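+
A worked example of srp_parse_in() above: strsep() overwrites the first ':' with a NUL byte and advances its argument past it, leaving two separate C strings in the duplicated buffer (the split happens at the first colon, so this sketch uses an IPv4-style address):

	char buf[] = "192.168.1.17:3260";
	char *addr = buf;
	char *port_str = addr;

	strsep(&port_str, ":");
	/* now: addr == "192.168.1.17", port_str == "3260";
	 * both are then handed to inet_pton_with_scope(). */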
+
+static int srp_parse_options(struct net *net, const char *buf,
+ struct srp_target_port *target)
{
char *options, *sep_opt;
char *p;
- char dgid[3];
substring_t args[MAX_OPT_ARGS];
+ unsigned long long ull;
int opt_mask = 0;
int token;
int ret = -EINVAL;
@@ -3136,7 +3491,13 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("invalid id_ext parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->id_ext = cpu_to_be64(ull);
kfree(p);
break;
@@ -3146,7 +3507,13 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("invalid ioc_guid parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->ioc_guid = cpu_to_be64(ull);
kfree(p);
break;
@@ -3162,16 +3529,10 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
goto out;
}
- for (i = 0; i < 16; ++i) {
- strlcpy(dgid, p + i * 2, sizeof(dgid));
- if (sscanf(dgid, "%hhx",
- &target->orig_dgid.raw[i]) < 1) {
- ret = -EINVAL;
- kfree(p);
- goto out;
- }
- }
+ ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
kfree(p);
+ if (ret < 0)
+ goto out;
break;
case SRP_OPT_PKEY:
@@ -3179,7 +3540,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
pr_warn("bad P_Key parameter '%s'\n", p);
goto out;
}
- target->pkey = cpu_to_be16(token);
+ target->ib_cm.pkey = cpu_to_be16(token);
break;
case SRP_OPT_SERVICE_ID:
@@ -3188,7 +3549,45 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("bad service_id parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->ib_cm.service_id = cpu_to_be64(ull);
+ kfree(p);
+ break;
+
+ case SRP_OPT_IP_SRC:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
+ if (ret < 0) {
+ pr_warn("bad source parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->rdma_cm.src_specified = true;
+ kfree(p);
+ break;
+
+ case SRP_OPT_IP_DEST:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
+ if (ret < 0) {
+ pr_warn("bad dest parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->using_rdma_cm = true;
kfree(p);
break;
@@ -3221,6 +3620,15 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
target->scsi_host->cmd_per_lun = token;
break;
+ case SRP_OPT_TARGET_CAN_QUEUE:
+ if (match_int(args, &token) || token < 1) {
+ pr_warn("bad max target_can_queue parameter '%s'\n",
+ p);
+ goto out;
+ }
+ target->target_can_queue = token;
+ break;
+
case SRP_OPT_IO_CLASS:
if (match_hex(args, &token)) {
pr_warn("bad IO class parameter '%s'\n", p);
@@ -3242,7 +3650,13 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("bad initiator_ext value '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->initiator_ext = cpu_to_be64(ull);
kfree(p);
break;
@@ -3297,14 +3711,14 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
}
}
- if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
- ret = 0;
- else
- for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
- if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
- !(srp_opt_tokens[i].token & opt_mask))
- pr_warn("target creation request is missing parameter '%s'\n",
- srp_opt_tokens[i].pattern);
+ for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
+ if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
+ ret = 0;
+ break;
+ }
+ }
+ if (ret)
+ pr_warn("target creation request is missing one or more parameters\n");
if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
&& (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
@@ -3345,6 +3759,7 @@ static ssize_t srp_create_target(struct device *dev,
target = host_to_target(target_host);
+ target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
@@ -3366,18 +3781,29 @@ static ssize_t srp_create_target(struct device *dev,
if (ret < 0)
goto put;
- ret = srp_parse_options(buf, target);
+ ret = srp_parse_options(target->net, buf, target);
if (ret)
goto out;
target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
if (!srp_conn_unique(target->srp_host, target)) {
- shost_printk(KERN_INFO, target->scsi_host,
- PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
- be64_to_cpu(target->id_ext),
- be64_to_cpu(target->ioc_guid),
- be64_to_cpu(target->initiator_ext));
+ if (target->using_rdma_cm) {
+ char dst_addr[64];
+
+ shost_printk(KERN_INFO, target->scsi_host,
+ PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%s\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ inet_ntop(&target->rdma_cm.dst, dst_addr,
+ sizeof(dst_addr)));
+ } else {
+ shost_printk(KERN_INFO, target->scsi_host,
+ PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ be64_to_cpu(target->initiator_ext));
+ }
ret = -EEXIST;
goto out;
}
@@ -3478,11 +3904,18 @@ static ssize_t srp_create_target(struct device *dev,
ret = srp_connect_ch(ch, multich);
if (ret) {
+ char dst[64];
+
+ if (target->using_rdma_cm)
+ inet_ntop(&target->rdma_cm.dst, dst,
+ sizeof(dst));
+ else
+ snprintf(dst, sizeof(dst), "%pI6",
+ target->ib_cm.orig_dgid.raw);
shost_printk(KERN_ERR, target->scsi_host,
- PFX "Connection %d/%d to %pI6 failed\n",
+ PFX "Connection %d/%d to %s failed\n",
ch_start + cpu_idx,
- target->ch_count,
- ch->target->orig_dgid.raw);
+ target->ch_count, dst);
if (node_idx == 0 && cpu_idx == 0) {
goto free_ch;
} else {
@@ -3507,13 +3940,25 @@ connected:
goto err_disconnect;
if (target->state != SRP_TARGET_REMOVED) {
- shost_printk(KERN_DEBUG, target->scsi_host, PFX
- "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
- be64_to_cpu(target->id_ext),
- be64_to_cpu(target->ioc_guid),
- be16_to_cpu(target->pkey),
- be64_to_cpu(target->service_id),
- target->sgid.raw, target->orig_dgid.raw);
+ if (target->using_rdma_cm) {
+ char dst[64];
+
+ inet_ntop(&target->rdma_cm.dst, dst, sizeof(dst));
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX
+ "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %s\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ target->sgid.raw, dst);
+ } else {
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX
+ "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ be16_to_cpu(target->ib_cm.pkey),
+ be64_to_cpu(target->ib_cm.service_id),
+ target->sgid.raw,
+ target->ib_cm.orig_dgid.raw);
+ }
}
ret = count;
@@ -3523,8 +3968,16 @@ out:
put:
scsi_host_put(target->scsi_host);
- if (ret < 0)
+ if (ret < 0) {
+ /*
+ * If a call to srp_remove_target() has not been scheduled, drop
+ * the network namespace reference that was obtained earlier in
+ * this function.
+ */
+ if (target->state != SRP_TARGET_REMOVED)
+ kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
scsi_host_put(target->scsi_host);
+ }
return ret;
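
The reference taken by kobj_ns_grab_current() near the top of srp_create_target() must be returned exactly once: on this early-error path, or by the kobj_ns_drop() this patch adds to the target-removal code at the start of this diff. A hypothetical wrapper making the pairing explicit (the driver calls kobj_ns_drop() directly; only the grab/drop API and KOBJ_NS_TYPE_NET are real):

	static void srp_put_target_net(struct srp_target_port *target)
	{
		kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
		target->net = NULL;	/* guard against a double drop */
	}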
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index a814f5ef16f9..a2706086b9c7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -45,6 +45,7 @@
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>
+#include <rdma/rdma_cm.h>
enum {
SRP_PATH_REC_TIMEOUT_MS = 1000,
@@ -153,11 +154,18 @@ struct srp_rdma_ch {
struct completion done;
int status;
- struct sa_path_rec path;
- struct ib_sa_query *path_query;
- int path_query_id;
+ union {
+ struct ib_cm {
+ struct sa_path_rec path;
+ struct ib_sa_query *path_query;
+ int path_query_id;
+ struct ib_cm_id *cm_id;
+ } ib_cm;
+ struct rdma_cm {
+ struct rdma_cm_id *cm_id;
+ } rdma_cm;
+ };
- struct ib_cm_id *cm_id;
struct srp_iu **tx_ring;
struct srp_iu **rx_ring;
struct srp_request *req_ring;
@@ -182,6 +190,7 @@ struct srp_target_port {
/* read only in the hot path */
u32 global_rkey;
struct srp_rdma_ch *ch;
+ struct net *net;
u32 ch_count;
u32 lkey;
enum srp_target_state state;
@@ -194,7 +203,6 @@ struct srp_target_port {
union ib_gid sgid;
__be64 id_ext;
__be64 ioc_guid;
- __be64 service_id;
__be64 initiator_ext;
u16 io_class;
struct srp_host *srp_host;
@@ -203,6 +211,7 @@ struct srp_target_port {
char target_name[32];
unsigned int scsi_id;
unsigned int sg_tablesize;
+ unsigned int target_can_queue;
int mr_pool_size;
int mr_per_cmd;
int queue_size;
@@ -210,8 +219,28 @@ struct srp_target_port {
int comp_vector;
int tl_retry_count;
- union ib_gid orig_dgid;
- __be16 pkey;
+ bool using_rdma_cm;
+
+ union {
+ struct {
+ __be64 service_id;
+ union ib_gid orig_dgid;
+ __be16 pkey;
+ } ib_cm;
+ struct {
+ union {
+ struct sockaddr_in ip4;
+ struct sockaddr_in6 ip6;
+ struct sockaddr_storage ss;
+ } src;
+ union {
+ struct sockaddr_in ip4;
+ struct sockaddr_in6 ip6;
+ struct sockaddr_storage ss;
+ } dst;
+ bool src_specified;
+ } rdma_cm;
+ };
u32 rq_tmo_jiffies;
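
Because the transport-specific target fields now share storage in an anonymous union, using_rdma_cm is the discriminant every reader checks first, exactly as the sysfs and logging hunks earlier in this diff do. A minimal sketch of the pattern (hypothetical helper; the formatting calls match the ones the patch itself uses):

	static void srp_format_dest(struct srp_target_port *target,
				    char *buf, size_t len)
	{
		if (target->using_rdma_cm)
			inet_ntop(&target->rdma_cm.dst, buf, len);
		else
			snprintf(buf, len, "%pI6", target->ib_cm.orig_dgid.raw);
	}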
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 8a1bd354b1cc..0373b7c40902 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -41,6 +41,7 @@
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
+#include <rdma/ib_cache.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
@@ -120,7 +121,9 @@ static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
}
/**
- * srpt_event_handler() - Asynchronous IB event callback function.
+ * srpt_event_handler - asynchronous IB event callback function
+ * @handler: IB event handler registered by ib_register_event_handler().
+ * @event: Description of the event that occurred.
*
* Callback function called by the InfiniBand core when an asynchronous IB
* event occurs. This callback may occur in interrupt context. See also
@@ -132,6 +135,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
{
struct srpt_device *sdev;
struct srpt_port *sport;
+ u8 port_num;
sdev = ib_get_client_data(event->device, &srpt_client);
if (!sdev || sdev->device != event->device)
@@ -142,10 +146,15 @@ static void srpt_event_handler(struct ib_event_handler *handler,
switch (event->event) {
case IB_EVENT_PORT_ERR:
- if (event->element.port_num <= sdev->device->phys_port_cnt) {
- sport = &sdev->port[event->element.port_num - 1];
+ port_num = event->element.port_num - 1;
+ if (port_num < sdev->device->phys_port_cnt) {
+ sport = &sdev->port[port_num];
sport->lid = 0;
sport->sm_lid = 0;
+ } else {
+ WARN(true, "event %d: port_num %d out of range 1..%d\n",
+ event->event, port_num + 1,
+ sdev->device->phys_port_cnt);
}
break;
case IB_EVENT_PORT_ACTIVE:
@@ -155,25 +164,31 @@ static void srpt_event_handler(struct ib_event_handler *handler,
case IB_EVENT_CLIENT_REREGISTER:
case IB_EVENT_GID_CHANGE:
/* Refresh port data asynchronously. */
- if (event->element.port_num <= sdev->device->phys_port_cnt) {
- sport = &sdev->port[event->element.port_num - 1];
+ port_num = event->element.port_num - 1;
+ if (port_num < sdev->device->phys_port_cnt) {
+ sport = &sdev->port[port_num];
if (!sport->lid && !sport->sm_lid)
schedule_work(&sport->work);
+ } else {
+ WARN(true, "event %d: port_num %d out of range 1..%d\n",
+ event->event, port_num + 1,
+ sdev->device->phys_port_cnt);
}
break;
default:
- pr_err("received unrecognized IB event %d\n",
- event->event);
+ pr_err("received unrecognized IB event %d\n", event->event);
break;
}
}
/**
- * srpt_srq_event() - SRQ event callback function.
+ * srpt_srq_event - SRQ event callback function
+ * @event: Description of the event that occurred.
+ * @ctx: Context pointer specified at SRQ creation time.
*/
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
- pr_info("SRQ event %d\n", event->event);
+ pr_debug("SRQ event %d\n", event->event);
}
static const char *get_ch_state_name(enum rdma_ch_state s)
@@ -194,16 +209,18 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
}
/**
- * srpt_qp_event() - QP event callback function.
+ * srpt_qp_event - QP event callback function
+ * @event: Description of the event that occurred.
+ * @ch: SRPT RDMA channel.
*/
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
- pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
- event->event, ch->cm_id, ch->sess_name, ch->state);
+ pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
+ event->event, ch, ch->sess_name, ch->state);
switch (event->event) {
case IB_EVENT_COMM_EST:
- ib_cm_notify(ch->cm_id, event->event);
+ ib_cm_notify(ch->ib_cm.cm_id, event->event);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
pr_debug("%s-%d, state %s: received Last WQE event.\n",
@@ -217,8 +234,8 @@ static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
}
/**
- * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
- *
+ * srpt_set_ioc - initialize an IOUnitInfo structure
+ * @c_list: controller list.
* @slot: one-based slot number.
* @value: four-bit value.
*
@@ -241,7 +258,8 @@ static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
}
/**
- * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
+ * srpt_get_class_port_info - copy ClassPortInfo to a management datagram
+ * @mad: Datagram that will be sent as response to DM_ATTR_CLASS_PORT_INFO.
*
* See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
* Specification.
@@ -260,7 +278,8 @@ static void srpt_get_class_port_info(struct ib_dm_mad *mad)
}
/**
- * srpt_get_iou() - Write IOUnitInfo to a management datagram.
+ * srpt_get_iou - write IOUnitInfo to a management datagram
+ * @mad: Datagram that will be sent as response to DM_ATTR_IOU_INFO.
*
* See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
* Specification. See also section B.7, table B.6 in the SRP r16a document.
@@ -284,7 +303,10 @@ static void srpt_get_iou(struct ib_dm_mad *mad)
}
/**
- * srpt_get_ioc() - Write IOControllerprofile to a management datagram.
+ * srpt_get_ioc - write IOControllerProfile to a management datagram
+ * @sport: HCA port through which the MAD has been received.
+ * @slot: Slot number specified in DM_ATTR_IOC_PROFILE query.
+ * @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE.
*
* See also section 16.3.3.4 IOControllerProfile in the InfiniBand
* Architecture Specification. See also section B.7, table B.7 in the SRP
@@ -314,7 +336,7 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
if (sdev->use_srq)
send_queue_depth = sdev->srq_size;
else
- send_queue_depth = min(SRPT_RQ_SIZE,
+ send_queue_depth = min(MAX_SRPT_RQ_SIZE,
sdev->device->attrs.max_qp_wr);
memset(iocp, 0, sizeof(*iocp));
@@ -342,7 +364,12 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
}
/**
- * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
+ * srpt_get_svc_entries - write ServiceEntries to a management datagram
+ * @ioc_guid: I/O controller GUID to use in reply.
+ * @slot: I/O controller number.
+ * @hi: End of the range of service entries to be specified in the reply.
+ * @lo: Start of the range of service entries to be specified in the reply.
+ * @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES.
*
* See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
* Specification. See also section B.7, table B.8 in the SRP r16a document.
@@ -379,8 +406,8 @@ static void srpt_get_svc_entries(u64 ioc_guid,
}
/**
- * srpt_mgmt_method_get() - Process a received management datagram.
- * @sp: source port through which the MAD has been received.
+ * srpt_mgmt_method_get - process a received management datagram
+ * @sp: HCA port through which the MAD has been received.
* @rq_mad: received MAD.
* @rsp_mad: response MAD.
*/
@@ -419,7 +446,9 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
}
/**
- * srpt_mad_send_handler() - Post MAD-send callback function.
+ * srpt_mad_send_handler - MAD send completion callback
+ * @mad_agent: Return value of ib_register_mad_agent().
+ * @mad_wc: Work completion reporting that the MAD has been sent.
*/
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_wc)
@@ -429,7 +458,10 @@ static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
}
/**
- * srpt_mad_recv_handler() - MAD reception callback function.
+ * srpt_mad_recv_handler - MAD reception callback function
+ * @mad_agent: Return value of ib_register_mad_agent().
+ * @send_buf: Not used.
+ * @mad_wc: Work completion reporting that a MAD has been received.
*/
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_buf *send_buf,
@@ -493,8 +525,18 @@ err:
ib_free_recv_mad(mad_wc);
}
+static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
+{
+ const __be16 *g = (const __be16 *)guid;
+
+ return snprintf(buf, size, "%04x:%04x:%04x:%04x",
+ be16_to_cpu(g[0]), be16_to_cpu(g[1]),
+ be16_to_cpu(g[2]), be16_to_cpu(g[3]));
+}
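+
A worked example of srpt_format_guid(): a big-endian interface ID is split into four 16-bit groups and printed colon-separated.

	char buf[24];
	__be64 guid = cpu_to_be64(0x0002c90300a0b1c2ULL);

	srpt_format_guid(buf, sizeof(buf), &guid);
	/* buf == "0002:c903:00a0:b1c2" */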
+
/**
- * srpt_refresh_port() - Configure a HCA port.
+ * srpt_refresh_port - configure a HCA port
+ * @sport: SRPT HCA port.
*
* Enable InfiniBand management datagram processing, update the cached sm_lid,
* lid and gid values, and register a callback function for processing MADs
@@ -507,7 +549,6 @@ static int srpt_refresh_port(struct srpt_port *sport)
struct ib_mad_reg_req reg_req;
struct ib_port_modify port_modify;
struct ib_port_attr port_attr;
- __be16 *guid;
int ret;
memset(&port_modify, 0, sizeof(port_modify));
@@ -531,11 +572,8 @@ static int srpt_refresh_port(struct srpt_port *sport)
goto err_query_port;
sport->port_guid_wwn.priv = sport;
- guid = (__be16 *)&sport->gid.global.interface_id;
- snprintf(sport->port_guid, sizeof(sport->port_guid),
- "%04x:%04x:%04x:%04x",
- be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
- be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
+ srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
+ &sport->gid.global.interface_id);
sport->port_gid_wwn.priv = sport;
snprintf(sport->port_gid, sizeof(sport->port_gid),
"0x%016llx%016llx",
@@ -577,7 +615,8 @@ err_mod_port:
}
/**
- * srpt_unregister_mad_agent() - Unregister MAD callback functions.
+ * srpt_unregister_mad_agent - unregister MAD callback functions
+ * @sdev: SRPT HCA pointer.
*
* Note: It is safe to call this function more than once for the same device.
*/
@@ -602,7 +641,11 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
}
/**
- * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
+ * srpt_alloc_ioctx - allocate a SRPT I/O context structure
+ * @sdev: SRPT HCA pointer.
+ * @ioctx_size: I/O context size.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
*/
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
int ioctx_size, int dma_size,
@@ -633,7 +676,11 @@ err:
}
/**
- * srpt_free_ioctx() - Free an SRPT I/O context structure.
+ * srpt_free_ioctx - free a SRPT I/O context structure
+ * @sdev: SRPT HCA pointer.
+ * @ioctx: I/O context pointer.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
*/
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
int dma_size, enum dma_data_direction dir)
@@ -647,7 +694,7 @@ static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
}
/**
- * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
+ * srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures
* @sdev: Device to allocate the I/O context ring for.
* @ring_size: Number of elements in the I/O context ring.
* @ioctx_size: I/O context size.
@@ -685,7 +732,12 @@ out:
}
/**
- * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
+ * srpt_free_ioctx_ring - free the ring of SRPT I/O context structures
+ * @ioctx_ring: I/O context ring to be freed.
+ * @sdev: SRPT HCA pointer.
+ * @ring_size: Number of ring elements.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
*/
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
struct srpt_device *sdev, int ring_size,
@@ -702,23 +754,9 @@ static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
}
/**
- * srpt_get_cmd_state() - Get the state of a SCSI command.
- */
-static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
-{
- enum srpt_command_state state;
- unsigned long flags;
-
- BUG_ON(!ioctx);
-
- spin_lock_irqsave(&ioctx->spinlock, flags);
- state = ioctx->state;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
- return state;
-}
-
-/**
- * srpt_set_cmd_state() - Set the state of a SCSI command.
+ * srpt_set_cmd_state - set the state of a SCSI command
+ * @ioctx: Send I/O context.
+ * @new: New I/O context state.
*
* Does not modify the state of aborted commands. Returns the previous command
* state.
@@ -727,21 +765,19 @@ static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
enum srpt_command_state new)
{
enum srpt_command_state previous;
- unsigned long flags;
- BUG_ON(!ioctx);
-
- spin_lock_irqsave(&ioctx->spinlock, flags);
previous = ioctx->state;
if (previous != SRPT_STATE_DONE)
ioctx->state = new;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
return previous;
}
/**
- * srpt_test_and_set_cmd_state() - Test and set the state of a command.
+ * srpt_test_and_set_cmd_state - test and set the state of a command
+ * @ioctx: Send I/O context.
+ * @old: Current I/O context state.
+ * @new: New I/O context state.
*
* Returns true if and only if the previous command state was equal to 'old'.
*/
@@ -750,22 +786,23 @@ static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
enum srpt_command_state new)
{
enum srpt_command_state previous;
- unsigned long flags;
WARN_ON(!ioctx);
WARN_ON(old == SRPT_STATE_DONE);
WARN_ON(new == SRPT_STATE_NEW);
- spin_lock_irqsave(&ioctx->spinlock, flags);
previous = ioctx->state;
if (previous == old)
ioctx->state = new;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
return previous == old;
}
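
With the per-ioctx spinlock removed, this helper acts as a plain compare-and-set on the command state: the transition is simply skipped if the state has already moved on, for example because the command was aborted. Illustrative usage (hypothetical call site; the states are the ones used elsewhere in this file):

	/* advance NEW -> NEED_DATA only if nothing changed the state first */
	if (!srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEW,
					 SRPT_STATE_NEED_DATA))
		pr_debug("cmd state changed concurrently; skipping data phase\n");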
/**
- * srpt_post_recv() - Post an IB receive request.
+ * srpt_post_recv - post an IB receive request
+ * @sdev: SRPT HCA pointer.
+ * @ch: SRPT RDMA channel.
+ * @ioctx: Receive I/O context pointer.
*/
static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *ioctx)
@@ -791,7 +828,8 @@ static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
}
/**
- * srpt_zerolength_write() - Perform a zero-length RDMA write.
+ * srpt_zerolength_write - perform a zero-length RDMA write
+ * @ch: SRPT RDMA channel.
*
* A quote from the InfiniBand specification: C9-88: For an HCA responder
* using Reliable Connection service, for each zero-length RDMA READ or WRITE
@@ -802,6 +840,9 @@ static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
struct ib_send_wr wr, *bad_wr;
+ pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
+ ch->qp->qp_num);
+
memset(&wr, 0, sizeof(wr));
wr.opcode = IB_WR_RDMA_WRITE;
wr.wr_cqe = &ch->zw_cqe;
@@ -813,13 +854,17 @@ static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct srpt_rdma_ch *ch = cq->cq_context;
+ pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
+ wc->status);
+
if (wc->status == IB_WC_SUCCESS) {
srpt_process_wait_list(ch);
} else {
if (srpt_set_ch_state(ch, CH_DISCONNECTED))
schedule_work(&ch->release_work);
else
- WARN_ONCE(1, "%s-%d\n", ch->sess_name, ch->qp->qp_num);
+ pr_debug("%s-%d: already disconnected.\n",
+ ch->sess_name, ch->qp->qp_num);
}
}
@@ -928,11 +973,13 @@ static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
}
/**
- * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
+ * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
* @ioctx: Pointer to the I/O context associated with the request.
* @srp_cmd: Pointer to the SRP_CMD request data.
* @dir: Pointer to the variable to which the transfer direction will be
* written.
+ * @sg: [out] scatterlist allocated for the parsed SRP_CMD.
+ * @sg_cnt: [out] length of @sg.
* @data_len: Pointer to the variable to which the total data length of all
* descriptors in the SRP_CMD request will be written.
*
@@ -998,7 +1045,9 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
}
/**
- * srpt_init_ch_qp() - Initialize queue pair attributes.
+ * srpt_init_ch_qp - initialize queue pair attributes
+ * @ch: SRPT RDMA channel.
+ * @qp: Queue pair pointer.
*
* Initialized the attributes of queue pair 'qp' by allowing local write,
* remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
@@ -1013,10 +1062,14 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
return -ENOMEM;
attr->qp_state = IB_QPS_INIT;
- attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE;
+ attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
attr->port_num = ch->sport->port;
- attr->pkey_index = 0;
+
+ ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
+ ch->pkey, &attr->pkey_index);
+ if (ret < 0)
+ pr_err("Translating pkey %#x failed (%d) - using index 0\n",
+ ch->pkey, ret);
ret = ib_modify_qp(qp, attr,
IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
@@ -1027,7 +1080,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
}
/**
- * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
+ * srpt_ch_qp_rtr - change the state of a channel to 'ready to receive' (RTR)
* @ch: channel of the queue pair.
* @qp: queue pair to change the state of.
*
@@ -1044,7 +1097,7 @@ static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
int ret;
qp_attr.qp_state = IB_QPS_RTR;
- ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
if (ret)
goto out;
@@ -1057,7 +1110,7 @@ out:
}
/**
- * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
+ * srpt_ch_qp_rts - change the state of a channel to 'ready to send' (RTS)
* @ch: channel of the queue pair.
* @qp: queue pair to change the state of.
*
@@ -1074,7 +1127,7 @@ static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
int ret;
qp_attr.qp_state = IB_QPS_RTS;
- ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
if (ret)
goto out;
@@ -1087,7 +1140,8 @@ out:
}
/**
- * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
+ * srpt_ch_qp_err - set the channel queue pair state to 'error'
+ * @ch: SRPT RDMA channel.
*/
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
@@ -1098,7 +1152,8 @@ static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
}
/**
- * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
+ * srpt_get_send_ioctx - obtain an I/O context for sending to the initiator
+ * @ch: SRPT RDMA channel.
*/
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
@@ -1120,11 +1175,9 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
return ioctx;
BUG_ON(ioctx->ch != ch);
- spin_lock_init(&ioctx->spinlock);
ioctx->state = SRPT_STATE_NEW;
ioctx->n_rdma = 0;
ioctx->n_rw_ctx = 0;
- init_completion(&ioctx->tx_done);
ioctx->queue_status_only = false;
/*
* transport_init_se_cmd() does not initialize all fields, so do it
@@ -1137,14 +1190,12 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
}
/**
- * srpt_abort_cmd() - Abort a SCSI command.
+ * srpt_abort_cmd - abort a SCSI command
* @ioctx: I/O context associated with the SCSI command.
- * @context: Preferred execution context.
*/
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
enum srpt_command_state state;
- unsigned long flags;
BUG_ON(!ioctx);
@@ -1153,7 +1204,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
* the ib_srpt driver, change the state to the next state.
*/
- spin_lock_irqsave(&ioctx->spinlock, flags);
state = ioctx->state;
switch (state) {
case SRPT_STATE_NEED_DATA:
@@ -1168,7 +1218,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
__func__, state);
break;
}
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
ioctx->state, ioctx->cmd.tag);
@@ -1207,6 +1256,10 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
}
/**
+ * srpt_rdma_read_done - RDMA read completion callback
+ * @cq: Completion queue.
+ * @wc: Work completion.
+ *
* XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
* the data that has been transferred via IB RDMA had to be postponed until the
* check_stop_free() callback. None of this is necessary anymore and needs to
@@ -1234,11 +1287,11 @@ static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
target_execute_cmd(&ioctx->cmd);
else
pr_err("%s[%d]: wrong state = %d\n", __func__,
- __LINE__, srpt_get_cmd_state(ioctx));
+ __LINE__, ioctx->state);
}
/**
- * srpt_build_cmd_rsp() - Build an SRP_RSP response.
+ * srpt_build_cmd_rsp - build a SRP_RSP response
* @ch: RDMA channel through which the request has been received.
* @ioctx: I/O context associated with the SRP_CMD request. The response will
* be built in the buffer ioctx->buf points at and hence this function will
@@ -1298,7 +1351,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
}
/**
- * srpt_build_tskmgmt_rsp() - Build a task management response.
+ * srpt_build_tskmgmt_rsp - build a task management response
* @ch: RDMA channel through which the request has been received.
* @ioctx: I/O context in which the SRP_RSP response will be built.
* @rsp_code: RSP_CODE that will be stored in the response.
@@ -1346,7 +1399,10 @@ static int srpt_check_stop_free(struct se_cmd *cmd)
}
/**
- * srpt_handle_cmd() - Process SRP_CMD.
+ * srpt_handle_cmd - process a SRP_CMD information unit
+ * @ch: SRPT RDMA channel.
+ * @recv_ioctx: Receive I/O context.
+ * @send_ioctx: Send I/O context.
*/
static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *recv_ioctx,
@@ -1428,7 +1484,10 @@ static int srp_tmr_to_tcm(int fn)
}
/**
- * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
+ * srpt_handle_tsk_mgmt - process a SRP_TSK_MGMT information unit
+ * @ch: SRPT RDMA channel.
+ * @recv_ioctx: Receive I/O context.
+ * @send_ioctx: Send I/O context.
*
* Returns 0 if and only if the request will be processed by the target core.
*
@@ -1450,9 +1509,9 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
srp_tsk = recv_ioctx->ioctx.buf;
cmd = &send_ioctx->cmd;
- pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
- " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
- srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
+ pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
+ srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,
+ ch->sess);
srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
send_ioctx->cmd.tag = srp_tsk->tag;
@@ -1471,41 +1530,42 @@ fail:
}
/**
- * srpt_handle_new_iu() - Process a newly received information unit.
+ * srpt_handle_new_iu - process a newly received information unit
* @ch: RDMA channel through which the information unit has been received.
- * @ioctx: SRPT I/O context associated with the information unit.
+ * @recv_ioctx: Receive I/O context associated with the information unit.
*/
-static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
- struct srpt_recv_ioctx *recv_ioctx,
- struct srpt_send_ioctx *send_ioctx)
+static bool
+srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
{
+ struct srpt_send_ioctx *send_ioctx = NULL;
struct srp_cmd *srp_cmd;
+ bool res = false;
+ u8 opcode;
BUG_ON(!ch);
BUG_ON(!recv_ioctx);
+ if (unlikely(ch->state == CH_CONNECTING))
+ goto push;
+
ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
recv_ioctx->ioctx.dma, srp_max_req_size,
DMA_FROM_DEVICE);
- if (unlikely(ch->state == CH_CONNECTING))
- goto out_wait;
-
- if (unlikely(ch->state != CH_LIVE))
- return;
-
srp_cmd = recv_ioctx->ioctx.buf;
- if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
- if (!send_ioctx) {
- if (!list_empty(&ch->cmd_wait_list))
- goto out_wait;
- send_ioctx = srpt_get_send_ioctx(ch);
- }
+ opcode = srp_cmd->opcode;
+ if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
+ send_ioctx = srpt_get_send_ioctx(ch);
if (unlikely(!send_ioctx))
- goto out_wait;
+ goto push;
}
- switch (srp_cmd->opcode) {
+ if (!list_empty(&recv_ioctx->wait_list)) {
+ WARN_ON_ONCE(!ch->processing_wait_list);
+ list_del_init(&recv_ioctx->wait_list);
+ }
+
+ switch (opcode) {
case SRP_CMD:
srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
break;
@@ -1525,16 +1585,22 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
pr_err("Received SRP_RSP\n");
break;
default:
- pr_err("received IU with unknown opcode 0x%x\n",
- srp_cmd->opcode);
+ pr_err("received IU with unknown opcode 0x%x\n", opcode);
break;
}
srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
- return;
+ res = true;
+
+out:
+ return res;
-out_wait:
- list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+push:
+ if (list_empty(&recv_ioctx->wait_list)) {
+ WARN_ON_ONCE(ch->processing_wait_list);
+ list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+ }
+ goto out;
}
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1549,10 +1615,10 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
req_lim = atomic_dec_return(&ch->req_lim);
if (unlikely(req_lim < 0))
pr_err("req_lim = %d < 0\n", req_lim);
- srpt_handle_new_iu(ch, ioctx, NULL);
+ srpt_handle_new_iu(ch, ioctx);
} else {
- pr_info("receiving failed for ioctx %p with status %d\n",
- ioctx, wc->status);
+ pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
+ ioctx, wc->status);
}
}
@@ -1563,22 +1629,28 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
*/
static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
{
- struct srpt_send_ioctx *ioctx;
+ struct srpt_recv_ioctx *recv_ioctx, *tmp;
+
+ WARN_ON_ONCE(ch->state == CH_CONNECTING);
- while (!list_empty(&ch->cmd_wait_list) &&
- ch->state >= CH_LIVE &&
- (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
- struct srpt_recv_ioctx *recv_ioctx;
+ if (list_empty(&ch->cmd_wait_list))
+ return;
- recv_ioctx = list_first_entry(&ch->cmd_wait_list,
- struct srpt_recv_ioctx,
- wait_list);
- list_del(&recv_ioctx->wait_list);
- srpt_handle_new_iu(ch, recv_ioctx, ioctx);
+ WARN_ON_ONCE(ch->processing_wait_list);
+ ch->processing_wait_list = true;
+ list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
+ wait_list) {
+ if (!srpt_handle_new_iu(ch, recv_ioctx))
+ break;
}
+ ch->processing_wait_list = false;
}
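
The _safe iterator matters because srpt_handle_new_iu() unlinks each handled entry from the wait list mid-walk, and the loop stops early once send contexts run out. The contract, restated with comments (one re-entry point is the zero-length-write completion shown earlier, which calls this function again):

	list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list, wait_list) {
		/* false => no free send ioctx; the IU stays queued and a
		 * later completion re-runs srpt_process_wait_list() so the
		 * drain resumes exactly where it stopped. */
		if (!srpt_handle_new_iu(ch, recv_ioctx))
			break;
	}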
/**
+ * srpt_send_done - send completion callback
+ * @cq: Completion queue.
+ * @wc: Work completion.
+ *
* Note: Although this has not yet been observed during tests, at least in
* theory it is possible that the srpt_get_send_ioctx() call invoked by
* srpt_handle_new_iu() fails. This is possible because the req_lim_delta
@@ -1620,7 +1692,8 @@ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
}
/**
- * srpt_create_ch_ib() - Create receive and send completion queues.
+ * srpt_create_ch_ib - create receive and send completion queues
+ * @ch: SRPT RDMA channel.
*/
static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
{
@@ -1628,7 +1701,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
struct srpt_port *sport = ch->sport;
struct srpt_device *sdev = sport->sdev;
const struct ib_device_attr *attrs = &sdev->device->attrs;
- u32 srp_sq_size = sport->port_attrib.srp_sq_size;
+ int sq_size = sport->port_attrib.srp_sq_size;
int i, ret;
WARN_ON(ch->rq_size < 1);
@@ -1639,12 +1712,12 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
goto out;
retry:
- ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + srp_sq_size,
+ ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
if (IS_ERR(ch->cq)) {
ret = PTR_ERR(ch->cq);
pr_err("failed to create CQ cqe= %d ret= %d\n",
- ch->rq_size + srp_sq_size, ret);
+ ch->rq_size + sq_size, ret);
goto out;
}
@@ -1662,8 +1735,8 @@ retry:
* both, as RDMA contexts will also post completions for the
* RDMA READ case.
*/
- qp_init->cap.max_send_wr = min(srp_sq_size / 2, attrs->max_qp_wr + 0U);
- qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
+ qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
+ qp_init->cap.max_rdma_ctxs = sq_size / 2;
qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
qp_init->port_num = ch->sport->port;
if (sdev->use_srq) {
@@ -1677,8 +1750,8 @@ retry:
if (IS_ERR(ch->qp)) {
ret = PTR_ERR(ch->qp);
if (ret == -ENOMEM) {
- srp_sq_size /= 2;
- if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
+ sq_size /= 2;
+ if (sq_size >= MIN_SRPT_SQ_SIZE) {
ib_destroy_cq(ch->cq);
goto retry;
}
@@ -1689,9 +1762,9 @@ retry:
atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
- pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
+ pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n",
__func__, ch->cq->cqe, qp_init->cap.max_send_sge,
- qp_init->cap.max_send_wr, ch->cm_id);
+ qp_init->cap.max_send_wr, ch);
ret = srpt_init_ch_qp(ch, ch->qp);
if (ret)
@@ -1719,7 +1792,8 @@ static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
}
/**
- * srpt_close_ch() - Close an RDMA channel.
+ * srpt_close_ch - close a RDMA channel
+ * @ch: SRPT RDMA channel.
*
* Make sure all resources associated with the channel will be deallocated at
* an appropriate time.
@@ -1744,8 +1818,6 @@ static bool srpt_close_ch(struct srpt_rdma_ch *ch)
pr_err("%s-%d: changing queue pair into error state failed: %d\n",
ch->sess_name, ch->qp->qp_num, ret);
- pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
- ch->qp->qp_num);
ret = srpt_zerolength_write(ch);
if (ret < 0) {
pr_err("%s-%d: queuing zero-length write failed: %d\n",
@@ -1777,9 +1849,9 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
return -ENOTCONN;
- ret = ib_send_cm_dreq(ch->cm_id, NULL, 0);
+ ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
if (ret < 0)
- ret = ib_send_cm_drep(ch->cm_id, NULL, 0);
+ ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
if (ret < 0 && srpt_close_ch(ch))
ret = 0;
@@ -1787,83 +1859,135 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
return ret;
}
-/*
- * Send DREQ and wait for DREP. Return true if and only if this function
- * changed the state of @ch.
- */
-static bool srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
- __must_hold(&sdev->mutex)
+static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
{
- DECLARE_COMPLETION_ONSTACK(release_done);
- struct srpt_device *sdev = ch->sport->sdev;
- bool wait;
+ struct srpt_nexus *nexus;
+ struct srpt_rdma_ch *ch2;
+ bool res = true;
+
+ rcu_read_lock();
+ list_for_each_entry(nexus, &sport->nexus_list, entry) {
+ list_for_each_entry(ch2, &nexus->ch_list, list) {
+ if (ch2 == ch) {
+ res = false;
+ goto done;
+ }
+ }
+ }
+done:
+ rcu_read_unlock();
- lockdep_assert_held(&sdev->mutex);
+ return res;
+}
+
+/* Send DREQ and wait for DREP. */
+static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
+{
+ struct srpt_port *sport = ch->sport;
pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
ch->state);
- WARN_ON(ch->release_done);
- ch->release_done = &release_done;
- wait = !list_empty(&ch->list);
+ mutex_lock(&sport->mutex);
srpt_disconnect_ch(ch);
- mutex_unlock(&sdev->mutex);
+ mutex_unlock(&sport->mutex);
- if (!wait)
- goto out;
-
- while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
+ while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
+ 5 * HZ) == 0)
pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
ch->sess_name, ch->qp->qp_num, ch->state);
-out:
- mutex_lock(&sdev->mutex);
- return wait;
}
-static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
- __must_hold(&sdev->mutex)
+static void __srpt_close_all_ch(struct srpt_port *sport)
{
- struct srpt_device *sdev = sport->sdev;
+ struct srpt_nexus *nexus;
struct srpt_rdma_ch *ch;
- lockdep_assert_held(&sdev->mutex);
+ lockdep_assert_held(&sport->mutex);
- if (sport->enabled == enabled)
- return;
- sport->enabled = enabled;
- if (sport->enabled)
- return;
+ list_for_each_entry(nexus, &sport->nexus_list, entry) {
+ list_for_each_entry(ch, &nexus->ch_list, list) {
+ if (srpt_disconnect_ch(ch) >= 0)
+ pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
+ ch->sess_name, ch->qp->qp_num,
+ sport->sdev->device->name, sport->port);
+ srpt_close_ch(ch);
+ }
+ }
+}
-again:
- list_for_each_entry(ch, &sdev->rch_list, list) {
- if (ch->sport == sport) {
- pr_info("%s: closing channel %s-%d\n",
- sdev->device->name, ch->sess_name,
- ch->qp->qp_num);
- if (srpt_disconnect_ch_sync(ch))
- goto again;
+/*
+ * Look up (i_port_id, t_port_id) in sport->nexus_list. Create an entry if
+ * it does not yet exist.
+ */
+static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
+ const u8 i_port_id[16],
+ const u8 t_port_id[16])
+{
+ struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;
+
+ for (;;) {
+ mutex_lock(&sport->mutex);
+ list_for_each_entry(n, &sport->nexus_list, entry) {
+ if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
+ memcmp(n->t_port_id, t_port_id, 16) == 0) {
+ nexus = n;
+ break;
+ }
}
+ if (!nexus && tmp_nexus) {
+ list_add_tail_rcu(&tmp_nexus->entry,
+ &sport->nexus_list);
+ swap(nexus, tmp_nexus);
+ }
+ mutex_unlock(&sport->mutex);
+
+ if (nexus)
+ break;
+ tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
+ if (!tmp_nexus) {
+ nexus = ERR_PTR(-ENOMEM);
+ break;
+ }
+ INIT_LIST_HEAD(&tmp_nexus->ch_list);
+ memcpy(tmp_nexus->i_port_id, i_port_id, 16);
+ memcpy(tmp_nexus->t_port_id, t_port_id, 16);
}
+ kfree(tmp_nexus);
+
+ return nexus;
+}
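+
srpt_get_nexus() above is the classic optimistic-allocation loop: look up under the mutex, and only if the entry is missing drop the lock, kzalloc() (which may sleep), retake the lock and re-check before publishing. The pattern reduced to a skeleton (illustrative types and names, not driver code):

	struct obj { struct list_head entry; u64 key; };

	static struct obj *get_obj(struct mutex *lock, struct list_head *head,
				   u64 key)
	{
		struct obj *obj = NULL, *tmp = NULL, *n;

		for (;;) {
			mutex_lock(lock);
			list_for_each_entry(n, head, entry) {
				if (n->key == key) {
					obj = n;
					break;
				}
			}
			if (!obj && tmp) {	/* we won the race: publish */
				list_add_tail(&tmp->entry, head);
				swap(obj, tmp);
			}
			mutex_unlock(lock);

			if (obj)
				break;
			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); /* may sleep */
			if (!tmp)
				return ERR_PTR(-ENOMEM);
			tmp->key = key;
		}
		kfree(tmp);	/* NULL if we won, the losing copy otherwise */
		return obj;
	}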
+
+static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
+ __must_hold(&sport->mutex)
+{
+ lockdep_assert_held(&sport->mutex);
+
+ if (sport->enabled == enabled)
+ return;
+ sport->enabled = enabled;
+ if (!enabled)
+ __srpt_close_all_ch(sport);
}
static void srpt_free_ch(struct kref *kref)
{
struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
- kfree(ch);
+ kfree_rcu(ch, rcu);
}
static void srpt_release_channel_work(struct work_struct *w)
{
struct srpt_rdma_ch *ch;
struct srpt_device *sdev;
+ struct srpt_port *sport;
struct se_session *se_sess;
ch = container_of(w, struct srpt_rdma_ch, release_work);
- pr_debug("%s: %s-%d; release_done = %p\n", __func__, ch->sess_name,
- ch->qp->qp_num, ch->release_done);
+ pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);
sdev = ch->sport->sdev;
BUG_ON(!sdev);
@@ -1878,169 +2002,141 @@ static void srpt_release_channel_work(struct work_struct *w)
transport_deregister_session(se_sess);
ch->sess = NULL;
- ib_destroy_cm_id(ch->cm_id);
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
srpt_destroy_ch_ib(ch);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
- ch->rsp_size, DMA_TO_DEVICE);
+ ch->max_rsp_size, DMA_TO_DEVICE);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
sdev, ch->rq_size,
srp_max_req_size, DMA_FROM_DEVICE);
- mutex_lock(&sdev->mutex);
- list_del_init(&ch->list);
- if (ch->release_done)
- complete(ch->release_done);
- mutex_unlock(&sdev->mutex);
+ sport = ch->sport;
+ mutex_lock(&sport->mutex);
+ list_del_rcu(&ch->list);
+ mutex_unlock(&sport->mutex);
- wake_up(&sdev->ch_releaseQ);
+ wake_up(&sport->ch_releaseQ);
kref_put(&ch->kref, srpt_free_ch);
}
/**
- * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
+ * srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
+ * @cm_id: IB/CM connection identifier.
+ * @port_num: Port through which the IB/CM REQ message was received.
+ * @pkey: P_Key of the incoming connection.
+ * @req: SRP login request.
+ * @src_addr: GID of the port that submitted the login request.
*
* Ownership of the cm_id is transferred to the target session if this
* functions returns zero. Otherwise the caller remains the owner of cm_id.
*/
static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
- struct ib_cm_req_event_param *param,
- void *private_data)
+ u8 port_num, __be16 pkey,
+ const struct srp_login_req *req,
+ const char *src_addr)
{
struct srpt_device *sdev = cm_id->context;
- struct srpt_port *sport = &sdev->port[param->port - 1];
- struct srp_login_req *req;
- struct srp_login_rsp *rsp;
- struct srp_login_rej *rej;
- struct ib_cm_rep_param *rep_param;
- struct srpt_rdma_ch *ch, *tmp_ch;
- __be16 *guid;
+ struct srpt_port *sport = &sdev->port[port_num - 1];
+ struct srpt_nexus *nexus;
+ struct srp_login_rsp *rsp = NULL;
+ struct srp_login_rej *rej = NULL;
+ struct ib_cm_rep_param *rep_param = NULL;
+ struct srpt_rdma_ch *ch;
+ char i_port_id[36];
u32 it_iu_len;
- int i, ret = 0;
+ int i, ret;
WARN_ON_ONCE(irqs_disabled());
- if (WARN_ON(!sdev || !private_data))
+ if (WARN_ON(!sdev || !req))
return -EINVAL;
- req = (struct srp_login_req *)private_data;
-
it_iu_len = be32_to_cpu(req->req_it_iu_len);
- pr_info("Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
- " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
- " (guid=0x%llx:0x%llx)\n",
- be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
- be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
- be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
- be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
- it_iu_len,
- param->port,
- be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
- be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
+ pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n",
+ req->initiator_port_id, req->target_port_id, it_iu_len,
+ port_num, &sport->gid, be16_to_cpu(pkey));
+ nexus = srpt_get_nexus(sport, req->initiator_port_id,
+ req->target_port_id);
+ if (IS_ERR(nexus)) {
+ ret = PTR_ERR(nexus);
+ goto out;
+ }
+
+ ret = -ENOMEM;
rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
rej = kzalloc(sizeof(*rej), GFP_KERNEL);
rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
-
- if (!rsp || !rej || !rep_param) {
- ret = -ENOMEM;
+ if (!rsp || !rej || !rep_param)
goto out;
- }
+ ret = -EINVAL;
if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
- ret = -EINVAL;
- pr_err("rejected SRP_LOGIN_REQ because its"
- " length (%d bytes) is out of range (%d .. %d)\n",
+ SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+ pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
it_iu_len, 64, srp_max_req_size);
goto reject;
}
if (!sport->enabled) {
- rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- ret = -EINVAL;
- pr_err("rejected SRP_LOGIN_REQ because the target port"
- " has not yet been enabled\n");
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
+ sport->sdev->device->name, port_num);
goto reject;
}
- if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
- rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
-
- mutex_lock(&sdev->mutex);
-
- list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
- if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
- && !memcmp(ch->t_port_id, req->target_port_id, 16)
- && param->port == ch->sport->port
- && param->listen_id == ch->sport->sdev->cm_id
- && ch->cm_id) {
- if (srpt_disconnect_ch(ch) < 0)
- continue;
- pr_info("Relogin - closed existing channel %s\n",
- ch->sess_name);
- rsp->rsp_flags =
- SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
- }
- }
-
- mutex_unlock(&sdev->mutex);
-
- } else
- rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
-
if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
|| *(__be64 *)(req->target_port_id + 8) !=
cpu_to_be64(srpt_service_guid)) {
rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
- ret = -ENOMEM;
- pr_err("rejected SRP_LOGIN_REQ because it"
- " has an invalid target port identifier.\n");
+ SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+ pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
goto reject;
}
+ ret = -ENOMEM;
ch = kzalloc(sizeof(*ch), GFP_KERNEL);
if (!ch) {
- rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
- ret = -ENOMEM;
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
goto reject;
}
kref_init(&ch->kref);
+ ch->pkey = be16_to_cpu(pkey);
+ ch->nexus = nexus;
ch->zw_cqe.done = srpt_zerolength_write_done;
INIT_WORK(&ch->release_work, srpt_release_channel_work);
- memcpy(ch->i_port_id, req->initiator_port_id, 16);
- memcpy(ch->t_port_id, req->target_port_id, 16);
- ch->sport = &sdev->port[param->port - 1];
- ch->cm_id = cm_id;
+ ch->sport = sport;
+ ch->ib_cm.cm_id = cm_id;
cm_id->context = ch;
/*
* ch->rq_size should be at least as large as the initiator queue
* depth so that the initiator driver does not have to report QUEUE_FULL
* to the SCSI mid-layer.
*/
- ch->rq_size = min(SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
+ ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
spin_lock_init(&ch->spinlock);
ch->state = CH_CONNECTING;
INIT_LIST_HEAD(&ch->cmd_wait_list);
- ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
+ ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
ch->ioctx_ring = (struct srpt_send_ioctx **)
srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
sizeof(*ch->ioctx_ring[0]),
- ch->rsp_size, DMA_TO_DEVICE);
- if (!ch->ioctx_ring)
+ ch->max_rsp_size, DMA_TO_DEVICE);
+ if (!ch->ioctx_ring) {
+ pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
goto free_ch;
+ }
INIT_LIST_HEAD(&ch->free_list);
for (i = 0; i < ch->rq_size; i++) {
@@ -2059,59 +2155,88 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
goto free_ring;
}
+ for (i = 0; i < ch->rq_size; i++)
+ INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
}
ret = srpt_create_ch_ib(ch);
if (ret) {
- rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_err("rejected SRP_LOGIN_REQ because creating"
- " a new RDMA channel failed.\n");
- goto free_recv_ring;
- }
-
- ret = srpt_ch_qp_rtr(ch, ch->qp);
- if (ret) {
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_err("rejected SRP_LOGIN_REQ because enabling"
- " RTR failed (error code = %d)\n", ret);
- goto destroy_ib;
+ pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n");
+ goto free_recv_ring;
}
- guid = (__be16 *)&param->primary_path->sgid.global.interface_id;
- snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x",
- be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
- be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
- snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
- be64_to_cpu(*(__be64 *)ch->i_port_id),
- be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
+ strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
+ snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
+ be64_to_cpu(*(__be64 *)nexus->i_port_id),
+ be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
pr_debug("registering session %s\n", ch->sess_name);
if (sport->port_guid_tpg.se_tpg_wwn)
ch->sess = target_alloc_session(&sport->port_guid_tpg, 0, 0,
TARGET_PROT_NORMAL,
- ch->ini_guid, ch, NULL);
+ ch->sess_name, ch, NULL);
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
- TARGET_PROT_NORMAL, ch->sess_name, ch,
+ TARGET_PROT_NORMAL, i_port_id, ch,
NULL);
/* Retry without leading "0x" */
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
TARGET_PROT_NORMAL,
- ch->sess_name + 2, ch, NULL);
+ i_port_id + 2, ch, NULL);
if (IS_ERR_OR_NULL(ch->sess)) {
- pr_info("Rejected login because no ACL has been configured yet for initiator %s.\n",
- ch->sess_name);
- rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
+ ret = PTR_ERR(ch->sess);
+ pr_info("Rejected login for initiator %s: ret = %d.\n",
+ ch->sess_name, ret);
+ rej->reason = cpu_to_be32(ret == -ENOMEM ?
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+ goto reject;
+ }
+
+ mutex_lock(&sport->mutex);
+
+ if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
+ struct srpt_rdma_ch *ch2;
+
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
+
+ list_for_each_entry(ch2, &nexus->ch_list, list) {
+ if (srpt_disconnect_ch(ch2) < 0)
+ continue;
+ pr_info("Relogin - closed existing channel %s\n",
+ ch2->sess_name);
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
+ }
+ } else {
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
+ }
+
+ list_add_tail_rcu(&ch->list, &nexus->ch_list);
+
+ if (!sport->enabled) {
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
+ sdev->device->name, port_num);
+ mutex_unlock(&sport->mutex);
+ goto reject;
+ }
+
+ mutex_unlock(&sport->mutex);
+
+ ret = srpt_ch_qp_rtr(ch, ch->qp);
+ if (ret) {
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
+ ret);
goto destroy_ib;
}
- pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
- ch->sess_name, ch->cm_id);
+ pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
+ ch->sess_name, ch);
/* create srp_login_response */
rsp->opcode = SRP_LOGIN_RSP;
@@ -2119,8 +2244,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rsp->max_it_iu_len = req->req_it_iu_len;
rsp->max_ti_iu_len = req->req_it_iu_len;
ch->max_ti_iu_len = it_iu_len;
- rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
+ rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
atomic_set(&ch->req_lim, ch->rq_size);
atomic_set(&ch->req_lim_delta, 0);
@@ -2136,25 +2261,31 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rep_param->responder_resources = 4;
rep_param->initiator_depth = 4;
- ret = ib_send_cm_rep(cm_id, rep_param);
- if (ret) {
- pr_err("sending SRP_LOGIN_REQ response failed"
- " (error code = %d)\n", ret);
- goto release_channel;
- }
+ /*
+ * Hold the sport mutex while accepting a connection to prevent
+ * srpt_disconnect_ch() from being invoked concurrently with this code.
+ */
+ mutex_lock(&sport->mutex);
+ if (sport->enabled && ch->state == CH_CONNECTING)
+ ret = ib_send_cm_rep(cm_id, rep_param);
+ else
+ ret = -EINVAL;
+ mutex_unlock(&sport->mutex);
- mutex_lock(&sdev->mutex);
- list_add_tail(&ch->list, &sdev->rch_list);
- mutex_unlock(&sdev->mutex);
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ goto reject;
+ default:
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n",
+ ret);
+ goto reject;
+ }
goto out;
-release_channel:
- srpt_disconnect_ch(ch);
- transport_deregister_session_configfs(ch->sess);
- transport_deregister_session(ch->sess);
- ch->sess = NULL;
-
destroy_ib:
srpt_destroy_ch_ib(ch);
@@ -2166,15 +2297,20 @@ free_recv_ring:
free_ring:
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
- ch->rsp_size, DMA_TO_DEVICE);
+ ch->max_rsp_size, DMA_TO_DEVICE);
free_ch:
+ cm_id->context = NULL;
kfree(ch);
+ ch = NULL;
+
+ WARN_ON_ONCE(ret == 0);
reject:
+ pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason));
rej->opcode = SRP_LOGIN_REJ;
rej->tag = req->tag;
- rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
+ rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
(void *)rej, sizeof(*rej));
@@ -2187,6 +2323,19 @@ out:
return ret;
}
+static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
+ struct ib_cm_req_event_param *param,
+ void *private_data)
+{
+ char sguid[40];
+
+ srpt_format_guid(sguid, sizeof(sguid),
+ &param->primary_path->dgid.global.interface_id);
+
+ return srpt_cm_req_recv(cm_id, param->port, param->primary_path->pkey,
+ private_data, sguid);
+}
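
The srpt_format_guid() helper called above is introduced elsewhere in this patch and is not visible in this excerpt. Judging from the inline formatting that srpt_cm_req_recv() used to perform (see the deleted snprintf of ini_guid above), a plausible sketch is:

static void srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
{
	const __be16 *g = (const __be16 *)guid;

	snprintf(buf, size, "%04x:%04x:%04x:%04x",
		 be16_to_cpu(g[0]), be16_to_cpu(g[1]),
		 be16_to_cpu(g[2]), be16_to_cpu(g[3]));
}

Note that the wrapper formats the interface_id of the primary path's dgid, so ch->sess_name now carries the initiator port GUID rather than the hex i_port_id string used by the old code.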
+
static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
enum ib_cm_rej_reason reason,
const u8 *private_data,
@@ -2207,7 +2356,8 @@ static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
}
/**
- * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
+ * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
+ * @ch: SRPT RDMA channel.
*
* An IB_CM_RTU_RECEIVED message indicates that the connection is established
* and that the recipient may begin transmitting (RTU = ready to use).
@@ -2216,21 +2366,34 @@ static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
{
int ret;
- if (srpt_set_ch_state(ch, CH_LIVE)) {
- ret = srpt_ch_qp_rts(ch, ch->qp);
+ ret = srpt_ch_qp_rts(ch, ch->qp);
+ if (ret < 0) {
+ pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
+ ch->qp->qp_num);
+ srpt_close_ch(ch);
+ return;
+ }
- if (ret == 0) {
- /* Trigger wait list processing. */
- ret = srpt_zerolength_write(ch);
- WARN_ONCE(ret < 0, "%d\n", ret);
- } else {
- srpt_close_ch(ch);
- }
+ /*
+ * Note: calling srpt_close_ch() if the transition to the LIVE state
+ * fails is not necessary since that means that function has
+ * already been invoked from another thread.
+ */
+ if (!srpt_set_ch_state(ch, CH_LIVE)) {
+ pr_err("%s-%d: channel transition to LIVE state failed\n",
+ ch->sess_name, ch->qp->qp_num);
+ return;
}
+
+ /* Trigger wait list processing. */
+ ret = srpt_zerolength_write(ch);
+ WARN_ONCE(ret < 0, "%d\n", ret);
}
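
The comment above relies on srpt_set_ch_state() only ever moving the channel state forward, so a failed transition to CH_LIVE proves another thread already advanced the state. That helper lies outside this excerpt; a minimal sketch of the assumed behavior, guarded by the same ch->spinlock that the header documents as protecting 'state':

static bool srpt_set_ch_state(struct srpt_rdma_ch *ch,
			      enum rdma_ch_state new_state)
{
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ch->spinlock, flags);
	if (new_state > ch->state) {
		/* States are ordered; see enum rdma_ch_state. */
		ch->state = new_state;
		changed = true;
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	return changed;
}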
/**
- * srpt_cm_handler() - IB connection manager callback function.
+ * srpt_cm_handler - IB connection manager callback function
+ * @cm_id: IB/CM connection identifier.
+ * @event: IB/CM event.
*
* A non-zero return value will cause the caller to destroy the CM ID.
*
@@ -2247,8 +2410,8 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
ret = 0;
switch (event->event) {
case IB_CM_REQ_RECEIVED:
- ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
- event->private_data);
+ ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd,
+ event->private_data);
break;
case IB_CM_REJ_RECEIVED:
srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
@@ -2295,11 +2458,11 @@ static int srpt_write_pending_status(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx;
ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
- return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
+ return ioctx->state == SRPT_STATE_NEED_DATA;
}
/*
- * srpt_write_pending() - Start data transfer from initiator to target (write).
+ * srpt_write_pending - Start data transfer from initiator to target (write).
*/
static int srpt_write_pending(struct se_cmd *se_cmd)
{
@@ -2356,7 +2519,8 @@ static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
}
/**
- * srpt_queue_response() - Transmits the response to a SCSI command.
+ * srpt_queue_response - transmit the response to a SCSI command
+ * @cmd: SCSI target command.
*
* Callback function called by the TCM core. Must not block since it can be
* invoked on the context of the IB completion handler.
@@ -2370,13 +2534,11 @@ static void srpt_queue_response(struct se_cmd *cmd)
struct ib_send_wr send_wr, *first_wr = &send_wr, *bad_wr;
struct ib_sge sge;
enum srpt_command_state state;
- unsigned long flags;
int resp_len, ret, i;
u8 srp_tm_status;
BUG_ON(!ch);
- spin_lock_irqsave(&ioctx->spinlock, flags);
state = ioctx->state;
switch (state) {
case SRPT_STATE_NEW:
@@ -2391,7 +2553,6 @@ static void srpt_queue_response(struct se_cmd *cmd)
ch, ioctx->ioctx.index, ioctx->state);
break;
}
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
return;
@@ -2495,26 +2656,56 @@ static void srpt_refresh_port_work(struct work_struct *work)
srpt_refresh_port(sport);
}
+static bool srpt_ch_list_empty(struct srpt_port *sport)
+{
+ struct srpt_nexus *nexus;
+ bool res = true;
+
+ rcu_read_lock();
+ list_for_each_entry(nexus, &sport->nexus_list, entry)
+ if (!list_empty(&nexus->ch_list))
+ res = false;
+ rcu_read_unlock();
+
+ return res;
+}
+
/**
- * srpt_release_sdev() - Free the channel resources associated with a target.
+ * srpt_release_sport - disable login and wait for associated channels
+ * @sport: SRPT HCA port.
*/
-static int srpt_release_sdev(struct srpt_device *sdev)
+static int srpt_release_sport(struct srpt_port *sport)
{
- int i, res;
+ struct srpt_nexus *nexus, *next_n;
+ struct srpt_rdma_ch *ch;
WARN_ON_ONCE(irqs_disabled());
- BUG_ON(!sdev);
-
- mutex_lock(&sdev->mutex);
- for (i = 0; i < ARRAY_SIZE(sdev->port); i++)
- srpt_set_enabled(&sdev->port[i], false);
- mutex_unlock(&sdev->mutex);
+ mutex_lock(&sport->mutex);
+ srpt_set_enabled(sport, false);
+ mutex_unlock(&sport->mutex);
+
+ while (wait_event_timeout(sport->ch_releaseQ,
+ srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
+ pr_info("%s_%d: waiting for session unregistration ...\n",
+ sport->sdev->device->name, sport->port);
+ rcu_read_lock();
+ list_for_each_entry(nexus, &sport->nexus_list, entry) {
+ list_for_each_entry(ch, &nexus->ch_list, list) {
+ pr_info("%s-%d: state %s\n",
+ ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
+ }
+ }
+ rcu_read_unlock();
+ }
- res = wait_event_interruptible(sdev->ch_releaseQ,
- list_empty_careful(&sdev->rch_list));
- if (res)
- pr_err("%s: interrupted.\n", __func__);
+ mutex_lock(&sport->mutex);
+ list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
+ list_del(&nexus->entry);
+ kfree_rcu(nexus, rcu);
+ }
+ mutex_unlock(&sport->mutex);
return 0;
}
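
Because the nexus and channel lists are walked under rcu_read_lock() above, entries must not be freed while a reader may still hold a pointer to them; kfree_rcu(nexus, rcu) defers the kfree() until a grace period has elapsed, using the rcu_head embedded in struct srpt_nexus. The canonical reader-side pairing for such a list looks like:

	/* Reader: safe against a concurrent unlink + kfree_rcu(). */
	rcu_read_lock();
	list_for_each_entry_rcu(nexus, &sport->nexus_list, entry)
		inspect(nexus);		/* hypothetical read-only accessor */
	rcu_read_unlock();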
@@ -2601,8 +2792,10 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
sdev->use_srq = true;
sdev->srq = srq;
- for (i = 0; i < sdev->srq_size; ++i)
+ for (i = 0; i < sdev->srq_size; ++i) {
+ INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
+ }
return 0;
}
@@ -2624,7 +2817,8 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
}
/**
- * srpt_add_one() - Infiniband device addition callback function.
+ * srpt_add_one - InfiniBand device addition callback function
+ * @device: Describes a HCA.
*/
static void srpt_add_one(struct ib_device *device)
{
@@ -2639,9 +2833,7 @@ static void srpt_add_one(struct ib_device *device)
goto err;
sdev->device = device;
- INIT_LIST_HEAD(&sdev->rch_list);
- init_waitqueue_head(&sdev->ch_releaseQ);
- mutex_init(&sdev->mutex);
+ mutex_init(&sdev->sdev_mutex);
sdev->pd = ib_alloc_pd(device, 0);
if (IS_ERR(sdev->pd))
@@ -2682,6 +2874,9 @@ static void srpt_add_one(struct ib_device *device)
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
+ INIT_LIST_HEAD(&sport->nexus_list);
+ init_waitqueue_head(&sport->ch_releaseQ);
+ mutex_init(&sport->mutex);
sport->sdev = sdev;
sport->port = i;
sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
@@ -2722,7 +2917,9 @@ err:
}
/**
- * srpt_remove_one() - InfiniBand device removal callback function.
+ * srpt_remove_one - InfiniBand device removal callback function
+ * @device: Describes a HCA.
+ * @client_data: The value passed as the third argument to ib_set_client_data().
*/
static void srpt_remove_one(struct ib_device *device, void *client_data)
{
@@ -2752,7 +2949,9 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
spin_lock(&srpt_dev_lock);
list_del(&sdev->list);
spin_unlock(&srpt_dev_lock);
- srpt_release_sdev(sdev);
+
+ for (i = 0; i < sdev->device->phys_port_cnt; i++)
+ srpt_release_sport(&sdev->port[i]);
srpt_free_srq(sdev);
@@ -2828,7 +3027,8 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
}
/**
- * srpt_close_session() - Forcibly close a session.
+ * srpt_close_session - forcibly close a session
+ * @se_sess: SCSI target session.
*
* Callback function invoked by the TCM core to clean up sessions associated
* with a node ACL when the user invokes
@@ -2837,15 +3037,13 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
static void srpt_close_session(struct se_session *se_sess)
{
struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
- struct srpt_device *sdev = ch->sport->sdev;
- mutex_lock(&sdev->mutex);
srpt_disconnect_ch_sync(ch);
- mutex_unlock(&sdev->mutex);
}
/**
- * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
+ * srpt_sess_get_index - return the value of scsiAttIntrPortIndex (SCSI-MIB)
+ * @se_sess: SCSI target session.
*
* A quote from RFC 4455 (SCSI-MIB) about this MIB object:
* This object represents an arbitrary integer used to uniquely identify a
@@ -2867,7 +3065,7 @@ static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx;
ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
- return srpt_get_cmd_state(ioctx);
+ return ioctx->state;
}
static int srpt_parse_guid(u64 *guid, const char *name)
@@ -2884,7 +3082,7 @@ out:
}
/**
- * srpt_parse_i_port_id() - Parse an initiator port ID.
+ * srpt_parse_i_port_id - parse an initiator port ID
* @name: ASCII representation of a 128-bit initiator port ID.
* @i_port_id: Binary 128-bit port ID.
*/
@@ -3065,18 +3263,24 @@ static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
if (val != !!val)
return -EINVAL;
- ret = mutex_lock_interruptible(&sdev->mutex);
+ ret = mutex_lock_interruptible(&sdev->sdev_mutex);
if (ret < 0)
return ret;
+ ret = mutex_lock_interruptible(&sport->mutex);
+ if (ret < 0)
+ goto unlock_sdev;
enabled = sport->enabled;
/* Log out all initiator systems before changing 'use_srq'. */
srpt_set_enabled(sport, false);
sport->port_attrib.use_srq = val;
srpt_use_srq(sdev, sport->port_attrib.use_srq);
srpt_set_enabled(sport, enabled);
- mutex_unlock(&sdev->mutex);
+ ret = count;
+ mutex_unlock(&sport->mutex);
+unlock_sdev:
+ mutex_unlock(&sdev->sdev_mutex);
- return count;
+ return ret;
}
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
@@ -3105,7 +3309,6 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
{
struct se_portal_group *se_tpg = to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
- struct srpt_device *sdev = sport->sdev;
unsigned long tmp;
int ret;
@@ -3120,9 +3323,9 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
return -EINVAL;
}
- mutex_lock(&sdev->mutex);
+ mutex_lock(&sport->mutex);
srpt_set_enabled(sport, tmp);
- mutex_unlock(&sdev->mutex);
+ mutex_unlock(&sport->mutex);
return count;
}
@@ -3135,8 +3338,10 @@ static struct configfs_attribute *srpt_tpg_attrs[] = {
};
/**
- * configfs callback invoked for
- * mkdir /sys/kernel/config/target/$driver/$port/$tpg
+ * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
+ * @wwn: Corresponds to $driver/$port.
+ * @group: Not used.
+ * @name: $tpg.
*/
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
struct config_group *group,
@@ -3158,8 +3363,8 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
}
/**
- * configfs callback invoked for
- * rmdir /sys/kernel/config/target/$driver/$port/$tpg
+ * srpt_drop_tpg - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port/$tpg
+ * @tpg: Target portal group to deregister.
*/
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
@@ -3170,8 +3375,10 @@ static void srpt_drop_tpg(struct se_portal_group *tpg)
}
/**
- * configfs callback invoked for
- * mkdir /sys/kernel/config/target/$driver/$port
+ * srpt_make_tport - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port
+ * @tf: Not used.
+ * @group: Not used.
+ * @name: $port.
*/
static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
struct config_group *group,
@@ -3181,8 +3388,8 @@ static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
}
/**
- * configfs callback invoked for
- * rmdir /sys/kernel/config/target/$driver/$port
+ * srpt_drop_tport - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port
+ * @wwn: $port.
*/
static void srpt_drop_tport(struct se_wwn *wwn)
{
@@ -3240,7 +3447,7 @@ static const struct target_core_fabric_ops srpt_template = {
};
/**
- * srpt_init_module() - Kernel module initialization.
+ * srpt_init_module - kernel module initialization
*
* Note: Since ib_register_client() registers callback functions, and since at
* least one of these callback functions (srpt_add_one()) calls target core
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 673387d365a3..4d9199fd00dc 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -54,6 +54,8 @@
*/
#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
+struct srpt_nexus;
+
enum {
/*
* SRP IOControllerProfile attributes for SRP target ports that have
@@ -114,7 +116,7 @@ enum {
MIN_SRPT_SQ_SIZE = 16,
DEF_SRPT_SQ_SIZE = 4096,
- SRPT_RQ_SIZE = 128,
+ MAX_SRPT_RQ_SIZE = 128,
MIN_SRPT_SRQ_SIZE = 4,
DEFAULT_SRPT_SRQ_SIZE = 4095,
MAX_SRPT_SRQ_SIZE = 65535,
@@ -134,7 +136,7 @@ enum {
};
/**
- * enum srpt_command_state - SCSI command state managed by SRPT.
+ * enum srpt_command_state - SCSI command state managed by SRPT
* @SRPT_STATE_NEW: New command arrived and is being processed.
* @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
* for data arrival.
@@ -158,7 +160,8 @@ enum srpt_command_state {
};
/**
- * struct srpt_ioctx - Shared SRPT I/O context information.
+ * struct srpt_ioctx - shared SRPT I/O context information
+ * @cqe: Completion queue element.
* @buf: Pointer to the buffer.
* @dma: DMA address of the buffer.
* @index: Index of the I/O context in its ioctx_ring array.
@@ -171,7 +174,7 @@ struct srpt_ioctx {
};
/**
- * struct srpt_recv_ioctx - SRPT receive I/O context.
+ * struct srpt_recv_ioctx - SRPT receive I/O context
* @ioctx: See above.
* @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
*/
@@ -187,13 +190,20 @@ struct srpt_rw_ctx {
};
/**
- * struct srpt_send_ioctx - SRPT send I/O context.
+ * struct srpt_send_ioctx - SRPT send I/O context
* @ioctx: See above.
* @ch: Channel pointer.
- * @spinlock: Protects 'state'.
+ * @s_rw_ctx: @rw_ctxs points here if only a single rw_ctx is needed.
+ * @rw_ctxs: RDMA read/write contexts.
+ * @rdma_cqe: RDMA completion queue element.
+ * @free_list: Node in srpt_rdma_ch.free_list.
* @state: I/O context state.
* @cmd: Target core command data structure.
* @sense_data: SCSI sense data.
+ * @n_rdma: Number of work requests needed to transfer this ioctx.
+ * @n_rw_ctx: Size of rw_ctxs array.
+ * @queue_status_only: Send a SCSI status back to the initiator but no data.
*/
struct srpt_send_ioctx {
struct srpt_ioctx ioctx;
@@ -204,10 +214,8 @@ struct srpt_send_ioctx {
struct ib_cqe rdma_cqe;
struct list_head free_list;
- spinlock_t spinlock;
enum srpt_command_state state;
struct se_cmd cmd;
- struct completion tx_done;
u8 n_rdma;
u8 n_rw_ctx;
bool queue_status_only;
@@ -215,7 +223,7 @@ struct srpt_send_ioctx {
};
/**
- * enum rdma_ch_state - SRP channel state.
+ * enum rdma_ch_state - SRP channel state
* @CH_CONNECTING: QP is in RTR state; waiting for RTU.
* @CH_LIVE: QP is in RTS state.
* @CH_DISCONNECTING: DREQ has been sent and waiting for DREP or DREQ has
@@ -233,17 +241,19 @@ enum rdma_ch_state {
};
/**
- * struct srpt_rdma_ch - RDMA channel.
- * @cm_id: IB CM ID associated with the channel.
+ * struct srpt_rdma_ch - RDMA channel
+ * @nexus: I_T nexus this channel is associated with.
* @qp: IB queue pair used for communicating over this channel.
+ * @cm_id: IB CM ID associated with the channel.
* @cq: IB completion queue for this channel.
+ * @zw_cqe: Zero-length write CQE.
+ * @rcu: RCU head.
+ * @kref: kref for this channel.
* @rq_size: IB receive queue size.
- * @rsp_size IB response message size in bytes.
+ * @max_rsp_size: Maximum size of an RSP response message in bytes.
* @sq_wr_avail: number of work requests available in the send queue.
* @sport: pointer to the information of the HCA port used by this
* channel.
- * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
- * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
* @max_ti_iu_len: maximum target-to-initiator information unit length.
* @req_lim: request limit: maximum number of requests that may be sent
* by the initiator without having received a response.
@@ -251,30 +261,34 @@ enum rdma_ch_state {
* @spinlock: Protects free_list and state.
* @free_list: Head of list with free send I/O contexts.
* @state: channel state. See also enum rdma_ch_state.
+ * @processing_wait_list: Whether or not cmd_wait_list is being processed.
* @ioctx_ring: Send ring.
* @ioctx_recv_ring: Receive I/O context ring.
- * @list: Node for insertion in the srpt_device.rch_list list.
+ * @list: Node in srpt_nexus.ch_list.
* @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
* list contains struct srpt_ioctx elements and is protected
* against concurrent modification by the cm_id spinlock.
+ * @pkey: P_Key of the IB partition for this SRP channel.
* @sess: Session information associated with this SRP channel.
* @sess_name: Session name.
- * @ini_guid: Initiator port GUID.
* @release_work: Allows scheduling of srpt_release_channel().
- * @release_done: Enables waiting for srpt_release_channel() completion.
*/
struct srpt_rdma_ch {
- struct ib_cm_id *cm_id;
+ struct srpt_nexus *nexus;
struct ib_qp *qp;
+ union {
+ struct {
+ struct ib_cm_id *cm_id;
+ } ib_cm;
+ };
struct ib_cq *cq;
struct ib_cqe zw_cqe;
+ struct rcu_head rcu;
struct kref kref;
int rq_size;
- u32 rsp_size;
+ u32 max_rsp_size;
atomic_t sq_wr_avail;
struct srpt_port *sport;
- u8 i_port_id[16];
- u8 t_port_id[16];
int max_ti_iu_len;
atomic_t req_lim;
atomic_t req_lim_delta;
@@ -285,15 +299,31 @@ struct srpt_rdma_ch {
struct srpt_recv_ioctx **ioctx_recv_ring;
struct list_head list;
struct list_head cmd_wait_list;
+ uint16_t pkey;
+ bool processing_wait_list;
struct se_session *sess;
- u8 sess_name[36];
- u8 ini_guid[24];
+ u8 sess_name[24];
struct work_struct release_work;
- struct completion *release_done;
};
/**
- * struct srpt_port_attib - Attributes for SRPT port
+ * struct srpt_nexus - I_T nexus
+ * @rcu: RCU head for this data structure.
+ * @entry: srpt_port.nexus_list list node.
+ * @ch_list: struct srpt_rdma_ch list. Protected by srpt_port.mutex.
+ * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
+ * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
+ */
+struct srpt_nexus {
+ struct rcu_head rcu;
+ struct list_head entry;
+ struct list_head ch_list;
+ u8 i_port_id[16];
+ u8 t_port_id[16];
+};
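
srpt_cm_req_recv() above resolves the I_T nexus with srpt_get_nexus(), which is defined elsewhere in this patch. Given the rule documented here, that ch_list (and by extension nexus_list) is protected by srpt_port.mutex, a plausible find-or-create sketch is:

/*
 * Plausible sketch of srpt_get_nexus(); the real helper is defined elsewhere
 * in this patch. Allocation happens outside sport->mutex, so the list is
 * re-checked before inserting the new entry.
 */
static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
					 const u8 i_port_id[16],
					 const u8 t_port_id[16])
{
	struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;

	for (;;) {
		mutex_lock(&sport->mutex);
		list_for_each_entry(n, &sport->nexus_list, entry) {
			if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
			    memcmp(n->t_port_id, t_port_id, 16) == 0) {
				nexus = n;
				break;
			}
		}
		if (!nexus && tmp_nexus) {
			list_add_tail_rcu(&tmp_nexus->entry,
					  &sport->nexus_list);
			swap(nexus, tmp_nexus);
		}
		mutex_unlock(&sport->mutex);
		if (nexus)
			break;
		tmp_nexus = kzalloc(sizeof(*tmp_nexus), GFP_KERNEL);
		if (!tmp_nexus) {
			nexus = ERR_PTR(-ENOMEM);
			break;
		}
		INIT_LIST_HEAD(&tmp_nexus->ch_list);
		memcpy(tmp_nexus->i_port_id, i_port_id, 16);
		memcpy(tmp_nexus->t_port_id, t_port_id, 16);
	}

	kfree(tmp_nexus);
	return nexus;
}

Allocating outside the mutex and re-checking before insertion avoids a sleeping allocation under the lock while still preventing duplicate nexus entries.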
+
+/**
+ * struct srpt_port_attrib - attributes for SRPT port
* @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
* @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
* @srp_sq_size: Shared receive queue (SRQ) size.
@@ -307,7 +337,7 @@ struct srpt_port_attrib {
};
/**
- * struct srpt_port - Information associated by SRPT with a single IB port.
+ * struct srpt_port - information associated by SRPT with a single IB port
* @sdev: backpointer to the HCA information.
* @mad_agent: per-port management datagram processing information.
* @enabled: Whether or not this target port is enabled.
@@ -323,7 +353,10 @@ struct srpt_port_attrib {
* @port_guid_wwn: WWN associated with target port GUID.
* @port_gid_tpg: TPG associated with target port GID.
* @port_gid_wwn: WWN associated with target port GID.
- * @port_acl_list: Head of the list with all node ACLs for this port.
+ * @port_attrib: Port attributes that can be accessed through configfs.
+ * @ch_releaseQ: Enables waiting for removal from nexus_list.
+ * @mutex: Protects nexus_list.
+ * @nexus_list: Nexus list. See also srpt_nexus.entry.
*/
struct srpt_port {
struct srpt_device *sdev;
@@ -341,21 +374,22 @@ struct srpt_port {
struct se_portal_group port_gid_tpg;
struct se_wwn port_gid_wwn;
struct srpt_port_attrib port_attrib;
+ wait_queue_head_t ch_releaseQ;
+ struct mutex mutex;
+ struct list_head nexus_list;
};
/**
- * struct srpt_device - Information associated by SRPT with a single HCA.
+ * struct srpt_device - information associated by SRPT with a single HCA
* @device: Backpointer to the struct ib_device managed by the IB core.
* @pd: IB protection domain.
* @lkey: L_Key (local key) with write access to all local memory.
* @srq: Per-HCA SRQ (shared receive queue).
* @cm_id: Connection identifier.
* @srq_size: SRQ size.
+ * @sdev_mutex: Serializes use_srq changes.
* @use_srq: Whether or not to use SRQ.
* @ioctx_ring: Receive I/O context ring for the per-HCA SRQ.
- * @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
- * @ch_releaseQ: Enables waiting for removal from rch_list.
- * @mutex: Protects rch_list.
* @port: Information about the ports owned by this HCA.
* @event_handler: Per-HCA asynchronous IB event handler.
* @list: Node in srpt_dev_list.
@@ -367,11 +401,9 @@ struct srpt_device {
struct ib_srq *srq;
struct ib_cm_id *cm_id;
int srq_size;
+ struct mutex sdev_mutex;
bool use_srq;
struct srpt_recv_ioctx **ioctx_ring;
- struct list_head rch_list;
- wait_queue_head_t ch_releaseQ;
- struct mutex mutex;
struct srpt_port port[2];
struct ib_event_handler event_handler;
struct list_head list;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 925571475005..0193dd4f0452 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -635,11 +635,11 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
}
/* No kernel lock - fine */
-static unsigned int evdev_poll(struct file *file, poll_table *wait)
+static __poll_t evdev_poll(struct file *file, poll_table *wait)
{
struct evdev_client *client = file->private_data;
struct evdev *evdev = client->evdev;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &evdev->wait, wait);
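
These input hunks are part of a tree-wide conversion of poll handlers from unsigned int to __poll_t, a sparse-annotated type that lets static checking catch poll masks built from the wrong bit namespace. The resulting handler shape, with hypothetical names for everything except the types:

struct example_client {
	wait_queue_head_t wait;
	unsigned int head, tail;
};

static __poll_t example_poll(struct file *file, poll_table *wait)
{
	struct example_client *client = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &client->wait, wait);

	if (client->head != client->tail)	/* hypothetical readiness test */
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}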
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e30642db50d5..0d0b2ab1bb6b 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1048,7 +1048,7 @@ static inline void input_wakeup_procfs_readers(void)
wake_up(&input_devices_poll_wait);
}
-static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
+static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &input_devices_poll_wait, wait);
if (file->f_version != input_devices_state) {
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 7b29a8944039..fe3255572886 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -436,7 +436,7 @@ static ssize_t joydev_read(struct file *file, char __user *buf,
}
/* No kernel lock - fine */
-static unsigned int joydev_poll(struct file *file, poll_table *wait)
+static __poll_t joydev_poll(struct file *file, poll_table *wait)
{
struct joydev_client *client = file->private_data;
struct joydev *joydev = client->joydev;
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 3d8ff09eba57..c868a878c84f 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -163,7 +163,7 @@ static unsigned int get_time_pit(void)
#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "TSC"
-#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
+#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV) || defined(CONFIG_TILE)
#define GET_TIME(x) do { x = get_cycles(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "get_cycles"
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d86e59515b9c..d88d3e0f59fb 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -229,6 +229,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -476,6 +477,22 @@ static const u8 xboxone_hori_init[] = {
};
/*
+ * This packet is required for some of the PDP pads to start
+ * sending input reports. One of those pads is (0x0e6f:0x02ab).
+ */
+static const u8 xboxone_pdp_init1[] = {
+ 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
+};
+
+/*
+ * This packet is required for some of the PDP pads to start
+ * sending input reports. One of those pads is (0x0e6f:0x02ab).
+ */
+static const u8 xboxone_pdp_init2[] = {
+ 0x06, 0x20, 0x00, 0x02, 0x01, 0x00
+};
+
+/*
* A specific rumble packet is required for some PowerA pads to start
* sending input reports. One of those pads is (0x24c6:0x543a).
*/
@@ -505,6 +522,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+ XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
+ XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
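
The new table entries reuse the existing init machinery: at connect time the driver walks xboxone_init_packets in order and transmits every packet whose vendor/product pair matches the pad, with 0x0000/0x0000 (the fw2015 entry above) acting as a wildcard. A hedged sketch of that selection loop, with send_init_packet() standing in for the driver's actual transmit path:

	for (i = 0; i < ARRAY_SIZE(xboxone_init_packets); i++) {
		const struct xboxone_init_packet *pkt = &xboxone_init_packets[i];

		/* 0x0000 in either field matches any pad. */
		if ((pkt->idVendor && pkt->idVendor != vid) ||
		    (pkt->idProduct && pkt->idProduct != pid))
			continue;

		send_init_packet(xpad, pkt);	/* hypothetical sender */
	}

This is why the two PDP packets appear as separate entries for the same 0x0e6f:0x02ab device: both are matched and sent, in table order.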
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 1c8c56efc995..9c3f7ec3bd3d 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -408,7 +408,7 @@ static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
return retval;
}
-static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait)
+static __poll_t hp_sdc_rtc_poll(struct file *file, poll_table *wait)
{
unsigned long l;
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index ae473123583b..3d51175c4d72 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1651,7 +1651,7 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
return union_desc;
dev_err(&intf->dev,
- "Union descriptor to short (%d vs %zd\n)",
+ "Union descriptor too short (%d vs %zd)\n",
union_desc->bLength, sizeof(*union_desc));
return NULL;
}
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 6c51d404874b..c37aea9ac272 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
twl4030_vibra_suspend, twl4030_vibra_resume);
static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
- struct device_node *node)
+ struct device_node *parent)
{
+ struct device_node *node;
+
if (pdata && pdata->coexist)
return true;
- node = of_find_node_by_name(node, "codec");
+ node = of_get_child_by_name(parent, "codec");
if (node) {
of_node_put(node);
return true;
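
Replacing of_find_node_by_name() with of_get_child_by_name() is a behavioral fix rather than a cleanup: of_find_node_by_name() searches the entire device tree starting after the node it is given, and drops a reference on that starting node, which here could release the parent prematurely and match an unrelated "codec" node elsewhere in the tree. of_get_child_by_name() matches only direct children and returns the child with a reference held:

	struct device_node *child = of_get_child_by_name(parent, "codec");

	if (child) {
		/* ... inspect the child node ... */
		of_node_put(child);	/* balance the reference taken above */
	}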
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 5690eb7ff954..15e0d352c4cc 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -248,8 +248,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
int vddvibr_uV = 0;
int error;
- of_node_get(twl6040_core_dev->of_node);
- twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
+ twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
"vibra");
if (!twl6040_core_node) {
dev_err(&pdev->dev, "parent of node is missing?\n");
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 39ddd9a73feb..91df0df15e68 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -694,7 +694,7 @@ static ssize_t uinput_read(struct file *file, char __user *buffer,
return retval;
}
-static unsigned int uinput_poll(struct file *file, poll_table *wait)
+static __poll_t uinput_poll(struct file *file, poll_table *wait)
{
struct uinput_device *udev = file->private_data;
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 6bf56bb5f8d9..d91f3b1c5375 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -326,8 +326,6 @@ static int xenkbd_probe(struct xenbus_device *dev,
0, width, 0, 0);
input_set_abs_params(mtouch, ABS_MT_POSITION_Y,
0, height, 0, 0);
- input_set_abs_params(mtouch, ABS_MT_PRESSURE,
- 0, 255, 0, 0);
ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT);
if (ret) {
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 579b899add26..dbe57da8c1a1 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1250,29 +1250,32 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
case SS4_PACKET_ID_MULTI:
if (priv->flags & ALPS_BUTTONPAD) {
if (IS_SS4PLUS_DEV(priv->dev_id)) {
- f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
- f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+ f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+ no_data_x = SS4_PLUS_MFPACKET_NO_AX_BL;
} else {
f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX_BL;
}
+ no_data_y = SS4_MFPACKET_NO_AY_BL;
f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
- no_data_x = SS4_MFPACKET_NO_AX_BL;
- no_data_y = SS4_MFPACKET_NO_AY_BL;
} else {
if (IS_SS4PLUS_DEV(priv->dev_id)) {
- f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
- f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+ f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+ no_data_x = SS4_PLUS_MFPACKET_NO_AX;
} else {
- f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
- f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+ f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX;
}
+ no_data_y = SS4_MFPACKET_NO_AY;
+
f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
- no_data_x = SS4_MFPACKET_NO_AX;
- no_data_y = SS4_MFPACKET_NO_AY;
}
f->first_mp = 0;
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index c80a7c76cb76..79b6d69d1486 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -141,10 +141,12 @@ enum SS4_PACKET_ID {
#define SS4_TS_Z_V2(_b) (s8)(_b[4] & 0x7F)
-#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
-#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
-#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coordinate value */
-#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coordinate value */
+#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
+#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
+#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coord value */
+#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coord value */
+#define SS4_PLUS_MFPACKET_NO_AX 4080 /* SS4 PLUS, X */
+#define SS4_PLUS_MFPACKET_NO_AX_BL 4088 /* Buttonless SS4 PLUS, X */
/*
* enum V7_PACKET_ID - defines the packet type for V7
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index b84cd978fce2..a4aaa748e987 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd)
case 5:
etd->hw_version = 3;
break;
- case 6 ... 14:
+ case 6 ... 15:
etd->hw_version = 4;
break;
default:
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ee5466a374bf..cd9f61cb3fc6 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -173,6 +173,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0046", /* X250 */
"LEN004a", /* W541 */
"LEN200f", /* T450s */
+ "LEN2018", /* T460p */
NULL
};
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 0871010f18d5..bbd29220dbe9 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -19,6 +19,13 @@
#include "psmouse.h"
#include "trackpoint.h"
+static const char * const trackpoint_variants[] = {
+ [TP_VARIANT_IBM] = "IBM",
+ [TP_VARIANT_ALPS] = "ALPS",
+ [TP_VARIANT_ELAN] = "Elan",
+ [TP_VARIANT_NXP] = "NXP",
+};
+
/*
* Power-on Reset: Resets all trackpoint parameters, including RAM values,
* to defaults.
@@ -26,7 +33,7 @@
*/
static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
{
- unsigned char results[2];
+ u8 results[2];
int tries = 0;
/* Issue POR command, and repeat up to once if 0xFC00 received */
@@ -38,7 +45,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
/* Check for success response -- 0xAA00 */
if (results[0] != 0xAA || results[1] != 0x00)
- return -1;
+ return -ENODEV;
return 0;
}
@@ -46,8 +53,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
/*
* Device IO: read, write and toggle bit
*/
-static int trackpoint_read(struct ps2dev *ps2dev,
- unsigned char loc, unsigned char *results)
+static int trackpoint_read(struct ps2dev *ps2dev, u8 loc, u8 *results)
{
if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc))) {
@@ -57,8 +63,7 @@ static int trackpoint_read(struct ps2dev *ps2dev,
return 0;
}
-static int trackpoint_write(struct ps2dev *ps2dev,
- unsigned char loc, unsigned char val)
+static int trackpoint_write(struct ps2dev *ps2dev, u8 loc, u8 val)
{
if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)) ||
@@ -70,8 +75,7 @@ static int trackpoint_write(struct ps2dev *ps2dev,
return 0;
}
-static int trackpoint_toggle_bit(struct ps2dev *ps2dev,
- unsigned char loc, unsigned char mask)
+static int trackpoint_toggle_bit(struct ps2dev *ps2dev, u8 loc, u8 mask)
{
/* Bad things will happen if the loc param isn't in this range */
if (loc < 0x20 || loc >= 0x2F)
@@ -87,11 +91,11 @@ static int trackpoint_toggle_bit(struct ps2dev *ps2dev,
return 0;
}
-static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc,
- unsigned char mask, unsigned char value)
+static int trackpoint_update_bit(struct ps2dev *ps2dev,
+ u8 loc, u8 mask, u8 value)
{
int retval = 0;
- unsigned char data;
+ u8 data;
trackpoint_read(ps2dev, loc, &data);
if (((data & mask) == mask) != !!value)
@@ -105,17 +109,18 @@ static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc,
*/
struct trackpoint_attr_data {
size_t field_offset;
- unsigned char command;
- unsigned char mask;
- unsigned char inverted;
- unsigned char power_on_default;
+ u8 command;
+ u8 mask;
+ bool inverted;
+ u8 power_on_default;
};
-static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf)
+static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse,
+ void *data, char *buf)
{
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
- unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset);
+ u8 value = *(u8 *)((void *)tp + attr->field_offset);
if (attr->inverted)
value = !value;
@@ -128,8 +133,8 @@ static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
{
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
- unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
- unsigned char value;
+ u8 *field = (void *)tp + attr->field_offset;
+ u8 value;
int err;
err = kstrtou8(buf, 10, &value);
@@ -157,17 +162,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
{
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
- unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
- unsigned int value;
+ bool *field = (void *)tp + attr->field_offset;
+ bool value;
int err;
- err = kstrtouint(buf, 10, &value);
+ err = kstrtobool(buf, &value);
if (err)
return err;
- if (value > 1)
- return -EINVAL;
-
if (attr->inverted)
value = !value;
@@ -193,30 +195,6 @@ PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \
&trackpoint_attr_##_name, \
trackpoint_show_int_attr, trackpoint_set_bit_attr)
-#define TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name) \
-do { \
- struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \
- \
- trackpoint_update_bit(&_psmouse->ps2dev, \
- _attr->command, _attr->mask, _tp->_name); \
-} while (0)
-
-#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \
-do { \
- if (!_power_on || \
- _tp->_name != trackpoint_attr_##_name.power_on_default) { \
- if (!trackpoint_attr_##_name.mask) \
- trackpoint_write(&_psmouse->ps2dev, \
- trackpoint_attr_##_name.command, \
- _tp->_name); \
- else \
- TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name); \
- } \
-} while (0)
-
-#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \
- (_tp->_name = trackpoint_attr_##_name.power_on_default)
-
TRACKPOINT_INT_ATTR(sensitivity, TP_SENS, TP_DEF_SENS);
TRACKPOINT_INT_ATTR(speed, TP_SPEED, TP_DEF_SPEED);
TRACKPOINT_INT_ATTR(inertia, TP_INERTIA, TP_DEF_INERTIA);
@@ -229,13 +207,33 @@ TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
-TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
+TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, false,
TP_DEF_PTSON);
-TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0,
+TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, false,
TP_DEF_SKIPBACK);
-TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1,
+TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, true,
TP_DEF_EXT_DEV);
+static bool trackpoint_is_attr_available(struct psmouse *psmouse,
+ struct attribute *attr)
+{
+ struct trackpoint_data *tp = psmouse->private;
+
+ return tp->variant_id == TP_VARIANT_IBM ||
+ attr == &psmouse_attr_sensitivity.dattr.attr ||
+ attr == &psmouse_attr_press_to_select.dattr.attr;
+}
+
+static umode_t trackpoint_is_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct serio *serio = to_serio_port(dev);
+ struct psmouse *psmouse = serio_get_drvdata(serio);
+
+ return trackpoint_is_attr_available(psmouse, attr) ? attr->mode : 0;
+}
+
static struct attribute *trackpoint_attrs[] = {
&psmouse_attr_sensitivity.dattr.attr,
&psmouse_attr_speed.dattr.attr,
@@ -255,24 +253,56 @@ static struct attribute *trackpoint_attrs[] = {
};
static struct attribute_group trackpoint_attr_group = {
- .attrs = trackpoint_attrs,
+ .is_visible = trackpoint_is_attr_visible,
+ .attrs = trackpoint_attrs,
};
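
With an .is_visible callback in place, the callback is evaluated once per attribute when device_add_group() registers the group (see trackpoint_detect() below): returning 0 suppresses creation of that sysfs file, while returning attr->mode keeps the declared permissions. The same availability test is reused inside TRACKPOINT_UPDATE() so that hidden attributes are never written to the hardware either. The general shape of such a callback:

static umode_t example_is_visible(struct kobject *kobj,
				  struct attribute *attr, int n)
{
	/* supports_debug() and the attribute name are hypothetical. */
	if (!strcmp(attr->name, "debug") && !supports_debug(kobj))
		return 0;		/* sysfs file is not created */

	return attr->mode;		/* keep the declared permissions */
}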
-static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *firmware_id)
-{
- unsigned char param[2] = { 0 };
+#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \
+do { \
+ struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \
+ \
+ if ((!_power_on || _tp->_name != _attr->power_on_default) && \
+ trackpoint_is_attr_available(_psmouse, \
+ &psmouse_attr_##_name.dattr.attr)) { \
+ if (!_attr->mask) \
+ trackpoint_write(&_psmouse->ps2dev, \
+ _attr->command, _tp->_name); \
+ else \
+ trackpoint_update_bit(&_psmouse->ps2dev, \
+ _attr->command, _attr->mask, \
+ _tp->_name); \
+ } \
+} while (0)
- if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
- return -1;
+#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \
+do { \
+ _tp->_name = trackpoint_attr_##_name.power_on_default; \
+} while (0)
- /* add new TP ID. */
- if (!(param[0] & TP_MAGIC_IDENT))
- return -1;
+static int trackpoint_start_protocol(struct psmouse *psmouse,
+ u8 *variant_id, u8 *firmware_id)
+{
+ u8 param[2] = { 0 };
+ int error;
- if (firmware_id)
- *firmware_id = param[1];
+ error = ps2_command(&psmouse->ps2dev,
+ param, MAKE_PS2_CMD(0, 2, TP_READ_ID));
+ if (error)
+ return error;
+
+ switch (param[0]) {
+ case TP_VARIANT_IBM:
+ case TP_VARIANT_ALPS:
+ case TP_VARIANT_ELAN:
+ case TP_VARIANT_NXP:
+ if (variant_id)
+ *variant_id = param[0];
+ if (firmware_id)
+ *firmware_id = param[1];
+ return 0;
+ }
- return 0;
+ return -ENODEV;
}
/*
@@ -285,7 +315,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
{
struct trackpoint_data *tp = psmouse->private;
- if (!in_power_on_state) {
+ if (!in_power_on_state && tp->variant_id == TP_VARIANT_IBM) {
/*
* Disable features that may make device unusable
* with this driver.
@@ -347,7 +377,8 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
static void trackpoint_disconnect(struct psmouse *psmouse)
{
- sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group);
+ device_remove_group(&psmouse->ps2dev.serio->dev,
+ &trackpoint_attr_group);
kfree(psmouse->private);
psmouse->private = NULL;
@@ -355,14 +386,20 @@ static void trackpoint_disconnect(struct psmouse *psmouse)
static int trackpoint_reconnect(struct psmouse *psmouse)
{
- int reset_fail;
+ struct trackpoint_data *tp = psmouse->private;
+ int error;
+ bool was_reset;
- if (trackpoint_start_protocol(psmouse, NULL))
- return -1;
+ error = trackpoint_start_protocol(psmouse, NULL, NULL);
+ if (error)
+ return error;
- reset_fail = trackpoint_power_on_reset(&psmouse->ps2dev);
- if (trackpoint_sync(psmouse, !reset_fail))
- return -1;
+ was_reset = tp->variant_id == TP_VARIANT_IBM &&
+ trackpoint_power_on_reset(&psmouse->ps2dev) == 0;
+
+ error = trackpoint_sync(psmouse, was_reset);
+ if (error)
+ return error;
return 0;
}
@@ -370,46 +407,66 @@ static int trackpoint_reconnect(struct psmouse *psmouse)
int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
- unsigned char firmware_id;
- unsigned char button_info;
+ struct trackpoint_data *tp;
+ u8 variant_id;
+ u8 firmware_id;
+ u8 button_info;
int error;
- if (trackpoint_start_protocol(psmouse, &firmware_id))
- return -1;
+ error = trackpoint_start_protocol(psmouse, &variant_id, &firmware_id);
+ if (error)
+ return error;
if (!set_properties)
return 0;
- if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) {
- psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
- button_info = 0x33;
- }
-
- psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
- if (!psmouse->private)
+ tp = kzalloc(sizeof(*tp), GFP_KERNEL);
+ if (!tp)
return -ENOMEM;
- psmouse->vendor = "IBM";
+ trackpoint_defaults(tp);
+ tp->variant_id = variant_id;
+ tp->firmware_id = firmware_id;
+
+ psmouse->private = tp;
+
+ psmouse->vendor = trackpoint_variants[variant_id];
psmouse->name = "TrackPoint";
psmouse->reconnect = trackpoint_reconnect;
psmouse->disconnect = trackpoint_disconnect;
+ if (variant_id != TP_VARIANT_IBM) {
+ /* Newer variants do not support extended button query. */
+ button_info = 0x33;
+ } else {
+ error = trackpoint_read(ps2dev, TP_EXT_BTN, &button_info);
+ if (error) {
+ psmouse_warn(psmouse,
+ "failed to get extended button data, assuming 3 buttons\n");
+ button_info = 0x33;
+ } else if (!button_info) {
+ psmouse_warn(psmouse,
+ "got 0 in extended button data, assuming 3 buttons\n");
+ button_info = 0x33;
+ }
+ }
+
if ((button_info & 0x0f) >= 3)
- __set_bit(BTN_MIDDLE, psmouse->dev->keybit);
+ input_set_capability(psmouse->dev, EV_KEY, BTN_MIDDLE);
__set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit);
__set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit);
- trackpoint_defaults(psmouse->private);
-
- error = trackpoint_power_on_reset(ps2dev);
-
- /* Write defaults to TP only if reset fails. */
- if (error)
+ if (variant_id != TP_VARIANT_IBM ||
+ trackpoint_power_on_reset(ps2dev) != 0) {
+ /*
+ * Write defaults to TP if we did not reset the trackpoint.
+ */
trackpoint_sync(psmouse, false);
+ }
- error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group);
+ error = device_add_group(&ps2dev->serio->dev, &trackpoint_attr_group);
if (error) {
psmouse_err(psmouse,
"failed to create sysfs attributes, error: %d\n",
@@ -420,8 +477,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
}
psmouse_info(psmouse,
- "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
- firmware_id,
+ "%s TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
+ psmouse->vendor, firmware_id,
(button_info & 0xf0) >> 4, button_info & 0x0f);
return 0;
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 88055755f82e..10a039148234 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -21,10 +21,16 @@
#define TP_COMMAND 0xE2 /* Commands start with this */
#define TP_READ_ID 0xE1 /* Sent for device identification */
-#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
- /* by the firmware ID */
- /* Firmware ID includes 0x1, 0x2, 0x3 */
+/*
+ * Valid first byte responses to the "Read Secondary ID" (0xE1) command.
+ * 0x01 was the original IBM trackpoint, others implement very limited
+ * subset of trackpoint features.
+ */
+#define TP_VARIANT_IBM 0x01
+#define TP_VARIANT_ALPS 0x02
+#define TP_VARIANT_ELAN 0x03
+#define TP_VARIANT_NXP 0x04
/*
* Commands
@@ -136,18 +142,20 @@
#define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd))
-struct trackpoint_data
-{
- unsigned char sensitivity, speed, inertia, reach;
- unsigned char draghys, mindrag;
- unsigned char thresh, upthresh;
- unsigned char ztime, jenks;
- unsigned char drift_time;
+struct trackpoint_data {
+ u8 variant_id;
+ u8 firmware_id;
+
+ u8 sensitivity, speed, inertia, reach;
+ u8 draghys, mindrag;
+ u8 thresh, upthresh;
+ u8 ztime, jenks;
+ u8 drift_time;
/* toggles */
- unsigned char press_to_select;
- unsigned char skipback;
- unsigned char ext_dev;
+ bool press_to_select;
+ bool skipback;
+ bool ext_dev;
};
#ifdef CONFIG_MOUSE_PS2_TRACKPOINT
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 2d7f691ec71c..731d84ae5101 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -757,11 +757,11 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
}
/* No kernel lock - fine */
-static unsigned int mousedev_poll(struct file *file, poll_table *wait)
+static __poll_t mousedev_poll(struct file *file, poll_table *wait)
{
struct mousedev_client *client = file->private_data;
struct mousedev *mousedev = client->mousedev;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &mousedev->wait, wait);
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 4f2bb5947a4e..141ea228aac6 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -230,8 +230,10 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
"Failed to process interrupt request: %d\n", ret);
- if (count)
+ if (count) {
kfree(attn_data.data);
+ attn_data.data = NULL;
+ }
if (!kfifo_is_empty(&drvdata->attn_fifo))
return rmi_irq_fn(irq, dev_id);
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index ae966e333a2f..8a07ae147df6 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -570,14 +570,19 @@ static int rmi_f01_probe(struct rmi_function *fn)
dev_set_drvdata(&fn->dev, f01);
- error = devm_device_add_group(&fn->rmi_dev->dev, &rmi_f01_attr_group);
+ error = sysfs_create_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
if (error)
- dev_warn(&fn->dev,
- "Failed to create attribute group: %d\n", error);
+ dev_warn(&fn->dev, "Failed to create sysfs group: %d\n", error);
return 0;
}
+static void rmi_f01_remove(struct rmi_function *fn)
+{
+ /* Note that the bus device is used, not the F01 device */
+ sysfs_remove_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
+}
+
static int rmi_f01_config(struct rmi_function *fn)
{
struct f01_data *f01 = dev_get_drvdata(&fn->dev);
@@ -717,6 +722,7 @@ struct rmi_function_handler rmi_f01_handler = {
},
.func = 0x01,
.probe = rmi_f01_probe,
+ .remove = rmi_f01_remove,
.config = rmi_f01_config,
.attention = rmi_f01_attention,
.suspend = rmi_f01_suspend,
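Because the attribute group lives on the bus device rather than on the F01
function device, devm_ cleanup keyed to the function device would tear it down
against the wrong lifetime; the fix is a manual create/remove pair, sketched
below with illustrative names:

static int my_probe(struct rmi_function *fn)
{
	/* attributes live on the bus device, not on &fn->dev */
	return sysfs_create_group(&fn->rmi_dev->dev.kobj, &my_attr_group);
}

static void my_remove(struct rmi_function *fn)
{
	/* must mirror the kobject that was used at creation time */
	sysfs_remove_group(&fn->rmi_dev->dev.kobj, &my_attr_group);
}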
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 516f9fe77a17..fccf55a380b2 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -239,11 +239,11 @@ out:
return retval;
}
-static unsigned int serio_raw_poll(struct file *file, poll_table *wait)
+static __poll_t serio_raw_poll(struct file *file, poll_table *wait)
{
struct serio_raw_client *client = file->private_data;
struct serio_raw *serio_raw = client->serio_raw;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &serio_raw->wait, wait);
diff --git a/drivers/input/serio/userio.c b/drivers/input/serio/userio.c
index df1fd41860ac..a63de06b08bc 100644
--- a/drivers/input/serio/userio.c
+++ b/drivers/input/serio/userio.c
@@ -248,7 +248,7 @@ out:
return error ?: count;
}
-static unsigned int userio_char_poll(struct file *file, poll_table *wait)
+static __poll_t userio_char_poll(struct file *file, poll_table *wait)
{
struct userio_device *userio = file->private_data;
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index 7ed828a51f4c..3486d9403805 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
int data, n, ret;
if (!np)
return -ENODEV;
- np = of_find_node_by_name(np, "touch");
+ np = of_get_child_by_name(np, "touch");
if (!np) {
dev_err(&pdev->dev, "Can't find touch node\n");
return -EINVAL;
@@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
if (data) {
ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
if (ret < 0)
- return -EINVAL;
+ goto err_put_node;
}
/* set tsi prebias time */
if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) {
ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
if (ret < 0)
- return -EINVAL;
+ goto err_put_node;
}
/* set prebias & prechg time of pen detect */
data = 0;
@@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
if (data) {
ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
if (ret < 0)
- return -EINVAL;
+ goto err_put_node;
}
of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x);
+
+ of_node_put(np);
+
return 0;
+
+err_put_node:
+ of_node_put(np);
+
+ return -EINVAL;
}
#else
#define pm860x_touch_dt_init(x, y, z) (-1)
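of_find_node_by_name() walks the whole tree and drops the caller's reference
on the starting node, while of_get_child_by_name() searches only the direct
children and returns a new reference that must be balanced with of_node_put()
on every exit path. A minimal sketch of the pattern (node name and helper are
illustrative):

static int my_dt_init(struct device_node *parent)
{
	struct device_node *np;
	int ret;

	np = of_get_child_by_name(parent, "touch");
	if (!np)
		return -EINVAL;

	ret = my_parse(np);	/* illustrative parsing helper */

	of_node_put(np);	/* drop the child reference on all paths */
	return ret;
}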
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index e102d7764bc2..a458e5ec9e41 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/async.h>
#include <linux/i2c.h>
@@ -1261,10 +1262,13 @@ static int elants_i2c_probe(struct i2c_client *client,
}
/*
- * Systems using device tree should set up interrupt via DTS,
- * the rest will use the default falling edge interrupts.
+ * Platform code (ACPI, DTS) should normally set up interrupt
+ * for us, but in case it did not let's fall back to using falling
+ * edge to be compatible with older Chromebooks.
*/
- irqflags = client->dev.of_node ? 0 : IRQF_TRIGGER_FALLING;
+ irqflags = irq_get_trigger_type(client->irq);
+ if (!irqflags)
+ irqflags = IRQF_TRIGGER_FALLING;
error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, elants_i2c_irq,
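irq_get_trigger_type() returns the trigger already configured by ACPI or DT
(0 when nothing was set up), so the falling-edge fallback only applies on
boards whose firmware leaves the line unconfigured. A sketch of the full
request under that assumption (the handler name is illustrative):

	unsigned long irqflags = irq_get_trigger_type(client->irq);

	if (!irqflags)
		irqflags = IRQF_TRIGGER_FALLING;	/* legacy default */

	error = devm_request_threaded_irq(&client->dev, client->irq,
					  NULL, my_i2c_irq, /* illustrative */
					  irqflags | IRQF_ONESHOT,
					  client->name, ts);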
diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c
index fc080a7c2e1f..f1cd4dd9a4a3 100644
--- a/drivers/input/touchscreen/hideep.c
+++ b/drivers/input/touchscreen/hideep.c
@@ -10,8 +10,7 @@
#include <linux/of.h>
#include <linux/firmware.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
index 8d7f9c8f2771..9642f103b726 100644
--- a/drivers/input/touchscreen/of_touchscreen.c
+++ b/drivers/input/touchscreen/of_touchscreen.c
@@ -13,6 +13,7 @@
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
+#include <linux/module.h>
static bool touchscreen_get_prop_u32(struct device *dev,
const char *property,
@@ -185,3 +186,6 @@ void touchscreen_report_pos(struct input_dev *input,
input_report_abs(input, multitouch ? ABS_MT_POSITION_Y : ABS_Y, y);
}
EXPORT_SYMBOL(touchscreen_report_pos);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Device-tree helpers functions for touchscreen devices");
diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
index 26b1cb8a88ec..675efa93d444 100644
--- a/drivers/input/touchscreen/s6sy761.c
+++ b/drivers/input/touchscreen/s6sy761.c
@@ -1,13 +1,8 @@
-/*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd.
- * Author: Andi Shyti <andi.shyti@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Samsung S6SY761 Touchscreen device driver
- */
+// SPDX-License-Identifier: GPL-2.0
+// Samsung S6SY761 Touchscreen device driver
+//
+// Copyright (c) 2017 Samsung Electronics Co., Ltd.
+// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
#include <asm/unaligned.h>
#include <linux/delay.h>
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index c12d01899939..2a123e20a42e 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -1,13 +1,8 @@
-/*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd.
- * Author: Andi Shyti <andi.shyti@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * STMicroelectronics FTS Touchscreen device driver
- */
+// SPDX-License-Identifier: GPL-2.0
+// STMicroelectronics FTS Touchscreen device driver
+//
+// Copyright (c) 2017 Samsung Electronics Co., Ltd.
+// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
#include <linux/delay.h>
#include <linux/i2c.h>
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 7d5eb004091d..97baf88d9505 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4184,7 +4184,7 @@ static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
struct irq_cfg *cfg);
static int irq_remapping_activate(struct irq_domain *domain,
- struct irq_data *irq_data, bool early)
+ struct irq_data *irq_data, bool reserve)
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
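The rename of the bool parameter from "early" to "reserve" recurs in every
irq_domain activate callback touched by this series; a sketch of the updated
signature (the callback name is illustrative, and the comment reflects the
reservation-mode semantics the rename is assumed to describe):

static int my_domain_activate(struct irq_domain *domain,
			      struct irq_data *irq_data, bool reserve)
{
	/*
	 * When reserve is true, the caller is only reserving resources
	 * for the interrupt; full programming happens on a later,
	 * real activation.
	 */
	return 0;
}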
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 7d94e1d39e5e..df72493a0f13 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -427,6 +427,7 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
}
static const struct mmu_notifier_ops iommu_mn = {
+ .flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
.release = mn_release,
.clear_flush_young = mn_clear_flush_young,
.invalidate_range = mn_invalidate_range,
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index f122071688fd..744592d330ca 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1698,13 +1698,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
domain->geometry.aperture_end = (1UL << ias) - 1;
domain->geometry.force_aperture = true;
- smmu_domain->pgtbl_ops = pgtbl_ops;
ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
- if (ret < 0)
+ if (ret < 0) {
free_io_pgtable_ops(pgtbl_ops);
+ return ret;
+ }
- return ret;
+ smmu_domain->pgtbl_ops = pgtbl_ops;
+ return 0;
}
static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
@@ -1731,7 +1733,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
- int i;
+ int i, j;
struct arm_smmu_master_data *master = fwspec->iommu_priv;
struct arm_smmu_device *smmu = master->smmu;
@@ -1739,6 +1741,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
u32 sid = fwspec->ids[i];
__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
+ /* Bridged PCI devices may end up with duplicated IDs */
+ for (j = 0; j < i; j++)
+ if (fwspec->ids[j] == sid)
+ break;
+ if (j < i)
+ continue;
+
arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
}
}
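Bridged PCI devices can alias to the same stream ID more than once in
fwspec->ids, and writing the same STE repeatedly is at best redundant; the
duplicate check is a plain O(n^2) scan, which is fine for the small ID counts
involved. A standalone sketch of the same filtering idiom:

/* Return true if ids[i] already appeared earlier in the array. */
static bool id_seen_before(const u32 *ids, int i)
{
	int j;

	for (j = 0; j < i; j++)
		if (ids[j] == ids[i])
			return true;
	return false;
}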
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 4a2de34895ec..a1373cf34326 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4808,7 +4808,7 @@ int __init intel_iommu_init(void)
up_write(&dmar_global_lock);
pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
-#ifdef CONFIG_SWIOTLB
+#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
swiotlb = 0;
#endif
dma_ops = &intel_dma_ops;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index ed1cf7c5a43b..0a826eb7fe48 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -276,6 +276,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
}
static const struct mmu_notifier_ops intel_mmuops = {
+ .flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
.release = intel_mm_release,
.change_pte = intel_change_pte,
.invalidate_range = intel_invalidate_range,
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 76a193c7fcfc..66f69af2c219 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1397,7 +1397,7 @@ static void intel_irq_remapping_free(struct irq_domain *domain,
}
static int intel_irq_remapping_activate(struct irq_domain *domain,
- struct irq_data *irq_data, bool early)
+ struct irq_data *irq_data, bool reserve)
{
intel_ir_reconfigure_irte(irq_data, true);
return 0;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index c70476b34a53..d913aec85109 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -343,4 +343,12 @@ config MESON_IRQ_GPIO
help
Support Meson SoC Family GPIO Interrupt Multiplexer
+config GOLDFISH_PIC
+ bool "Goldfish programmable interrupt controller"
+ depends on MIPS && (GOLDFISH || COMPILE_TEST)
+ select IRQ_DOMAIN
+ help
+ Say yes here to enable the Goldfish interrupt controller driver
+ used on Goldfish-based virtual platforms.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index d2df34a54d38..d27e3e3619e0 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -84,3 +84,4 @@ obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o
obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
+obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 667b9e14b032..dfe4a460340b 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -98,13 +98,35 @@ static struct irq_chip bcm2836_arm_irqchip_gpu = {
.irq_unmask = bcm2836_arm_irqchip_unmask_gpu_irq,
};
-static void bcm2836_arm_irqchip_register_irq(int hwirq, struct irq_chip *chip)
-{
- int irq = irq_create_mapping(intc.domain, hwirq);
+static int bcm2836_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct irq_chip *chip;
+
+ switch (hw) {
+ case LOCAL_IRQ_CNTPSIRQ:
+ case LOCAL_IRQ_CNTPNSIRQ:
+ case LOCAL_IRQ_CNTHPIRQ:
+ case LOCAL_IRQ_CNTVIRQ:
+ chip = &bcm2836_arm_irqchip_timer;
+ break;
+ case LOCAL_IRQ_GPU_FAST:
+ chip = &bcm2836_arm_irqchip_gpu;
+ break;
+ case LOCAL_IRQ_PMU_FAST:
+ chip = &bcm2836_arm_irqchip_pmu;
+ break;
+ default:
+ pr_warn_once("Unexpected hw irq: %lu\n", hw);
+ return -EINVAL;
+ }
irq_set_percpu_devid(irq);
- irq_set_chip_and_handler(irq, chip, handle_percpu_devid_irq);
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ return 0;
}
static void
@@ -165,7 +187,8 @@ static int bcm2836_cpu_dying(unsigned int cpu)
#endif
static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
- .xlate = irq_domain_xlate_onecell
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = bcm2836_map,
};
static void
@@ -218,19 +241,6 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
if (!intc.domain)
panic("%pOF: unable to create IRQ domain\n", node);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPNSIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTHPIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTVIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_GPU_FAST,
- &bcm2836_arm_irqchip_gpu);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_PMU_FAST,
- &bcm2836_arm_irqchip_pmu);
-
bcm2836_arm_irqchip_smp_init();
set_handle_irq(bcm2836_arm_irqchip_handle_irq);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 4039e64cd342..06f025fd5726 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2303,7 +2303,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
}
static int its_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *d, bool early)
+ struct irq_data *d, bool reserve)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
@@ -2818,7 +2818,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
}
static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *d, bool early)
+ struct irq_data *d, bool reserve)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_node *its;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index b56c3e23f0af..a57c0fbbd34a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1070,31 +1070,6 @@ static int __init gic_validate_dist_version(void __iomem *dist_base)
return 0;
}
-static int get_cpu_number(struct device_node *dn)
-{
- const __be32 *cell;
- u64 hwid;
- int cpu;
-
- cell = of_get_property(dn, "reg", NULL);
- if (!cell)
- return -1;
-
- hwid = of_read_number(cell, of_n_addr_cells(dn));
-
- /*
- * Non affinity bits must be set to 0 in the DT
- */
- if (hwid & ~MPIDR_HWID_BITMASK)
- return -1;
-
- for_each_possible_cpu(cpu)
- if (cpu_logical_map(cpu) == hwid)
- return cpu;
-
- return -1;
-}
-
/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
@@ -1145,8 +1120,8 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
if (WARN_ON(!cpu_node))
continue;
- cpu = get_cpu_number(cpu_node);
- if (WARN_ON(cpu == -1))
+ cpu = of_cpu_node_to_id(cpu_node);
+ if (WARN_ON(cpu < 0))
continue;
pr_cont("%pOF[%d] ", cpu_node, cpu);
@@ -1331,6 +1306,10 @@ gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
void __iomem *redist_base;
+ /* Firmware may pass a disabled GICC entry; skip it rather than fail */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
redist_base = ioremap(gicc->gicr_base_address, size);
if (!redist_base)
return -ENOMEM;
@@ -1380,6 +1359,13 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
return 0;
+ /*
+ * It is perfectly valid for firmware to pass a disabled GICC entry;
+ * the driver should not treat it as an error. Skip the entry instead
+ * of failing the probe.
+ */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
return -ENODEV;
}
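MADT subtable walkers treat a 0 return from the callback as "continue" and a
negative value as a hard failure, which is why disabled GICC entries must be
skipped with 0 rather than rejected with -ENODEV. A minimal sketch (the
callback name is illustrative):

static int __init my_parse_gicc(struct acpi_subtable_header *header,
				const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;

	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;	/* disabled entry: skip, not an error */

	/* ... map and probe the redistributor ... */
	return 0;
}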
diff --git a/drivers/irqchip/irq-goldfish-pic.c b/drivers/irqchip/irq-goldfish-pic.c
new file mode 100644
index 000000000000..2a92f03c73e4
--- /dev/null
+++ b/drivers/irqchip/irq-goldfish-pic.c
@@ -0,0 +1,139 @@
+/*
+ * Driver for MIPS Goldfish Programmable Interrupt Controller.
+ *
+ * Author: Miodrag Dinic <miodrag.dinic@mips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define GFPIC_NR_IRQS 32
+
+/* 8..39 Cascaded Goldfish PIC interrupts */
+#define GFPIC_IRQ_BASE 8
+
+#define GFPIC_REG_IRQ_PENDING 0x04
+#define GFPIC_REG_IRQ_DISABLE_ALL 0x08
+#define GFPIC_REG_IRQ_DISABLE 0x0c
+#define GFPIC_REG_IRQ_ENABLE 0x10
+
+struct goldfish_pic_data {
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+};
+
+static void goldfish_pic_cascade(struct irq_desc *desc)
+{
+ struct goldfish_pic_data *gfpic = irq_desc_get_handler_data(desc);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ u32 pending, hwirq, virq;
+
+ chained_irq_enter(host_chip, desc);
+
+ pending = readl(gfpic->base + GFPIC_REG_IRQ_PENDING);
+ while (pending) {
+ hwirq = __fls(pending);
+ virq = irq_linear_revmap(gfpic->irq_domain, hwirq);
+ generic_handle_irq(virq);
+ pending &= ~(1 << hwirq);
+ }
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static const struct irq_domain_ops goldfish_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init goldfish_pic_of_init(struct device_node *of_node,
+ struct device_node *parent)
+{
+ struct goldfish_pic_data *gfpic;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned int parent_irq;
+ int ret = 0;
+
+ gfpic = kzalloc(sizeof(*gfpic), GFP_KERNEL);
+ if (!gfpic) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ parent_irq = irq_of_parse_and_map(of_node, 0);
+ if (!parent_irq) {
+ pr_err("Failed to map parent IRQ!\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ gfpic->base = of_iomap(of_node, 0);
+ if (!gfpic->base) {
+ pr_err("Failed to map base address!\n");
+ ret = -ENOMEM;
+ goto out_unmap_irq;
+ }
+
+ /* Mask interrupts. */
+ writel(1, gfpic->base + GFPIC_REG_IRQ_DISABLE_ALL);
+
+ gc = irq_alloc_generic_chip("GFPIC", 1, GFPIC_IRQ_BASE, gfpic->base,
+ handle_level_irq);
+ if (!gc) {
+ pr_err("Failed to allocate chip structures!\n");
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ ct = gc->chip_types;
+ ct->regs.enable = GFPIC_REG_IRQ_ENABLE;
+ ct->regs.disable = GFPIC_REG_IRQ_DISABLE;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS), 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+
+ gfpic->irq_domain = irq_domain_add_legacy(of_node, GFPIC_NR_IRQS,
+ GFPIC_IRQ_BASE, 0,
+ &goldfish_irq_domain_ops,
+ NULL);
+ if (!gfpic->irq_domain) {
+ pr_err("Failed to add irqdomain!\n");
+ ret = -ENOMEM;
+ goto out_destroy_generic_chip;
+ }
+
+ irq_set_chained_handler_and_data(parent_irq,
+ goldfish_pic_cascade, gfpic);
+
+ pr_info("Successfully registered.\n");
+ return 0;
+
+out_destroy_generic_chip:
+ irq_destroy_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS),
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+out_iounmap:
+ iounmap(gfpic->base);
+out_unmap_irq:
+ irq_dispose_mapping(parent_irq);
+out_free:
+ kfree(gfpic);
+out_err:
+ pr_err("Failed to initialize! (errno = %d)\n", ret);
+ return ret;
+}
+
+IRQCHIP_DECLARE(google_gf_pic, "google,goldfish-pic", goldfish_pic_of_init);
diff --git a/drivers/irqchip/irq-ompic.c b/drivers/irqchip/irq-ompic.c
index cf6d0c455518..e66ef4373b1e 100644
--- a/drivers/irqchip/irq-ompic.c
+++ b/drivers/irqchip/irq-ompic.c
@@ -171,9 +171,9 @@ static int __init ompic_of_init(struct device_node *node,
/* Setup the device */
ompic_base = ioremap(res.start, resource_size(&res));
- if (IS_ERR(ompic_base)) {
+ if (!ompic_base) {
pr_err("ompic: unable to map registers");
- return PTR_ERR(ompic_base);
+ return -ENOMEM;
}
irq = irq_of_parse_and_map(node, 0);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 06f29cf5018a..cee59fe1321c 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -342,6 +342,9 @@ static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
*/
static struct lock_class_key intc_irqpin_irq_lock_class;
+/* And this is for the request mutex */
+static struct lock_class_key intc_irqpin_irq_request_class;
+
static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
@@ -352,7 +355,8 @@ static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
intc_irqpin_dbg(&p->irq[hw], "map");
irq_set_chip_data(virq, h->host_data);
- irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class);
+ irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class,
+ &intc_irqpin_irq_request_class);
irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
return 0;
}
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index dde8f46bc254..e268811dc544 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -724,11 +724,11 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
return count;
}
-static unsigned int
+static __poll_t
capi_poll(struct file *file, poll_table *wait)
{
struct capidev *cdev = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!cdev->ap.applid)
return POLLERR;
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 1c5dc345e7c5..34b7704042a4 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -119,10 +119,10 @@ isdn_divert_write(struct file *file, const char __user *buf, size_t count, loff_
/***************************************/
/* select routines for various kernels */
/***************************************/
-static unsigned int
+static __poll_t
isdn_divert_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &(rd_queue), wait);
/* mask = POLLOUT | POLLWRNORM; */
diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c
index 72e58bf07577..70f16102a001 100644
--- a/drivers/isdn/hardware/eicon/divamnt.c
+++ b/drivers/isdn/hardware/eicon/divamnt.c
@@ -98,9 +98,9 @@ void diva_os_get_time(dword *sec, dword *usec)
/*
* device node operations
*/
-static unsigned int maint_poll(struct file *file, poll_table *wait)
+static __poll_t maint_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &msgwaitq, wait);
mask = POLLOUT | POLLWRNORM;
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index 0033d74a7291..da5cc5ab7e2d 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -74,7 +74,7 @@ static ssize_t um_idi_read(struct file *file, char __user *buf, size_t count,
loff_t *offset);
static ssize_t um_idi_write(struct file *file, const char __user *buf,
size_t count, loff_t *offset);
-static unsigned int um_idi_poll(struct file *file, poll_table *wait);
+static __poll_t um_idi_poll(struct file *file, poll_table *wait);
static int um_idi_open(struct inode *inode, struct file *file);
static int um_idi_release(struct inode *inode, struct file *file);
static int remove_entity(void *entity);
@@ -365,7 +365,7 @@ um_idi_write(struct file *file, const char __user *buf, size_t count,
return (ret);
}
-static unsigned int um_idi_poll(struct file *file, poll_table *wait)
+static __poll_t um_idi_poll(struct file *file, poll_table *wait)
{
diva_um_idi_os_context_t *p_os;
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index b2023e08dcd2..fbc788e6f0db 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -650,7 +650,7 @@ static ssize_t divas_read(struct file *file, char __user *buf,
return (ret);
}
-static unsigned int divas_poll(struct file *file, poll_table *wait)
+static __poll_t divas_poll(struct file *file, poll_table *wait)
{
if (!file->private_data) {
return (POLLERR);
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index b57efd6ad916..3478f6f099eb 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -99,7 +99,7 @@ divas_write(struct file *file, const char __user *buf, size_t count, loff_t *off
return (-ENODEV);
}
-static unsigned int divas_poll(struct file *file, poll_table *wait)
+static __poll_t divas_poll(struct file *file, poll_table *wait)
{
return (POLLERR);
}
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index aaca0b3d662e..6abea6915f49 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -281,10 +281,10 @@ hysdn_log_close(struct inode *ino, struct file *filep)
/*************************************************/
/* select/poll routine to be able using select() */
/*************************************************/
-static unsigned int
+static __poll_t
hysdn_log_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
hysdn_card *card = PDE_DATA(file_inode(file));
struct procdata *pd = card->proclog;
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 8b03d618185e..0521c32949d4 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1227,10 +1227,10 @@ out:
return retval;
}
-static unsigned int
+static __poll_t
isdn_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned int minor = iminor(file_inode(file));
int drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index e07aefb9151d..57884319b4b1 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -685,10 +685,10 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
-unsigned int
+__poll_t
isdn_ppp_poll(struct file *file, poll_table *wait)
{
- u_int mask;
+ __poll_t mask;
struct ippp_buf_queue *bf, *bl;
u_long flags;
struct ippp_struct *is;
diff --git a/drivers/isdn/i4l/isdn_ppp.h b/drivers/isdn/i4l/isdn_ppp.h
index 4e9b8935a4eb..34b8a2ce84f3 100644
--- a/drivers/isdn/i4l/isdn_ppp.h
+++ b/drivers/isdn/i4l/isdn_ppp.h
@@ -23,7 +23,7 @@ extern int isdn_ppp_autodial_filter(struct sk_buff *, isdn_net_local *);
extern int isdn_ppp_xmit(struct sk_buff *, struct net_device *);
extern void isdn_ppp_receive(isdn_net_dev *, isdn_net_local *, struct sk_buff *);
extern int isdn_ppp_dev_ioctl(struct net_device *, struct ifreq *, int);
-extern unsigned int isdn_ppp_poll(struct file *, struct poll_table_struct *);
+extern __poll_t isdn_ppp_poll(struct file *, struct poll_table_struct *);
extern int isdn_ppp_ioctl(int, struct file *, unsigned int, unsigned long);
extern void isdn_ppp_release(int, struct file *);
extern int isdn_ppp_dial_slave(char *);
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index e3654782a3e2..21d50e4cc5e1 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -645,8 +645,10 @@ l1oip_socket_thread(void *data)
{
struct l1oip *hc = (struct l1oip *)data;
int ret = 0;
- struct msghdr msg;
struct sockaddr_in sin_rx;
+ struct kvec iov;
+ struct msghdr msg = {.msg_name = &sin_rx,
+ .msg_namelen = sizeof(sin_rx)};
unsigned char *recvbuf;
size_t recvbuf_size = 1500;
int recvlen;
@@ -661,6 +663,9 @@ l1oip_socket_thread(void *data)
goto fail;
}
+ iov.iov_base = recvbuf;
+ iov.iov_len = recvbuf_size;
+
/* make daemon */
allow_signal(SIGTERM);
@@ -697,12 +702,6 @@ l1oip_socket_thread(void *data)
goto fail;
}
- /* build receive message */
- msg.msg_name = &sin_rx;
- msg.msg_namelen = sizeof(sin_rx);
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
-
/* build send message */
hc->sendmsg.msg_name = &hc->sin_remote;
hc->sendmsg.msg_namelen = sizeof(hc->sin_remote);
@@ -719,12 +718,9 @@ l1oip_socket_thread(void *data)
printk(KERN_DEBUG "%s: socket created and open\n",
__func__);
while (!signal_pending(current)) {
- struct kvec iov = {
- .iov_base = recvbuf,
- .iov_len = recvbuf_size,
- };
- recvlen = kernel_recvmsg(socket, &msg, &iov, 1,
- recvbuf_size, 0);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1,
+ recvbuf_size);
+ recvlen = sock_recvmsg(socket, &msg, 0);
if (recvlen > 0) {
l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
} else {
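kernel_recvmsg() rebuilt the iterator from the kvec on every call; with
sock_recvmsg() the msghdr and kvec are set up once and only the iov_iter is
re-primed per receive. A sketch of the resulting loop (buffer handling
simplified, parse helper illustrative):

	struct kvec iov = { .iov_base = recvbuf, .iov_len = recvbuf_size };
	struct msghdr msg = { .msg_name = &sin_rx,
			      .msg_namelen = sizeof(sin_rx) };
	int recvlen;

	while (!signal_pending(current)) {
		/* re-arm the iterator; each recvmsg consumes it */
		iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1,
			      recvbuf_size);
		recvlen = sock_recvmsg(socket, &msg, 0);
		if (recvlen <= 0)
			break;
		my_parse(&sin_rx, recvbuf, recvlen);	/* illustrative */
	}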
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index c50a34340f67..f4272d4e0a26 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -141,11 +141,11 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
return ret;
}
-static unsigned int
+static __poll_t
mISDN_poll(struct file *filep, poll_table *wait)
{
struct mISDNtimerdev *dev = filep->private_data;
- unsigned int mask = POLLERR;
+ __poll_t mask = POLLERR;
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %p)\n", __func__, filep, wait);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 318a28fd58fe..3e763d2a0cb3 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -137,6 +137,13 @@ config LEDS_LM3642
converter plus 1.5A constant current driver for a high-current
white LED.
+config LEDS_LM3692X
+ tristate "LED support for LM3692x Chips"
+ depends on LEDS_CLASS && I2C && OF
+ select REGMAP_I2C
+ help
+ This option enables support for the TI LM3692x family
+ of white LED string drivers used for backlighting.
config LEDS_LOCOMO
tristate "LED Support for Locomo device"
@@ -347,7 +354,7 @@ config LEDS_LP8788
config LEDS_LP8860
tristate "LED support for the TI LP8860 4 channel LED driver"
- depends on LEDS_CLASS && I2C
+ depends on LEDS_CLASS && I2C && OF
select REGMAP_I2C
help
If you say yes here you get support for the TI LP8860 4 channel
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index a2a6b5a4f86d..987884a5b9a5 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o
obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o
obj-$(CONFIG_LEDS_MT6323) += leds-mt6323.o
+obj-$(CONFIG_LEDS_LM3692X) += leds-lm3692x.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index fd83c7f77a95..ede4fa0ac2cc 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -188,6 +188,7 @@ void led_blink_set(struct led_classdev *led_cdev,
{
del_timer_sync(&led_cdev->blink_timer);
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
index 9a257f969300..f883616d9e60 100644
--- a/drivers/leds/leds-as3645a.c
+++ b/drivers/leds/leds-as3645a.c
@@ -360,7 +360,8 @@ static int as3645a_set_flash_brightness(struct led_classdev_flash *fled,
{
struct as3645a *flash = fled_to_as3645a(fled);
- flash->flash_current = as3645a_current_to_reg(flash, true, brightness_ua);
+ flash->flash_current = as3645a_current_to_reg(flash, true,
+ brightness_ua);
return as3645a_set_current(flash);
}
@@ -455,8 +456,8 @@ static int as3645a_detect(struct as3645a *flash)
/* Verify the chip model and version. */
if (model != 0x01 || rfu != 0x00) {
- dev_err(dev, "AS3645A not detected "
- "(model %d rfu %d)\n", model, rfu);
+ dev_err(dev, "AS3645A not detected (model %d rfu %d)\n",
+ model, rfu);
return -ENODEV;
}
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index d03ed6b4176b..851c1920b63c 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -549,8 +549,12 @@ static int blinkm_detect(struct i2c_client *client, struct i2c_board_info *info)
/* make sure the blinkM is balanced (read/writes) */
while (count > 0) {
ret = blinkm_write(client, BLM_GET_ADDR, NULL);
+ if (ret)
+ return ret;
usleep_range(5000, 10000);
ret = blinkm_read(client, BLM_GET_ADDR, tmpargs);
+ if (ret)
+ return ret;
usleep_range(5000, 10000);
if (tmpargs[0] == 0x09)
count = 0;
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
new file mode 100644
index 000000000000..437173d1712c
--- /dev/null
+++ b/drivers/leds/leds-lm3692x.c
@@ -0,0 +1,393 @@
+/*
+ * TI lm3692x LED Driver
+ *
+ * Copyright (C) 2017 Texas Instruments
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * Data sheet is located at
+ * http://www.ti.com/lit/ds/snvsa29/snvsa29.pdf
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <uapi/linux/uleds.h>
+
+#define LM3692X_REV 0x0
+#define LM3692X_RESET 0x1
+#define LM3692X_EN 0x10
+#define LM3692X_BRT_CTRL 0x11
+#define LM3692X_PWM_CTRL 0x12
+#define LM3692X_BOOST_CTRL 0x13
+#define LM3692X_AUTO_FREQ_HI 0x15
+#define LM3692X_AUTO_FREQ_LO 0x16
+#define LM3692X_BL_ADJ_THRESH 0x17
+#define LM3692X_BRT_LSB 0x18
+#define LM3692X_BRT_MSB 0x19
+#define LM3692X_FAULT_CTRL 0x1e
+#define LM3692X_FAULT_FLAGS 0x1f
+
+#define LM3692X_SW_RESET BIT(0)
+#define LM3692X_DEVICE_EN BIT(0)
+#define LM3692X_LED1_EN BIT(1)
+#define LM3692X_LED2_EN BIT(2)
+
+/* Brightness Control Bits */
+#define LM3692X_BL_ADJ_POL BIT(0)
+#define LM3692X_RAMP_RATE_125us 0x00
+#define LM3692X_RAMP_RATE_250us BIT(1)
+#define LM3692X_RAMP_RATE_500us BIT(2)
+#define LM3692X_RAMP_RATE_1ms (BIT(1) | BIT(2))
+#define LM3692X_RAMP_RATE_2ms BIT(3)
+#define LM3692X_RAMP_RATE_4ms (BIT(3) | BIT(1))
+#define LM3692X_RAMP_RATE_8ms (BIT(2) | BIT(3))
+#define LM3692X_RAMP_RATE_16ms (BIT(1) | BIT(2) | BIT(3))
+#define LM3692X_RAMP_EN BIT(4)
+#define LM3692X_BRHT_MODE_REG 0x00
+#define LM3692X_BRHT_MODE_PWM BIT(5)
+#define LM3692X_BRHT_MODE_MULTI_RAMP BIT(6)
+#define LM3692X_BRHT_MODE_RAMP_MULTI (BIT(5) | BIT(6))
+#define LM3692X_MAP_MODE_EXP BIT(7)
+
+/* PWM Register Bits */
+#define LM3692X_PWM_FILTER_100 BIT(0)
+#define LM3692X_PWM_FILTER_150 BIT(1)
+#define LM3692X_PWM_FILTER_200 (BIT(0) | BIT(1))
+#define LM3692X_PWM_HYSTER_1LSB BIT(2)
+#define LM3692X_PWM_HYSTER_2LSB BIT(3)
+#define LM3692X_PWM_HYSTER_3LSB (BIT(3) | BIT(2))
+#define LM3692X_PWM_HYSTER_4LSB BIT(4)
+#define LM3692X_PWM_HYSTER_5LSB (BIT(4) | BIT(2))
+#define LM3692X_PWM_HYSTER_6LSB (BIT(4) | BIT(3))
+#define LM3692X_PWM_POLARITY BIT(5)
+#define LM3692X_PWM_SAMP_4MHZ BIT(6)
+#define LM3692X_PWM_SAMP_24MHZ BIT(7)
+
+/* Boost Control Bits */
+#define LM3692X_OCP_PROT_1A BIT(0)
+#define LM3692X_OCP_PROT_1_25A BIT(1)
+#define LM3692X_OCP_PROT_1_5A (BIT(0) | BIT(1))
+#define LM3692X_OVP_21V BIT(2)
+#define LM3692X_OVP_25V BIT(3)
+#define LM3692X_OVP_29V (BIT(2) | BIT(3))
+#define LM3692X_MIN_IND_22UH BIT(4)
+#define LM3692X_BOOST_SW_1MHZ BIT(5)
+#define LM3692X_BOOST_SW_NO_SHIFT BIT(6)
+
+/* Fault Control Bits */
+#define LM3692X_FAULT_CTRL_OVP BIT(0)
+#define LM3692X_FAULT_CTRL_OCP BIT(1)
+#define LM3692X_FAULT_CTRL_TSD BIT(2)
+#define LM3692X_FAULT_CTRL_OPEN BIT(3)
+
+/* Fault Flag Bits */
+#define LM3692X_FAULT_FLAG_OVP BIT(0)
+#define LM3692X_FAULT_FLAG_OCP BIT(1)
+#define LM3692X_FAULT_FLAG_TSD BIT(2)
+#define LM3692X_FAULT_FLAG_SHRT BIT(3)
+#define LM3692X_FAULT_FLAG_OPEN BIT(4)
+
+/**
+ * struct lm3692x_led -
+ * @lock - Lock for reading/writing the device
+ * @client - Pointer to the I2C client
+ * @led_dev - LED class device pointer
+ * @regmap - Devices register map
+ * @enable_gpio - VDDIO/EN gpio to enable communication interface
+ * @regulator - LED supply regulator pointer
+ * @label - LED label
+ */
+struct lm3692x_led {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct led_classdev led_dev;
+ struct regmap *regmap;
+ struct gpio_desc *enable_gpio;
+ struct regulator *regulator;
+ char label[LED_MAX_NAME_SIZE];
+};
+
+static const struct reg_default lm3692x_reg_defs[] = {
+ {LM3692X_EN, 0xf},
+ {LM3692X_BRT_CTRL, 0x61},
+ {LM3692X_PWM_CTRL, 0x73},
+ {LM3692X_BOOST_CTRL, 0x6f},
+ {LM3692X_AUTO_FREQ_HI, 0x0},
+ {LM3692X_AUTO_FREQ_LO, 0x0},
+ {LM3692X_BL_ADJ_THRESH, 0x0},
+ {LM3692X_BRT_LSB, 0x7},
+ {LM3692X_BRT_MSB, 0xff},
+ {LM3692X_FAULT_CTRL, 0x7},
+};
+
+static const struct regmap_config lm3692x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = LM3692X_FAULT_FLAGS,
+ .reg_defaults = lm3692x_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(lm3692x_reg_defs),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int lm3692x_fault_check(struct lm3692x_led *led)
+{
+ int ret;
+ unsigned int read_buf;
+
+ ret = regmap_read(led->regmap, LM3692X_FAULT_FLAGS, &read_buf);
+ if (ret)
+ return ret;
+
+ if (read_buf)
+ dev_err(&led->client->dev, "Detected a fault 0x%X\n", read_buf);
+
+ /* The first read may clear the fault. Check again to see if the fault
+ * still exists and return that value.
+ */
+ regmap_read(led->regmap, LM3692X_FAULT_FLAGS, &read_buf);
+ if (read_buf)
+ dev_err(&led->client->dev, "Second read of fault flags 0x%X\n",
+ read_buf);
+
+ return read_buf;
+}
+
+static int lm3692x_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct lm3692x_led *led =
+ container_of(led_cdev, struct lm3692x_led, led_dev);
+ int ret;
+ int led_brightness_lsb = (brt_val >> 5);
+
+ mutex_lock(&led->lock);
+
+ ret = lm3692x_fault_check(led);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot read/clear faults\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_MSB, brt_val);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot write MSB\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_LSB, led_brightness_lsb);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot write LSB\n");
+ goto out;
+ }
+out:
+ mutex_unlock(&led->lock);
+ return ret;
+}
+
+static int lm3692x_init(struct lm3692x_led *led)
+{
+ int ret;
+
+ if (led->regulator) {
+ ret = regulator_enable(led->regulator);
+ if (ret) {
+ dev_err(&led->client->dev,
+ "Failed to enable regulator\n");
+ return ret;
+ }
+ }
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 1);
+
+ ret = lm3692x_fault_check(led);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot read/clear faults\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_CTRL, 0x00);
+ if (ret)
+ goto out;
+
+ /*
+ * For glitch free operation, the following data should
+ * only be written while device enable bit is 0
+ * per Section 7.5.14 of the data sheet
+ */
+ ret = regmap_write(led->regmap, LM3692X_PWM_CTRL,
+ LM3692X_PWM_FILTER_100 | LM3692X_PWM_SAMP_24MHZ);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BOOST_CTRL,
+ LM3692X_BRHT_MODE_RAMP_MULTI |
+ LM3692X_BL_ADJ_POL |
+ LM3692X_RAMP_RATE_250us);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_AUTO_FREQ_HI, 0x00);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_AUTO_FREQ_LO, 0x00);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BL_ADJ_THRESH, 0x00);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_CTRL,
+ LM3692X_BL_ADJ_POL | LM3692X_PWM_HYSTER_4LSB);
+ if (ret)
+ goto out;
+
+ return ret;
+out:
+ dev_err(&led->client->dev, "Fail writing initialization values\n");
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
+ return ret;
+}
+
+static int lm3692x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct lm3692x_led *led;
+ struct device_node *np = client->dev.of_node;
+ struct device_node *child_node;
+ const char *name;
+
+ led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(np, child_node) {
+ led->led_dev.default_trigger = of_get_property(child_node,
+ "linux,default-trigger",
+ NULL);
+
+ ret = of_property_read_string(child_node, "label", &name);
+ if (!ret)
+ snprintf(led->label, sizeof(led->label),
+ "%s:%s", id->name, name);
+ else
+ snprintf(led->label, sizeof(led->label),
+ "%s::backlight_cluster", id->name);
+ };
+
+ led->enable_gpio = devm_gpiod_get_optional(&client->dev,
+ "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(led->enable_gpio)) {
+ ret = PTR_ERR(led->enable_gpio);
+ dev_err(&client->dev, "Failed to get enable gpio: %d\n", ret);
+ return ret;
+ }
+
+ led->regulator = devm_regulator_get(&client->dev, "vled");
+ if (IS_ERR(led->regulator))
+ led->regulator = NULL;
+
+ led->client = client;
+ led->led_dev.name = led->label;
+ led->led_dev.brightness_set_blocking = lm3692x_brightness_set;
+
+ mutex_init(&led->lock);
+
+ i2c_set_clientdata(client, led);
+
+ led->regmap = devm_regmap_init_i2c(client, &lm3692x_regmap_config);
+ if (IS_ERR(led->regmap)) {
+ ret = PTR_ERR(led->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = lm3692x_init(led);
+ if (ret)
+ return ret;
+
+ ret = devm_led_classdev_register(&client->dev, &led->led_dev);
+ if (ret) {
+ dev_err(&client->dev, "led register err: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lm3692x_remove(struct i2c_client *client)
+{
+ struct lm3692x_led *led = i2c_get_clientdata(client);
+ int ret;
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
+ mutex_destroy(&led->lock);
+
+ return 0;
+}
+
+static const struct i2c_device_id lm3692x_id[] = {
+ { "lm36922", 0 },
+ { "lm36923", 1 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm3692x_id);
+
+static const struct of_device_id of_lm3692x_leds_match[] = {
+ { .compatible = "ti,lm36922", },
+ { .compatible = "ti,lm36923", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_lm3692x_leds_match);
+
+static struct i2c_driver lm3692x_driver = {
+ .driver = {
+ .name = "lm3692x",
+ .of_match_table = of_lm3692x_leds_match,
+ },
+ .probe = lm3692x_probe,
+ .remove = lm3692x_remove,
+ .id_table = lm3692x_id,
+};
+module_i2c_driver(lm3692x_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LM3692X LED driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 3e70775a2d54..39c72a908f3b 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -22,6 +22,7 @@
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
+#include <uapi/linux/uleds.h>
#define LP8860_DISP_CL1_BRT_MSB 0x00
#define LP8860_DISP_CL1_BRT_LSB 0x01
@@ -86,8 +87,6 @@
#define LP8860_CLEAR_FAULTS 0x01
-#define LP8860_DISP_LED_NAME "display_cluster"
-
/**
* struct lp8860_led -
* @lock - Lock for reading/writing the device
@@ -98,7 +97,7 @@
* @enable_gpio - VDDIO/EN gpio to enable communication interface
* @regulator - LED supply regulator pointer
* @label - LED label
-**/
+ */
struct lp8860_led {
struct mutex lock;
struct i2c_client *client;
@@ -107,7 +106,7 @@ struct lp8860_led {
struct regmap *eeprom_regmap;
struct gpio_desc *enable_gpio;
struct regulator *regulator;
- const char *label;
+ char label[LED_MAX_NAME_SIZE];
};
struct lp8860_eeprom_reg {
@@ -247,6 +246,15 @@ static int lp8860_init(struct lp8860_led *led)
unsigned int read_buf;
int ret, i, reg_count;
+ if (led->regulator) {
+ ret = regulator_enable(led->regulator);
+ if (ret) {
+ dev_err(&led->client->dev,
+ "Failed to enable regulator\n");
+ return ret;
+ }
+ }
+
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 1);
@@ -282,12 +290,25 @@ static int lp8860_init(struct lp8860_led *led)
ret = regmap_write(led->regmap,
LP8860_EEPROM_CNTRL,
LP8860_PROGRAM_EEPROM);
- if (ret)
+ if (ret) {
dev_err(&led->client->dev, "Failed programming EEPROM\n");
+ goto out;
+ }
+
+ return ret;
+
out:
if (ret)
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
return ret;
}
@@ -365,19 +386,25 @@ static int lp8860_probe(struct i2c_client *client,
int ret;
struct lp8860_led *led;
struct device_node *np = client->dev.of_node;
+ struct device_node *child_node;
+ const char *name;
led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
- led->label = LP8860_DISP_LED_NAME;
-
- if (client->dev.of_node) {
- ret = of_property_read_string(np, "label", &led->label);
- if (ret) {
- dev_err(&client->dev, "Missing label in dt\n");
- return -EINVAL;
- }
+ for_each_available_child_of_node(np, child_node) {
+ led->led_dev.default_trigger = of_get_property(child_node,
+ "linux,default-trigger",
+ NULL);
+
+ ret = of_property_read_string(child_node, "label", &name);
+ if (!ret)
+ snprintf(led->label, sizeof(led->label), "%s:%s",
+ id->name, name);
+ else
+ snprintf(led->label, sizeof(led->label),
+ "%s::display_cluster", id->name);
}
led->enable_gpio = devm_gpiod_get_optional(&client->dev,
@@ -394,7 +421,6 @@ static int lp8860_probe(struct i2c_client *client,
led->client = client;
led->led_dev.name = led->label;
- led->led_dev.max_brightness = LED_FULL;
led->led_dev.brightness_set_blocking = lp8860_brightness_set;
mutex_init(&led->lock);
@@ -421,7 +447,7 @@ static int lp8860_probe(struct i2c_client *client,
if (ret)
return ret;
- ret = led_classdev_register(&client->dev, &led->led_dev);
+ ret = devm_led_classdev_register(&client->dev, &led->led_dev);
if (ret) {
dev_err(&client->dev, "led register err: %d\n", ret);
return ret;
@@ -435,8 +461,6 @@ static int lp8860_remove(struct i2c_client *client)
struct lp8860_led *led = i2c_get_clientdata(client);
int ret;
- led_classdev_unregister(&led->led_dev);
-
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 0);
@@ -447,6 +471,8 @@ static int lp8860_remove(struct i2c_client *client)
"Failed to disable regulator\n");
}
+ mutex_destroy(&led->lock);
+
return 0;
}
@@ -456,18 +482,16 @@ static const struct i2c_device_id lp8860_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lp8860_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_lp8860_leds_match[] = {
{ .compatible = "ti,lp8860", },
{},
};
MODULE_DEVICE_TABLE(of, of_lp8860_leds_match);
-#endif
static struct i2c_driver lp8860_driver = {
.driver = {
.name = "lp8860",
- .of_match_table = of_match_ptr(of_lp8860_leds_match),
+ .of_match_table = of_lp8860_leds_match,
},
.probe = lp8860_probe,
.remove = lp8860_remove,
@@ -477,4 +501,4 @@ module_i2c_driver(lp8860_driver);
MODULE_DESCRIPTION("Texas Instruments LP8860 LED driver");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-pm8058.c b/drivers/leds/leds-pm8058.c
index a52674327857..8988ba3b2d65 100644
--- a/drivers/leds/leds-pm8058.c
+++ b/drivers/leds/leds-pm8058.c
@@ -106,7 +106,7 @@ static int pm8058_led_probe(struct platform_device *pdev)
if (!led)
return -ENOMEM;
- led->ledtype = (u32)of_device_get_match_data(&pdev->dev);
+ led->ledtype = (u32)(unsigned long)of_device_get_match_data(&pdev->dev);
map = dev_get_regmap(pdev->dev.parent, NULL);
if (!map) {
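of_device_get_match_data() returns a void *, and on 64-bit targets casting
that straight to u32 triggers a pointer-to-integer truncation warning; going
through unsigned long first makes the narrowing explicit and warning-free:

	/* void * -> unsigned long -> u32: intentional, warning-free narrowing */
	led->ledtype = (u32)(unsigned long)of_device_get_match_data(&pdev->dev);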
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 8d456dc6c5bf..df80c89ebe7f 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
-#include <linux/fb.h>
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/pwm.h>
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index bb090216b4dc..a2559b4fdfff 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -81,7 +81,7 @@ config LEDS_TRIGGER_ACTIVITY
tristate "LED activity Trigger"
depends on LEDS_TRIGGERS
help
- This allows LEDs to be controlled by a immediate CPU usage.
+ This allows LEDs to be controlled by an immediate CPU usage.
The flash frequency and duty cycle varies from faint flashes to
intense brightness depending on the instant CPU load.
If unsure, say N.
@@ -135,4 +135,11 @@ config LEDS_TRIGGER_PANIC
a different trigger.
If unsure, say Y.
+config LEDS_TRIGGER_NETDEV
+ tristate "LED Netdev Trigger"
+ depends on NET && LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by network device activity.
+ If unsure, say Y.
+
endif # LEDS_TRIGGERS
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index 4a8b6cff7761..f3cfe1950538 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
obj-$(CONFIG_LEDS_TRIGGER_CAMERA) += ledtrig-camera.o
obj-$(CONFIG_LEDS_TRIGGER_PANIC) += ledtrig-panic.o
+obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
new file mode 100644
index 000000000000..6df4781a6308
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2017 Ben Whitten <ben.whitten@gmail.com>
+// Copyright 2007 Oliver Jowett <oliver@opencloud.com>
+//
+// LED Kernel Netdev Trigger
+//
+// Toggles the LED to reflect the link and traffic state of a named net device
+//
+// Derived from ledtrig-timer.c which is:
+// Copyright 2005-2006 Openedhand Ltd.
+// Author: Richard Purdie <rpurdie@openedhand.com>
+
+#include <linux/atomic.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include "../leds.h"
+
+/*
+ * Configurable sysfs attributes:
+ *
+ * device_name - network device name to monitor
+ * interval - duration of LED blink, in milliseconds
+ * link - LED's normal state reflects whether the link is up
+ * (has carrier) or not
+ * tx - LED blinks on transmitted data
+ * rx - LED blinks on received data
+ *
+ */
+
+struct led_netdev_data {
+ spinlock_t lock;
+
+ struct delayed_work work;
+ struct notifier_block notifier;
+
+ struct led_classdev *led_cdev;
+ struct net_device *net_dev;
+
+ char device_name[IFNAMSIZ];
+ atomic_t interval;
+ unsigned int last_activity;
+
+ unsigned long mode;
+#define NETDEV_LED_LINK 0
+#define NETDEV_LED_TX 1
+#define NETDEV_LED_RX 2
+#define NETDEV_LED_MODE_LINKUP 3
+};
+
+enum netdev_led_attr {
+ NETDEV_ATTR_LINK,
+ NETDEV_ATTR_TX,
+ NETDEV_ATTR_RX
+};
+
+static void set_baseline_state(struct led_netdev_data *trigger_data)
+{
+ int current_brightness;
+ struct led_classdev *led_cdev = trigger_data->led_cdev;
+
+ current_brightness = led_cdev->brightness;
+ if (current_brightness)
+ led_cdev->blink_brightness = current_brightness;
+ if (!led_cdev->blink_brightness)
+ led_cdev->blink_brightness = led_cdev->max_brightness;
+
+ if (!test_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode))
+ led_set_brightness(led_cdev, LED_OFF);
+ else {
+ if (test_bit(NETDEV_LED_LINK, &trigger_data->mode))
+ led_set_brightness(led_cdev,
+ led_cdev->blink_brightness);
+ else
+ led_set_brightness(led_cdev, LED_OFF);
+
+ /* If we are looking for RX/TX, start checking
+ * the stats periodically
+ */
+ if (test_bit(NETDEV_LED_TX, &trigger_data->mode) ||
+ test_bit(NETDEV_LED_RX, &trigger_data->mode))
+ schedule_delayed_work(&trigger_data->work, 0);
+ }
+}
+
+static ssize_t device_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ ssize_t len;
+
+ spin_lock_bh(&trigger_data->lock);
+ len = sprintf(buf, "%s\n", trigger_data->device_name);
+ spin_unlock_bh(&trigger_data->lock);
+
+ return len;
+}
+
+static ssize_t device_name_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ if (size >= IFNAMSIZ)
+ return -EINVAL;
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ spin_lock_bh(&trigger_data->lock);
+
+ if (trigger_data->net_dev) {
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
+ }
+
+ strncpy(trigger_data->device_name, buf, size);
+ if (size > 0 && trigger_data->device_name[size - 1] == '\n')
+ trigger_data->device_name[size - 1] = 0;
+
+ if (trigger_data->device_name[0] != 0)
+ trigger_data->net_dev =
+ dev_get_by_name(&init_net, trigger_data->device_name);
+
+ clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ if (trigger_data->net_dev != NULL)
+ if (netif_carrier_ok(trigger_data->net_dev))
+ set_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+
+ trigger_data->last_activity = 0;
+
+ set_baseline_state(trigger_data);
+ spin_unlock_bh(&trigger_data->lock);
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(device_name);
+
+static ssize_t netdev_led_attr_show(struct device *dev, char *buf,
+ enum netdev_led_attr attr)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ int bit;
+
+ switch (attr) {
+ case NETDEV_ATTR_LINK:
+ bit = NETDEV_LED_LINK;
+ break;
+ case NETDEV_ATTR_TX:
+ bit = NETDEV_LED_TX;
+ break;
+ case NETDEV_ATTR_RX:
+ bit = NETDEV_LED_RX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sprintf(buf, "%u\n", test_bit(bit, &trigger_data->mode));
+}
+
+static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
+ size_t size, enum netdev_led_attr attr)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ unsigned long state;
+ int ret;
+ int bit;
+
+ ret = kstrtoul(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ switch (attr) {
+ case NETDEV_ATTR_LINK:
+ bit = NETDEV_LED_LINK;
+ break;
+ case NETDEV_ATTR_TX:
+ bit = NETDEV_LED_TX;
+ break;
+ case NETDEV_ATTR_RX:
+ bit = NETDEV_LED_RX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ if (state)
+ set_bit(bit, &trigger_data->mode);
+ else
+ clear_bit(bit, &trigger_data->mode);
+
+ set_baseline_state(trigger_data);
+
+ return size;
+}
+
+static ssize_t link_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return netdev_led_attr_show(dev, buf, NETDEV_ATTR_LINK);
+}
+
+static ssize_t link_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_LINK);
+}
+
+static DEVICE_ATTR_RW(link);
+
+static ssize_t tx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return netdev_led_attr_show(dev, buf, NETDEV_ATTR_TX);
+}
+
+static ssize_t tx_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_TX);
+}
+
+static DEVICE_ATTR_RW(tx);
+
+static ssize_t rx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return netdev_led_attr_show(dev, buf, NETDEV_ATTR_RX);
+}
+
+static ssize_t rx_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_RX);
+}
+
+static DEVICE_ATTR_RW(rx);
+
+static ssize_t interval_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%u\n",
+ jiffies_to_msecs(atomic_read(&trigger_data->interval)));
+}
+
+static ssize_t interval_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ /* impose some basic bounds on the timer interval */
+ if (value >= 5 && value <= 10000) {
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ atomic_set(&trigger_data->interval, msecs_to_jiffies(value));
+ set_baseline_state(trigger_data); /* resets timer */
+ }
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(interval);
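The interval is stored in jiffies, so the value read back is the written
millisecond count rounded to jiffy granularity. A worked sketch of the round
trip behind interval_store/interval_show, assuming HZ=250 (4 ms per jiffy);
the helpers are the usual ones from <linux/jiffies.h>:

	/* userspace writes "50" */
	unsigned long j = msecs_to_jiffies(50);  /* 13 jiffies (rounded up) */
	unsigned int ms = jiffies_to_msecs(j);   /* reads back as 52 ms */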
+
+static int netdev_trig_notify(struct notifier_block *nb,
+ unsigned long evt, void *dv)
+{
+ struct net_device *dev =
+ netdev_notifier_info_to_dev((struct netdev_notifier_info *)dv);
+ struct led_netdev_data *trigger_data =
+ container_of(nb, struct led_netdev_data, notifier);
+
+ if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
+ && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
+ && evt != NETDEV_CHANGENAME)
+ return NOTIFY_DONE;
+
+ if (strcmp(dev->name, trigger_data->device_name))
+ return NOTIFY_DONE;
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ spin_lock_bh(&trigger_data->lock);
+
+ clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ switch (evt) {
+ case NETDEV_REGISTER:
+ if (trigger_data->net_dev)
+ dev_put(trigger_data->net_dev);
+ dev_hold(dev);
+ trigger_data->net_dev = dev;
+ break;
+ case NETDEV_CHANGENAME:
+ case NETDEV_UNREGISTER:
+ if (trigger_data->net_dev) {
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
+ }
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ if (netif_carrier_ok(dev))
+ set_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ break;
+ }
+
+ set_baseline_state(trigger_data);
+
+ spin_unlock_bh(&trigger_data->lock);
+
+ return NOTIFY_DONE;
+}
+
+/* here's the real work: sample the device stats and blink on activity */
+static void netdev_trig_work(struct work_struct *work)
+{
+ struct led_netdev_data *trigger_data =
+ container_of(work, struct led_netdev_data, work.work);
+ struct rtnl_link_stats64 *dev_stats;
+ unsigned int new_activity;
+ struct rtnl_link_stats64 temp;
+ unsigned long interval;
+ int invert;
+
+ /* If we don't have a device, ensure we are off */
+ if (!trigger_data->net_dev) {
+ led_set_brightness(trigger_data->led_cdev, LED_OFF);
+ return;
+ }
+
+ /* If we are not looking for RX/TX then return */
+ if (!test_bit(NETDEV_LED_TX, &trigger_data->mode) &&
+ !test_bit(NETDEV_LED_RX, &trigger_data->mode))
+ return;
+
+ dev_stats = dev_get_stats(trigger_data->net_dev, &temp);
+ new_activity =
+ (test_bit(NETDEV_LED_TX, &trigger_data->mode) ?
+ dev_stats->tx_packets : 0) +
+ (test_bit(NETDEV_LED_RX, &trigger_data->mode) ?
+ dev_stats->rx_packets : 0);
+
+ if (trigger_data->last_activity != new_activity) {
+ led_stop_software_blink(trigger_data->led_cdev);
+
+ invert = test_bit(NETDEV_LED_LINK, &trigger_data->mode);
+ interval = jiffies_to_msecs(
+ atomic_read(&trigger_data->interval));
+ /* base state is ON (link present) */
+ led_blink_set_oneshot(trigger_data->led_cdev,
+ &interval,
+ &interval,
+ invert);
+ trigger_data->last_activity = new_activity;
+ }
+
+ schedule_delayed_work(&trigger_data->work,
+ (atomic_read(&trigger_data->interval) * 2));
+}
+
+static void netdev_trig_activate(struct led_classdev *led_cdev)
+{
+ struct led_netdev_data *trigger_data;
+ int rc;
+
+ trigger_data = kzalloc(sizeof(struct led_netdev_data), GFP_KERNEL);
+ if (!trigger_data)
+ return;
+
+ spin_lock_init(&trigger_data->lock);
+
+ trigger_data->notifier.notifier_call = netdev_trig_notify;
+ trigger_data->notifier.priority = 10;
+
+ INIT_DELAYED_WORK(&trigger_data->work, netdev_trig_work);
+
+ trigger_data->led_cdev = led_cdev;
+ trigger_data->net_dev = NULL;
+ trigger_data->device_name[0] = 0;
+
+ trigger_data->mode = 0;
+ atomic_set(&trigger_data->interval, msecs_to_jiffies(50));
+ trigger_data->last_activity = 0;
+
+ led_cdev->trigger_data = trigger_data;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_device_name);
+ if (rc)
+ goto err_out;
+ rc = device_create_file(led_cdev->dev, &dev_attr_link);
+ if (rc)
+ goto err_out_device_name;
+ rc = device_create_file(led_cdev->dev, &dev_attr_rx);
+ if (rc)
+ goto err_out_link;
+ rc = device_create_file(led_cdev->dev, &dev_attr_tx);
+ if (rc)
+ goto err_out_rx;
+ rc = device_create_file(led_cdev->dev, &dev_attr_interval);
+ if (rc)
+ goto err_out_tx;
+ rc = register_netdevice_notifier(&trigger_data->notifier);
+ if (rc)
+ goto err_out_interval;
+ return;
+
+err_out_interval:
+ device_remove_file(led_cdev->dev, &dev_attr_interval);
+err_out_tx:
+ device_remove_file(led_cdev->dev, &dev_attr_tx);
+err_out_rx:
+ device_remove_file(led_cdev->dev, &dev_attr_rx);
+err_out_link:
+ device_remove_file(led_cdev->dev, &dev_attr_link);
+err_out_device_name:
+ device_remove_file(led_cdev->dev, &dev_attr_device_name);
+err_out:
+ led_cdev->trigger_data = NULL;
+ kfree(trigger_data);
+}
+
+static void netdev_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ if (trigger_data) {
+ unregister_netdevice_notifier(&trigger_data->notifier);
+
+ device_remove_file(led_cdev->dev, &dev_attr_device_name);
+ device_remove_file(led_cdev->dev, &dev_attr_link);
+ device_remove_file(led_cdev->dev, &dev_attr_rx);
+ device_remove_file(led_cdev->dev, &dev_attr_tx);
+ device_remove_file(led_cdev->dev, &dev_attr_interval);
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ if (trigger_data->net_dev)
+ dev_put(trigger_data->net_dev);
+
+ kfree(trigger_data);
+ }
+}
+
+static struct led_trigger netdev_led_trigger = {
+ .name = "netdev",
+ .activate = netdev_trig_activate,
+ .deactivate = netdev_trig_deactivate,
+};
+
+static int __init netdev_trig_init(void)
+{
+ return led_trigger_register(&netdev_led_trigger);
+}
+
+static void __exit netdev_trig_exit(void)
+{
+ led_trigger_unregister(&netdev_led_trigger);
+}
+
+module_init(netdev_trig_init);
+module_exit(netdev_trig_exit);
+
+MODULE_AUTHOR("Ben Whitten <ben.whitten@gmail.com>");
+MODULE_AUTHOR("Oliver Jowett <oliver@opencloud.com>");
+MODULE_DESCRIPTION("Netdev LED trigger");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-transient.c b/drivers/leds/trigger/ledtrig-transient.c
index 7acce64b692a..9d1769073562 100644
--- a/drivers/leds/trigger/ledtrig-transient.c
+++ b/drivers/leds/trigger/ledtrig-transient.c
@@ -1,22 +1,15 @@
-/*
- * LED Kernel Transient Trigger
- *
- * Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
- *
- * Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
- * ledtrig-heartbeat.c
- * Design and use-case input from Jonas Bonn <jonas@southpole.se> and
- * Neil Brown <neilb@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-/*
- * Transient trigger allows one shot timer activation. Please refer to
- * Documentation/leds/ledtrig-transient.txt for details
-*/
+// SPDX-License-Identifier: GPL-2.0
+//
+// LED Kernel Transient Trigger
+//
+// Transient trigger allows one shot timer activation. Please refer to
+// Documentation/leds/ledtrig-transient.txt for details
+// Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
+//
+// Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
+// ledtrig-heartbeat.c
+// Design and use-case input from Jonas Bonn <jonas@southpole.se> and
+// Neil Brown <neilb@suse.de>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -238,4 +231,4 @@ module_exit(transient_trig_exit);
MODULE_AUTHOR("Shuah Khan <shuahkhan@gmail.com>");
MODULE_DESCRIPTION("Transient LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/uleds.c b/drivers/leds/uleds.c
index 5e9e8a1fdefb..5beacab05ed7 100644
--- a/drivers/leds/uleds.c
+++ b/drivers/leds/uleds.c
@@ -176,7 +176,7 @@ static ssize_t uleds_read(struct file *file, char __user *buffer, size_t count,
return retval;
}
-static unsigned int uleds_poll(struct file *file, poll_table *wait)
+static __poll_t uleds_poll(struct file *file, poll_table *wait)
{
struct uleds_device *udev = file->private_data;
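__poll_t is a sparse-checked type for poll masks, paired with the EPOLL*
constants. A minimal sketch of a conforming handler, assuming a driver with
a waitqueue and a data-ready flag (the type and field names here are
illustrative, not uleds' own):

	/* illustrative device type; a real driver has its own */
	struct foo_device {
		wait_queue_head_t waitq;
		bool data_ready;
	};

	static __poll_t foo_poll(struct file *file, poll_table *wait)
	{
		struct foo_device *fdev = file->private_data;
		__poll_t mask = 0;

		poll_wait(file, &fdev->waitq, wait);
		if (fdev->data_ready)
			mask |= EPOLLIN | EPOLLRDNORM;
		return mask;
	}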
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 2a953efec4e1..10c08982185a 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -27,13 +27,6 @@ config NVM_DEBUG
It is required to create/remove targets without IOCTLs.
-config NVM_RRPC
- tristate "Round-robin Hybrid Open-Channel SSD target"
- ---help---
- Allows an open-channel SSD to be exposed as a block device to the
- host. The target is implemented using a linear mapping table and
- cost-based garbage collection. It is optimized for 4K IO sizes.
-
config NVM_PBLK
tristate "Physical Block Device Open-Channel SSD target"
---help---
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index 2c3fd9d2c08c..97d9d7c71550 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -4,7 +4,6 @@
#
obj-$(CONFIG_NVM) := core.o
-obj-$(CONFIG_NVM_RRPC) += rrpc.o
obj-$(CONFIG_NVM_PBLK) += pblk.o
pblk-y := pblk-init.o pblk-core.o pblk-rb.o \
pblk-write.o pblk-cache.o pblk-read.o \
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 83249b43dd06..dcc9e621e651 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -45,12 +45,6 @@ struct nvm_dev_map {
int nr_chnls;
};
-struct nvm_area {
- struct list_head list;
- sector_t begin;
- sector_t end; /* end is excluded */
-};
-
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
struct nvm_target *tgt;
@@ -62,6 +56,30 @@ static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
return NULL;
}
+static bool nvm_target_exists(const char *name)
+{
+ struct nvm_dev *dev;
+ struct nvm_target *tgt;
+ bool ret = false;
+
+ down_write(&nvm_lock);
+ list_for_each_entry(dev, &nvm_devices, devices) {
+ mutex_lock(&dev->mlock);
+ list_for_each_entry(tgt, &dev->targets, list) {
+ if (!strcmp(name, tgt->disk->disk_name)) {
+ ret = true;
+ mutex_unlock(&dev->mlock);
+ goto out;
+ }
+ }
+ mutex_unlock(&dev->mlock);
+ }
+
+out:
+ up_write(&nvm_lock);
+ return ret;
+}
+
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
int i;
@@ -104,7 +122,7 @@ static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
if (clear) {
for (j = 0; j < ch_map->nr_luns; j++) {
int lun = j + lun_offs[j];
- int lunid = (ch * dev->geo.luns_per_chnl) + lun;
+ int lunid = (ch * dev->geo.nr_luns) + lun;
WARN_ON(!test_and_clear_bit(lunid,
dev->lun_map));
@@ -122,7 +140,8 @@ static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
}
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
- int lun_begin, int lun_end)
+ u16 lun_begin, u16 lun_end,
+ u16 op)
{
struct nvm_tgt_dev *tgt_dev = NULL;
struct nvm_dev_map *dev_rmap = dev->rmap;
@@ -130,10 +149,10 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
struct ppa_addr *luns;
int nr_luns = lun_end - lun_begin + 1;
int luns_left = nr_luns;
- int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
- int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
- int bch = lun_begin / dev->geo.luns_per_chnl;
- int blun = lun_begin % dev->geo.luns_per_chnl;
+ int nr_chnls = nr_luns / dev->geo.nr_luns;
+ int nr_chnls_mod = nr_luns % dev->geo.nr_luns;
+ int bch = lun_begin / dev->geo.nr_luns;
+ int blun = lun_begin % dev->geo.nr_luns;
int lunid = 0;
int lun_balanced = 1;
int prev_nr_luns;
@@ -154,15 +173,15 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
if (!luns)
goto err_luns;
- prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
- dev->geo.luns_per_chnl : luns_left;
+ prev_nr_luns = (luns_left > dev->geo.nr_luns) ?
+ dev->geo.nr_luns : luns_left;
for (i = 0; i < nr_chnls; i++) {
struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
int *lun_roffs = ch_rmap->lun_offs;
struct nvm_ch_map *ch_map = &dev_map->chnls[i];
int *lun_offs;
- int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
- dev->geo.luns_per_chnl : luns_left;
+ int luns_in_chnl = (luns_left > dev->geo.nr_luns) ?
+ dev->geo.nr_luns : luns_left;
if (lun_balanced && prev_nr_luns != luns_in_chnl)
lun_balanced = 0;
@@ -199,8 +218,9 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
/* Target device only owns a portion of the physical device */
tgt_dev->geo.nr_chnls = nr_chnls;
- tgt_dev->geo.nr_luns = nr_luns;
- tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
+ tgt_dev->geo.all_luns = nr_luns;
+ tgt_dev->geo.nr_luns = (lun_balanced) ? prev_nr_luns : -1;
+ tgt_dev->geo.op = op;
tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
tgt_dev->q = dev->q;
tgt_dev->map = dev_map;
@@ -226,27 +246,79 @@ static const struct block_device_operations nvm_fops = {
.owner = THIS_MODULE,
};
-static struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
+static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
- struct nvm_tgt_type *tmp, *tt = NULL;
+ struct nvm_tgt_type *tt;
- if (lock)
- down_write(&nvm_tgtt_lock);
+ list_for_each_entry(tt, &nvm_tgt_types, list)
+ if (!strcmp(name, tt->name))
+ return tt;
- list_for_each_entry(tmp, &nvm_tgt_types, list)
- if (!strcmp(name, tmp->name)) {
- tt = tmp;
- break;
- }
+ return NULL;
+}
+
+static struct nvm_tgt_type *nvm_find_target_type(const char *name)
+{
+ struct nvm_tgt_type *tt;
+
+ down_write(&nvm_tgtt_lock);
+ tt = __nvm_find_target_type(name);
+ up_write(&nvm_tgtt_lock);
- if (lock)
- up_write(&nvm_tgtt_lock);
return tt;
}
+static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
+ int lun_end)
+{
+ if (lun_begin > lun_end || lun_end >= geo->all_luns) {
+ pr_err("nvm: lun out of bound (%u:%u > %u)\n",
+ lun_begin, lun_end, geo->all_luns - 1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __nvm_config_simple(struct nvm_dev *dev,
+ struct nvm_ioctl_create_simple *s)
+{
+ struct nvm_geo *geo = &dev->geo;
+
+ if (s->lun_begin == -1 && s->lun_end == -1) {
+ s->lun_begin = 0;
+ s->lun_end = geo->all_luns - 1;
+ }
+
+ return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
+}
+
+static int __nvm_config_extended(struct nvm_dev *dev,
+ struct nvm_ioctl_create_extended *e)
+{
+ struct nvm_geo *geo = &dev->geo;
+
+ if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
+ e->lun_begin = 0;
+ e->lun_end = dev->geo.all_luns - 1;
+ }
+
+ /* op not set falls into target's default */
+ if (e->op == 0xFFFF)
+ e->op = NVM_TARGET_DEFAULT_OP;
+
+ if (e->op < NVM_TARGET_MIN_OP ||
+ e->op > NVM_TARGET_MAX_OP) {
+ pr_err("nvm: invalid over provisioning value\n");
+ return -EINVAL;
+ }
+
+ return nvm_config_check_luns(geo, e->lun_begin, e->lun_end);
+}
+
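A hedged sketch of how a userspace tool might fill the extended create
config handled above; the struct and type names follow the lightnvm uapi
header, the device and target names are placeholders, and the 0xFFFF
sentinels request the defaults as parsed by __nvm_config_extended():

	struct nvm_ioctl_create c = { 0 };	/* zeroed, so conf.e.rsv == 0 */

	strcpy(c.dev, "nvme0n1");
	strcpy(c.tgtname, "mydev");
	strcpy(c.tgttype, "pblk");
	c.conf.type = NVM_CONFIG_TYPE_EXTENDED;
	c.conf.e.lun_begin = 0xFFFF;	/* sentinel: use all LUNs */
	c.conf.e.lun_end = 0xFFFF;
	c.conf.e.op = 0xFFFF;		/* sentinel: target's default OP */
	/* then issue the create ioctl on the lightnvm control node */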
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
- struct nvm_ioctl_create_simple *s = &create->conf.s;
+ struct nvm_ioctl_create_extended e;
struct request_queue *tqueue;
struct gendisk *tdisk;
struct nvm_tgt_type *tt;
@@ -255,22 +327,41 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
void *targetdata;
int ret;
- tt = nvm_find_target_type(create->tgttype, 1);
+ switch (create->conf.type) {
+ case NVM_CONFIG_TYPE_SIMPLE:
+ ret = __nvm_config_simple(dev, &create->conf.s);
+ if (ret)
+ return ret;
+
+ e.lun_begin = create->conf.s.lun_begin;
+ e.lun_end = create->conf.s.lun_end;
+ e.op = NVM_TARGET_DEFAULT_OP;
+ break;
+ case NVM_CONFIG_TYPE_EXTENDED:
+ ret = __nvm_config_extended(dev, &create->conf.e);
+ if (ret)
+ return ret;
+
+ e = create->conf.e;
+ break;
+ default:
+ pr_err("nvm: config type not valid\n");
+ return -EINVAL;
+ }
+
+ tt = nvm_find_target_type(create->tgttype);
if (!tt) {
pr_err("nvm: target type %s not found\n", create->tgttype);
return -EINVAL;
}
- mutex_lock(&dev->mlock);
- t = nvm_find_target(dev, create->tgtname);
- if (t) {
- pr_err("nvm: target name already exists.\n");
- mutex_unlock(&dev->mlock);
+ if (nvm_target_exists(create->tgtname)) {
+ pr_err("nvm: target name already exists (%s)\n",
+ create->tgtname);
return -EINVAL;
}
- mutex_unlock(&dev->mlock);
- ret = nvm_reserve_luns(dev, s->lun_begin, s->lun_end);
+ ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
if (ret)
return ret;
@@ -280,7 +371,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
goto err_reserve;
}
- tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
+ tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
if (!tgt_dev) {
pr_err("nvm: could not create target device\n");
ret = -ENOMEM;
@@ -350,7 +441,7 @@ err_dev:
err_t:
kfree(t);
err_reserve:
- nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
+ nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
return ret;
}
@@ -420,7 +511,7 @@ static int nvm_register_map(struct nvm_dev *dev)
for (i = 0; i < dev->geo.nr_chnls; i++) {
struct nvm_ch_map *ch_rmap;
int *lun_roffs;
- int luns_in_chnl = dev->geo.luns_per_chnl;
+ int luns_in_chnl = dev->geo.nr_luns;
ch_rmap = &rmap->chnls[i];
@@ -524,41 +615,12 @@ static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}
-void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
- int len)
-{
- struct nvm_geo *geo = &dev->geo;
- struct nvm_dev_map *dev_rmap = dev->rmap;
- u64 i;
-
- for (i = 0; i < len; i++) {
- struct nvm_ch_map *ch_rmap;
- int *lun_roffs;
- struct ppa_addr gaddr;
- u64 pba = le64_to_cpu(entries[i]);
- u64 diff;
-
- if (!pba)
- continue;
-
- gaddr = linear_to_generic_addr(geo, pba);
- ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
- lun_roffs = ch_rmap->lun_offs;
-
- diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
- (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
-
- entries[i] -= cpu_to_le64(diff);
- }
-}
-EXPORT_SYMBOL(nvm_part_to_tgt);
-
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
int ret = 0;
down_write(&nvm_tgtt_lock);
- if (nvm_find_target_type(tt->name, 0))
+ if (__nvm_find_target_type(tt->name))
ret = -EEXIST;
else
list_add(&tt->list, &nvm_tgt_types);
@@ -726,112 +788,6 @@ int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
}
EXPORT_SYMBOL(nvm_submit_io_sync);
-int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
- int nr_ppas)
-{
- struct nvm_geo *geo = &tgt_dev->geo;
- struct nvm_rq rqd;
- int ret;
-
- memset(&rqd, 0, sizeof(struct nvm_rq));
-
- rqd.opcode = NVM_OP_ERASE;
- rqd.flags = geo->plane_mode >> 1;
-
- ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
- if (ret)
- return ret;
-
- ret = nvm_submit_io_sync(tgt_dev, &rqd);
- if (ret) {
- pr_err("rrpr: erase I/O submission failed: %d\n", ret);
- goto free_ppa_list;
- }
-
-free_ppa_list:
- nvm_free_rqd_ppalist(tgt_dev, &rqd);
-
- return ret;
-}
-EXPORT_SYMBOL(nvm_erase_sync);
-
-int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
- nvm_l2p_update_fn *update_l2p, void *priv)
-{
- struct nvm_dev *dev = tgt_dev->parent;
-
- if (!dev->ops->get_l2p_tbl)
- return 0;
-
- return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
-}
-EXPORT_SYMBOL(nvm_get_l2p_tbl);
-
-int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_geo *geo = &dev->geo;
- struct nvm_area *area, *prev, *next;
- sector_t begin = 0;
- sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
-
- if (len > max_sectors)
- return -EINVAL;
-
- area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
- if (!area)
- return -ENOMEM;
-
- prev = NULL;
-
- spin_lock(&dev->lock);
- list_for_each_entry(next, &dev->area_list, list) {
- if (begin + len > next->begin) {
- begin = next->end;
- prev = next;
- continue;
- }
- break;
- }
-
- if ((begin + len) > max_sectors) {
- spin_unlock(&dev->lock);
- kfree(area);
- return -EINVAL;
- }
-
- area->begin = *lba = begin;
- area->end = begin + len;
-
- if (prev) /* insert into sorted order */
- list_add(&area->list, &prev->list);
- else
- list_add(&area->list, &dev->area_list);
- spin_unlock(&dev->lock);
-
- return 0;
-}
-EXPORT_SYMBOL(nvm_get_area);
-
-void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_area *area;
-
- spin_lock(&dev->lock);
- list_for_each_entry(area, &dev->area_list, list) {
- if (area->begin != begin)
- continue;
-
- list_del(&area->list);
- spin_unlock(&dev->lock);
- kfree(area);
- return;
- }
- spin_unlock(&dev->lock);
-}
-EXPORT_SYMBOL(nvm_put_area);
-
void nvm_end_io(struct nvm_rq *rqd)
{
struct nvm_tgt_dev *tgt_dev = rqd->dev;
@@ -858,10 +814,10 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
struct nvm_geo *geo = &dev->geo;
int blk, offset, pl, blktype;
- if (nr_blks != geo->blks_per_lun * geo->plane_mode)
+ if (nr_blks != geo->nr_chks * geo->plane_mode)
return -EINVAL;
- for (blk = 0; blk < geo->blks_per_lun; blk++) {
+ for (blk = 0; blk < geo->nr_chks; blk++) {
offset = blk * geo->plane_mode;
blktype = blks[offset];
@@ -877,7 +833,7 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
blks[blk] = blktype;
}
- return geo->blks_per_lun;
+ return geo->nr_chks;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
@@ -892,53 +848,6 @@ int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
-static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
-{
- struct nvm_geo *geo = &dev->geo;
- int i;
-
- dev->lps_per_blk = geo->pgs_per_blk;
- dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
- if (!dev->lptbl)
- return -ENOMEM;
-
- /* Just a linear array */
- for (i = 0; i < dev->lps_per_blk; i++)
- dev->lptbl[i] = i;
-
- return 0;
-}
-
-static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
-{
- int i, p;
- struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
-
- if (!mlc->num_pairs)
- return 0;
-
- dev->lps_per_blk = mlc->num_pairs;
- dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
- if (!dev->lptbl)
- return -ENOMEM;
-
- /* The lower page table encoding consists of a list of bytes, where each
- * has a lower and an upper half. The first half byte maintains the
- * increment value and every value after is an offset added to the
- * previous incrementation value
- */
- dev->lptbl[0] = mlc->pairs[0] & 0xF;
- for (i = 1; i < dev->lps_per_blk; i++) {
- p = mlc->pairs[i >> 1];
- if (i & 0x1) /* upper */
- dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
- else /* lower */
- dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
- }
-
- return 0;
-}
-
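For reference, the half-byte pair encoding that the removed MLC code
decoded: byte i>>1 holds two 4-bit deltas, lower nibble for even entries,
upper nibble for odd. A worked decode with illustrative bytes {0x21, 0x13}:

	lptbl[0] = 0x21 & 0xF             = 1
	lptbl[1] = 1 + ((0x21 & 0xF0) >> 4) = 3
	lptbl[2] = 3 + (0x13 & 0xF)         = 6
	lptbl[3] = 6 + ((0x13 & 0xF0) >> 4) = 7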
static int nvm_core_init(struct nvm_dev *dev)
{
struct nvm_id *id = &dev->identity;
@@ -946,66 +855,44 @@ static int nvm_core_init(struct nvm_dev *dev)
struct nvm_geo *geo = &dev->geo;
int ret;
+ memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
+
+ if (grp->mtype != 0) {
+ pr_err("nvm: memory type not supported\n");
+ return -EINVAL;
+ }
+
/* Whole device values */
geo->nr_chnls = grp->num_ch;
- geo->luns_per_chnl = grp->num_lun;
-
- /* Generic device values */
- geo->pgs_per_blk = grp->num_pg;
- geo->blks_per_lun = grp->num_blk;
- geo->nr_planes = grp->num_pln;
- geo->fpg_size = grp->fpg_sz;
- geo->pfpg_size = grp->fpg_sz * grp->num_pln;
+ geo->nr_luns = grp->num_lun;
+
+ /* Generic device geometry values */
+ geo->ws_min = grp->ws_min;
+ geo->ws_opt = grp->ws_opt;
+ geo->ws_seq = grp->ws_seq;
+ geo->ws_per_chk = grp->ws_per_chk;
+ geo->nr_chks = grp->num_chk;
geo->sec_size = grp->csecs;
geo->oob_size = grp->sos;
- geo->sec_per_pg = grp->fpg_sz / grp->csecs;
geo->mccap = grp->mccap;
- memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
-
- geo->plane_mode = NVM_PLANE_SINGLE;
geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
- if (grp->mpos & 0x020202)
- geo->plane_mode = NVM_PLANE_DOUBLE;
- if (grp->mpos & 0x040404)
- geo->plane_mode = NVM_PLANE_QUAD;
+ geo->sec_per_chk = grp->clba;
+ geo->sec_per_lun = geo->sec_per_chk * geo->nr_chks;
+ geo->all_luns = geo->nr_luns * geo->nr_chnls;
- if (grp->mtype != 0) {
- pr_err("nvm: memory type not supported\n");
- return -EINVAL;
- }
-
- /* calculated values */
+ /* 1.2 spec device geometry values */
+ geo->plane_mode = 1 << geo->ws_seq;
+ geo->nr_planes = geo->ws_opt / geo->ws_min;
+ geo->sec_per_pg = geo->ws_min;
geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
- geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
- geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
- geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
- dev->total_secs = geo->nr_luns * geo->sec_per_lun;
- dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
+ dev->total_secs = geo->all_luns * geo->sec_per_lun;
+ dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
sizeof(unsigned long), GFP_KERNEL);
if (!dev->lun_map)
return -ENOMEM;
- switch (grp->fmtype) {
- case NVM_ID_FMTYPE_SLC:
- if (nvm_init_slc_tbl(dev, grp)) {
- ret = -ENOMEM;
- goto err_fmtype;
- }
- break;
- case NVM_ID_FMTYPE_MLC:
- if (nvm_init_mlc_tbl(dev, grp)) {
- ret = -ENOMEM;
- goto err_fmtype;
- }
- break;
- default:
- pr_err("nvm: flash type not supported\n");
- ret = -EINVAL;
- goto err_fmtype;
- }
-
INIT_LIST_HEAD(&dev->area_list);
INIT_LIST_HEAD(&dev->targets);
mutex_init(&dev->mlock);
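A worked pass through the derived geometry above, under assumed 1.2-style
values (8 channels, 4 LUNs per channel, 1024 chunks per LUN, 4096 sectors
per chunk, 4 KiB sectors):

	all_luns    = nr_luns * nr_chnls      = 4 * 8       = 32
	sec_per_lun = sec_per_chk * nr_chks   = 4096 * 1024 = 4194304
	total_secs  = all_luns * sec_per_lun  = 134217728   (512 GiB at 4 KiB)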
@@ -1031,7 +918,6 @@ static void nvm_free(struct nvm_dev *dev)
dev->ops->destroy_dma_pool(dev->dma_pool);
nvm_unregister_map(dev);
- kfree(dev->lptbl);
kfree(dev->lun_map);
kfree(dev);
}
@@ -1062,8 +948,8 @@ static int nvm_init(struct nvm_dev *dev)
pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
dev->name, geo->sec_per_pg, geo->nr_planes,
- geo->pgs_per_blk, geo->blks_per_lun,
- geo->nr_luns, geo->nr_chnls);
+ geo->ws_per_chk, geo->nr_chks,
+ geo->all_luns, geo->nr_chnls);
return 0;
err:
pr_err("nvm: failed to initialize nvm\n");
@@ -1135,7 +1021,6 @@ EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
struct nvm_dev *dev;
- struct nvm_ioctl_create_simple *s;
down_write(&nvm_lock);
dev = nvm_find_nvm_dev(create->dev);
@@ -1146,23 +1031,6 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
return -EINVAL;
}
- if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
- pr_err("nvm: config type not valid\n");
- return -EINVAL;
- }
- s = &create->conf.s;
-
- if (s->lun_begin == -1 && s->lun_end == -1) {
- s->lun_begin = 0;
- s->lun_end = dev->geo.nr_luns - 1;
- }
-
- if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
- pr_err("nvm: lun out of bound (%u:%u > %u)\n",
- s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
- return -EINVAL;
- }
-
return nvm_create_tgt(dev, create);
}
@@ -1262,6 +1130,12 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
return -EFAULT;
+ if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
+ create.conf.e.rsv != 0) {
+ pr_err("nvm: reserved config field in use\n");
+ return -EINVAL;
+ }
+
create.dev[DISK_NAME_LEN - 1] = '\0';
create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
create.tgtname[DISK_NAME_LEN - 1] = '\0';
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index 0d227ef7d1b9..000fcad38136 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -19,12 +19,16 @@
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
{
+ struct request_queue *q = pblk->dev->q;
struct pblk_w_ctx w_ctx;
sector_t lba = pblk_get_lba(bio);
+ unsigned long start_time = jiffies;
unsigned int bpos, pos;
int nr_entries = pblk_get_secs(bio);
int i, ret;
+ generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0);
+
/* Update the write buffer head (mem) with the entries that we can
* write. The write in itself cannot fail, so there is no need to
* rollback from here on.
@@ -67,6 +71,7 @@ retry:
pblk_rl_inserted(&pblk->rl, nr_entries);
out:
+ generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time);
pblk_write_should_kick(pblk);
return ret;
}
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 76516ee84e9a..0487b9340c1d 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -32,8 +32,8 @@ static void pblk_line_mark_bb(struct work_struct *work)
struct pblk_line *line;
int pos;
- line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
- pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
+ line = &pblk->lines[pblk_ppa_to_line(*ppa)];
+ pos = pblk_ppa_to_pos(&dev->geo, *ppa);
pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
line->id, pos);
@@ -48,7 +48,7 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- int pos = pblk_dev_ppa_to_pos(geo, *ppa);
+ int pos = pblk_ppa_to_pos(geo, *ppa);
pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
atomic_long_inc(&pblk->erase_failed);
@@ -66,7 +66,7 @@ static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
struct pblk_line *line;
- line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
+ line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
atomic_dec(&line->left_seblks);
if (rqd->error) {
@@ -144,7 +144,7 @@ void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
BUG_ON(pblk_ppa_empty(ppa));
#endif
- line_id = pblk_tgt_ppa_to_line(ppa);
+ line_id = pblk_ppa_to_line(ppa);
line = &pblk->lines[line_id];
paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
@@ -650,7 +650,7 @@ next_rq:
} else {
for (i = 0; i < rqd.nr_ppas; ) {
struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
- int pos = pblk_dev_ppa_to_pos(geo, ppa);
+ int pos = pblk_ppa_to_pos(geo, ppa);
int read_type = PBLK_READ_RANDOM;
if (pblk_io_aligned(pblk, rq_ppas))
@@ -668,7 +668,7 @@ next_rq:
}
ppa = addr_to_gen_ppa(pblk, paddr, id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
@@ -742,7 +742,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
cmd_op = NVM_OP_PWRITE;
flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
lba_list = emeta_to_lbas(pblk, line->emeta->buf);
- } else if (dir == PBLK_READ) {
+ } else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
bio_op = REQ_OP_READ;
cmd_op = NVM_OP_PREAD;
flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
@@ -802,7 +802,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
if (rqd.error) {
if (dir == PBLK_WRITE)
pblk_log_write_err(pblk, &rqd);
- else
+ else if (dir == PBLK_READ)
pblk_log_read_err(pblk, &rqd);
}
@@ -816,7 +816,7 @@ int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
u64 bpaddr = pblk_line_smeta_start(pblk, line);
- return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ);
+ return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
}
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
@@ -854,8 +854,8 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_geo *geo = &dev->geo;
pr_err("pblk: could not sync erase line:%d,blk:%d\n",
- pblk_dev_ppa_to_line(ppa),
- pblk_dev_ppa_to_pos(geo, ppa));
+ pblk_ppa_to_line(ppa),
+ pblk_ppa_to_pos(geo, ppa));
rqd.error = ret;
goto out;
@@ -979,7 +979,7 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
/* Start metadata */
smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
- smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);
+ smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
/* Fill metadata among lines */
if (cur) {
@@ -1032,7 +1032,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
lm->sec_per_line);
bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
lm->sec_per_line);
- line->sec_in_line -= geo->sec_per_blk;
+ line->sec_in_line -= geo->sec_per_chk;
if (bit >= lm->emeta_bb)
nr_bb++;
}
@@ -1145,7 +1145,7 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
}
spin_unlock(&l_mg->free_lock);
- pblk_rl_free_lines_dec(&pblk->rl, line);
+ pblk_rl_free_lines_dec(&pblk->rl, line, true);
if (!pblk_line_init_bb(pblk, line, 0)) {
list_add(&line->list, &l_mg->free_list);
@@ -1233,7 +1233,7 @@ retry:
l_mg->data_line = retry_line;
spin_unlock(&l_mg->free_lock);
- pblk_rl_free_lines_dec(&pblk->rl, retry_line);
+ pblk_rl_free_lines_dec(&pblk->rl, line, false);
if (pblk_line_erase(pblk, retry_line))
goto retry;
@@ -1252,7 +1252,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line *line;
- int is_next = 0;
spin_lock(&l_mg->free_lock);
line = pblk_line_get(pblk);
@@ -1280,7 +1279,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
} else {
l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
l_mg->data_next->type = PBLK_LINETYPE_DATA;
- is_next = 1;
}
spin_unlock(&l_mg->free_lock);
@@ -1290,10 +1288,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
return NULL;
}
- pblk_rl_free_lines_dec(&pblk->rl, line);
- if (is_next)
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
-
retry_setup:
if (!pblk_line_init_metadata(pblk, line, NULL)) {
line = pblk_line_retry(pblk, line);
@@ -1311,6 +1305,8 @@ retry_setup:
goto retry_setup;
}
+ pblk_rl_free_lines_dec(&pblk->rl, line, true);
+
return line;
}
@@ -1395,7 +1391,6 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line *cur, *new = NULL;
unsigned int left_seblks;
- int is_next = 0;
cur = l_mg->data_line;
new = l_mg->data_next;
@@ -1444,6 +1439,8 @@ retry_setup:
goto retry_setup;
}
+ pblk_rl_free_lines_dec(&pblk->rl, new, true);
+
/* Allocate next line for preparation */
spin_lock(&l_mg->free_lock);
l_mg->data_next = pblk_line_get(pblk);
@@ -1457,13 +1454,9 @@ retry_setup:
} else {
l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
l_mg->data_next->type = PBLK_LINETYPE_DATA;
- is_next = 1;
}
spin_unlock(&l_mg->free_lock);
- if (is_next)
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
-
out:
return new;
}
@@ -1561,8 +1554,8 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_geo *geo = &dev->geo;
pr_err("pblk: could not async erase line:%d,blk:%d\n",
- pblk_dev_ppa_to_line(ppa),
- pblk_dev_ppa_to_pos(geo, ppa));
+ pblk_ppa_to_line(ppa),
+ pblk_ppa_to_pos(geo, ppa));
}
return err;
@@ -1746,7 +1739,7 @@ void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct pblk_lun *rlun;
- int nr_luns = geo->nr_luns;
+ int nr_luns = geo->all_luns;
int bit = -1;
while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
@@ -1884,7 +1877,7 @@ void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
/* If the L2P entry maps to a line, the reference is valid */
if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
- int line_id = pblk_dev_ppa_to_line(ppa);
+ int line_id = pblk_ppa_to_line(ppa);
struct pblk_line *line = &pblk->lines[line_id];
kref_get(&line->ref);
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 9c8e114c8a54..3d899383666e 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -169,7 +169,14 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
* the line untouched. TODO: Implement a recovery routine that scans and
* moves all sectors on the line.
*/
- lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
+
+ ret = pblk_recov_check_emeta(pblk, emeta_buf);
+ if (ret) {
+ pr_err("pblk: inconsistent emeta (line %d)\n", line->id);
+ goto fail_free_emeta;
+ }
+
+ lba_list = emeta_to_lbas(pblk, emeta_buf);
if (!lba_list) {
pr_err("pblk: could not interpret emeta (line %d)\n", line->id);
goto fail_free_emeta;
@@ -519,22 +526,12 @@ void pblk_gc_should_start(struct pblk *pblk)
}
}
-/*
- * If flush_wq == 1 then no lock should be held by the caller since
- * flush_workqueue can sleep
- */
-static void pblk_gc_stop(struct pblk *pblk, int flush_wq)
-{
- pblk->gc.gc_active = 0;
- pr_debug("pblk: gc stop\n");
-}
-
void pblk_gc_should_stop(struct pblk *pblk)
{
struct pblk_gc *gc = &pblk->gc;
if (gc->gc_active && !gc->gc_forced)
- pblk_gc_stop(pblk, 0);
+ gc->gc_active = 0;
}
void pblk_gc_should_kick(struct pblk *pblk)
@@ -660,7 +657,7 @@ void pblk_gc_exit(struct pblk *pblk)
gc->gc_enabled = 0;
del_timer_sync(&gc->gc_timer);
- pblk_gc_stop(pblk, 1);
+ gc->gc_active = 0;
if (gc->gc_ts)
kthread_stop(gc->gc_ts);
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 695826a06b5d..93d671ca518e 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -169,8 +169,8 @@ static int pblk_set_ppaf(struct pblk *pblk)
}
ppaf.ch_len = power_len;
- power_len = get_count_order(geo->luns_per_chnl);
- if (1 << power_len != geo->luns_per_chnl) {
+ power_len = get_count_order(geo->nr_luns);
+ if (1 << power_len != geo->nr_luns) {
pr_err("pblk: supports only power-of-two LUN config.\n");
return -EINVAL;
}
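get_count_order() returns ceil(log2(n)), so the check above rejects any LUN
count that is not an exact power of two. Two worked cases:

	nr_luns = 8:  get_count_order(8)  = 3, 1 << 3 == 8  -> accepted
	nr_luns = 12: get_count_order(12) = 4, 1 << 4 == 16 -> rejected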
@@ -254,7 +254,7 @@ static int pblk_core_init(struct pblk *pblk)
struct nvm_geo *geo = &dev->geo;
pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
- geo->nr_planes * geo->nr_luns;
+ geo->nr_planes * geo->all_luns;
if (pblk_init_global_caches(pblk))
return -ENOMEM;
@@ -270,21 +270,22 @@ static int pblk_core_init(struct pblk *pblk)
if (!pblk->gen_ws_pool)
goto free_page_bio_pool;
- pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
+ pblk->rec_pool = mempool_create_slab_pool(geo->all_luns,
+ pblk_rec_cache);
if (!pblk->rec_pool)
goto free_gen_ws_pool;
- pblk->r_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->r_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_g_rq_cache);
if (!pblk->r_rq_pool)
goto free_rec_pool;
- pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->e_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_g_rq_cache);
if (!pblk->e_rq_pool)
goto free_r_rq_pool;
- pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->w_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_w_rq_cache);
if (!pblk->w_rq_pool)
goto free_e_rq_pool;
@@ -354,6 +355,8 @@ static void pblk_core_free(struct pblk *pblk)
mempool_destroy(pblk->e_rq_pool);
mempool_destroy(pblk->w_rq_pool);
+ pblk_rwb_free(pblk);
+
pblk_free_global_caches(pblk);
}
@@ -409,7 +412,7 @@ static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
u8 *blks;
int nr_blks, ret;
- nr_blks = geo->blks_per_lun * geo->plane_mode;
+ nr_blks = geo->nr_chks * geo->plane_mode;
blks = kmalloc(nr_blks, GFP_KERNEL);
if (!blks)
return -ENOMEM;
@@ -482,20 +485,21 @@ static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
int i, ret;
/* TODO: Implement unbalanced LUN support */
- if (geo->luns_per_chnl < 0) {
+ if (geo->nr_luns < 0) {
pr_err("pblk: unbalanced LUN config.\n");
return -EINVAL;
}
- pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
+ pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
+ GFP_KERNEL);
if (!pblk->luns)
return -ENOMEM;
- for (i = 0; i < geo->nr_luns; i++) {
+ for (i = 0; i < geo->all_luns; i++) {
/* Stripe across channels */
int ch = i % geo->nr_chnls;
int lun_raw = i / geo->nr_chnls;
- int lunid = lun_raw + ch * geo->luns_per_chnl;
+ int lunid = lun_raw + ch * geo->nr_luns;
rlun = &pblk->luns[i];
rlun->bppa = luns[lunid];
@@ -577,22 +581,37 @@ static unsigned int calc_emeta_len(struct pblk *pblk)
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_line_meta *lm = &pblk->lm;
struct nvm_geo *geo = &dev->geo;
sector_t provisioned;
+ int sec_meta, blk_meta;
- pblk->over_pct = 20;
+ if (geo->op == NVM_TARGET_DEFAULT_OP)
+ pblk->op = PBLK_DEFAULT_OP;
+ else
+ pblk->op = geo->op;
provisioned = nr_free_blks;
- provisioned *= (100 - pblk->over_pct);
+ provisioned *= (100 - pblk->op);
sector_div(provisioned, 100);
+ pblk->op_blks = nr_free_blks - provisioned;
+
/* Internally pblk manages all free blocks, but all calculations based
* on user capacity consider only provisioned blocks
*/
pblk->rl.total_blocks = nr_free_blks;
- pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
- pblk->capacity = provisioned * geo->sec_per_blk;
+ pblk->rl.nr_secs = nr_free_blks * geo->sec_per_chk;
+
+ /* Consider sectors used for metadata */
+ sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
+ blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);
+
+ pblk->capacity = (provisioned - blk_meta) * geo->sec_per_chk;
+
atomic_set(&pblk->rl.free_blocks, nr_free_blks);
+ atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
}
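A worked pass through the provisioning math above, assuming 2048 free
chunks, an 11% OP (the default used when none is given), 4096 sectors per
chunk, and 16 chunks' worth of line metadata (blk_meta is derived from the
smeta/emeta sizes in practice):

	provisioned = 2048 * (100 - 11) / 100 = 1822
	op_blks     = 2048 - 1822             = 226
	capacity    = (1822 - 16) * 4096      = 7397376 sectors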
static int pblk_lines_alloc_metadata(struct pblk *pblk)
@@ -683,7 +702,7 @@ static int pblk_lines_init(struct pblk *pblk)
int i, ret;
pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
- max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
+ max_write_ppas = pblk->min_write_pgs * geo->all_luns;
pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
max_write_ppas : nvm_max_phys_sects(dev);
pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
@@ -693,26 +712,26 @@ static int pblk_lines_init(struct pblk *pblk)
return -EINVAL;
}
- div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
+ div_u64_rem(geo->sec_per_chk, pblk->min_write_pgs, &mod);
if (mod) {
pr_err("pblk: bad configuration of sectors/pages\n");
return -EINVAL;
}
- l_mg->nr_lines = geo->blks_per_lun;
+ l_mg->nr_lines = geo->nr_chks;
l_mg->log_line = l_mg->data_line = NULL;
l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
l_mg->nr_free_lines = 0;
bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);
- lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
- lm->blk_per_line = geo->nr_luns;
- lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
+ lm->sec_per_line = geo->sec_per_chk * geo->all_luns;
+ lm->blk_per_line = geo->all_luns;
+ lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
- lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
+ lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
lm->mid_thrs = lm->sec_per_line / 2;
lm->high_thrs = lm->sec_per_line / 4;
- lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;
+ lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;
/* Calculate necessary pages for smeta. See comment over struct
* line_smeta definition
@@ -742,12 +761,12 @@ add_emeta_page:
goto add_emeta_page;
}
- lm->emeta_bb = geo->nr_luns > i ? geo->nr_luns - i : 0;
+ lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;
lm->min_blk_line = 1;
- if (geo->nr_luns > 1)
+ if (geo->all_luns > 1)
lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
- lm->emeta_sec[0], geo->sec_per_blk);
+ lm->emeta_sec[0], geo->sec_per_chk);
if (lm->min_blk_line > lm->blk_per_line) {
pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
@@ -772,7 +791,7 @@ add_emeta_page:
goto fail_free_bb_template;
}
- bb_distance = (geo->nr_luns) * geo->sec_per_pl;
+ bb_distance = (geo->all_luns) * geo->sec_per_pl;
for (i = 0; i < lm->sec_per_line; i += bb_distance)
bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);
@@ -844,7 +863,7 @@ add_emeta_page:
pblk_set_provision(pblk, nr_free_blks);
/* Cleanup per-LUN bad block lists - managed within lines on run-time */
- for (i = 0; i < geo->nr_luns; i++)
+ for (i = 0; i < geo->all_luns; i++)
kfree(pblk->luns[i].bb_list);
return 0;
@@ -858,7 +877,7 @@ fail_free_bb_template:
fail_free_meta:
pblk_line_meta_free(pblk);
fail:
- for (i = 0; i < geo->nr_luns; i++)
+ for (i = 0; i < geo->all_luns; i++)
kfree(pblk->luns[i].bb_list);
return ret;
@@ -866,15 +885,19 @@ fail:
static int pblk_writer_init(struct pblk *pblk)
{
- timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
- mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
-
pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
if (IS_ERR(pblk->writer_ts)) {
- pr_err("pblk: could not allocate writer kthread\n");
- return PTR_ERR(pblk->writer_ts);
+ int err = PTR_ERR(pblk->writer_ts);
+
+ if (err != -EINTR)
+ pr_err("pblk: could not allocate writer kthread (%d)\n",
+ err);
+ return err;
}
+ timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
+ mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
+
return 0;
}
@@ -910,7 +933,6 @@ static void pblk_tear_down(struct pblk *pblk)
pblk_pipeline_stop(pblk);
pblk_writer_stop(pblk);
pblk_rb_sync_l2p(&pblk->rwb);
- pblk_rwb_free(pblk);
pblk_rl_free(&pblk->rl);
pr_debug("pblk: consistent tear down\n");
@@ -1025,7 +1047,8 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
ret = pblk_writer_init(pblk);
if (ret) {
- pr_err("pblk: could not initialize write thread\n");
+ if (ret != -EINTR)
+ pr_err("pblk: could not initialize write thread\n");
goto fail_free_lines;
}
@@ -1041,13 +1064,14 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
blk_queue_write_cache(tqueue, true, false);
- tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
+ tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
tqueue->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);
- pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
- geo->nr_luns, pblk->l_mg.nr_lines,
+ pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
+ tdisk->disk_name,
+ geo->all_luns, pblk->l_mg.nr_lines,
(unsigned long long)pblk->rl.nr_secs,
pblk->rwb.nr_entries);
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 6f3ecde2140f..7445e6430c52 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -146,7 +146,7 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
return;
/* Erase blocks that are bad in this line but might not be in next */
- if (unlikely(ppa_empty(*erase_ppa)) &&
+ if (unlikely(pblk_ppa_empty(*erase_ppa)) &&
bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
int bit = -1;
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index b8f78e401482..ec8fc314646b 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -54,7 +54,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
rb->seg_size = (1 << power_seg_sz);
rb->nr_entries = (1 << power_size);
rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
- rb->sync_point = EMPTY_ENTRY;
+ rb->flush_point = EMPTY_ENTRY;
spin_lock_init(&rb->w_lock);
spin_lock_init(&rb->s_lock);
@@ -112,7 +112,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
up_write(&pblk_rb_lock);
#ifdef CONFIG_NVM_DEBUG
- atomic_set(&rb->inflight_sync_point, 0);
+ atomic_set(&rb->inflight_flush_point, 0);
#endif
/*
@@ -226,7 +226,7 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
entry->cacheline);
- line = &pblk->lines[pblk_tgt_ppa_to_line(w_ctx->ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
kref_put(&line->ref, pblk_line_put);
clean_wctx(w_ctx);
rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1);
@@ -349,35 +349,35 @@ void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
smp_store_release(&entry->w_ctx.flags, flags);
}
-static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
+static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
unsigned int pos)
{
struct pblk_rb_entry *entry;
- unsigned int subm, sync_point;
+ unsigned int sync, flush_point;
- subm = READ_ONCE(rb->subm);
+ sync = READ_ONCE(rb->sync);
+
+ if (pos == sync)
+ return 0;
#ifdef CONFIG_NVM_DEBUG
- atomic_inc(&rb->inflight_sync_point);
+ atomic_inc(&rb->inflight_flush_point);
#endif
- if (pos == subm)
- return 0;
+ flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
+ entry = &rb->entries[flush_point];
- sync_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
- entry = &rb->entries[sync_point];
+ pblk_rb_sync_init(rb, NULL);
- /* Protect syncs */
- smp_store_release(&rb->sync_point, sync_point);
+ /* Protect flush points */
+ smp_store_release(&rb->flush_point, flush_point);
- if (!bio)
- return 0;
+ if (bio)
+ bio_list_add(&entry->w_ctx.bios, bio);
- spin_lock_irq(&rb->s_lock);
- bio_list_add(&entry->w_ctx.bios, bio);
- spin_unlock_irq(&rb->s_lock);
+ pblk_rb_sync_end(rb, NULL);
- return 1;
+ return bio ? 1 : 0;
}
static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
@@ -416,7 +416,7 @@ void pblk_rb_flush(struct pblk_rb *rb)
struct pblk *pblk = container_of(rb, struct pblk, rwb);
unsigned int mem = READ_ONCE(rb->mem);
- if (pblk_rb_sync_point_set(rb, NULL, mem))
+ if (pblk_rb_flush_point_set(rb, NULL, mem))
return;
pblk_write_should_kick(pblk);
@@ -440,7 +440,7 @@ static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->nr_flush);
#endif
- if (pblk_rb_sync_point_set(&pblk->rwb, bio, mem))
+ if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
*io_ret = NVM_IO_OK;
}
@@ -606,21 +606,6 @@ try:
return NVM_IO_ERR;
}
- if (flags & PBLK_FLUSH_ENTRY) {
- unsigned int sync_point;
-
- sync_point = READ_ONCE(rb->sync_point);
- if (sync_point == pos) {
- /* Protect syncs */
- smp_store_release(&rb->sync_point, EMPTY_ENTRY);
- }
-
- flags &= ~PBLK_FLUSH_ENTRY;
-#ifdef CONFIG_NVM_DEBUG
- atomic_dec(&rb->inflight_sync_point);
-#endif
- }
-
flags &= ~PBLK_WRITTEN_DATA;
flags |= PBLK_SUBMITTED_ENTRY;
@@ -730,15 +715,24 @@ void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
- unsigned int sync;
- unsigned int i;
-
+ unsigned int sync, flush_point;
lockdep_assert_held(&rb->s_lock);
sync = READ_ONCE(rb->sync);
+ flush_point = READ_ONCE(rb->flush_point);
- for (i = 0; i < nr_entries; i++)
- sync = (sync + 1) & (rb->nr_entries - 1);
+ if (flush_point != EMPTY_ENTRY) {
+ unsigned int secs_to_flush;
+
+ secs_to_flush = pblk_rb_ring_count(flush_point, sync,
+ rb->nr_entries);
+ if (secs_to_flush < nr_entries) {
+ /* Protect flush points */
+ smp_store_release(&rb->flush_point, EMPTY_ENTRY);
+ }
+ }
+
+ sync = (sync + nr_entries) & (rb->nr_entries - 1);
/* Protect from counts */
smp_store_release(&rb->sync, sync);
@@ -746,22 +740,27 @@ unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
return sync;
}
-unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb)
+/* Calculate how many sectors to submit up to the current flush point. */
+unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
{
- unsigned int subm, sync_point;
- unsigned int count;
+ unsigned int subm, sync, flush_point;
+ unsigned int submitted, to_flush;
- /* Protect syncs */
- sync_point = smp_load_acquire(&rb->sync_point);
- if (sync_point == EMPTY_ENTRY)
+ /* Protect flush points */
+ flush_point = smp_load_acquire(&rb->flush_point);
+ if (flush_point == EMPTY_ENTRY)
return 0;
+ /* Protect syncs */
+ sync = smp_load_acquire(&rb->sync);
+
subm = READ_ONCE(rb->subm);
+ submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);
/* The sync point itself counts as a sector to sync */
- count = pblk_rb_ring_count(sync_point, subm, rb->nr_entries) + 1;
+ to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;
- return count;
+ return (submitted < to_flush) ? (to_flush - submitted) : 0;
}
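A worked example of the count above, assuming pblk_rb_ring_count(head, tail,
size) is the usual power-of-two ring distance (head - tail) & (size - 1),
with nr_entries = 8, sync = 6, subm = 7, flush_point = 1:

	submitted = (7 - 6) & 7        = 1
	to_flush  = ((1 - 6) & 7) + 1  = 4
	return      4 - 1              = 3   sectors still to submit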
/*
@@ -801,7 +800,7 @@ int pblk_rb_tear_down_check(struct pblk_rb *rb)
if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
(rb->sync == rb->l2p_update) &&
- (rb->sync_point == EMPTY_ENTRY)) {
+ (rb->flush_point == EMPTY_ENTRY)) {
goto out;
}
@@ -848,7 +847,7 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
queued_entries++;
spin_unlock_irq(&rb->s_lock);
- if (rb->sync_point != EMPTY_ENTRY)
+ if (rb->flush_point != EMPTY_ENTRY)
offset = scnprintf(buf, PAGE_SIZE,
"%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
rb->nr_entries,
@@ -857,14 +856,14 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb->sync,
rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
- atomic_read(&rb->inflight_sync_point),
+ atomic_read(&rb->inflight_flush_point),
#else
0,
#endif
- rb->sync_point,
+ rb->flush_point,
pblk_rb_read_count(rb),
pblk_rb_space(rb),
- pblk_rb_sync_point_count(rb),
+ pblk_rb_flush_point_count(rb),
queued_entries);
else
offset = scnprintf(buf, PAGE_SIZE,
@@ -875,13 +874,13 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb->sync,
rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
- atomic_read(&rb->inflight_sync_point),
+ atomic_read(&rb->inflight_flush_point),
#else
0,
#endif
pblk_rb_read_count(rb),
pblk_rb_space(rb),
- pblk_rb_sync_point_count(rb),
+ pblk_rb_flush_point_count(rb),
queued_entries);
return offset;
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index ca79d8fb3e60..2f761283f43e 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -141,7 +141,7 @@ static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
struct ppa_addr ppa = ppa_list[i];
struct pblk_line *line;
- line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(ppa)];
kref_put(&line->ref, pblk_line_put_wq);
}
}
@@ -158,8 +158,12 @@ static void pblk_end_user_read(struct bio *bio)
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
bool put_line)
{
+ struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct bio *bio = rqd->bio;
+ unsigned long start_time = r_ctx->start_time;
+
+ generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);
if (rqd->error)
pblk_log_read_err(pblk, rqd);
@@ -193,9 +197,9 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
__pblk_end_io_read(pblk, rqd, true);
}
-static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned int bio_init_idx,
- unsigned long *read_bitmap)
+static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
+ unsigned int bio_init_idx,
+ unsigned long *read_bitmap)
{
struct bio *new_bio, *bio = rqd->bio;
struct pblk_sec_meta *meta_list = rqd->meta_list;
@@ -270,7 +274,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
i = 0;
hole = find_first_zero_bit(read_bitmap, nr_secs);
do {
- int line_id = pblk_dev_ppa_to_line(rqd->ppa_list[i]);
+ int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
struct pblk_line *line = &pblk->lines[line_id];
kref_put(&line->ref, pblk_line_put);
@@ -306,6 +310,8 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
return NVM_IO_OK;
err:
+ pr_err("pblk: failed to perform partial read\n");
+
/* Free allocated pages in new bio */
pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
__pblk_end_io_read(pblk, rqd, false);
@@ -357,6 +363,7 @@ retry:
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ struct request_queue *q = dev->q;
sector_t blba = pblk_get_lba(bio);
unsigned int nr_secs = pblk_get_secs(bio);
struct pblk_g_ctx *r_ctx;
@@ -372,6 +379,8 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
return NVM_IO_ERR;
}
+ generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);
+
bitmap_zero(&read_bitmap, nr_secs);
rqd = pblk_alloc_rqd(pblk, PBLK_READ);
@@ -383,6 +392,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->end_io = pblk_end_io_read;
r_ctx = nvm_rq_to_pdu(rqd);
+ r_ctx->start_time = jiffies;
r_ctx->lba = blba;
/* Save the index for this bio's start. This is needed in case
@@ -422,7 +432,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
if (!int_bio) {
pr_err("pblk: could not clone read bio\n");
- return NVM_IO_ERR;
+ goto fail_end_io;
}
rqd->bio = int_bio;
@@ -433,7 +443,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
pr_err("pblk: read IO submission failed\n");
if (int_bio)
bio_put(int_bio);
- return ret;
+ goto fail_end_io;
}
return NVM_IO_OK;
@@ -442,17 +452,14 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
/* The read bio request could be partially filled by the write buffer,
* but there are some holes that need to be read from the drive.
*/
- ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
- if (ret) {
- pr_err("pblk: failed to perform partial read\n");
- return ret;
- }
-
- return NVM_IO_OK;
+ return pblk_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
fail_rqd_free:
pblk_free_rqd(pblk, rqd, PBLK_READ);
return ret;
+fail_end_io:
+ __pblk_end_io_read(pblk, rqd, false);
+ return ret;
}
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
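
[Note] The read path above feeds the block layer's generic I/O accounting: pblk_submit_read() stamps the request context with jiffies and calls generic_start_io_acct(), and __pblk_end_io_read() balances it with generic_end_io_acct(); the new fail_end_io label keeps the pair balanced even when the bio clone or the submission fails. A minimal userspace sketch of the same start/complete bookkeeping (all names below are invented stand-ins for the kernel helpers):

	/* Userspace sketch of the start/complete accounting pattern wired in
	 * above. All names are invented; the kernel code stores jiffies in
	 * r_ctx->start_time and uses generic_{start,end}_io_acct().
	 */
	#include <stdio.h>
	#include <time.h>

	struct io_ctx {
		struct timespec start_time;	/* role of r_ctx->start_time */
	};

	static void io_acct_start(struct io_ctx *ctx)
	{
		clock_gettime(CLOCK_MONOTONIC, &ctx->start_time);
	}

	static double io_acct_end_ms(const struct io_ctx *ctx)
	{
		struct timespec now;

		clock_gettime(CLOCK_MONOTONIC, &now);
		return (now.tv_sec - ctx->start_time.tv_sec) * 1e3 +
		       (now.tv_nsec - ctx->start_time.tv_nsec) / 1e6;
	}

	int main(void)
	{
		struct io_ctx ctx;

		io_acct_start(&ctx);			/* pblk_submit_read() side */
		/* ... device services the read ... */
		printf("read latency: %.3f ms\n", io_acct_end_ms(&ctx));
		return 0;
	}

Saving the timestamp in the request context rather than in the bio matters here because the user bio is cloned before submission; the completion path accounts against the original start time regardless of which bio finishes.
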
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index eadb3eb5d4dc..1d5e961bf5e0 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -111,18 +111,18 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
return 0;
}
-__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta_buf)
+int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
u32 crc;
crc = pblk_calc_emeta_crc(pblk, emeta_buf);
if (le32_to_cpu(emeta_buf->crc) != crc)
- return NULL;
+ return 1;
if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
- return NULL;
+ return 1;
- return emeta_to_lbas(pblk, emeta_buf);
+ return 0;
}
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
@@ -137,7 +137,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
u64 nr_valid_lbas, nr_lbas = 0;
u64 i;
- lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
+ lba_list = emeta_to_lbas(pblk, emeta_buf);
if (!lba_list)
return 1;
@@ -149,7 +149,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
struct ppa_addr ppa;
int pos;
- ppa = addr_to_pblk_ppa(pblk, i, line->id);
+ ppa = addr_to_gen_ppa(pblk, i, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
/* Do not update bad blocks */
@@ -188,7 +188,7 @@ static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
- nr_bb * geo->sec_per_blk;
+ nr_bb * geo->sec_per_chk;
}
struct pblk_recov_alloc {
@@ -263,12 +263,12 @@ next_read_rq:
int pos;
ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
r_ptr_int += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, r_ptr_int++)
@@ -288,7 +288,7 @@ next_read_rq:
/* At this point, the read should not fail. If it does, it is a problem
* we cannot recover from here. Need FTL log.
*/
- if (rqd->error) {
+ if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
pr_err("pblk: L2P recovery failed (%d)\n", rqd->error);
return -EINTR;
}
@@ -411,12 +411,12 @@ next_pad_rq:
int pos;
w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
- ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
+ ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
w_ptr += pblk->min_write_pgs;
- ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
+ ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
}
@@ -541,12 +541,12 @@ next_rq:
w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
w_ptr += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++)
@@ -672,12 +672,12 @@ next_rq:
paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
ppa = addr_to_gen_ppa(pblk, paddr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
paddr += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, paddr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, paddr++)
@@ -817,7 +817,7 @@ static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
while (emeta_secs) {
emeta_start--;
- ppa = addr_to_pblk_ppa(pblk, emeta_start, line->id);
+ ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
if (!test_bit(pos, line->blk_bitmap))
emeta_secs--;
@@ -938,6 +938,11 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
goto next;
}
+ if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
+ pblk_recov_l2p_from_oob(pblk, line);
+ goto next;
+ }
+
if (pblk_recov_l2p_from_emeta(pblk, line))
pblk_recov_l2p_from_oob(pblk, line);
@@ -984,10 +989,8 @@ next:
}
spin_unlock(&l_mg->free_lock);
- if (is_next) {
+ if (is_next)
pblk_line_erase(pblk, l_mg->data_next);
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
- }
out:
if (found_lines != recovered_lines)
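
[Note] pblk_recov_check_emeta() separates validation from use: CRC and magic are checked up front, returning 0 on success and nonzero on failure, so pblk_recov_l2p() can fall back to out-of-band (OOB) recovery before ever dereferencing the LBA list; the recovery read path likewise now tolerates NVM_RSP_WARN_HIGHECC completions, where data is still returned. A compact sketch of the check-before-use pattern, assuming a zlib-style crc32() in place of pblk_calc_emeta_crc() and an invented header layout:

	/* Check-before-use sketch mirroring pblk_recov_check_emeta():
	 * return 0 when the metadata can be trusted, 1 otherwise so the
	 * caller can fall back to OOB recovery. The header layout and the
	 * zlib crc32() are stand-ins for the kernel-side helpers.
	 */
	#include <stddef.h>
	#include <stdint.h>
	#include <zlib.h>

	#define EMETA_MAGIC 0x70626c6bU		/* invented identifier */

	struct emeta_hdr {
		uint32_t identifier;
		uint32_t crc;			/* CRC over the payload below */
	};

	static int check_emeta(const struct emeta_hdr *hdr,
			       const unsigned char *payload, size_t len)
	{
		if (hdr->identifier != EMETA_MAGIC)
			return 1;		/* not our metadata */

		if (hdr->crc != (uint32_t)crc32(0L, payload, (uInt)len))
			return 1;		/* corrupted: recover from OOB */

		return 0;			/* safe to use the LBA list */
	}

	int main(void)
	{
		unsigned char payload[16] = { 0 };
		struct emeta_hdr hdr = {
			.identifier = EMETA_MAGIC,
			.crc = (uint32_t)crc32(0L, payload, sizeof(payload)),
		};

		return check_emeta(&hdr, payload, sizeof(payload)); /* 0: trusted */
	}
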
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index dacc71922260..0d457b162f23 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -89,17 +89,15 @@ unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
return atomic_read(&rl->free_blocks);
}
-/*
- * We check for (i) the number of free blocks in the current LUN and (ii) the
- * total number of free blocks in the pblk instance. This is to even out the
- * number of free blocks on each LUN when GC kicks in.
- *
- * Only the total number of free blocks is used to configure the rate limiter.
- */
-void pblk_rl_update_rates(struct pblk_rl *rl)
+unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
+{
+ return atomic_read(&rl->free_user_blocks);
+}
+
+static void __pblk_rl_update_rates(struct pblk_rl *rl,
+ unsigned long free_blocks)
{
struct pblk *pblk = container_of(rl, struct pblk, rl);
- unsigned long free_blocks = pblk_rl_nr_free_blks(rl);
int max = rl->rb_budget;
if (free_blocks >= rl->high) {
@@ -132,20 +130,37 @@ void pblk_rl_update_rates(struct pblk_rl *rl)
pblk_gc_should_stop(pblk);
}
+void pblk_rl_update_rates(struct pblk_rl *rl)
+{
+ __pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
+}
+
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
int blk_in_line = atomic_read(&line->blk_in_line);
+ int free_blocks;
atomic_add(blk_in_line, &rl->free_blocks);
- pblk_rl_update_rates(rl);
+ free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);
+
+ __pblk_rl_update_rates(rl, free_blocks);
}
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line)
+void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
+ bool used)
{
int blk_in_line = atomic_read(&line->blk_in_line);
+ int free_blocks;
atomic_sub(blk_in_line, &rl->free_blocks);
- pblk_rl_update_rates(rl);
+
+ if (used)
+ free_blocks = atomic_sub_return(blk_in_line,
+ &rl->free_user_blocks);
+ else
+ free_blocks = atomic_read(&rl->free_user_blocks);
+
+ __pblk_rl_update_rates(rl, free_blocks);
}
int pblk_rl_high_thrs(struct pblk_rl *rl)
@@ -174,16 +189,21 @@ void pblk_rl_free(struct pblk_rl *rl)
void pblk_rl_init(struct pblk_rl *rl, int budget)
{
struct pblk *pblk = container_of(rl, struct pblk, rl);
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
int min_blocks = lm->blk_per_line * PBLK_GC_RSV_LINE;
+ int sec_meta, blk_meta;
+
unsigned int rb_windows;
- rl->high = rl->total_blocks / PBLK_USER_HIGH_THRS;
- rl->high_pw = get_count_order(rl->high);
+ /* Consider sectors used for metadata */
+ sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
+ blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);
- rl->low = rl->total_blocks / PBLK_USER_LOW_THRS;
- if (rl->low < min_blocks)
- rl->low = min_blocks;
+ rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
+ rl->high_pw = get_count_order(rl->high);
rl->rsv_blocks = min_blocks;
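
[Note] The rate limiter's high watermark is now derived from the over-provisioned block budget minus the blocks consumed by per-line metadata, instead of a fixed fraction of total blocks (rl->low is dropped entirely). A worked example of the formula, with all geometry numbers invented:

	/* Illustrative computation of rl->high after this patch.
	 * Geometry values are invented; only the formula mirrors the code.
	 */
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int smeta_sec = 8, emeta_sec0 = 64;	/* per-line metadata sectors */
		int nr_free_lines = 100;
		int sec_per_chk = 1024;			/* sectors per chunk */
		int op_blks = 1400;			/* OP budget (e.g. 11%) */
		int blk_per_line = 128;

		int sec_meta = (smeta_sec + emeta_sec0) * nr_free_lines; /* 7200 */
		int blk_meta = DIV_ROUND_UP(sec_meta, sec_per_chk);      /* 8 */
		int high = op_blks - blk_meta - blk_per_line;            /* 1264 */

		printf("high watermark: %d blocks\n", high);
		return 0;
	}
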
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index cd49e8875d4e..620bab853579 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -28,7 +28,7 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
ssize_t sz = 0;
int i;
- for (i = 0; i < geo->nr_luns; i++) {
+ for (i = 0; i < geo->all_luns; i++) {
int active = 1;
rlun = &pblk->luns[i];
@@ -49,11 +49,12 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
static ssize_t pblk_sysfs_rate_limiter(struct pblk *pblk, char *page)
{
- int free_blocks, total_blocks;
+ int free_blocks, free_user_blocks, total_blocks;
int rb_user_max, rb_user_cnt;
int rb_gc_max, rb_gc_cnt, rb_budget, rb_state;
- free_blocks = atomic_read(&pblk->rl.free_blocks);
+ free_blocks = pblk_rl_nr_free_blks(&pblk->rl);
+ free_user_blocks = pblk_rl_nr_user_free_blks(&pblk->rl);
rb_user_max = pblk->rl.rb_user_max;
rb_user_cnt = atomic_read(&pblk->rl.rb_user_cnt);
rb_gc_max = pblk->rl.rb_gc_max;
@@ -64,16 +65,16 @@ static ssize_t pblk_sysfs_rate_limiter(struct pblk *pblk, char *page)
total_blocks = pblk->rl.total_blocks;
return snprintf(page, PAGE_SIZE,
- "u:%u/%u,gc:%u/%u(%u/%u)(stop:<%u,full:>%u,free:%d/%d)-%d\n",
+ "u:%u/%u,gc:%u/%u(%u)(stop:<%u,full:>%u,free:%d/%d/%d)-%d\n",
rb_user_cnt,
rb_user_max,
rb_gc_cnt,
rb_gc_max,
rb_state,
rb_budget,
- pblk->rl.low,
pblk->rl.high,
free_blocks,
+ free_user_blocks,
total_blocks,
READ_ONCE(pblk->rl.rb_user_active));
}
@@ -238,7 +239,7 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
sz = snprintf(page, PAGE_SIZE - sz,
"line: nluns:%d, nblks:%d, nsecs:%d\n",
- geo->nr_luns, lm->blk_per_line, lm->sec_per_line);
+ geo->all_luns, lm->blk_per_line, lm->sec_per_line);
sz += snprintf(page + sz, PAGE_SIZE - sz,
"lines:d:%d,l:%d-f:%d,m:%d/%d,c:%d,b:%d,co:%d(d:%d,l:%d)t:%d\n",
@@ -287,7 +288,7 @@ static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page)
"blk_line:%d, sec_line:%d, sec_blk:%d\n",
lm->blk_per_line,
lm->sec_per_line,
- geo->sec_per_blk);
+ geo->sec_per_chk);
return sz;
}
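
[Note] With the extra free_user_blocks field, the rate limiter line rendered by pblk_sysfs_rate_limiter() reads along these lines (every value below is invented):

	u:12/64,gc:4/32(1)(stop:<256,full:>1264,free:1500/1264/2048)-1

i.e. user 12 of 64 buffer entries, GC 4 of 32, state 1, budget 256, high watermark 1264, then free / free-user / total blocks, and the user-active flag.
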
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 6c1cafafef53..aae86ed60b98 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -21,13 +21,28 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_c_ctx *c_ctx)
{
struct bio *original_bio;
+ struct pblk_rb *rwb = &pblk->rwb;
unsigned long ret;
int i;
for (i = 0; i < c_ctx->nr_valid; i++) {
struct pblk_w_ctx *w_ctx;
+ int pos = c_ctx->sentry + i;
+ int flags;
+
+ w_ctx = pblk_rb_w_ctx(rwb, pos);
+ flags = READ_ONCE(w_ctx->flags);
+
+ if (flags & PBLK_FLUSH_ENTRY) {
+ flags &= ~PBLK_FLUSH_ENTRY;
+ /* Release flags on context. Protect from writes */
+ smp_store_release(&w_ctx->flags, flags);
+
+#ifdef CONFIG_NVM_DEBUG
+ atomic_dec(&rwb->inflight_flush_point);
+#endif
+ }
- w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);
while ((original_bio = bio_list_pop(&w_ctx->bios)))
bio_endio(original_bio);
}
@@ -439,7 +454,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
struct pblk_line *meta_line;
int err;
- ppa_set_empty(&erase_ppa);
+ pblk_ppa_set_empty(&erase_ppa);
/* Assign lbas to ppas and populate request structure */
err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
@@ -457,7 +472,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
return NVM_IO_ERR;
}
- if (!ppa_empty(erase_ppa)) {
+ if (!pblk_ppa_empty(erase_ppa)) {
/* Submit erase for next data line */
if (pblk_blk_erase_async(pblk, erase_ppa)) {
struct pblk_line *e_line = pblk_line_get_erase(pblk);
@@ -508,7 +523,7 @@ static int pblk_submit_write(struct pblk *pblk)
if (!secs_avail)
return 1;
- secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
+ secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
return 1;
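
[Note] On the write completion side, pblk_end_w_bio() now clears PBLK_FLUSH_ENTRY per entry: the flag word is read with READ_ONCE() and published back with smp_store_release(), so a thread that observes the cleared flag also observes everything the completion did before it. A userspace model of the same handoff, using C11 atomics as stand-ins for the kernel primitives:

	/* Model of the PBLK_FLUSH_ENTRY handoff in pblk_end_w_bio().
	 * C11 atomics stand in for the kernel's READ_ONCE() and
	 * smp_store_release(); the flag bit value is illustrative.
	 */
	#include <assert.h>
	#include <stdatomic.h>

	#define FLUSH_ENTRY (1 << 0)

	static void complete_entry(atomic_int *flags)
	{
		/* plain load, as with READ_ONCE(w_ctx->flags) */
		int f = atomic_load_explicit(flags, memory_order_relaxed);

		if (f & FLUSH_ENTRY) {
			f &= ~FLUSH_ENTRY;
			/* release store, as with smp_store_release() */
			atomic_store_explicit(flags, f, memory_order_release);
		}
	}

	int main(void)
	{
		atomic_int flags = FLUSH_ENTRY;

		complete_entry(&flags);
		assert(!(atomic_load(&flags) & FLUSH_ENTRY));
		return 0;
	}
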
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 59a64d461a5d..8c357fb6538e 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -51,17 +51,16 @@
#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)
-#define pblk_for_each_lun(pblk, rlun, i) \
- for ((i) = 0, rlun = &(pblk)->luns[0]; \
- (i) < (pblk)->nr_luns; (i)++, rlun = &(pblk)->luns[(i)])
-
/* Static pool sizes */
#define PBLK_GEN_WS_POOL_SIZE (2)
+#define PBLK_DEFAULT_OP (11)
+
enum {
PBLK_READ = READ,
PBLK_WRITE = WRITE,/* Write from write buffer */
PBLK_WRITE_INT, /* Internal write - no write buffer */
+ PBLK_READ_RECOV, /* Recovery read - errors allowed */
PBLK_ERASE,
};
@@ -114,6 +113,7 @@ struct pblk_c_ctx {
/* read context */
struct pblk_g_ctx {
void *private;
+ unsigned long start_time;
u64 lba;
};
@@ -170,7 +170,7 @@ struct pblk_rb {
* the last submitted entry that has
* been successfully persisted to media
*/
- unsigned int sync_point; /* Sync point - last entry that must be
+ unsigned int flush_point; /* Flush point - last entry that must be
* flushed to the media. Used with
* REQ_FLUSH and REQ_FUA
*/
@@ -193,7 +193,7 @@ struct pblk_rb {
spinlock_t s_lock; /* Sync lock */
#ifdef CONFIG_NVM_DEBUG
- atomic_t inflight_sync_point; /* Not served REQ_FLUSH | REQ_FUA */
+ atomic_t inflight_flush_point; /* Not yet served REQ_FLUSH | REQ_FUA */
#endif
};
@@ -256,9 +256,6 @@ struct pblk_rl {
unsigned int high; /* Upper threshold for rate limiter (free run -
* user I/O rate limiter
*/
- unsigned int low; /* Lower threshold for rate limiter (user I/O
- * rate limiter - stall)
- */
unsigned int high_pw; /* High rounded up as a power of 2 */
#define PBLK_USER_HIGH_THRS 8 /* Begin write limit at 12% available blks */
@@ -292,7 +289,9 @@ struct pblk_rl {
unsigned long long nr_secs;
unsigned long total_blocks;
- atomic_t free_blocks;
+
+ atomic_t free_blocks; /* Total number of free blocks (+ OP) */
+ atomic_t free_user_blocks; /* Number of user free blocks (no OP) */
};
#define PBLK_LINE_EMPTY (~0U)
@@ -583,7 +582,9 @@ struct pblk {
*/
sector_t capacity; /* Device capacity when bad blocks are subtracted */
- int over_pct; /* Percentage of device used for over-provisioning */
+
+ int op; /* Percentage of device used for over-provisioning */
+ int op_blks; /* Number of blocks used for over-provisioning */
/* pblk provisioning values. Used by rate limiter */
struct pblk_rl rl;
@@ -691,7 +692,7 @@ unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
struct ppa_addr *ppa);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
-unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb);
+unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
unsigned int pblk_rb_read_count(struct pblk_rb *rb);
unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
@@ -812,7 +813,7 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
void pblk_submit_rec(struct work_struct *work);
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
-__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta);
+int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
struct pblk_rec_ctx *recovery, u64 *comp_bits,
unsigned int comp);
@@ -843,6 +844,7 @@ void pblk_rl_free(struct pblk_rl *rl);
void pblk_rl_update_rates(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
+unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
@@ -851,7 +853,8 @@ void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
int pblk_rl_max_io(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);
+void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
+ bool used);
int pblk_rl_is_limit(struct pblk_rl *rl);
/*
@@ -907,28 +910,47 @@ static inline int pblk_pad_distance(struct pblk *pblk)
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- return NVM_MEM_PAGE_WRITE * geo->nr_luns * geo->sec_per_pl;
+ return NVM_MEM_PAGE_WRITE * geo->all_luns * geo->sec_per_pl;
}
-static inline int pblk_dev_ppa_to_line(struct ppa_addr p)
+static inline int pblk_ppa_to_line(struct ppa_addr p)
{
return p.g.blk;
}
-static inline int pblk_tgt_ppa_to_line(struct ppa_addr p)
+static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
- return p.g.blk;
+ return p.g.lun * geo->nr_chnls + p.g.ch;
}
-static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
+static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
+ u64 line_id)
{
- return p.g.lun * geo->nr_chnls + p.g.ch;
+ struct ppa_addr ppa;
+
+ ppa.ppa = 0;
+ ppa.g.blk = line_id;
+ ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
+ ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
+ ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
+ ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
+ ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;
+
+ return ppa;
}
-/* A block within a line corresponds to the lun */
-static inline int pblk_dev_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
+static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
+ struct ppa_addr p)
{
- return p.g.lun * geo->nr_chnls + p.g.ch;
+ u64 paddr;
+
+ paddr = (u64)p.g.pg << pblk->ppaf.pg_offset;
+ paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
+ paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
+ paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
+ paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;
+
+ return paddr;
}
static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
@@ -960,24 +982,6 @@ static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
return ppa64;
}
-static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
- sector_t lba)
-{
- struct ppa_addr ppa;
-
- if (pblk->ppaf_bitsize < 32) {
- u32 *map = (u32 *)pblk->trans_map;
-
- ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
- } else {
- struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
-
- ppa = map[lba];
- }
-
- return ppa;
-}
-
static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
u32 ppa32 = 0;
@@ -999,33 +1003,36 @@ static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
return ppa32;
}
-static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
- struct ppa_addr ppa)
+static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
+ sector_t lba)
{
+ struct ppa_addr ppa;
+
if (pblk->ppaf_bitsize < 32) {
u32 *map = (u32 *)pblk->trans_map;
- map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
+ ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
} else {
- u64 *map = (u64 *)pblk->trans_map;
+ struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
- map[lba] = ppa.ppa;
+ ppa = map[lba];
}
+
+ return ppa;
}
-static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
- struct ppa_addr p)
+static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
+ struct ppa_addr ppa)
{
- u64 paddr;
+ if (pblk->ppaf_bitsize < 32) {
+ u32 *map = (u32 *)pblk->trans_map;
- paddr = 0;
- paddr |= (u64)p.g.pg << pblk->ppaf.pg_offset;
- paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
- paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
- paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
- paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;
+ map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
+ } else {
+ u64 *map = (u64 *)pblk->trans_map;
- return paddr;
+ map[lba] = ppa.ppa;
+ }
}
static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
@@ -1040,10 +1047,7 @@ static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
{
- if (lppa.ppa == rppa.ppa)
- return true;
-
- return false;
+ return (lppa.ppa == rppa.ppa);
}
static inline int pblk_addr_in_cache(struct ppa_addr ppa)
@@ -1066,32 +1070,6 @@ static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
return p;
}
-static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
- u64 line_id)
-{
- struct ppa_addr ppa;
-
- ppa.ppa = 0;
- ppa.g.blk = line_id;
- ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
- ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
- ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
- ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
- ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;
-
- return ppa;
-}
-
-static inline struct ppa_addr addr_to_pblk_ppa(struct pblk *pblk, u64 paddr,
- u64 line_id)
-{
- struct ppa_addr ppa;
-
- ppa = addr_to_gen_ppa(pblk, paddr, line_id);
-
- return ppa;
-}
-
static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
struct line_header *header)
{
@@ -1212,10 +1190,10 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
if (!ppa->c.is_cached &&
ppa->g.ch < geo->nr_chnls &&
- ppa->g.lun < geo->luns_per_chnl &&
+ ppa->g.lun < geo->nr_luns &&
ppa->g.pl < geo->nr_planes &&
- ppa->g.blk < geo->blks_per_lun &&
- ppa->g.pg < geo->pgs_per_blk &&
+ ppa->g.blk < geo->nr_chks &&
+ ppa->g.pg < geo->ws_per_chk &&
ppa->g.sec < geo->sec_per_pg)
continue;
@@ -1245,7 +1223,7 @@ static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
for (i = 0; i < rqd->nr_ppas; i++) {
ppa = ppa_list[i];
- line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(ppa)];
spin_lock(&line->lock);
if (line->state != PBLK_LINESTATE_OPEN) {
@@ -1288,11 +1266,6 @@ static inline unsigned int pblk_get_secs(struct bio *bio)
return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
}
-static inline sector_t pblk_get_sector(sector_t lba)
-{
- return lba * NR_PHY_IN_LOG;
-}
-
static inline void pblk_setup_uuid(struct pblk *pblk)
{
uuid_le uuid;
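
[Note] The helpers consolidated above form a pack/unpack pair: addr_to_gen_ppa() scatters a line-local address into the device's ppa fields using the pblk->ppaf masks and offsets, and pblk_dev_ppa_to_line_addr() gathers them back, so the composition is the identity (which is why the thin addr_to_pblk_ppa() wrapper could be deleted). A self-contained model with an invented two-field layout:

	/* Round-trip model of addr_to_gen_ppa() / pblk_dev_ppa_to_line_addr().
	 * The field layout below is invented; pblk derives its masks and
	 * offsets from the device geometry (pblk->ppaf).
	 */
	#include <assert.h>
	#include <stdint.h>

	#define SEC_MASK  0x00fULL	/* bits 0-3:  "sector" field */
	#define SEC_OFF   0
	#define PG_MASK   0xff0ULL	/* bits 4-11: "page" field */
	#define PG_OFF    4

	static uint64_t pack(uint64_t paddr)	/* ~addr_to_gen_ppa() */
	{
		uint64_t ppa = 0;

		ppa |= ((paddr & PG_MASK) >> PG_OFF) << 32;	/* device pg bits */
		ppa |= ((paddr & SEC_MASK) >> SEC_OFF);		/* device sec bits */
		return ppa;
	}

	static uint64_t unpack(uint64_t ppa)	/* ~pblk_dev_ppa_to_line_addr() */
	{
		return (((ppa >> 32) & 0xff) << PG_OFF) |
		       ((ppa & 0xf) << SEC_OFF);
	}

	int main(void)
	{
		uint64_t paddr;

		for (paddr = 0; paddr < 0x1000; paddr++)
			assert(unpack(pack(paddr)) == paddr);	/* identity */
		return 0;
	}
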
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
deleted file mode 100644
index 0993c14be860..000000000000
--- a/drivers/lightnvm/rrpc.c
+++ /dev/null
@@ -1,1625 +0,0 @@
-/*
- * Copyright (C) 2015 IT University of Copenhagen
- * Initial release: Matias Bjorling <m@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
- */
-
-#include "rrpc.h"
-
-static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
-static DECLARE_RWSEM(rrpc_lock);
-
-static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags);
-
-#define rrpc_for_each_lun(rrpc, rlun, i) \
- for ((i) = 0, rlun = &(rrpc)->luns[0]; \
- (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
-
-static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block *rblk = a->rblk;
- unsigned int pg_offset;
-
- lockdep_assert_held(&rrpc->rev_lock);
-
- if (a->addr == ADDR_EMPTY || !rblk)
- return;
-
- spin_lock(&rblk->lock);
-
- div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
- WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
- rblk->nr_invalid_pages++;
-
- spin_unlock(&rblk->lock);
-
- rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
-}
-
-static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
- unsigned int len)
-{
- sector_t i;
-
- spin_lock(&rrpc->rev_lock);
- for (i = slba; i < slba + len; i++) {
- struct rrpc_addr *gp = &rrpc->trans_map[i];
-
- rrpc_page_invalidate(rrpc, gp);
- gp->rblk = NULL;
- }
- spin_unlock(&rrpc->rev_lock);
-}
-
-static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
- sector_t laddr, unsigned int pages)
-{
- struct nvm_rq *rqd;
- struct rrpc_inflight_rq *inf;
-
- rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
- if (!rqd)
- return ERR_PTR(-ENOMEM);
-
- inf = rrpc_get_inflight_rq(rqd);
- if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
- mempool_free(rqd, rrpc->rq_pool);
- return NULL;
- }
-
- return rqd;
-}
-
-static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
-
- rrpc_unlock_laddr(rrpc, inf);
-
- mempool_free(rqd, rrpc->rq_pool);
-}
-
-static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
-{
- sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
- sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
- struct nvm_rq *rqd;
-
- while (1) {
- rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
- if (rqd)
- break;
-
- schedule();
- }
-
- if (IS_ERR(rqd)) {
- pr_err("rrpc: unable to acquire inflight IO\n");
- bio_io_error(bio);
- return;
- }
-
- rrpc_invalidate_range(rrpc, slba, len);
- rrpc_inflight_laddr_release(rrpc, rqd);
-}
-
-static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
-
- return (rblk->next_page == dev->geo.sec_per_blk);
-}
-
-/* Calculate relative addr for the given block, considering instantiated LUNs */
-static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun = rblk->rlun;
-
- return rlun->id * dev->geo.sec_per_blk;
-}
-
-static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
- struct rrpc_addr *gp)
-{
- struct rrpc_block *rblk = gp->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
- u64 addr = gp->addr;
- struct ppa_addr paddr;
-
- paddr.ppa = addr;
- paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
- paddr.g.ch = rlun->bppa.g.ch;
- paddr.g.lun = rlun->bppa.g.lun;
- paddr.g.blk = rblk->id;
-
- return paddr;
-}
-
-/* requires lun->lock taken */
-static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
- struct rrpc_block **cur_rblk)
-{
- struct rrpc *rrpc = rlun->rrpc;
-
- if (*cur_rblk) {
- spin_lock(&(*cur_rblk)->lock);
- WARN_ON(!block_is_full(rrpc, *cur_rblk));
- spin_unlock(&(*cur_rblk)->lock);
- }
- *cur_rblk = new_rblk;
-}
-
-static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
- struct rrpc_lun *rlun)
-{
- struct rrpc_block *rblk = NULL;
-
- if (list_empty(&rlun->free_list))
- goto out;
-
- rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);
-
- list_move_tail(&rblk->list, &rlun->used_list);
- rblk->state = NVM_BLK_ST_TGT;
- rlun->nr_free_blocks--;
-
-out:
- return rblk;
-}
-
-static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
- unsigned long flags)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block *rblk;
- int is_gc = flags & NVM_IOTYPE_GC;
-
- spin_lock(&rlun->lock);
- if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
- pr_err("nvm: rrpc: cannot give block to non GC request\n");
- spin_unlock(&rlun->lock);
- return NULL;
- }
-
- rblk = __rrpc_get_blk(rrpc, rlun);
- if (!rblk) {
- pr_err("nvm: rrpc: cannot get new block\n");
- spin_unlock(&rlun->lock);
- return NULL;
- }
- spin_unlock(&rlun->lock);
-
- bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
- rblk->next_page = 0;
- rblk->nr_invalid_pages = 0;
- atomic_set(&rblk->data_cmnt_size, 0);
-
- return rblk;
-}
-
-static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct rrpc_lun *rlun = rblk->rlun;
-
- spin_lock(&rlun->lock);
- if (rblk->state & NVM_BLK_ST_TGT) {
- list_move_tail(&rblk->list, &rlun->free_list);
- rlun->nr_free_blocks++;
- rblk->state = NVM_BLK_ST_FREE;
- } else if (rblk->state & NVM_BLK_ST_BAD) {
- list_move_tail(&rblk->list, &rlun->bb_list);
- rblk->state = NVM_BLK_ST_BAD;
- } else {
- WARN_ON_ONCE(1);
- pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id, rblk->state);
- list_move_tail(&rblk->list, &rlun->bb_list);
- }
- spin_unlock(&rlun->lock);
-}
-
-static void rrpc_put_blks(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- if (rlun->cur)
- rrpc_put_blk(rrpc, rlun->cur);
- if (rlun->gc_cur)
- rrpc_put_blk(rrpc, rlun->gc_cur);
- }
-}
-
-static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
-{
- int next = atomic_inc_return(&rrpc->next_lun);
-
- return &rrpc->luns[next % rrpc->nr_luns];
-}
-
-static void rrpc_gc_kick(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- unsigned int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- queue_work(rrpc->krqd_wq, &rlun->ws_gc);
- }
-}
-
-/*
- * timed GC every interval.
- */
-static void rrpc_gc_timer(struct timer_list *t)
-{
- struct rrpc *rrpc = from_timer(rrpc, t, gc_timer);
-
- rrpc_gc_kick(rrpc);
- mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
-}
-
-static void rrpc_end_sync_bio(struct bio *bio)
-{
- struct completion *waiting = bio->bi_private;
-
- if (bio->bi_status)
- pr_err("nvm: gc request failed (%u).\n", bio->bi_status);
-
- complete(waiting);
-}
-
-/*
- * rrpc_move_valid_pages -- migrate live data off the block
- * @rrpc: the 'rrpc' structure
- * @block: the block from which to migrate live pages
- *
- * Description:
- * GC algorithms may call this function to migrate remaining live
- * pages off the block prior to erasing it. This function blocks
- * further execution until the operation is complete.
- */
-static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct request_queue *q = dev->q;
- struct rrpc_rev_addr *rev;
- struct nvm_rq *rqd;
- struct bio *bio;
- struct page *page;
- int slot;
- int nr_sec_per_blk = dev->geo.sec_per_blk;
- u64 phys_addr;
- DECLARE_COMPLETION_ONSTACK(wait);
-
- if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
- return 0;
-
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio) {
- pr_err("nvm: could not alloc bio to gc\n");
- return -ENOMEM;
- }
-
- page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
-
- while ((slot = find_first_zero_bit(rblk->invalid_pages,
- nr_sec_per_blk)) < nr_sec_per_blk) {
-
- /* Lock laddr */
- phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;
-
-try:
- spin_lock(&rrpc->rev_lock);
- /* Get logical address from physical to logical table */
- rev = &rrpc->rev_trans_map[phys_addr];
- /* already updated by previous regular write */
- if (rev->addr == ADDR_EMPTY) {
- spin_unlock(&rrpc->rev_lock);
- continue;
- }
-
- rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
- if (IS_ERR_OR_NULL(rqd)) {
- spin_unlock(&rrpc->rev_lock);
- schedule();
- goto try;
- }
-
- spin_unlock(&rrpc->rev_lock);
-
- /* Perform read to do GC */
- bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
- bio->bi_private = &wait;
- bio->bi_end_io = rrpc_end_sync_bio;
-
- /* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
- bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
-
- if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
- pr_err("rrpc: gc read failed.\n");
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
- wait_for_completion_io(&wait);
- if (bio->bi_status) {
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
-
- bio_reset(bio);
- reinit_completion(&wait);
-
- bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- bio->bi_private = &wait;
- bio->bi_end_io = rrpc_end_sync_bio;
-
- bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
-
- /* turn the command around and write the data back to a new
- * address
- */
- if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
- pr_err("rrpc: gc write failed.\n");
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
- wait_for_completion_io(&wait);
-
- rrpc_inflight_laddr_release(rrpc, rqd);
- if (bio->bi_status)
- goto finished;
-
- bio_reset(bio);
- }
-
-finished:
- mempool_free(page, rrpc->page_pool);
- bio_put(bio);
-
- if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
- pr_err("nvm: failed to garbage collect block\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static void rrpc_block_gc(struct work_struct *work)
-{
- struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
- ws_gc);
- struct rrpc *rrpc = gcb->rrpc;
- struct rrpc_block *rblk = gcb->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
- struct ppa_addr ppa;
-
- mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-
- if (rrpc_move_valid_pages(rrpc, rblk))
- goto put_back;
-
- ppa.ppa = 0;
- ppa.g.ch = rlun->bppa.g.ch;
- ppa.g.lun = rlun->bppa.g.lun;
- ppa.g.blk = rblk->id;
-
- if (nvm_erase_sync(rrpc->dev, &ppa, 1))
- goto put_back;
-
- rrpc_put_blk(rrpc, rblk);
-
- return;
-
-put_back:
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->prio_list);
- spin_unlock(&rlun->lock);
-}
-
-/* the block with highest number of invalid pages, will be in the beginning
- * of the list
- */
-static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
- struct rrpc_block *rb)
-{
- if (ra->nr_invalid_pages == rb->nr_invalid_pages)
- return ra;
-
- return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
-}
-
-/* linearly find the block with highest number of invalid pages
- * requires lun->lock
- */
-static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
-{
- struct list_head *prio_list = &rlun->prio_list;
- struct rrpc_block *rblk, *max;
-
- BUG_ON(list_empty(prio_list));
-
- max = list_first_entry(prio_list, struct rrpc_block, prio);
- list_for_each_entry(rblk, prio_list, prio)
- max = rblk_max_invalid(max, rblk);
-
- return max;
-}
-
-static void rrpc_lun_gc(struct work_struct *work)
-{
- struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
- struct rrpc *rrpc = rlun->rrpc;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block_gc *gcb;
- unsigned int nr_blocks_need;
-
- nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;
-
- if (nr_blocks_need < rrpc->nr_luns)
- nr_blocks_need = rrpc->nr_luns;
-
- spin_lock(&rlun->lock);
- while (nr_blocks_need > rlun->nr_free_blocks &&
- !list_empty(&rlun->prio_list)) {
- struct rrpc_block *rblk = block_prio_find_max(rlun);
-
- if (!rblk->nr_invalid_pages)
- break;
-
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb)
- break;
-
- list_del_init(&rblk->prio);
-
- WARN_ON(!block_is_full(rrpc, rblk));
-
- pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-
- gcb->rrpc = rrpc;
- gcb->rblk = rblk;
- INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
-
- queue_work(rrpc->kgc_wq, &gcb->ws_gc);
-
- nr_blocks_need--;
- }
- spin_unlock(&rlun->lock);
-
- /* TODO: Hint that request queue can be started again */
-}
-
-static void rrpc_gc_queue(struct work_struct *work)
-{
- struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
- ws_gc);
- struct rrpc *rrpc = gcb->rrpc;
- struct rrpc_block *rblk = gcb->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
-
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->prio_list);
- spin_unlock(&rlun->lock);
-
- mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-}
-
-static const struct block_device_operations rrpc_fops = {
- .owner = THIS_MODULE,
-};
-
-static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
-{
- unsigned int i;
- struct rrpc_lun *rlun, *max_free;
-
- if (!is_gc)
- return get_next_lun(rrpc);
-
- /* during GC, we don't care about RR, instead we want to make
- * sure that we maintain evenness between the block luns.
- */
- max_free = &rrpc->luns[0];
- /* prevent GC-ing lun from devouring pages of a lun with
- * little free blocks. We don't take the lock as we only need an
- * estimate.
- */
- rrpc_for_each_lun(rrpc, rlun, i) {
- if (rlun->nr_free_blocks > max_free->nr_free_blocks)
- max_free = rlun;
- }
-
- return max_free;
-}
-
-static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
- struct rrpc_block *rblk, u64 paddr)
-{
- struct rrpc_addr *gp;
- struct rrpc_rev_addr *rev;
-
- BUG_ON(laddr >= rrpc->nr_sects);
-
- gp = &rrpc->trans_map[laddr];
- spin_lock(&rrpc->rev_lock);
- if (gp->rblk)
- rrpc_page_invalidate(rrpc, gp);
-
- gp->addr = paddr;
- gp->rblk = rblk;
-
- rev = &rrpc->rev_trans_map[gp->addr];
- rev->addr = laddr;
- spin_unlock(&rrpc->rev_lock);
-
- return gp;
-}
-
-static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- u64 addr = ADDR_EMPTY;
-
- spin_lock(&rblk->lock);
- if (block_is_full(rrpc, rblk))
- goto out;
-
- addr = rblk->next_page;
-
- rblk->next_page++;
-out:
- spin_unlock(&rblk->lock);
- return addr;
-}
-
-/* Map logical address to a physical page. The mapping implements a round robin
- * approach and allocates a page from the next lun available.
- *
- * Returns rrpc_addr with the physical address and block. Returns NULL if no
- * blocks in the next rlun are available.
- */
-static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
- int is_gc)
-{
- struct nvm_tgt_dev *tgt_dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk, **cur_rblk;
- struct rrpc_addr *p;
- struct ppa_addr ppa;
- u64 paddr;
- int gc_force = 0;
-
- ppa.ppa = ADDR_EMPTY;
- rlun = rrpc_get_lun_rr(rrpc, is_gc);
-
- if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
- return ppa;
-
- /*
- * page allocation steps:
- * 1. Try to allocate new page from current rblk
- * 2a. If succeed, proceed to map it in and return
- * 2b. If that fails, first try to allocate a new block from the media manager,
- * and then retry step 1. Retry until the normal block pool is
- * exhausted.
- * 3. If exhausted, and garbage collector is requesting the block,
- * go to the reserved block and retry step 1.
- * In the case that this fails as well, or it is not GC
- * requesting, report not able to retrieve a block and let the
- * caller handle further processing.
- */
-
- spin_lock(&rlun->lock);
- cur_rblk = &rlun->cur;
- rblk = rlun->cur;
-retry:
- paddr = rrpc_alloc_addr(rrpc, rblk);
-
- if (paddr != ADDR_EMPTY)
- goto done;
-
- if (!list_empty(&rlun->wblk_list)) {
-new_blk:
- rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
- prio);
- rrpc_set_lun_cur(rlun, rblk, cur_rblk);
- list_del(&rblk->prio);
- goto retry;
- }
- spin_unlock(&rlun->lock);
-
- rblk = rrpc_get_blk(rrpc, rlun, gc_force);
- if (rblk) {
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->wblk_list);
- /*
- * another thread might already have added a new block,
- * Therefore, make sure that one is used, instead of the
- * one just added.
- */
- goto new_blk;
- }
-
- if (unlikely(is_gc) && !gc_force) {
- /* retry from emergency gc block */
- cur_rblk = &rlun->gc_cur;
- rblk = rlun->gc_cur;
- gc_force = 1;
- spin_lock(&rlun->lock);
- goto retry;
- }
-
- pr_err("rrpc: failed to allocate new block\n");
- return ppa;
-done:
- spin_unlock(&rlun->lock);
- p = rrpc_update_map(rrpc, laddr, rblk, paddr);
- if (!p)
- return ppa;
-
- /* return global address */
- return rrpc_ppa_to_gaddr(tgt_dev, p);
-}
-
-static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct rrpc_block_gc *gcb;
-
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb) {
- pr_err("rrpc: unable to queue block for gc.");
- return;
- }
-
- gcb->rrpc = rrpc;
- gcb->rblk = rblk;
-
- INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
- queue_work(rrpc->kgc_wq, &gcb->ws_gc);
-}
-
-static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
-{
- struct rrpc_lun *rlun = NULL;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
- rrpc->luns[i].bppa.g.lun == p.g.lun) {
- rlun = &rrpc->luns[i];
- break;
- }
- }
-
- return rlun;
-}
-
-static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
-
- rlun = rrpc_ppa_to_lun(rrpc, ppa);
- rblk = &rlun->blocks[ppa.g.blk];
- rblk->state = NVM_BLK_ST_BAD;
-
- nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
-}
-
-static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- void *comp_bits = &rqd->ppa_status;
- struct ppa_addr ppa, prev_ppa;
- int nr_ppas = rqd->nr_ppas;
- int bit;
-
- if (rqd->nr_ppas == 1)
- __rrpc_mark_bad_block(rrpc, rqd->ppa_addr);
-
- ppa_set_empty(&prev_ppa);
- bit = -1;
- while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
- ppa = rqd->ppa_list[bit];
- if (ppa_cmp_blk(ppa, prev_ppa))
- continue;
-
- __rrpc_mark_bad_block(rrpc, ppa);
- }
-}
-
-static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
- sector_t laddr, uint8_t npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_addr *p;
- struct rrpc_block *rblk;
- int cmnt_size, i;
-
- for (i = 0; i < npages; i++) {
- p = &rrpc->trans_map[laddr + i];
- rblk = p->rblk;
-
- cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
- if (unlikely(cmnt_size == dev->geo.sec_per_blk))
- rrpc_run_gc(rrpc, rblk);
- }
-}
-
-static void rrpc_end_io(struct nvm_rq *rqd)
-{
- struct rrpc *rrpc = rqd->private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
- uint8_t npages = rqd->nr_ppas;
- sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
-
- if (bio_data_dir(rqd->bio) == WRITE) {
- if (rqd->error == NVM_RSP_ERR_FAILWRITE)
- rrpc_mark_bad_block(rrpc, rqd);
-
- rrpc_end_io_write(rrpc, rrqd, laddr, npages);
- }
-
- bio_put(rqd->bio);
-
- if (rrqd->flags & NVM_IOTYPE_GC)
- return;
-
- rrpc_unlock_rq(rrpc, rqd);
-
- if (npages > 1)
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
-
- mempool_free(rqd, rrpc->rq_pool);
-}
-
-static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, int npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct rrpc_addr *gp;
- sector_t laddr = rrpc_get_laddr(bio);
- int is_gc = flags & NVM_IOTYPE_GC;
- int i;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
- return NVM_IO_REQUEUE;
- }
-
- for (i = 0; i < npages; i++) {
- /* We assume that mapping occurs at 4KB granularity */
- BUG_ON(!(laddr + i < rrpc->nr_sects));
- gp = &rrpc->trans_map[laddr + i];
-
- if (gp->rblk) {
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
- } else {
- BUG_ON(is_gc);
- rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- return NVM_IO_DONE;
- }
- }
-
- rqd->opcode = NVM_OP_HBREAD;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
- unsigned long flags)
-{
- int is_gc = flags & NVM_IOTYPE_GC;
- sector_t laddr = rrpc_get_laddr(bio);
- struct rrpc_addr *gp;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
- return NVM_IO_REQUEUE;
-
- BUG_ON(!(laddr < rrpc->nr_sects));
- gp = &rrpc->trans_map[laddr];
-
- if (gp->rblk) {
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
- } else {
- BUG_ON(is_gc);
- rrpc_unlock_rq(rrpc, rqd);
- return NVM_IO_DONE;
- }
-
- rqd->opcode = NVM_OP_HBREAD;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, int npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct ppa_addr p;
- sector_t laddr = rrpc_get_laddr(bio);
- int is_gc = flags & NVM_IOTYPE_GC;
- int i;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
- return NVM_IO_REQUEUE;
- }
-
- for (i = 0; i < npages; i++) {
- /* We assume that mapping occurs at 4KB granularity */
- p = rrpc_map_page(rrpc, laddr + i, is_gc);
- if (p.ppa == ADDR_EMPTY) {
- BUG_ON(is_gc);
- rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- rrpc_gc_kick(rrpc);
- return NVM_IO_REQUEUE;
- }
-
- rqd->ppa_list[i] = p;
- }
-
- rqd->opcode = NVM_OP_HBWRITE;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags)
-{
- struct ppa_addr p;
- int is_gc = flags & NVM_IOTYPE_GC;
- sector_t laddr = rrpc_get_laddr(bio);
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
- return NVM_IO_REQUEUE;
-
- p = rrpc_map_page(rrpc, laddr, is_gc);
- if (p.ppa == ADDR_EMPTY) {
- BUG_ON(is_gc);
- rrpc_unlock_rq(rrpc, rqd);
- rrpc_gc_kick(rrpc);
- return NVM_IO_REQUEUE;
- }
-
- rqd->ppa_addr = p;
- rqd->opcode = NVM_OP_HBWRITE;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
-
- if (npages > 1) {
- rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &rqd->dma_ppa_list);
- if (!rqd->ppa_list) {
- pr_err("rrpc: not able to allocate ppa list\n");
- return NVM_IO_ERR;
- }
-
- if (bio_op(bio) == REQ_OP_WRITE)
- return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
- npages);
-
- return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
- }
-
- if (bio_op(bio) == REQ_OP_WRITE)
- return rrpc_write_rq(rrpc, bio, rqd, flags);
-
- return rrpc_read_rq(rrpc, bio, rqd, flags);
-}
-
-static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
- uint8_t nr_pages = rrpc_get_pages(bio);
- int bio_size = bio_sectors(bio) << 9;
- int err;
-
- if (bio_size < dev->geo.sec_size)
- return NVM_IO_ERR;
- else if (bio_size > dev->geo.max_rq_size)
- return NVM_IO_ERR;
-
- err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
- if (err)
- return err;
-
- bio_get(bio);
- rqd->bio = bio;
- rqd->private = rrpc;
- rqd->nr_ppas = nr_pages;
- rqd->end_io = rrpc_end_io;
- rrq->flags = flags;
-
- err = nvm_submit_io(dev, rqd);
- if (err) {
- pr_err("rrpc: I/O submission failed: %d\n", err);
- bio_put(bio);
- if (!(flags & NVM_IOTYPE_GC)) {
- rrpc_unlock_rq(rrpc, rqd);
- if (rqd->nr_ppas > 1)
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- }
- return NVM_IO_ERR;
- }
-
- return NVM_IO_OK;
-}
-
-static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
-{
- struct rrpc *rrpc = q->queuedata;
- struct nvm_rq *rqd;
- int err;
-
- blk_queue_split(q, &bio);
-
- if (bio_op(bio) == REQ_OP_DISCARD) {
- rrpc_discard(rrpc, bio);
- return BLK_QC_T_NONE;
- }
-
- rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
- memset(rqd, 0, sizeof(struct nvm_rq));
-
- err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
- switch (err) {
- case NVM_IO_OK:
- return BLK_QC_T_NONE;
- case NVM_IO_ERR:
- bio_io_error(bio);
- break;
- case NVM_IO_DONE:
- bio_endio(bio);
- break;
- case NVM_IO_REQUEUE:
- spin_lock(&rrpc->bio_lock);
- bio_list_add(&rrpc->requeue_bios, bio);
- spin_unlock(&rrpc->bio_lock);
- queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
- break;
- }
-
- mempool_free(rqd, rrpc->rq_pool);
- return BLK_QC_T_NONE;
-}
-
-static void rrpc_requeue(struct work_struct *work)
-{
- struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
- struct bio_list bios;
- struct bio *bio;
-
- bio_list_init(&bios);
-
- spin_lock(&rrpc->bio_lock);
- bio_list_merge(&bios, &rrpc->requeue_bios);
- bio_list_init(&rrpc->requeue_bios);
- spin_unlock(&rrpc->bio_lock);
-
- while ((bio = bio_list_pop(&bios)))
- rrpc_make_rq(rrpc->disk->queue, bio);
-}
-
-static void rrpc_gc_free(struct rrpc *rrpc)
-{
- if (rrpc->krqd_wq)
- destroy_workqueue(rrpc->krqd_wq);
-
- if (rrpc->kgc_wq)
- destroy_workqueue(rrpc->kgc_wq);
-}
-
-static int rrpc_gc_init(struct rrpc *rrpc)
-{
- rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
- rrpc->nr_luns);
- if (!rrpc->krqd_wq)
- return -ENOMEM;
-
- rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
- if (!rrpc->kgc_wq)
- return -ENOMEM;
-
- timer_setup(&rrpc->gc_timer, rrpc_gc_timer, 0);
-
- return 0;
-}
-
-static void rrpc_map_free(struct rrpc *rrpc)
-{
- vfree(rrpc->rev_trans_map);
- vfree(rrpc->trans_map);
-}
-
-static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
-{
- struct rrpc *rrpc = (struct rrpc *)private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_addr *addr = rrpc->trans_map + slba;
- struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- u64 i;
-
- for (i = 0; i < nlb; i++) {
- struct ppa_addr gaddr;
- u64 pba = le64_to_cpu(entries[i]);
- unsigned int mod;
-
- /* LNVM treats address-spaces as silos, LBA and PBA are
- * equally large and zero-indexed.
- */
- if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
- pr_err("nvm: L2P data entry is out of bounds!\n");
- pr_err("nvm: Maybe loaded an old target L2P\n");
- return -EINVAL;
- }
-
- /* Address zero is a special one. The first page on a disk is
- * protected. As it often holds internal device boot
- * information.
- */
- if (!pba)
- continue;
-
- div_u64_rem(pba, rrpc->nr_sects, &mod);
-
- gaddr = rrpc_recov_addr(dev, pba);
- rlun = rrpc_ppa_to_lun(rrpc, gaddr);
- if (!rlun) {
- pr_err("rrpc: l2p corruption on lba %llu\n",
- slba + i);
- return -EINVAL;
- }
-
- rblk = &rlun->blocks[gaddr.g.blk];
- if (!rblk->state) {
- /* at this point, we don't know anything about the
- * block. It's up to the FTL on top to re-establish the
- * block state. The block is assumed to be open.
- */
- list_move_tail(&rblk->list, &rlun->used_list);
- rblk->state = NVM_BLK_ST_TGT;
- rlun->nr_free_blocks--;
- }
-
- addr[i].addr = pba;
- addr[i].rblk = rblk;
- raddr[mod].addr = slba + i;
- }
-
- return 0;
-}
-
-static int rrpc_map_init(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t i;
- int ret;
-
- rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
- if (!rrpc->trans_map)
- return -ENOMEM;
-
- rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
- * rrpc->nr_sects);
- if (!rrpc->rev_trans_map)
- return -ENOMEM;
-
- for (i = 0; i < rrpc->nr_sects; i++) {
- struct rrpc_addr *p = &rrpc->trans_map[i];
- struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
-
- p->addr = ADDR_EMPTY;
- r->addr = ADDR_EMPTY;
- }
-
- /* Bring up the mapping table from device */
- ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
- rrpc_l2p_update, rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not read L2P table.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Minimum pages needed within a lun */
-#define PAGE_POOL_SIZE 16
-#define ADDR_POOL_SIZE 64
-
-static int rrpc_core_init(struct rrpc *rrpc)
-{
- down_write(&rrpc_lock);
- if (!rrpc_gcb_cache) {
- rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
- sizeof(struct rrpc_block_gc), 0, 0, NULL);
- if (!rrpc_gcb_cache) {
- up_write(&rrpc_lock);
- return -ENOMEM;
- }
-
- rrpc_rq_cache = kmem_cache_create("rrpc_rq",
- sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
- 0, 0, NULL);
- if (!rrpc_rq_cache) {
- kmem_cache_destroy(rrpc_gcb_cache);
- up_write(&rrpc_lock);
- return -ENOMEM;
- }
- }
- up_write(&rrpc_lock);
-
- rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
- if (!rrpc->page_pool)
- return -ENOMEM;
-
- rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
- rrpc_gcb_cache);
- if (!rrpc->gcb_pool)
- return -ENOMEM;
-
- rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
- if (!rrpc->rq_pool)
- return -ENOMEM;
-
- spin_lock_init(&rrpc->inflights.lock);
- INIT_LIST_HEAD(&rrpc->inflights.reqs);
-
- return 0;
-}
-
-static void rrpc_core_free(struct rrpc *rrpc)
-{
- mempool_destroy(rrpc->page_pool);
- mempool_destroy(rrpc->gcb_pool);
- mempool_destroy(rrpc->rq_pool);
-}
-
-static void rrpc_luns_free(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- int i;
-
- if (!rrpc->luns)
- return;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- vfree(rlun->blocks);
- }
-
- kfree(rrpc->luns);
-}
-
-static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
-{
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_block *rblk;
- struct ppa_addr ppa;
- u8 *blks;
- int nr_blks;
- int i;
- int ret;
-
- if (!dev->parent->ops->get_bb_tbl)
- return 0;
-
- nr_blks = geo->blks_per_lun * geo->plane_mode;
- blks = kmalloc(nr_blks, GFP_KERNEL);
- if (!blks)
- return -ENOMEM;
-
- ppa.ppa = 0;
- ppa.g.ch = rlun->bppa.g.ch;
- ppa.g.lun = rlun->bppa.g.lun;
-
- ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
- if (ret) {
- pr_err("rrpc: could not get BB table\n");
- goto out;
- }
-
- nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
- if (nr_blks < 0) {
- ret = nr_blks;
- goto out;
- }
-
- for (i = 0; i < nr_blks; i++) {
- if (blks[i] == NVM_BLK_T_FREE)
- continue;
-
- rblk = &rlun->blocks[i];
- list_move_tail(&rblk->list, &rlun->bb_list);
- rblk->state = NVM_BLK_ST_BAD;
- rlun->nr_free_blocks--;
- }
-
-out:
- kfree(blks);
- return ret;
-}
-
-static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
-{
- rlun->bppa.ppa = 0;
- rlun->bppa.g.ch = ppa.g.ch;
- rlun->bppa.g.lun = ppa.g.lun;
-}
-
-static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_lun *rlun;
- int i, j, ret = -EINVAL;
-
- if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
- pr_err("rrpc: number of pages per block too high.");
- return -EINVAL;
- }
-
- spin_lock_init(&rrpc->rev_lock);
-
- rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
- GFP_KERNEL);
- if (!rrpc->luns)
- return -ENOMEM;
-
- /* 1:1 mapping */
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- rlun->id = i;
- rrpc_set_lun_ppa(rlun, luns[i]);
- rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
- geo->blks_per_lun);
- if (!rlun->blocks) {
- ret = -ENOMEM;
- goto err;
- }
-
- INIT_LIST_HEAD(&rlun->free_list);
- INIT_LIST_HEAD(&rlun->used_list);
- INIT_LIST_HEAD(&rlun->bb_list);
-
- for (j = 0; j < geo->blks_per_lun; j++) {
- struct rrpc_block *rblk = &rlun->blocks[j];
-
- rblk->id = j;
- rblk->rlun = rlun;
- rblk->state = NVM_BLK_T_FREE;
- INIT_LIST_HEAD(&rblk->prio);
- INIT_LIST_HEAD(&rblk->list);
- spin_lock_init(&rblk->lock);
-
- list_add_tail(&rblk->list, &rlun->free_list);
- }
-
- rlun->rrpc = rrpc;
- rlun->nr_free_blocks = geo->blks_per_lun;
- rlun->reserved_blocks = 2; /* for GC only */
-
- INIT_LIST_HEAD(&rlun->prio_list);
- INIT_LIST_HEAD(&rlun->wblk_list);
-
- INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
- spin_lock_init(&rlun->lock);
-
- if (rrpc_bb_discovery(dev, rlun))
- goto err;
-
- }
-
- return 0;
-err:
- return ret;
-}
-
-/* returns 0 on success and stores the beginning address in *begin */
-static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t size = rrpc->nr_sects * dev->geo.sec_size;
- int ret;
-
- size >>= 9;
-
- ret = nvm_get_area(dev, begin, size);
- if (!ret)
- *begin >>= (ilog2(dev->geo.sec_size) - 9);
-
- return ret;
-}
-
-static void rrpc_area_free(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);
-
- nvm_put_area(dev, begin);
-}
-
-static void rrpc_free(struct rrpc *rrpc)
-{
- rrpc_gc_free(rrpc);
- rrpc_map_free(rrpc);
- rrpc_core_free(rrpc);
- rrpc_luns_free(rrpc);
- rrpc_area_free(rrpc);
-
- kfree(rrpc);
-}
-
-static void rrpc_exit(void *private)
-{
- struct rrpc *rrpc = private;
-
- del_timer(&rrpc->gc_timer);
-
- flush_workqueue(rrpc->krqd_wq);
- flush_workqueue(rrpc->kgc_wq);
-
- rrpc_free(rrpc);
-}
-
-static sector_t rrpc_capacity(void *private)
-{
- struct rrpc *rrpc = private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t reserved, provisioned;
-
- /* cur, gc, and two emergency blocks for each lun */
- reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
- provisioned = rrpc->nr_sects - reserved;
-
- if (reserved > rrpc->nr_sects) {
- pr_err("rrpc: not enough space available to expose storage.\n");
- return 0;
- }
-
- sector_div(provisioned, 10);
- return provisioned * 9 * NR_PHY_IN_LOG;
-}
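
The capacity arithmetic above, spelled out with hypothetical numbers:

/*
 * e.g. nr_luns = 64, sec_per_blk = 4096:
 *
 *   reserved    = 64 * 4096 * 4 = 1,048,576 sectors (cur, gc, 2 spare/lun)
 *   provisioned = nr_sects - reserved
 *   exposed     = (provisioned / 10) * 9 * NR_PHY_IN_LOG
 *
 * i.e. 90% of the provisioned flash sectors, scaled by NR_PHY_IN_LOG
 * (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR = 4096 / 512 = 8) to get
 * 512-byte logical sectors.
 */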
-
-/*
- * Looks up the logical address in the reverse translation map and checks
- * whether it is still valid by comparing the logical-to-physical mapping
- * against the physical address. Pages that no longer match are marked
- * in the block's invalid_pages bitmap.
- */
-static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- int offset;
- struct rrpc_addr *laddr;
- u64 bpaddr, paddr, pladdr;
-
- bpaddr = block_to_rel_addr(rrpc, rblk);
- for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
- paddr = bpaddr + offset;
-
- pladdr = rrpc->rev_trans_map[paddr].addr;
- if (pladdr == ADDR_EMPTY)
- continue;
-
- laddr = &rrpc->trans_map[pladdr];
-
- if (paddr == laddr->addr) {
- laddr->rblk = rblk;
- } else {
- set_bit(offset, rblk->invalid_pages);
- rblk->nr_invalid_pages++;
- }
- }
-}
-
-static int rrpc_blocks_init(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- int lun_iter, blk_iter;
-
- for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
- rlun = &rrpc->luns[lun_iter];
-
- for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
- blk_iter++) {
- rblk = &rlun->blocks[blk_iter];
- rrpc_block_map_update(rrpc, rblk);
- }
- }
-
- return 0;
-}
-
-static int rrpc_luns_configure(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
-
- rblk = rrpc_get_blk(rrpc, rlun, 0);
- if (!rblk)
- goto err;
- rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
-
- /* Emergency gc block */
- rblk = rrpc_get_blk(rrpc, rlun, 1);
- if (!rblk)
- goto err;
- rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
- }
-
- return 0;
-err:
- rrpc_put_blks(rrpc);
- return -EINVAL;
-}
-
-static struct nvm_tgt_type tt_rrpc;
-
-static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
- int flags)
-{
- struct request_queue *bqueue = dev->q;
- struct request_queue *tqueue = tdisk->queue;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc *rrpc;
- sector_t soffset;
- int ret;
-
- if (!(dev->identity.dom & NVM_RSP_L2P)) {
- pr_err("nvm: rrpc: device does not support l2p (%x)\n",
- dev->identity.dom);
- return ERR_PTR(-EINVAL);
- }
-
- rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
- if (!rrpc)
- return ERR_PTR(-ENOMEM);
-
- rrpc->dev = dev;
- rrpc->disk = tdisk;
-
- bio_list_init(&rrpc->requeue_bios);
- spin_lock_init(&rrpc->bio_lock);
- INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
-
- rrpc->nr_luns = geo->nr_luns;
- rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;
-
- /* simple round-robin strategy */
- atomic_set(&rrpc->next_lun, -1);
-
- ret = rrpc_area_init(rrpc, &soffset);
- if (ret < 0) {
- pr_err("nvm: rrpc: could not initialize area\n");
- return ERR_PTR(ret);
- }
- rrpc->soffset = soffset;
-
- ret = rrpc_luns_init(rrpc, dev->luns);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize luns\n");
- goto err;
- }
-
- ret = rrpc_core_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize core\n");
- goto err;
- }
-
- ret = rrpc_map_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize maps\n");
- goto err;
- }
-
- ret = rrpc_blocks_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize state for blocks\n");
- goto err;
- }
-
- ret = rrpc_luns_configure(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
- goto err;
- }
-
- ret = rrpc_gc_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize gc\n");
- goto err;
- }
-
- /* inherit the size from the underlying device */
- blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
- blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
-
- pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
- rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
-
- mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
-
- return rrpc;
-err:
- rrpc_free(rrpc);
- return ERR_PTR(ret);
-}
-
-/* round robin, page-based FTL, and cost-based GC */
-static struct nvm_tgt_type tt_rrpc = {
- .name = "rrpc",
- .version = {1, 0, 0},
-
- .make_rq = rrpc_make_rq,
- .capacity = rrpc_capacity,
-
- .init = rrpc_init,
- .exit = rrpc_exit,
-};
-
-static int __init rrpc_module_init(void)
-{
- return nvm_register_tgt_type(&tt_rrpc);
-}
-
-static void rrpc_module_exit(void)
-{
- nvm_unregister_tgt_type(&tt_rrpc);
-}
-
-module_init(rrpc_module_init);
-module_exit(rrpc_module_exit);
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
deleted file mode 100644
index fdb6ff902903..000000000000
--- a/drivers/lightnvm/rrpc.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Copyright (C) 2015 IT University of Copenhagen
- * Initial release: Matias Bjorling <m@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
- */
-
-#ifndef RRPC_H_
-#define RRPC_H_
-
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/kthread.h>
-#include <linux/vmalloc.h>
-
-#include <linux/lightnvm.h>
-
-/* Run only GC if less than 1/X blocks are free */
-#define GC_LIMIT_INVERSE 10
-#define GC_TIME_SECS 100
-
-#define RRPC_SECTOR (512)
-#define RRPC_EXPOSED_PAGE_SIZE (4096)
-
-#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
-
-struct rrpc_inflight {
- struct list_head reqs;
- spinlock_t lock;
-};
-
-struct rrpc_inflight_rq {
- struct list_head list;
- sector_t l_start;
- sector_t l_end;
-};
-
-struct rrpc_rq {
- struct rrpc_inflight_rq inflight_rq;
- unsigned long flags;
-};
-
-struct rrpc_block {
- int id; /* id inside of LUN */
- struct rrpc_lun *rlun;
-
- struct list_head prio; /* LUN CG list */
- struct list_head list; /* LUN free, used, bb list */
-
-#define MAX_INVALID_PAGES_STORAGE 8
-	/* Bitmap for invalid page entries */
- unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
- /* points to the next writable page within a block */
- unsigned int next_page;
- /* number of pages that are invalid, wrt host page size */
- unsigned int nr_invalid_pages;
-
- int state;
-
- spinlock_t lock;
- atomic_t data_cmnt_size; /* data pages committed to stable storage */
-};
-
-struct rrpc_lun {
- struct rrpc *rrpc;
-
- int id;
- struct ppa_addr bppa;
-
- struct rrpc_block *cur, *gc_cur;
- struct rrpc_block *blocks; /* Reference to block allocation */
-
- struct list_head prio_list; /* Blocks that may be GC'ed */
- struct list_head wblk_list; /* Queued blocks to be written to */
-
- /* lun block lists */
- struct list_head used_list; /* In-use blocks */
- struct list_head free_list; /* Not used blocks i.e. released
- * and ready for use
- */
- struct list_head bb_list; /* Bad blocks. Mutually exclusive with
- * free_list and used_list
- */
- unsigned int nr_free_blocks; /* Number of unused blocks */
-
- struct work_struct ws_gc;
-
- int reserved_blocks;
-
- spinlock_t lock;
-};
-
-struct rrpc {
- struct nvm_tgt_dev *dev;
- struct gendisk *disk;
-
- sector_t soffset; /* logical sector offset */
-
- int nr_luns;
- struct rrpc_lun *luns;
-
- /* calculated values */
- unsigned long long nr_sects;
-
-	/* Write strategy variables. Move these into a separate structure
-	 * for each strategy
- */
- atomic_t next_lun; /* Whenever a page is written, this is updated
- * to point to the next write lun
- */
-
- spinlock_t bio_lock;
- struct bio_list requeue_bios;
- struct work_struct ws_requeue;
-
- /* Simple translation map of logical addresses to physical addresses.
- * The logical addresses are known to the host system, while the physical
- * addresses are used when writing to the disk block device.
- */
- struct rrpc_addr *trans_map;
- /* also store a reverse map for garbage collection */
- struct rrpc_rev_addr *rev_trans_map;
- spinlock_t rev_lock;
-
- struct rrpc_inflight inflights;
-
- mempool_t *addr_pool;
- mempool_t *page_pool;
- mempool_t *gcb_pool;
- mempool_t *rq_pool;
-
- struct timer_list gc_timer;
- struct workqueue_struct *krqd_wq;
- struct workqueue_struct *kgc_wq;
-};
-
-struct rrpc_block_gc {
- struct rrpc *rrpc;
- struct rrpc_block *rblk;
- struct work_struct ws_gc;
-};
-
-/* Logical to physical mapping */
-struct rrpc_addr {
- u64 addr;
- struct rrpc_block *rblk;
-};
-
-/* Physical to logical mapping */
-struct rrpc_rev_addr {
- u64 addr;
-};
-
-static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
- struct ppa_addr r)
-{
- struct ppa_addr l;
- int secs, pgs;
- sector_t ppa = r.ppa;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, geo->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, geo->sec_per_pg);
- div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- return l;
-}
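
For instance, assuming a geometry of sec_per_pg = 4 and pgs_per_blk = 256 (illustrative values only), the decomposition above works out as:

/*
 *   r.ppa = 1027:
 *     l.g.sec = 1027 % 4         = 3
 *     l.g.pg  = (1027 / 4) % 256 = 0    (1027 / 4 = 256; 256 % 256 = 0)
 */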
-
-static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
-{
- return linear_to_generic_addr(&dev->geo, pba);
-}
-
-static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_lun *rlun = rblk->rlun;
-
- return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
-}
-
-static inline sector_t rrpc_get_laddr(struct bio *bio)
-{
- return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
-}
-
-static inline unsigned int rrpc_get_pages(struct bio *bio)
-{
- return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
-}
-
-static inline sector_t rrpc_get_sector(sector_t laddr)
-{
- return laddr * NR_PHY_IN_LOG;
-}
-
-static inline int request_intersects(struct rrpc_inflight_rq *r,
- sector_t laddr_start, sector_t laddr_end)
-{
- return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
-}
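
request_intersects() is the standard closed-interval overlap test; two made-up ranges as a sanity check:

/*
 *   in-flight [8, 15] vs. incoming [12, 19]:
 *     19 >= 8 && 12 <= 15  -> overlap, __rrpc_lock_laddr() returns 1
 *   in-flight [8, 15] vs. incoming [16, 23]:
 *     16 > 15              -> no overlap, the request may proceed
 */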
-
-static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
- unsigned int pages, struct rrpc_inflight_rq *r)
-{
- sector_t laddr_end = laddr + pages - 1;
- struct rrpc_inflight_rq *rtmp;
-
- WARN_ON(irqs_disabled());
-
- spin_lock_irq(&rrpc->inflights.lock);
- list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
- if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
- /* existing, overlapping request, come back later */
- spin_unlock_irq(&rrpc->inflights.lock);
- return 1;
- }
- }
-
- r->l_start = laddr;
- r->l_end = laddr_end;
-
- list_add_tail(&r->list, &rrpc->inflights.reqs);
- spin_unlock_irq(&rrpc->inflights.lock);
- return 0;
-}
-
-static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
- unsigned int pages,
- struct rrpc_inflight_rq *r)
-{
- BUG_ON((laddr + pages) > rrpc->nr_sects);
-
- return __rrpc_lock_laddr(rrpc, laddr, pages, r);
-}
-
-static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
-{
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
-
- return &rrqd->inflight_rq;
-}
-
-static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd)
-{
- sector_t laddr = rrpc_get_laddr(bio);
- unsigned int pages = rrpc_get_pages(bio);
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
-
- return rrpc_lock_laddr(rrpc, laddr, pages, r);
-}
-
-static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
- struct rrpc_inflight_rq *r)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rrpc->inflights.lock, flags);
- list_del_init(&r->list);
- spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
-}
-
-static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- uint8_t pages = rqd->nr_ppas;
-
- BUG_ON((r->l_start + pages) > rrpc->nr_sects);
-
- rrpc_unlock_laddr(rrpc, r);
-}
-
-#endif /* RRPC_H_ */
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 899ec1f4c833..346e6f5f77be 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1245,10 +1245,10 @@ static ssize_t smu_read(struct file *file, char __user *buf,
return -EBADFD;
}
-static unsigned int smu_fpoll(struct file *file, poll_table *wait)
+static __poll_t smu_fpoll(struct file *file, poll_table *wait)
{
struct smu_private *pp = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
if (pp == 0)
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index c4c2b3b85ebc..e8b29fc532e1 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2157,11 +2157,11 @@ pmu_write(struct file *file, const char __user *buf,
return 0;
}
-static unsigned int
+static __poll_t
pmu_fpoll(struct file *filp, poll_table *wait)
{
struct pmu_private *pp = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
if (pp == 0)
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 93f3d4d61fa7..f84730d63b1f 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -235,7 +235,7 @@ kfree_err:
return ret;
}
-static unsigned int
+static __poll_t
mbox_test_message_poll(struct file *filp, struct poll_table_struct *wait)
{
struct mbox_test_device *tdev = filp->private_data;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 83b9362be09c..2c8ac3688815 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -269,6 +269,13 @@ config DM_BIO_PRISON
source "drivers/md/persistent-data/Kconfig"
+config DM_UNSTRIPED
+ tristate "Unstriped target"
+ depends on BLK_DEV_DM
+ ---help---
+ Unstripes I/O so it is issued solely on a single drive in a HW
+ RAID0 or dm-striped target.
+
config DM_CRYPT
tristate "Crypt target support"
depends on BLK_DEV_DM
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index f701bb211783..63255f3ebd97 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_BCACHE) += bcache/
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o
+obj-$(CONFIG_DM_UNSTRIPED) += dm-unstripe.o
obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index a0cc1bc6d884..6cc6c0f9c3a9 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -525,15 +525,21 @@ struct open_bucket {
/*
* We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
+ * write streams for better cache utilization: first we try to segregate flash
+ * only volume write streams from cached devices, secondly we look for a bucket
+ * where the last write to it was sequential with the current write, and
+ * failing that we look for a bucket that was last used by the same task.
*
 * The idea is that if you've got multiple tasks pulling data into the cache at the
* same time, you'll get better cache utilization if you try to segregate their
* data and preserve locality.
*
- * For example, say you've starting Firefox at the same time you're copying a
+ * For example, dirty sectors of a flash-only volume are not reclaimable; if
+ * they share buckets with dirty sectors of a cached device, those buckets
+ * stay marked dirty and are never reclaimed, even after the cached device's
+ * dirty data has been written back to the backing device.
+ *
+ * And say you're starting Firefox at the same time you're copying a
* bunch of files. Firefox will likely end up being fairly hot and stay in the
* cache awhile, but the data you copied might not be; if you wrote all that
* data to the same buckets it'd get invalidated at the same time.
@@ -550,7 +556,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
struct open_bucket *ret, *ret_task = NULL;
list_for_each_entry_reverse(ret, &c->data_buckets, list)
- if (!bkey_cmp(&ret->key, search))
+ if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
+ UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
+ continue;
+ else if (!bkey_cmp(&ret->key, search))
goto found;
else if (ret->last_write_point == write_point)
ret_task = ret;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 843877e017e1..5e2d4e80198e 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -320,14 +320,15 @@ struct cached_dev {
*/
atomic_t has_dirty;
- struct bch_ratelimit writeback_rate;
- struct delayed_work writeback_rate_update;
-
/*
- * Internal to the writeback code, so read_dirty() can keep track of
- * where it's at.
+ * Set to zero by things that touch the backing volume-- except
+ * writeback. Incremented by writeback. Used to determine when to
+ * accelerate idle writeback.
*/
- sector_t last_read;
+ atomic_t backing_idle;
+
+ struct bch_ratelimit writeback_rate;
+ struct delayed_work writeback_rate_update;
/* Limit number of writeback bios in flight */
struct semaphore in_flight;
@@ -336,6 +337,14 @@ struct cached_dev {
struct keybuf writeback_keys;
+ /*
+ * Order the write-half of writeback operations strongly in dispatch
+ * order. (Maintain LBA order; don't allow reads completing out of
+ * order to re-order the writes...)
+ */
+ struct closure_waitlist writeback_ordering_wait;
+ atomic_t writeback_sequence_next;
+
/* For tracking sequential IO */
#define RECENT_IO_BITS 7
#define RECENT_IO (1 << RECENT_IO_BITS)
@@ -488,6 +497,7 @@ struct cache_set {
int caches_loaded;
struct bcache_device **devices;
+ unsigned devices_max_used;
struct list_head cached_devs;
uint64_t cached_dev_sectors;
struct closure caching;
@@ -852,7 +862,7 @@ static inline void wake_up_allocators(struct cache_set *c)
/* Forward declarations */
-void bch_count_io_errors(struct cache *, blk_status_t, const char *);
+void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
blk_status_t, const char *);
void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 81e8dc3dbe5e..bf3a48aa9a9a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -419,7 +419,7 @@ static void do_btree_node_write(struct btree *b)
SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
bset_sector_offset(&b->keys, i));
- if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
+ if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
int j;
struct bio_vec *bv;
void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
@@ -432,6 +432,7 @@ static void do_btree_node_write(struct btree *b)
continue_at(cl, btree_node_write_done, NULL);
} else {
+		/* No problem for multipage bvec since the bio has just been allocated */
b->bio->bi_vcnt = 0;
bch_bio_map(b->bio, i);
@@ -1678,7 +1679,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
/* don't reclaim buckets to which writeback keys point */
rcu_read_lock();
- for (i = 0; i < c->nr_uuids; i++) {
+ for (i = 0; i < c->devices_max_used; i++) {
struct bcache_device *d = c->devices[i];
struct cached_dev *dc;
struct keybuf_key *w, *n;
@@ -1803,10 +1804,7 @@ static int bch_gc_thread(void *arg)
int bch_gc_thread_start(struct cache_set *c)
{
c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
- if (IS_ERR(c->gc_thread))
- return PTR_ERR(c->gc_thread);
-
- return 0;
+ return PTR_ERR_OR_ZERO(c->gc_thread);
}
/* Initial partial gc */
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 1841d0359bac..7f12920c14f7 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <linux/sched/debug.h>
#include "closure.h"
@@ -18,10 +19,6 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
BUG_ON(flags & CLOSURE_GUARD_MASK);
BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
- /* Must deliver precisely one wakeup */
- if (r == 1 && (flags & CLOSURE_SLEEPING))
- wake_up_process(cl->task);
-
if (!r) {
if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
atomic_set(&cl->remaining,
@@ -100,28 +97,34 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
}
EXPORT_SYMBOL(closure_wait);
-/**
- * closure_sync - sleep until a closure has nothing left to wait on
- *
- * Sleeps until the refcount hits 1 - the thread that's running the closure owns
- * the last refcount.
- */
-void closure_sync(struct closure *cl)
+struct closure_syncer {
+ struct task_struct *task;
+ int done;
+};
+
+static void closure_sync_fn(struct closure *cl)
{
- while (1) {
- __closure_start_sleep(cl);
- closure_set_ret_ip(cl);
+ cl->s->done = 1;
+ wake_up_process(cl->s->task);
+}
- if ((atomic_read(&cl->remaining) &
- CLOSURE_REMAINING_MASK) == 1)
- break;
+void __sched __closure_sync(struct closure *cl)
+{
+ struct closure_syncer s = { .task = current };
+ cl->s = &s;
+ continue_at(cl, closure_sync_fn, NULL);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
schedule();
}
- __closure_end_sleep(cl);
+ __set_current_state(TASK_RUNNING);
}
-EXPORT_SYMBOL(closure_sync);
+EXPORT_SYMBOL(__closure_sync);
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -168,12 +171,10 @@ static int debug_seq_show(struct seq_file *f, void *data)
cl, (void *) cl->ip, cl->fn, cl->parent,
r & CLOSURE_REMAINING_MASK);
- seq_printf(f, "%s%s%s%s\n",
+ seq_printf(f, "%s%s\n",
test_bit(WORK_STRUCT_PENDING_BIT,
work_data_bits(&cl->work)) ? "Q" : "",
- r & CLOSURE_RUNNING ? "R" : "",
- r & CLOSURE_STACK ? "S" : "",
- r & CLOSURE_SLEEPING ? "Sl" : "");
+ r & CLOSURE_RUNNING ? "R" : "");
if (r & CLOSURE_WAITING)
seq_printf(f, " W %pF\n",
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index ccfbea6f9f6b..3b9dfc9962ad 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -103,6 +103,7 @@
*/
struct closure;
+struct closure_syncer;
typedef void (closure_fn) (struct closure *);
struct closure_waitlist {
@@ -115,10 +116,6 @@ enum closure_state {
* the thread that owns the closure, and cleared by the thread that's
* waking up the closure.
*
- * CLOSURE_SLEEPING: Must be set before a thread uses a closure to sleep
- * - indicates that cl->task is valid and closure_put() may wake it up.
- * Only set or cleared by the thread that owns the closure.
- *
* The rest are for debugging and don't affect behaviour:
*
* CLOSURE_RUNNING: Set when a closure is running (i.e. by
@@ -128,22 +125,16 @@ enum closure_state {
* continue_at() and closure_return() clear it for you, if you're doing
* something unusual you can use closure_set_dead() which also helps
* annotate where references are being transferred.
- *
- * CLOSURE_STACK: Sanity check - remaining should never hit 0 on a
- * closure with this flag set
*/
- CLOSURE_BITS_START = (1 << 23),
- CLOSURE_DESTRUCTOR = (1 << 23),
- CLOSURE_WAITING = (1 << 25),
- CLOSURE_SLEEPING = (1 << 27),
- CLOSURE_RUNNING = (1 << 29),
- CLOSURE_STACK = (1 << 31),
+ CLOSURE_BITS_START = (1U << 26),
+ CLOSURE_DESTRUCTOR = (1U << 26),
+ CLOSURE_WAITING = (1U << 28),
+ CLOSURE_RUNNING = (1U << 30),
};
#define CLOSURE_GUARD_MASK \
- ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING| \
- CLOSURE_RUNNING|CLOSURE_STACK) << 1)
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)
#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
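
Derived from the constants above (a reading aid, not part of the patch), the 32-bit remaining word now splits as:

/*
 *   bits  0..25  refcount  (CLOSURE_REMAINING_MASK = (1U << 26) - 1)
 *   bit  26      CLOSURE_DESTRUCTOR
 *   bit  28      CLOSURE_WAITING
 *   bit  30      CLOSURE_RUNNING
 *
 * CLOSURE_GUARD_MASK is the flag bits shifted left once (27/29/31), so
 * a stray increment or decrement of a flag bit trips the
 * BUG_ON(flags & CLOSURE_GUARD_MASK) in closure_put_after_sub().
 */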
@@ -152,7 +143,7 @@ struct closure {
union {
struct {
struct workqueue_struct *wq;
- struct task_struct *task;
+ struct closure_syncer *s;
struct llist_node list;
closure_fn *fn;
};
@@ -178,7 +169,19 @@ void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
-void closure_sync(struct closure *cl);
+void __closure_sync(struct closure *cl);
+
+/**
+ * closure_sync - sleep until a closure has nothing left to wait on
+ *
+ * Sleeps until the refcount hits 1 - the thread that's running the closure owns
+ * the last refcount.
+ */
+static inline void closure_sync(struct closure *cl)
+{
+ if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)
+ __closure_sync(cl);
+}
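
A minimal, self-contained sketch of the park/wake pattern __closure_sync() relies on (hypothetical names, not part of the bcache API; the sleeper fills in s->task = current before kicking off the async work):

struct syncer {
	struct task_struct *task;
	int done;
};

static void syncer_wake(struct syncer *s)
{
	/* runs in the completion callback */
	s->done = 1;
	wake_up_process(s->task);
}

static void syncer_wait(struct syncer *s)
{
	while (1) {
		/* set state before testing done to avoid a lost wakeup */
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (s->done)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}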
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -215,24 +218,6 @@ static inline void closure_set_waiting(struct closure *cl, unsigned long f)
#endif
}
-static inline void __closure_end_sleep(struct closure *cl)
-{
- __set_current_state(TASK_RUNNING);
-
- if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
- atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
-}
-
-static inline void __closure_start_sleep(struct closure *cl)
-{
- closure_set_ip(cl);
- cl->task = current;
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
- atomic_add(CLOSURE_SLEEPING, &cl->remaining);
-}
-
static inline void closure_set_stopped(struct closure *cl)
{
atomic_sub(CLOSURE_RUNNING, &cl->remaining);
@@ -241,7 +226,6 @@ static inline void closure_set_stopped(struct closure *cl)
static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
struct workqueue_struct *wq)
{
- BUG_ON(object_is_on_stack(cl));
closure_set_ip(cl);
cl->fn = fn;
cl->wq = wq;
@@ -300,7 +284,7 @@ static inline void closure_init(struct closure *cl, struct closure *parent)
static inline void closure_init_stack(struct closure *cl)
{
memset(cl, 0, sizeof(struct closure));
- atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
}
/**
@@ -322,6 +306,8 @@ static inline void closure_wake_up(struct closure_waitlist *list)
* This is because after calling continue_at() you no longer have a ref on @cl,
* and whatever @cl owns may be freed out from under you - a running closure fn
* has a ref on its own closure which continue_at() drops.
+ *
+ * Note you are expected to immediately return after using this macro.
*/
#define continue_at(_cl, _fn, _wq) \
do { \
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index c7a02c4900da..af89408befe8 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -116,7 +116,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
return;
check->bi_opf = REQ_OP_READ;
- if (bio_alloc_pages(check, GFP_NOIO))
+ if (bch_bio_alloc_pages(check, GFP_NOIO))
goto out_put;
submit_bio_wait(check);
@@ -251,8 +251,7 @@ void bch_debug_exit(void)
int __init bch_debug_init(struct kobject *kobj)
{
- int ret = 0;
-
debug = debugfs_create_dir("bcache", NULL);
- return ret;
+
+ return IS_ERR_OR_NULL(debug);
}
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index fac97ec2d0e2..a783c5a41ff1 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -51,7 +51,10 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
/* IO errors */
-void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
+void bch_count_io_errors(struct cache *ca,
+ blk_status_t error,
+ int is_read,
+ const char *m)
{
/*
* The halflife of an error is:
@@ -94,8 +97,9 @@ void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
errors >>= IO_ERROR_SHIFT;
if (errors < ca->set->error_limit)
- pr_err("%s: IO error on %s, recovering",
- bdevname(ca->bdev, buf), m);
+ pr_err("%s: IO error on %s%s",
+ bdevname(ca->bdev, buf), m,
+ is_read ? ", recovering." : ".");
else
bch_cache_set_error(ca->set,
"%s: too many IO errors %s",
@@ -108,6 +112,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
{
struct bbio *b = container_of(bio, struct bbio, bio);
struct cache *ca = PTR_CACHE(c, &b->key, 0);
+ int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
unsigned threshold = op_is_write(bio_op(bio))
? c->congested_write_threshold_us
@@ -129,7 +134,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
atomic_inc(&c->congested);
}
- bch_count_io_errors(ca, error, m);
+ bch_count_io_errors(ca, error, is_read, m);
}
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index d50c1c97da68..a24c3a95b2c0 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -162,7 +162,7 @@ static void read_moving(struct cache_set *c)
bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio->bi_end_io = read_moving_endio;
- if (bio_alloc_pages(bio, GFP_KERNEL))
+ if (bch_bio_alloc_pages(bio, GFP_KERNEL))
goto err;
trace_bcache_gc_copy(&w->key);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 643c3021624f..1a46b41dac70 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -576,6 +576,7 @@ static void cache_lookup(struct closure *cl)
{
struct search *s = container_of(cl, struct search, iop.cl);
struct bio *bio = &s->bio.bio;
+ struct cached_dev *dc;
int ret;
bch_btree_op_init(&s->op, -1);
@@ -588,6 +589,27 @@ static void cache_lookup(struct closure *cl)
return;
}
+ /*
+	 * We might hit an error when searching the btree. If that happens we
+	 * get a negative ret; in that case we must not recover data from the
+	 * backing device (when the cache device is dirty) because we don't
+	 * know whether all the bkeys the read request covers are clean.
+	 *
+	 * Also, when that happens, s->iop.status still holds its initial
+	 * value from before we submitted s->bio.bio.
+ */
+ if (ret < 0) {
+ BUG_ON(ret == -EINTR);
+ if (s->d && s->d->c &&
+ !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
+ dc = container_of(s->d, struct cached_dev, disk);
+ if (dc && atomic_read(&dc->has_dirty))
+ s->recoverable = false;
+ }
+ if (!s->iop.status)
+ s->iop.status = BLK_STS_IOERR;
+ }
+
closure_return(cl);
}
@@ -611,8 +633,8 @@ static void request_endio(struct bio *bio)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- struct request_queue *q = s->orig_bio->bi_disk->queue;
- generic_end_io_acct(q, bio_data_dir(s->orig_bio),
+ generic_end_io_acct(s->d->disk->queue,
+ bio_data_dir(s->orig_bio),
&s->d->disk->part0, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
@@ -841,7 +863,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
cache_bio->bi_private = &s->cl;
bch_bio_map(cache_bio, NULL);
- if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
+ if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
goto out_put;
if (reada)
@@ -974,6 +996,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
int rw = bio_data_dir(bio);
+ atomic_set(&dc->backing_idle, 0);
generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
bio_set_dev(bio, dc->bdev);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index b4d28928dec5..133b81225ea9 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -211,7 +211,7 @@ static void write_bdev_super_endio(struct bio *bio)
static void __write_super(struct cache_sb *sb, struct bio *bio)
{
- struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
+ struct cache_sb *out = page_address(bio_first_page_all(bio));
unsigned i;
bio->bi_iter.bi_sector = SB_SECTOR;
@@ -274,7 +274,9 @@ static void write_super_endio(struct bio *bio)
{
struct cache *ca = bio->bi_private;
- bch_count_io_errors(ca, bio->bi_status, "writing superblock");
+ /* is_read = 0 */
+ bch_count_io_errors(ca, bio->bi_status, 0,
+ "writing superblock");
closure_put(&ca->set->sb_write);
}
@@ -721,6 +723,9 @@ static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
d->c = c;
c->devices[id] = d;
+ if (id >= c->devices_max_used)
+ c->devices_max_used = id + 1;
+
closure_get(&c->caching);
}
@@ -906,6 +911,12 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_lock(&bch_register_lock);
+ cancel_delayed_work_sync(&dc->writeback_rate_update);
+ if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
+ kthread_stop(dc->writeback_thread);
+ dc->writeback_thread = NULL;
+ }
+
memset(&dc->sb.set_uuid, 0, 16);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
@@ -1166,7 +1177,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
dc->bdev->bd_holder = dc;
bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
- dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
get_page(sb_page);
if (cached_dev_init(dc, sb->block_size << 9))
@@ -1261,7 +1272,7 @@ static int flash_devs_run(struct cache_set *c)
struct uuid_entry *u;
for (u = c->uuids;
- u < c->uuids + c->nr_uuids && !ret;
+ u < c->uuids + c->devices_max_used && !ret;
u++)
if (UUID_FLASH_ONLY(u))
ret = flash_dev_run(c, u);
@@ -1427,7 +1438,7 @@ static void __cache_set_unregister(struct closure *cl)
mutex_lock(&bch_register_lock);
- for (i = 0; i < c->nr_uuids; i++)
+ for (i = 0; i < c->devices_max_used; i++)
if (c->devices[i]) {
if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
@@ -1490,7 +1501,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->bucket_bits = ilog2(sb->bucket_size);
c->block_bits = ilog2(sb->block_size);
c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
-
+ c->devices_max_used = 0;
c->btree_pages = bucket_pages(c);
if (c->btree_pages > BTREE_MAX_PAGES)
c->btree_pages = max_t(int, c->btree_pages / 4,
@@ -1810,7 +1821,7 @@ void bch_cache_release(struct kobject *kobj)
free_fifo(&ca->free[i]);
if (ca->sb_bio.bi_inline_vecs[0].bv_page)
- put_page(ca->sb_bio.bi_io_vec[0].bv_page);
+ put_page(bio_first_page_all(&ca->sb_bio));
if (!IS_ERR_OR_NULL(ca->bdev))
blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
@@ -1864,7 +1875,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
ca->bdev->bd_holder = ca;
bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
- ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
get_page(sb_page);
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index e548b8b51322..a23cd6a14b74 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -249,6 +249,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
: 0;
}
+/*
+ * Generally it isn't good to access .bi_io_vec and .bi_vcnt directly;
+ * the preferred way is bio_add_page(). But in this case bch_bio_map()
+ * assumes that the bvec table is empty, so it is safe to access
+ * .bi_vcnt & .bi_io_vec this way even after multipage bvecs are
+ * supported.
+ */
void bch_bio_map(struct bio *bio, void *base)
{
size_t size = bio->bi_iter.bi_size;
@@ -276,6 +283,33 @@ start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
}
}
+/**
+ * bch_bio_alloc_pages - allocates a single page for each bvec in a bio
+ * @bio: bio to allocate pages for
+ * @gfp_mask: flags for allocation
+ *
+ * Allocates pages up to @bio->bi_vcnt.
+ *
+ * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
+ * freed.
+ */
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
+{
+ int i;
+ struct bio_vec *bv;
+
+ bio_for_each_segment_all(bv, bio, i) {
+ bv->bv_page = alloc_page(gfp_mask);
+ if (!bv->bv_page) {
+ while (--bv >= bio->bi_io_vec)
+ __free_page(bv->bv_page);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
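The failure path in bch_bio_alloc_pages() uses a classic reverse-unwind idiom; the same idiom in isolation (hypothetical helper, for illustration):

static int alloc_page_array(struct page **pages, int n, gfp_t gfp)
{
	int i;

	for (i = 0; i < n; i++) {
		pages[i] = alloc_page(gfp);
		if (!pages[i]) {
			/* free everything allocated so far, newest first */
			while (--i >= 0)
				__free_page(pages[i]);
			return -ENOMEM;
		}
	}
	return 0;
}
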
/*
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any
* use permitted, subject to terms of PostgreSQL license; see.)
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index ed5e8a412eb8..4df4c5c1cab2 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -558,6 +558,7 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
}
void bch_bio_map(struct bio *bio, void *base);
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
static inline sector_t bdev_sectors(struct block_device *bdev)
{
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 56a37884ca8b..51306a19ab03 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -18,17 +18,39 @@
#include <trace/events/bcache.h>
/* Rate limiting */
-
-static void __update_writeback_rate(struct cached_dev *dc)
+static uint64_t __calc_target_rate(struct cached_dev *dc)
{
struct cache_set *c = dc->disk.c;
+
+ /*
+ * This is the size of the cache, minus the amount used for
+ * flash-only devices
+ */
uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
bcache_flash_devs_sectors_dirty(c);
+
+ /*
+ * Unfortunately there is no control of global dirty data. If the
+ * user states that they want 10% dirty data in the cache, and has,
+ * e.g., 5 backing volumes of equal size, we try and ensure each
+ * backing volume uses about 2% of the cache for dirty data.
+ */
+ uint32_t bdev_share =
+ div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
+ c->cached_dev_sectors);
+
uint64_t cache_dirty_target =
div_u64(cache_sectors * dc->writeback_percent, 100);
- int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
- c->cached_dev_sectors);
+ /* Ensure each backing dev gets at least one dirty share */
+ if (bdev_share < 1)
+ bdev_share = 1;
+
+ return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
+}
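
A worked example of the share arithmetic with hypothetical sizes: writeback_percent = 10 on a 100 GiB cache, with this backing device holding 1/5 of all attached backing storage:

/*
 *   cache_dirty_target = 100 GiB * 10 / 100   = 10 GiB of sectors
 *   bdev_share         = (1 << 14) / 5        = 3276 of 16384 shares
 *   target             = 10 GiB * 3276 >> 14 ~=  2 GiB
 *
 * so this backing device aims to keep about 2 GiB dirty in the cache.
 */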
+
+static void __update_writeback_rate(struct cached_dev *dc)
+{
/*
* PI controller:
* Figures out the amount that should be written per second.
@@ -49,6 +71,7 @@ static void __update_writeback_rate(struct cached_dev *dc)
* This acts as a slow, long-term average that is not subject to
* variations in usage like the p term.
*/
+ int64_t target = __calc_target_rate(dc);
int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
int64_t error = dirty - target;
int64_t proportional_scaled =
@@ -116,6 +139,7 @@ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
struct dirty_io {
struct closure cl;
struct cached_dev *dc;
+ uint16_t sequence;
struct bio bio;
};
@@ -194,6 +218,27 @@ static void write_dirty(struct closure *cl)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
+ struct cached_dev *dc = io->dc;
+
+ uint16_t next_sequence;
+
+ if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
+ /* Not our turn to write; wait for a write to complete */
+ closure_wait(&dc->writeback_ordering_wait, cl);
+
+ if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
+ /*
+			 * Edge case: this happened in indeterminate order
+			 * relative to when we were added to the wait list.
+ */
+ closure_wake_up(&dc->writeback_ordering_wait);
+ }
+
+ continue_at(cl, write_dirty, io->dc->writeback_write_wq);
+ return;
+ }
+
+ next_sequence = io->sequence + 1;
/*
* IO errors are signalled using the dirty bit on the key.
@@ -211,6 +256,9 @@ static void write_dirty(struct closure *cl)
closure_bio_submit(&io->bio, cl);
}
+ atomic_set(&dc->writeback_sequence_next, next_sequence);
+ closure_wake_up(&dc->writeback_ordering_wait);
+
continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
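
The sequence/wait-list machinery above keeps the write half of writebacks in dispatch (LBA) order without sleeping; a simplified *blocking* analogue of the same ticket scheme, for illustration only:

static atomic_t seq_next = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(seq_wq);

static void ordered_dispatch(int my_seq)
{
	/* wait until our ticket comes up */
	wait_event(seq_wq, atomic_read(&seq_next) == my_seq);

	/* ... submit the write here ... */

	/* pass the baton to the next writer */
	atomic_set(&seq_next, my_seq + 1);
	wake_up_all(&seq_wq);
}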
@@ -219,8 +267,10 @@ static void read_dirty_endio(struct bio *bio)
struct keybuf_key *w = bio->bi_private;
struct dirty_io *io = w->private;
+ /* is_read = 1 */
bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
- bio->bi_status, "reading dirty data from cache");
+ bio->bi_status, 1,
+ "reading dirty data from cache");
dirty_endio(bio);
}
@@ -237,10 +287,15 @@ static void read_dirty_submit(struct closure *cl)
static void read_dirty(struct cached_dev *dc)
{
unsigned delay = 0;
- struct keybuf_key *w;
+ struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
+ size_t size;
+ int nk, i;
struct dirty_io *io;
struct closure cl;
+ uint16_t sequence = 0;
+ BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
+ atomic_set(&dc->writeback_sequence_next, sequence);
closure_init_stack(&cl);
/*
@@ -248,45 +303,109 @@ static void read_dirty(struct cached_dev *dc)
* mempools.
*/
- while (!kthread_should_stop()) {
-
- w = bch_keybuf_next(&dc->writeback_keys);
- if (!w)
- break;
-
- BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));
-
- if (KEY_START(&w->key) != dc->last_read ||
- jiffies_to_msecs(delay) > 50)
- while (!kthread_should_stop() && delay)
- delay = schedule_timeout_interruptible(delay);
-
- dc->last_read = KEY_OFFSET(&w->key);
-
- io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
- * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
- GFP_KERNEL);
- if (!io)
- goto err;
-
- w->private = io;
- io->dc = dc;
-
- dirty_init(w);
- bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
- io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
- bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
- io->bio.bi_end_io = read_dirty_endio;
-
- if (bio_alloc_pages(&io->bio, GFP_KERNEL))
- goto err_free;
+ next = bch_keybuf_next(&dc->writeback_keys);
+
+ while (!kthread_should_stop() && next) {
+ size = 0;
+ nk = 0;
+
+ do {
+ BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
+
+ /*
+ * Don't combine too many operations, even if they
+ * are all small.
+ */
+ if (nk >= MAX_WRITEBACKS_IN_PASS)
+ break;
+
+ /*
+ * If the current operation is very large, don't
+ * further combine operations.
+ */
+ if (size >= MAX_WRITESIZE_IN_PASS)
+ break;
+
+ /*
+ * Operations are only eligible to be combined
+ * if they are contiguous.
+ *
+ * TODO: add a heuristic willing to fire a
+ * certain amount of non-contiguous IO per pass,
+ * so that we can benefit from backing device
+ * command queueing.
+ */
+ if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
+ &START_KEY(&next->key)))
+ break;
+
+ size += KEY_SIZE(&next->key);
+ keys[nk++] = next;
+ } while ((next = bch_keybuf_next(&dc->writeback_keys)));
+
+ /* Now we have gathered a set of 1..5 keys to write back. */
+ for (i = 0; i < nk; i++) {
+ w = keys[i];
+
+ io = kzalloc(sizeof(struct dirty_io) +
+ sizeof(struct bio_vec) *
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ GFP_KERNEL);
+ if (!io)
+ goto err;
+
+ w->private = io;
+ io->dc = dc;
+ io->sequence = sequence++;
+
+ dirty_init(w);
+ bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
+ io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
+ bio_set_dev(&io->bio,
+ PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
+ io->bio.bi_end_io = read_dirty_endio;
+
+ if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
+ goto err_free;
+
+ trace_bcache_writeback(&w->key);
+
+ down(&dc->in_flight);
+
+ /* We've acquired a semaphore for the maximum
+ * simultaneous number of writebacks; from here
+ * everything happens asynchronously.
+ */
+ closure_call(&io->cl, read_dirty_submit, NULL, &cl);
+ }
- trace_bcache_writeback(&w->key);
+ delay = writeback_delay(dc, size);
- down(&dc->in_flight);
- closure_call(&io->cl, read_dirty_submit, NULL, &cl);
+ /* If the control system would wait for at least half a
+		 * second, and there have been no requests hitting the backing
+		 * disk for a while: use an alternate mode where we have at most
+ * one contiguous set of writebacks in flight at a time. If
+ * someone wants to do IO it will be quick, as it will only
+ * have to contend with one operation in flight, and we'll
+ * be round-tripping data to the backing disk as quickly as
+ * it can accept it.
+ */
+ if (delay >= HZ / 2) {
+ /* 3 means at least 1.5 seconds, up to 7.5 if we
+ * have slowed way down.
+ */
+ if (atomic_inc_return(&dc->backing_idle) >= 3) {
+ /* Wait for current I/Os to finish */
+ closure_sync(&cl);
+ /* And immediately launch a new set. */
+ delay = 0;
+ }
+ }
- delay = writeback_delay(dc, KEY_SIZE(&w->key));
+ while (!kthread_should_stop() && delay) {
+ schedule_timeout_interruptible(delay);
+ delay = writeback_delay(dc, 0);
+ }
}
if (0) {
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index a9e3ffb4b03c..66f1c527fa24 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -5,6 +5,16 @@
#define CUTOFF_WRITEBACK 40
#define CUTOFF_WRITEBACK_SYNC 70
+#define MAX_WRITEBACKS_IN_PASS 5
+#define MAX_WRITESIZE_IN_PASS 5000 /* *512b */
+
+/*
+ * 14 (16384ths) is chosen here as something that each backing device
+ * should be a reasonable fraction of the share, and not to blow up
+ * until individual backing devices are a petabyte.
+ */
+#define WRITEBACK_SHARE_SHIFT 14
+
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
uint64_t i, ret = 0;
@@ -21,7 +31,7 @@ static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
mutex_lock(&bch_register_lock);
- for (i = 0; i < c->nr_uuids; i++) {
+ for (i = 0; i < c->devices_max_used; i++) {
struct bcache_device *d = c->devices[i];
if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b8ac591aaaa7..414c9af54ded 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -662,7 +662,7 @@ static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
sector = (b->block << b->c->sectors_per_block_bits) + b->c->start;
- if (rw != WRITE) {
+ if (rw != REQ_OP_WRITE) {
n_sectors = 1 << b->c->sectors_per_block_bits;
offset = 0;
} else {
@@ -740,7 +740,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
b->write_end = b->dirty_end;
if (!write_list)
- submit_io(b, WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, write_endio);
else
list_add_tail(&b->write_list, write_list);
}
@@ -753,7 +753,7 @@ static void __flush_write_list(struct list_head *write_list)
struct dm_buffer *b =
list_entry(write_list->next, struct dm_buffer, write_list);
list_del(&b->write_list);
- submit_io(b, WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, write_endio);
cond_resched();
}
blk_finish_plug(&plug);
@@ -1123,7 +1123,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
return NULL;
if (need_submit)
- submit_io(b, READ, read_endio);
+ submit_io(b, REQ_OP_READ, read_endio);
wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
@@ -1193,7 +1193,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
dm_bufio_unlock(c);
if (need_submit)
- submit_io(b, READ, read_endio);
+ submit_io(b, REQ_OP_READ, read_endio);
dm_bufio_release(b);
cond_resched();
@@ -1454,7 +1454,7 @@ retry:
old_block = b->block;
__unlink_buffer(b);
__link_buffer(b, new_block, b->list_mode);
- submit_io(b, WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, write_endio);
wait_on_bit_io(&b->state, B_WRITING,
TASK_UNINTERRUPTIBLE);
__unlink_buffer(b);
@@ -1611,7 +1611,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
int l;
struct dm_buffer *b, *tmp;
unsigned long freed = 0;
- unsigned long count = nr_to_scan;
+ unsigned long count = c->n_buffers[LIST_CLEAN] +
+ c->n_buffers[LIST_DIRTY];
unsigned long retain_target = get_retain_buffers(c);
for (l = 0; l < LIST_SIZE; l++) {
@@ -1647,8 +1648,11 @@ static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
+ unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
+ READ_ONCE(c->n_buffers[LIST_DIRTY]);
+ unsigned long retain_target = get_retain_buffers(c);
- return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
+ return (count < retain_target) ? 0 : (count - retain_target);
}
/*
@@ -1712,7 +1716,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (!DM_BUFIO_CACHE_NAME(c)) {
r = -ENOMEM;
mutex_unlock(&dm_bufio_clients_lock);
- goto bad_cache;
+ goto bad;
}
}
@@ -1723,7 +1727,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (!DM_BUFIO_CACHE(c)) {
r = -ENOMEM;
mutex_unlock(&dm_bufio_clients_lock);
- goto bad_cache;
+ goto bad;
}
}
}
@@ -1734,27 +1738,28 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (!b) {
r = -ENOMEM;
- goto bad_buffer;
+ goto bad;
}
__free_buffer_wake(b);
}
+ c->shrinker.count_objects = dm_bufio_shrink_count;
+ c->shrinker.scan_objects = dm_bufio_shrink_scan;
+ c->shrinker.seeks = 1;
+ c->shrinker.batch = 0;
+ r = register_shrinker(&c->shrinker);
+ if (r)
+ goto bad;
+
mutex_lock(&dm_bufio_clients_lock);
dm_bufio_client_count++;
list_add(&c->client_list, &dm_bufio_all_clients);
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
- c->shrinker.count_objects = dm_bufio_shrink_count;
- c->shrinker.scan_objects = dm_bufio_shrink_scan;
- c->shrinker.seeks = 1;
- c->shrinker.batch = 0;
- register_shrinker(&c->shrinker);
-
return c;
-bad_buffer:
-bad_cache:
+bad:
while (!list_empty(&c->reserved_buffers)) {
struct dm_buffer *b = list_entry(c->reserved_buffers.next,
struct dm_buffer, lru_list);
@@ -1763,6 +1768,7 @@ bad_cache:
}
dm_io_client_destroy(c->dm_io);
bad_dm_io:
+ mutex_destroy(&c->lock);
kfree(c);
bad_client:
return ERR_PTR(r);
@@ -1807,6 +1813,7 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
BUG_ON(c->n_buffers[i]);
dm_io_client_destroy(c->dm_io);
+ mutex_destroy(&c->lock);
kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index cf23a14f9c6a..47407e43b96a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3472,18 +3472,18 @@ static int __init dm_cache_init(void)
{
int r;
- r = dm_register_target(&cache_target);
- if (r) {
- DMERR("cache target registration failed: %d", r);
- return r;
- }
-
migration_cache = KMEM_CACHE(dm_cache_migration, 0);
if (!migration_cache) {
dm_unregister_target(&cache_target);
return -ENOMEM;
}
+ r = dm_register_target(&cache_target);
+ if (r) {
+ DMERR("cache target registration failed: %d", r);
+ return r;
+ }
+
return 0;
}
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 6a14f945783c..3222e21cbbf8 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -91,8 +91,7 @@ struct mapped_device {
/*
* io objects are allocated from here.
*/
- mempool_t *io_pool;
-
+ struct bio_set *io_bs;
struct bio_set *bs;
/*
@@ -130,8 +129,6 @@ struct mapped_device {
struct srcu_struct io_barrier;
};
-void dm_init_md_queue(struct mapped_device *md);
-void dm_init_normal_md_queue(struct mapped_device *md);
int md_in_flight(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9fc12f556534..8168f737590e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1446,7 +1446,6 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
bio_for_each_segment_all(bv, clone, i) {
BUG_ON(!bv->bv_page);
mempool_free(bv->bv_page, cc->page_pool);
- bv->bv_page = NULL;
}
}
@@ -1954,10 +1953,15 @@ static int crypt_setkey(struct crypt_config *cc)
/* Ignore extra keys (which are used for IV etc) */
subkey_size = crypt_subkey_size(cc);
- if (crypt_integrity_hmac(cc))
+ if (crypt_integrity_hmac(cc)) {
+ if (subkey_size < cc->key_mac_size)
+ return -EINVAL;
+
crypt_copy_authenckey(cc->authenc_key, cc->key,
subkey_size - cc->key_mac_size,
cc->key_mac_size);
+ }
+
for (i = 0; i < cc->tfms_count; i++) {
if (crypt_integrity_hmac(cc))
r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
@@ -2053,9 +2057,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
ret = crypt_setkey(cc);
- /* wipe the kernel key payload copy in each case */
- memset(cc->key, 0, cc->key_size * sizeof(u8));
-
if (!ret) {
set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
kzfree(cc->key_string);
@@ -2192,6 +2193,8 @@ static void crypt_dtr(struct dm_target *ti)
kzfree(cc->cipher_auth);
kzfree(cc->authenc_key);
+ mutex_destroy(&cc->bio_alloc_lock);
+
/* Must zero key material before freeing */
kzfree(cc);
}
@@ -2523,6 +2526,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
}
}
+ /* wipe the kernel key payload copy */
+ if (cc->key_string)
+ memset(cc->key, 0, cc->key_size * sizeof(u8));
+
return ret;
}
@@ -2697,8 +2704,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- cc->bs = bioset_create(MIN_IOS, 0, (BIOSET_NEED_BVECS |
- BIOSET_NEED_RESCUER));
+ cc->bs = bioset_create(MIN_IOS, 0, BIOSET_NEED_BVECS);
if (!cc->bs) {
ti->error = "Cannot allocate crypt bioset";
goto bad;
@@ -2740,6 +2746,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->tag_pool_max_sectors * cc->on_disk_tag_size);
if (!cc->tag_pool) {
ti->error = "Cannot allocate integrity tags mempool";
+ ret = -ENOMEM;
goto bad;
}
@@ -2961,6 +2968,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
return ret;
if (cc->iv_gen_ops && cc->iv_gen_ops->init)
ret = cc->iv_gen_ops->init(cc);
+ /* wipe the kernel key payload copy */
+ if (cc->key_string)
+ memset(cc->key, 0, cc->key_size * sizeof(u8));
return ret;
}
if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
@@ -3007,7 +3017,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 18, 0},
+ .version = {1, 18, 1},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 288386bfbfb5..1783d80c9cad 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -229,6 +229,8 @@ static void delay_dtr(struct dm_target *ti)
if (dc->dev_write)
dm_put_device(ti, dc->dev_write);
+ mutex_destroy(&dc->timer_lock);
+
kfree(dc);
}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index b82cb1ab1eaa..1b907b15f5c3 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -70,6 +70,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
arg_name = dm_shift_arg(as);
argc--;
+ if (!arg_name) {
+ ti->error = "Insufficient feature arguments";
+ return -EINVAL;
+ }
+
/*
* drop_writes
*/
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 05c7bfd0c9d9..46d7c8749222 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2559,7 +2559,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
int r = 0;
unsigned i;
__u64 journal_pages, journal_desc_size, journal_tree_size;
- unsigned char *crypt_data = NULL;
+ unsigned char *crypt_data = NULL, *crypt_iv = NULL;
+ struct skcipher_request *req = NULL;
ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
@@ -2617,9 +2618,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
if (blocksize == 1) {
struct scatterlist *sg;
- SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
- unsigned char iv[ivsize];
- skcipher_request_set_tfm(req, ic->journal_crypt);
+
+ req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+ if (!req) {
+ *error = "Could not allocate crypt request";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+ if (!crypt_iv) {
+ *error = "Could not allocate iv";
+ r = -ENOMEM;
+ goto bad;
+ }
ic->journal_xor = dm_integrity_alloc_page_list(ic);
if (!ic->journal_xor) {
@@ -2641,9 +2653,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
sg_set_buf(&sg[i], va, PAGE_SIZE);
}
sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
- memset(iv, 0x00, ivsize);
+ memset(crypt_iv, 0x00, ivsize);
- skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
+ skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
init_completion(&comp.comp);
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
if (do_crypt(true, req, &comp))
@@ -2659,10 +2671,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
crypto_free_skcipher(ic->journal_crypt);
ic->journal_crypt = NULL;
} else {
- SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
- unsigned char iv[ivsize];
unsigned crypt_len = roundup(ivsize, blocksize);
+ req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+ if (!req) {
+ *error = "Could not allocate crypt request";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+ if (!crypt_iv) {
+ *error = "Could not allocate iv";
+ r = -ENOMEM;
+ goto bad;
+ }
+
crypt_data = kmalloc(crypt_len, GFP_KERNEL);
if (!crypt_data) {
*error = "Unable to allocate crypt data";
@@ -2670,8 +2694,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
goto bad;
}
- skcipher_request_set_tfm(req, ic->journal_crypt);
-
ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
if (!ic->journal_scatterlist) {
*error = "Unable to allocate sg list";
@@ -2695,12 +2717,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
struct skcipher_request *section_req;
__u32 section_le = cpu_to_le32(i);
- memset(iv, 0x00, ivsize);
+ memset(crypt_iv, 0x00, ivsize);
memset(crypt_data, 0x00, crypt_len);
memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
sg_init_one(&sg, crypt_data, crypt_len);
- skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
+ skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
init_completion(&comp.comp);
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
if (do_crypt(true, req, &comp))
@@ -2758,6 +2780,9 @@ retest_commit_id:
}
bad:
kfree(crypt_data);
+ kfree(crypt_iv);
+ skcipher_request_free(req);
+
return r;
}
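The pattern above replaces SKCIPHER_REQUEST_ON_STACK and a variable-length `unsigned char iv[ivsize]` stack array (VLAs were being removed tree-wide, and an on-stack request is unsafe for async tfms) with heap allocations released on the shared exit path. A condensed, hedged sketch of the shape, assuming tfm, sg, len and ivsize are given:

	static int example_crypt_once(struct crypto_skcipher *tfm,
				      struct scatterlist *sg,
				      unsigned int len, unsigned int ivsize)
	{
		struct skcipher_request *req = NULL;
		unsigned char *crypt_iv = NULL;
		int r = 0;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);	/* records the tfm */
		if (!req) {
			r = -ENOMEM;
			goto bad;
		}
		crypt_iv = kzalloc(ivsize, GFP_KERNEL);		/* no VLA, no stack IV */
		if (!crypt_iv) {
			r = -ENOMEM;
			goto bad;
		}
		skcipher_request_set_crypt(req, sg, sg, len, crypt_iv);
		/* ... issue the request and wait for completion ... */
	bad:
		kfree(crypt_iv);		/* NULL-safe */
		skcipher_request_free(req);	/* NULL-safe */
		return r;
	}

Note that skcipher_request_alloc() records the tfm itself, which is why the explicit skcipher_request_set_tfm() calls disappear from both branches.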
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index b4357ed4d541..a8d914d5abbe 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -58,8 +58,7 @@ struct dm_io_client *dm_io_client_create(void)
if (!client->pool)
goto bad;
- client->bios = bioset_create(min_ios, 0, (BIOSET_NEED_BVECS |
- BIOSET_NEED_RESCUER));
+ client->bios = bioset_create(min_ios, 0, BIOSET_NEED_BVECS);
if (!client->bios)
goto bad;
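A brief note on the flag change: BIOSET_NEED_RESCUER attaches a rescuer workqueue, which is only needed when a bio from the set might be allocated while another bio from the same set is still blocked in the same submission context (a potential mempool deadlock). dm-io's submission pattern does not hit that case, so bvec pools alone suffice. Sketch, with a hypothetical pool size:

	/* Sketch: a bioset for non-recursive submission. */
	struct bio_set *bs = bioset_create(16 /* min_ios */, 0 /* front_pad */,
					   BIOSET_NEED_BVECS);
	if (!bs)
		return -ENOMEM;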
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index e52676fa9832..3f6791afd3e4 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1929,10 +1929,10 @@ static int dm_release(struct inode *inode, struct file *filp)
return 0;
}
-static unsigned dm_poll(struct file *filp, poll_table *wait)
+static __poll_t dm_poll(struct file *filp, poll_table *wait)
{
struct dm_file *priv = filp->private_data;
- unsigned mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &dm_global_eventq, wait);
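__poll_t is the sparse-annotated type for poll masks; converting dm_poll() lets static checkers verify that only EPOLL* bits flow into the returned mask. A hedged sketch of the converted handler shape (the wait queue and readiness test here are hypothetical):

	static DECLARE_WAIT_QUEUE_HEAD(example_eventq);

	static __poll_t example_poll(struct file *filp, poll_table *wait)
	{
		__poll_t mask = 0;

		poll_wait(filp, &example_eventq, wait);
		if (example_event_pending(filp))	/* hypothetical readiness test */
			mask |= EPOLLIN | EPOLLRDNORM;
		return mask;
	}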
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index eb45cc3df31d..e6e7c686646d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -477,8 +477,10 @@ static int run_complete_job(struct kcopyd_job *job)
* If this is the master job, the sub jobs have already
* completed so we can free everything.
*/
- if (job->master_job == job)
+ if (job->master_job == job) {
+ mutex_destroy(&job->lock);
mempool_free(job, kc->job_pool);
+ }
fn(read_err, write_err, context);
if (atomic_dec_and_test(&kc->nr_jobs))
@@ -750,6 +752,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
* followed by SPLIT_COUNT sub jobs.
*/
job = mempool_alloc(kc->job_pool, GFP_NOIO);
+ mutex_init(&job->lock);
/*
* set up for the read.
@@ -811,7 +814,6 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
if (job->source.count <= SUB_JOB_SIZE)
dispatch_job(job);
else {
- mutex_init(&job->lock);
job->progress = 0;
split_job(job);
}
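Moving mutex_init() to the allocation site makes the lock's lifetime match the job's: every job now has an initialized lock, so the new mutex_destroy() in run_complete_job() is valid for non-split (master-only) jobs too, which previously never ran mutex_init(). The invariant, sketched with the fields from the hunks above:

	job = mempool_alloc(kc->job_pool, GFP_NOIO);
	mutex_init(&job->lock);			/* every job: init at alloc ... */
	/* ... dispatch, possibly split_job() ... */
	if (job->master_job == job) {
		mutex_destroy(&job->lock);	/* ... so destroy is always safe */
		mempool_free(job, kc->job_pool);
	}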
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 189badbeddaf..3362d866793b 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -594,7 +594,7 @@ static int log_mark(struct log_writes_c *lc, char *data)
return -ENOMEM;
}
- block->data = kstrndup(data, maxsize, GFP_KERNEL);
+ block->data = kstrndup(data, maxsize - 1, GFP_KERNEL);
if (!block->data) {
DMERR("Error copying mark data");
kfree(block);
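The off-by-one: kstrndup(s, n, gfp) copies at most n characters and then always appends a NUL, so the duplicate can occupy n + 1 bytes. If maxsize is the total buffer budget, the bound must be maxsize - 1. Hypothetical demo of the sizing rule:

	size_t maxsize = 8;		/* total budget in bytes */
	char *dup = kstrndup("0123456789", maxsize - 1, GFP_KERNEL);
	/* dup is "0123456" plus NUL: exactly maxsize bytes, never more. */
	kfree(dup);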
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index c8faa2b85842..7d3e572072f5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -64,36 +64,30 @@ struct priority_group {
/* Multipath context */
struct multipath {
- struct list_head list;
- struct dm_target *ti;
-
- const char *hw_handler_name;
- char *hw_handler_params;
+ unsigned long flags; /* Multipath state flags */
spinlock_t lock;
-
- unsigned nr_priority_groups;
- struct list_head priority_groups;
-
- wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
+ enum dm_queue_mode queue_mode;
struct pgpath *current_pgpath;
struct priority_group *current_pg;
struct priority_group *next_pg; /* Switch to this PG if set */
- unsigned long flags; /* Multipath state flags */
+ atomic_t nr_valid_paths; /* Total number of usable paths */
+ unsigned nr_priority_groups;
+ struct list_head priority_groups;
+ const char *hw_handler_name;
+ char *hw_handler_params;
+ wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
-
- atomic_t nr_valid_paths; /* Total number of usable paths */
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
atomic_t pg_init_count; /* Number of times pg_init called */
- enum dm_queue_mode queue_mode;
-
struct mutex work_mutex;
struct work_struct trigger_event;
+ struct dm_target *ti;
struct work_struct process_queued_bios;
struct bio_list queued_bios;
@@ -135,10 +129,10 @@ static struct pgpath *alloc_pgpath(void)
{
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
- if (pgpath) {
- pgpath->is_active = true;
- INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
- }
+ if (!pgpath)
+ return NULL;
+
+ pgpath->is_active = true;
return pgpath;
}
@@ -193,13 +187,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
if (m) {
INIT_LIST_HEAD(&m->priority_groups);
spin_lock_init(&m->lock);
- set_bit(MPATHF_QUEUE_IO, &m->flags);
atomic_set(&m->nr_valid_paths, 0);
- atomic_set(&m->pg_init_in_progress, 0);
- atomic_set(&m->pg_init_count, 0);
- m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
INIT_WORK(&m->trigger_event, trigger_event);
- init_waitqueue_head(&m->pg_init_wait);
mutex_init(&m->work_mutex);
m->queue_mode = DM_TYPE_NONE;
@@ -221,13 +210,26 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
else
m->queue_mode = DM_TYPE_REQUEST_BASED;
- } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
+
+ } else if (m->queue_mode == DM_TYPE_BIO_BASED ||
+ m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
INIT_WORK(&m->process_queued_bios, process_queued_bios);
- /*
- * bio-based doesn't support any direct scsi_dh management;
- * it just discovers if a scsi_dh is attached.
- */
- set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
+
+ if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ /*
+ * bio-based doesn't support any direct scsi_dh management;
+ * it just discovers if a scsi_dh is attached.
+ */
+ set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
+ }
+ }
+
+ if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+ set_bit(MPATHF_QUEUE_IO, &m->flags);
+ atomic_set(&m->pg_init_in_progress, 0);
+ atomic_set(&m->pg_init_count, 0);
+ m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
+ init_waitqueue_head(&m->pg_init_wait);
}
dm_table_set_type(ti->table, m->queue_mode);
@@ -246,6 +248,7 @@ static void free_multipath(struct multipath *m)
kfree(m->hw_handler_name);
kfree(m->hw_handler_params);
+ mutex_destroy(&m->work_mutex);
kfree(m);
}
@@ -264,29 +267,23 @@ static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
return dm_per_bio_data(bio, multipath_per_bio_data_size());
}
-static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
+static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
{
/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
- struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
void *bio_details = mpio + 1;
-
return bio_details;
}
-static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
- struct dm_bio_details **bio_details_p)
+static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
{
struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
- struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);
+ struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
- memset(mpio, 0, sizeof(*mpio));
- memset(bio_details, 0, sizeof(*bio_details));
- dm_bio_record(bio_details, bio);
+ mpio->nr_bytes = bio->bi_iter.bi_size;
+ mpio->pgpath = NULL;
+ *mpio_p = mpio;
- if (mpio_p)
- *mpio_p = mpio;
- if (bio_details_p)
- *bio_details_p = bio_details;
+ dm_bio_record(bio_details, bio);
}
/*-----------------------------------------------
@@ -340,6 +337,9 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
m->current_pg = pg;
+ if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+ return;
+
/* Must we initialise the PG first, and queue I/O till it's ready? */
if (m->hw_handler_name) {
set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
@@ -385,7 +385,8 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
unsigned bypassed = 1;
if (!atomic_read(&m->nr_valid_paths)) {
- clear_bit(MPATHF_QUEUE_IO, &m->flags);
+ if (m->queue_mode != DM_TYPE_NVME_BIO_BASED)
+ clear_bit(MPATHF_QUEUE_IO, &m->flags);
goto failed;
}
@@ -458,6 +459,38 @@ do { \
} while (0)
/*
+ * Check whether bios must be queued in the device-mapper core rather
+ * than here in the target.
+ *
+ * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
+ * the same value then we are not between multipath_presuspend()
+ * and multipath_resume() calls and we have no need to check
+ * for the DMF_NOFLUSH_SUSPENDING flag.
+ */
+static bool __must_push_back(struct multipath *m, unsigned long flags)
+{
+ return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
+ dm_noflush_suspending(m->ti));
+}
+
+/*
+ * The following functions use READ_ONCE to take one atomic snapshot
+ * of m->flags and so avoid taking the spinlock
+ */
+static bool must_push_back_rq(struct multipath *m)
+{
+ unsigned long flags = READ_ONCE(m->flags);
+ return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
+}
+
+static bool must_push_back_bio(struct multipath *m)
+{
+ unsigned long flags = READ_ONCE(m->flags);
+ return __must_push_back(m, flags);
+}
+
+/*
* Map cloned requests (request-based multipath)
*/
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
@@ -478,18 +511,16 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
pgpath = choose_pgpath(m, nr_bytes);
if (!pgpath) {
- if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+ if (must_push_back_rq(m))
return DM_MAPIO_DELAY_REQUEUE;
dm_report_EIO(m); /* Failed */
return DM_MAPIO_KILL;
} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
- if (pg_init_all_paths(m))
- return DM_MAPIO_DELAY_REQUEUE;
- return DM_MAPIO_REQUEUE;
+ pg_init_all_paths(m);
+ return DM_MAPIO_DELAY_REQUEUE;
}
- memset(mpio, 0, sizeof(*mpio));
mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes;
@@ -498,12 +529,23 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
if (IS_ERR(clone)) {
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
- bool queue_dying = blk_queue_dying(q);
- if (queue_dying) {
+ if (blk_queue_dying(q)) {
atomic_inc(&m->pg_init_in_progress);
activate_or_offline_path(pgpath);
+ return DM_MAPIO_DELAY_REQUEUE;
}
- return DM_MAPIO_DELAY_REQUEUE;
+
+ /*
+ * blk-mq's SCHED_RESTART can cover this requeue, so we
+ * needn't deal with it by DELAY_REQUEUE. More importantly,
+ * we have to return DM_MAPIO_REQUEUE so that blk-mq can
+ * get the queue busy feedback (via BLK_STS_RESOURCE),
+ * otherwise I/O merging can suffer.
+ */
+ if (q->mq_ops)
+ return DM_MAPIO_REQUEUE;
+ else
+ return DM_MAPIO_DELAY_REQUEUE;
}
clone->bio = clone->biotail = NULL;
clone->rq_disk = bdev->bd_disk;
@@ -525,9 +567,9 @@ static void multipath_release_clone(struct request *clone)
/*
* Map cloned bios (bio-based multipath)
*/
-static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
+
+static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{
- size_t nr_bytes = bio->bi_iter.bi_size;
struct pgpath *pgpath;
unsigned long flags;
bool queue_io;
@@ -536,7 +578,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
pgpath = READ_ONCE(m->current_pgpath);
queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
if (!pgpath || !queue_io)
- pgpath = choose_pgpath(m, nr_bytes);
+ pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
if ((pgpath && queue_io) ||
(!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
@@ -544,23 +586,70 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
spin_lock_irqsave(&m->lock, flags);
bio_list_add(&m->queued_bios, bio);
spin_unlock_irqrestore(&m->lock, flags);
+
/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
pg_init_all_paths(m);
else if (!queue_io)
queue_work(kmultipathd, &m->process_queued_bios);
- return DM_MAPIO_SUBMITTED;
+
+ return ERR_PTR(-EAGAIN);
}
+ return pgpath;
+}
+
+static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio)
+{
+ struct pgpath *pgpath;
+ unsigned long flags;
+
+ /* Do we need to select a new pgpath? */
+ /*
+ * FIXME: currently only switching path if no path (due to failure, etc)
+ * - which negates the point of using a path selector
+ */
+ pgpath = READ_ONCE(m->current_pgpath);
+ if (!pgpath)
+ pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
+
if (!pgpath) {
- if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+ if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+ /* Queue for the daemon to resubmit */
+ spin_lock_irqsave(&m->lock, flags);
+ bio_list_add(&m->queued_bios, bio);
+ spin_unlock_irqrestore(&m->lock, flags);
+ queue_work(kmultipathd, &m->process_queued_bios);
+
+ return ERR_PTR(-EAGAIN);
+ }
+ return NULL;
+ }
+
+ return pgpath;
+}
+
+static int __multipath_map_bio(struct multipath *m, struct bio *bio,
+ struct dm_mpath_io *mpio)
+{
+ struct pgpath *pgpath;
+
+ if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+ pgpath = __map_bio_nvme(m, bio);
+ else
+ pgpath = __map_bio(m, bio);
+
+ if (IS_ERR(pgpath))
+ return DM_MAPIO_SUBMITTED;
+
+ if (!pgpath) {
+ if (must_push_back_bio(m))
return DM_MAPIO_REQUEUE;
dm_report_EIO(m);
return DM_MAPIO_KILL;
}
mpio->pgpath = pgpath;
- mpio->nr_bytes = nr_bytes;
bio->bi_status = 0;
bio_set_dev(bio, pgpath->path.dev->bdev);
@@ -569,7 +658,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
if (pgpath->pg->ps.type->start_io)
pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
&pgpath->path,
- nr_bytes);
+ mpio->nr_bytes);
return DM_MAPIO_REMAPPED;
}
@@ -578,8 +667,7 @@ static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
struct multipath *m = ti->private;
struct dm_mpath_io *mpio = NULL;
- multipath_init_per_bio_data(bio, &mpio, NULL);
-
+ multipath_init_per_bio_data(bio, &mpio);
return __multipath_map_bio(m, bio, mpio);
}
@@ -587,7 +675,8 @@ static void process_queued_io_list(struct multipath *m)
{
if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
- else if (m->queue_mode == DM_TYPE_BIO_BASED)
+ else if (m->queue_mode == DM_TYPE_BIO_BASED ||
+ m->queue_mode == DM_TYPE_NVME_BIO_BASED)
queue_work(kmultipathd, &m->process_queued_bios);
}
@@ -617,7 +706,9 @@ static void process_queued_bios(struct work_struct *work)
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
- r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
+ struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
+ dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
+ r = __multipath_map_bio(m, bio, mpio);
switch (r) {
case DM_MAPIO_KILL:
bio->bi_status = BLK_STS_IOERR;
@@ -651,8 +742,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
(save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
(!save_old_value && queue_if_no_path));
- assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
- queue_if_no_path || dm_noflush_suspending(m->ti));
+ assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
spin_unlock_irqrestore(&m->lock, flags);
if (!queue_if_no_path) {
@@ -721,34 +811,11 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
return 0;
}
-static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
- struct dm_target *ti)
+static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **error)
{
- int r;
- struct pgpath *p;
- struct multipath *m = ti->private;
- struct request_queue *q = NULL;
+ struct request_queue *q = bdev_get_queue(bdev);
const char *attached_handler_name;
-
- /* we need at least a path arg */
- if (as->argc < 1) {
- ti->error = "no device given";
- return ERR_PTR(-EINVAL);
- }
-
- p = alloc_pgpath();
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
- &p->path.dev);
- if (r) {
- ti->error = "error getting device";
- goto bad;
- }
-
- if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
- q = bdev_get_queue(p->path.dev->bdev);
+ int r;
if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
@@ -780,26 +847,59 @@ retain:
char b[BDEVNAME_SIZE];
printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
- bdevname(p->path.dev->bdev, b));
+ bdevname(bdev, b));
goto retain;
}
if (r < 0) {
- ti->error = "error attaching hardware handler";
- dm_put_device(ti, p->path.dev);
- goto bad;
+ *error = "error attaching hardware handler";
+ return r;
}
if (m->hw_handler_params) {
r = scsi_dh_set_params(q, m->hw_handler_params);
if (r < 0) {
- ti->error = "unable to set hardware "
- "handler parameters";
- dm_put_device(ti, p->path.dev);
- goto bad;
+ *error = "unable to set hardware handler parameters";
+ return r;
}
}
}
+ return 0;
+}
+
+static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
+ struct dm_target *ti)
+{
+ int r;
+ struct pgpath *p;
+ struct multipath *m = ti->private;
+
+ /* we need at least a path arg */
+ if (as->argc < 1) {
+ ti->error = "no device given";
+ return ERR_PTR(-EINVAL);
+ }
+
+ p = alloc_pgpath();
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
+ &p->path.dev);
+ if (r) {
+ ti->error = "error getting device";
+ goto bad;
+ }
+
+ if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+ INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
+ r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error);
+ if (r) {
+ dm_put_device(ti, p->path.dev);
+ goto bad;
+ }
+ }
+
r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
if (r) {
dm_put_device(ti, p->path.dev);
@@ -807,7 +907,6 @@ retain:
}
return p;
-
bad:
free_pgpath(p);
return ERR_PTR(r);
@@ -902,7 +1001,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
if (!hw_argc)
return 0;
- if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ if (m->queue_mode == DM_TYPE_BIO_BASED ||
+ m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
dm_consume_args(as, hw_argc);
DMERR("bio-based multipath doesn't allow hardware handler args");
return 0;
@@ -991,6 +1091,8 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
if (!strcasecmp(queue_mode_name, "bio"))
m->queue_mode = DM_TYPE_BIO_BASED;
+ else if (!strcasecmp(queue_mode_name, "nvme"))
+ m->queue_mode = DM_TYPE_NVME_BIO_BASED;
else if (!strcasecmp(queue_mode_name, "rq"))
m->queue_mode = DM_TYPE_REQUEST_BASED;
else if (!strcasecmp(queue_mode_name, "mq"))
@@ -1091,7 +1193,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;
ti->num_write_zeroes_bios = 1;
- if (m->queue_mode == DM_TYPE_BIO_BASED)
+ if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED)
ti->per_io_data_size = multipath_per_bio_data_size();
else
ti->per_io_data_size = sizeof(struct dm_mpath_io);
@@ -1120,16 +1222,19 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
static void flush_multipath_work(struct multipath *m)
{
- set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
- smp_mb__after_atomic();
+ if (m->hw_handler_name) {
+ set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ smp_mb__after_atomic();
+
+ flush_workqueue(kmpath_handlerd);
+ multipath_wait_for_pg_init_completion(m);
+
+ clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ smp_mb__after_atomic();
+ }
- flush_workqueue(kmpath_handlerd);
- multipath_wait_for_pg_init_completion(m);
flush_workqueue(kmultipathd);
flush_work(&m->trigger_event);
-
- clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
- smp_mb__after_atomic();
}
static void multipath_dtr(struct dm_target *ti)
@@ -1444,21 +1549,6 @@ static void activate_path_work(struct work_struct *work)
activate_or_offline_path(pgpath);
}
-static int noretry_error(blk_status_t error)
-{
- switch (error) {
- case BLK_STS_NOTSUPP:
- case BLK_STS_NOSPC:
- case BLK_STS_TARGET:
- case BLK_STS_NEXUS:
- case BLK_STS_MEDIUM:
- return 1;
- }
-
- /* Anything else could be a path failure, so should be retried */
- return 0;
-}
-
static int multipath_end_io(struct dm_target *ti, struct request *clone,
blk_status_t error, union map_info *map_context)
{
@@ -1477,16 +1567,19 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
* request into dm core, which will remake a clone request and
* clone bios for it and resubmit it later.
*/
- if (error && !noretry_error(error)) {
+ if (error && blk_path_error(error)) {
struct multipath *m = ti->private;
- r = DM_ENDIO_REQUEUE;
+ if (error == BLK_STS_RESOURCE)
+ r = DM_ENDIO_DELAY_REQUEUE;
+ else
+ r = DM_ENDIO_REQUEUE;
if (pgpath)
fail_path(pgpath);
if (atomic_read(&m->nr_valid_paths) == 0 &&
- !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+ !must_push_back_rq(m)) {
if (error == BLK_STS_IOERR)
dm_report_EIO(m);
/* complete with the original error */
@@ -1505,7 +1598,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
}
static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
- blk_status_t *error)
+ blk_status_t *error)
{
struct multipath *m = ti->private;
struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
@@ -1513,7 +1606,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
unsigned long flags;
int r = DM_ENDIO_DONE;
- if (!*error || noretry_error(*error))
+ if (!*error || !blk_path_error(*error))
goto done;
if (pgpath)
@@ -1521,14 +1614,15 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
if (atomic_read(&m->nr_valid_paths) == 0 &&
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
- dm_report_EIO(m);
- *error = BLK_STS_IOERR;
+ if (must_push_back_bio(m)) {
+ r = DM_ENDIO_REQUEUE;
+ } else {
+ dm_report_EIO(m);
+ *error = BLK_STS_IOERR;
+ }
goto done;
}
- /* Queue for the daemon to resubmit */
- dm_bio_restore(get_bio_details_from_bio(clone), clone);
-
spin_lock_irqsave(&m->lock, flags);
bio_list_add(&m->queued_bios, clone);
spin_unlock_irqrestore(&m->lock, flags);
@@ -1636,6 +1730,9 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
case DM_TYPE_BIO_BASED:
DMEMIT("queue_mode bio ");
break;
+ case DM_TYPE_NVME_BIO_BASED:
+ DMEMIT("queue_mode nvme ");
+ break;
case DM_TYPE_MQ_REQUEST_BASED:
DMEMIT("queue_mode mq ");
break;
@@ -1957,13 +2054,6 @@ static int __init dm_multipath_init(void)
{
int r;
- r = dm_register_target(&multipath_target);
- if (r < 0) {
- DMERR("request-based register failed %d", r);
- r = -EINVAL;
- goto bad_register_target;
- }
-
kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
if (!kmultipathd) {
DMERR("failed to create workqueue kmpathd");
@@ -1985,13 +2075,20 @@ static int __init dm_multipath_init(void)
goto bad_alloc_kmpath_handlerd;
}
+ r = dm_register_target(&multipath_target);
+ if (r < 0) {
+ DMERR("request-based register failed %d", r);
+ r = -EINVAL;
+ goto bad_register_target;
+ }
+
return 0;
+bad_register_target:
+ destroy_workqueue(kmpath_handlerd);
bad_alloc_kmpath_handlerd:
destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
- dm_unregister_target(&multipath_target);
-bad_register_target:
return r;
}
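Among the dm-mpath changes, the must_push_back helpers introduce a lockless idiom worth isolating: take one READ_ONCE() snapshot of m->flags and test every bit against that local copy, so related bits are always evaluated against a single consistent state without holding m->lock. Simplified from the helpers above:

	static bool example_flags_consistent(struct multipath *m)
	{
		unsigned long flags = READ_ONCE(m->flags);	/* one snapshot */

		/* Both bits come from the same snapshot, so a concurrent
		 * writer can never be observed half-way through updating
		 * the pair. */
		return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) ==
		       test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags);
	}

The same file also reorders dm_multipath_init() so the target is registered only after its workqueues exist, closing a window in which a freshly registered target could be instantiated before kmultipathd was allocated.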
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
index 23f178641794..969c4f1a3633 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-queue-length.c
@@ -195,9 +195,6 @@ static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
if (list_empty(&s->valid_paths))
goto out;
- /* Change preferred (first in list) path to evenly balance. */
- list_move_tail(s->valid_paths.next, &s->valid_paths);
-
list_for_each_entry(pi, &s->valid_paths, list) {
if (!best ||
(atomic_read(&pi->qlen) < atomic_read(&best->qlen)))
@@ -210,6 +207,9 @@ static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
if (!best)
goto out;
+ /* Move most recently used to least preferred to evenly balance. */
+ list_move_tail(&best->list, &s->valid_paths);
+
ret = best->path;
out:
spin_unlock_irqrestore(&s->lock, flags);
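The old code rotated the head of valid_paths before scanning, which biased the scan itself; the new code scans the whole list and only then demotes the winner to the tail, so equally loaded paths still round-robin without skewing the comparison (dm-service-time below receives the identical treatment). Sketch of select-then-rotate, names as in the hunk:

	/* Pick the least-loaded path, then demote it. */
	list_for_each_entry(pi, &s->valid_paths, list)
		if (!best || atomic_read(&pi->qlen) < atomic_read(&best->qlen))
			best = pi;

	if (best)
		list_move_tail(&best->list, &s->valid_paths);	/* tail = least preferred */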
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 6319d846e0ad..7ef469e902c6 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -29,6 +29,9 @@
*/
#define MIN_RAID456_JOURNAL_SPACE (4*2048)
+/* Global list of all raid sets */
+static LIST_HEAD(raid_sets);
+
static bool devices_handle_discard_safely = false;
/*
@@ -105,8 +108,6 @@ struct raid_dev {
#define CTR_FLAG_JOURNAL_DEV (1 << __CTR_FLAG_JOURNAL_DEV)
#define CTR_FLAG_JOURNAL_MODE (1 << __CTR_FLAG_JOURNAL_MODE)
-#define RESUME_STAY_FROZEN_FLAGS (CTR_FLAG_DELTA_DISKS | CTR_FLAG_DATA_OFFSET)
-
/*
* Definitions of various constructor flags to
* be used in checks of valid / invalid flags
@@ -209,6 +210,8 @@ struct raid_dev {
#define RT_FLAG_UPDATE_SBS 3
#define RT_FLAG_RESHAPE_RS 4
#define RT_FLAG_RS_SUSPENDED 5
+#define RT_FLAG_RS_IN_SYNC 6
+#define RT_FLAG_RS_RESYNCING 7
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -224,8 +227,8 @@ struct rs_layout {
struct raid_set {
struct dm_target *ti;
+ struct list_head list;
- uint32_t bitmap_loaded;
uint32_t stripe_cache_entries;
unsigned long ctr_flags;
unsigned long runtime_flags;
@@ -270,6 +273,19 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
mddev->new_chunk_sectors = l->new_chunk_sectors;
}
+/* Find any raid_set in active slot for @rs on global list */
+static struct raid_set *rs_find_active(struct raid_set *rs)
+{
+ struct raid_set *r;
+ struct mapped_device *md = dm_table_get_md(rs->ti->table);
+
+ list_for_each_entry(r, &raid_sets, list)
+ if (r != rs && dm_table_get_md(r->ti->table) == md)
+ return r;
+
+ return NULL;
+}
+
/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT 0
#define ALGORITHM_RAID10_NEAR 1
@@ -572,7 +588,7 @@ static const char *raid10_md_layout_to_format(int layout)
}
/* Return md raid10 algorithm for @name */
-static int raid10_name_to_format(const char *name)
+static const int raid10_name_to_format(const char *name)
{
if (!strcasecmp(name, "near"))
return ALGORITHM_RAID10_NEAR;
@@ -675,15 +691,11 @@ static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
return NULL;
}
-/*
- * Conditionally change bdev capacity of @rs
- * in case of a disk add/remove reshape
- */
-static void rs_set_capacity(struct raid_set *rs)
+/* Adjust rdev sectors */
+static void rs_set_rdev_sectors(struct raid_set *rs)
{
struct mddev *mddev = &rs->md;
struct md_rdev *rdev;
- struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
/*
* raid10 sets rdev->sector to the device size, which
@@ -692,8 +704,16 @@ static void rs_set_capacity(struct raid_set *rs)
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags))
rdev->sectors = mddev->dev_sectors;
+}
- set_capacity(gendisk, mddev->array_sectors);
+/*
+ * Change bdev capacity of @rs in case of a disk add/remove reshape
+ */
+static void rs_set_capacity(struct raid_set *rs)
+{
+ struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
+
+ set_capacity(gendisk, rs->md.array_sectors);
revalidate_disk(gendisk);
}
@@ -744,6 +764,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
mddev_init(&rs->md);
+ INIT_LIST_HEAD(&rs->list);
rs->raid_disks = raid_devs;
rs->delta_disks = 0;
@@ -761,6 +782,9 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
for (i = 0; i < raid_devs; i++)
md_rdev_init(&rs->dev[i].rdev);
+ /* Add @rs to global list. */
+ list_add(&rs->list, &raid_sets);
+
/*
* Remaining items to be initialized by further RAID params:
* rs->md.persistent
@@ -773,6 +797,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
return rs;
}
+/* Free all @rs allocations and remove it from global list. */
static void raid_set_free(struct raid_set *rs)
{
int i;
@@ -790,6 +815,8 @@ static void raid_set_free(struct raid_set *rs)
dm_put_device(rs->ti, rs->dev[i].data_dev);
}
+ list_del(&rs->list);
+
kfree(rs);
}
@@ -1002,7 +1029,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
!rs->dev[i].rdev.sb_page)
rebuild_cnt++;
- switch (rs->raid_type->level) {
+ switch (rs->md.level) {
case 0:
break;
case 1:
@@ -1017,6 +1044,11 @@ static int validate_raid_redundancy(struct raid_set *rs)
break;
case 10:
copies = raid10_md_layout_to_copies(rs->md.new_layout);
+ if (copies < 2) {
+ DMERR("Bogus raid10 data copies < 2!");
+ return -EINVAL;
+ }
+
if (rebuild_cnt < copies)
break;
@@ -1576,6 +1608,24 @@ static sector_t __rdev_sectors(struct raid_set *rs)
return 0;
}
+/* Check that calculated dev_sectors fits all component devices. */
+static int _check_data_dev_sectors(struct raid_set *rs)
+{
+ sector_t ds = ~0;
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, &rs->md)
+ if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
+ ds = min(ds, to_sector(i_size_read(rdev->bdev->bd_inode)));
+ if (ds < rs->md.dev_sectors) {
+ rs->ti->error = "Component device(s) too small";
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
/* Calculate the sectors per device and per array used for @rs */
static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
{
@@ -1625,7 +1675,7 @@ static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
mddev->array_sectors = array_sectors;
mddev->dev_sectors = dev_sectors;
- return 0;
+ return _check_data_dev_sectors(rs);
bad:
rs->ti->error = "Target length not divisible by number of data devices";
return -EINVAL;
@@ -1674,8 +1724,11 @@ static void do_table_event(struct work_struct *ws)
struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
smp_rmb(); /* Make sure we access the most recent mddev properties */
- if (!rs_is_reshaping(rs))
+ if (!rs_is_reshaping(rs)) {
+ if (rs_is_raid10(rs))
+ rs_set_rdev_sectors(rs);
rs_set_capacity(rs);
+ }
dm_table_event(rs->ti->table);
}
@@ -1860,7 +1913,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
if (rs_takeover_requested(rs))
return false;
- if (!mddev->level)
+ if (rs_is_raid0(rs))
return false;
change = mddev->new_layout != mddev->layout ||
@@ -1868,7 +1921,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
rs->delta_disks;
/* Historical case to support raid1 reshape without delta disks */
- if (mddev->level == 1) {
+ if (rs_is_raid1(rs)) {
if (rs->delta_disks)
return !!rs->delta_disks;
@@ -1876,7 +1929,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
mddev->raid_disks != rs->raid_disks;
}
- if (mddev->level == 10)
+ if (rs_is_raid10(rs))
return change &&
!__is_raid10_far(mddev->new_layout) &&
rs->delta_disks >= 0;
@@ -2340,7 +2393,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
DMERR("new device%s provided without 'rebuild'",
new_devs > 1 ? "s" : "");
return -EINVAL;
- } else if (rs_is_recovering(rs)) {
+ } else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
(unsigned long long) mddev->recovery_cp);
return -EINVAL;
@@ -2640,12 +2693,19 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
* Make sure we got a minimum amount of free sectors per device
*/
if (rs->data_offset &&
- to_sector(i_size_read(rdev->bdev->bd_inode)) - rdev->sectors < MIN_FREE_RESHAPE_SPACE) {
+ to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
rs->ti->error = data_offset ? "No space for forward reshape" :
"No space for backward reshape";
return -ENOSPC;
}
out:
+ /*
+ * Raise recovery_cp in case data_offset != 0 to
+ * avoid false recovery positives in the constructor.
+ */
+ if (rs->md.recovery_cp < rs->md.dev_sectors)
+ rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
+
/* Adjust data offsets on all rdevs but on any raid4/5/6 journal device */
rdev_for_each(rdev, &rs->md) {
if (!test_bit(Journal, &rdev->flags)) {
@@ -2682,14 +2742,14 @@ static int rs_setup_takeover(struct raid_set *rs)
sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;
if (rt_is_raid10(rs->raid_type)) {
- if (mddev->level == 0) {
+ if (rs_is_raid0(rs)) {
/* Userpace reordered disks -> adjust raid_disk indexes */
__reorder_raid_disk_indexes(rs);
/* raid0 -> raid10_far layout */
mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
rs->raid10_copies);
- } else if (mddev->level == 1)
+ } else if (rs_is_raid1(rs))
/* raid1 -> raid10_near layout */
mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
rs->raid_disks);
@@ -2777,6 +2837,23 @@ static int rs_prepare_reshape(struct raid_set *rs)
return 0;
}
+/* Get reshape sectors from data_offsets or raid set */
+static sector_t _get_reshape_sectors(struct raid_set *rs)
+{
+ struct md_rdev *rdev;
+ sector_t reshape_sectors = 0;
+
+ rdev_for_each(rdev, &rs->md)
+ if (!test_bit(Journal, &rdev->flags)) {
+ reshape_sectors = (rdev->data_offset > rdev->new_data_offset) ?
+ rdev->data_offset - rdev->new_data_offset :
+ rdev->new_data_offset - rdev->data_offset;
+ break;
+ }
+
+ return max(reshape_sectors, (sector_t) rs->data_offset);
+}
+
/*
*
* - change raid layout
@@ -2788,6 +2865,7 @@ static int rs_setup_reshape(struct raid_set *rs)
{
int r = 0;
unsigned int cur_raid_devs, d;
+ sector_t reshape_sectors = _get_reshape_sectors(rs);
struct mddev *mddev = &rs->md;
struct md_rdev *rdev;
@@ -2804,13 +2882,13 @@ static int rs_setup_reshape(struct raid_set *rs)
/*
* Adjust array size:
*
- * - in case of adding disks, array size has
+ * - in case of adding disk(s), array size has
* to grow after the disk adding reshape,
* which'll happen in the event handler;
* reshape will happen forward, so space has to
* be available at the beginning of each disk
*
- * - in case of removing disks, array size
+ * - in case of removing disk(s), array size
* has to shrink before starting the reshape,
* which'll happen here;
* reshape will happen backward, so space has to
@@ -2841,7 +2919,7 @@ static int rs_setup_reshape(struct raid_set *rs)
rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
}
- mddev->reshape_backwards = 0; /* adding disks -> forward reshape */
+ mddev->reshape_backwards = 0; /* adding disk(s) -> forward reshape */
/* Remove disk(s) */
} else if (rs->delta_disks < 0) {
@@ -2874,6 +2952,15 @@ static int rs_setup_reshape(struct raid_set *rs)
mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
}
+ /*
+ * Adjust device size for forward reshape
+ * because md_finish_reshape() reduces it.
+ */
+ if (!mddev->reshape_backwards)
+ rdev_for_each(rdev, &rs->md)
+ if (!test_bit(Journal, &rdev->flags))
+ rdev->sectors += reshape_sectors;
+
return r;
}
@@ -2890,7 +2977,7 @@ static void configure_discard_support(struct raid_set *rs)
/*
* XXX: RAID level 4,5,6 require zeroing for safety.
*/
- raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
+ raid456 = rs_is_raid456(rs);
for (i = 0; i < rs->raid_disks; i++) {
struct request_queue *q;
@@ -2915,7 +3002,7 @@ static void configure_discard_support(struct raid_set *rs)
* RAID1 and RAID10 personalities require bio splitting,
* RAID0/4/5/6 don't and process large discard bios properly.
*/
- ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
+ ti->split_discard_bios = !!(rs_is_raid1(rs) || rs_is_raid10(rs));
ti->num_discard_bios = 1;
}
@@ -2935,10 +3022,10 @@ static void configure_discard_support(struct raid_set *rs)
static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
- bool resize;
+ bool resize = false;
struct raid_type *rt;
unsigned int num_raid_params, num_raid_devs;
- sector_t calculated_dev_sectors, rdev_sectors;
+ sector_t calculated_dev_sectors, rdev_sectors, reshape_sectors;
struct raid_set *rs = NULL;
const char *arg;
struct rs_layout rs_layout;
@@ -3021,7 +3108,10 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- resize = calculated_dev_sectors != rdev_sectors;
+
+ reshape_sectors = _get_reshape_sectors(rs);
+ if (calculated_dev_sectors != rdev_sectors)
+ resize = calculated_dev_sectors != (reshape_sectors ? rdev_sectors - reshape_sectors : rdev_sectors);
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
@@ -3105,19 +3195,22 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- /*
- * We can only prepare for a reshape here, because the
- * raid set needs to run to provide the repective reshape
- * check functions via its MD personality instance.
- *
- * So do the reshape check after md_run() succeeded.
- */
- r = rs_prepare_reshape(rs);
- if (r)
- return r;
+ /* Out-of-place space has to be available to allow for a reshape unless raid1! */
+ if (reshape_sectors || rs_is_raid1(rs)) {
+ /*
+ * We can only prepare for a reshape here, because the
+ * raid set needs to run to provide the respective reshape
+ * check functions via its MD personality instance.
+ *
+ * So do the reshape check after md_run() succeeded.
+ */
+ r = rs_prepare_reshape(rs);
+ if (r)
+ return r;
- /* Reshaping ain't recovery, so disable recovery */
- rs_setup_recovery(rs, MaxSector);
+ /* Reshaping ain't recovery, so disable recovery */
+ rs_setup_recovery(rs, MaxSector);
+ }
rs_set_cur(rs);
} else {
/* May not set recovery when a device rebuild is requested */
@@ -3144,13 +3237,20 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mddev_lock_nointr(&rs->md);
r = md_run(&rs->md);
rs->md.in_sync = 0; /* Assume already marked dirty */
-
if (r) {
ti->error = "Failed to run raid array";
mddev_unlock(&rs->md);
goto bad;
}
+ r = md_start(&rs->md);
+
+ if (r) {
+ ti->error = "Failed to start raid array";
+ mddev_unlock(&rs->md);
+ goto bad_md_start;
+ }
+
rs->callbacks.congested_fn = raid_is_congested;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);
@@ -3198,6 +3298,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mddev_unlock(&rs->md);
return 0;
+bad_md_start:
bad_journal_mode_set:
bad_stripe_cache:
bad_check_reshape:
@@ -3239,25 +3340,27 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
}
/* Return string describing the current sync action of @mddev */
-static const char *decipher_sync_action(struct mddev *mddev)
+static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery)
{
- if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_FROZEN, &recovery))
return "frozen";
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
- if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ /* The MD sync thread can be done with io but still be running */
+ if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
+ (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
+ (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
+ if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
return "reshape";
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
- if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
+ if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
return "resync";
- else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+ else if (test_bit(MD_RECOVERY_CHECK, &recovery))
return "check";
return "repair";
}
- if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_RECOVER, &recovery))
return "recover";
}
@@ -3274,7 +3377,7 @@ static const char *decipher_sync_action(struct mddev *mddev)
* 'A' = Alive and in-sync raid set component _or_ alive raid4/5/6 'write_through' journal device
* '-' = Non-existing device (i.e. uspace passed '- -' into the ctr)
*/
-static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev, bool array_in_sync)
+static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
{
if (!rdev->bdev)
return "-";
@@ -3282,85 +3385,108 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev,
return "D";
else if (test_bit(Journal, &rdev->flags))
return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ? "A" : "a";
- else if (!array_in_sync || !test_bit(In_sync, &rdev->flags))
+ else if (test_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags) ||
+ (!test_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags) &&
+ !test_bit(In_sync, &rdev->flags)))
return "a";
else
return "A";
}
-/* Helper to return resync/reshape progress for @rs and @array_in_sync */
-static sector_t rs_get_progress(struct raid_set *rs,
- sector_t resync_max_sectors, bool *array_in_sync)
+/* Helper to return resync/reshape progress for @rs and to set the raid set's in-sync / resyncing runtime flags */
+static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
+ sector_t resync_max_sectors)
{
- sector_t r, curr_resync_completed;
+ sector_t r;
struct mddev *mddev = &rs->md;
- curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
- *array_in_sync = false;
+ clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+ clear_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
if (rs_is_raid0(rs)) {
r = resync_max_sectors;
- *array_in_sync = true;
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
} else {
- r = mddev->reshape_position;
-
- /* Reshape is relative to the array size */
- if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
- r != MaxSector) {
- if (r == MaxSector) {
- *array_in_sync = true;
- r = resync_max_sectors;
- } else {
- /* Got to reverse on backward reshape */
- if (mddev->reshape_backwards)
- r = mddev->array_sectors - r;
-
- /* Devide by # of data stripes */
- sector_div(r, mddev_data_stripes(rs));
- }
-
- /* Sync is relative to the component device size */
- } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- r = curr_resync_completed;
+ if (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+ test_bit(MD_RECOVERY_RUNNING, &recovery))
+ r = mddev->curr_resync_completed;
else
r = mddev->recovery_cp;
- if ((r == MaxSector) ||
- (test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
- (mddev->curr_resync_completed == resync_max_sectors))) {
+ if (r >= resync_max_sectors &&
+ (!test_bit(MD_RECOVERY_REQUESTED, &recovery) ||
+ (!test_bit(MD_RECOVERY_FROZEN, &recovery) &&
+ !test_bit(MD_RECOVERY_NEEDED, &recovery) &&
+ !test_bit(MD_RECOVERY_RUNNING, &recovery)))) {
/*
* Sync complete.
*/
- *array_in_sync = true;
- r = resync_max_sectors;
- } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ /* In case we have finished recovering, the array is in sync. */
+ if (test_bit(MD_RECOVERY_RECOVER, &recovery))
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+
+ } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) {
+ /*
+ * In case we are recovering, the array is not in sync
+ * and health chars should show the recovering legs.
+ */
+ ;
+
+ } else if (test_bit(MD_RECOVERY_SYNC, &recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+ /*
+ * If "resync" is occurring, the raid set
+ * is or may be out of sync hence the health
+ * characters shall be 'a'.
+ */
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+
+ } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+ /*
+ * If "reshape" is occurring, the raid set
+ * is or may be out of sync hence the health
+ * characters shall be 'a'.
+ */
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+
+ } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
/*
* If "check" or "repair" is occurring, the raid set has
* undergone an initial sync and the health characters
* should not be 'a' anymore.
*/
- *array_in_sync = true;
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+
} else {
struct md_rdev *rdev;
/*
+ * We are idle and recovery is needed, prevent 'A' chars race
+ * caused by components still set to in-sync by the constructor.
+ */
+ if (test_bit(MD_RECOVERY_NEEDED, &recovery))
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+
+ /*
* The raid set may be doing an initial sync, or it may
* be rebuilding individual components. If all the
* devices are In_sync, then it is the raid set that is
* being initialized.
*/
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags) &&
- !test_bit(In_sync, &rdev->flags))
- *array_in_sync = true;
-#if 0
- r = 0; /* HM FIXME: TESTME: https://bugzilla.redhat.com/show_bug.cgi?id=1210637 ? */
-#endif
+ !test_bit(In_sync, &rdev->flags)) {
+ clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+ break;
+ }
}
}
- return r;
+ return min(r, resync_max_sectors);
}
/* Helper to return @dev name or "-" if !@dev */
@@ -3376,7 +3502,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
struct mddev *mddev = &rs->md;
struct r5conf *conf = mddev->private;
int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
- bool array_in_sync;
+ unsigned long recovery;
unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
unsigned int sz = 0;
unsigned int rebuild_disks;
@@ -3396,17 +3522,18 @@ static void raid_status(struct dm_target *ti, status_type_t type,
/* Access most recent mddev properties for status output */
smp_rmb();
+ recovery = rs->md.recovery;
/* Get sensible max sectors even if raid set not yet started */
resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
mddev->resync_max_sectors : mddev->dev_sectors;
- progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync);
+ progress = rs_get_progress(rs, recovery, resync_max_sectors);
resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
atomic64_read(&mddev->resync_mismatches) : 0;
- sync_action = decipher_sync_action(&rs->md);
+ sync_action = decipher_sync_action(&rs->md, recovery);
/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
for (i = 0; i < rs->raid_disks; i++)
- DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev, array_in_sync));
+ DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev));
/*
* In-sync/Reshape ratio:
@@ -3457,7 +3584,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
* v1.10.0+:
*/
DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ?
- __raid_dev_status(rs, &rs->journal_dev.rdev, 0) : "-");
+ __raid_dev_status(rs, &rs->journal_dev.rdev) : "-");
break;
case STATUSTYPE_TABLE:
@@ -3613,24 +3740,19 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
}
-static void raid_presuspend(struct dm_target *ti)
-{
- struct raid_set *rs = ti->private;
-
- md_stop_writes(&rs->md);
-}
-
static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+ /* Writes have to be stopped before suspending to avoid deadlocks. */
+ if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
+ md_stop_writes(&rs->md);
+
mddev_lock_nointr(&rs->md);
mddev_suspend(&rs->md);
mddev_unlock(&rs->md);
}
-
- rs->md.ro = 1;
}
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
@@ -3807,10 +3929,33 @@ static int raid_preresume(struct dm_target *ti)
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
- /* This is a resume after a suspend of the set -> it's already started */
+ /* This is a resume after a suspend of the set -> it's already started. */
if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
return 0;
+ if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+ struct raid_set *rs_active = rs_find_active(rs);
+
+ if (rs_active) {
+ /*
+ * In case no rebuilds have been requested
+ * and an active table slot exists, copy
+ * current resynchronization completed and
+ * reshape position pointers across from
+ * suspended raid set in the active slot.
+ *
+ * This resumes the new mapping at current
+ * offsets to continue recovery/reshape without
+ * necessarily redoing a raid set partially or
+ * causing data corruption in case of a reshape.
+ */
+ if (rs_active->md.curr_resync_completed != MaxSector)
+ mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
+ if (rs_active->md.reshape_position != MaxSector)
+ mddev->reshape_position = rs_active->md.reshape_position;
+ }
+ }
+
/*
* The superblocks need to be updated on disk if the
* array is new or new devices got added (thus zeroed
@@ -3842,11 +3987,10 @@ static int raid_preresume(struct dm_target *ti)
mddev->resync_min = mddev->recovery_cp;
}
- rs_set_capacity(rs);
-
/* Check for any reshape request unless new raid set */
- if (test_and_clear_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
+ if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
/* Initiate a reshape. */
+ rs_set_rdev_sectors(rs);
mddev_lock_nointr(mddev);
r = rs_start_reshape(rs);
mddev_unlock(mddev);
@@ -3872,21 +4016,15 @@ static void raid_resume(struct dm_target *ti)
attempt_restore_of_faulty_devices(rs);
}
- mddev->ro = 0;
- mddev->in_sync = 0;
-
- /*
- * Keep the RAID set frozen if reshape/rebuild flags are set.
- * The RAID set is unfrozen once the next table load/resume,
- * which clears the reshape/rebuild flags, occurs.
- * This ensures that the constructor for the inactive table
- * retrieves an up-to-date reshape_position.
- */
- if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-
if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+ /* Only reduce raid set size before running a disk removing reshape. */
+ if (mddev->delta_disks < 0)
+ rs_set_capacity(rs);
+
mddev_lock_nointr(mddev);
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ mddev->ro = 0;
+ mddev->in_sync = 0;
mddev_resume(mddev);
mddev_unlock(mddev);
}
@@ -3894,7 +4032,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 13, 0},
+ .version = {1, 13, 2},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
@@ -3903,7 +4041,6 @@ static struct target_type raid_target = {
.message = raid_message,
.iterate_devices = raid_iterate_devices,
.io_hints = raid_io_hints,
- .presuspend = raid_presuspend,
.postsuspend = raid_postsuspend,
.preresume = raid_preresume,
.resume = raid_resume,
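A recurring fix in this file: raid_status() used to test bits of mddev->recovery repeatedly, each read racing with MD's sync thread, so the emitted health characters could mix states from different moments. The rework snapshots the word once and threads it through decipher_sync_action() and rs_get_progress(). The idiom, sketched as a fragment:

	unsigned long recovery;

	smp_rmb();			/* see the writers' most recent state */
	recovery = rs->md.recovery;	/* one coherent copy for all tests */

	if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
	    test_bit(MD_RECOVERY_RUNNING, &recovery))
		/* every decision below sees the same moment in time */;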
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 9d32f25489c2..aeaaaef43eff 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -315,6 +315,10 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
/* The target wants to requeue the I/O */
dm_requeue_original_request(tio, false);
break;
+ case DM_ENDIO_DELAY_REQUEUE:
+ /* The target wants to requeue the I/O after a delay */
+ dm_requeue_original_request(tio, true);
+ break;
default:
DMWARN("unimplemented target endio return value: %d", r);
BUG();
@@ -395,7 +399,7 @@ static void end_clone_request(struct request *clone, blk_status_t error)
dm_complete_request(tio->orig, error);
}
-static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
+static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
blk_status_t r;
@@ -404,9 +408,10 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
clone->start_time = jiffies;
r = blk_insert_cloned_request(clone->q, clone);
- if (r)
+ if (r != BLK_STS_OK && r != BLK_STS_RESOURCE)
/* must complete clone in terms of original request */
dm_complete_request(rq, r);
+ return r;
}
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
@@ -476,8 +481,10 @@ static int map_request(struct dm_rq_target_io *tio)
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
struct request *clone = NULL;
+ blk_status_t ret;
r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
+check_again:
switch (r) {
case DM_MAPIO_SUBMITTED:
/* The target has taken the I/O to submit by itself later */
@@ -492,7 +499,17 @@ static int map_request(struct dm_rq_target_io *tio)
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
- dm_dispatch_clone_request(clone, rq);
+ ret = dm_dispatch_clone_request(clone, rq);
+ if (ret == BLK_STS_RESOURCE) {
+ blk_rq_unprep_clone(clone);
+ tio->ti->type->release_clone_rq(clone);
+ tio->clone = NULL;
+ if (!rq->q->mq_ops)
+ r = DM_MAPIO_DELAY_REQUEUE;
+ else
+ r = DM_MAPIO_REQUEUE;
+ goto check_again;
+ }
break;
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
@@ -700,7 +717,6 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
/* disable dm_old_request_fn's merge heuristic by default */
md->seq_rq_merge_deadline_usecs = 0;
- dm_init_normal_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done);
/* Initialize the request-based DM worker thread */
@@ -713,8 +729,6 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
return error;
}
- elv_register_queue(md->queue);
-
return 0;
}
@@ -810,17 +824,9 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
err = PTR_ERR(q);
goto out_tag_set;
}
- dm_init_md_queue(md);
-
- /* backfill 'mq' sysfs registration normally done in blk_register_queue */
- err = blk_mq_register_dev(disk_to_dev(md->disk), q);
- if (err)
- goto out_cleanup_queue;
return 0;
-out_cleanup_queue:
- blk_cleanup_queue(q);
out_tag_set:
blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
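DM_ENDIO_DELAY_REQUEUE extends the target endio contract: alongside DM_ENDIO_REQUEUE (immediate) the core now honours a delayed variant, which dm-mpath uses for BLK_STS_RESOURCE so a busy path is not hammered with instant retries. A hypothetical request-based endio using the new value (example_end_io is not a real target's method):

	static int example_end_io(struct dm_target *ti, struct request *clone,
				  blk_status_t error, union map_info *map_context)
	{
		if (error == BLK_STS_RESOURCE)
			return DM_ENDIO_DELAY_REQUEUE;	/* requeue after a delay */
		if (error && blk_path_error(error))
			return DM_ENDIO_REQUEUE;	/* requeue immediately */
		return DM_ENDIO_DONE;
	}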
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
index 7b8642045c55..f006a9005593 100644
--- a/drivers/md/dm-service-time.c
+++ b/drivers/md/dm-service-time.c
@@ -282,9 +282,6 @@ static struct dm_path *st_select_path(struct path_selector *ps, size_t nr_bytes)
if (list_empty(&s->valid_paths))
goto out;
- /* Change preferred (first in list) path to evenly balance. */
- list_move_tail(s->valid_paths.next, &s->valid_paths);
-
list_for_each_entry(pi, &s->valid_paths, list)
if (!best || (st_compare_load(pi, best, nr_bytes) < 0))
best = pi;
@@ -292,6 +289,9 @@ static struct dm_path *st_select_path(struct path_selector *ps, size_t nr_bytes)
if (!best)
goto out;
+ /* Move most recently used to least preferred to evenly balance. */
+ list_move_tail(&best->list, &s->valid_paths);
+
ret = best->path;
out:
spin_unlock_irqrestore(&s->lock, flags);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1113b42e1eda..216035be5661 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -47,7 +47,7 @@ struct dm_exception_table {
};
struct dm_snapshot {
- struct rw_semaphore lock;
+ struct mutex lock;
struct dm_dev *origin;
struct dm_dev *cow;
@@ -439,9 +439,9 @@ static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
continue;
- down_read(&s->lock);
+ mutex_lock(&s->lock);
active = s->active;
- up_read(&s->lock);
+ mutex_unlock(&s->lock);
if (active) {
if (snap_src)
@@ -909,7 +909,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
int r;
chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
- down_write(&s->lock);
+ mutex_lock(&s->lock);
/*
* Process chunks (and associated exceptions) in reverse order
@@ -924,7 +924,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
b = __release_queued_bios_after_merge(s);
out:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
if (b)
flush_bios(b);
@@ -983,9 +983,9 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
if (linear_chunks < 0) {
DMERR("Read error in exception store: "
"shutting down merge");
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->merge_failed = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
}
goto shut;
}
@@ -1026,10 +1026,10 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
previous_count = read_pending_exceptions_done_count();
}
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->first_merging_chunk = old_chunk;
s->num_merging_chunks = linear_chunks;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
/* Wait until writes to all 'linear_chunks' drain */
for (i = 0; i < linear_chunks; i++)
@@ -1071,10 +1071,10 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
return;
shut:
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->merge_failed = 1;
b = __release_queued_bios_after_merge(s);
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
error_bios(b);
merge_shutdown(s);
@@ -1173,7 +1173,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->exception_start_sequence = 0;
s->exception_complete_sequence = 0;
INIT_LIST_HEAD(&s->out_of_order_list);
- init_rwsem(&s->lock);
+ mutex_init(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
s->state_bits = 0;
@@ -1338,9 +1338,9 @@ static void snapshot_dtr(struct dm_target *ti)
/* Check whether exception handover must be cancelled */
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest && (s == snap_src)) {
- down_write(&snap_dest->lock);
+ mutex_lock(&snap_dest->lock);
snap_dest->valid = 0;
- up_write(&snap_dest->lock);
+ mutex_unlock(&snap_dest->lock);
DMERR("Cancelling snapshot handover.");
}
up_read(&_origins_lock);
@@ -1371,6 +1371,8 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
+ mutex_destroy(&s->lock);
+
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
@@ -1458,7 +1460,7 @@ static void pending_complete(void *context, int success)
if (!success) {
/* Read/write error - snapshot is unusable */
- down_write(&s->lock);
+ mutex_lock(&s->lock);
__invalidate_snapshot(s, -EIO);
error = 1;
goto out;
@@ -1466,14 +1468,14 @@ static void pending_complete(void *context, int success)
e = alloc_completed_exception(GFP_NOIO);
if (!e) {
- down_write(&s->lock);
+ mutex_lock(&s->lock);
__invalidate_snapshot(s, -ENOMEM);
error = 1;
goto out;
}
*e = pe->e;
- down_write(&s->lock);
+ mutex_lock(&s->lock);
if (!s->valid) {
free_completed_exception(e);
error = 1;
@@ -1498,7 +1500,7 @@ out:
full_bio->bi_end_io = pe->full_bio_end_io;
increment_pending_exceptions_done_count();
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
/* Submit any pending write bios */
if (error) {
@@ -1694,7 +1696,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
/* FIXME: should only take write lock if we need
* to copy an exception */
- down_write(&s->lock);
+ mutex_lock(&s->lock);
if (!s->valid || (unlikely(s->snapshot_overflowed) &&
bio_data_dir(bio) == WRITE)) {
@@ -1717,9 +1719,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (bio_data_dir(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
if (!pe) {
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
pe = alloc_pending_exception(s);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
if (!s->valid || s->snapshot_overflowed) {
free_pending_exception(pe);
@@ -1754,7 +1756,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
bio->bi_iter.bi_size ==
(s->store->chunk_size << SECTOR_SHIFT)) {
pe->started = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
start_full_bio(pe, bio);
goto out;
}
@@ -1764,7 +1766,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!pe->started) {
/* this is protected by snap->lock */
pe->started = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
start_copy(pe);
goto out;
}
@@ -1774,7 +1776,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
}
out_unlock:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
out:
return r;
}
@@ -1810,7 +1812,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
/* Full merging snapshots are redirected to the origin */
if (!s->valid)
@@ -1841,12 +1843,12 @@ redirect_to_origin:
bio_set_dev(bio, s->origin->bdev);
if (bio_data_dir(bio) == WRITE) {
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
return do_origin(s->origin, bio);
}
out_unlock:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
return r;
}
@@ -1878,7 +1880,7 @@ static int snapshot_preresume(struct dm_target *ti)
down_read(&_origins_lock);
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- down_read(&snap_src->lock);
+ mutex_lock(&snap_src->lock);
if (s == snap_src) {
DMERR("Unable to resume snapshot source until "
"handover completes.");
@@ -1888,7 +1890,7 @@ static int snapshot_preresume(struct dm_target *ti)
"source is suspended.");
r = -EINVAL;
}
- up_read(&snap_src->lock);
+ mutex_unlock(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1934,11 +1936,11 @@ static void snapshot_resume(struct dm_target *ti)
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- down_write(&snap_src->lock);
- down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+ mutex_lock(&snap_src->lock);
+ mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
__handover_exceptions(snap_src, snap_dest);
- up_write(&snap_dest->lock);
- up_write(&snap_src->lock);
+ mutex_unlock(&snap_dest->lock);
+ mutex_unlock(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1953,9 +1955,9 @@ static void snapshot_resume(struct dm_target *ti)
/* Now we have correct chunk size, reregister */
reregister_snapshot(s);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->active = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
@@ -1995,7 +1997,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
if (!snap->valid)
DMEMIT("Invalid");
@@ -2020,7 +2022,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
DMEMIT("Unknown");
}
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
break;
@@ -2086,7 +2088,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
if (dm_target_is_snapshot_merge(snap->ti))
continue;
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
/* Only deal with valid and active snapshots */
if (!snap->valid || !snap->active)
@@ -2113,9 +2115,9 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
pe = __lookup_pending_exception(snap, chunk);
if (!pe) {
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
pe = alloc_pending_exception(snap);
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
if (!snap->valid) {
free_pending_exception(pe);
@@ -2158,7 +2160,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
}
next_snapshot:
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
if (pe_to_start_now) {
start_copy(pe_to_start_now);
@@ -2411,24 +2413,6 @@ static int __init dm_snapshot_init(void)
return r;
}
- r = dm_register_target(&snapshot_target);
- if (r < 0) {
- DMERR("snapshot target register failed %d", r);
- goto bad_register_snapshot_target;
- }
-
- r = dm_register_target(&origin_target);
- if (r < 0) {
- DMERR("Origin target register failed %d", r);
- goto bad_register_origin_target;
- }
-
- r = dm_register_target(&merge_target);
- if (r < 0) {
- DMERR("Merge target register failed %d", r);
- goto bad_register_merge_target;
- }
-
r = init_origin_hash();
if (r) {
DMERR("init_origin_hash failed.");
@@ -2449,19 +2433,37 @@ static int __init dm_snapshot_init(void)
goto bad_pending_cache;
}
+ r = dm_register_target(&snapshot_target);
+ if (r < 0) {
+ DMERR("snapshot target register failed %d", r);
+ goto bad_register_snapshot_target;
+ }
+
+ r = dm_register_target(&origin_target);
+ if (r < 0) {
+ DMERR("Origin target register failed %d", r);
+ goto bad_register_origin_target;
+ }
+
+ r = dm_register_target(&merge_target);
+ if (r < 0) {
+ DMERR("Merge target register failed %d", r);
+ goto bad_register_merge_target;
+ }
+
return 0;
-bad_pending_cache:
- kmem_cache_destroy(exception_cache);
-bad_exception_cache:
- exit_origin_hash();
-bad_origin_hash:
- dm_unregister_target(&merge_target);
bad_register_merge_target:
dm_unregister_target(&origin_target);
bad_register_origin_target:
dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
+ kmem_cache_destroy(pending_cache);
+bad_pending_cache:
+ kmem_cache_destroy(exception_cache);
+bad_exception_cache:
+ exit_origin_hash();
+bad_origin_hash:
dm_exception_store_exit();
return r;
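The reordering above applies the usual module-init rule: build every resource a target constructor may touch first, and make the targets reachable via dm_register_target() last, with the goto ladder unwinding the successful steps in exact reverse; the ladder now also destroys pending_cache, since registration happens after the caches exist. A generic sketch of the idiom, with placeholder names rather than the dm-snapshot symbols:

#include <stdio.h>

static int make_hash(void)    { return 0; }	/* init_origin_hash() stand-in */
static int make_caches(void)  { return 0; }	/* kmem_cache_create() stand-in */
static int publish(void)      { return 0; }	/* dm_register_target() stand-in */
static void drop_caches(void) { }
static void drop_hash(void)   { }

static int demo_init(void)
{
	int r;

	r = make_hash();
	if (r)
		return r;
	r = make_caches();
	if (r)
		goto bad_caches;
	r = publish();	/* last: nothing can reach a half-built module */
	if (r)
		goto bad_publish;
	return 0;

bad_publish:
	drop_caches();
bad_caches:
	drop_hash();
	return r;
}

int main(void)
{
	printf("%d\n", demo_init());
	return 0;
}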
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 29bc51084c82..56059fb56e2d 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -228,6 +228,7 @@ void dm_stats_cleanup(struct dm_stats *stats)
dm_stat_free(&s->rcu_head);
}
free_percpu(stats->last);
+ mutex_destroy(&stats->mutex);
}
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 88130b5d95f9..5fe7ec356c33 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -453,14 +453,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
refcount_set(&dd->count, 1);
list_add(&dd->list, &t->devices);
+ goto out;
} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
return r;
- refcount_inc(&dd->count);
}
-
+ refcount_inc(&dd->count);
+out:
*result = dd->dm_dev;
return 0;
}
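The rework above restores a simple invariant: every successful dm_get_device() accounts for exactly one reference on the dm_dev_internal, whether the entry is freshly created (refcount_set(..., 1) is the creator's reference, hence the goto over the increment), reused as-is, or mode-upgraded; the old code took no reference at all on the plain-reuse path. The invariant in miniature, with toy types:

#include <stdio.h>

struct dd { int count; int mode; };

static struct dd *get_dev(struct dd *existing, int mode)
{
	static struct dd fresh;

	if (!existing) {
		fresh.count = 1;	/* creator's reference, no extra inc */
		fresh.mode = mode;
		return &fresh;
	}
	if ((existing->mode | mode) != existing->mode)
		existing->mode |= mode;		/* upgrade_mode() stand-in */
	existing->count++;	/* one reference per caller, unconditionally */
	return existing;
}

int main(void)
{
	struct dd *d = get_dev(NULL, 1);

	get_dev(d, 1);			/* plain reuse: now counted too */
	get_dev(d, 2);			/* mode upgrade: counted exactly once */
	printf("refs=%d\n", d->count);	/* prints 3: one per get_dev() */
	return 0;
}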
@@ -865,7 +866,8 @@ EXPORT_SYMBOL(dm_consume_args);
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
return (table_type == DM_TYPE_BIO_BASED ||
- table_type == DM_TYPE_DAX_BIO_BASED);
+ table_type == DM_TYPE_DAX_BIO_BASED ||
+ table_type == DM_TYPE_NVME_BIO_BASED);
}
static bool __table_type_request_based(enum dm_queue_mode table_type)
@@ -908,13 +910,33 @@ static bool dm_table_supports_dax(struct dm_table *t)
return true;
}
+static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
+
+struct verify_rq_based_data {
+ unsigned sq_count;
+ unsigned mq_count;
+};
+
+static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+ struct verify_rq_based_data *v = data;
+
+ if (q->mq_ops)
+ v->mq_count++;
+ else
+ v->sq_count++;
+
+ return queue_is_rq_based(q);
+}
+
static int dm_table_determine_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- unsigned sq_count = 0, mq_count = 0;
+ struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0};
struct dm_target *tgt;
- struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
@@ -922,6 +944,14 @@ static int dm_table_determine_type(struct dm_table *t)
/* target already set the table's type */
if (t->type == DM_TYPE_BIO_BASED)
return 0;
+ else if (t->type == DM_TYPE_NVME_BIO_BASED) {
+ if (!dm_table_does_not_support_partial_completion(t)) {
+ DMERR("nvme bio-based is only possible with devices"
+ " that don't support partial completion");
+ return -EINVAL;
+ }
+ /* Fallthru, also verify all devices are blk-mq */
+ }
BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
goto verify_rq_based;
}
@@ -936,8 +966,8 @@ static int dm_table_determine_type(struct dm_table *t)
bio_based = 1;
if (bio_based && request_based) {
- DMWARN("Inconsistent table: different target types"
- " can't be mixed up");
+ DMERR("Inconsistent table: different target types"
+ " can't be mixed up");
return -EINVAL;
}
}
@@ -958,8 +988,18 @@ static int dm_table_determine_type(struct dm_table *t)
/* We must use this table as bio-based */
t->type = DM_TYPE_BIO_BASED;
if (dm_table_supports_dax(t) ||
- (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED))
+ (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
t->type = DM_TYPE_DAX_BIO_BASED;
+ } else {
+ /* Check if upgrading to NVMe bio-based is valid or required */
+ tgt = dm_table_get_immutable_target(t);
+ if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
+ t->type = DM_TYPE_NVME_BIO_BASED;
+ goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
+ } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
+ t->type = DM_TYPE_NVME_BIO_BASED;
+ }
+ }
return 0;
}
@@ -979,7 +1019,8 @@ verify_rq_based:
* (e.g. request completion process for partial completion.)
*/
if (t->num_targets > 1) {
- DMWARN("Request-based dm doesn't support multiple targets yet");
+ DMERR("%s DM doesn't support multiple targets",
+ t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
return -EINVAL;
}
@@ -996,28 +1037,29 @@ verify_rq_based:
return 0;
}
- /* Non-request-stackable devices can't be used for request-based dm */
- list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
-
- if (!queue_is_rq_based(q)) {
- DMERR("table load rejected: including"
- " non-request-stackable devices");
- return -EINVAL;
- }
+ tgt = dm_table_get_immutable_target(t);
+ if (!tgt) {
+ DMERR("table load rejected: immutable target is required");
+ return -EINVAL;
+ } else if (tgt->max_io_len) {
+ DMERR("table load rejected: immutable target that splits IO is not supported");
+ return -EINVAL;
+ }
- if (q->mq_ops)
- mq_count++;
- else
- sq_count++;
+ /* Non-request-stackable devices can't be used for request-based dm */
+ if (!tgt->type->iterate_devices ||
+ !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
+ DMERR("table load rejected: including non-request-stackable devices");
+ return -EINVAL;
}
- if (sq_count && mq_count) {
+ if (v.sq_count && v.mq_count) {
DMERR("table load rejected: not all devices are blk-mq request-stackable");
return -EINVAL;
}
- t->all_blk_mq = mq_count > 0;
+ t->all_blk_mq = v.mq_count > 0;
- if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
+ if (!t->all_blk_mq &&
+ (t->type == DM_TYPE_MQ_REQUEST_BASED || t->type == DM_TYPE_NVME_BIO_BASED)) {
DMERR("table load rejected: all devices are not blk-mq request-stackable");
return -EINVAL;
}
@@ -1078,7 +1120,8 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
{
enum dm_queue_mode type = dm_table_get_type(t);
unsigned per_io_data_size = 0;
- struct dm_target *tgt;
+ unsigned min_pool_size = 0;
+ struct dm_target *ti;
unsigned i;
if (unlikely(type == DM_TYPE_NONE)) {
@@ -1088,11 +1131,13 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
if (__table_type_bio_based(type))
for (i = 0; i < t->num_targets; i++) {
- tgt = t->targets + i;
- per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
+ ti = t->targets + i;
+ per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
+ min_pool_size = max(min_pool_size, ti->num_flush_bios);
}
- t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
+ t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
+ per_io_data_size, min_pool_size);
if (!t->mempools)
return -ENOMEM;
@@ -1704,6 +1749,20 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
return true;
}
+static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ char b[BDEVNAME_SIZE];
+
+ /* For now, NVMe devices are the only devices of this class */
+ return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
+}
+
+static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
+{
+ return dm_table_all_devices_attribute(t, device_no_partial_completion);
+}
+
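Both new callouts plug into the same iterate_devices machinery: the callout runs once per underlying device, and helpers such as dm_table_all_devices_attribute() treat a zero return from any walk as rejecting the whole table. A simplified model of that intent, with toy types rather than the kernel API and ignoring per-target iteration details:

#include <stdbool.h>
#include <stdio.h>

typedef bool (*dev_pred)(int dev);

static bool all_devices(const int *devs, int n, dev_pred fn)
{
	for (int i = 0; i < n; i++)
		if (!fn(devs[i]))
			return false;	/* one miss rejects the table */
	return true;
}

/* stand-in for device_no_partial_completion()'s name test */
static bool looks_like_nvme(int dev) { return dev >= 100; }

int main(void)
{
	int table[] = { 101, 102, 7 };	/* the 7 is not "nvme" */

	printf("%d\n", all_devices(table, 3, looks_like_nvme));	/* 0 */
	return 0;
}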
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1819,6 +1878,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_write_cache(q, wc, fua);
+ if (dm_table_supports_dax(t))
+ queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
if (dm_table_supports_dax_write_cache(t))
dax_write_cache(t->md->dax_dev, true);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index d31d18d9727c..36ef284ad086 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -80,10 +80,14 @@
#define SECTOR_TO_BLOCK_SHIFT 3
/*
+ * For btree insert:
* 3 for btree insert +
* 2 for btree lookup used within space map
+ * For btree remove:
+ * 2 for shadow spine +
+ * 4 for rebalancing 3 child nodes
*/
-#define THIN_MAX_CONCURRENT_LOCKS 5
+#define THIN_MAX_CONCURRENT_LOCKS 6
/* This should be plenty */
#define SPACE_MAP_ROOT_SIZE 128
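The new value is just the deeper of the two btree paths the comment enumerates: max(3 + 2, 2 + 4) = 6, where the old constant 5 only covered the insert path. Restated as a compile-time check with ad-hoc names, purely for illustration:

/* insert: 3 btree levels + 2 space-map lookups; remove: 2 shadow-spine
 * levels + 4 locks while rebalancing 3 child nodes */
#define DEMO_INSERT_LOCKS	(3 + 2)
#define DEMO_REMOVE_LOCKS	(2 + 4)
#define DEMO_MAX_LOCKS		6

_Static_assert(DEMO_INSERT_LOCKS <= DEMO_MAX_LOCKS &&
	       DEMO_REMOVE_LOCKS <= DEMO_MAX_LOCKS,
	       "lock budget must cover the deepest btree path");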
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 89e5dff9b4cf..629c555890c1 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -492,6 +492,11 @@ static void pool_table_init(void)
INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}
+static void pool_table_exit(void)
+{
+ mutex_destroy(&dm_thin_pool_table.mutex);
+}
+
static void __pool_table_insert(struct pool *pool)
{
BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
@@ -1717,7 +1722,7 @@ static void __remap_and_issue_shared_cell(void *context,
bio_op(bio) == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
- struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
inc_all_io_entry(info->tc->pool, bio);
@@ -4355,30 +4360,28 @@ static struct target_type thin_target = {
static int __init dm_thin_init(void)
{
- int r;
+ int r = -ENOMEM;
pool_table_init();
+ _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+ if (!_new_mapping_cache)
+ return r;
+
r = dm_register_target(&thin_target);
if (r)
- return r;
+ goto bad_new_mapping_cache;
r = dm_register_target(&pool_target);
if (r)
- goto bad_pool_target;
-
- r = -ENOMEM;
-
- _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
- if (!_new_mapping_cache)
- goto bad_new_mapping_cache;
+ goto bad_thin_target;
return 0;
-bad_new_mapping_cache:
- dm_unregister_target(&pool_target);
-bad_pool_target:
+bad_thin_target:
dm_unregister_target(&thin_target);
+bad_new_mapping_cache:
+ kmem_cache_destroy(_new_mapping_cache);
return r;
}
@@ -4389,6 +4392,8 @@ static void dm_thin_exit(void)
dm_unregister_target(&pool_target);
kmem_cache_destroy(_new_mapping_cache);
+
+ pool_table_exit();
}
module_init(dm_thin_init);
diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c
new file mode 100644
index 000000000000..65f838fa2e99
--- /dev/null
+++ b/drivers/md/dm-unstripe.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2017 Intel Corporation.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/device-mapper.h>
+
+struct unstripe_c {
+ struct dm_dev *dev;
+ sector_t physical_start;
+
+ uint32_t stripes;
+
+ uint32_t unstripe;
+ sector_t unstripe_width;
+ sector_t unstripe_offset;
+
+ uint32_t chunk_size;
+ u8 chunk_shift;
+};
+
+#define DM_MSG_PREFIX "unstriped"
+
+static void cleanup_unstripe(struct unstripe_c *uc, struct dm_target *ti)
+{
+ if (uc->dev)
+ dm_put_device(ti, uc->dev);
+ kfree(uc);
+}
+
+/*
+ * Construct an unstriped mapping.
+ * <number of stripes> <chunk size> <stripe #> <dev_path> <offset>
+ */
+static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct unstripe_c *uc;
+ sector_t tmp_len;
+ unsigned long long start;
+ char dummy;
+
+ if (argc != 5) {
+ ti->error = "Invalid number of arguments";
+ return -EINVAL;
+ }
+
+ uc = kzalloc(sizeof(*uc), GFP_KERNEL);
+ if (!uc) {
+ ti->error = "Memory allocation for unstriped context failed";
+ return -ENOMEM;
+ }
+
+ if (kstrtouint(argv[0], 10, &uc->stripes) || !uc->stripes) {
+ ti->error = "Invalid stripe count";
+ goto err;
+ }
+
+ if (kstrtouint(argv[1], 10, &uc->chunk_size) || !uc->chunk_size) {
+ ti->error = "Invalid chunk_size";
+ goto err;
+ }
+
+ // FIXME: must support non power of 2 chunk_size, dm-stripe.c does
+ if (!is_power_of_2(uc->chunk_size)) {
+ ti->error = "Non power of 2 chunk_size is not supported yet";
+ goto err;
+ }
+
+ if (kstrtouint(argv[2], 10, &uc->unstripe)) {
+ ti->error = "Invalid stripe number";
+ goto err;
+ }
+
+ if (uc->unstripe > uc->stripes && uc->stripes > 1) {
+ ti->error = "Please provide stripe between [0, # of stripes]";
+ goto err;
+ }
+
+ if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &uc->dev)) {
+ ti->error = "Couldn't get striped device";
+ goto err;
+ }
+
+ if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1) {
+ ti->error = "Invalid striped device offset";
+ goto err;
+ }
+ uc->physical_start = start;
+
+ uc->unstripe_offset = uc->unstripe * uc->chunk_size;
+ uc->unstripe_width = (uc->stripes - 1) * uc->chunk_size;
+ uc->chunk_shift = fls(uc->chunk_size) - 1;
+
+ tmp_len = ti->len;
+ if (sector_div(tmp_len, uc->chunk_size)) {
+ ti->error = "Target length not divisible by chunk size";
+ goto err;
+ }
+
+ if (dm_set_target_max_io_len(ti, uc->chunk_size)) {
+ ti->error = "Failed to set max io len";
+ goto err;
+ }
+
+ ti->private = uc;
+ return 0;
+err:
+ cleanup_unstripe(uc, ti);
+ return -EINVAL;
+}
+
+static void unstripe_dtr(struct dm_target *ti)
+{
+ struct unstripe_c *uc = ti->private;
+
+ cleanup_unstripe(uc, ti);
+}
+
+static sector_t map_to_core(struct dm_target *ti, struct bio *bio)
+{
+ struct unstripe_c *uc = ti->private;
+ sector_t sector = bio->bi_iter.bi_sector;
+
+ /* Shift us up to the right "row" on the stripe */
+ sector += uc->unstripe_width * (sector >> uc->chunk_shift);
+
+ /* Account for what stripe we're operating on */
+ sector += uc->unstripe_offset;
+
+ return sector;
+}
+
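To see map_to_core()'s arithmetic with concrete, purely illustrative numbers: take 4 stripes, an 8-sector chunk, and stripe #1 being extracted, so unstripe_width = (4 - 1) * 8 = 24, unstripe_offset = 1 * 8 = 8 and chunk_shift = 3. Sector 10 of the unstriped view sits in row 10 >> 3 = 1 and maps to core sector 10 + 24 * 1 + 8 = 42, i.e. chunk 5 of the striped device; 5 mod 4 = 1 confirms it belongs to stripe 1:

#include <stdio.h>

int main(void)
{
	unsigned long long sector = 10;	/* sector on the unstriped view */
	unsigned long long width = 24, offset = 8, shift = 3;

	sector += width * (sector >> shift);	/* skip other stripes' rows */
	sector += offset;			/* land in our stripe's column */
	printf("core sector %llu\n", sector);	/* prints 42 */
	return 0;
}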
+static int unstripe_map(struct dm_target *ti, struct bio *bio)
+{
+ struct unstripe_c *uc = ti->private;
+
+ bio_set_dev(bio, uc->dev->bdev);
+ bio->bi_iter.bi_sector = map_to_core(ti, bio) + uc->physical_start;
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static void unstripe_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result, unsigned int maxlen)
+{
+ struct unstripe_c *uc = ti->private;
+ unsigned int sz = 0;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ break;
+
+ case STATUSTYPE_TABLE:
+ DMEMIT("%d %llu %d %s %llu",
+ uc->stripes, (unsigned long long)uc->chunk_size, uc->unstripe,
+ uc->dev->name, (unsigned long long)uc->physical_start);
+ break;
+ }
+}
+
+static int unstripe_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct unstripe_c *uc = ti->private;
+
+ return fn(ti, uc->dev, uc->physical_start, ti->len, data);
+}
+
+static void unstripe_io_hints(struct dm_target *ti,
+ struct queue_limits *limits)
+{
+ struct unstripe_c *uc = ti->private;
+
+ limits->chunk_sectors = uc->chunk_size;
+}
+
+static struct target_type unstripe_target = {
+ .name = "unstriped",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = unstripe_ctr,
+ .dtr = unstripe_dtr,
+ .map = unstripe_map,
+ .status = unstripe_status,
+ .iterate_devices = unstripe_iterate_devices,
+ .io_hints = unstripe_io_hints,
+};
+
+static int __init dm_unstripe_init(void)
+{
+ int r;
+
+ r = dm_register_target(&unstripe_target);
+ if (r < 0)
+ DMERR("target registration failed");
+
+ return r;
+}
+
+static void __exit dm_unstripe_exit(void)
+{
+ dm_unregister_target(&unstripe_target);
+}
+
+module_init(dm_unstripe_init);
+module_exit(dm_unstripe_exit);
+
+MODULE_DESCRIPTION(DM_NAME " unstriped target");
+MODULE_AUTHOR("Scott Bauer <scott.bauer@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 70485de37b66..969954915566 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -2333,6 +2333,9 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
/* Free the zone descriptors */
dmz_drop_zones(zmd);
+
+ mutex_destroy(&zmd->mblk_flush_lock);
+ mutex_destroy(&zmd->map_lock);
}
/*
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 6d7bda6f8190..caff02caf083 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -827,6 +827,7 @@ err_fwq:
err_cwq:
destroy_workqueue(dmz->chunk_wq);
err_bio:
+ mutex_destroy(&dmz->chunk_lock);
bioset_free(dmz->bio_set);
err_meta:
dmz_dtr_metadata(dmz->metadata);
@@ -861,6 +862,8 @@ static void dmz_dtr(struct dm_target *ti)
dmz_put_zoned_device(ti);
+ mutex_destroy(&dmz->chunk_lock);
+
kfree(dmz);
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index de17b7193299..d6de00f367ef 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -60,18 +60,73 @@ void dm_issue_global_event(void)
}
/*
- * One of these is allocated per bio.
+ * One of these is allocated (on-stack) per original bio.
*/
+struct clone_info {
+ struct dm_table *map;
+ struct bio *bio;
+ struct dm_io *io;
+ sector_t sector;
+ unsigned sector_count;
+};
+
+/*
+ * One of these is allocated per clone bio.
+ */
+#define DM_TIO_MAGIC 7282014
+struct dm_target_io {
+ unsigned magic;
+ struct dm_io *io;
+ struct dm_target *ti;
+ unsigned target_bio_nr;
+ unsigned *len_ptr;
+ bool inside_dm_io;
+ struct bio clone;
+};
+
+/*
+ * One of these is allocated per original bio.
+ * It contains the first clone used for that original.
+ */
+#define DM_IO_MAGIC 5191977
struct dm_io {
+ unsigned magic;
struct mapped_device *md;
blk_status_t status;
atomic_t io_count;
- struct bio *bio;
+ struct bio *orig_bio;
unsigned long start_time;
spinlock_t endio_lock;
struct dm_stats_aux stats_aux;
+ /* last member of dm_target_io is 'struct bio' */
+ struct dm_target_io tio;
};
+void *dm_per_bio_data(struct bio *bio, size_t data_size)
+{
+ struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ if (!tio->inside_dm_io)
+ return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
+ return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
+}
+EXPORT_SYMBOL_GPL(dm_per_bio_data);
+
+struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
+{
+ struct dm_io *io = (struct dm_io *)((char *)data + data_size);
+ if (io->magic == DM_IO_MAGIC)
+ return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
+ BUG_ON(io->magic != DM_TIO_MAGIC);
+ return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
+}
+EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
+
+unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
+{
+ return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
+}
+EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
+
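The two magic numbers exist so dm_per_bio_data() and dm_bio_from_per_bio_data() can convert between a target's per-bio data and the clone bio using nothing but offsetof() arithmetic: the biosets front-pad every allocation, so the data sits immediately in front of either a bare dm_target_io or the dm_io that embeds one. A compressed userspace model of the !inside_dm_io walk, with stand-in structs rather than the kernel's:

#include <stdio.h>
#include <stddef.h>

struct bio { int placeholder; };
struct dm_target_io { unsigned magic; struct bio clone; };

struct blob {			/* what a front-padded bioset hands out */
	char per_bio_data[16];
	struct dm_target_io tio;
};

int main(void)
{
	struct blob b;
	struct bio *bio = &b.tio.clone;	/* all a target ever sees */

	/* the dm_per_bio_data() walk for a clone from md->bs */
	void *data = (char *)bio - offsetof(struct dm_target_io, clone)
				 - sizeof(b.per_bio_data);
	printf("%s\n", data == (void *)b.per_bio_data ? "recovered" : "lost");
	return 0;
}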
#define MINOR_ALLOCED ((void *)-1)
/*
@@ -93,8 +148,8 @@ static int dm_numa_node = DM_NUMA_NODE;
* For mempools pre-allocation at the table loading time.
*/
struct dm_md_mempools {
- mempool_t *io_pool;
struct bio_set *bs;
+ struct bio_set *io_bs;
};
struct table_device {
@@ -103,7 +158,6 @@ struct table_device {
struct dm_dev dm_dev;
};
-static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;
@@ -170,14 +224,9 @@ static int __init local_init(void)
{
int r = -ENOMEM;
- /* allocate a slab for the dm_ios */
- _io_cache = KMEM_CACHE(dm_io, 0);
- if (!_io_cache)
- return r;
-
_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
if (!_rq_tio_cache)
- goto out_free_io_cache;
+ return r;
_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
__alignof__(struct request), 0, NULL);
@@ -212,8 +261,6 @@ out_free_rq_cache:
kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
kmem_cache_destroy(_rq_tio_cache);
-out_free_io_cache:
- kmem_cache_destroy(_io_cache);
return r;
}
@@ -225,7 +272,6 @@ static void local_exit(void)
kmem_cache_destroy(_rq_cache);
kmem_cache_destroy(_rq_tio_cache);
- kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
dm_uevent_exit();
@@ -486,18 +532,69 @@ out:
return r;
}
-static struct dm_io *alloc_io(struct mapped_device *md)
+static void start_io_acct(struct dm_io *io);
+
+static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
- return mempool_alloc(md->io_pool, GFP_NOIO);
+ struct dm_io *io;
+ struct dm_target_io *tio;
+ struct bio *clone;
+
+ clone = bio_alloc_bioset(GFP_NOIO, 0, md->io_bs);
+ if (!clone)
+ return NULL;
+
+ tio = container_of(clone, struct dm_target_io, clone);
+ tio->inside_dm_io = true;
+ tio->io = NULL;
+
+ io = container_of(tio, struct dm_io, tio);
+ io->magic = DM_IO_MAGIC;
+ io->status = 0;
+ atomic_set(&io->io_count, 1);
+ io->orig_bio = bio;
+ io->md = md;
+ spin_lock_init(&io->endio_lock);
+
+ start_io_acct(io);
+
+ return io;
}
static void free_io(struct mapped_device *md, struct dm_io *io)
{
- mempool_free(io, md->io_pool);
+ bio_put(&io->tio.clone);
+}
+
+static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
+ unsigned target_bio_nr, gfp_t gfp_mask)
+{
+ struct dm_target_io *tio;
+
+ if (!ci->io->tio.io) {
+ /* the dm_target_io embedded in ci->io is available */
+ tio = &ci->io->tio;
+ } else {
+ struct bio *clone = bio_alloc_bioset(gfp_mask, 0, ci->io->md->bs);
+ if (!clone)
+ return NULL;
+
+ tio = container_of(clone, struct dm_target_io, clone);
+ tio->inside_dm_io = false;
+ }
+
+ tio->magic = DM_TIO_MAGIC;
+ tio->io = ci->io;
+ tio->ti = ti;
+ tio->target_bio_nr = target_bio_nr;
+
+ return tio;
}
static void free_tio(struct dm_target_io *tio)
{
+ if (tio->inside_dm_io)
+ return;
bio_put(&tio->clone);
}
@@ -510,17 +607,15 @@ int md_in_flight(struct mapped_device *md)
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
- struct bio *bio = io->bio;
- int cpu;
+ struct bio *bio = io->orig_bio;
int rw = bio_data_dir(bio);
io->start_time = jiffies;
- cpu = part_stat_lock();
- part_round_stats(md->queue, cpu, &dm_disk(md)->part0);
- part_stat_unlock();
+ generic_start_io_acct(md->queue, rw, bio_sectors(bio), &dm_disk(md)->part0);
+
atomic_set(&dm_disk(md)->part0.in_flight[rw],
- atomic_inc_return(&md->pending[rw]));
+ atomic_inc_return(&md->pending[rw]));
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
@@ -531,7 +626,7 @@ static void start_io_acct(struct dm_io *io)
static void end_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
- struct bio *bio = io->bio;
+ struct bio *bio = io->orig_bio;
unsigned long duration = jiffies - io->start_time;
int pending;
int rw = bio_data_dir(bio);
@@ -752,15 +847,6 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
return 0;
}
-/*-----------------------------------------------------------------
- * CRUD START:
- * A more elegant soln is in the works that uses the queue
- * merge fn, unfortunately there are a couple of changes to
- * the block layer that I want to make for this. So in the
- * interests of getting something for people to use I give
- * you this clearly demarcated crap.
- *---------------------------------------------------------------*/
-
static int __noflush_suspending(struct mapped_device *md)
{
return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
@@ -780,8 +866,7 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
/* Push-back supersedes any I/O errors */
if (unlikely(error)) {
spin_lock_irqsave(&io->endio_lock, flags);
- if (!(io->status == BLK_STS_DM_REQUEUE &&
- __noflush_suspending(md)))
+ if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
io->status = error;
spin_unlock_irqrestore(&io->endio_lock, flags);
}
@@ -793,7 +878,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
*/
spin_lock_irqsave(&md->deferred_lock, flags);
if (__noflush_suspending(md))
- bio_list_add_head(&md->deferred, io->bio);
+ /* NOTE early return due to BLK_STS_DM_REQUEUE below */
+ bio_list_add_head(&md->deferred, io->orig_bio);
else
/* noflush suspend was interrupted. */
io->status = BLK_STS_IOERR;
@@ -801,7 +887,7 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
}
io_error = io->status;
- bio = io->bio;
+ bio = io->orig_bio;
end_io_acct(io);
free_io(md, io);
@@ -847,7 +933,7 @@ static void clone_endio(struct bio *bio)
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
- if (unlikely(error == BLK_STS_TARGET)) {
+ if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
if (bio_op(bio) == REQ_OP_WRITE_SAME &&
!bio->bi_disk->queue->limits.max_write_same_sectors)
disable_write_same(md);
@@ -920,7 +1006,15 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
return -EINVAL;
}
- ti->max_io_len = (uint32_t) len;
+ /*
+ * A bio-based queue does its own splitting. When multipage bvecs
+ * are switched on, the incoming bio may be too big for some
+ * targets, such as crypt, to handle.
+ *
+ * When these targets are ready for the big bio, we can remove
+ * the limit.
+ */
+ ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
return 0;
}
@@ -997,7 +1091,7 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
- * allowed for all bio types except REQ_PREFLUSH.
+ * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1047,7 +1141,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{
#ifdef CONFIG_BLK_DEV_ZONED
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- struct bio *report_bio = tio->io->bio;
+ struct bio *report_bio = tio->io->orig_bio;
struct blk_zone_report_hdr *hdr = NULL;
struct blk_zone *zone;
unsigned int nr_rep = 0;
@@ -1114,67 +1208,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);
-/*
- * Flush current->bio_list when the target map method blocks.
- * This fixes deadlocks in snapshot and possibly in other targets.
- */
-struct dm_offload {
- struct blk_plug plug;
- struct blk_plug_cb cb;
-};
-
-static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
-{
- struct dm_offload *o = container_of(cb, struct dm_offload, cb);
- struct bio_list list;
- struct bio *bio;
- int i;
-
- INIT_LIST_HEAD(&o->cb.list);
-
- if (unlikely(!current->bio_list))
- return;
-
- for (i = 0; i < 2; i++) {
- list = current->bio_list[i];
- bio_list_init(&current->bio_list[i]);
-
- while ((bio = bio_list_pop(&list))) {
- struct bio_set *bs = bio->bi_pool;
- if (unlikely(!bs) || bs == fs_bio_set ||
- !bs->rescue_workqueue) {
- bio_list_add(&current->bio_list[i], bio);
- continue;
- }
-
- spin_lock(&bs->rescue_lock);
- bio_list_add(&bs->rescue_list, bio);
- queue_work(bs->rescue_workqueue, &bs->rescue_work);
- spin_unlock(&bs->rescue_lock);
- }
- }
-}
-
-static void dm_offload_start(struct dm_offload *o)
-{
- blk_start_plug(&o->plug);
- o->cb.callback = flush_current_bio_list;
- list_add(&o->cb.list, &current->plug->cb_list);
-}
-
-static void dm_offload_end(struct dm_offload *o)
-{
- list_del(&o->cb.list);
- blk_finish_plug(&o->plug);
-}
-
-static void __map_bio(struct dm_target_io *tio)
+static blk_qc_t __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
- struct dm_offload o;
struct bio *clone = &tio->clone;
+ struct dm_io *io = tio->io;
+ struct mapped_device *md = io->md;
struct dm_target *ti = tio->ti;
+ blk_qc_t ret = BLK_QC_T_NONE;
clone->bi_end_io = clone_endio;
@@ -1183,44 +1225,37 @@ static void __map_bio(struct dm_target_io *tio)
* anything, the target has assumed ownership of
* this io.
*/
- atomic_inc(&tio->io->io_count);
+ atomic_inc(&io->io_count);
sector = clone->bi_iter.bi_sector;
- dm_offload_start(&o);
r = ti->type->map(ti, clone);
- dm_offload_end(&o);
-
switch (r) {
case DM_MAPIO_SUBMITTED:
break;
case DM_MAPIO_REMAPPED:
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone->bi_disk->queue, clone,
- bio_dev(tio->io->bio), sector);
- generic_make_request(clone);
+ bio_dev(io->orig_bio), sector);
+ if (md->type == DM_TYPE_NVME_BIO_BASED)
+ ret = direct_make_request(clone);
+ else
+ ret = generic_make_request(clone);
break;
case DM_MAPIO_KILL:
- dec_pending(tio->io, BLK_STS_IOERR);
free_tio(tio);
+ dec_pending(io, BLK_STS_IOERR);
break;
case DM_MAPIO_REQUEUE:
- dec_pending(tio->io, BLK_STS_DM_REQUEUE);
free_tio(tio);
+ dec_pending(io, BLK_STS_DM_REQUEUE);
break;
default:
DMWARN("unimplemented target map return value: %d", r);
BUG();
}
-}
-struct clone_info {
- struct mapped_device *md;
- struct dm_table *map;
- struct bio *bio;
- struct dm_io *io;
- sector_t sector;
- unsigned sector_count;
-};
+ return ret;
+}
static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
@@ -1264,28 +1299,49 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
return 0;
}
-static struct dm_target_io *alloc_tio(struct clone_info *ci,
- struct dm_target *ti,
- unsigned target_bio_nr)
+static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
+ struct dm_target *ti, unsigned num_bios)
{
struct dm_target_io *tio;
- struct bio *clone;
+ int try;
- clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
- tio = container_of(clone, struct dm_target_io, clone);
+ if (!num_bios)
+ return;
- tio->io = ci->io;
- tio->ti = ti;
- tio->target_bio_nr = target_bio_nr;
+ if (num_bios == 1) {
+ tio = alloc_tio(ci, ti, 0, GFP_NOIO);
+ bio_list_add(blist, &tio->clone);
+ return;
+ }
- return tio;
+ for (try = 0; try < 2; try++) {
+ int bio_nr;
+ struct bio *bio;
+
+ if (try)
+ mutex_lock(&ci->io->md->table_devices_lock);
+ for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
+ tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
+ if (!tio)
+ break;
+
+ bio_list_add(blist, &tio->clone);
+ }
+ if (try)
+ mutex_unlock(&ci->io->md->table_devices_lock);
+ if (bio_nr == num_bios)
+ return;
+
+ while ((bio = bio_list_pop(blist))) {
+ tio = container_of(bio, struct dm_target_io, clone);
+ free_tio(tio);
+ }
+ }
}
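alloc_multiple_bios() above is a two-pass batch allocator for mempool-backed bios: the first pass uses GFP_NOWAIT with no lock and may fail cheaply; the second takes table_devices_lock and allocates with GFP_NOIO, so at most one thread at a time can block on the shared bioset, and any partial batch is returned to the pool before retrying. The same shape in a self-contained sketch, with a pthread mutex and malloc standing in for the kernel primitives:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static void *pool_alloc(bool may_block)	/* bio_alloc_bioset() stand-in */
{
	(void)may_block;	/* a real pool would sleep when allowed */
	return malloc(32);
}

static int alloc_batch(void **out, int n)	/* all-or-nothing batch */
{
	int try, i;

	for (try = 0; try < 2; try++) {
		if (try)
			pthread_mutex_lock(&pool_lock);
		for (i = 0; i < n; i++) {
			out[i] = pool_alloc(try);  /* 2nd pass may sleep */
			if (!out[i])
				break;
		}
		if (try)
			pthread_mutex_unlock(&pool_lock);
		if (i == n)
			return 0;
		while (i--)
			free(out[i]);	/* return partial batch first */
	}
	return -1;
}

int main(void)
{
	void *v[4];
	int i;

	if (alloc_batch(v, 4) == 0)
		for (i = 0; i < 4; i++)
			free(v[i]);
	return 0;
}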
-static void __clone_and_map_simple_bio(struct clone_info *ci,
- struct dm_target *ti,
- unsigned target_bio_nr, unsigned *len)
+static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
+ struct dm_target_io *tio, unsigned *len)
{
- struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
struct bio *clone = &tio->clone;
tio->len_ptr = len;
@@ -1294,16 +1350,22 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
if (len)
bio_setup_sector(clone, ci->sector, *len);
- __map_bio(tio);
+ return __map_bio(tio);
}
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
unsigned num_bios, unsigned *len)
{
- unsigned target_bio_nr;
+ struct bio_list blist = BIO_EMPTY_LIST;
+ struct bio *bio;
+ struct dm_target_io *tio;
+
+ alloc_multiple_bios(&blist, ci, ti, num_bios);
- for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
- __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
+ while ((bio = bio_list_pop(&blist))) {
+ tio = container_of(bio, struct dm_target_io, clone);
+ (void) __clone_and_map_simple_bio(ci, tio, len);
+ }
}
static int __send_empty_flush(struct clone_info *ci)
@@ -1319,32 +1381,22 @@ static int __send_empty_flush(struct clone_info *ci)
}
static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
- sector_t sector, unsigned *len)
+ sector_t sector, unsigned *len)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
- unsigned target_bio_nr;
- unsigned num_target_bios = 1;
- int r = 0;
+ int r;
- /*
- * Does the target want to receive duplicate copies of the bio?
- */
- if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
- num_target_bios = ti->num_write_bios(ti, bio);
-
- for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
- tio = alloc_tio(ci, ti, target_bio_nr);
- tio->len_ptr = len;
- r = clone_bio(tio, bio, sector, *len);
- if (r < 0) {
- free_tio(tio);
- break;
- }
- __map_bio(tio);
+ tio = alloc_tio(ci, ti, 0, GFP_NOIO);
+ tio->len_ptr = len;
+ r = clone_bio(tio, bio, sector, *len);
+ if (r < 0) {
+ free_tio(tio);
+ return r;
}
+ (void) __map_bio(tio);
- return r;
+ return 0;
}
typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
@@ -1371,56 +1423,50 @@ static bool is_split_required_for_discard(struct dm_target *ti)
return ti->split_discard_bios;
}
-static int __send_changing_extent_only(struct clone_info *ci,
+static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
get_num_bios_fn get_num_bios,
is_split_required_fn is_split_required)
{
- struct dm_target *ti;
unsigned len;
unsigned num_bios;
- do {
- ti = dm_table_find_target(ci->map, ci->sector);
- if (!dm_target_is_valid(ti))
- return -EIO;
-
- /*
- * Even though the device advertised support for this type of
- * request, that does not mean every target supports it, and
- * reconfiguration might also have changed that since the
- * check was performed.
- */
- num_bios = get_num_bios ? get_num_bios(ti) : 0;
- if (!num_bios)
- return -EOPNOTSUPP;
+ /*
+ * Even though the device advertised support for this type of
+ * request, that does not mean every target supports it, and
+ * reconfiguration might also have changed that since the
+ * check was performed.
+ */
+ num_bios = get_num_bios ? get_num_bios(ti) : 0;
+ if (!num_bios)
+ return -EOPNOTSUPP;
- if (is_split_required && !is_split_required(ti))
- len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
- else
- len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
+ if (is_split_required && !is_split_required(ti))
+ len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+ else
+ len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
- __send_duplicate_bios(ci, ti, num_bios, &len);
+ __send_duplicate_bios(ci, ti, num_bios, &len);
- ci->sector += len;
- } while (ci->sector_count -= len);
+ ci->sector += len;
+ ci->sector_count -= len;
return 0;
}
-static int __send_discard(struct clone_info *ci)
+static int __send_discard(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, get_num_discard_bios,
+ return __send_changing_extent_only(ci, ti, get_num_discard_bios,
is_split_required_for_discard);
}
-static int __send_write_same(struct clone_info *ci)
+static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
+ return __send_changing_extent_only(ci, ti, get_num_write_same_bios, NULL);
}
-static int __send_write_zeroes(struct clone_info *ci)
+static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, get_num_write_zeroes_bios, NULL);
+ return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios, NULL);
}
/*
@@ -1433,17 +1479,17 @@ static int __split_and_process_non_flush(struct clone_info *ci)
unsigned len;
int r;
- if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
- return __send_discard(ci);
- else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
- return __send_write_same(ci);
- else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
- return __send_write_zeroes(ci);
-
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
+ return __send_discard(ci, ti);
+ else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
+ return __send_write_same(ci, ti);
+ else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
+ return __send_write_zeroes(ci, ti);
+
if (bio_op(bio) == REQ_OP_ZONE_REPORT)
len = ci->sector_count;
else
@@ -1460,34 +1506,33 @@ static int __split_and_process_non_flush(struct clone_info *ci)
return 0;
}
+static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
+{
+ ci->map = map;
+ ci->io = alloc_io(md, bio);
+ ci->sector = bio->bi_iter.bi_sector;
+}
+
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
-static void __split_and_process_bio(struct mapped_device *md,
- struct dm_table *map, struct bio *bio)
+static blk_qc_t __split_and_process_bio(struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
{
struct clone_info ci;
+ blk_qc_t ret = BLK_QC_T_NONE;
int error = 0;
if (unlikely(!map)) {
bio_io_error(bio);
- return;
+ return ret;
}
- ci.map = map;
- ci.md = md;
- ci.io = alloc_io(md);
- ci.io->status = 0;
- atomic_set(&ci.io->io_count, 1);
- ci.io->bio = bio;
- ci.io->md = md;
- spin_lock_init(&ci.io->endio_lock);
- ci.sector = bio->bi_iter.bi_sector;
-
- start_io_acct(ci.io);
+ init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) {
- ci.bio = &ci.md->flush_bio;
+ ci.bio = &ci.io->md->flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
@@ -1498,32 +1543,95 @@ static void __split_and_process_bio(struct mapped_device *md,
} else {
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
- while (ci.sector_count && !error)
+ while (ci.sector_count && !error) {
error = __split_and_process_non_flush(&ci);
+ if (current->bio_list && ci.sector_count && !error) {
+ /*
+ * Remainder must be passed to generic_make_request()
+ * so that it gets handled *after* bios already submitted
+ * have been completely processed.
+ * We take a clone of the original to store in
+ * ci.io->orig_bio to be used by end_io_acct() and
+ * for dec_pending to use for completion handling.
+ * As this path is not used for REQ_OP_ZONE_REPORT,
+ * the usage of io->orig_bio in dm_remap_zone_report()
+ * won't be affected by this reassignment.
+ */
+ struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
+ md->queue->bio_split);
+ ci.io->orig_bio = b;
+ bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
+ bio_chain(b, bio);
+ ret = generic_make_request(bio);
+ break;
+ }
+ }
}
/* drop the extra reference count */
dec_pending(ci.io, errno_to_blk_status(error));
+ return ret;
}
-/*-----------------------------------------------------------------
- * CRUD END
- *---------------------------------------------------------------*/
/*
- * The request function that just remaps the bio built up by
- * dm_merge_bvec.
+ * Optimized variant of __split_and_process_bio that leverages the
+ * fact that targets that use it do _not_ have a need to split bios.
*/
-static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t __process_bio(struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
+{
+ struct clone_info ci;
+ blk_qc_t ret = BLK_QC_T_NONE;
+ int error = 0;
+
+ if (unlikely(!map)) {
+ bio_io_error(bio);
+ return ret;
+ }
+
+ init_clone_info(&ci, md, map, bio);
+
+ if (bio->bi_opf & REQ_PREFLUSH) {
+ ci.bio = &ci.io->md->flush_bio;
+ ci.sector_count = 0;
+ error = __send_empty_flush(&ci);
+ /* dec_pending submits any data associated with flush */
+ } else {
+ struct dm_target *ti = md->immutable_target;
+ struct dm_target_io *tio;
+
+ /*
+ * Defend against IO still getting in during teardown
+ * - as was seen for a time with nvme-fcloop
+ */
+ if (unlikely(WARN_ON_ONCE(!ti || !dm_target_is_valid(ti)))) {
+ error = -EIO;
+ goto out;
+ }
+
+ tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
+ ci.bio = bio;
+ ci.sector_count = bio_sectors(bio);
+ ret = __clone_and_map_simple_bio(&ci, tio, NULL);
+ }
+out:
+ /* drop the extra reference count */
+ dec_pending(ci.io, errno_to_blk_status(error));
+ return ret;
+}
+
+typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *);
+
+static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio,
+ process_bio_fn process_bio)
{
- int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
+ blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
- generic_start_io_acct(q, rw, bio_sectors(bio), &dm_disk(md)->part0);
-
/* if we're suspended, we have to queue this io for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
dm_put_live_table(md, srcu_idx);
@@ -1532,12 +1640,27 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
queue_io(md, bio);
else
bio_io_error(bio);
- return BLK_QC_T_NONE;
+ return ret;
}
- __split_and_process_bio(md, map, bio);
+ ret = process_bio(md, map, bio);
+
dm_put_live_table(md, srcu_idx);
- return BLK_QC_T_NONE;
+ return ret;
+}
+
+/*
+ * The request function that remaps the bio to one target and
+ * splits off any remainder.
+ */
+static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
+{
+ return __dm_make_request(q, bio, __split_and_process_bio);
+}
+
+static blk_qc_t dm_make_request_nvme(struct request_queue *q, struct bio *bio)
+{
+ return __dm_make_request(q, bio, __process_bio);
}
static int dm_any_congested(void *congested_data, int bdi_bits)
@@ -1618,20 +1741,9 @@ static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
-void dm_init_md_queue(struct mapped_device *md)
-{
- /*
- * Initialize data that will only be used by a non-blk-mq DM queue
- * - must do so here (in alloc_dev callchain) before queue is used
- */
- md->queue->queuedata = md;
- md->queue->backing_dev_info->congested_data = md;
-}
-
-void dm_init_normal_md_queue(struct mapped_device *md)
+static void dm_init_normal_md_queue(struct mapped_device *md)
{
md->use_blk_mq = false;
- dm_init_md_queue(md);
/*
* Initialize aspects of queue that aren't relevant for blk-mq
@@ -1645,9 +1757,10 @@ static void cleanup_mapped_device(struct mapped_device *md)
destroy_workqueue(md->wq);
if (md->kworker_task)
kthread_stop(md->kworker_task);
- mempool_destroy(md->io_pool);
if (md->bs)
bioset_free(md->bs);
+ if (md->io_bs)
+ bioset_free(md->io_bs);
if (md->dax_dev) {
kill_dax(md->dax_dev);
@@ -1673,6 +1786,10 @@ static void cleanup_mapped_device(struct mapped_device *md)
md->bdev = NULL;
}
+ mutex_destroy(&md->suspend_lock);
+ mutex_destroy(&md->type_lock);
+ mutex_destroy(&md->table_devices_lock);
+
dm_mq_cleanup_mapped_device(md);
}
@@ -1726,10 +1843,10 @@ static struct mapped_device *alloc_dev(int minor)
md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
if (!md->queue)
goto bad;
+ md->queue->queuedata = md;
+ md->queue->backing_dev_info->congested_data = md;
- dm_init_md_queue(md);
-
- md->disk = alloc_disk_node(1, numa_node_id);
+ md->disk = alloc_disk_node(1, md->numa_node_id);
if (!md->disk)
goto bad;
@@ -1753,7 +1870,7 @@ static struct mapped_device *alloc_dev(int minor)
goto bad;
md->dax_dev = dax_dev;
- add_disk(md->disk);
+ add_disk_no_queue_reg(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
@@ -1812,17 +1929,22 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
struct dm_md_mempools *p = dm_table_get_md_mempools(t);
- if (md->bs) {
- /* The md already has necessary mempools. */
- if (dm_table_bio_based(t)) {
- /*
- * Reload bioset because front_pad may have changed
- * because a different table was loaded.
- */
+ if (dm_table_bio_based(t)) {
+ /*
+ * The md may already have mempools that need changing.
+ * If so, reload the bioset because loading a different table
+ * may have changed the required front_pad.
+ */
+ if (md->bs) {
bioset_free(md->bs);
- md->bs = p->bs;
- p->bs = NULL;
+ md->bs = NULL;
}
+ if (md->io_bs) {
+ bioset_free(md->io_bs);
+ md->io_bs = NULL;
+ }
+
+ } else if (md->bs) {
/*
* There's no need to reload with request-based dm
* because the size of front_pad doesn't change.
@@ -1834,13 +1956,12 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
goto out;
}
- BUG_ON(!p || md->io_pool || md->bs);
+ BUG_ON(!p || md->bs || md->io_bs);
- md->io_pool = p->io_pool;
- p->io_pool = NULL;
md->bs = p->bs;
p->bs = NULL;
-
+ md->io_bs = p->io_bs;
+ p->io_bs = NULL;
out:
/* mempool bind completed, no longer need any mempools in the table */
dm_table_free_md_mempools(t);
@@ -1886,6 +2007,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
{
struct dm_table *old_map;
struct request_queue *q = md->queue;
+ bool request_based = dm_table_request_based(t);
sector_t size;
lockdep_assert_held(&md->suspend_lock);
@@ -1909,12 +2031,15 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
* This must be done before setting the queue restrictions,
* because request-based dm may be run just after the setting.
*/
- if (dm_table_request_based(t)) {
+ if (request_based)
dm_stop_queue(q);
+
+ if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
/*
- * Leverage the fact that request-based DM targets are
- * immutable singletons and establish md->immutable_target
- * - used to optimize both dm_request_fn and dm_mq_queue_rq
+ * Leverage the fact that request-based DM targets and
+ * NVMe bio-based targets are immutable singletons
+ * - used to optimize dm_request_fn, dm_mq_queue_rq and
+ * __process_bio.
*/
md->immutable_target = dm_table_get_immutable_target(t);
}
@@ -1954,13 +2079,18 @@ static struct dm_table *__unbind(struct mapped_device *md)
*/
int dm_create(int minor, struct mapped_device **result)
{
+ int r;
struct mapped_device *md;
md = alloc_dev(minor);
if (!md)
return -ENXIO;
- dm_sysfs_init(md);
+ r = dm_sysfs_init(md);
+ if (r) {
+ free_dev(md);
+ return r;
+ }
*result = md;
return 0;
@@ -2013,10 +2143,12 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
int r;
+ struct queue_limits limits;
enum dm_queue_mode type = dm_get_md_type(md);
switch (type) {
case DM_TYPE_REQUEST_BASED:
+ dm_init_normal_md_queue(md);
r = dm_old_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based mapped device");
@@ -2034,21 +2166,24 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
case DM_TYPE_DAX_BIO_BASED:
dm_init_normal_md_queue(md);
blk_queue_make_request(md->queue, dm_make_request);
- /*
- * DM handles splitting bios as needed. Free the bio_split bioset
- * since it won't be used (saves 1 process per bio-based DM device).
- */
- bioset_free(md->queue->bio_split);
- md->queue->bio_split = NULL;
-
- if (type == DM_TYPE_DAX_BIO_BASED)
- queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
+ break;
+ case DM_TYPE_NVME_BIO_BASED:
+ dm_init_normal_md_queue(md);
+ blk_queue_make_request(md->queue, dm_make_request_nvme);
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
break;
}
+ r = dm_calculate_queue_limits(t, &limits);
+ if (r) {
+ DMERR("Cannot calculate initial queue limits");
+ return r;
+ }
+ dm_table_set_restrictions(t, md->queue, &limits);
+ blk_register_queue(md->disk);
+
return 0;
}
@@ -2113,7 +2248,6 @@ EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
- struct request_queue *q = dm_get_md_queue(md);
struct dm_table *map;
int srcu_idx;
@@ -2124,7 +2258,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- blk_set_queue_dying(q);
+ blk_set_queue_dying(md->queue);
if (dm_request_based(md) && md->kworker_task)
kthread_flush_worker(&md->kworker);
@@ -2735,11 +2869,12 @@ int dm_noflush_suspending(struct dm_target *ti)
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
- unsigned integrity, unsigned per_io_data_size)
+ unsigned integrity, unsigned per_io_data_size,
+ unsigned min_pool_size)
{
struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
unsigned int pool_size = 0;
- unsigned int front_pad;
+ unsigned int front_pad, io_front_pad;
if (!pools)
return NULL;
@@ -2747,16 +2882,19 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
switch (type) {
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
- pool_size = dm_get_reserved_bio_based_ios();
+ case DM_TYPE_NVME_BIO_BASED:
+ pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
-
- pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
- if (!pools->io_pool)
+ io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
+ pools->io_bs = bioset_create(pool_size, io_front_pad, 0);
+ if (!pools->io_bs)
+ goto out;
+ if (integrity && bioset_integrity_create(pools->io_bs, pool_size))
goto out;
break;
case DM_TYPE_REQUEST_BASED:
case DM_TYPE_MQ_REQUEST_BASED:
- pool_size = dm_get_reserved_rq_based_ios();
+ pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
/* per_io_data_size is used for blk-mq pdu at queue allocation */
break;
@@ -2764,7 +2902,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
BUG();
}
- pools->bs = bioset_create(pool_size, front_pad, BIOSET_NEED_RESCUER);
+ pools->bs = bioset_create(pool_size, front_pad, 0);
if (!pools->bs)
goto out;
@@ -2784,10 +2922,10 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
if (!pools)
return;
- mempool_destroy(pools->io_pool);
-
if (pools->bs)
bioset_free(pools->bs);
+ if (pools->io_bs)
+ bioset_free(pools->io_bs);
kfree(pools);
}
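Editor's note: the conversion above replaces DM's slab-backed io mempool with a front-padded bioset. bioset_create()'s second argument reserves bytes ahead of every bio, so a per-I/O structure can embed its bio as the last member and be recovered from it with container_of(). A minimal sketch of that layout follows; struct example_io, example_bs, example_alloc_io and example_endio are hypothetical names, not DM symbols.

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Front-pad sketch: example_bs is assumed to have been created with
 * bioset_create(pool_size, offsetof(struct example_io, clone), 0),
 * so every bio allocated from it is preceded by an example_io.
 */
struct example_io {
        int             error;
        struct bio      clone;          /* must be the last member */
};

static struct bio_set *example_bs;      /* assumed initialised as above */

static struct example_io *example_alloc_io(void)
{
        struct bio *clone = bio_alloc_bioset(GFP_NOIO, 0, example_bs);

        if (!clone)
                return NULL;
        /* the allocation starts front_pad bytes before the bio */
        return container_of(clone, struct example_io, clone);
}

static void example_endio(struct bio *bio)
{
        struct example_io *io = container_of(bio, struct example_io, clone);

        io->error = blk_status_to_errno(bio->bi_status);
}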
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 36399bb875dd..114a81b27c37 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -49,7 +49,6 @@ struct dm_md_mempools;
/*-----------------------------------------------------------------
* Internal table functions.
*---------------------------------------------------------------*/
-void dm_table_destroy(struct dm_table *t);
void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
@@ -206,7 +205,8 @@ void dm_kcopyd_exit(void);
* Mempool operations
*/
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
- unsigned integrity, unsigned per_bio_data_size);
+ unsigned integrity, unsigned per_bio_data_size,
+ unsigned min_pool_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);
/*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e4dee0ec2de..0081ace39a64 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -711,7 +711,7 @@ static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
return NULL;
}
-static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
+struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
struct md_rdev *rdev;
@@ -721,6 +721,7 @@ static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
return NULL;
}
+EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
static struct md_personality *find_pers(int level, char *clevel)
{
@@ -5560,11 +5561,6 @@ int md_run(struct mddev *mddev)
if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
- /*
- * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
- * up mddev->thread. It is important to initialize critical
- * resources for mddev->thread BEFORE calling pers->run().
- */
err = pers->run(mddev);
if (err)
pr_warn("md: pers->run() failed ...\n");
@@ -5678,6 +5674,9 @@ static int do_md_run(struct mddev *mddev)
if (mddev_is_clustered(mddev))
md_allow_write(mddev);
+ /* run start up tasks that require md_thread */
+ md_start(mddev);
+
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
@@ -5689,6 +5688,21 @@ out:
return err;
}
+int md_start(struct mddev *mddev)
+{
+ int ret = 0;
+
+ if (mddev->pers->start) {
+ set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ ret = mddev->pers->start(mddev);
+ clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+ md_wakeup_thread(mddev->sync_thread);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(md_start);
+
static int restart_array(struct mddev *mddev)
{
struct gendisk *disk = mddev->gendisk;
@@ -6997,7 +7011,7 @@ static int set_disk_faulty(struct mddev *mddev, dev_t dev)
return -ENODEV;
rcu_read_lock();
- rdev = find_rdev_rcu(mddev, dev);
+ rdev = md_find_rdev_rcu(mddev, dev);
if (!rdev)
err = -ENODEV;
else {
@@ -7871,10 +7885,10 @@ static int md_seq_open(struct inode *inode, struct file *file)
}
static int md_unloading;
-static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
+static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
{
struct seq_file *seq = filp->private_data;
- int mask;
+ __poll_t mask;
if (md_unloading)
return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
@@ -8169,7 +8183,8 @@ void md_do_sync(struct md_thread *thread)
int ret;
/* just in case thread restarts... */
- if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
return;
if (mddev->ro) {/* never try to sync a read-only array */
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
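Editor's note: the md_start()/pers->start() split introduced above separates work that needs a live mddev->thread from pers->run(): do_md_run() starts the thread and then calls md_start(), while MD_RECOVERY_WAIT (checked in md_do_sync() above) keeps resync parked until start() returns. A hedged sketch of a personality using the hook; example_conf, example_run and example_replay_log are hypothetical, not md symbols.

#include "md.h"        /* drivers/md/md.h, for struct mddev and md_personality */

/*
 * Hypothetical pers->start(): when this runs, mddev->thread exists
 * and MD_RECOVERY_WAIT holds md_do_sync() off until we return.
 */
static int example_start(struct mddev *mddev)
{
        struct example_conf *conf = mddev->private;     /* hypothetical */

        /* may submit I/O whose completion is driven by mddev->thread */
        return example_replay_log(conf);                /* hypothetical */
}

static struct md_personality example_personality = {
        .name   = "example",
        .owner  = THIS_MODULE,
        .run    = example_run,          /* must not depend on md_thread */
        .start  = example_start,        /* md_thread is available here */
};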
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7d6bcf0eba0c..58cd20a5e85e 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -485,6 +485,7 @@ enum recovery_flags {
MD_RECOVERY_RESHAPE, /* A reshape is happening */
MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */
MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */
+ MD_RECOVERY_WAIT, /* waiting for pers->start() to finish */
};
static inline int __must_check mddev_lock(struct mddev *mddev)
@@ -523,7 +524,13 @@ struct md_personality
struct list_head list;
struct module *owner;
bool (*make_request)(struct mddev *mddev, struct bio *bio);
+ /*
+ * start up work that does NOT require md_thread. Tasks that
+ * require md_thread should go into start().
+ */
int (*run)(struct mddev *mddev);
+ /* start up work that requires md_thread */
+ int (*start)(struct mddev *mddev);
void (*free)(struct mddev *mddev, void *priv);
void (*status)(struct seq_file *seq, struct mddev *mddev);
/* error_handler must set ->faulty and clear ->in_sync
@@ -687,6 +694,7 @@ extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
+extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
@@ -702,6 +710,7 @@ extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
+struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index f21ce6a3d4cf..58b319757b1e 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
pn->keys[1] = rn->keys[0];
memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
- /*
- * rejig the spine. This is ugly, since it knows too
- * much about the spine
- */
- if (s->nodes[0] != new_parent) {
- unlock_block(s->info, s->nodes[0]);
- s->nodes[0] = new_parent;
- }
- if (key < le64_to_cpu(rn->keys[0])) {
- unlock_block(s->info, right);
- s->nodes[1] = left;
- } else {
- unlock_block(s->info, left);
- s->nodes[1] = right;
- }
- s->count = 2;
-
+ unlock_block(s->info, left);
+ unlock_block(s->info, right);
return 0;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6df398e3a008..b2eae332e1a2 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -815,6 +815,17 @@ static void flush_pending_writes(struct r1conf *conf)
bio = bio_list_get(&conf->pending_bio_list);
conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
+
+ /*
+ * As this is called in a wait_event() loop (see freeze_array),
+ * current->state might be TASK_UNINTERRUPTIBLE, which will
+ * cause a warning when we prepare to wait again. As it is
+ * rare that this path is taken, it is perfectly safe to force
+ * us to go around the wait_event() loop again, so the warning
+ * is a false positive. Silence it by resetting the thread
+ * state.
+ */
+ __set_current_state(TASK_RUNNING);
blk_start_plug(&plug);
flush_bio_list(conf, bio);
blk_finish_plug(&plug);
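Editor's note: the __set_current_state(TASK_RUNNING) added here (and in the identical raid10 hunk below) deals with a wait_event() subtlety: the condition expression is evaluated after the task has been set to TASK_UNINTERRUPTIBLE, so a helper called from the condition that may block has to put the task back into a running state before the loop prepares to wait again. A schematic of the calling shape; example_wait_cond() and its example_* helpers are hypothetical.

/*
 * Schematic only. flush_pending_writes() may block, so it is entered
 * with current->state == TASK_UNINTERRUPTIBLE and resets it on the
 * way out; at worst the surrounding wait_event() loops once more.
 *
 * Caller side (schematic):
 *      wait_event(conf->wait_barrier, example_wait_cond(conf));
 */
static bool example_wait_cond(struct r1conf *conf)
{
        if (example_needs_flush(conf))          /* hypothetical */
                flush_pending_writes(conf);     /* resets task state */
        return example_writes_done(conf);       /* hypothetical */
}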
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c131835cf008..99c9207899a7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -900,6 +900,18 @@ static void flush_pending_writes(struct r10conf *conf)
bio = bio_list_get(&conf->pending_bio_list);
conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
+
+ /*
+ * As this is called in a wait_event() loop (see freeze_array),
+ * current->state might be TASK_UNINTERRUPTIBLE, which will
+ * cause a warning when we prepare to wait again. As it is
+ * rare that this path is taken, it is perfectly safe to force
+ * us to go around the wait_event() loop again, so the warning
+ * is a false positive. Silence it by resetting the thread
+ * state.
+ */
+ __set_current_state(TASK_RUNNING);
+
blk_start_plug(&plug);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 39f31f07ffe9..3c65f52b68f5 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1111,9 +1111,6 @@ void r5l_write_stripe_run(struct r5l_log *log)
int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
- if (!log)
- return -ENODEV;
-
if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
/*
* in write through (journal only)
@@ -1592,8 +1589,6 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
void r5l_quiesce(struct r5l_log *log, int quiesce)
{
struct mddev *mddev;
- if (!log)
- return;
if (quiesce) {
/* make sure r5l_write_super_and_discard_space exits */
@@ -2448,7 +2443,6 @@ static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
raid5_release_stripe(sh);
}
- md_wakeup_thread(conf->mddev->thread);
/* reuse conf->wait_for_quiescent in recovery */
wait_event(conf->wait_for_quiescent,
atomic_read(&conf->active_stripes) == 0);
@@ -2491,10 +2485,10 @@ static int r5l_recovery_log(struct r5l_log *log)
ctx->seq += 10000;
if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
- pr_debug("md/raid:%s: starting from clean shutdown\n",
+ pr_info("md/raid:%s: starting from clean shutdown\n",
mdname(mddev));
else
- pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
+ pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
mdname(mddev), ctx->data_only_stripes,
ctx->data_parity_stripes);
@@ -3036,6 +3030,23 @@ ioerr:
return ret;
}
+int r5l_start(struct r5l_log *log)
+{
+ int ret;
+
+ if (!log)
+ return 0;
+
+ ret = r5l_load_log(log);
+ if (ret) {
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+
+ r5l_exit_log(conf);
+ }
+ return ret;
+}
+
void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
{
struct r5conf *conf = mddev->private;
@@ -3138,13 +3149,9 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
rcu_assign_pointer(conf->log, log);
- if (r5l_load_log(log))
- goto error;
-
set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
return 0;
-error:
rcu_assign_pointer(conf->log, NULL);
md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index 284578b0a349..0c76bcedfc1c 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -32,6 +32,7 @@ extern struct md_sysfs_entry r5c_journal_mode;
extern void r5c_update_on_rdev_error(struct mddev *mddev,
struct md_rdev *rdev);
extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
+extern int r5l_start(struct r5l_log *log);
extern struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
@@ -42,6 +43,7 @@ extern int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh);
extern void ppl_write_stripe_run(struct r5conf *conf);
extern void ppl_stripe_write_finished(struct stripe_head *sh);
extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
+extern void ppl_quiesce(struct r5conf *conf, int quiesce);
static inline bool raid5_has_ppl(struct r5conf *conf)
{
@@ -87,6 +89,34 @@ static inline void log_write_stripe_run(struct r5conf *conf)
ppl_write_stripe_run(conf);
}
+static inline void log_flush_stripe_to_raid(struct r5conf *conf)
+{
+ if (conf->log)
+ r5l_flush_stripe_to_raid(conf->log);
+ else if (raid5_has_ppl(conf))
+ ppl_write_stripe_run(conf);
+}
+
+static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio)
+{
+ int ret = -ENODEV;
+
+ if (conf->log)
+ ret = r5l_handle_flush_request(conf->log, bio);
+ else if (raid5_has_ppl(conf))
+ ret = 0;
+
+ return ret;
+}
+
+static inline void log_quiesce(struct r5conf *conf, int quiesce)
+{
+ if (conf->log)
+ r5l_quiesce(conf->log, quiesce);
+ else if (raid5_has_ppl(conf))
+ ppl_quiesce(conf, quiesce);
+}
+
static inline void log_exit(struct r5conf *conf)
{
if (conf->log)
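Editor's note: the new inline wrappers above give raid5 one call site per log operation and pick the backend at run time: the raid5-cache journal when conf->log is set, PPL when raid5_has_ppl(), and a no-op (or -ENODEV for flush requests) otherwise. The raid5.c hunks below switch the callers over. The dispatch shape, restated for a hypothetical log_example() operation:

/*
 * Dispatch shape of the wrappers above; r5l_example() and
 * ppl_example() are hypothetical stand-ins, not real symbols.
 */
static inline void log_example(struct r5conf *conf)
{
        if (conf->log)                          /* write-back journal */
                r5l_example(conf->log);
        else if (raid5_has_ppl(conf))           /* partial parity log */
                ppl_example(conf);
}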
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 628c0bf7b9fd..2764c2290062 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -85,6 +85,9 @@
* (for a single member disk). New io_units are added to the end of the list
* and the first io_unit is submitted, if it is not submitted already.
* The current io_unit accepting new stripes is always at the end of the list.
+ *
+ * If write-back cache is enabled for any of the disks in the array, its data
+ * must be flushed before the next io_unit is submitted.
*/
#define PPL_SPACE_SIZE (128 * 1024)
@@ -104,6 +107,7 @@ struct ppl_conf {
struct kmem_cache *io_kc;
mempool_t *io_pool;
struct bio_set *bs;
+ struct bio_set *flush_bs;
/* used only for recovery */
int recovered_entries;
@@ -128,6 +132,8 @@ struct ppl_log {
sector_t next_io_sector;
unsigned int entry_space;
bool use_multippl;
+ bool wb_cache_on;
+ unsigned long disk_flush_bitmap;
};
#define PPL_IO_INLINE_BVECS 32
@@ -145,6 +151,7 @@ struct ppl_io_unit {
struct list_head stripe_list; /* stripes added to the io_unit */
atomic_t pending_stripes; /* how many stripes not written to raid */
+ atomic_t pending_flushes; /* how many disk flushes are in progress */
bool submitted; /* true if write to log started */
@@ -249,6 +256,7 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
INIT_LIST_HEAD(&io->log_sibling);
INIT_LIST_HEAD(&io->stripe_list);
atomic_set(&io->pending_stripes, 0);
+ atomic_set(&io->pending_flushes, 0);
bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
pplhdr = page_address(io->header_page);
@@ -475,7 +483,18 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
if (log->use_multippl)
log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;
+ WARN_ON(log->disk_flush_bitmap != 0);
+
list_for_each_entry(sh, &io->stripe_list, log_list) {
+ for (i = 0; i < sh->disks; i++) {
+ struct r5dev *dev = &sh->dev[i];
+
+ if ((ppl_conf->child_logs[i].wb_cache_on) &&
+ (test_bit(R5_Wantwrite, &dev->flags))) {
+ set_bit(i, &log->disk_flush_bitmap);
+ }
+ }
+
/* entries for full stripe writes have no partial parity */
if (test_bit(STRIPE_FULL_WRITE, &sh->state))
continue;
@@ -540,6 +559,7 @@ static void ppl_io_unit_finished(struct ppl_io_unit *io)
{
struct ppl_log *log = io->log;
struct ppl_conf *ppl_conf = log->ppl_conf;
+ struct r5conf *conf = ppl_conf->mddev->private;
unsigned long flags;
pr_debug("%s: seq: %llu\n", __func__, io->seq);
@@ -565,6 +585,112 @@ static void ppl_io_unit_finished(struct ppl_io_unit *io)
spin_unlock(&ppl_conf->no_mem_stripes_lock);
local_irq_restore(flags);
+
+ wake_up(&conf->wait_for_quiescent);
+}
+
+static void ppl_flush_endio(struct bio *bio)
+{
+ struct ppl_io_unit *io = bio->bi_private;
+ struct ppl_log *log = io->log;
+ struct ppl_conf *ppl_conf = log->ppl_conf;
+ struct r5conf *conf = ppl_conf->mddev->private;
+ char b[BDEVNAME_SIZE];
+
+ pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));
+
+ if (bio->bi_status) {
+ struct md_rdev *rdev;
+
+ rcu_read_lock();
+ rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
+ if (rdev)
+ md_error(rdev->mddev, rdev);
+ rcu_read_unlock();
+ }
+
+ bio_put(bio);
+
+ if (atomic_dec_and_test(&io->pending_flushes)) {
+ ppl_io_unit_finished(io);
+ md_wakeup_thread(conf->mddev->thread);
+ }
+}
+
+static void ppl_do_flush(struct ppl_io_unit *io)
+{
+ struct ppl_log *log = io->log;
+ struct ppl_conf *ppl_conf = log->ppl_conf;
+ struct r5conf *conf = ppl_conf->mddev->private;
+ int raid_disks = conf->raid_disks;
+ int flushed_disks = 0;
+ int i;
+
+ atomic_set(&io->pending_flushes, raid_disks);
+
+ for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
+ struct md_rdev *rdev;
+ struct block_device *bdev = NULL;
+
+ rcu_read_lock();
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags))
+ bdev = rdev->bdev;
+ rcu_read_unlock();
+
+ if (bdev) {
+ struct bio *bio;
+ char b[BDEVNAME_SIZE];
+
+ bio = bio_alloc_bioset(GFP_NOIO, 0, ppl_conf->flush_bs);
+ bio_set_dev(bio, bdev);
+ bio->bi_private = io;
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+ bio->bi_end_io = ppl_flush_endio;
+
+ pr_debug("%s: dev: %s\n", __func__,
+ bio_devname(bio, b));
+
+ submit_bio(bio);
+ flushed_disks++;
+ }
+ }
+
+ log->disk_flush_bitmap = 0;
+
+ for (i = flushed_disks; i < raid_disks; i++) {
+ if (atomic_dec_and_test(&io->pending_flushes))
+ ppl_io_unit_finished(io);
+ }
+}
+
+static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
+ struct ppl_log *log)
+{
+ struct ppl_io_unit *io;
+
+ io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
+ log_sibling);
+
+ return !io || !io->submitted;
+}
+
+void ppl_quiesce(struct r5conf *conf, int quiesce)
+{
+ struct ppl_conf *ppl_conf = conf->log_private;
+ int i;
+
+ if (quiesce) {
+ for (i = 0; i < ppl_conf->count; i++) {
+ struct ppl_log *log = &ppl_conf->child_logs[i];
+
+ spin_lock_irq(&log->io_list_lock);
+ wait_event_lock_irq(conf->wait_for_quiescent,
+ ppl_no_io_unit_submitted(conf, log),
+ log->io_list_lock);
+ spin_unlock_irq(&log->io_list_lock);
+ }
+ }
}
void ppl_stripe_write_finished(struct stripe_head *sh)
@@ -574,8 +700,12 @@ void ppl_stripe_write_finished(struct stripe_head *sh)
io = sh->ppl_io;
sh->ppl_io = NULL;
- if (io && atomic_dec_and_test(&io->pending_stripes))
- ppl_io_unit_finished(io);
+ if (io && atomic_dec_and_test(&io->pending_stripes)) {
+ if (io->log->disk_flush_bitmap)
+ ppl_do_flush(io);
+ else
+ ppl_io_unit_finished(io);
+ }
}
static void ppl_xor(int size, struct page *page1, struct page *page2)
@@ -1108,6 +1238,8 @@ static void __ppl_exit_log(struct ppl_conf *ppl_conf)
if (ppl_conf->bs)
bioset_free(ppl_conf->bs);
+ if (ppl_conf->flush_bs)
+ bioset_free(ppl_conf->flush_bs);
mempool_destroy(ppl_conf->io_pool);
kmem_cache_destroy(ppl_conf->io_kc);
@@ -1173,6 +1305,8 @@ static int ppl_validate_rdev(struct md_rdev *rdev)
static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
{
+ struct request_queue *q;
+
if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
PPL_HEADER_SIZE) * 2) {
log->use_multippl = true;
@@ -1185,6 +1319,10 @@ static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
PPL_HEADER_SIZE;
}
log->next_io_sector = rdev->ppl.sector;
+
+ q = bdev_get_queue(rdev->bdev);
+ if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+ log->wb_cache_on = true;
}
int ppl_init_log(struct r5conf *conf)
@@ -1192,8 +1330,8 @@ int ppl_init_log(struct r5conf *conf)
struct ppl_conf *ppl_conf;
struct mddev *mddev = conf->mddev;
int ret = 0;
+ int max_disks;
int i;
- bool need_cache_flush = false;
pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
mdname(conf->mddev));
@@ -1219,6 +1357,14 @@ int ppl_init_log(struct r5conf *conf)
return -EINVAL;
}
+ max_disks = FIELD_SIZEOF(struct ppl_log, disk_flush_bitmap) *
+ BITS_PER_BYTE;
+ if (conf->raid_disks > max_disks) {
+ pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
+ mdname(mddev), max_disks);
+ return -EINVAL;
+ }
+
ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
if (!ppl_conf)
return -ENOMEM;
@@ -1244,6 +1390,12 @@ int ppl_init_log(struct r5conf *conf)
goto err;
}
+ ppl_conf->flush_bs = bioset_create(conf->raid_disks, 0, 0);
+ if (!ppl_conf->flush_bs) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
ppl_conf->count = conf->raid_disks;
ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
GFP_KERNEL);
@@ -1275,23 +1427,14 @@ int ppl_init_log(struct r5conf *conf)
log->rdev = rdev;
if (rdev) {
- struct request_queue *q;
-
ret = ppl_validate_rdev(rdev);
if (ret)
goto err;
- q = bdev_get_queue(rdev->bdev);
- if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
- need_cache_flush = true;
ppl_init_child_log(log, rdev);
}
}
- if (need_cache_flush)
- pr_warn("md/raid:%s: Volatile write-back cache should be disabled on all member drives when using PPL!\n",
- mdname(mddev));
-
/* load and possibly recover the logs from the member disks */
ret = ppl_load(ppl_conf);
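Editor's note: ppl_do_flush() above pre-charges io->pending_flushes with raid_disks and then refunds one count for every disk that did not get a flush bio, so ppl_io_unit_finished() runs exactly once no matter whether an endio callback or the refund loop drops the last reference. The accounting pattern in isolation, with raid5-ppl.c context assumed; example_flush_all() and example_submit_flush() are hypothetical.

/*
 * Reduced sketch of the pending_flushes accounting. Pre-charging for
 * all disks before submitting anything closes the window in which an
 * early endio could see a zero count and complete the io_unit while
 * later bios are still being issued.
 */
static void example_flush_all(struct ppl_io_unit *io, struct ppl_log *log,
                              int raid_disks)
{
        int flushed = 0, i;

        atomic_set(&io->pending_flushes, raid_disks);

        for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks)
                if (example_submit_flush(io, i))        /* hypothetical */
                        flushed++;

        /* refund the slots that never got a bio */
        for (i = flushed; i < raid_disks; i++)
                if (atomic_dec_and_test(&io->pending_flushes))
                        ppl_io_unit_finished(io);       /* fires at most once */
}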
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 98ce4272ace9..50d01144b805 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5563,7 +5563,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
bool do_flush = false;
if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
- int ret = r5l_handle_flush_request(conf->log, bi);
+ int ret = log_handle_flush_request(conf, bi);
if (ret == 0)
return true;
@@ -6168,7 +6168,7 @@ static int handle_active_stripes(struct r5conf *conf, int group,
break;
if (i == NR_STRIPE_HASH_LOCKS) {
spin_unlock_irq(&conf->device_lock);
- r5l_flush_stripe_to_raid(conf->log);
+ log_flush_stripe_to_raid(conf);
spin_lock_irq(&conf->device_lock);
return batch_size;
}
@@ -8060,7 +8060,7 @@ static void raid5_quiesce(struct mddev *mddev, int quiesce)
wake_up(&conf->wait_for_overlap);
unlock_all_device_hash_locks_irq(conf);
}
- r5l_quiesce(conf->log, quiesce);
+ log_quiesce(conf, quiesce);
}
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
@@ -8364,6 +8364,13 @@ static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf)
return err;
}
+static int raid5_start(struct mddev *mddev)
+{
+ struct r5conf *conf = mddev->private;
+
+ return r5l_start(conf->log);
+}
+
static struct md_personality raid6_personality =
{
.name = "raid6",
@@ -8371,6 +8378,7 @@ static struct md_personality raid6_personality =
.owner = THIS_MODULE,
.make_request = raid5_make_request,
.run = raid5_run,
+ .start = raid5_start,
.free = raid5_free,
.status = raid5_status,
.error_handler = raid5_error,
@@ -8395,6 +8403,7 @@ static struct md_personality raid5_personality =
.owner = THIS_MODULE,
.make_request = raid5_make_request,
.run = raid5_run,
+ .start = raid5_start,
.free = raid5_free,
.status = raid5_status,
.error_handler = raid5_error,
@@ -8420,6 +8429,7 @@ static struct md_personality raid4_personality =
.owner = THIS_MODULE,
.make_request = raid5_make_request,
.run = raid5_run,
+ .start = raid5_start,
.free = raid5_free,
.status = raid5_status,
.error_handler = raid5_error,
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 3dba3aa34a43..9d6c496f756c 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -43,13 +43,13 @@ static inline struct cec_devnode *cec_devnode_data(struct file *filp)
/* CEC file operations */
-static unsigned int cec_poll(struct file *filp,
+static __poll_t cec_poll(struct file *filp,
struct poll_table_struct *poll)
{
struct cec_devnode *devnode = cec_devnode_data(filp);
struct cec_fh *fh = filp->private_data;
struct cec_adapter *adap = fh->adap;
- unsigned int res = 0;
+ __poll_t res = 0;
if (!devnode->registered)
return POLLERR | POLLHUP;
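Editor's note: from here on the series is a mechanical retyping of poll handlers: return types and local mask variables move from unsigned int (or int/long) to __poll_t, the __bitwise type that lets sparse flag poll masks mixed with plain integers. Every conversion below follows the same shape, sketched here with hypothetical example_* names.

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct example_dev {
        wait_queue_head_t read_wq;
        bool data_ready;
};

/* minimal __poll_t-typed handler; both the local mask and the
 * return type carry the __bitwise annotation */
static __poll_t example_poll(struct file *filp, poll_table *wait)
{
        struct example_dev *dev = filp->private_data;
        __poll_t mask = 0;

        poll_wait(filp, &dev->read_wq, wait);
        if (dev->data_ready)
                mask |= POLLIN | POLLRDNORM;
        return mask;
}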
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index 8c87d6837c49..8ee3eebef4db 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -320,13 +320,13 @@ static int fops_mmap(struct file *file, struct vm_area_struct * vma)
return res;
}
-static unsigned int __fops_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t __fops_poll(struct file *file, struct poll_table_struct *wait)
{
struct video_device *vdev = video_devdata(file);
struct saa7146_fh *fh = file->private_data;
struct videobuf_buffer *buf = NULL;
struct videobuf_queue *q;
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
DEB_EE("file:%p, poll:%p\n", file, wait);
@@ -359,10 +359,10 @@ static unsigned int __fops_poll(struct file *file, struct poll_table_struct *wait)
return res;
}
-static unsigned int fops_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t fops_poll(struct file *file, struct poll_table_struct *wait)
{
struct video_device *vdev = video_devdata(file);
- unsigned int res;
+ __poll_t res;
mutex_lock(vdev->lock);
res = __fops_poll(file, wait);
diff --git a/drivers/media/common/siano/smsdvb-debugfs.c b/drivers/media/common/siano/smsdvb-debugfs.c
index 1a8677ade391..0c0878bcf251 100644
--- a/drivers/media/common/siano/smsdvb-debugfs.c
+++ b/drivers/media/common/siano/smsdvb-debugfs.c
@@ -374,7 +374,7 @@ exit:
return rc;
}
-static unsigned int smsdvb_stats_poll(struct file *file, poll_table *wait)
+static __poll_t smsdvb_stats_poll(struct file *file, poll_table *wait)
{
struct smsdvb_debugfs *debug_data = file->private_data;
int rc;
@@ -384,12 +384,9 @@ static unsigned int smsdvb_stats_poll(struct file *file, poll_table *wait)
poll_wait(file, &debug_data->stats_queue, wait);
rc = smsdvb_stats_wait_read(debug_data);
- if (rc > 0)
- rc = POLLIN | POLLRDNORM;
-
kref_put(&debug_data->refcount, smsdvb_debugfs_data_release);
- return rc;
+ return rc > 0 ? POLLIN | POLLRDNORM : 0;
}
static ssize_t smsdvb_stats_read(struct file *file, char __user *user_buf,
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 3ddd44e1ee77..3fe0eb740a6d 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -1066,10 +1066,10 @@ static long dvb_demux_ioctl(struct file *file, unsigned int cmd,
return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
}
-static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_demux_poll(struct file *file, poll_table *wait)
{
struct dmxdev_filter *dmxdevfilter = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if ((!dmxdevfilter) || dmxdevfilter->dev->exit)
return POLLERR;
@@ -1160,11 +1160,11 @@ static long dvb_dvr_ioctl(struct file *file,
return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
}
-static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk("%s\n", __func__);
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index d48b61eb01f4..3f6c8bd8f869 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -1782,11 +1782,11 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
*
* return: Standard poll mask.
*/
-static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_ca_private *ca = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
int slot;
int result = 0;
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 2afaa8226342..48e16fd003a7 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -2470,7 +2470,7 @@ static int dvb_frontend_handle_ioctl(struct file *file,
}
-static unsigned int dvb_frontend_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t dvb_frontend_poll(struct file *file, struct poll_table_struct *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
diff --git a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c
index edbb30fdd9d9..fb8a1d2ffd24 100644
--- a/drivers/media/firewire/firedtv-ci.c
+++ b/drivers/media/firewire/firedtv-ci.c
@@ -207,7 +207,7 @@ static int fdtv_ca_ioctl(struct file *file, unsigned int cmd, void *arg)
return err;
}
-static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
+static __poll_t fdtv_ca_io_poll(struct file *file, poll_table *wait)
{
return POLLIN;
}
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index 423248f577b6..3049b1f505e5 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -99,7 +99,7 @@ static ssize_t media_write(struct file *filp, const char __user *buf,
return devnode->fops->write(filp, buf, sz, off);
}
-static unsigned int media_poll(struct file *filp,
+static __poll_t media_poll(struct file *filp,
struct poll_table_struct *poll)
{
struct media_devnode *devnode = media_devnode_data(filp);
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index b366a7e1d976..c988669e22ff 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2955,13 +2955,13 @@ static ssize_t bttv_read(struct file *file, char __user *data,
return retval;
}
-static unsigned int bttv_poll(struct file *file, poll_table *wait)
+static __poll_t bttv_poll(struct file *file, poll_table *wait)
{
struct bttv_fh *fh = file->private_data;
struct bttv_buffer *buf;
enum v4l2_field field;
- unsigned int rc = 0;
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t rc = 0;
+ __poll_t req_events = poll_requested_events(wait);
if (v4l2_event_pending(&fh->fh))
rc = POLLPRI;
@@ -3329,13 +3329,13 @@ static ssize_t radio_read(struct file *file, char __user *data,
return cmd.result;
}
-static unsigned int radio_poll(struct file *file, poll_table *wait)
+static __poll_t radio_poll(struct file *file, poll_table *wait)
{
struct bttv_fh *fh = file->private_data;
struct bttv *btv = fh->btv;
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct saa6588_command cmd;
- unsigned int res = 0;
+ __poll_t res = 0;
if (v4l2_event_pending(&fh->fh))
res = POLLPRI;
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index 4f9c2395941b..2b0abd5bbf64 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -602,14 +602,14 @@ ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count,
return cx18_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
}
-unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
+__poll_t cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct cx18_open_id *id = file2id(filp);
struct cx18 *cx = id->cx;
struct cx18_stream *s = &cx->streams[id->type];
int eof = test_bit(CX18_F_S_STREAMOFF, &s->s_flags);
- unsigned res = 0;
+ __poll_t res = 0;
/* Start a capture if there is none */
if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags) &&
@@ -629,7 +629,7 @@ unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
if ((s->vb_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(id->type == CX18_ENC_STREAM_TYPE_YUV)) {
- int videobuf_poll = videobuf_poll_stream(filp, &s->vbuf_q, wait);
+ __poll_t videobuf_poll = videobuf_poll_stream(filp, &s->vbuf_q, wait);
if (v4l2_event_pending(&id->fh))
res |= POLLPRI;
diff --git a/drivers/media/pci/cx18/cx18-fileops.h b/drivers/media/pci/cx18/cx18-fileops.h
index 37ef34e866cb..5b44d30efd8f 100644
--- a/drivers/media/pci/cx18/cx18-fileops.h
+++ b/drivers/media/pci/cx18/cx18-fileops.h
@@ -23,7 +23,7 @@ ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count,
ssize_t cx18_v4l2_write(struct file *filp, const char __user *buf, size_t count,
loff_t *pos);
int cx18_v4l2_close(struct file *filp);
-unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait);
+__poll_t cx18_v4l2_enc_poll(struct file *filp, poll_table *wait);
int cx18_start_capture(struct cx18_open_id *id);
void cx18_stop_capture(struct cx18_open_id *id, int gop_end);
void cx18_mute(struct cx18 *cx);
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index f4bd4908acdd..09a25d6c2cd1 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -732,13 +732,13 @@ static ssize_t ts_read(struct file *file, __user char *buf,
return (count && (left == count)) ? -EAGAIN : (count - left);
}
-static unsigned int ts_poll(struct file *file, poll_table *wait)
+static __poll_t ts_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct ddb_output *output = dvbdev->priv;
struct ddb_input *input = output->port->input[0];
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &input->dma->wq, wait);
poll_wait(file, &output->dma->wq, wait);
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c
index c9bd018e53de..4aa773507201 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.c
+++ b/drivers/media/pci/ivtv/ivtv-fileops.c
@@ -730,12 +730,12 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
return res;
}
-unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
+__poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
{
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
- int res = 0;
+ __poll_t res = 0;
/* add stream's waitq to the poll list */
IVTV_DEBUG_HI_FILE("Decoder poll\n");
@@ -764,14 +764,14 @@ unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
return res;
}
-unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait)
+__poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
- unsigned res = 0;
+ __poll_t res = 0;
/* Start a capture if there is none */
if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags) &&
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.h b/drivers/media/pci/ivtv/ivtv-fileops.h
index 5e08800772ca..e0029b2b8d09 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.h
+++ b/drivers/media/pci/ivtv/ivtv-fileops.h
@@ -28,8 +28,8 @@ ssize_t ivtv_v4l2_read(struct file *filp, char __user *buf, size_t count,
ssize_t ivtv_v4l2_write(struct file *filp, const char __user *buf, size_t count,
loff_t * pos);
int ivtv_v4l2_close(struct file *filp);
-unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait);
-unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table * wait);
+__poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait);
+__poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table * wait);
int ivtv_start_capture(struct ivtv_open_id *id);
void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end);
int ivtv_start_decoding(struct ivtv_open_id *id, int speed);
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 23999a8cef37..f74b08635082 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1423,9 +1423,9 @@ static long vidioc_default(struct file *file, void *fh, bool valid_prio,
}
-static unsigned int meye_poll(struct file *file, poll_table *wait)
+static __poll_t meye_poll(struct file *file, poll_table *wait)
{
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
mutex_lock(&meye.lock);
poll_wait(file, &meye.proc_list, wait);
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 82d2a24644e4..0ceaa3473cf2 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1227,11 +1227,11 @@ static ssize_t radio_read(struct file *file, char __user *data,
return cmd.result;
}
-static unsigned int radio_poll(struct file *file, poll_table *wait)
+static __poll_t radio_poll(struct file *file, poll_table *wait)
{
struct saa7134_dev *dev = video_drvdata(file);
struct saa6588_command cmd;
- unsigned int rc = v4l2_ctrl_poll(file, wait);
+ __poll_t rc = v4l2_ctrl_poll(file, wait);
cmd.instance = file;
cmd.event_list = wait;
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index f21c245a54f7..e7b31a5b14fd 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -909,13 +909,13 @@ err:
return ret;
}
-static unsigned int fops_poll(struct file *file, poll_table *wait)
+static __poll_t fops_poll(struct file *file, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct saa7164_encoder_fh *fh =
(struct saa7164_encoder_fh *)file->private_data;
struct saa7164_port *port = fh->port;
- unsigned int mask = v4l2_ctrl_poll(file, wait);
+ __poll_t mask = v4l2_ctrl_poll(file, wait);
port->last_poll_msecs_diff = port->last_poll_msecs;
port->last_poll_msecs = jiffies_to_msecs(jiffies);
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index 9255d7d23947..6f97c8f2e00d 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -614,11 +614,11 @@ err:
return ret;
}
-static unsigned int fops_poll(struct file *file, poll_table *wait)
+static __poll_t fops_poll(struct file *file, poll_table *wait)
{
struct saa7164_vbi_fh *fh = (struct saa7164_vbi_fh *)file->private_data;
struct saa7164_port *port = fh->port;
- unsigned int mask = 0;
+ __poll_t mask = 0;
port->last_poll_msecs_diff = port->last_poll_msecs;
port->last_poll_msecs = jiffies_to_msecs(jiffies);
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c
index 2aa4ba675194..4d10e2f979d2 100644
--- a/drivers/media/pci/ttpci/av7110_av.c
+++ b/drivers/media/pci/ttpci/av7110_av.c
@@ -937,11 +937,11 @@ static int dvb_video_get_event (struct av7110 *av7110, struct video_event *event)
* DVB device file operations
******************************************************************************/
-static unsigned int dvb_video_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_video_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk(2, "av7110:%p, \n", av7110);
@@ -989,11 +989,11 @@ static ssize_t dvb_video_write(struct file *file, const char __user *buf,
return dvb_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1);
}
-static unsigned int dvb_audio_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_audio_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk(2, "av7110:%p, \n", av7110);
diff --git a/drivers/media/pci/ttpci/av7110_ca.c b/drivers/media/pci/ttpci/av7110_ca.c
index 1fe49171d823..96ca227cf51b 100644
--- a/drivers/media/pci/ttpci/av7110_ca.c
+++ b/drivers/media/pci/ttpci/av7110_ca.c
@@ -223,13 +223,13 @@ static int dvb_ca_open(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int dvb_ca_poll (struct file *file, poll_table *wait)
+static __poll_t dvb_ca_poll (struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
struct dvb_ringbuffer *rbuf = &av7110->ci_rbuffer;
struct dvb_ringbuffer *wbuf = &av7110->ci_wbuffer;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk(8, "av7110:%p\n",av7110);
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index d07840072337..b6a6c4f171d0 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -2501,13 +2501,13 @@ static int zoran_s_jpegcomp(struct file *file, void *__fh,
return res;
}
-static unsigned int
+static __poll_t
zoran_poll (struct file *file,
poll_table *wait)
{
struct zoran_fh *fh = file->private_data;
struct zoran *zr = fh->zr;
- int res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
int frame;
unsigned long flags;
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 7b3f6f8e3dc8..cf65b39807fe 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -730,7 +730,7 @@ static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
/*
* vpfe_poll: used for the select/poll system calls
*/
-static unsigned int vpfe_poll(struct file *file, poll_table *wait)
+static __poll_t vpfe_poll(struct file *file, poll_table *wait)
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 2a2994ef15d5..b2dc524112f7 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -707,12 +707,12 @@ static int gsc_m2m_release(struct file *file)
return 0;
}
-static unsigned int gsc_m2m_poll(struct file *file,
+static __poll_t gsc_m2m_poll(struct file *file,
struct poll_table_struct *wait)
{
struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
struct gsc_dev *gsc = ctx->gsc_dev;
- unsigned int ret;
+ __poll_t ret;
if (mutex_lock_interruptible(&gsc->lock))
return -ERESTARTSYS;
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index dba21215dc84..de285a269390 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1263,13 +1263,13 @@ static ssize_t viu_read(struct file *file, char __user *data, size_t count,
return 0;
}
-static unsigned int viu_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t viu_poll(struct file *file, struct poll_table_struct *wait)
{
struct viu_fh *fh = file->private_data;
struct videobuf_queue *q = &fh->vb_vidq;
struct viu_dev *dev = fh->dev;
- unsigned long req_events = poll_requested_events(wait);
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
return POLLERR;
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index c8a12493f395..945ef1e2ccc7 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -950,11 +950,11 @@ static int deinterlace_release(struct file *file)
return 0;
}
-static unsigned int deinterlace_poll(struct file *file,
+static __poll_t deinterlace_poll(struct file *file,
struct poll_table_struct *wait)
{
struct deinterlace_ctx *ctx = file->private_data;
- int ret;
+ __poll_t ret;
deinterlace_lock(ctx);
ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 4a2b1afa19c4..5a8eff60e95f 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -838,12 +838,12 @@ static int emmaprp_release(struct file *file)
return 0;
}
-static unsigned int emmaprp_poll(struct file *file,
+static __poll_t emmaprp_poll(struct file *file,
struct poll_table_struct *wait)
{
struct emmaprp_dev *pcdev = video_drvdata(file);
struct emmaprp_ctx *ctx = file->private_data;
- unsigned int res;
+ __poll_t res;
mutex_lock(&pcdev->dev_mutex);
res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 6f1b0c799e58..abb14ee20538 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -839,7 +839,7 @@ static void omap_vout_buffer_release(struct videobuf_queue *q,
/*
* File operations
*/
-static unsigned int omap_vout_poll(struct file *file,
+static __poll_t omap_vout_poll(struct file *file,
struct poll_table_struct *wait)
{
struct omap_vout_device *vout = file->private_data;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 218e6d7ae93a..a751c89a3ea8 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -1383,11 +1383,11 @@ static int isp_video_release(struct file *file)
return 0;
}
-static unsigned int isp_video_poll(struct file *file, poll_table *wait)
+static __poll_t isp_video_poll(struct file *file, poll_table *wait)
{
struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
struct isp_video *video = video_drvdata(file);
- int ret;
+ __poll_t ret;
mutex_lock(&video->queue_lock);
ret = vb2_poll(&vfh->queue, file, wait);
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 25c7a7d42292..437395a61065 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -590,12 +590,12 @@ static int s3c_camif_close(struct file *file)
return ret;
}
-static unsigned int s3c_camif_poll(struct file *file,
+static __poll_t s3c_camif_poll(struct file *file,
struct poll_table_struct *wait)
{
struct camif_vp *vp = video_drvdata(file);
struct camif_dev *camif = vp->camif;
- int ret;
+ __poll_t ret;
mutex_lock(&camif->lock);
if (vp->owner && vp->owner != file->private_data)
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index bc68dbbcaec1..fe94bd6b705e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -988,14 +988,14 @@ static int s5p_mfc_release(struct file *file)
}
/* Poll */
-static unsigned int s5p_mfc_poll(struct file *file,
+static __poll_t s5p_mfc_poll(struct file *file,
struct poll_table_struct *wait)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
struct s5p_mfc_dev *dev = ctx->dev;
struct vb2_queue *src_q, *dst_q;
struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
- unsigned int rc = 0;
+ __poll_t rc = 0;
unsigned long flags;
mutex_lock(&dev->mfc_mutex);
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index dedc1b024f6f..976ea0bb5b6c 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -1016,7 +1016,7 @@ static int sh_veu_release(struct file *file)
return 0;
}
-static unsigned int sh_veu_poll(struct file *file,
+static __poll_t sh_veu_poll(struct file *file,
struct poll_table_struct *wait)
{
struct sh_veu_file *veu_file = file->private_data;
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 36762ec954e7..9b069783e3ed 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1553,7 +1553,7 @@ static int sh_mobile_ceu_set_liveselection(struct soc_camera_device *icd,
return ret;
}
-static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table *pt)
+static __poll_t sh_mobile_ceu_poll(struct file *file, poll_table *pt)
{
struct soc_camera_device *icd = file->private_data;
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 916ff68b73d4..d964c072832c 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -805,11 +805,11 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
return err;
}
-static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
+static __poll_t soc_camera_poll(struct file *file, poll_table *pt)
{
struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- unsigned res = POLLERR;
+ __poll_t res = POLLERR;
if (icd->streamer != file)
return POLLERR;
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 805d4a8fc17e..f77be9302120 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -764,7 +764,7 @@ out_unlock:
}
-static unsigned int viacam_poll(struct file *filp, struct poll_table_struct *pt)
+static __poll_t viacam_poll(struct file *filp, struct poll_table_struct *pt)
{
struct via_camera *cam = video_drvdata(filp);
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 5f316a5e38db..c80239592088 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -416,7 +416,7 @@ static ssize_t vivid_radio_write(struct file *file, const char __user *buf,
return vivid_radio_tx_write(file, buf, size, offset);
}
-static unsigned int vivid_radio_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t vivid_radio_poll(struct file *file, struct poll_table_struct *wait)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/media/platform/vivid/vivid-radio-rx.c b/drivers/media/platform/vivid/vivid-radio-rx.c
index 47c36c26096b..71f3ebb7aecf 100644
--- a/drivers/media/platform/vivid/vivid-radio-rx.c
+++ b/drivers/media/platform/vivid/vivid-radio-rx.c
@@ -141,7 +141,7 @@ retry:
return i;
}
-unsigned int vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait)
+__poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait)
{
return POLLIN | POLLRDNORM | v4l2_ctrl_poll(file, wait);
}
diff --git a/drivers/media/platform/vivid/vivid-radio-rx.h b/drivers/media/platform/vivid/vivid-radio-rx.h
index 1077d8f061eb..2b33edb60942 100644
--- a/drivers/media/platform/vivid/vivid-radio-rx.h
+++ b/drivers/media/platform/vivid/vivid-radio-rx.h
@@ -21,7 +21,7 @@
#define _VIVID_RADIO_RX_H_
ssize_t vivid_radio_rx_read(struct file *, char __user *, size_t, loff_t *);
-unsigned int vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait);
+__poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait);
int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band);
int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a);
diff --git a/drivers/media/platform/vivid/vivid-radio-tx.c b/drivers/media/platform/vivid/vivid-radio-tx.c
index 0e8025b7b4dd..f0917f4e7d8c 100644
--- a/drivers/media/platform/vivid/vivid-radio-tx.c
+++ b/drivers/media/platform/vivid/vivid-radio-tx.c
@@ -105,7 +105,7 @@ retry:
return i;
}
-unsigned int vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait)
+__poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait)
{
return POLLOUT | POLLWRNORM | v4l2_ctrl_poll(file, wait);
}
diff --git a/drivers/media/platform/vivid/vivid-radio-tx.h b/drivers/media/platform/vivid/vivid-radio-tx.h
index 7f8ff7547119..3c3343d70cbc 100644
--- a/drivers/media/platform/vivid/vivid-radio-tx.h
+++ b/drivers/media/platform/vivid/vivid-radio-tx.h
@@ -21,7 +21,7 @@
#define _VIVID_RADIO_TX_H_
ssize_t vivid_radio_tx_write(struct file *, const char __user *, size_t, loff_t *);
-unsigned int vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait);
+__poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait);
int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a);
int vidioc_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a);
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 7575e5370a49..dba611003a00 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -481,11 +481,11 @@ static int cadet_release(struct file *file)
return 0;
}
-static unsigned int cadet_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t cadet_poll(struct file *file, struct poll_table_struct *wait)
{
struct cadet *dev = video_drvdata(file);
- unsigned long req_events = poll_requested_events(wait);
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
poll_wait(file, &dev->read_queue, wait);
if (dev->rdsstat == 0 && (req_events & (POLLIN | POLLRDNORM))) {
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 540ac887a63c..49293dd707b9 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -1153,12 +1153,12 @@ static ssize_t si476x_radio_fops_read(struct file *file, char __user *buf,
return rval;
}
-static unsigned int si476x_radio_fops_poll(struct file *file,
+static __poll_t si476x_radio_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct si476x_radio *radio = video_drvdata(file);
- unsigned long req_events = poll_requested_events(pts);
- unsigned int err = v4l2_ctrl_poll(file, pts);
+ __poll_t req_events = poll_requested_events(pts);
+ __poll_t err = v4l2_ctrl_poll(file, pts);
if (req_events & (POLLIN | POLLRDNORM)) {
if (atomic_read(&radio->core->is_alive))
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 3cbdc085c65d..f92b0f9241a9 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1089,7 +1089,7 @@ out:
return r;
}
-static unsigned int wl1273_fm_fops_poll(struct file *file,
+static __poll_t wl1273_fm_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index c89a7d5b8c55..68fe9e5c7a70 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -507,12 +507,12 @@ done:
/*
* si470x_fops_poll - poll RDS data
*/
-static unsigned int si470x_fops_poll(struct file *file,
+static __poll_t si470x_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct si470x_device *radio = video_drvdata(file);
- unsigned long req_events = poll_requested_events(pts);
- int retval = v4l2_ctrl_poll(file, pts);
+ __poll_t req_events = poll_requested_events(pts);
+ __poll_t retval = v4l2_ctrl_poll(file, pts);
if (req_events & (POLLIN | POLLRDNORM)) {
/* switch on rds reception */
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index fc5a7abc83d2..fd603c1b96bb 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -102,7 +102,7 @@ static ssize_t fm_v4l2_fops_write(struct file *file, const char __user * buf,
return sizeof(rds);
}
-static u32 fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *pts)
+static __poll_t fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *pts)
{
int ret;
struct fmdev *fmdev;
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index e16d1138ca48..aab53641838b 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -272,10 +272,10 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
}
EXPORT_SYMBOL(lirc_dev_fop_close);
-unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
+__poll_t lirc_dev_fop_poll(struct file *file, poll_table *wait)
{
struct lirc_dev *d = file->private_data;
- unsigned int ret;
+ __poll_t ret;
if (!d->attached)
return POLLHUP | POLLERR;
diff --git a/drivers/media/usb/cpia2/cpia2.h b/drivers/media/usb/cpia2/cpia2.h
index 81f72c0b561f..ab238ac8bfc0 100644
--- a/drivers/media/usb/cpia2/cpia2.h
+++ b/drivers/media/usb/cpia2/cpia2.h
@@ -444,7 +444,7 @@ int cpia2_allocate_buffers(struct camera_data *cam);
void cpia2_free_buffers(struct camera_data *cam);
long cpia2_read(struct camera_data *cam,
char __user *buf, unsigned long count, int noblock);
-unsigned int cpia2_poll(struct camera_data *cam,
+__poll_t cpia2_poll(struct camera_data *cam,
struct file *filp, poll_table *wait);
int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma);
void cpia2_set_property_flip(struct camera_data *cam, int prop_val);
diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c
index 0efba0da0a45..e7524920c618 100644
--- a/drivers/media/usb/cpia2/cpia2_core.c
+++ b/drivers/media/usb/cpia2/cpia2_core.c
@@ -2370,10 +2370,10 @@ long cpia2_read(struct camera_data *cam,
* cpia2_poll
*
*****************************************************************************/
-unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
+__poll_t cpia2_poll(struct camera_data *cam, struct file *filp,
poll_table *wait)
{
- unsigned int status = v4l2_ctrl_poll(filp, wait);
+ __poll_t status = v4l2_ctrl_poll(filp, wait);
if ((poll_requested_events(wait) & (POLLIN | POLLRDNORM)) &&
!cam->streaming) {
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 3dedd83f0b19..74c97565ccc6 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -169,10 +169,10 @@ static ssize_t cpia2_v4l_read(struct file *file, char __user *buf, size_t count,
* cpia2_v4l_poll
*
*****************************************************************************/
-static unsigned int cpia2_v4l_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t cpia2_v4l_poll(struct file *filp, struct poll_table_struct *wait)
{
struct camera_data *cam = video_drvdata(filp);
- unsigned int res;
+ __poll_t res;
mutex_lock(&cam->v4l2_lock);
res = cpia2_poll(cam, filp, wait);
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index d538fa407742..103e3299b77f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1812,13 +1812,13 @@ static ssize_t mpeg_read(struct file *file, char __user *data,
file->f_flags & O_NONBLOCK);
}
-static unsigned int mpeg_poll(struct file *file,
+static __poll_t mpeg_poll(struct file *file,
struct poll_table_struct *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct cx231xx_fh *fh = file->private_data;
struct cx231xx *dev = fh->dev;
- unsigned int res = 0;
+ __poll_t res = 0;
if (v4l2_event_pending(&fh->fh))
res |= POLLPRI;
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 226059fc672b..d7b2e694bbb9 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -2006,12 +2006,12 @@ cx231xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
* cx231xx_v4l2_poll()
* will allocate buffers when called for the first time
*/
-static unsigned int cx231xx_v4l2_poll(struct file *filp, poll_table *wait)
+static __poll_t cx231xx_v4l2_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct cx231xx_fh *fh = filp->private_data;
struct cx231xx *dev = fh->dev;
- unsigned res = 0;
+ __poll_t res = 0;
int rc;
rc = check_dev(dev);
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 961343873fd0..b72d02e225fd 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1862,11 +1862,11 @@ out:
return ret;
}
-static unsigned int dev_poll(struct file *file, poll_table *wait)
+static __poll_t dev_poll(struct file *file, poll_table *wait)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- unsigned long req_events = poll_requested_events(wait);
- int ret = 0;
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t ret = 0;
PDEBUG(D_FRAM, "poll");
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 7fb036d6a86e..d0d638c2e900 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -521,12 +521,12 @@ err:
return ret;
}
-static unsigned int hdpvr_poll(struct file *filp, poll_table *wait)
+static __poll_t hdpvr_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct hdpvr_buffer *buf = NULL;
struct hdpvr_device *dev = video_drvdata(filp);
- unsigned int mask = v4l2_ctrl_poll(filp, wait);
+ __poll_t mask = v4l2_ctrl_poll(filp, wait);
if (!(req_events & (POLLIN | POLLRDNORM)))
return mask;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 4320bda9352d..11cdfe356801 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -1190,9 +1190,9 @@ static ssize_t pvr2_v4l2_read(struct file *file,
}
-static unsigned int pvr2_v4l2_poll(struct file *file, poll_table *wait)
+static __poll_t pvr2_v4l2_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct pvr2_v4l2_fh *fh = file->private_data;
int ret;
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index c0bba773db25..cba0916d33e5 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -721,10 +721,10 @@ static ssize_t v4l_stk_read(struct file *fp, char __user *buf,
return ret;
}
-static unsigned int v4l_stk_poll(struct file *fp, poll_table *wait)
+static __poll_t v4l_stk_poll(struct file *fp, poll_table *wait)
{
struct stk_camera *dev = video_drvdata(fp);
- unsigned res = v4l2_ctrl_poll(fp, wait);
+ __poll_t res = v4l2_ctrl_poll(fp, wait);
poll_wait(fp, &dev->wait_frame, wait);
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 9fa25de6b5a9..317bf5a3933e 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -1423,13 +1423,13 @@ tm6000_read(struct file *file, char __user *data, size_t count, loff_t *pos)
return 0;
}
-static unsigned int
+static __poll_t
__tm6000_poll(struct file *file, struct poll_table_struct *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct tm6000_fh *fh = file->private_data;
struct tm6000_buffer *buf;
- int res = 0;
+ __poll_t res = 0;
if (v4l2_event_pending(&fh->fh))
res = POLLPRI;
@@ -1457,11 +1457,11 @@ __tm6000_poll(struct file *file, struct poll_table_struct *wait)
return res;
}
-static unsigned int tm6000_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t tm6000_poll(struct file *file, struct poll_table_struct *wait)
{
struct tm6000_fh *fh = file->private_data;
struct tm6000_core *dev = fh->dev;
- unsigned int res;
+ __poll_t res;
mutex_lock(&dev->lock);
res = __tm6000_poll(file, wait);
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index c8d78b2f3de4..692c463f14f7 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -340,10 +340,10 @@ unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
}
#endif
-unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
+__poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_table *wait)
{
- unsigned int ret;
+ __poll_t ret;
mutex_lock(&queue->mutex);
ret = vb2_poll(&queue->queue, file, wait);
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 3e7e283a44a8..381f614b2f4c 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -1284,36 +1284,30 @@ struct uvc_xu_control_mapping32 {
static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
const struct uvc_xu_control_mapping32 __user *up)
{
- compat_caddr_t p;
+ struct uvc_xu_control_mapping32 *p = (void *)kp;
+ compat_caddr_t info;
+ u32 count;
- if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
- __copy_from_user(kp, up, offsetof(typeof(*up), menu_info)) ||
- __get_user(kp->menu_count, &up->menu_count))
+ if (copy_from_user(p, up, sizeof(*p)))
return -EFAULT;
- memset(kp->reserved, 0, sizeof(kp->reserved));
-
- if (kp->menu_count == 0) {
- kp->menu_info = NULL;
- return 0;
- }
-
- if (__get_user(p, &up->menu_info))
- return -EFAULT;
- kp->menu_info = compat_ptr(p);
+ count = p->menu_count;
+ info = p->menu_info;
+ memset(kp->reserved, 0, sizeof(kp->reserved));
+ kp->menu_info = count ? compat_ptr(info) : NULL;
+ kp->menu_count = count;
return 0;
}
static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
struct uvc_xu_control_mapping32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
- __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
- __put_user(kp->menu_count, &up->menu_count))
+ if (copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
+ put_user(kp->menu_count, &up->menu_count))
return -EFAULT;
- if (__clear_user(up->reserved, sizeof(up->reserved)))
+ if (clear_user(up->reserved, sizeof(up->reserved)))
return -EFAULT;
return 0;
@@ -1330,31 +1324,26 @@ struct uvc_xu_control_query32 {
static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
const struct uvc_xu_control_query32 __user *up)
{
- compat_caddr_t p;
+ struct uvc_xu_control_query32 v;
- if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
- __copy_from_user(kp, up, offsetof(typeof(*up), data)))
+ if (copy_from_user(&v, up, sizeof(v)))
return -EFAULT;
- if (kp->size == 0) {
- kp->data = NULL;
- return 0;
- }
-
- if (__get_user(p, &up->data))
- return -EFAULT;
- kp->data = compat_ptr(p);
-
+ *kp = (struct uvc_xu_control_query){
+ .unit = v.unit,
+ .selector = v.selector,
+ .query = v.query,
+ .size = v.size,
+ .data = v.size ? compat_ptr(v.data) : NULL
+ };
return 0;
}
static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
struct uvc_xu_control_query32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
- __copy_to_user(up, kp, offsetof(typeof(*up), data)))
+ if (copy_to_user(up, kp, offsetof(typeof(*up), data)))
return -EFAULT;
-
return 0;
}
@@ -1423,7 +1412,7 @@ static int uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
return uvc_queue_mmap(&stream->queue, vma);
}
-static unsigned int uvc_v4l2_poll(struct file *file, poll_table *wait)
+static __poll_t uvc_v4l2_poll(struct file *file, poll_table *wait)
{
struct uvc_fh *handle = file->private_data;
struct uvc_streaming *stream = handle->stream;
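
The compat-ioctl rewrite above also drops the open-coded access_ok() plus
__copy_from_user()/__copy_to_user() pairs: the checked helpers validate the
user pointer range themselves and return the number of bytes left uncopied,
so the two-step pattern collapses into one call. The idiom, sketched with
hypothetical names (karg/uptr):

	struct uvc_xu_control_query32 karg;

	if (copy_from_user(&karg, uptr, sizeof(karg)))	/* checks + copies */
		return -EFAULT;				/* non-zero = bytes not copied */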
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 05398784d1c8..9b44a7cd5ec1 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -678,7 +678,7 @@ extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf);
extern int uvc_queue_mmap(struct uvc_video_queue *queue,
struct vm_area_struct *vma);
-extern unsigned int uvc_queue_poll(struct uvc_video_queue *queue,
+extern __poll_t uvc_queue_poll(struct uvc_video_queue *queue,
struct file *file, poll_table *wait);
#ifndef CONFIG_MMU
extern unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 1d888661fd03..8b7c19943d46 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -1287,12 +1287,12 @@ static int zr364xx_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
-static unsigned int zr364xx_poll(struct file *file,
+static __poll_t zr364xx_poll(struct file *file,
struct poll_table_struct *wait)
{
struct zr364xx_camera *cam = video_drvdata(file);
struct videobuf_queue *q = &cam->vb_vidq;
- unsigned res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
_DBG("%s\n", __func__);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index cbb2ef43945f..b07657149434 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -3457,7 +3457,7 @@ int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
}
EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
-unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
+__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
{
struct v4l2_fh *fh = file->private_data;
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index c647ba648805..8ad8c1627768 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -331,10 +331,10 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf,
return ret;
}
-static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
+static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll)
{
struct video_device *vdev = video_devdata(filp);
- unsigned int res = POLLERR | POLLHUP;
+ __poll_t res = POLLERR | POLLHUP;
if (!vdev->fops->poll)
return DEFAULT_POLLMASK;
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index bc580fbe18fa..186156f8952a 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -500,14 +500,14 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
-unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait)
{
struct video_device *vfd = video_devdata(file);
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct vb2_queue *src_q, *dst_q;
struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
- unsigned int rc = 0;
+ __poll_t rc = 0;
unsigned long flags;
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
@@ -794,11 +794,11 @@ int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
-unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
+__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
struct v4l2_fh *fh = file->private_data;
struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
- unsigned int ret;
+ __poll_t ret;
if (m2m_ctx->q_lock)
mutex_lock(m2m_ctx->q_lock);
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 43fefa73e0a3..28966fa8c610 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -469,7 +469,7 @@ static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
}
#endif
-static unsigned int subdev_poll(struct file *file, poll_table *wait)
+static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index e87fb13b22dc..9a89d3ae170f 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -1118,13 +1118,13 @@ done:
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);
-unsigned int videobuf_poll_stream(struct file *file,
+__poll_t videobuf_poll_stream(struct file *file,
struct videobuf_queue *q,
poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct videobuf_buffer *buf = NULL;
- unsigned int rc = 0;
+ __poll_t rc = 0;
videobuf_queue_lock(q);
if (q->streaming) {
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index a8589d96ef72..0d9f772d6d03 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -2018,10 +2018,10 @@ void vb2_core_queue_release(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_core_queue_release);
-unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
+__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct vb2_buffer *vb = NULL;
unsigned long flags;
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 4075314a6989..a49f7eb98c2e 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -671,11 +671,11 @@ void vb2_queue_release(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_queue_release);
-unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
struct video_device *vfd = video_devdata(file);
- unsigned long req_events = poll_requested_events(wait);
- unsigned int res = 0;
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t res = 0;
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
struct v4l2_fh *fh = file->private_data;
@@ -904,12 +904,12 @@ exit:
}
EXPORT_SYMBOL_GPL(vb2_fop_read);
-unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
+__poll_t vb2_fop_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
struct vb2_queue *q = vdev->queue;
struct mutex *lock = q->lock ? q->lock : vdev->lock;
- unsigned res;
+ __poll_t res;
void *fileio;
/*
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index a385a35c7de9..90a66b3f7ae1 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -32,7 +32,6 @@
#include <linux/pm_runtime.h>
#include <linux/platform_data/mtd-nand-omap2.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
#include <asm/mach-types.h>
@@ -1138,6 +1137,112 @@ struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
}
EXPORT_SYMBOL_GPL(gpmc_omap_get_nand_ops);
+static void gpmc_omap_onenand_calc_sync_timings(struct gpmc_timings *t,
+ struct gpmc_settings *s,
+ int freq, int latency)
+{
+ struct gpmc_device_timings dev_t;
+ const int t_cer = 15;
+ const int t_avdp = 12;
+ const int t_cez = 20; /* max of t_cez, t_oez */
+ const int t_wpl = 40;
+ const int t_wph = 30;
+ int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo;
+
+ switch (freq) {
+ case 104:
+ min_gpmc_clk_period = 9600; /* 104 MHz */
+ t_ces = 3;
+ t_avds = 4;
+ t_avdh = 2;
+ t_ach = 3;
+ t_aavdh = 6;
+ t_rdyo = 6;
+ break;
+ case 83:
+ min_gpmc_clk_period = 12000; /* 83 MHz */
+ t_ces = 5;
+ t_avds = 4;
+ t_avdh = 2;
+ t_ach = 6;
+ t_aavdh = 6;
+ t_rdyo = 9;
+ break;
+ case 66:
+ min_gpmc_clk_period = 15000; /* 66 MHz */
+ t_ces = 6;
+ t_avds = 5;
+ t_avdh = 2;
+ t_ach = 6;
+ t_aavdh = 6;
+ t_rdyo = 11;
+ break;
+ default:
+ min_gpmc_clk_period = 18500; /* 54 MHz */
+ t_ces = 7;
+ t_avds = 7;
+ t_avdh = 7;
+ t_ach = 9;
+ t_aavdh = 7;
+ t_rdyo = 15;
+ break;
+ }
+
+ /* Set synchronous read timings */
+ memset(&dev_t, 0, sizeof(dev_t));
+
+ if (!s->sync_write) {
+ dev_t.t_avdp_w = max(t_avdp, t_cer) * 1000;
+ dev_t.t_wpl = t_wpl * 1000;
+ dev_t.t_wph = t_wph * 1000;
+ dev_t.t_aavdh = t_aavdh * 1000;
+ }
+ dev_t.ce_xdelay = true;
+ dev_t.avd_xdelay = true;
+ dev_t.oe_xdelay = true;
+ dev_t.we_xdelay = true;
+ dev_t.clk = min_gpmc_clk_period;
+ dev_t.t_bacc = dev_t.clk;
+ dev_t.t_ces = t_ces * 1000;
+ dev_t.t_avds = t_avds * 1000;
+ dev_t.t_avdh = t_avdh * 1000;
+ dev_t.t_ach = t_ach * 1000;
+ dev_t.cyc_iaa = (latency + 1);
+ dev_t.t_cez_r = t_cez * 1000;
+ dev_t.t_cez_w = dev_t.t_cez_r;
+ dev_t.cyc_aavdh_oe = 1;
+ dev_t.t_rdyo = t_rdyo * 1000 + min_gpmc_clk_period;
+
+ gpmc_calc_timings(t, s, &dev_t);
+}
+
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info)
+{
+ int ret;
+ struct gpmc_timings gpmc_t;
+ struct gpmc_settings gpmc_s;
+
+ gpmc_read_settings_dt(dev->of_node, &gpmc_s);
+
+ info->sync_read = gpmc_s.sync_read;
+ info->sync_write = gpmc_s.sync_write;
+ info->burst_len = gpmc_s.burst_len;
+
+ if (!gpmc_s.sync_read && !gpmc_s.sync_write)
+ return 0;
+
+ gpmc_omap_onenand_calc_sync_timings(&gpmc_t, &gpmc_s, freq, latency);
+
+ ret = gpmc_cs_program_settings(cs, &gpmc_s);
+ if (ret < 0)
+ return ret;
+
+ return gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
+}
+EXPORT_SYMBOL_GPL(gpmc_omap_onenand_set_timings);
+
int gpmc_get_client_irq(unsigned irq_config)
{
if (!gpmc_irq_domain) {
@@ -1916,41 +2021,6 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
of_property_read_bool(np, "gpmc,time-para-granularity");
}
-#if IS_ENABLED(CONFIG_MTD_ONENAND)
-static int gpmc_probe_onenand_child(struct platform_device *pdev,
- struct device_node *child)
-{
- u32 val;
- struct omap_onenand_platform_data *gpmc_onenand_data;
-
- if (of_property_read_u32(child, "reg", &val) < 0) {
- dev_err(&pdev->dev, "%pOF has no 'reg' property\n",
- child);
- return -ENODEV;
- }
-
- gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data),
- GFP_KERNEL);
- if (!gpmc_onenand_data)
- return -ENOMEM;
-
- gpmc_onenand_data->cs = val;
- gpmc_onenand_data->of_node = child;
- gpmc_onenand_data->dma_channel = -1;
-
- if (!of_property_read_u32(child, "dma-channel", &val))
- gpmc_onenand_data->dma_channel = val;
-
- return gpmc_onenand_init(gpmc_onenand_data);
-}
-#else
-static int gpmc_probe_onenand_child(struct platform_device *pdev,
- struct device_node *child)
-{
- return 0;
-}
-#endif
-
/**
* gpmc_probe_generic_child - configures the gpmc for a child device
* @pdev: pointer to gpmc platform device
@@ -2053,6 +2123,16 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
}
}
+ if (of_node_cmp(child->name, "onenand") == 0) {
+ /* Warn about older DT blobs with no compatible property */
+ if (!of_property_read_bool(child, "compatible")) {
+ dev_warn(&pdev->dev,
+ "Incompatible OneNAND node: missing compatible");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
if (of_device_is_compatible(child, "ti,omap2-nand")) {
/* NAND specific setup */
val = 8;
@@ -2077,8 +2157,9 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
} else {
ret = of_property_read_u32(child, "bank-width",
&gpmc_s.device_width);
- if (ret < 0) {
- dev_err(&pdev->dev, "%pOF has no 'bank-width' property\n",
+ if (ret < 0 && !gpmc_s.device_width) {
+ dev_err(&pdev->dev,
+ "%pOF has no 'gpmc,device-width' property\n",
child);
goto err;
}
@@ -2188,11 +2269,7 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
if (!child->name)
continue;
- if (of_node_cmp(child->name, "onenand") == 0)
- ret = gpmc_probe_onenand_child(pdev, child);
- else
- ret = gpmc_probe_generic_child(pdev, child);
-
+ ret = gpmc_probe_generic_child(pdev, child);
if (ret) {
dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n",
child->name, ret);
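
A note on units in gpmc_omap_onenand_calc_sync_timings() above:
min_gpmc_clk_period is a clock period in picoseconds (1e6 / f_MHz, so
104 MHz gives ~9615 ps, tabulated as 9600), and the data-sheet figures in
the t_* locals are nanoseconds, hence the "* 1000" scaling when they are
written into struct gpmc_device_timings. The arithmetic, as a sketch with a
hypothetical helper:

	/* clock period in picoseconds from a frequency in MHz */
	static inline unsigned int mhz_to_ps(unsigned int f_mhz)
	{
		return 1000000 / f_mhz;	/* 104 -> 9615, 83 -> 12048, 66 -> 15151 */
	}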
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index 7310e32b5991..aa2b0786bbe9 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -45,7 +45,7 @@ config MEMSTICK_R592
config MEMSTICK_REALTEK_PCI
tristate "Realtek PCI-E Memstick Card Interface Driver"
- depends on MFD_RTSX_PCI
+ depends on MISC_RTSX_PCI
help
Say Y here to include driver code to support Memstick card interface
of Realtek PCI-E card reader
@@ -55,7 +55,7 @@ config MEMSTICK_REALTEK_PCI
config MEMSTICK_REALTEK_USB
tristate "Realtek USB Memstick Card Interface Driver"
- depends on MFD_RTSX_USB
+ depends on MISC_RTSX_USB
help
Say Y here to include driver code to support Memstick card interface
of Realtek RTS5129/39 series USB card reader
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index 818fa94354ae..a44b4578ba4d 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -24,7 +24,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/memstick.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <asm/unaligned.h>
struct realtek_pci_ms {
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index 2e3cf012ef48..4f64563df7de 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -25,7 +25,7 @@
#include <linux/workqueue.h>
#include <linux/memstick.h>
#include <linux/kthread.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/sched.h>
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 7a93400eea2a..51eb1b027963 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -958,7 +958,7 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
u32 mf_dma_addr;
int req_offset;
- u16 req_idx; /* Request index */
+ u16 req_idx; /* Request index */
/* ensure values are reset properly! */
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
@@ -994,7 +994,7 @@ mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
u32 mf_dma_addr;
int req_offset;
- u16 req_idx; /* Request index */
+ u16 req_idx; /* Request index */
/* ensure values are reset properly! */
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
@@ -1128,11 +1128,12 @@ mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
static void
mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
- SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
- pChain->Length = cpu_to_le16(length);
- pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
- pChain->NextChainOffset = next;
- pChain->Address = cpu_to_le32(dma_addr);
+ SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
+
+ pChain->Length = cpu_to_le16(length);
+ pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
+ pChain->NextChainOffset = next;
+ pChain->Address = cpu_to_le32(dma_addr);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1147,18 +1148,18 @@ mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
static void
mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
- SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
- u32 tmp = dma_addr & 0xFFFFFFFF;
+ SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
+ u32 tmp = dma_addr & 0xFFFFFFFF;
- pChain->Length = cpu_to_le16(length);
- pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
- MPI_SGE_FLAGS_64_BIT_ADDRESSING);
+ pChain->Length = cpu_to_le16(length);
+ pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI_SGE_FLAGS_64_BIT_ADDRESSING);
- pChain->NextChainOffset = next;
+ pChain->NextChainOffset = next;
- pChain->Address.Low = cpu_to_le32(tmp);
- tmp = (u32)(upper_32_bits(dma_addr));
- pChain->Address.High = cpu_to_le32(tmp);
+ pChain->Address.Low = cpu_to_le32(tmp);
+ tmp = (u32)(upper_32_bits(dma_addr));
+ pChain->Address.High = cpu_to_le32(tmp);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1360,7 +1361,7 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
-return 0;
+ return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2152,7 +2153,7 @@ mpt_suspend(struct pci_dev *pdev, pm_message_t state)
device_state);
/* put ioc into READY_STATE */
- if(SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
+ if (SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
printk(MYIOC_s_ERR_FMT
"pci-suspend: IOC msg unit reset failed!\n", ioc->name);
}
@@ -6348,7 +6349,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
u8 page_type = 0, extend_page;
unsigned long timeleft;
unsigned long flags;
- int in_isr;
+ int in_isr;
u8 issue_hard_reset = 0;
u8 retry_count = 0;
@@ -7697,7 +7698,7 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
break;
}
if (ds)
- strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
+ strlcpy(evStr, ds, EVENT_DESCR_STR_SZ);
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
@@ -8092,15 +8093,15 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
static void
mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info, u8 cb_idx)
{
-union loginfo_type {
- u32 loginfo;
- struct {
- u32 subcode:16;
- u32 code:8;
- u32 originator:4;
- u32 bus_type:4;
- }dw;
-};
+ union loginfo_type {
+ u32 loginfo;
+ struct {
+ u32 subcode:16;
+ u32 code:8;
+ u32 originator:4;
+ u32 bus_type:4;
+ } dw;
+ };
union loginfo_type sas_loginfo;
char *originator_desc = NULL;
char *code_desc = NULL;
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 7b3b41368931..8d12017b9893 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2481,24 +2481,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
else
karg.host_no = -1;
- /* Reformat the fw_version into a string
- */
- karg.fw_version[0] = ioc->facts.FWVersion.Struct.Major >= 10 ?
- ((ioc->facts.FWVersion.Struct.Major / 10) + '0') : '0';
- karg.fw_version[1] = (ioc->facts.FWVersion.Struct.Major % 10 ) + '0';
- karg.fw_version[2] = '.';
- karg.fw_version[3] = ioc->facts.FWVersion.Struct.Minor >= 10 ?
- ((ioc->facts.FWVersion.Struct.Minor / 10) + '0') : '0';
- karg.fw_version[4] = (ioc->facts.FWVersion.Struct.Minor % 10 ) + '0';
- karg.fw_version[5] = '.';
- karg.fw_version[6] = ioc->facts.FWVersion.Struct.Unit >= 10 ?
- ((ioc->facts.FWVersion.Struct.Unit / 10) + '0') : '0';
- karg.fw_version[7] = (ioc->facts.FWVersion.Struct.Unit % 10 ) + '0';
- karg.fw_version[8] = '.';
- karg.fw_version[9] = ioc->facts.FWVersion.Struct.Dev >= 10 ?
- ((ioc->facts.FWVersion.Struct.Dev / 10) + '0') : '0';
- karg.fw_version[10] = (ioc->facts.FWVersion.Struct.Dev % 10 ) + '0';
- karg.fw_version[11] = '\0';
+ /* Reformat the fw_version into a string */
+ snprintf(karg.fw_version, sizeof(karg.fw_version),
+ "%.2hhu.%.2hhu.%.2hhu.%.2hhu",
+ ioc->facts.FWVersion.Struct.Major,
+ ioc->facts.FWVersion.Struct.Minor,
+ ioc->facts.FWVersion.Struct.Unit,
+ ioc->facts.FWVersion.Struct.Dev);
/* Issue a config request to get the device serial number
*/
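
The fw_version rewrite above relies on two printf details: a precision on an
integer conversion ("%.2") sets a minimum digit count with zero padding, and
the "hh" length modifier consumes an unsigned-char-sized argument, so one
snprintf() replaces seventeen lines of manual digit arithmetic. For example:

	char buf[16];

	snprintf(buf, sizeof(buf), "%.2hhu.%.2hhu.%.2hhu.%.2hhu", 1, 3, 25, 7);
	/* buf now holds "01.03.25.07" */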
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 345f6035599e..439ee9c5f535 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1165,7 +1165,6 @@ mptsas_schedule_target_reset(void *iocp)
* issue target reset to next device in the queue
*/
- head = &hd->target_reset_list;
if (list_empty(head))
return;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 1d20a800e967..b860eb5aa194 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -222,6 +222,16 @@ config MFD_CROS_EC_SPI
response time cannot be guaranteed, we support ignoring
'pre-amble' bytes before the response actually starts.
+config MFD_CROS_EC_CHARDEV
+ tristate "Chrome OS Embedded Controller userspace device interface"
+ depends on MFD_CROS_EC
+ select CROS_EC_CTL
+ ---help---
+ This driver adds support to talk with the ChromeOS EC from userspace.
+
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called cros_ec_dev.
+
config MFD_ASIC3
bool "Compaq ASIC3"
depends on GPIOLIB && ARM
@@ -877,7 +887,7 @@ config UCB1400_CORE
config MFD_PM8XXX
tristate "Qualcomm PM8xxx PMIC chips driver"
- depends on (ARM || HEXAGON)
+ depends on (ARM || HEXAGON || COMPILE_TEST)
select IRQ_DOMAIN
select MFD_CORE
select REGMAP
@@ -929,17 +939,6 @@ config MFD_RDC321X
southbridge which provides access to GPIOs and Watchdog using the
southbridge PCI device configuration space.
-config MFD_RTSX_PCI
- tristate "Realtek PCI-E card reader"
- depends on PCI
- select MFD_CORE
- help
- This supports for Realtek PCI-Express card reader including rts5209,
- rts5227, rts522A, rts5229, rts5249, rts524A, rts525A, rtl8411, etc.
- Realtek card reader supports access to many types of memory cards,
- such as Memory Stick, Memory Stick Pro, Secure Digital and
- MultiMediaCard.
-
config MFD_RT5033
tristate "Richtek RT5033 Power Management IC"
depends on I2C
@@ -953,16 +952,6 @@ config MFD_RT5033
sub-devices like charger, fuel gauge, flash LED, current source,
LDO and Buck.
-config MFD_RTSX_USB
- tristate "Realtek USB card reader"
- depends on USB
- select MFD_CORE
- help
- Select this option to get support for Realtek USB 2.0 card readers
- including RTS5129, RTS5139, RTS5179 and RTS5170.
- Realtek card reader supports access to many types of memory cards,
- such as Memory Stick Pro, Secure Digital and MultiMediaCard.
-
config MFD_RC5T583
bool "Ricoh RC5T583 Power Management system device"
depends on I2C=y
@@ -1859,5 +1848,13 @@ config MFD_VEXPRESS_SYSREG
System Registers are the platform configuration block
on the ARM Ltd. Versatile Express board.
+config RAVE_SP_CORE
+ tristate "RAVE SP MCU core driver"
+ depends on SERIAL_DEV_BUS
+ select CRC_CCITT
+ help
+ Select this to get support for the Supervisory Processor
+ device found on several devices in RAVE line of hardware.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index d9474ade32e6..d9d2cf0d32ef 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -17,12 +17,9 @@ cros_ec_core-$(CONFIG_ACPI) += cros_ec_acpi_gpe.o
obj-$(CONFIG_MFD_CROS_EC) += cros_ec_core.o
obj-$(CONFIG_MFD_CROS_EC_I2C) += cros_ec_i2c.o
obj-$(CONFIG_MFD_CROS_EC_SPI) += cros_ec_spi.o
+obj-$(CONFIG_MFD_CROS_EC_CHARDEV) += cros_ec_dev.o
obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o
-obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o
-obj-$(CONFIG_MFD_RTSX_USB) += rtsx_usb.o
-
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
@@ -230,3 +227,5 @@ obj-$(CONFIG_MFD_STM32_LPTIMER) += stm32-lptimer.o
obj-$(CONFIG_MFD_STM32_TIMERS) += stm32-timers.o
obj-$(CONFIG_MFD_MXS_LRADC) += mxs-lradc.o
obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o
+obj-$(CONFIG_RAVE_SP_CORE) += rave-sp.o
+
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index c1c815241e02..1afa27de7191 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -1258,6 +1258,19 @@ static struct ab8500_prcmu_ranges ab8540_debug_ranges[AB8500_NUM_BANKS] = {
},
};
+#define DEFINE_SHOW_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
static irqreturn_t ab8500_debug_handler(int irq, void *data)
{
@@ -1318,7 +1331,7 @@ static int ab8500_registers_print(struct device *dev, u32 bank,
return 0;
}
-static int ab8500_print_bank_registers(struct seq_file *s, void *p)
+static int ab8500_bank_registers_show(struct seq_file *s, void *p)
{
struct device *dev = s->private;
u32 bank = debug_bank;
@@ -1330,18 +1343,7 @@ static int ab8500_print_bank_registers(struct seq_file *s, void *p)
return ab8500_registers_print(dev, bank, s);
}
-static int ab8500_registers_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_print_bank_registers, inode->i_private);
-}
-
-static const struct file_operations ab8500_registers_fops = {
- .open = ab8500_registers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_bank_registers);
static int ab8500_print_all_banks(struct seq_file *s, void *p)
{
@@ -1528,7 +1530,7 @@ void ab8500_debug_register_interrupt(int line)
num_interrupts[line]++;
}
-static int ab8500_interrupts_print(struct seq_file *s, void *p)
+static int ab8500_interrupts_show(struct seq_file *s, void *p)
{
int line;
@@ -1557,10 +1559,7 @@ static int ab8500_interrupts_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_interrupts_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_interrupts_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_interrupts);
/*
* - HWREG DB8500 formated routines
@@ -1603,7 +1602,7 @@ static int ab8500_hwreg_open(struct inode *inode, struct file *file)
#define AB8500_LAST_SIM_REG 0x8B
#define AB8505_LAST_SIM_REG 0x8C
-static int ab8500_print_modem_registers(struct seq_file *s, void *p)
+static int ab8500_modem_show(struct seq_file *s, void *p)
{
struct device *dev = s->private;
struct ab8500 *ab8500;
@@ -1620,18 +1619,15 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p)
err = abx500_get_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, &orig_value);
- if (err < 0) {
- dev_err(dev, "ab->read fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_read_failure;
+
/* Config 1 will allow APE side to read SIM registers */
err = abx500_set_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG,
AB8500_SUPPLY_CONTROL_CONFIG_1);
- if (err < 0) {
- dev_err(dev, "ab->write fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_write_failure;
seq_printf(s, " bank 0x%02X:\n", bank);
@@ -1641,36 +1637,30 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p)
for (reg = AB8500_FIRST_SIM_REG; reg <= last_sim_reg; reg++) {
err = abx500_get_register_interruptible(dev,
bank, reg, &value);
- if (err < 0) {
- dev_err(dev, "ab->read fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_read_failure;
+
seq_printf(s, " [0x%02X/0x%02X]: 0x%02X\n", bank, reg, value);
}
err = abx500_set_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, orig_value);
- if (err < 0) {
- dev_err(dev, "ab->write fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_write_failure;
+
return 0;
-}
-static int ab8500_modem_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_print_modem_registers,
- inode->i_private);
+report_read_failure:
+ dev_err(dev, "ab->read fail %d\n", err);
+ return err;
+
+report_write_failure:
+ dev_err(dev, "ab->write fail %d\n", err);
+ return err;
}
-static const struct file_operations ab8500_modem_fops = {
- .open = ab8500_modem_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_modem);
-static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_bat_ctrl_show(struct seq_file *s, void *p)
{
int bat_ctrl_raw;
int bat_ctrl_convert;
@@ -1687,21 +1677,9 @@ static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_bat_ctrl_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_bat_ctrl_fops = {
- .open = ab8500_gpadc_bat_ctrl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bat_ctrl);
-static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_btemp_ball_show(struct seq_file *s, void *p)
{
int btemp_ball_raw;
int btemp_ball_convert;
@@ -1718,22 +1696,9 @@ static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_btemp_ball_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_btemp_ball_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_btemp_ball);
-static const struct file_operations ab8500_gpadc_btemp_ball_fops = {
- .open = ab8500_gpadc_btemp_ball_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_charger_v_show(struct seq_file *s, void *p)
{
int main_charger_v_raw;
int main_charger_v_convert;
@@ -1750,22 +1715,9 @@ static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_charger_v_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_charger_v_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_main_charger_v_fops = {
- .open = ab8500_gpadc_main_charger_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_v);
-static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_acc_detect1_show(struct seq_file *s, void *p)
{
int acc_detect1_raw;
int acc_detect1_convert;
@@ -1782,22 +1734,9 @@ static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_acc_detect1_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_acc_detect1_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_acc_detect1_fops = {
- .open = ab8500_gpadc_acc_detect1_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect1);
-static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_acc_detect2_show(struct seq_file *s, void *p)
{
int acc_detect2_raw;
int acc_detect2_convert;
@@ -1814,22 +1753,9 @@ static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_acc_detect2_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_acc_detect2_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_acc_detect2_fops = {
- .open = ab8500_gpadc_acc_detect2_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect2);
-static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_aux1_show(struct seq_file *s, void *p)
{
int aux1_raw;
int aux1_convert;
@@ -1846,20 +1772,9 @@ static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_aux1_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_aux1_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux1);
-static const struct file_operations ab8500_gpadc_aux1_fops = {
- .open = ab8500_gpadc_aux1_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_aux2_show(struct seq_file *s, void *p)
{
int aux2_raw;
int aux2_convert;
@@ -1876,20 +1791,9 @@ static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_aux2_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_aux2_print, inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_aux2_fops = {
- .open = ab8500_gpadc_aux2_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux2);
-static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_bat_v_show(struct seq_file *s, void *p)
{
int main_bat_v_raw;
int main_bat_v_convert;
@@ -1906,22 +1810,9 @@ static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_bat_v_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_bat_v_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_bat_v);
-static const struct file_operations ab8500_gpadc_main_bat_v_fops = {
- .open = ab8500_gpadc_main_bat_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_vbus_v_show(struct seq_file *s, void *p)
{
int vbus_v_raw;
int vbus_v_convert;
@@ -1938,20 +1829,9 @@ static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_vbus_v_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_vbus_v_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_vbus_v);
-static const struct file_operations ab8500_gpadc_vbus_v_fops = {
- .open = ab8500_gpadc_vbus_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_charger_c_show(struct seq_file *s, void *p)
{
int main_charger_c_raw;
int main_charger_c_convert;
@@ -1968,22 +1848,9 @@ static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_charger_c_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_charger_c_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_main_charger_c_fops = {
- .open = ab8500_gpadc_main_charger_c_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_c);
-static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_usb_charger_c_show(struct seq_file *s, void *p)
{
int usb_charger_c_raw;
int usb_charger_c_convert;
@@ -2000,22 +1867,9 @@ static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_usb_charger_c_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_usb_charger_c_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_usb_charger_c_fops = {
- .open = ab8500_gpadc_usb_charger_c_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_charger_c);
-static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_bk_bat_v_show(struct seq_file *s, void *p)
{
int bk_bat_v_raw;
int bk_bat_v_convert;
@@ -2032,21 +1886,9 @@ static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_bk_bat_v_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_bk_bat_v_fops = {
- .open = ab8500_gpadc_bk_bat_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bk_bat_v);
-static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_die_temp_show(struct seq_file *s, void *p)
{
int die_temp_raw;
int die_temp_convert;
@@ -2063,21 +1905,9 @@ static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_die_temp_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_die_temp);
-static const struct file_operations ab8500_gpadc_die_temp_fops = {
- .open = ab8500_gpadc_die_temp_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_usb_id_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_usb_id_show(struct seq_file *s, void *p)
{
int usb_id_raw;
int usb_id_convert;
@@ -2094,20 +1924,9 @@ static int ab8500_gpadc_usb_id_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_usb_id_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_usb_id_print, inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_usb_id_fops = {
- .open = ab8500_gpadc_usb_id_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_id);
-static int ab8540_gpadc_xtal_temp_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_xtal_temp_show(struct seq_file *s, void *p)
{
int xtal_temp_raw;
int xtal_temp_convert;
@@ -2124,21 +1943,9 @@ static int ab8540_gpadc_xtal_temp_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_xtal_temp_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8540_gpadc_xtal_temp_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_xtal_temp);
-static const struct file_operations ab8540_gpadc_xtal_temp_fops = {
- .open = ab8540_gpadc_xtal_temp_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_vbat_true_meas_show(struct seq_file *s, void *p)
{
int vbat_true_meas_raw;
int vbat_true_meas_convert;
@@ -2156,22 +1963,9 @@ static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_vbat_true_meas_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_true_meas_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas);
-static const struct file_operations ab8540_gpadc_vbat_true_meas_fops = {
- .open = ab8540_gpadc_vbat_true_meas_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_bat_ctrl_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_bat_ctrl_and_ibat_show(struct seq_file *s, void *p)
{
int bat_ctrl_raw;
int bat_ctrl_convert;
@@ -2197,22 +1991,9 @@ static int ab8540_gpadc_bat_ctrl_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_bat_ctrl_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_bat_ctrl_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_bat_ctrl_and_ibat_fops = {
- .open = ab8540_gpadc_bat_ctrl_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_ctrl_and_ibat);
-static int ab8540_gpadc_vbat_meas_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_vbat_meas_and_ibat_show(struct seq_file *s, void *p)
{
int vbat_meas_raw;
int vbat_meas_convert;
@@ -2237,23 +2018,9 @@ static int ab8540_gpadc_vbat_meas_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_vbat_meas_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_meas_and_ibat_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_meas_and_ibat);
-static const struct file_operations ab8540_gpadc_vbat_meas_and_ibat_fops = {
- .open = ab8540_gpadc_vbat_meas_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s,
- void *p)
+static int ab8540_gpadc_vbat_true_meas_and_ibat_show(struct seq_file *s, void *p)
{
int vbat_true_meas_raw;
int vbat_true_meas_convert;
@@ -2279,23 +2046,9 @@ static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s,
return 0;
}
-static int ab8540_gpadc_vbat_true_meas_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_true_meas_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations
-ab8540_gpadc_vbat_true_meas_and_ibat_fops = {
- .open = ab8540_gpadc_vbat_true_meas_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas_and_ibat);
-static int ab8540_gpadc_bat_temp_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_bat_temp_and_ibat_show(struct seq_file *s, void *p)
{
int bat_temp_raw;
int bat_temp_convert;
@@ -2320,22 +2073,9 @@ static int ab8540_gpadc_bat_temp_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_bat_temp_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_bat_temp_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_bat_temp_and_ibat_fops = {
- .open = ab8540_gpadc_bat_temp_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_temp_and_ibat);
-static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_otp_calib_show(struct seq_file *s, void *p)
{
struct ab8500_gpadc *gpadc;
u16 vmain_l, vmain_h, btemp_l, btemp_h;
@@ -2359,18 +2099,7 @@ static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_otp_cal_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8540_gpadc_otp_cal_print, inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_otp_calib_fops = {
- .open = ab8540_gpadc_otp_cal_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_otp_calib);
static int ab8500_gpadc_avg_sample_print(struct seq_file *s, void *p)
{
@@ -2903,14 +2632,6 @@ static const struct file_operations ab8500_val_fops = {
.owner = THIS_MODULE,
};
-static const struct file_operations ab8500_interrupts_fops = {
- .open = ab8500_interrupts_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
static const struct file_operations ab8500_subscribe_fops = {
.open = ab8500_subscribe_unsubscribe_open,
.write = ab8500_subscribe_write,
@@ -2997,7 +2718,7 @@ static int ab8500_debug_probe(struct platform_device *plf)
goto err;
file = debugfs_create_file("all-bank-registers", S_IRUGO, ab8500_dir,
- &plf->dev, &ab8500_registers_fops);
+ &plf->dev, &ab8500_bank_registers_fops);
if (!file)
goto err;
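
For reference, the DEFINE_SHOW_ATTRIBUTE(name) macro introduced at the top of
this patch only requires a name_show(struct seq_file *, void *) function and
generates the matching name_open() and name_fops, which is why each *_print()
helper above is renamed to *_show before its boilerplate is deleted. Minimal
usage sketch (foo is hypothetical):

	static int foo_show(struct seq_file *s, void *p)
	{
		seq_puts(s, "hello\n");
		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(foo);	/* emits foo_open() and foo_fops */

	/* then: debugfs_create_file("foo", 0444, parent, NULL, &foo_fops); */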
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 09cf3699e354..a307832d7e45 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -184,6 +184,7 @@ static struct irq_chip arizona_irq_chip = {
};
static struct lock_class_key arizona_irq_lock_class;
+static struct lock_class_key arizona_irq_request_class;
static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
@@ -191,7 +192,8 @@ static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
struct arizona *data = h->host_data;
irq_set_chip_data(virq, data);
- irq_set_lockdep_class(virq, &arizona_irq_lock_class);
+ irq_set_lockdep_class(virq, &arizona_irq_lock_class,
+ &arizona_irq_request_class);
irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq);
irq_set_nested_thread(virq, 1);
irq_set_noprobe(virq);
diff --git a/drivers/mfd/atmel-flexcom.c b/drivers/mfd/atmel-flexcom.c
index 064bde9cff5a..f684a93a3340 100644
--- a/drivers/mfd/atmel-flexcom.c
+++ b/drivers/mfd/atmel-flexcom.c
@@ -39,34 +39,43 @@
#define FLEX_MR_OPMODE(opmode) (((opmode) << FLEX_MR_OPMODE_OFFSET) & \
FLEX_MR_OPMODE_MASK)
+struct atmel_flexcom {
+ void __iomem *base;
+ u32 opmode;
+ struct clk *clk;
+};
static int atmel_flexcom_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct clk *clk;
struct resource *res;
- void __iomem *base;
- u32 opmode;
+ struct atmel_flexcom *ddata;
int err;
- err = of_property_read_u32(np, "atmel,flexcom-mode", &opmode);
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddata);
+
+ err = of_property_read_u32(np, "atmel,flexcom-mode", &ddata->opmode);
if (err)
return err;
- if (opmode < ATMEL_FLEXCOM_MODE_USART ||
- opmode > ATMEL_FLEXCOM_MODE_TWI)
+ if (ddata->opmode < ATMEL_FLEXCOM_MODE_USART ||
+ ddata->opmode > ATMEL_FLEXCOM_MODE_TWI)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ ddata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ddata->base))
+ return PTR_ERR(ddata->base);
- clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ddata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ddata->clk))
+ return PTR_ERR(ddata->clk);
- err = clk_prepare_enable(clk);
+ err = clk_prepare_enable(ddata->clk);
if (err)
return err;
@@ -76,9 +85,9 @@ static int atmel_flexcom_probe(struct platform_device *pdev)
* inaccessible and are read as zero. Also the external I/O lines of the
* Flexcom are muxed to reach the selected device.
*/
- writel(FLEX_MR_OPMODE(opmode), base + FLEX_MR);
+ writel(FLEX_MR_OPMODE(ddata->opmode), ddata->base + FLEX_MR);
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(ddata->clk);
return devm_of_platform_populate(&pdev->dev);
}
@@ -89,10 +98,34 @@ static const struct of_device_id atmel_flexcom_of_match[] = {
};
MODULE_DEVICE_TABLE(of, atmel_flexcom_of_match);
+#ifdef CONFIG_PM_SLEEP
+static int atmel_flexcom_resume(struct device *dev)
+{
+ struct atmel_flexcom *ddata = dev_get_drvdata(dev);
+ int err;
+ u32 val;
+
+ err = clk_prepare_enable(ddata->clk);
+ if (err)
+ return err;
+
+ val = FLEX_MR_OPMODE(ddata->opmode);
+ writel(val, ddata->base + FLEX_MR);
+
+ clk_disable_unprepare(ddata->clk);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(atmel_flexcom_pm_ops, NULL,
+ atmel_flexcom_resume);
+
static struct platform_driver atmel_flexcom_driver = {
.probe = atmel_flexcom_probe,
.driver = {
.name = "atmel_flexcom",
+ .pm = &atmel_flexcom_pm_ops,
.of_match_table = atmel_flexcom_of_match,
},
};
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 2468b431bb22..e94c72c2faa2 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -129,6 +129,7 @@ static const struct regmap_range axp288_volatile_ranges[] = {
regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
regmap_reg_range(AXP288_BC_DET_STAT, AXP288_BC_DET_STAT),
+ regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
@@ -878,6 +879,9 @@ static struct mfd_cell axp813_cells[] = {
.resources = axp803_pek_resources,
}, {
.name = "axp20x-regulator",
+ }, {
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp813-gpio",
}
};
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index b0ca5a4c841e..d61024141e2b 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -40,13 +40,13 @@ static struct cros_ec_platform pd_p = {
};
static const struct mfd_cell ec_cell = {
- .name = "cros-ec-ctl",
+ .name = "cros-ec-dev",
.platform_data = &ec_p,
.pdata_size = sizeof(ec_p),
};
static const struct mfd_cell ec_pd_cell = {
- .name = "cros-ec-ctl",
+ .name = "cros-ec-dev",
.platform_data = &pd_p,
.pdata_size = sizeof(pd_p),
};
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index cf6c4f0846b8..e4fafdd96e5e 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -25,9 +25,10 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include "cros_ec_debugfs.h"
#include "cros_ec_dev.h"
+#define DRV_NAME "cros-ec-dev"
+
/* Device variables */
#define CROS_MAX_DEV 128
static int ec_major;
@@ -461,7 +462,7 @@ static int ec_device_remove(struct platform_device *pdev)
}
static const struct platform_device_id cros_ec_id[] = {
- { "cros-ec-ctl", 0 },
+ { DRV_NAME, 0 },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, cros_ec_id);
@@ -493,7 +494,7 @@ static const struct dev_pm_ops cros_ec_dev_pm_ops = {
static struct platform_driver cros_ec_dev_driver = {
.driver = {
- .name = "cros-ec-ctl",
+ .name = DRV_NAME,
.pm = &cros_ec_dev_pm_ops,
},
.probe = ec_device_probe,
@@ -544,6 +545,7 @@ static void __exit cros_ec_dev_exit(void)
module_init(cros_ec_dev_init);
module_exit(cros_ec_dev_exit);
+MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Bill Richardson <wfrichar@chromium.org>");
MODULE_DESCRIPTION("Userspace interface to the Chrome OS Embedded Controller");
MODULE_VERSION("1.0");
diff --git a/drivers/platform/chrome/cros_ec_dev.h b/drivers/mfd/cros_ec_dev.h
index 45e9453608c5..45e9453608c5 100644
--- a/drivers/platform/chrome/cros_ec_dev.h
+++ b/drivers/mfd/cros_ec_dev.h
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index c9714072e224..1b52b8557034 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -72,8 +72,7 @@
* struct cros_ec_spi - information about a SPI-connected EC
*
* @spi: SPI device we are connected to
- * @last_transfer_ns: time that we last finished a transfer, or 0 if there
- * if no record
+ * @last_transfer_ns: time that we last finished a transfer.
* @start_of_msg_delay: used to set the delay_usecs on the spi_transfer that
* is sent when we want to turn on CS at the start of a transaction.
* @end_of_msg_delay: used to set the delay_usecs on the spi_transfer that
@@ -377,19 +376,17 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
u8 *ptr;
u8 *rx_buf;
u8 sum;
+ u8 rx_byte;
int ret = 0, final_ret;
+ unsigned long delay;
len = cros_ec_prepare_tx(ec_dev, ec_msg);
dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
/* If it's too soon to do another transaction, wait */
- if (ec_spi->last_transfer_ns) {
- unsigned long delay; /* The delay completed so far */
-
- delay = ktime_get_ns() - ec_spi->last_transfer_ns;
- if (delay < EC_SPI_RECOVERY_TIME_NS)
- ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
- }
+ delay = ktime_get_ns() - ec_spi->last_transfer_ns;
+ if (delay < EC_SPI_RECOVERY_TIME_NS)
+ ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
rx_buf = kzalloc(len, GFP_KERNEL);
if (!rx_buf)
@@ -421,25 +418,22 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
if (!ret) {
/* Verify that EC can process command */
for (i = 0; i < len; i++) {
- switch (rx_buf[i]) {
- case EC_SPI_PAST_END:
- case EC_SPI_RX_BAD_DATA:
- case EC_SPI_NOT_READY:
- ret = -EAGAIN;
- ec_msg->result = EC_RES_IN_PROGRESS;
- default:
+ rx_byte = rx_buf[i];
+ if (rx_byte == EC_SPI_PAST_END ||
+ rx_byte == EC_SPI_RX_BAD_DATA ||
+ rx_byte == EC_SPI_NOT_READY) {
+ ret = -EREMOTEIO;
break;
}
- if (ret)
- break;
}
- if (!ret)
- ret = cros_ec_spi_receive_packet(ec_dev,
- ec_msg->insize + sizeof(*response));
- } else {
- dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
}
+ if (!ret)
+ ret = cros_ec_spi_receive_packet(ec_dev,
+ ec_msg->insize + sizeof(*response));
+ else
+ dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
final_ret = terminate_request(ec_dev);
spi_bus_unlock(ec_spi->spi->master);
@@ -508,20 +502,18 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
int i, len;
u8 *ptr;
u8 *rx_buf;
+ u8 rx_byte;
int sum;
int ret = 0, final_ret;
+ unsigned long delay;
len = cros_ec_prepare_tx(ec_dev, ec_msg);
dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
/* If it's too soon to do another transaction, wait */
- if (ec_spi->last_transfer_ns) {
- unsigned long delay; /* The delay completed so far */
-
- delay = ktime_get_ns() - ec_spi->last_transfer_ns;
- if (delay < EC_SPI_RECOVERY_TIME_NS)
- ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
- }
+ delay = ktime_get_ns() - ec_spi->last_transfer_ns;
+ if (delay < EC_SPI_RECOVERY_TIME_NS)
+ ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
rx_buf = kzalloc(len, GFP_KERNEL);
if (!rx_buf)
@@ -544,25 +536,22 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
if (!ret) {
/* Verify that EC can process command */
for (i = 0; i < len; i++) {
- switch (rx_buf[i]) {
- case EC_SPI_PAST_END:
- case EC_SPI_RX_BAD_DATA:
- case EC_SPI_NOT_READY:
- ret = -EAGAIN;
- ec_msg->result = EC_RES_IN_PROGRESS;
- default:
+ rx_byte = rx_buf[i];
+ if (rx_byte == EC_SPI_PAST_END ||
+ rx_byte == EC_SPI_RX_BAD_DATA ||
+ rx_byte == EC_SPI_NOT_READY) {
+ ret = -EREMOTEIO;
break;
}
- if (ret)
- break;
}
- if (!ret)
- ret = cros_ec_spi_receive_response(ec_dev,
- ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
- } else {
- dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
}
+ if (!ret)
+ ret = cros_ec_spi_receive_response(ec_dev,
+ ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
+ else
+ dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
final_ret = terminate_request(ec_dev);
spi_bus_unlock(ec_spi->spi->master);
@@ -667,6 +656,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
sizeof(struct ec_response_get_protocol_info);
ec_dev->dout_size = sizeof(struct ec_host_request);
+ ec_spi->last_transfer_ns = ktime_get_ns();
err = cros_ec_register(ec_dev);
if (err) {
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 0e0ab9bb1530..9e545eb6e8b4 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -450,6 +450,8 @@ int intel_lpss_probe(struct device *dev,
if (ret)
goto err_remove_ltr;
+ dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
+
return 0;
err_remove_ltr:
@@ -478,7 +480,9 @@ EXPORT_SYMBOL_GPL(intel_lpss_remove);
static int resume_lpss_device(struct device *dev, void *data)
{
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ pm_runtime_resume(dev);
+
return 0;
}
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index 36adf9e8153e..274306d98ac1 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -16,7 +16,6 @@
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
-#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/mfd/core.h>
#include <linux/i2c.h>
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 55d824b3a808..390b27cb2c2e 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -458,7 +458,7 @@ static int kempld_probe(struct platform_device *pdev)
return -EINVAL;
pld->io_base = devm_ioport_map(dev, ioport->start,
- ioport->end - ioport->start);
+ resource_size(ioport));
if (!pld->io_base)
return -ENOMEM;
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index cf1120abbf52..53dc1a43472c 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -1143,11 +1143,6 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
res->end = res->start + SPIBASE_APL_SZ - 1;
pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- if (!(bcr & BCR_WPD)) {
- bcr |= BCR_WPD;
- pci_bus_write_config_dword(bus, spi, BCR, bcr);
- pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- }
info->writeable = !!(bcr & BCR_WPD);
}
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index dc5caeaaa6a1..da9612dbb222 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -15,7 +15,6 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77843-private.h>
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 3922a93f9f92..663a2398b6b1 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -430,6 +430,7 @@ static void palmas_power_off(void)
{
unsigned int addr;
int ret, slave;
+ u8 powerhold_mask;
struct device_node *np = palmas_dev->dev->of_node;
if (of_property_read_bool(np, "ti,palmas-override-powerhold")) {
@@ -437,8 +438,15 @@ static void palmas_power_off(void)
PALMAS_PRIMARY_SECONDARY_PAD2);
slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
+ if (of_device_is_compatible(np, "ti,tps65917"))
+ powerhold_mask =
+ TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK;
+ else
+ powerhold_mask =
+ PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK;
+
ret = regmap_update_bits(palmas_dev->regmap[slave], addr,
- PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK, 0);
+ powerhold_mask, 0);
if (ret)
dev_err(palmas_dev->dev,
"Unable to write PRIMARY_SECONDARY_PAD2 %d\n",
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 6155d123a84e..f952dff6765f 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -149,7 +149,7 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
*pdev = platform_device_alloc(name, -1);
if (!*pdev) {
- dev_err(pcf->dev, "Falied to allocate %s\n", name);
+ dev_err(pcf->dev, "Failed to allocate %s\n", name);
return;
}
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
new file mode 100644
index 000000000000..5c858e784a89
--- /dev/null
+++ b/drivers/mfd/rave-sp.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Multifunction core driver for Zodiac Inflight Innovations RAVE
+ * Supervisory Processor (SP) MCU that is connected via dedicated UART
+ * port
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#include <linux/atomic.h>
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rave-sp.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/sched.h>
+#include <linux/serdev.h>
+#include <asm/unaligned.h>
+
+/*
+ * The UART protocol uses the following entities:
+ * - message to MCU => ACK response
+ * - event from MCU => event ACK
+ *
+ * Frame structure:
+ * <STX> <DATA> <CHECKSUM> <ETX>
+ * Where:
+ * - STX - start-of-transmission character
+ * - ETX - end-of-transmission character
+ * - DATA - payload
+ * - CHECKSUM - checksum calculated on <DATA>
+ *
+ * If <DATA> or <CHECKSUM> contain one of control characters, then it is
+ * escaped using <DLE> control code. Added <DLE> does not participate in
+ * checksum calculation.
+ */
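+
+/*
+ * Worked example (editor's illustration, assuming the 8b2c checksum
+ * variant defined below): for DATA = { 0xA0, 0x01 } the byte sum is
+ * 0xA1, so the two's-complement checksum is 0x5F and the frame on the
+ * wire is:
+ *
+ * 0x02 0xA0 0x01 0x5F 0x03
+ *
+ * None of these payload bytes collide with STX/ETX/DLE, so no byte
+ * stuffing is needed in this case.
+ */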
+#define RAVE_SP_STX 0x02
+#define RAVE_SP_ETX 0x03
+#define RAVE_SP_DLE 0x10
+
+#define RAVE_SP_MAX_DATA_SIZE 64
+#define RAVE_SP_CHECKSUM_SIZE 2 /* Worst case scenario on RDU2 */
+/*
+ * We don't store STX, ETX or the DLE escape bytes, so the Rx
+ * buffer only needs to hold DATA + CSUM
+ */
+#define RAVE_SP_RX_BUFFER_SIZE \
+ (RAVE_SP_MAX_DATA_SIZE + RAVE_SP_CHECKSUM_SIZE)
+
+#define RAVE_SP_STX_ETX_SIZE 2
+/*
+ * For Tx we have to have space for everything: STX, ETX and the
+ * potentially byte-stuffed DATA + CSUM
+ */
+#define RAVE_SP_TX_BUFFER_SIZE \
+ (RAVE_SP_STX_ETX_SIZE + 2 * RAVE_SP_RX_BUFFER_SIZE)
+
+#define RAVE_SP_BOOT_SOURCE_GET 0
+#define RAVE_SP_BOOT_SOURCE_SET 1
+
+#define RAVE_SP_RDU2_BOARD_TYPE_RMB 0
+#define RAVE_SP_RDU2_BOARD_TYPE_DEB 1
+
+#define RAVE_SP_BOOT_SOURCE_SD 0
+#define RAVE_SP_BOOT_SOURCE_EMMC 1
+#define RAVE_SP_BOOT_SOURCE_NOR 2
+
+/**
+ * enum rave_sp_deframer_state - Possible state for de-framer
+ *
+ * @RAVE_SP_EXPECT_SOF: Scanning input for start-of-frame marker
+ * @RAVE_SP_EXPECT_DATA: Got start of frame marker, collecting frame
+ * @RAVE_SP_EXPECT_ESCAPED_DATA: Got escape character, collecting escaped byte
+ */
+enum rave_sp_deframer_state {
+ RAVE_SP_EXPECT_SOF,
+ RAVE_SP_EXPECT_DATA,
+ RAVE_SP_EXPECT_ESCAPED_DATA,
+};
+
+/**
+ * struct rave_sp_deframer - Device protocol deframer
+ *
+ * @state: Current state of the deframer
+ * @data: Buffer used to collect deframed data
+ * @length: Number of bytes de-framed so far
+ */
+struct rave_sp_deframer {
+ enum rave_sp_deframer_state state;
+ unsigned char data[RAVE_SP_RX_BUFFER_SIZE];
+ size_t length;
+};
+
+/**
+ * struct rave_sp_reply - Reply as per RAVE device protocol
+ *
+ * @length: Expected reply length
+ * @data: Buffer to store reply payload in
+ * @code: Expected reply code
+ * @ackid: Expected reply ACK ID
+ * @completion: Successful reply reception completion
+ */
+struct rave_sp_reply {
+ size_t length;
+ void *data;
+ u8 code;
+ u8 ackid;
+ struct completion received;
+};
+
+/**
+ * struct rave_sp_checksum - Variant specific checksum implementation details
+ *
+ * @length: Calculated checksum length
+ * @subroutine: Utilized checksum algorithm implementation
+ */
+struct rave_sp_checksum {
+ size_t length;
+ void (*subroutine)(const u8 *, size_t, u8 *);
+};
+
+/**
+ * struct rave_sp_variant_cmds - Variant specific command routines
+ *
+ * @translate: Generic to variant specific command mapping routine
+ *
+ */
+struct rave_sp_variant_cmds {
+ int (*translate)(enum rave_sp_command);
+};
+
+/**
+ * struct rave_sp_variant - RAVE supervisory processor core variant
+ *
+ * @checksum: Variant specific checksum implementation
+ * @cmd: Variant specific command pointer table
+ *
+ */
+struct rave_sp_variant {
+ const struct rave_sp_checksum *checksum;
+ struct rave_sp_variant_cmds cmd;
+};
+
+/**
+ * struct rave_sp - RAVE supervisory processor core
+ *
+ * @serdev: Pointer to underlying serdev
+ * @deframer: Stored state of the protocol deframer
+ * @ackid: ACK ID used in last reply sent to the device
+ * @bus_lock: Lock to serialize access to the device
+ * @reply_lock: Lock protecting @reply
+ * @reply: Pointer to memory to store reply payload
+ *
+ * @variant: Device variant specific information
+ * @event_notifier_list: Input event notification chain
+ *
+ */
+struct rave_sp {
+ struct serdev_device *serdev;
+ struct rave_sp_deframer deframer;
+ atomic_t ackid;
+ struct mutex bus_lock;
+ struct mutex reply_lock;
+ struct rave_sp_reply *reply;
+
+ const struct rave_sp_variant *variant;
+ struct blocking_notifier_head event_notifier_list;
+};
+
+static bool rave_sp_id_is_event(u8 code)
+{
+ return (code & 0xF0) == RAVE_SP_EVNT_BASE;
+}
+
+static void rave_sp_unregister_event_notifier(struct device *dev, void *res)
+{
+ struct rave_sp *sp = dev_get_drvdata(dev->parent);
+ struct notifier_block *nb = *(struct notifier_block **)res;
+ struct blocking_notifier_head *bnh = &sp->event_notifier_list;
+
+ WARN_ON(blocking_notifier_chain_unregister(bnh, nb));
+}
+
+int devm_rave_sp_register_event_notifier(struct device *dev,
+ struct notifier_block *nb)
+{
+ struct rave_sp *sp = dev_get_drvdata(dev->parent);
+ struct notifier_block **rcnb;
+ int ret;
+
+ rcnb = devres_alloc(rave_sp_unregister_event_notifier,
+ sizeof(*rcnb), GFP_KERNEL);
+ if (!rcnb)
+ return -ENOMEM;
+
+ ret = blocking_notifier_chain_register(&sp->event_notifier_list, nb);
+ if (!ret) {
+ *rcnb = nb;
+ devres_add(dev, rcnb);
+ } else {
+ devres_free(rcnb);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_rave_sp_register_event_notifier);
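+
+/*
+ * Usage sketch (editor's illustration, not part of this patch): a cell
+ * driver instantiated via devm_of_platform_populate() below would call
+ *
+ * devm_rave_sp_register_event_notifier(&pdev->dev, &my_nb);
+ *
+ * from its probe routine, where my_nb is a hypothetical notifier_block
+ * with .notifier_call set. Note that dev->parent must be the rave-sp
+ * serdev device, since the helper looks the core up via
+ * dev_get_drvdata(dev->parent).
+ */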
+
+static void csum_8b2c(const u8 *buf, size_t size, u8 *crc)
+{
+ *crc = *buf++;
+ size--;
+
+ while (size--)
+ *crc += *buf++;
+
+ *crc = 1 + ~(*crc);
+}
+
+static void csum_ccitt(const u8 *buf, size_t size, u8 *crc)
+{
+ const u16 calculated = crc_ccitt_false(0xffff, buf, size);
+
+ /*
+ * While the rest of the wire protocol is little-endian,
+ * the CCITT-16 CRC on RDU2 devices is sent out in big-endian order.
+ */
+ put_unaligned_be16(calculated, crc);
+}
+
+static void *stuff(unsigned char *dest, const unsigned char *src, size_t n)
+{
+ while (n--) {
+ const unsigned char byte = *src++;
+
+ switch (byte) {
+ case RAVE_SP_STX:
+ case RAVE_SP_ETX:
+ case RAVE_SP_DLE:
+ *dest++ = RAVE_SP_DLE;
+ /* FALLTHROUGH */
+ default:
+ *dest++ = byte;
+ }
+ }
+
+ return dest;
+}
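+
+/*
+ * Editor's note: stuff() implements the byte stuffing described at the
+ * top of this file; e.g. a payload byte 0x02 (== STX) is emitted as the
+ * pair 0x10 0x02. The output can therefore grow to at most 2 * n bytes,
+ * which is what RAVE_SP_TX_BUFFER_SIZE accounts for.
+ */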
+
+static int rave_sp_write(struct rave_sp *sp, const u8 *data, u8 data_size)
+{
+ const size_t checksum_length = sp->variant->checksum->length;
+ unsigned char frame[RAVE_SP_TX_BUFFER_SIZE];
+ unsigned char crc[RAVE_SP_CHECKSUM_SIZE];
+ unsigned char *dest = frame;
+ size_t length;
+
+ if (WARN_ON(checksum_length > sizeof(crc)))
+ return -ENOMEM;
+
+ if (WARN_ON(data_size > sizeof(frame)))
+ return -ENOMEM;
+
+ sp->variant->checksum->subroutine(data, data_size, crc);
+
+ *dest++ = RAVE_SP_STX;
+ dest = stuff(dest, data, data_size);
+ dest = stuff(dest, crc, checksum_length);
+ *dest++ = RAVE_SP_ETX;
+
+ length = dest - frame;
+
+ print_hex_dump(KERN_DEBUG, "rave-sp tx: ", DUMP_PREFIX_NONE,
+ 16, 1, frame, length, false);
+
+ return serdev_device_write(sp->serdev, frame, length, HZ);
+}
+
+static u8 rave_sp_reply_code(u8 command)
+{
+ /*
+ * There isn't a single rule that describes command code ->
+ * ACK code transformation, but, going through various
+ * versions of ICDs, there appear to be three distinct groups
+ * that can each be described by a simple transformation.
+ */
+ switch (command) {
+ case 0xA0 ... 0xBE:
+ /*
+ * Commands implemented by firmware found in RDU1 and
+ * older devices all seem to obey the following rule
+ */
+ return command + 0x20;
+ case 0xE0 ... 0xEF:
+ /*
+ * Events emitted by all versions of the firmware set the
+ * least significant bit to form the ACK code
+ */
+ return command | 0x01;
+ default:
+ /*
+ * Commands implemented by firmware found in RDU2 are
+ * similar to "old" commands, but they use slightly
+ * different offset
+ */
+ return command + 0x40;
+ }
+}
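+
+/*
+ * Examples of the mapping above (editor's illustration): command 0xA0
+ * (RDU1-era group) is ACKed with 0xC0, event 0xE0 is ACKed with 0xE1,
+ * and a code such as 0x11 falls into the default group and is ACKed
+ * with 0x51.
+ */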
+
+int rave_sp_exec(struct rave_sp *sp,
+ void *__data, size_t data_size,
+ void *reply_data, size_t reply_data_size)
+{
+ struct rave_sp_reply reply = {
+ .data = reply_data,
+ .length = reply_data_size,
+ .received = COMPLETION_INITIALIZER_ONSTACK(reply.received),
+ };
+ unsigned char *data = __data;
+ int command, ret = 0;
+ u8 ackid;
+
+ command = sp->variant->cmd.translate(data[0]);
+ if (command < 0)
+ return command;
+
+ ackid = atomic_inc_return(&sp->ackid);
+ reply.ackid = ackid;
+ reply.code = rave_sp_reply_code((u8)command);
+
+ mutex_lock(&sp->bus_lock);
+
+ mutex_lock(&sp->reply_lock);
+ sp->reply = &reply;
+ mutex_unlock(&sp->reply_lock);
+
+ data[0] = command;
+ data[1] = ackid;
+
+ rave_sp_write(sp, data, data_size);
+
+ if (!wait_for_completion_timeout(&reply.received, HZ)) {
+ dev_err(&sp->serdev->dev, "Command timeout\n");
+ ret = -ETIMEDOUT;
+
+ mutex_lock(&sp->reply_lock);
+ sp->reply = NULL;
+ mutex_unlock(&sp->reply_lock);
+ }
+
+ mutex_unlock(&sp->bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rave_sp_exec);
+
+static void rave_sp_receive_event(struct rave_sp *sp,
+ const unsigned char *data, size_t length)
+{
+ u8 cmd[] = {
+ [0] = rave_sp_reply_code(data[0]),
+ [1] = data[1],
+ };
+
+ rave_sp_write(sp, cmd, sizeof(cmd));
+
+ blocking_notifier_call_chain(&sp->event_notifier_list,
+ rave_sp_action_pack(data[0], data[2]),
+ NULL);
+}
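+
+/*
+ * Editor's note: an event is acknowledged by echoing its ACK code and
+ * ACK ID back to the device; e.g. event frame data { 0xE0, 0x05, ... }
+ * is answered with the two-byte message { 0xE1, 0x05 } before the
+ * notifier chain is invoked.
+ */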
+
+static void rave_sp_receive_reply(struct rave_sp *sp,
+ const unsigned char *data, size_t length)
+{
+ struct device *dev = &sp->serdev->dev;
+ struct rave_sp_reply *reply;
+ const size_t payload_length = length - 2;
+
+ mutex_lock(&sp->reply_lock);
+ reply = sp->reply;
+
+ if (reply) {
+ if (reply->code == data[0] && reply->ackid == data[1] &&
+ payload_length >= reply->length) {
+ /*
+ * We are relying on memcpy(dst, src, 0) to be a no-op
+ * when handling commands that have a no-payload reply
+ */
+ memcpy(reply->data, &data[2], reply->length);
+ complete(&reply->received);
+ sp->reply = NULL;
+ } else {
+ dev_err(dev, "Ignoring incorrect reply\n");
+ dev_dbg(dev, "Code: expected = 0x%08x received = 0x%08x\n",
+ reply->code, data[0]);
+ dev_dbg(dev, "ACK ID: expected = 0x%08x received = 0x%08x\n",
+ reply->ackid, data[1]);
+ dev_dbg(dev, "Length: expected = %zu received = %zu\n",
+ reply->length, payload_length);
+ }
+ }
+
+ mutex_unlock(&sp->reply_lock);
+}
+
+static void rave_sp_receive_frame(struct rave_sp *sp,
+ const unsigned char *data,
+ size_t length)
+{
+ const size_t checksum_length = sp->variant->checksum->length;
+ const size_t payload_length = length - checksum_length;
+ const u8 *crc_reported = &data[payload_length];
+ struct device *dev = &sp->serdev->dev;
+ u8 crc_calculated[checksum_length];
+
+ print_hex_dump(KERN_DEBUG, "rave-sp rx: ", DUMP_PREFIX_NONE,
+ 16, 1, data, length, false);
+
+ if (unlikely(length <= checksum_length)) {
+ dev_warn(dev, "Dropping short frame\n");
+ return;
+ }
+
+ sp->variant->checksum->subroutine(data, payload_length,
+ crc_calculated);
+
+ if (memcmp(crc_calculated, crc_reported, checksum_length)) {
+ dev_warn(dev, "Dropping bad frame\n");
+ return;
+ }
+
+ if (rave_sp_id_is_event(data[0]))
+ rave_sp_receive_event(sp, data, length);
+ else
+ rave_sp_receive_reply(sp, data, length);
+}
+
+static int rave_sp_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t size)
+{
+ struct device *dev = &serdev->dev;
+ struct rave_sp *sp = dev_get_drvdata(dev);
+ struct rave_sp_deframer *deframer = &sp->deframer;
+ const unsigned char *src = buf;
+ const unsigned char *end = buf + size;
+
+ while (src < end) {
+ const unsigned char byte = *src++;
+
+ switch (deframer->state) {
+ case RAVE_SP_EXPECT_SOF:
+ if (byte == RAVE_SP_STX)
+ deframer->state = RAVE_SP_EXPECT_DATA;
+ break;
+
+ case RAVE_SP_EXPECT_DATA:
+ /*
+ * Treat special byte values first
+ */
+ switch (byte) {
+ case RAVE_SP_ETX:
+ rave_sp_receive_frame(sp,
+ deframer->data,
+ deframer->length);
+ /*
+ * Once we've extracted a complete
+ * frame out of the stream, we are
+ * done: bail out and reset the
+ * framer to its initial state,
+ * regardless of whether we've
+ * consumed all of the stream.
+ */
+ goto reset_framer;
+ case RAVE_SP_STX:
+ dev_warn(dev, "Bad frame: STX before ETX\n");
+ /*
+ * If we encounter a second "start of
+ * frame" marker before seeing the
+ * corresponding "end of frame", we
+ * reset the framer and ignore both
+ * the frame started by the first SOF
+ * and the one started by the current
+ * SOF.
+ *
+ * NOTE: The above means that only a
+ * frame started by a third SOF, sent
+ * after this one, will have a chance
+ * to get through.
+ */
+ goto reset_framer;
+ case RAVE_SP_DLE:
+ deframer->state = RAVE_SP_EXPECT_ESCAPED_DATA;
+ /*
+ * If we encounter an escape sequence we
+ * need to skip it and collect the
+ * byte that follows. We do it by
+ * forcing the next iteration of the
+ * encompassing while loop.
+ */
+ continue;
+ }
+ /*
+ * The rest of the bytes, which are not
+ * special in any way, get the same
+ * treatment as escaped data - they are
+ * collected in the deframer buffer
+ */
+
+ /* FALLTHROUGH */
+
+ case RAVE_SP_EXPECT_ESCAPED_DATA:
+ deframer->data[deframer->length++] = byte;
+
+ if (deframer->length == sizeof(deframer->data)) {
+ dev_warn(dev, "Bad frame: Too long\n");
+ /*
+ * If the amount of data we've
+ * accumulated for current frame so
+ * far starts to exceed the capacity
+ * of deframer's buffer, there's
+ * nothing else we can do but to
+ * discard that data and start
+ * assembling a new frame again
+ */
+ goto reset_framer;
+ }
+
+ /*
+ * We've extracted out special byte, now we
+ * can go back to regular data collecting
+ */
+ deframer->state = RAVE_SP_EXPECT_DATA;
+ break;
+ }
+ }
+
+ /*
+ * The only way to get out of the above loop and end up here
+ * is through consuming all of the supplied data, so here we
+ * report that we processed it all.
+ */
+ return size;
+
+reset_framer:
+ /*
+ * NOTE: A number of codepaths that will drop us here will do
+ * so before consuming all 'size' bytes of the data passed by
+ * serdev layer. We rely on the fact that serdev layer will
+ * re-execute this handler with the remainder of the Rx bytes
+ * once we report actual number of bytes that we processed.
+ */
+ deframer->state = RAVE_SP_EXPECT_SOF;
+ deframer->length = 0;
+
+ return src - buf;
+}
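+
+/*
+ * Deframer trace (editor's illustration, legacy 8b2c variant): the Rx
+ * byte stream 0x02 0xA0 0x10 0x02 0x5E 0x03 is deframed into
+ * { 0xA0, 0x02, 0x5E } - the 0x10 escape is consumed, the trailing
+ * 0x5E is the checksum of payload { 0xA0, 0x02 }, and the ETX hands
+ * the frame to rave_sp_receive_frame() above.
+ */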
+
+static int rave_sp_rdu1_cmd_translate(enum rave_sp_command command)
+{
+ if (command >= RAVE_SP_CMD_STATUS &&
+ command <= RAVE_SP_CMD_CONTROL_EVENTS)
+ return command;
+
+ return -EINVAL;
+}
+
+static int rave_sp_rdu2_cmd_translate(enum rave_sp_command command)
+{
+ if (command >= RAVE_SP_CMD_GET_FIRMWARE_VERSION &&
+ command <= RAVE_SP_CMD_GET_GPIO_STATE)
+ return command;
+
+ if (command == RAVE_SP_CMD_REQ_COPPER_REV) {
+ /*
+ * As per RDU2 ICD 3.4.47, the CMD_GET_COPPER_REV code
+ * differs from that for RDU1 and is set to 0x28.
+ */
+ return 0x28;
+ }
+
+ return rave_sp_rdu1_cmd_translate(command);
+}
+
+static int rave_sp_default_cmd_translate(enum rave_sp_command command)
+{
+ /*
+ * All of the following command codes were taken from "Table :
+ * Communications Protocol Message Types" in section 3.3
+ * "MESSAGE TYPES" of Rave PIC24 ICD.
+ */
+ switch (command) {
+ case RAVE_SP_CMD_GET_FIRMWARE_VERSION:
+ return 0x11;
+ case RAVE_SP_CMD_GET_BOOTLOADER_VERSION:
+ return 0x12;
+ case RAVE_SP_CMD_BOOT_SOURCE:
+ return 0x14;
+ case RAVE_SP_CMD_SW_WDT:
+ return 0x1C;
+ case RAVE_SP_CMD_RESET:
+ return 0x1E;
+ case RAVE_SP_CMD_RESET_REASON:
+ return 0x1F;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct rave_sp_checksum rave_sp_checksum_8b2c = {
+ .length = 1,
+ .subroutine = csum_8b2c,
+};
+
+static const struct rave_sp_checksum rave_sp_checksum_ccitt = {
+ .length = 2,
+ .subroutine = csum_ccitt,
+};
+
+static const struct rave_sp_variant rave_sp_legacy = {
+ .checksum = &rave_sp_checksum_8b2c,
+ .cmd = {
+ .translate = rave_sp_default_cmd_translate,
+ },
+};
+
+static const struct rave_sp_variant rave_sp_rdu1 = {
+ .checksum = &rave_sp_checksum_8b2c,
+ .cmd = {
+ .translate = rave_sp_rdu1_cmd_translate,
+ },
+};
+
+static const struct rave_sp_variant rave_sp_rdu2 = {
+ .checksum = &rave_sp_checksum_ccitt,
+ .cmd = {
+ .translate = rave_sp_rdu2_cmd_translate,
+ },
+};
+
+static const struct of_device_id rave_sp_dt_ids[] = {
+ { .compatible = "zii,rave-sp-niu", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-mezz", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-esb", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-rdu1", .data = &rave_sp_rdu1 },
+ { .compatible = "zii,rave-sp-rdu2", .data = &rave_sp_rdu2 },
+ { /* sentinel */ }
+};
+
+static const struct serdev_device_ops rave_sp_serdev_device_ops = {
+ .receive_buf = rave_sp_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static int rave_sp_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct rave_sp *sp;
+ u32 baud;
+ int ret;
+
+ if (of_property_read_u32(dev->of_node, "current-speed", &baud)) {
+ dev_err(dev,
+ "'current-speed' is not specified in device node\n");
+ return -EINVAL;
+ }
+
+ sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
+ if (!sp)
+ return -ENOMEM;
+
+ sp->serdev = serdev;
+ dev_set_drvdata(dev, sp);
+
+ sp->variant = of_device_get_match_data(dev);
+ if (!sp->variant)
+ return -ENODEV;
+
+ mutex_init(&sp->bus_lock);
+ mutex_init(&sp->reply_lock);
+ BLOCKING_INIT_NOTIFIER_HEAD(&sp->event_notifier_list);
+
+ serdev_device_set_client_ops(serdev, &rave_sp_serdev_device_ops);
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, baud);
+
+ return devm_of_platform_populate(dev);
+}
+
+MODULE_DEVICE_TABLE(of, rave_sp_dt_ids);
+
+static struct serdev_device_driver rave_sp_drv = {
+ .probe = rave_sp_probe,
+ .driver = {
+ .name = "rave-sp",
+ .of_match_table = rave_sp_dt_ids,
+ },
+};
+module_serdev_device_driver(rave_sp_drv);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
+MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("RAVE SP core driver");
diff --git a/drivers/mfd/stm32-lptimer.c b/drivers/mfd/stm32-lptimer.c
index 075330a25f61..a00f99f36559 100644
--- a/drivers/mfd/stm32-lptimer.c
+++ b/drivers/mfd/stm32-lptimer.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* STM32 Low-Power Timer parent driver.
- *
* Copyright (C) STMicroelectronics 2017
- *
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>
- *
* Inspired by Benjamin Gaignard's stm32-timers driver
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/mfd/stm32-lptimer.h>
diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
index a6675a449409..1d347e5dfa79 100644
--- a/drivers/mfd/stm32-timers.c
+++ b/drivers/mfd/stm32-timers.c
@@ -1,9 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2016
- *
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/mfd/stm32-timers.h>
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index b93fe4c4957a..7eaa40bc703f 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -13,6 +13,7 @@
*/
#include <linux/err.h>
+#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -87,6 +88,24 @@ static struct syscon *of_syscon_register(struct device_node *np)
if (ret)
reg_io_width = 4;
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
+ syscon_config.use_hwlock = true;
+ syscon_config.hwlock_id = ret;
+ syscon_config.hwlock_mode = HWLOCK_IRQSTATE;
+ } else if (ret < 0) {
+ switch (ret) {
+ case -ENOENT:
+ /* Ignore missing hwlock, it's optional. */
+ break;
+ default:
+ pr_err("Failed to retrieve valid hwlock: %d\n", ret);
+ /* fall-through */
+ case -EPROBE_DEFER:
+ goto err_regmap;
+ }
+ }
+
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
syscon_config.max_register = resource_size(&res) - reg_io_width;
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 0f3fab47fe48..3cd958a31f36 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -124,7 +124,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
struct ti_tscadc_dev *tscadc;
struct resource *res;
struct clk *clk;
- struct device_node *node = pdev->dev.of_node;
+ struct device_node *node;
struct mfd_cell *cell;
struct property *prop;
const __be32 *cur;
diff --git a/drivers/mfd/tmio_core.c b/drivers/mfd/tmio_core.c
index 83af78c1b0eb..ebf54cc28f7a 100644
--- a/drivers/mfd/tmio_core.c
+++ b/drivers/mfd/tmio_core.c
@@ -9,6 +9,26 @@
#include <linux/export.h>
#include <linux/mfd/tmio.h>
+#define CNF_CMD 0x04
+#define CNF_CTL_BASE 0x10
+#define CNF_INT_PIN 0x3d
+#define CNF_STOP_CLK_CTL 0x40
+#define CNF_GCLK_CTL 0x41
+#define CNF_SD_CLK_MODE 0x42
+#define CNF_PIN_STATUS 0x44
+#define CNF_PWR_CTL_1 0x48
+#define CNF_PWR_CTL_2 0x49
+#define CNF_PWR_CTL_3 0x4a
+#define CNF_CARD_DETECT_MODE 0x4c
+#define CNF_SD_SLOT 0x50
+#define CNF_EXT_GCLK_CTL_1 0xf0
+#define CNF_EXT_GCLK_CTL_2 0xf1
+#define CNF_EXT_GCLK_CTL_3 0xf9
+#define CNF_SD_LED_EN_1 0xfa
+#define CNF_SD_LED_EN_2 0xfe
+
+#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG) */
+
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base)
{
/* Enable the MMC/SD Control registers */
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
index da16bf45fab4..dc94ffc6321a 100644
--- a/drivers/mfd/twl4030-audio.c
+++ b/drivers/mfd/twl4030-audio.c
@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void)
EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
- struct device_node *node)
+ struct device_node *parent)
{
+ struct device_node *node;
+
if (pdata && pdata->codec)
return true;
- if (of_find_node_by_name(node, "codec"))
+ node = of_get_child_by_name(parent, "codec");
+ if (node) {
+ of_node_put(node);
return true;
+ }
return false;
}
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index d66502d36ba0..dd19f17a1b63 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = {
};
-static bool twl6040_has_vibra(struct device_node *node)
+static bool twl6040_has_vibra(struct device_node *parent)
{
-#ifdef CONFIG_OF
- if (of_find_node_by_name(node, "vibra"))
+ struct device_node *node;
+
+ node = of_get_child_by_name(parent, "vibra");
+ if (node) {
+ of_node_put(node);
return true;
-#endif
+ }
+
return false;
}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index f1a5c2357b14..7c0fa24f9067 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -496,6 +496,10 @@ config PCI_ENDPOINT_TEST
Enable this configuration option to enable the host side test driver
for PCI Endpoint.
+config MISC_RTSX
+ tristate
+ default MISC_RTSX_PCI || MISC_RTSX_USB
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -508,4 +512,5 @@ source "drivers/misc/mic/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
+source "drivers/misc/cardreader/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 5ca5f64df478..8d8cc096063b 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
+obj-$(CONFIG_MISC_RTSX) += cardreader/
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
diff --git a/drivers/misc/cardreader/Kconfig b/drivers/misc/cardreader/Kconfig
new file mode 100644
index 000000000000..69e815e32a8c
--- /dev/null
+++ b/drivers/misc/cardreader/Kconfig
@@ -0,0 +1,20 @@
+config MISC_RTSX_PCI
+ tristate "Realtek PCI-E card reader"
+ depends on PCI
+ select MFD_CORE
+ help
+ This adds support for Realtek PCI-Express card readers including
+ rts5209, rts5227, rts522A, rts5229, rts5249, rts524A, rts525A,
+ rtl8411 and rts5260.
+ Realtek card readers support access to many types of memory cards,
+ such as Memory Stick, Memory Stick Pro, Secure Digital and
+ MultiMediaCard.
+
+config MISC_RTSX_USB
+ tristate "Realtek USB card reader"
+ depends on USB
+ select MFD_CORE
+ help
+ Select this option to get support for Realtek USB 2.0 card readers
+ including RTS5129, RTS5139, RTS5179 and RTS5170.
+ Realtek card readers support access to many types of memory cards,
+ such as Memory Stick Pro, Secure Digital and MultiMediaCard.
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
new file mode 100644
index 000000000000..9fabfcc6fa7a
--- /dev/null
+++ b/drivers/misc/cardreader/Makefile
@@ -0,0 +1,4 @@
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o
+
+obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o
+obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o
diff --git a/drivers/mfd/rtl8411.c b/drivers/misc/cardreader/rtl8411.c
index b3ae6592014a..434fd070d3e3 100644
--- a/drivers/mfd/rtl8411.c
+++ b/drivers/misc/cardreader/rtl8411.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5209.c b/drivers/misc/cardreader/rts5209.c
index b95beecf767f..ce68c48d8ec9 100644
--- a/drivers/mfd/rts5209.c
+++ b/drivers/misc/cardreader/rts5209.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5227.c b/drivers/misc/cardreader/rts5227.c
index ff296a4bf3d2..024dcba8d6c8 100644
--- a/drivers/mfd/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5229.c b/drivers/misc/cardreader/rts5229.c
index 9ed9dc84eac8..9119261337cc 100644
--- a/drivers/mfd/rts5229.c
+++ b/drivers/misc/cardreader/rts5229.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 7fcf37ba922c..dbe013abdb83 100644
--- a/drivers/mfd/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
@@ -738,4 +738,3 @@ void rts525a_init_params(struct rtsx_pcr *pcr)
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts525a_pcr_ops;
}
-
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
new file mode 100644
index 000000000000..07cb93abf685
--- /dev/null
+++ b/drivers/misc/cardreader/rts5260.c
@@ -0,0 +1,748 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2016-2017 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Steven FENG <steven_feng@realsil.com.cn>
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5260.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5260_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & IC_VERSION_MASK;
+}
+
+static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[6][3] = {
+ {0x94, 0x94, 0x94},
+ {0x11, 0x11, 0x18},
+ {0x55, 0x55, 0x5C},
+ {0x94, 0x94, 0x94},
+ {0x94, 0x94, 0x94},
+ {0xFF, 0xFF, 0xFF},
+ };
+ u8 driving_1v8[6][3] = {
+ {0x9A, 0x89, 0x89},
+ {0xC4, 0xC4, 0xC4},
+ {0x3C, 0x3C, 0x3C},
+ {0x9B, 0x99, 0x99},
+ {0x9A, 0x89, 0x89},
+ {0xFE, 0xFE, 0xFE},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
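+
+/*
+ * Editor's note: drive_sel indexes the per-voltage tables above; it
+ * comes from the vendor settings read in rtsx_base_fetch_vendor_settings()
+ * when those are valid, and otherwise from the CFG_DRIVER_TYPE_B
+ * defaults assigned in rts5260_init_params() at the bottom of this file.
+ */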
+
+static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ u32 reg;
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ if (!rtsx_vendor_setting_valid(reg)) {
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+
+ pcr->aspm_en = rtsx_reg_to_aspm(reg);
+ pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->card_drive_sel &= 0x3F;
+ pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+ if (rtsx_reg_check_reverse_socket(reg))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
+}
+
+static int rtsx_base_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5260_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0,
+ RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_ON);
+}
+
+static int rts5260_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0,
+ RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_OFF);
+}
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5260_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0xAA),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5260_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ 0,
+};
+
+/* MS Pull Control Enable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5260_ms_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+/* MS Pull Control Disable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5260_ms_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+ | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ return 0;
+}
+
+static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG0,
+ RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
+ LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_ON);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_POWERON, DV331812_POWERON);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+ msleep(20);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+ pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ sd_set_sample_push_timing_sd30(pcr);
+
+ /* Initialize SD_CFG1 register */
+ rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE);
+
+ rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+ 0xFF, SD20_RX_POS_EDGE);
+ rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ /* Reset SD_CFG3 register */
+ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+ rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
+
+ rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0);
+
+ return err;
+}
+
+static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_write_register(pcr, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ rtsx_pci_write_register(pcr, LDO_DV18_CFG,
+ DV331812_MASK, DV331812_33);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_write_register(pcr, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ rtsx_pci_write_register(pcr, LDO_DV18_CFG,
+ DV331812_MASK, DV331812_17);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8,
+ SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rtsx_pci_init_cmd(pcr);
+ rts5260_fill_driving(pcr, voltage);
+ return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+}
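+
+/*
+ * Editor's note: besides the usual SD 3.3V/1.8V signalling switch, the
+ * helper above is also used by rts5260_card_before_power_off() below to
+ * return the pads to 3.3V before card power is removed.
+ */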
+
+static void rts5260_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+ rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST);
+ rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5260_card_before_power_off(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rts5260_stop_cmd(pcr);
+ rts5260_switch_output_voltage(pcr, OUTPUT_3V3);
+
+ if (option->ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+}
+
+static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+
+ rts5260_card_before_power_off(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
+ LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_OFF);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_POWERON, DV331812_POWEROFF);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+ return err;
+}
+
+static void rts5260_init_ocp(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 mask, val;
+
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN);
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN);
+
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_THD_MASK,
+ option->sd_400mA_ocp_thd);
+
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_THD_MASK,
+ RTS5260_DVIO_OCP_THD_350);
+
+ rtsx_pci_write_register(pcr, RTS5260_DV331812_CFG,
+ RTS5260_DV331812_OCP_THD_MASK,
+ RTS5260_DV331812_OCP_THD_210);
+
+ mask = SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK;
+ val = pcr->hw_param.ocp_glitch;
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+ rtsx_pci_enable_ocp(pcr);
+ } else {
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN, 0);
+ }
+}
+
+static void rts5260_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = 0;
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
+
+ val = SD_OCP_INT_EN | SD_DETECT_EN;
+ val |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN);
+}
+
+static void rts5260_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN, 0);
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+}
+
+int rts5260_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+ return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
+}
+
+int rts5260_get_ocpstat2(struct rtsx_pcr *pcr, u8 *val)
+{
+ return rtsx_pci_read_register(pcr, REG_DV3318_OCPSTAT, val);
+}
+
+void rts5260_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+ val = SD_OCP_INT_CLR | SD_OC_CLR;
+ val |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR);
+ udelay(10);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR, 0);
+}
+
+void rts5260_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (!pcr->option.ocp_en)
+ return;
+
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+ rts5260_get_ocpstat2(pcr, &pcr->ocp_stat2);
+ if (pcr->card_exist & SD_EXIST)
+ rtsx_sd_power_off_card3v3(pcr);
+ else if (pcr->card_exist & MS_EXIST)
+ rtsx_ms_power_off_card3v3(pcr);
+
+ if (!(pcr->card_exist & MS_EXIST) && !(pcr->card_exist & SD_EXIST)) {
+ if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
+ SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+ (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER)))
+ rtsx_pci_clear_ocpstat(pcr);
+ pcr->ocp_stat = 0;
+ pcr->ocp_stat2 = 0;
+ }
+
+ if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
+ SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+ (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) {
+ if (pcr->card_exist & SD_EXIST)
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ else if (pcr->card_exist & MS_EXIST)
+ rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
+ }
+}
+
+int rts5260_init_hw(struct rtsx_pcr *pcr)
+{
+ int err;
+
+ rtsx_pci_init_ocp(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG1,
+ AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+ /* Reset L1SUB Config */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG3, 0xFF, 0x00);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CLK_FORCE_CTL,
+ CLK_PM_EN, CLK_PM_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWD_SUSPEND_EN, 0xFF, 0xFF);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ PWR_GATE_EN, PWR_GATE_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, REG_VREF,
+ PWD_SUSPND_EN, PWD_SUSPND_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RBCTL,
+ U_AUTO_DMA_EN_MASK, U_AUTO_DMA_DISABLE);
+
+ if (pcr->flags & PCR_REVERSE_SOCKET)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG,
+ OBFF_EN_MASK, OBFF_DISABLE);
+
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
+{
+ int lss_l1_1, lss_l1_2;
+
+ lss_l1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN)
+ | rtsx_check_dev_flag(pcr, PM_L1_1_EN);
+ lss_l1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN)
+ | rtsx_check_dev_flag(pcr, PM_L1_2_EN);
+
+ if (lss_l1_2) {
+ pcr_dbg(pcr, "Set parameters for L1.2.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_2_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_2_PD_FE_EN);
+ } else if (lss_l1_1) {
+ pcr_dbg(pcr, "Set parameters for L1.1.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_1_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_1_PD_FE_EN);
+ } else {
+ pcr_dbg(pcr, "Set parameters for L1.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_0_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_0_PD_FE_EN);
+ }
+
+ rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_DPHY_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_MAC_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD30_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD40_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_SYS_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ /* Option: cut APHY */
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_0,
+ 0xFF, CFG_PCIE_APHY_OFF_0_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_1,
+ 0xFF, CFG_PCIE_APHY_OFF_1_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_2,
+ 0xFF, CFG_PCIE_APHY_OFF_2_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_3,
+ 0xFF, CFG_PCIE_APHY_OFF_3_DEFAULT);
+ /* CDR DEC */
+ rtsx_pci_write_register(pcr, PWC_CDR, 0xFF, PWC_CDR_DEFAULT);
+ /* PWMPFM */
+ rtsx_pci_write_register(pcr, CFG_LP_FPWM_VALUE,
+ 0xFF, CFG_LP_FPWM_VALUE_DEFAULT);
+ /* No Power Saving WA */
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_MISC_RET_VALUE,
+ 0xFF, CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT);
+}
+
+static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u32 lval;
+
+ rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_5260, &lval);
+
+ if (lval & ASPM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & ASPM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+
+ rts5260_pwr_saving_setting(pcr);
+
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+}
+
+static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ /* Set mcu_cnt to 7 to ensure data can be sampled properly */
+ rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
+ rtsx_pci_write_register(pcr, SSC_DIV_N_0, 0xFF, 0x5D);
+
+ rts5260_init_from_cfg(pcr);
+
+ /* Force no MDIO */
+ rtsx_pci_write_register(pcr, RTS5260_AUTOLOAD_CFG4,
+ 0xFF, RTS5260_MIMO_DISABLE);
+ /* Modify SDVCC tune default parameters */
+ rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
+ RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
+
+ rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+ rts5260_init_hw(pcr);
+
+ /*
+ * If u_force_clkreq_0 is enabled, the CLKREQ# pin will be
+ * forced low, and we forcibly request the clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ return 0;
+}
+
+void rts5260_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ if (enable)
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ if (!enable)
+ val = FORCE_ASPM_CTL0;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+
+ pcr->aspm_enabled = enable;
+}
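
In DEV_ASPM_DYNAMIC mode the code above rewrites the ASPM Control field of
the PCIe Link Control register through the driver's own config-space helper.
The same read-modify-write, sketched with the generic kernel accessor
instead (an illustration of the operation, not this driver's code path;
PCI_EXP_LNKCTL_ASPM_L1 is the standard L1-enable bit):

	/* Sketch: toggle ASPM L1 via the Link Control register. */
	static void set_aspm_l1(struct pci_dev *pdev, bool enable)
	{
		pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPM_L1,
						   enable ? PCI_EXP_LNKCTL_ASPM_L1 : 0);
	}
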
+
+static void rts5260_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
+	int card_exist = interrupt & (SD_EXIST | MS_EXIST);
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* l1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ if (aspm_L1_1 || aspm_L1_2) {
+ if (rtsx_check_dev_flag(pcr,
+ LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
+ if (card_exist)
+ val &= ~L1OFF_MBIAS2_EN_5250;
+ else
+ val |= L1OFF_MBIAS2_EN_5250;
+ }
+ }
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5260_pcr_ops = {
+ .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
+ .turn_on_led = rts5260_turn_on_led,
+ .turn_off_led = rts5260_turn_off_led,
+ .extra_init_hw = rts5260_extra_init_hw,
+ .enable_auto_blink = rtsx_base_enable_auto_blink,
+ .disable_auto_blink = rtsx_base_disable_auto_blink,
+ .card_power_on = rts5260_card_power_on,
+ .card_power_off = rts5260_card_power_off,
+ .switch_output_voltage = rts5260_switch_output_voltage,
+ .force_power_down = rtsx_base_force_power_down,
+ .stop_cmd = rts5260_stop_cmd,
+ .set_aspm = rts5260_set_aspm,
+ .set_l1off_cfg_sub_d0 = rts5260_set_l1off_cfg_sub_d0,
+ .enable_ocp = rts5260_enable_ocp,
+ .disable_ocp = rts5260_disable_ocp,
+ .init_ocp = rts5260_init_ocp,
+ .process_ocp = rts5260_process_ocp,
+ .get_ocpstat = rts5260_get_ocpstat,
+ .clear_ocpstat = rts5260_clear_ocpstat,
+};
+
+void rts5260_init_params(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 2;
+
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5260_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5260_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5260_sd_pull_ctl_disable_tbl;
+ pcr->ms_pull_ctl_enable_tbl = rts5260_ms_pull_ctl_enable_tbl;
+ pcr->ms_pull_ctl_disable_tbl = rts5260_ms_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
+
+ pcr->ops = &rts5260_pcr_ops;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+	/* Init latencies: active 60 us, idle 300 us, L1OFF 3 ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+ option->ltr_l1off_snooze_sspwrgate =
+ LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
+
+ option->ocp_en = 1;
+ if (option->ocp_en)
+ hw_param->interrupt_en |= SD_OC_INT_EN;
+ hw_param->ocp_glitch = SD_OCP_GLITCH_10M | SDVIO_OCP_GLITCH_800U;
+ option->sd_400mA_ocp_thd = RTS5260_DVCC_OCP_THD_550;
+ option->sd_800mA_ocp_thd = RTS5260_DVCC_OCP_THD_970;
+}
diff --git a/drivers/misc/cardreader/rts5260.h b/drivers/misc/cardreader/rts5260.h
new file mode 100644
index 000000000000..53a1411c8868
--- /dev/null
+++ b/drivers/misc/cardreader/rts5260.h
@@ -0,0 +1,45 @@
+#ifndef __RTS5260_H__
+#define __RTS5260_H__
+
+#define RTS5260_DVCC_CTRL 0xFF73
+#define RTS5260_DVCC_OCP_EN (0x01 << 7)
+#define RTS5260_DVCC_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DVCC_POWERON (0x01 << 3)
+#define RTS5260_DVCC_OCP_CL_EN (0x01 << 2)
+
+#define RTS5260_DVIO_CTRL 0xFF75
+#define RTS5260_DVIO_OCP_EN (0x01 << 7)
+#define RTS5260_DVIO_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DVIO_POWERON (0x01 << 3)
+#define RTS5260_DVIO_OCP_CL_EN (0x01 << 2)
+
+#define RTS5260_DV331812_CFG 0xFF71
+#define RTS5260_DV331812_OCP_EN (0x01 << 7)
+#define RTS5260_DV331812_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DV331812_POWERON (0x01 << 3)
+#define RTS5260_DV331812_SEL (0x01 << 2)
+#define RTS5260_DV331812_VDD1 (0x01 << 2)
+#define RTS5260_DV331812_VDD2 (0x00 << 2)
+
+#define RTS5260_DV331812_OCP_THD_120 (0x00 << 4)
+#define RTS5260_DV331812_OCP_THD_140 (0x01 << 4)
+#define RTS5260_DV331812_OCP_THD_160 (0x02 << 4)
+#define RTS5260_DV331812_OCP_THD_180 (0x03 << 4)
+#define RTS5260_DV331812_OCP_THD_210 (0x04 << 4)
+#define RTS5260_DV331812_OCP_THD_240 (0x05 << 4)
+#define RTS5260_DV331812_OCP_THD_270 (0x06 << 4)
+#define RTS5260_DV331812_OCP_THD_300 (0x07 << 4)
+
+#define RTS5260_DVIO_OCP_THD_250 (0x00 << 4)
+#define RTS5260_DVIO_OCP_THD_300 (0x01 << 4)
+#define RTS5260_DVIO_OCP_THD_350 (0x02 << 4)
+#define RTS5260_DVIO_OCP_THD_400 (0x03 << 4)
+#define RTS5260_DVIO_OCP_THD_450 (0x04 << 4)
+#define RTS5260_DVIO_OCP_THD_500 (0x05 << 4)
+#define RTS5260_DVIO_OCP_THD_550 (0x06 << 4)
+#define RTS5260_DVIO_OCP_THD_600 (0x07 << 4)
+
+#define RTS5260_DVCC_OCP_THD_550 (0x00 << 4)
+#define RTS5260_DVCC_OCP_THD_970 (0x05 << 4)
+
+#endif
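
Each of these configuration registers packs an OCP enable bit (bit 7), a
3-bit threshold field (bits 6:4; the numeric suffixes are presumably current
limits in mA) and a power-on bit (bit 3) into one byte. A hedged sketch of
composing a DV331812 value from the macros above, written back with the
driver's rtsx_pci_write_register() helper:

	/* Sketch: VDD1 rail, OCP enabled at the 210 threshold, powered on.
	 * The 0xFF mask updates every bit of the register.
	 */
	u8 val = RTS5260_DV331812_OCP_EN | RTS5260_DV331812_OCP_THD_210 |
		 RTS5260_DV331812_POWERON | RTS5260_DV331812_VDD1;

	rtsx_pci_write_register(pcr, RTS5260_DV331812_CFG, 0xFF, val);
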
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 590fb9aad77d..fd09b0960097 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -29,7 +29,7 @@
#include <linux/idr.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <linux/mmc/card.h>
#include <asm/unaligned.h>
@@ -62,6 +62,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -334,6 +335,9 @@ EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
{
+ if (pcr->ops->stop_cmd)
+ return pcr->ops->stop_cmd(pcr);
+
rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
@@ -826,7 +830,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
return err;
/* Wait SSC clock stable */
- udelay(10);
+ udelay(SSC_CLOCK_STABLE_WAIT);
err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
if (err < 0)
return err;
@@ -963,6 +967,20 @@ static void rtsx_pci_card_detect(struct work_struct *work)
pcr->slots[RTSX_MS_CARD].p_dev);
}
+void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->process_ocp)
+ pcr->ops->process_ocp(pcr);
+}
+
+int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
+{
+ if (pcr->option.ocp_en)
+ rtsx_pci_process_ocp(pcr);
+
+ return 0;
+}
+
static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
{
struct rtsx_pcr *pcr = dev_id;
@@ -987,6 +1005,9 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
int_reg &= (pcr->bier | 0x7FFFFF);
+ if (int_reg & SD_OC_INT)
+ rtsx_pci_process_ocp_interrupt(pcr);
+
if (int_reg & SD_INT) {
if (int_reg & SD_EXIST) {
pcr->card_inserted |= SD_EXIST;
@@ -1119,6 +1140,102 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
}
#endif
+void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
+
+ if (pcr->ops->enable_ocp)
+ pcr->ops->enable_ocp(pcr);
+ else
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+}
+
+void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
+
+ if (pcr->ops->disable_ocp)
+ pcr->ops->disable_ocp(pcr);
+ else
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+}
+
+void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->init_ocp) {
+ pcr->ops->init_ocp(pcr);
+ } else {
+		struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 val = option->sd_400mA_ocp_thd;
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
+ rtsx_pci_write_register(pcr, REG_OCPPARA1,
+ SD_OCP_TIME_MASK, SD_OCP_TIME_800);
+ rtsx_pci_write_register(pcr, REG_OCPPARA2,
+ SD_OCP_THD_MASK, val);
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH,
+ SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
+ rtsx_pci_enable_ocp(pcr);
+ } else {
+ /* OC power down */
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+ }
+ }
+}
+
+int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+ if (pcr->ops->get_ocpstat)
+ return pcr->ops->get_ocpstat(pcr, val);
+ else
+ return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
+}
+
+void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->clear_ocpstat) {
+ pcr->ops->clear_ocpstat(pcr);
+ } else {
+ u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ }
+}
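
Every OCP entry point above follows the same shape: if the chip provides an
override in pcr_ops it wins, otherwise a generic register write runs. The
pattern in isolation (do_foo, REG_FOO and FOO_DEFAULT are hypothetical
placeholders, not real driver symbols):

	/* Sketch of the optional-op dispatch used throughout rtsx_pcr.c. */
	void rtsx_pci_do_foo(struct rtsx_pcr *pcr)
	{
		if (pcr->ops->do_foo)
			pcr->ops->do_foo(pcr);	/* per-chip override */
		else
			rtsx_pci_write_register(pcr, REG_FOO, 0xFF, FOO_DEFAULT);
	}
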
+
+int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
+ MS_CLK_EN | SD40_CLK_EN, 0);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+
+ rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
+
+ msleep(50);
+
+ rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
+
+ return 0;
+}
+
+int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
+ MS_CLK_EN | SD40_CLK_EN, 0);
+
+ rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
+
+ rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
+ rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
+
+ return 0;
+}
+
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
int err;
@@ -1189,6 +1306,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
case PID_5250:
case PID_524A:
case PID_525A:
+ case PID_5260:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
@@ -1265,6 +1383,9 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5286:
rtl8402_init_params(pcr);
break;
+ case 0x5260:
+ rts5260_init_params(pcr);
+ break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
@@ -1543,6 +1664,9 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev)
rtsx_pci_power_off(pcr, HOST_ENTER_S1);
pci_disable_device(pcidev);
+ free_irq(pcr->irq, (void *)pcr);
+ if (pcr->msi_en)
+ pci_disable_msi(pcr->pci);
}
#else /* CONFIG_PM */
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index ec784e04fe20..6ea1655db0bb 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -22,7 +22,7 @@
#ifndef __RTSX_PCR_H
#define __RTSX_PCR_H
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#define MIN_DIV_N_PCR 80
#define MAX_DIV_N_PCR 208
@@ -44,6 +44,8 @@
#define ASPM_MASK_NEG 0xFC
#define MASK_8_BIT_DEF 0xFF
+#define SSC_CLOCK_STABLE_WAIT 130
+
int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
@@ -57,6 +59,7 @@ void rts5249_init_params(struct rtsx_pcr *pcr);
void rts524a_init_params(struct rtsx_pcr *pcr);
void rts525a_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
+void rts5260_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
{
@@ -99,5 +102,12 @@ do { \
int rtsx_gops_pm_reset(struct rtsx_pcr *pcr);
int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency);
int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val);
+void rtsx_pci_init_ocp(struct rtsx_pcr *pcr);
+void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr);
+void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr);
+int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val);
+void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr);
+int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr);
+int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr);
#endif
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index 59d61b04c197..b97903ff1a72 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -23,7 +23,7 @@
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
static int polling_pipe = 1;
module_param(polling_pipe, int, S_IRUGO | S_IWUSR);
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 7c11bad5cded..753b1a698fc4 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -427,7 +427,7 @@ int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
-unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
+__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
return afu_poll(file, poll);
}
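
This and the following hunks are part of the tree-wide conversion of poll
handlers from unsigned int to the sparse-checked __poll_t type. The
resulting handler shape, as a minimal sketch (my_dev, its wait queue and
data_ready() are hypothetical):

	static __poll_t my_dev_poll(struct file *file, poll_table *wait)
	{
		struct my_dev *dev = file->private_data;
		__poll_t mask = 0;

		poll_wait(file, &dev->waitq, wait);
		if (data_ready(dev))
			mask |= EPOLLIN | EPOLLRDNORM;
		return mask;
	}
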
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index e46a4062904a..a798c2ccd67d 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -1081,7 +1081,7 @@ int afu_open(struct inode *inode, struct file *file);
int afu_release(struct inode *inode, struct file *file);
long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int afu_mmap(struct file *file, struct vm_area_struct *vm);
-unsigned int afu_poll(struct file *file, struct poll_table_struct *poll);
+__poll_t afu_poll(struct file *file, struct poll_table_struct *poll);
ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off);
extern const struct file_operations afu_fops;
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 76c0b0ca9388..90341ccda9bd 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -354,10 +354,10 @@ static inline bool ctx_event_pending(struct cxl_context *ctx)
return false;
}
-unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
+__poll_t afu_poll(struct file *file, struct poll_table_struct *poll)
{
struct cxl_context *ctx = file->private_data;
- int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 512a4897dbf6..7fd0bdc1436a 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -54,7 +54,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
return false;
}
- set_dma_ops(&dev->dev, &dma_direct_ops);
+ set_dma_ops(&dev->dev, &dma_nommu_ops);
set_dma_offset(&dev->dev, PAGE_OFFSET);
return _cxl_pci_associate_default_context(dev, afu);
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 305a7a464d09..4d63ac8a82e0 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -562,7 +562,7 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
static int at24_read(void *priv, unsigned int off, void *val, size_t count)
{
struct at24_data *at24 = priv;
- struct i2c_client *client;
+ struct device *dev = &at24->client[0]->dev;
char *buf = val;
int ret;
@@ -572,11 +572,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
if (off + count > at24->chip.byte_len)
return -EINVAL;
- client = at24_translate_offset(at24, &off);
-
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
@@ -592,7 +590,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
status = at24->read_func(at24, buf, off, count);
if (status < 0) {
mutex_unlock(&at24->lock);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
return status;
}
buf += status;
@@ -602,7 +600,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
mutex_unlock(&at24->lock);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
return 0;
}
@@ -610,7 +608,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
static int at24_write(void *priv, unsigned int off, void *val, size_t count)
{
struct at24_data *at24 = priv;
- struct i2c_client *client;
+ struct device *dev = &at24->client[0]->dev;
char *buf = val;
int ret;
@@ -620,11 +618,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
if (off + count > at24->chip.byte_len)
return -EINVAL;
- client = at24_translate_offset(at24, &off);
-
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
@@ -640,7 +636,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
status = at24->write_func(at24, buf, off, count);
if (status < 0) {
mutex_unlock(&at24->lock);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
return status;
}
buf += status;
@@ -650,7 +646,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
mutex_unlock(&at24->lock);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
return 0;
}
@@ -880,7 +876,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
at24->nvmem_config.reg_read = at24_read;
at24->nvmem_config.reg_write = at24_write;
at24->nvmem_config.priv = at24;
- at24->nvmem_config.stride = 4;
+ at24->nvmem_config.stride = 1;
at24->nvmem_config.word_size = 1;
at24->nvmem_config.size = chip.byte_len;
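
The at24 hunks above pin runtime PM to the first client's device rather than
the per-offset dummy client, since all dummy clients represent one physical
chip; the stride drop to 1 matches the byte-addressable access the driver
already does (word_size is 1). The get/put bracket around the transfer,
sketched on its own (do_transfer() is a hypothetical stand-in for at24's
read/write loop):

	static int pm_guarded_io(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			/* get_sync bumps the usage count even on failure */
			pm_runtime_put_noidle(dev);
			return ret;
		}

		ret = do_transfer(dev);
		pm_runtime_put(dev);
		return ret;
	}
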
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 097e3092c158..95ce3e891b1b 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -514,7 +514,7 @@ static ssize_t ilo_write(struct file *fp, const char __user *buf,
return err ? -EFAULT : len;
}
-static unsigned int ilo_poll(struct file *fp, poll_table *wait)
+static __poll_t ilo_poll(struct file *fp, poll_table *wait)
{
struct ccb_data *data = fp->private_data;
struct ccb *driver_ccb = &data->driver_ccb;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 8d53609861d8..e49888eab87d 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -651,7 +651,7 @@ out:
return retval;
}
-static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
+static __poll_t lis3lv02d_misc_poll(struct file *file, poll_table *wait)
{
struct lis3lv02d *lis3 = container_of(file->private_data,
struct lis3lv02d, miscdev);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index e825f013e54e..505b710291e6 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -542,12 +542,12 @@ static long mei_compat_ioctl(struct file *file,
*
* Return: poll mask
*/
-static unsigned int mei_poll(struct file *file, poll_table *wait)
+static __poll_t mei_poll(struct file *file, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct mei_cl *cl = file->private_data;
struct mei_device *dev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
bool notify_en;
if (WARN_ON(!cl || !cl->dev))
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index ddc9e4b08b5c..8a3e48ec37dd 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -1311,10 +1311,10 @@ static inline void _scif_poll_wait(struct file *f, wait_queue_head_t *wq,
spin_lock(&ep->lock);
}
-unsigned int
+__poll_t
__scif_pollfd(struct file *f, poll_table *wait, struct scif_endpt *ep)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
dev_dbg(scif_info.mdev.this_device,
"SCIFAPI pollfd: ep %p %s\n", ep, scif_ep_states[ep->state]);
@@ -1389,7 +1389,8 @@ scif_poll(struct scif_pollepd *ufds, unsigned int nfds, long timeout_msecs)
{
struct poll_wqueues table;
poll_table *pt;
- int i, mask, count = 0, timed_out = timeout_msecs == 0;
+ int i, count = 0, timed_out = timeout_msecs == 0;
+ __poll_t mask;
u64 timeout = timeout_msecs < 0 ? MAX_SCHEDULE_TIMEOUT
: msecs_to_jiffies(timeout_msecs);
diff --git a/drivers/misc/mic/scif/scif_epd.h b/drivers/misc/mic/scif/scif_epd.h
index 1771d7a9b8d0..f39b663da287 100644
--- a/drivers/misc/mic/scif/scif_epd.h
+++ b/drivers/misc/mic/scif/scif_epd.h
@@ -203,7 +203,7 @@ void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg);
int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block);
int __scif_flush(scif_epd_t epd);
int scif_mmap(struct vm_area_struct *vma, scif_epd_t epd);
-unsigned int __scif_pollfd(struct file *f, poll_table *wait,
+__poll_t __scif_pollfd(struct file *f, poll_table *wait,
struct scif_endpt *ep);
int __scif_pin_pages(void *addr, size_t len, int *out_prot,
int map_flags, scif_pinned_pages_t *pages);
diff --git a/drivers/misc/mic/scif/scif_fd.c b/drivers/misc/mic/scif/scif_fd.c
index f7e826142a72..5c2a57ae4f85 100644
--- a/drivers/misc/mic/scif/scif_fd.c
+++ b/drivers/misc/mic/scif/scif_fd.c
@@ -41,7 +41,7 @@ static int scif_fdmmap(struct file *f, struct vm_area_struct *vma)
return scif_mmap(vma, priv);
}
-static unsigned int scif_fdpoll(struct file *f, poll_table *wait)
+static __poll_t scif_fdpoll(struct file *f, poll_table *wait)
{
struct scif_endpt *priv = f->private_data;
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index fed992e2c258..4120ed8f0cae 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -1023,10 +1023,10 @@ __unlock_ret:
* in the card->host (TX) path, when userspace is unblocked by poll it
* must drain all available descriptors or it can stall.
*/
-static unsigned int vop_poll(struct file *f, poll_table *wait)
+static __poll_t vop_poll(struct file *f, poll_table *wait)
{
struct vop_vdev *vdev = f->private_data;
- int mask = 0;
+ __poll_t mask = 0;
mutex_lock(&vdev->vdev_mutex);
if (vop_vdev_inited(vdev)) {
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 30754927fd80..8fa68cf308e0 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -256,10 +256,10 @@ static int phantom_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int phantom_poll(struct file *file, poll_table *wait)
+static __poll_t phantom_poll(struct file *file, poll_table *wait)
{
struct phantom_device *dev = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter));
poll_wait(file, &dev->wait, wait);
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index eda38cbe8530..41f2a9f6851d 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -32,7 +32,7 @@
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/miscdevice.h>
-#include <linux/pti.h>
+#include <linux/intel-pti.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index 9918eda0e05f..a3454eb56fbf 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -258,6 +258,7 @@ static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
static const struct mmu_notifier_ops gru_mmuops = {
+ .flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
.invalidate_range_start = gru_invalidate_range_start,
.invalidate_range_end = gru_invalidate_range_end,
.release = gru_release,
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 8a16a26e9658..6640e7651533 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -166,11 +166,11 @@ static int vmci_host_close(struct inode *inode, struct file *filp)
* This is used to wake up the VMX when a VMCI call arrives, or
* to wake up select() or poll() at the next clock tick.
*/
-static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
+static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
struct vmci_host_dev *vmci_host_dev = filp->private_data;
struct vmci_ctx *context = vmci_host_dev->context;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
/* Check for VMCI calls to this VM context. */
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 8af5c2672f71..0339538c182d 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -129,23 +129,6 @@
* *_MEM state, and vice versa.
*/
-/*
- * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these
- * types are passed around to enqueue and dequeue routines. Note that
- * often the functions passed are simply wrappers around memcpy
- * itself.
- *
- * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
- * there's an unused last parameter for the hosted side. In
- * ESX, that parameter holds a buffer type.
- */
-typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
- u64 queue_offset, const void *src,
- size_t src_offset, size_t size);
-typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
- const struct vmci_queue *queue,
- u64 queue_offset, size_t size);
-
/* The Kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
struct mutex __mutex; /* Protects the queue. */
@@ -351,11 +334,10 @@ static void *qp_alloc_queue(u64 size, u32 flags)
* by traversing the offset -> page translation structure for the queue.
* Assumes that offset + size does not wrap around in the queue.
*/
-static int __qp_memcpy_to_queue(struct vmci_queue *queue,
- u64 queue_offset,
- const void *src,
- size_t size,
- bool is_iovec)
+static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
+ u64 queue_offset,
+ struct iov_iter *from,
+ size_t size)
{
struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
size_t bytes_copied = 0;
@@ -380,23 +362,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
else
to_copy = size - bytes_copied;
- if (is_iovec) {
- struct msghdr *msg = (struct msghdr *)src;
- int err;
-
- /* The iovec will track bytes_copied internally. */
- err = memcpy_from_msg((u8 *)va + page_offset,
- msg, to_copy);
- if (err != 0) {
- if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
- return VMCI_ERROR_INVALID_ARGS;
- }
- } else {
- memcpy((u8 *)va + page_offset,
- (u8 *)src + bytes_copied, to_copy);
+ if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
+ from)) {
+ if (kernel_if->host)
+ kunmap(kernel_if->u.h.page[page_index]);
+ return VMCI_ERROR_INVALID_ARGS;
}
-
bytes_copied += to_copy;
if (kernel_if->host)
kunmap(kernel_if->u.h.page[page_index]);
@@ -411,11 +382,9 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
* by traversing the offset -> page translation structure for the queue.
* Assumes that offset + size does not wrap around in the queue.
*/
-static int __qp_memcpy_from_queue(void *dest,
- const struct vmci_queue *queue,
- u64 queue_offset,
- size_t size,
- bool is_iovec)
+static int qp_memcpy_from_queue_iter(struct iov_iter *to,
+ const struct vmci_queue *queue,
+ u64 queue_offset, size_t size)
{
struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
size_t bytes_copied = 0;
@@ -427,6 +396,7 @@ static int __qp_memcpy_from_queue(void *dest,
(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
void *va;
size_t to_copy;
+ int err;
if (kernel_if->host)
va = kmap(kernel_if->u.h.page[page_index]);
@@ -440,23 +410,12 @@ static int __qp_memcpy_from_queue(void *dest,
else
to_copy = size - bytes_copied;
- if (is_iovec) {
- struct msghdr *msg = dest;
- int err;
-
- /* The iovec will track bytes_copied internally. */
- err = memcpy_to_msg(msg, (u8 *)va + page_offset,
- to_copy);
- if (err != 0) {
- if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
- return VMCI_ERROR_INVALID_ARGS;
- }
- } else {
- memcpy((u8 *)dest + bytes_copied,
- (u8 *)va + page_offset, to_copy);
+ err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
+ if (err != to_copy) {
+ if (kernel_if->host)
+ kunmap(kernel_if->u.h.page[page_index]);
+ return VMCI_ERROR_INVALID_ARGS;
}
-
bytes_copied += to_copy;
if (kernel_if->host)
kunmap(kernel_if->u.h.page[page_index]);
@@ -569,54 +528,6 @@ static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
return VMCI_SUCCESS;
}
-static int qp_memcpy_to_queue(struct vmci_queue *queue,
- u64 queue_offset,
- const void *src, size_t src_offset, size_t size)
-{
- return __qp_memcpy_to_queue(queue, queue_offset,
- (u8 *)src + src_offset, size, false);
-}
-
-static int qp_memcpy_from_queue(void *dest,
- size_t dest_offset,
- const struct vmci_queue *queue,
- u64 queue_offset, size_t size)
-{
- return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
- queue, queue_offset, size, false);
-}
-
-/*
- * Copies from a given iovec from a VMCI Queue.
- */
-static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
- u64 queue_offset,
- const void *msg,
- size_t src_offset, size_t size)
-{
-
- /*
- * We ignore src_offset because src is really a struct iovec * and will
- * maintain offset internally.
- */
- return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
-}
-
-/*
- * Copies to a given iovec from a VMCI Queue.
- */
-static int qp_memcpy_from_queue_iov(void *dest,
- size_t dest_offset,
- const struct vmci_queue *queue,
- u64 queue_offset, size_t size)
-{
- /*
- * We ignore dest_offset because dest is really a struct iovec * and
- * will maintain offset internally.
- */
- return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
-}
-
/*
* Allocates kernel VA space of specified size plus space for the queue
* and kernel interface. This is different from the guest queue allocator,
@@ -2629,12 +2540,11 @@ static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
struct vmci_queue *consume_q,
const u64 produce_q_size,
- const void *buf,
- size_t buf_size,
- vmci_memcpy_to_queue_func memcpy_to_queue)
+ struct iov_iter *from)
{
s64 free_space;
u64 tail;
+ size_t buf_size = iov_iter_count(from);
size_t written;
ssize_t result;
@@ -2654,15 +2564,15 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
written = (size_t) (free_space > buf_size ? buf_size : free_space);
tail = vmci_q_header_producer_tail(produce_q->q_header);
if (likely(tail + written < produce_q_size)) {
- result = memcpy_to_queue(produce_q, tail, buf, 0, written);
+ result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
} else {
/* Tail pointer wraps around. */
const size_t tmp = (size_t) (produce_q_size - tail);
- result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
+ result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
if (result >= VMCI_SUCCESS)
- result = memcpy_to_queue(produce_q, 0, buf, tmp,
+ result = qp_memcpy_to_queue_iter(produce_q, 0, from,
written - tmp);
}
@@ -2690,11 +2600,10 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
struct vmci_queue *consume_q,
const u64 consume_q_size,
- void *buf,
- size_t buf_size,
- vmci_memcpy_from_queue_func memcpy_from_queue,
+ struct iov_iter *to,
bool update_consumer)
{
+ size_t buf_size = iov_iter_count(to);
s64 buf_ready;
u64 head;
size_t read;
@@ -2716,15 +2625,15 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
head = vmci_q_header_consumer_head(produce_q->q_header);
if (likely(head + read < consume_q_size)) {
- result = memcpy_from_queue(buf, 0, consume_q, head, read);
+ result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
} else {
/* Head pointer wraps around. */
const size_t tmp = (size_t) (consume_q_size - head);
- result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
+ result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
if (result >= VMCI_SUCCESS)
- result = memcpy_from_queue(buf, tmp, consume_q, 0,
+ result = qp_memcpy_from_queue_iter(to, consume_q, 0,
read - tmp);
}
@@ -3118,18 +3027,21 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
int buf_type)
{
ssize_t result;
+ struct iov_iter from;
+ struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};
if (!qpair || !buf)
return VMCI_ERROR_INVALID_ARGS;
+ iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, buf_size);
+
qp_lock(qpair);
do {
result = qp_enqueue_locked(qpair->produce_q,
qpair->consume_q,
qpair->produce_q_size,
- buf, buf_size,
- qp_memcpy_to_queue);
+ &from);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3159,18 +3071,21 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
int buf_type)
{
ssize_t result;
+ struct iov_iter to;
+ struct kvec v = {.iov_base = buf, .iov_len = buf_size};
if (!qpair || !buf)
return VMCI_ERROR_INVALID_ARGS;
+ iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
+
qp_lock(qpair);
do {
result = qp_dequeue_locked(qpair->produce_q,
qpair->consume_q,
qpair->consume_q_size,
- buf, buf_size,
- qp_memcpy_from_queue, true);
+ &to, true);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3200,19 +3115,22 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
size_t buf_size,
int buf_type)
{
+ struct iov_iter to;
+ struct kvec v = {.iov_base = buf, .iov_len = buf_size};
ssize_t result;
if (!qpair || !buf)
return VMCI_ERROR_INVALID_ARGS;
+ iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
+
qp_lock(qpair);
do {
result = qp_dequeue_locked(qpair->produce_q,
qpair->consume_q,
qpair->consume_q_size,
- buf, buf_size,
- qp_memcpy_from_queue, false);
+ &to, false);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3253,8 +3171,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
result = qp_enqueue_locked(qpair->produce_q,
qpair->consume_q,
qpair->produce_q_size,
- msg, iov_size,
- qp_memcpy_to_queue_iov);
+ &msg->msg_iter);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3295,9 +3212,7 @@ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
result = qp_dequeue_locked(qpair->produce_q,
qpair->consume_q,
qpair->consume_q_size,
- msg, iov_size,
- qp_memcpy_from_queue_iov,
- true);
+ &msg->msg_iter, true);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3339,9 +3254,7 @@ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
result = qp_dequeue_locked(qpair->produce_q,
qpair->consume_q,
qpair->consume_q_size,
- msg, iov_size,
- qp_memcpy_from_queue_iov,
- false);
+ &msg->msg_iter, false);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
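
With this conversion both the kernel-buffer entry points
(vmci_qpair_enqueue/dequeue/peek) and the msghdr-based ones
(enquev/dequev/peekv) feed the same iov_iter copy routines: a kvec wraps
plain kernel memory, while the socket paths hand over msg->msg_iter
directly. The kvec setup as used here, sketched (note the 4.15-era
iov_iter_kvec() signature that still takes the direction ORed with
ITER_KVEC):

	struct kvec v = { .iov_base = buf, .iov_len = len };
	struct iov_iter from;

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, len);

	/* copy_from_iter_full() advances 'from' and fails as a whole if
	 * fewer than 'len' bytes can be copied; the driver maps that to
	 * VMCI_ERROR_INVALID_ARGS.
	 */
	if (!copy_from_iter_full(dst, len, &from))
		return -EFAULT;
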
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ccfa98af1dd3..20135a5de748 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -63,7 +63,13 @@ MODULE_ALIAS("mmc:block");
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
-#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+/*
+ * Set a 10 second timeout for polling write request busy state. Note that the
+ * mmc core sets a 3 second timeout for SD cards, and SDHCI has long had a 10
+ * second software timer to time out the whole request, so 10 seconds should
+ * be ample.
+ */
+#define MMC_BLK_TIMEOUT_MS (10 * 1000)
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
@@ -112,6 +118,7 @@ struct mmc_blk_data {
#define MMC_BLK_WRITE BIT(1)
#define MMC_BLK_DISCARD BIT(2)
#define MMC_BLK_SECDISCARD BIT(3)
+#define MMC_BLK_CQE_RECOVERY BIT(4)
/*
* Only set in main mmc_blk_data associated
@@ -189,7 +196,7 @@ static void mmc_blk_put(struct mmc_blk_data *md)
md->usage--;
if (md->usage == 0) {
int devidx = mmc_get_devidx(md->disk);
- blk_cleanup_queue(md->queue.queue);
+ blk_put_queue(md->queue.queue);
ida_simple_remove(&mmc_blk_ida, devidx);
put_disk(md->disk);
kfree(md);
@@ -921,14 +928,54 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
return 0;
}
+static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
+{
+ if (host->actual_clock)
+ return host->actual_clock / 1000;
+
+ /* Clock may be subject to a divisor, fudge it by a factor of 2. */
+ if (host->ios.clock)
+ return host->ios.clock / 2000;
+
+	/* How can there be no clock? */
+ WARN_ON_ONCE(1);
+ return 100; /* 100 kHz is minimum possible value */
+}
+
+static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
+ struct mmc_data *data)
+{
+ unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
+ unsigned int khz;
+
+ if (data->timeout_clks) {
+ khz = mmc_blk_clock_khz(host);
+ ms += DIV_ROUND_UP(data->timeout_clks, khz);
+ }
+
+ return ms;
+}
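
As a worked example of the helper above: with data->timeout_ns covering
100 ms (100,000,000 ns) and data->timeout_clks = 50000 on a card whose
actual clock is 25 MHz (25000 kHz), the result is
100 + DIV_ROUND_UP(50000, 25000) = 102 ms. If no actual_clock is reported
and ios.clock is 50 MHz, the divide-by-2000 fudge makes the clock term use
25000 kHz as well.
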
+
+static inline bool mmc_blk_in_tran_state(u32 status)
+{
+ /*
+ * Some cards mishandle the status bits, so make sure to check both the
+ * busy indication and the card state.
+ */
+ return status & R1_READY_FOR_DATA &&
+ (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
+}
+
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
- bool hw_busy_detect, struct request *req, bool *gen_err)
+ struct request *req, u32 *resp_errs)
{
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
int err = 0;
u32 status;
do {
+ bool done = time_after(jiffies, timeout);
+
err = __mmc_send_status(card, &status, 5);
if (err) {
pr_err("%s: error %d requesting status\n",
@@ -936,25 +983,18 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
return err;
}
- if (status & R1_ERROR) {
- pr_err("%s: %s: error sending status cmd, status %#x\n",
- req->rq_disk->disk_name, __func__, status);
- *gen_err = true;
- }
-
- /* We may rely on the host hw to handle busy detection.*/
- if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
- hw_busy_detect)
- break;
+ /* Accumulate any response error bits seen */
+ if (resp_errs)
+ *resp_errs |= status;
/*
* Timeout if the device never becomes ready for data and never
* leaves the program state.
*/
- if (time_after(jiffies, timeout)) {
- pr_err("%s: Card stuck in programming state! %s %s\n",
+ if (done) {
+ pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n",
mmc_hostname(card->host),
- req->rq_disk->disk_name, __func__);
+ req->rq_disk->disk_name, __func__, status);
return -ETIMEDOUT;
}
@@ -963,229 +1003,11 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
* so make sure to check both the busy
* indication and the card state.
*/
- } while (!(status & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+ } while (!mmc_blk_in_tran_state(status));
return err;
}
-static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
- struct request *req, bool *gen_err, u32 *stop_status)
-{
- struct mmc_host *host = card->host;
- struct mmc_command cmd = {};
- int err;
- bool use_r1b_resp = rq_data_dir(req) == WRITE;
-
- /*
- * Normally we use R1B responses for WRITE, but in cases where the host
- * has specified a max_busy_timeout we need to validate it. A failure
- * means we need to prevent the host from doing hw busy detection, which
- * is done by converting to a R1 response instead.
- */
- if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
- use_r1b_resp = false;
-
- cmd.opcode = MMC_STOP_TRANSMISSION;
- if (use_r1b_resp) {
- cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = timeout_ms;
- } else {
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
- }
-
- err = mmc_wait_for_cmd(host, &cmd, 5);
- if (err)
- return err;
-
- *stop_status = cmd.resp[0];
-
- /* No need to check card status in case of READ. */
- if (rq_data_dir(req) == READ)
- return 0;
-
- if (!mmc_host_is_spi(host) &&
- (*stop_status & R1_ERROR)) {
- pr_err("%s: %s: general error sending stop command, resp %#x\n",
- req->rq_disk->disk_name, __func__, *stop_status);
- *gen_err = true;
- }
-
- return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
-}
-
-#define ERR_NOMEDIUM 3
-#define ERR_RETRY 2
-#define ERR_ABORT 1
-#define ERR_CONTINUE 0
-
-static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
- bool status_valid, u32 status)
-{
- switch (error) {
- case -EILSEQ:
- /* response crc error, retry the r/w cmd */
- pr_err("%s: %s sending %s command, card status %#x\n",
- req->rq_disk->disk_name, "response CRC error",
- name, status);
- return ERR_RETRY;
-
- case -ETIMEDOUT:
- pr_err("%s: %s sending %s command, card status %#x\n",
- req->rq_disk->disk_name, "timed out", name, status);
-
- /* If the status cmd initially failed, retry the r/w cmd */
- if (!status_valid) {
- pr_err("%s: status not valid, retrying timeout\n",
- req->rq_disk->disk_name);
- return ERR_RETRY;
- }
-
- /*
- * If it was a r/w cmd crc error, or illegal command
- * (eg, issued in wrong state) then retry - we should
- * have corrected the state problem above.
- */
- if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
- pr_err("%s: command error, retrying timeout\n",
- req->rq_disk->disk_name);
- return ERR_RETRY;
- }
-
- /* Otherwise abort the command */
- return ERR_ABORT;
-
- default:
- /* We don't understand the error code the driver gave us */
- pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
- req->rq_disk->disk_name, error, status);
- return ERR_ABORT;
- }
-}
-
-/*
- * Initial r/w and stop cmd error recovery.
- * We don't know whether the card received the r/w cmd or not, so try to
- * restore things back to a sane state. Essentially, we do this as follows:
- * - Obtain card status. If the first attempt to obtain card status fails,
- * the status word will reflect the failed status cmd, not the failed
- * r/w cmd. If we fail to obtain card status, it suggests we can no
- * longer communicate with the card.
- * - Check the card state. If the card received the cmd but there was a
- * transient problem with the response, it might still be in a data transfer
- * mode. Try to send it a stop command. If this fails, we can't recover.
- * - If the r/w cmd failed due to a response CRC error, it was probably
- * transient, so retry the cmd.
- * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
- * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
- * illegal cmd, retry.
- * Otherwise we don't understand what happened, so abort.
- */
-static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
- struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
-{
- bool prev_cmd_status_valid = true;
- u32 status, stop_status = 0;
- int err, retry;
-
- if (mmc_card_removed(card))
- return ERR_NOMEDIUM;
-
- /*
- * Try to get card status which indicates both the card state
- * and why there was no response. If the first attempt fails,
- * we can't be sure the returned status is for the r/w command.
- */
- for (retry = 2; retry >= 0; retry--) {
- err = __mmc_send_status(card, &status, 0);
- if (!err)
- break;
-
- /* Re-tune if needed */
- mmc_retune_recheck(card->host);
-
- prev_cmd_status_valid = false;
- pr_err("%s: error %d sending status command, %sing\n",
- req->rq_disk->disk_name, err, retry ? "retry" : "abort");
- }
-
- /* We couldn't get a response from the card. Give up. */
- if (err) {
- /* Check if the card is removed */
- if (mmc_detect_card_removed(card->host))
- return ERR_NOMEDIUM;
- return ERR_ABORT;
- }
-
- /* Flag ECC errors */
- if ((status & R1_CARD_ECC_FAILED) ||
- (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
- (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
- *ecc_err = true;
-
- /* Flag General errors */
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
- if ((status & R1_ERROR) ||
- (brq->stop.resp[0] & R1_ERROR)) {
- pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, __func__,
- brq->stop.resp[0], status);
- *gen_err = true;
- }
-
- /*
- * Check the current card state. If it is in some data transfer
- * mode, tell it to stop (and hopefully transition back to TRAN.)
- */
- if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
- R1_CURRENT_STATE(status) == R1_STATE_RCV) {
- err = send_stop(card,
- DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
- req, gen_err, &stop_status);
- if (err) {
- pr_err("%s: error %d sending stop command\n",
- req->rq_disk->disk_name, err);
- /*
- * If the stop cmd also timed out, the card is probably
- * not present, so abort. Other errors are bad news too.
- */
- return ERR_ABORT;
- }
-
- if (stop_status & R1_CARD_ECC_FAILED)
- *ecc_err = true;
- }
-
- /* Check for set block count errors */
- if (brq->sbc.error)
- return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
- prev_cmd_status_valid, status);
-
- /* Check for r/w command errors */
- if (brq->cmd.error)
- return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
- prev_cmd_status_valid, status);
-
- /* Data errors */
- if (!brq->stop.error)
- return ERR_CONTINUE;
-
- /* Now for stop errors. These aren't fatal to the transfer. */
- pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq->stop.error,
- brq->cmd.resp[0], status);
-
- /*
- * Subsitute in our own stop status as this will give the error
- * state which happened during the execution of the r/w command.
- */
- if (stop_status) {
- brq->stop.resp[0] = stop_status;
- brq->stop.error = 0;
- }
- return ERR_CONTINUE;
-}
-
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
int type)
{
@@ -1281,7 +1103,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
break;
}
mq_rq->drv_op_result = ret;
- blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+ blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
@@ -1324,7 +1146,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
else
mmc_blk_reset_success(md, type);
fail:
- blk_end_request(req, status, blk_rq_bytes(req));
+ blk_mq_end_request(req, status);
}
static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
@@ -1394,7 +1216,7 @@ out_retry:
if (!err)
mmc_blk_reset_success(md, type);
out:
- blk_end_request(req, status, blk_rq_bytes(req));
+ blk_mq_end_request(req, status);
}
static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
@@ -1404,7 +1226,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
int ret = 0;
ret = mmc_flush_cache(card);
- blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+ blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
/*
@@ -1430,15 +1252,18 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
}
}
-#define CMD_ERRORS \
- (R1_OUT_OF_RANGE | /* Command argument out of range */ \
- R1_ADDRESS_ERROR | /* Misaligned address */ \
+#define CMD_ERRORS_EXCL_OOR \
+ (R1_ADDRESS_ERROR | /* Misaligned address */ \
R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
R1_WP_VIOLATION | /* Tried to write to protected block */ \
R1_CARD_ECC_FAILED | /* Card ECC failed */ \
R1_CC_ERROR | /* Card controller error */ \
R1_ERROR) /* General/unknown error */
+#define CMD_ERRORS \
+ (CMD_ERRORS_EXCL_OOR | \
+	 R1_OUT_OF_RANGE)	/* Command argument out of range */
+
static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
u32 val;
@@ -1481,116 +1306,6 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
}
}
-static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
- struct mmc_async_req *areq)
-{
- struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
- areq);
- struct mmc_blk_request *brq = &mq_mrq->brq;
- struct request *req = mmc_queue_req_to_req(mq_mrq);
- int need_retune = card->host->need_retune;
- bool ecc_err = false;
- bool gen_err = false;
-
- /*
- * sbc.error indicates a problem with the set block count
- * command. No data will have been transferred.
- *
- * cmd.error indicates a problem with the r/w command. No
- * data will have been transferred.
- *
- * stop.error indicates a problem with the stop command. Data
- * may have been transferred, or may still be transferring.
- */
-
- mmc_blk_eval_resp_error(brq);
-
- if (brq->sbc.error || brq->cmd.error ||
- brq->stop.error || brq->data.error) {
- switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
- case ERR_RETRY:
- return MMC_BLK_RETRY;
- case ERR_ABORT:
- return MMC_BLK_ABORT;
- case ERR_NOMEDIUM:
- return MMC_BLK_NOMEDIUM;
- case ERR_CONTINUE:
- break;
- }
- }
-
- /*
- * Check for errors relating to the execution of the
- * initial command - such as address errors. No data
- * has been transferred.
- */
- if (brq->cmd.resp[0] & CMD_ERRORS) {
- pr_err("%s: r/w command failed, status = %#x\n",
- req->rq_disk->disk_name, brq->cmd.resp[0]);
- return MMC_BLK_ABORT;
- }
-
- /*
- * Everything else is either success, or a data error of some
- * kind. If it was a write, we may have transitioned to
- * program mode, which we have to wait for it to complete.
- */
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
- int err;
-
- /* Check stop command response */
- if (brq->stop.resp[0] & R1_ERROR) {
- pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
- req->rq_disk->disk_name, __func__,
- brq->stop.resp[0]);
- gen_err = true;
- }
-
- err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
- &gen_err);
- if (err)
- return MMC_BLK_CMD_ERR;
- }
-
- /* if general error occurs, retry the write operation. */
- if (gen_err) {
- pr_warn("%s: retrying write for general error\n",
- req->rq_disk->disk_name);
- return MMC_BLK_RETRY;
- }
-
- /* Some errors (ECC) are flagged on the next commmand, so check stop, too */
- if (brq->data.error || brq->stop.error) {
- if (need_retune && !brq->retune_retry_done) {
- pr_debug("%s: retrying because a re-tune was needed\n",
- req->rq_disk->disk_name);
- brq->retune_retry_done = 1;
- return MMC_BLK_RETRY;
- }
- pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq->data.error ?: brq->stop.error,
- (unsigned)blk_rq_pos(req),
- (unsigned)blk_rq_sectors(req),
- brq->cmd.resp[0], brq->stop.resp[0]);
-
- if (rq_data_dir(req) == READ) {
- if (ecc_err)
- return MMC_BLK_ECC_ERR;
- return MMC_BLK_DATA_ERR;
- } else {
- return MMC_BLK_CMD_ERR;
- }
- }
-
- if (!brq->data.bytes_xfered)
- return MMC_BLK_RETRY;
-
- if (blk_rq_bytes(req) != brq->data.bytes_xfered)
- return MMC_BLK_PARTIAL;
-
- return MMC_BLK_SUCCESS;
-}
-
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
int disable_multi, bool *do_rel_wr_p,
bool *do_data_tag_p)
@@ -1706,8 +1421,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.sg_len = i;
}
- mqrq->areq.mrq = &brq->mrq;
-
if (do_rel_wr_p)
*do_rel_wr_p = do_rel_wr;
@@ -1715,6 +1428,138 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
*do_data_tag_p = do_data_tag;
}
+#define MMC_CQE_RETRIES 2
+
+static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct request_queue *q = req->q;
+ struct mmc_host *host = mq->card->host;
+ unsigned long flags;
+ bool put_card;
+ int err;
+
+ mmc_cqe_post_req(host, mrq);
+
+ if (mrq->cmd && mrq->cmd->error)
+ err = mrq->cmd->error;
+ else if (mrq->data && mrq->data->error)
+ err = mrq->data->error;
+ else
+ err = 0;
+
+ if (err) {
+ if (mqrq->retries++ < MMC_CQE_RETRIES)
+ blk_mq_requeue_request(req, true);
+ else
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ } else if (mrq->data) {
+ if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
+ } else {
+ blk_mq_end_request(req, BLK_STS_OK);
+ }
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+ mmc_cqe_check_busy(mq);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (!mq->cqe_busy)
+ blk_mq_run_hw_queues(q, true);
+
+ if (put_card)
+ mmc_put_card(mq->card, &mq->ctx);
+}
+
+void mmc_blk_cqe_recovery(struct mmc_queue *mq)
+{
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ int err;
+
+ pr_debug("%s: CQE recovery start\n", mmc_hostname(host));
+
+ err = mmc_cqe_recovery(host);
+ if (err)
+ mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
+ else
+ mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+
+ pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
+}
+
+static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+
+ /*
+	 * Block layer timeouts race with completions, which means the normal
+	 * completion path cannot be used during recovery.
+ */
+ if (mq->in_recovery)
+ mmc_blk_cqe_complete_rq(mq, req);
+ else
+ blk_mq_complete_request(req);
+}
+
+static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mrq->done = mmc_blk_cqe_req_done;
+ mrq->recovery_notifier = mmc_cqe_recovery_notifier;
+
+ return mmc_cqe_start_req(host, mrq);
+}
+
+static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
+ struct request *req)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+
+ memset(brq, 0, sizeof(*brq));
+
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.tag = req->tag;
+
+ return &brq->mrq;
+}
+
+static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
+
+ mrq->cmd->opcode = MMC_SWITCH;
+ mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_FLUSH_CACHE << 16) |
+ (1 << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;
+
+ return mmc_blk_cqe_start_req(mq->card->host, mrq);
+}
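
The DCMD built above is an ordinary CMD6 (MMC_SWITCH) asking the card to
flush its cache: the 32-bit argument packs the access mode, EXT_CSD byte
index, value and command set into fixed byte lanes. Spelled out as a sketch
(mmc_switch_arg() is a hypothetical helper; the kernel's mmc_switch() builds
the same argument internally):

	/* arg[31:24] mode, arg[23:16] EXT_CSD index, arg[15:8] value,
	 * arg[7:0] command set.
	 */
	static u32 mmc_switch_arg(u8 mode, u8 index, u8 value, u8 cmd_set)
	{
		return ((u32)mode << 24) | ((u32)index << 16) |
		       ((u32)value << 8) | cmd_set;
	}
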
+
+static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+ mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
+
+ return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
+}
+
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
@@ -1779,318 +1624,637 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
brq->mrq.sbc = &brq->sbc;
}
+}
+
+#define MMC_MAX_RETRIES 5
+#define MMC_DATA_RETRIES 2
+#define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1)
- mqrq->areq.err_check = mmc_blk_err_check;
+static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
+{
+ struct mmc_command cmd = {
+ .opcode = MMC_STOP_TRANSMISSION,
+ .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
+ /* Some hosts wait for busy anyway, so provide a busy timeout */
+ .busy_timeout = timeout,
+ };
+
+ return mmc_wait_for_cmd(card->host, &cmd, 5);
}
-static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
- struct mmc_blk_request *brq, struct request *req,
- bool old_req_pending)
+static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
{
- bool req_pending;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
+ int err;
- /*
- * If this is an SD card and we're writing, we can first
- * mark the known good sectors as ok.
- *
- * If the card is not SD, we can still ok written sectors
- * as reported by the controller (which might be less than
- * the real number of written sectors, but never more).
- */
- if (mmc_card_sd(card)) {
- u32 blocks;
+ mmc_retune_hold_now(card->host);
+
+ mmc_blk_send_stop(card, timeout);
+
+ err = card_busy_detect(card, timeout, req, NULL);
+
+ mmc_retune_release(card->host);
+
+ return err;
+}
+
+#define MMC_READ_SINGLE_RETRIES 2
+
+/* Single sector read during recovery */
+static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ blk_status_t error = BLK_STS_OK;
+ int retries = 0;
+
+ do {
+ u32 status;
int err;
- err = mmc_sd_num_wr_blocks(card, &blocks);
+ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+
+ mmc_wait_for_req(host, mrq);
+
+ err = mmc_send_status(card, &status);
if (err)
- req_pending = old_req_pending;
+ goto error_exit;
+
+ if (!mmc_host_is_spi(host) &&
+ !mmc_blk_in_tran_state(status)) {
+ err = mmc_blk_fix_state(card, req);
+ if (err)
+ goto error_exit;
+ }
+
+ if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
+ continue;
+
+ retries = 0;
+
+ if (mrq->cmd->error ||
+ mrq->data->error ||
+ (!mmc_host_is_spi(host) &&
+ (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
+ error = BLK_STS_IOERR;
else
- req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9);
- } else {
- req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered);
- }
- return req_pending;
+ error = BLK_STS_OK;
+
+ } while (blk_update_request(req, error, 512));
+
+ return;
+
+error_exit:
+ mrq->data->bytes_xfered = 0;
+ blk_update_request(req, BLK_STS_IOERR, 512);
+ /* Let the remainder of the request be tried again */
+ if (mqrq->retries > MMC_MAX_RETRIES - 1)
+ mqrq->retries = MMC_MAX_RETRIES - 1;
}
-static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
- struct request *req,
- struct mmc_queue_req *mqrq)
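+/*
+ * The OUT_OF_RANGE status bit is only a real error when the transfer was
+ * bounded by SET_BLOCK_COUNT (CMD23), i.e. when mrq.sbc is set; an
+ * open-ended transfer stopped by CMD12 can legitimately run past the last
+ * address and set the bit, so it is masked out in that case.
+ */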
+static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
{
- if (mmc_card_removed(card))
- req->rq_flags |= RQF_QUIET;
- while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
- mq->qcnt--;
+ return !!brq->mrq.sbc;
}
-/**
- * mmc_blk_rw_try_restart() - tries to restart the current async request
- * @mq: the queue with the card and host to restart
- * @req: a new request that want to be started after the current one
+static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
+{
+ return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
+}
+
+/*
+ * Check for errors the host controller driver might not have seen, such as
+ * response mode errors or an invalid card state.
+ */
+static bool mmc_blk_status_error(struct request *req, u32 status)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct mmc_queue *mq = req->q->queuedata;
+ u32 stop_err_bits;
+
+ if (mmc_host_is_spi(mq->card->host))
+ return false;
+
+ stop_err_bits = mmc_blk_stop_err_bits(brq);
+
+ return brq->cmd.resp[0] & CMD_ERRORS ||
+ brq->stop.resp[0] & stop_err_bits ||
+ status & stop_err_bits ||
+ (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status));
+}
+
+static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
+{
+ return !brq->sbc.error && !brq->cmd.error &&
+ !(brq->cmd.resp[0] & CMD_ERRORS);
+}
+
+/*
+ * Requests are completed by mmc_blk_mq_complete_rq() which sets simple
+ * policy:
+ * 1. A request that has transferred at least some data is considered
+ * successful and will be requeued if there is remaining data to
+ * transfer.
+ * 2. Otherwise the number of retries is incremented and the request
+ * will be requeued if there are remaining retries.
+ * 3. Otherwise the request will be errored out.
+ * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and
+ * mqrq->retries. So there are only 4 possible actions here:
+ * 1. do not accept the bytes_xfered value i.e. set it to zero
+ * 2. change mqrq->retries to determine the number of retries
+ * 3. try to reset the card
+ * 4. read one sector at a time
*/
-static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
- struct mmc_queue_req *mqrq)
+static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
{
- if (!req)
+ int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = mq->card;
+ u32 status;
+ u32 blocks;
+ int err;
+
+ /*
+ * Check for errors the host driver itself might not have seen, and set
+ * the number of bytes transferred to zero in that case.
+ */
+ err = __mmc_send_status(card, &status, 0);
+ if (err || mmc_blk_status_error(req, status))
+ brq->data.bytes_xfered = 0;
+
+ mmc_retune_release(card->host);
+
+ /*
+ * Try again to get the status. This also provides an opportunity for
+ * re-tuning.
+ */
+ if (err)
+ err = __mmc_send_status(card, &status, 0);
+
+ /*
+ * If the card has been removed, there is nothing more to do now that
+ * the number of bytes transferred has been updated.
+ */
+ if (err && mmc_detect_card_removed(card->host))
return;
+ /* Try to get back to "tran" state */
+ if (!mmc_host_is_spi(mq->card->host) &&
+ (err || !mmc_blk_in_tran_state(status)))
+ err = mmc_blk_fix_state(mq->card, req);
+
/*
- * If the card was removed, just cancel everything and return.
+ * Special case for SD cards where the card might record the number of
+ * blocks written.
*/
- if (mmc_card_removed(mq->card)) {
- req->rq_flags |= RQF_QUIET;
- blk_end_request_all(req, BLK_STS_IOERR);
- mq->qcnt--; /* FIXME: just set to 0? */
+ if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
+ rq_data_dir(req) == WRITE) {
+ if (mmc_sd_num_wr_blocks(card, &blocks))
+ brq->data.bytes_xfered = 0;
+ else
+ brq->data.bytes_xfered = blocks << 9;
+ }
+
+ /* Reset if the card is in a bad state */
+ if (!mmc_host_is_spi(mq->card->host) &&
+ err && mmc_blk_reset(md, card->host, type)) {
+ pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
+ mqrq->retries = MMC_NO_RETRIES;
+ return;
+ }
+
+ /*
+ * If any data was transferred, just return; anything remaining on the
+ * request will get requeued.
+ */
+ if (brq->data.bytes_xfered)
+ return;
+
+ /* Reset before last retry */
+ if (mqrq->retries + 1 == MMC_MAX_RETRIES)
+ mmc_blk_reset(md, card->host, type);
+
+ /* Command errors fail fast, so use all MMC_MAX_RETRIES */
+ if (brq->sbc.error || brq->cmd.error)
+ return;
+
+ /* Reduce the remaining retries for data errors */
+ if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
+ mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
+ return;
+ }
+
+ /* FIXME: Missing single sector read for large sector size */
+ if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
+ brq->data.blocks > 1) {
+ /* Read one sector at a time */
+ mmc_blk_read_single(mq, req);
return;
}
- /* Else proceed and try to restart the current async request */
- mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
- mmc_start_areq(mq->card->host, &mqrq->areq, NULL);
}
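
A condensed sketch of the completion policy spelled out in the block comment above; the enum and function names are invented for illustration, and the real decisions are made by mmc_blk_mq_complete_rq() below:

	/* Illustrative only: the three outcomes, keyed purely off bytes_xfered
	 * and mqrq->retries, as the policy comment describes. */
	enum mmc_blk_outcome { MMC_BLK_REQUEUE_REST, MMC_BLK_RETRY_REQ, MMC_BLK_FAIL_REQ };

	static enum mmc_blk_outcome mmc_blk_policy_sketch(unsigned int bytes_xfered,
							  unsigned int retries)
	{
		if (bytes_xfered)
			return MMC_BLK_REQUEUE_REST;  /* partial progress counts as success */
		if (retries < MMC_MAX_RETRIES)
			return MMC_BLK_RETRY_REQ;     /* requeue and retry the whole request */
		return MMC_BLK_FAIL_REQ;              /* out of retries: error out */
	}
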
-static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
+static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
{
- struct mmc_blk_data *md = mq->blkdata;
- struct mmc_card *card = md->queue.card;
- struct mmc_blk_request *brq;
- int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
- enum mmc_blk_status status;
- struct mmc_queue_req *mqrq_cur = NULL;
- struct mmc_queue_req *mq_rq;
- struct request *old_req;
- struct mmc_async_req *new_areq;
- struct mmc_async_req *old_areq;
- bool req_pending = true;
+ mmc_blk_eval_resp_error(brq);
+
+ return brq->sbc.error || brq->cmd.error || brq->stop.error ||
+ brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
+}
+
+static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ u32 status = 0;
+ int err;
+
+ if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
+ return 0;
+
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status);
+
+ /*
+ * Do not assume data transferred correctly if there are any error bits
+ * set.
+ */
+ if (status & mmc_blk_stop_err_bits(&mqrq->brq)) {
+ mqrq->brq.data.bytes_xfered = 0;
+ err = err ? err : -EIO;
+ }
+
+ /* Copy the exception bit so it will be seen later on */
+ if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT)
+ mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
+
+ return err;
+}
- if (new_req) {
- mqrq_cur = req_to_mmc_queue_req(new_req);
- mq->qcnt++;
+static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
+ struct request *req)
+{
+ int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+
+ mmc_blk_reset_success(mq->blkdata, type);
+}
+
+static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;
+
+ if (nr_bytes) {
+ if (blk_update_request(req, BLK_STS_OK, nr_bytes))
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
+ } else if (!blk_rq_bytes(req)) {
+ __blk_mq_end_request(req, BLK_STS_IOERR);
+ } else if (mqrq->retries++ < MMC_MAX_RETRIES) {
+ blk_mq_requeue_request(req, true);
+ } else {
+ if (mmc_card_removed(mq->card))
+ req->rq_flags |= RQF_QUIET;
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ }
+}
+
+static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
+ struct mmc_queue_req *mqrq)
+{
+ return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) &&
+ (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT ||
+ mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT);
+}
+
+static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
+ struct mmc_queue_req *mqrq)
+{
+ if (mmc_blk_urgent_bkops_needed(mq, mqrq))
+ mmc_start_bkops(mq->card, true);
+}
+
+void mmc_blk_mq_complete(struct request *req)
+{
+ struct mmc_queue *mq = req->q->queuedata;
+
+ if (mq->use_cqe)
+ mmc_blk_cqe_complete_rq(mq, req);
+ else
+ mmc_blk_mq_complete_rq(mq, req);
+}
+
+static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+
+ if (mmc_blk_rq_error(&mqrq->brq) ||
+ mmc_blk_card_busy(mq->card, req)) {
+ mmc_blk_mq_rw_recovery(mq, req);
+ } else {
+ mmc_blk_rw_reset_success(mq, req);
+ mmc_retune_release(host);
+ }
+
+ mmc_blk_urgent_bkops(mq, mqrq);
+}
+
+static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
+{
+ struct request_queue *q = req->q;
+ unsigned long flags;
+ bool put_card;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (put_card)
+ mmc_put_card(mq->card, &mq->ctx);
+}
+
+static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_host *host = mq->card->host;
+
+ mmc_post_req(host, mrq, 0);
+
+ /*
+ * Block layer timeouts race with completions, which means the normal
+ * completion path cannot be used during recovery.
+ */
+ if (mq->in_recovery)
+ mmc_blk_mq_complete_rq(mq, req);
+ else
+ blk_mq_complete_request(req);
+
+ mmc_blk_mq_dec_in_flight(mq, req);
+}
+
+void mmc_blk_mq_recovery(struct mmc_queue *mq)
+{
+ struct request *req = mq->recovery_req;
+ struct mmc_host *host = mq->card->host;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+ mq->recovery_req = NULL;
+ mq->rw_wait = false;
+
+ if (mmc_blk_rq_error(&mqrq->brq)) {
+ mmc_retune_hold_now(host);
+ mmc_blk_mq_rw_recovery(mq, req);
}
- if (!mq->qcnt)
+ mmc_blk_urgent_bkops(mq, mqrq);
+
+ mmc_blk_mq_post_req(mq, req);
+}
+
+static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
+ struct request **prev_req)
+{
+ if (mmc_host_done_complete(mq->card->host))
return;
- do {
- if (new_req) {
- /*
- * When 4KB native sector is enabled, only 8 blocks
- * multiple read or write is allowed
- */
- if (mmc_large_sector(card) &&
- !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
- pr_err("%s: Transfer size is not 4KB sector size aligned\n",
- new_req->rq_disk->disk_name);
- mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur);
- return;
- }
+ mutex_lock(&mq->complete_lock);
- mmc_blk_rw_rq_prep(mqrq_cur, card, 0, mq);
- new_areq = &mqrq_cur->areq;
- } else
- new_areq = NULL;
+ if (!mq->complete_req)
+ goto out_unlock;
- old_areq = mmc_start_areq(card->host, new_areq, &status);
- if (!old_areq) {
- /*
- * We have just put the first request into the pipeline
- * and there is nothing more to do until it is
- * complete.
- */
- return;
- }
+ mmc_blk_mq_poll_completion(mq, mq->complete_req);
+
+ if (prev_req)
+ *prev_req = mq->complete_req;
+ else
+ mmc_blk_mq_post_req(mq, mq->complete_req);
+
+ mq->complete_req = NULL;
+
+out_unlock:
+ mutex_unlock(&mq->complete_lock);
+}
+
+void mmc_blk_mq_complete_work(struct work_struct *work)
+{
+ struct mmc_queue *mq = container_of(work, struct mmc_queue,
+ complete_work);
+
+ mmc_blk_mq_complete_prev_req(mq, NULL);
+}
+
+static void mmc_blk_mq_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ unsigned long flags;
+
+ if (!mmc_host_done_complete(host)) {
+ bool waiting;
/*
- * An asynchronous request has been completed and we proceed
- * to handle the result of it.
+ * We cannot complete the request in this context, so record
+ * that there is a request to complete, and that a following
+ * request does not need to wait (although it does need to complete
+ * the recorded complete_req first).
*/
- mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
- brq = &mq_rq->brq;
- old_req = mmc_queue_req_to_req(mq_rq);
- type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
-
- switch (status) {
- case MMC_BLK_SUCCESS:
- case MMC_BLK_PARTIAL:
- /*
- * A block was successfully transferred.
- */
- mmc_blk_reset_success(md, type);
+ spin_lock_irqsave(q->queue_lock, flags);
+ mq->complete_req = req;
+ mq->rw_wait = false;
+ waiting = mq->waiting;
+ spin_unlock_irqrestore(q->queue_lock, flags);
- req_pending = blk_end_request(old_req, BLK_STS_OK,
- brq->data.bytes_xfered);
- /*
- * If the blk_end_request function returns non-zero even
- * though all data has been transferred and no errors
- * were returned by the host controller, it's a bug.
- */
- if (status == MMC_BLK_SUCCESS && req_pending) {
- pr_err("%s BUG rq_tot %d d_xfer %d\n",
- __func__, blk_rq_bytes(old_req),
- brq->data.bytes_xfered);
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- return;
- }
- break;
- case MMC_BLK_CMD_ERR:
- req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
- if (mmc_blk_reset(md, card->host, type)) {
- if (req_pending)
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- else
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- if (!req_pending) {
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- break;
- case MMC_BLK_RETRY:
- retune_retry_done = brq->retune_retry_done;
- if (retry++ < 5)
- break;
- /* Fall through */
- case MMC_BLK_ABORT:
- if (!mmc_blk_reset(md, card->host, type))
- break;
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- case MMC_BLK_DATA_ERR: {
- int err;
-
- err = mmc_blk_reset(md, card->host, type);
- if (!err)
- break;
- if (err == -ENODEV) {
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- /* Fall through */
- }
- case MMC_BLK_ECC_ERR:
- if (brq->data.blocks > 1) {
- /* Redo read one sector at a time */
- pr_warn("%s: retrying using single block read\n",
- old_req->rq_disk->disk_name);
- disable_multi = 1;
- break;
- }
- /*
- * After an error, we redo I/O one sector at a
- * time, so we only reach here after trying to
- * read a single sector.
- */
- req_pending = blk_end_request(old_req, BLK_STS_IOERR,
- brq->data.blksz);
- if (!req_pending) {
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- break;
- case MMC_BLK_NOMEDIUM:
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- default:
- pr_err("%s: Unhandled return value (%d)",
- old_req->rq_disk->disk_name, status);
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
+ /*
+ * If 'waiting', the waiting task will complete this
+ * request; otherwise queue work to do it. Note that
+ * complete_work may still race with the dispatch of a following
+ * request.
+ */
+ if (waiting)
+ wake_up(&mq->wait);
+ else
+ kblockd_schedule_work(&mq->complete_work);
- if (req_pending) {
- /*
- * In case of a incomplete request
- * prepare it again and resend.
- */
- mmc_blk_rw_rq_prep(mq_rq, card,
- disable_multi, mq);
- mmc_start_areq(card->host,
- &mq_rq->areq, NULL);
- mq_rq->brq.retune_retry_done = retune_retry_done;
- }
- } while (req_pending);
+ return;
+ }
+
+ /* Take the recovery path for errors or urgent background operations */
+ if (mmc_blk_rq_error(&mqrq->brq) ||
+ mmc_blk_urgent_bkops_needed(mq, mqrq)) {
+ spin_lock_irqsave(q->queue_lock, flags);
+ mq->recovery_needed = true;
+ mq->recovery_req = req;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ wake_up(&mq->wait);
+ schedule_work(&mq->recovery_work);
+ return;
+ }
+
+ mmc_blk_rw_reset_success(mq, req);
- mq->qcnt--;
+ mq->rw_wait = false;
+ wake_up(&mq->wait);
+
+ mmc_blk_mq_post_req(mq, req);
}
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
+{
+ struct request_queue *q = mq->queue;
+ unsigned long flags;
+ bool done;
+
+ /*
+ * Wait while there is another request in progress, but not if recovery
+ * is needed. Also indicate whether there is a request waiting to start.
+ */
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (mq->recovery_needed) {
+ *err = -EBUSY;
+ done = true;
+ } else {
+ done = !mq->rw_wait;
+ }
+ mq->waiting = !done;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return done;
+}
+
+static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
+{
+ int err = 0;
+
+ wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err));
+
+ /* Always complete the previous request if there is one */
+ mmc_blk_mq_complete_prev_req(mq, prev_req);
+
+ return err;
+}
+
+static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+ struct request *prev_req = NULL;
+ int err = 0;
+
+ mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
+
+ mqrq->brq.mrq.done = mmc_blk_mq_req_done;
+
+ mmc_pre_req(host, &mqrq->brq.mrq);
+
+ err = mmc_blk_rw_wait(mq, &prev_req);
+ if (err)
+ goto out_post_req;
+
+ mq->rw_wait = true;
+
+ err = mmc_start_request(host, &mqrq->brq.mrq);
+
+ if (prev_req)
+ mmc_blk_mq_post_req(mq, prev_req);
+
+ if (err)
+ mq->rw_wait = false;
+
+ /* Release re-tuning here, where no synchronization is required */
+ if (err || mmc_host_done_complete(host))
+ mmc_retune_release(host);
+
+out_post_req:
+ if (err)
+ mmc_post_req(host, &mqrq->brq.mrq, err);
+
+ return err;
+}
+
+static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
+{
+ if (mq->use_cqe)
+ return host->cqe_ops->cqe_wait_for_idle(host);
+
+ return mmc_blk_rw_wait(mq, NULL);
+}
+
+enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
{
- int ret;
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
-
- if (req && !mq->qcnt)
- /* claim host only for the first request */
- mmc_get_card(card, NULL);
+ struct mmc_host *host = card->host;
+ int ret;
ret = mmc_blk_part_switch(card, md->part_type);
- if (ret) {
- if (req) {
- blk_end_request_all(req, BLK_STS_IOERR);
- }
- goto out;
- }
+ if (ret)
+ return MMC_REQ_FAILED_TO_START;
- if (req) {
+ switch (mmc_issue_type(mq, req)) {
+ case MMC_ISSUE_SYNC:
+ ret = mmc_blk_wait_for_idle(mq, host);
+ if (ret)
+ return MMC_REQ_BUSY;
switch (req_op(req)) {
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
- /*
- * Complete ongoing async transfer before issuing
- * ioctl()s
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_drv_op(mq, req);
break;
case REQ_OP_DISCARD:
- /*
- * Complete ongoing async transfer before issuing
- * discard.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_discard_rq(mq, req);
break;
case REQ_OP_SECURE_ERASE:
- /*
- * Complete ongoing async transfer before issuing
- * secure erase.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_secdiscard_rq(mq, req);
break;
case REQ_OP_FLUSH:
- /*
- * Complete ongoing async transfer before issuing
- * flush.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_flush(mq, req);
break;
default:
- /* Normal request, just issue it */
- mmc_blk_issue_rw_rq(mq, req);
- card->host->context_info.is_waiting_last_req = false;
+ WARN_ON_ONCE(1);
+ return MMC_REQ_FAILED_TO_START;
+ }
+ return MMC_REQ_FINISHED;
+ case MMC_ISSUE_DCMD:
+ case MMC_ISSUE_ASYNC:
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
+ ret = mmc_blk_cqe_issue_flush(mq, req);
break;
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ if (mq->use_cqe)
+ ret = mmc_blk_cqe_issue_rw_rq(mq, req);
+ else
+ ret = mmc_blk_mq_issue_rw_rq(mq, req);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
}
- } else {
- /* No request, flushing the pipeline with NULL */
- mmc_blk_issue_rw_rq(mq, NULL);
- card->host->context_info.is_waiting_last_req = false;
+ if (!ret)
+ return MMC_REQ_STARTED;
+ return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
+ default:
+ WARN_ON_ONCE(1);
+ return MMC_REQ_FAILED_TO_START;
}
-
-out:
- if (!mq->qcnt)
- mmc_put_card(card, NULL);
}
static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -2156,6 +2320,18 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->queue.blkdata = md;
+ /*
+ * Keep an extra reference to the queue so that we can shut down the
+ * queue (i.e. call blk_cleanup_queue()) while there are still
+ * references to the 'md'. The corresponding blk_put_queue() is in
+ * mmc_blk_put().
+ */
+ if (!blk_get_queue(md->queue.queue)) {
+ mmc_cleanup_queue(&md->queue);
+ ret = -ENODEV;
+ goto err_putdisk;
+ }
+
md->disk->major = MMC_BLOCK_MAJOR;
md->disk->first_minor = devidx * perdev_minors;
md->disk->fops = &mmc_bdops;
@@ -2471,10 +2647,6 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* from being accepted.
*/
card = md->queue.card;
- spin_lock_irq(md->queue.queue->queue_lock);
- queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
- spin_unlock_irq(md->queue.queue->queue_lock);
- blk_set_queue_dying(md->queue.queue);
mmc_cleanup_queue(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
@@ -2623,6 +2795,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
if (n != EXT_CSD_STR_LEN) {
err = -EINVAL;
+ kfree(ext_csd);
goto out_free;
}
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
index 5946636101ef..31153f656f41 100644
--- a/drivers/mmc/core/block.h
+++ b/drivers/mmc/core/block.h
@@ -5,6 +5,16 @@
struct mmc_queue;
struct request;
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
+void mmc_blk_cqe_recovery(struct mmc_queue *mq);
+
+enum mmc_issued;
+
+enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
+void mmc_blk_mq_complete(struct request *req);
+void mmc_blk_mq_recovery(struct mmc_queue *mq);
+
+struct work_struct;
+
+void mmc_blk_mq_complete_work(struct work_struct *work);
#endif
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 7586ff2ad1f1..fc92c6c1c9a4 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -351,8 +351,6 @@ int mmc_add_card(struct mmc_card *card)
#ifdef CONFIG_DEBUG_FS
mmc_add_card_debugfs(card);
#endif
- mmc_init_context_info(card->host);
-
card->dev.of_node = mmc_of_find_child_device(card->host, 0);
device_enable_async_suspend(&card->dev);
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index f06cd91964ce..79a5b985ccf5 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -75,9 +75,11 @@ struct mmc_fixup {
#define EXT_CSD_REV_ANY (-1u)
#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_ATP 0x9
#define CID_MANFID_TOSHIBA 0x11
#define CID_MANFID_MICRON 0x13
#define CID_MANFID_SAMSUNG 0x15
+#define CID_MANFID_APACER 0x27
#define CID_MANFID_KINGSTON 0x70
#define CID_MANFID_HYNIX 0x90
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 1f0f44f4dd5f..c0ba6d8823b7 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -341,6 +341,8 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
+ init_completion(&mrq->cmd_completion);
+
mmc_retune_hold(host);
if (mmc_card_removed(host->card))
@@ -361,20 +363,6 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
}
EXPORT_SYMBOL(mmc_start_request);
-/*
- * mmc_wait_data_done() - done callback for data request
- * @mrq: done data request
- *
- * Wakes up mmc context, passed as a callback to host controller driver
- */
-static void mmc_wait_data_done(struct mmc_request *mrq)
-{
- struct mmc_context_info *context_info = &mrq->host->context_info;
-
- context_info->is_done_rcv = true;
- wake_up_interruptible(&context_info->wait);
-}
-
static void mmc_wait_done(struct mmc_request *mrq)
{
complete(&mrq->completion);
@@ -392,37 +380,6 @@ static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
wait_for_completion(&ongoing_mrq->cmd_completion);
}
-/*
- *__mmc_start_data_req() - starts data request
- * @host: MMC host to start the request
- * @mrq: data request to start
- *
- * Sets the done callback to be called when request is completed by the card.
- * Starts data mmc request execution
- * If an ongoing transfer is already in progress, wait for the command line
- * to become available before sending another command.
- */
-static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
-{
- int err;
-
- mmc_wait_ongoing_tfr_cmd(host);
-
- mrq->done = mmc_wait_data_done;
- mrq->host = host;
-
- init_completion(&mrq->cmd_completion);
-
- err = mmc_start_request(host, mrq);
- if (err) {
- mrq->cmd->error = err;
- mmc_complete_cmd(mrq);
- mmc_wait_data_done(mrq);
- }
-
- return err;
-}
-
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
@@ -432,8 +389,6 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
init_completion(&mrq->completion);
mrq->done = mmc_wait_done;
- init_completion(&mrq->cmd_completion);
-
err = mmc_start_request(host, mrq);
if (err) {
mrq->cmd->error = err;
@@ -650,164 +605,11 @@ EXPORT_SYMBOL(mmc_cqe_recovery);
*/
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
- if (host->areq)
- return host->context_info.is_done_rcv;
- else
- return completion_done(&mrq->completion);
+ return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);
/**
- * mmc_pre_req - Prepare for a new request
- * @host: MMC host to prepare command
- * @mrq: MMC request to prepare for
- *
- * mmc_pre_req() is called in prior to mmc_start_req() to let
- * host prepare for the new request. Preparation of a request may be
- * performed while another request is running on the host.
- */
-static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
-{
- if (host->ops->pre_req)
- host->ops->pre_req(host, mrq);
-}
-
-/**
- * mmc_post_req - Post process a completed request
- * @host: MMC host to post process command
- * @mrq: MMC request to post process for
- * @err: Error, if non zero, clean up any resources made in pre_req
- *
- * Let the host post process a completed request. Post processing of
- * a request may be performed while another reuqest is running.
- */
-static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
- int err)
-{
- if (host->ops->post_req)
- host->ops->post_req(host, mrq, err);
-}
-
-/**
- * mmc_finalize_areq() - finalize an asynchronous request
- * @host: MMC host to finalize any ongoing request on
- *
- * Returns the status of the ongoing asynchronous request, but
- * MMC_BLK_SUCCESS if no request was going on.
- */
-static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
-{
- struct mmc_context_info *context_info = &host->context_info;
- enum mmc_blk_status status;
-
- if (!host->areq)
- return MMC_BLK_SUCCESS;
-
- while (1) {
- wait_event_interruptible(context_info->wait,
- (context_info->is_done_rcv ||
- context_info->is_new_req));
-
- if (context_info->is_done_rcv) {
- struct mmc_command *cmd;
-
- context_info->is_done_rcv = false;
- cmd = host->areq->mrq->cmd;
-
- if (!cmd->error || !cmd->retries ||
- mmc_card_removed(host->card)) {
- status = host->areq->err_check(host->card,
- host->areq);
- break; /* return status */
- } else {
- mmc_retune_recheck(host);
- pr_info("%s: req failed (CMD%u): %d, retrying...\n",
- mmc_hostname(host),
- cmd->opcode, cmd->error);
- cmd->retries--;
- cmd->error = 0;
- __mmc_start_request(host, host->areq->mrq);
- continue; /* wait for done/new event again */
- }
- }
-
- return MMC_BLK_NEW_REQUEST;
- }
-
- mmc_retune_release(host);
-
- /*
- * Check BKOPS urgency for each R1 response
- */
- if (host->card && mmc_card_mmc(host->card) &&
- ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
- (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
- (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
- mmc_start_bkops(host->card, true);
- }
-
- return status;
-}
-
-/**
- * mmc_start_areq - start an asynchronous request
- * @host: MMC host to start command
- * @areq: asynchronous request to start
- * @ret_stat: out parameter for status
- *
- * Start a new MMC custom command request for a host.
- * If there is on ongoing async request wait for completion
- * of that request and start the new one and return.
- * Does not wait for the new request to complete.
- *
- * Returns the completed request, NULL in case of none completed.
- * Wait for the an ongoing request (previoulsy started) to complete and
- * return the completed request. If there is no ongoing request, NULL
- * is returned without waiting. NULL is not an error condition.
- */
-struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
- struct mmc_async_req *areq,
- enum mmc_blk_status *ret_stat)
-{
- enum mmc_blk_status status;
- int start_err = 0;
- struct mmc_async_req *previous = host->areq;
-
- /* Prepare a new request */
- if (areq)
- mmc_pre_req(host, areq->mrq);
-
- /* Finalize previous request */
- status = mmc_finalize_areq(host);
- if (ret_stat)
- *ret_stat = status;
-
- /* The previous request is still going on... */
- if (status == MMC_BLK_NEW_REQUEST)
- return NULL;
-
- /* Fine so far, start the new request! */
- if (status == MMC_BLK_SUCCESS && areq)
- start_err = __mmc_start_data_req(host, areq->mrq);
-
- /* Postprocess the old request at this point */
- if (host->areq)
- mmc_post_req(host, host->areq->mrq, 0);
-
- /* Cancel a prepared request if it was not started. */
- if ((status != MMC_BLK_SUCCESS || start_err) && areq)
- mmc_post_req(host, areq->mrq, -EINVAL);
-
- if (status != MMC_BLK_SUCCESS)
- host->areq = NULL;
- else
- host->areq = areq;
-
- return previous;
-}
-EXPORT_SYMBOL(mmc_start_areq);
-
-/**
* mmc_wait_for_req - start a request and wait for completion
* @host: MMC host to start command
* @mrq: MMC request to start
@@ -2959,6 +2761,14 @@ static int mmc_pm_notify(struct notifier_block *notify_block,
if (!err)
break;
+ if (!mmc_card_is_removable(host)) {
+ dev_warn(mmc_dev(host),
+ "pre_suspend failed for non-removable host: "
+ "%d\n", err);
+ /* Avoid removing non-removable hosts */
+ break;
+ }
+
/* Calling bus_ops->remove() with a claimed host can deadlock */
host->bus_ops->remove(host);
mmc_claim_host(host);
@@ -2994,22 +2804,6 @@ void mmc_unregister_pm_notifier(struct mmc_host *host)
}
#endif
-/**
- * mmc_init_context_info() - init synchronization context
- * @host: mmc host
- *
- * Init struct context_info needed to implement asynchronous
- * request mechanism, used by mmc core, host driver and mmc requests
- * supplier.
- */
-void mmc_init_context_info(struct mmc_host *host)
-{
- host->context_info.is_new_req = false;
- host->context_info.is_done_rcv = false;
- host->context_info.is_waiting_last_req = false;
- init_waitqueue_head(&host->context_info.wait);
-}
-
static int __init mmc_init(void)
{
int ret;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 71e6c6d7ceb7..d6303d69071b 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -62,12 +62,10 @@ void mmc_set_initial_state(struct mmc_host *host);
static inline void mmc_delay(unsigned int ms)
{
- if (ms < 1000 / HZ) {
- cond_resched();
- mdelay(ms);
- } else {
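+ /*
+ * msleep() can overshoot short delays by a jiffy or more on low-HZ
+ * kernels, so use hrtimer-based usleep_range() for delays up to 20 ms.
+ */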
+ if (ms <= 20)
+ usleep_range(ms * 1000, ms * 1250);
+ else
msleep(ms);
- }
}
void mmc_rescan(struct work_struct *work);
@@ -91,8 +89,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host);
void mmc_add_card_debugfs(struct mmc_card *card);
void mmc_remove_card_debugfs(struct mmc_card *card);
-void mmc_init_context_info(struct mmc_host *host);
-
int mmc_execute_tuning(struct mmc_card *card);
int mmc_hs200_to_hs400(struct mmc_card *card);
int mmc_hs400_to_hs200(struct mmc_card *card);
@@ -110,12 +106,6 @@ bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq);
-struct mmc_async_req;
-
-struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
- struct mmc_async_req *areq,
- enum mmc_blk_status *ret_stat);
-
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
unsigned int arg);
int mmc_can_erase(struct mmc_card *card);
@@ -152,4 +142,35 @@ int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq);
void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq);
int mmc_cqe_recovery(struct mmc_host *host);
+/**
+ * mmc_pre_req - Prepare for a new request
+ * @host: MMC host to prepare command
+ * @mrq: MMC request to prepare for
+ *
+ * mmc_pre_req() is called prior to mmc_start_req() to let the
+ * host prepare for the new request. Preparation of a request may be
+ * performed while another request is running on the host.
+ */
+static inline void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (host->ops->pre_req)
+ host->ops->pre_req(host, mrq);
+}
+
+/**
+ * mmc_post_req - Post-process a completed request
+ * @host: MMC host to post-process command
+ * @mrq: MMC request to post-process for
+ * @err: Error; if non-zero, clean up any resources allocated in pre_req
+ *
+ * Let the host post-process a completed request. Post-processing of
+ * a request may be performed while another request is running.
+ */
+static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
+ int err)
+{
+ if (host->ops->post_req)
+ host->ops->post_req(host, mrq, err);
+}
+
#endif
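
For context, host drivers typically implement these hooks to overlap DMA mapping with an in-flight transfer. A minimal sketch of a hypothetical driver: the foo_* names and struct are invented, while mmc_priv(), dma_map_sg(), dma_unmap_sg() and mmc_get_dma_dir() are existing kernel APIs:

	/* Hypothetical driver-private state. */
	struct foo_host {
		struct device *dev;
	};

	static void foo_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
	{
		struct foo_host *host = mmc_priv(mmc);

		/* Map the sg list while the previous request may still be running. */
		if (mrq->data)
			mrq->data->host_cookie = dma_map_sg(host->dev, mrq->data->sg,
							    mrq->data->sg_len,
							    mmc_get_dma_dir(mrq->data));
	}

	static void foo_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				 int err)
	{
		struct foo_host *host = mmc_priv(mmc);

		if (mrq->data && mrq->data->host_cookie) {
			dma_unmap_sg(host->dev, mrq->data->sg, mrq->data->sg_len,
				     mmc_get_dma_dir(mrq->data));
			mrq->data->host_cookie = 0;
		}
	}
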
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index fb689a1065ed..06ec19b5bf9f 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -41,6 +41,11 @@ static inline int mmc_host_cmd23(struct mmc_host *host)
return host->caps & MMC_CAP_CMD23;
}
+static inline bool mmc_host_done_complete(struct mmc_host *host)
+{
+ return host->caps & MMC_CAP_DONE_COMPLETE;
+}
+
static inline int mmc_boot_partition_access(struct mmc_host *host)
{
return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
@@ -74,6 +79,5 @@ static inline bool mmc_card_hs400es(struct mmc_card *card)
return card->host->ios.enhanced_strobe;
}
-
#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index d209fb466979..208a762b87ef 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1290,7 +1290,7 @@ out_err:
static void mmc_select_driver_type(struct mmc_card *card)
{
- int card_drv_type, drive_strength, drv_type;
+ int card_drv_type, drive_strength, drv_type = 0;
int fixed_drv_type = card->host->fixed_drv_type;
card_drv_type = card->ext_csd.raw_driver_strength |
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 478869805b96..ef18daeaa4cc 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -101,7 +101,7 @@ struct mmc_test_transfer_result {
struct list_head link;
unsigned int count;
unsigned int sectors;
- struct timespec ts;
+ struct timespec64 ts;
unsigned int rate;
unsigned int iops;
};
@@ -171,11 +171,6 @@ struct mmc_test_multiple_rw {
enum mmc_test_prep_media prepare;
};
-struct mmc_test_async_req {
- struct mmc_async_req areq;
- struct mmc_test_card *test;
-};
-
/*******************************************************************/
/* General helper functions */
/*******************************************************************/
@@ -515,14 +510,11 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
/*
* Calculate transfer rate in bytes per second.
*/
-static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
+static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
{
uint64_t ns;
- ns = ts->tv_sec;
- ns *= 1000000000;
- ns += ts->tv_nsec;
-
+ ns = timespec64_to_ns(ts);
bytes *= 1000000000;
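+ /*
+ * Scale numerator and denominator down together until ns fits in
+ * 32 bits, so the division that follows keeps the ratio without
+ * needing a 64-by-64 divide.
+ */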
while (ns > UINT_MAX) {
@@ -542,7 +534,7 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
* Save transfer results for future usage
*/
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
- unsigned int count, unsigned int sectors, struct timespec ts,
+ unsigned int count, unsigned int sectors, struct timespec64 ts,
unsigned int rate, unsigned int iops)
{
struct mmc_test_transfer_result *tr;
@@ -567,21 +559,21 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
* Print the transfer rate.
*/
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
- struct timespec *ts1, struct timespec *ts2)
+ struct timespec64 *ts1, struct timespec64 *ts2)
{
unsigned int rate, iops, sectors = bytes >> 9;
- struct timespec ts;
+ struct timespec64 ts;
- ts = timespec_sub(*ts2, *ts1);
+ ts = timespec64_sub(*ts2, *ts1);
rate = mmc_test_rate(bytes, &ts);
iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
- pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
+ pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
"seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
mmc_hostname(test->card->host), sectors, sectors >> 1,
- (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
- (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
+ (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
+ (u32)ts.tv_nsec, rate / 1000, rate / 1024,
iops / 100, iops % 100);
mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
@@ -591,24 +583,24 @@ static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
* Print the average transfer rate.
*/
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
- unsigned int count, struct timespec *ts1,
- struct timespec *ts2)
+ unsigned int count, struct timespec64 *ts1,
+ struct timespec64 *ts2)
{
unsigned int rate, iops, sectors = bytes >> 9;
uint64_t tot = bytes * count;
- struct timespec ts;
+ struct timespec64 ts;
- ts = timespec_sub(*ts2, *ts1);
+ ts = timespec64_sub(*ts2, *ts1);
rate = mmc_test_rate(tot, &ts);
iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
- "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
+ "%llu.%09u seconds (%u kB/s, %u KiB/s, "
"%u.%02u IOPS, sg_len %d)\n",
mmc_hostname(test->card->host), count, sectors, count,
sectors >> 1, (sectors & 1 ? ".5" : ""),
- (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
+ (u64)ts.tv_sec, (u32)ts.tv_nsec,
rate / 1000, rate / 1024, iops / 100, iops % 100,
test->area.sg_len);
@@ -741,30 +733,6 @@ static int mmc_test_check_result(struct mmc_test_card *test,
return ret;
}
-static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
- struct mmc_async_req *areq)
-{
- struct mmc_test_async_req *test_async =
- container_of(areq, struct mmc_test_async_req, areq);
- int ret;
-
- mmc_test_wait_busy(test_async->test);
-
- /*
- * FIXME: this would earlier just casts a regular error code,
- * either of the kernel type -ERRORCODE or the local test framework
- * RESULT_* errorcode, into an enum mmc_blk_status and return as
- * result check. Instead, convert it to some reasonable type by just
- * returning either MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR.
- * If possible, a reasonable error code should be returned.
- */
- ret = mmc_test_check_result(test_async->test, areq->mrq);
- if (ret)
- return MMC_BLK_CMD_ERR;
-
- return MMC_BLK_SUCCESS;
-}
-
/*
* Checks that a "short transfer" behaved as expected
*/
@@ -831,6 +799,45 @@ static struct mmc_test_req *mmc_test_req_alloc(void)
return rq;
}
+static void mmc_test_wait_done(struct mmc_request *mrq)
+{
+ complete(&mrq->completion);
+}
+
+static int mmc_test_start_areq(struct mmc_test_card *test,
+ struct mmc_request *mrq,
+ struct mmc_request *prev_mrq)
+{
+ struct mmc_host *host = test->card->host;
+ int err = 0;
+
+ if (mrq) {
+ init_completion(&mrq->completion);
+ mrq->done = mmc_test_wait_done;
+ mmc_pre_req(host, mrq);
+ }
+
+ if (prev_mrq) {
+ wait_for_completion(&prev_mrq->completion);
+ err = mmc_test_wait_busy(test);
+ if (!err)
+ err = mmc_test_check_result(test, prev_mrq);
+ }
+
+ if (!err && mrq) {
+ err = mmc_start_request(host, mrq);
+ if (err)
+ mmc_retune_release(host);
+ }
+
+ if (prev_mrq)
+ mmc_post_req(host, prev_mrq, 0);
+
+ if (err && mrq)
+ mmc_post_req(host, mrq, err);
+
+ return err;
+}
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
struct scatterlist *sg, unsigned sg_len,
@@ -838,17 +845,10 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
unsigned blksz, int write, int count)
{
struct mmc_test_req *rq1, *rq2;
- struct mmc_test_async_req test_areq[2];
- struct mmc_async_req *done_areq;
- struct mmc_async_req *cur_areq = &test_areq[0].areq;
- struct mmc_async_req *other_areq = &test_areq[1].areq;
- enum mmc_blk_status status;
+ struct mmc_request *mrq, *prev_mrq;
int i;
int ret = RESULT_OK;
- test_areq[0].test = test;
- test_areq[1].test = test;
-
rq1 = mmc_test_req_alloc();
rq2 = mmc_test_req_alloc();
if (!rq1 || !rq2) {
@@ -856,33 +856,25 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
goto err;
}
- cur_areq->mrq = &rq1->mrq;
- cur_areq->err_check = mmc_test_check_result_async;
- other_areq->mrq = &rq2->mrq;
- other_areq->err_check = mmc_test_check_result_async;
+ mrq = &rq1->mrq;
+ prev_mrq = NULL;
for (i = 0; i < count; i++) {
- mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
- blocks, blksz, write);
- done_areq = mmc_start_areq(test->card->host, cur_areq, &status);
-
- if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
- ret = RESULT_FAIL;
+ mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
+ mmc_test_prepare_mrq(test, mrq, sg, sg_len, dev_addr, blocks,
+ blksz, write);
+ ret = mmc_test_start_areq(test, mrq, prev_mrq);
+ if (ret)
goto err;
- }
- if (done_areq)
- mmc_test_req_reset(container_of(done_areq->mrq,
- struct mmc_test_req, mrq));
+ if (!prev_mrq)
+ prev_mrq = &rq2->mrq;
- swap(cur_areq, other_areq);
+ swap(mrq, prev_mrq);
dev_addr += blocks;
}
- done_areq = mmc_start_areq(test->card->host, NULL, &status);
- if (status != MMC_BLK_SUCCESS)
- ret = RESULT_FAIL;
-
+ ret = mmc_test_start_areq(test, NULL, prev_mrq);
err:
kfree(rq1);
kfree(rq2);
@@ -1449,7 +1441,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
int max_scatter, int timed, int count,
bool nonblock, int min_sg_len)
{
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret = 0;
int i;
struct mmc_test_area *t = &test->area;
@@ -1475,7 +1467,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
return ret;
if (timed)
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
if (nonblock)
ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
dev_addr, t->blocks, 512, write, count);
@@ -1489,7 +1481,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
return ret;
if (timed)
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
if (timed)
mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
@@ -1747,7 +1739,7 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
if (!mmc_can_trim(test->card))
@@ -1758,19 +1750,19 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
for (sz = 512; sz < t->max_sz; sz <<= 1) {
dev_addr = t->dev_addr + (sz >> 9);
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
if (ret)
return ret;
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_rate(test, sz, &ts1, &ts2);
}
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
if (ret)
return ret;
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_rate(test, sz, &ts1, &ts2);
return 0;
}
@@ -1779,19 +1771,19 @@ static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
if (ret)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
@@ -1818,7 +1810,7 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
ret = mmc_test_area_erase(test);
@@ -1826,14 +1818,14 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
return ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
if (ret)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
@@ -1864,7 +1856,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
if (!mmc_can_trim(test->card))
@@ -1882,7 +1874,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
return ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_erase(test->card, dev_addr, sz >> 9,
MMC_TRIM_ARG);
@@ -1890,7 +1882,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
}
return 0;
@@ -1912,7 +1904,7 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
{
unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
unsigned int ssz;
- struct timespec ts1, ts2, ts;
+ struct timespec64 ts1, ts2, ts;
int ret;
ssz = sz >> 9;
@@ -1921,10 +1913,10 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
range1 = rnd_addr / test->card->pref_erase;
range2 = range1 / ssz;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (cnt = 0; cnt < UINT_MAX; cnt++) {
- getnstimeofday(&ts2);
- ts = timespec_sub(ts2, ts1);
+ ktime_get_ts64(&ts2);
+ ts = timespec64_sub(ts2, ts1);
if (ts.tv_sec >= 10)
break;
ea = mmc_test_rnd_num(range1);
@@ -1998,7 +1990,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt, sz, ssz;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
sz = t->max_tfr;
@@ -2025,7 +2017,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
cnt = tot_sz / sz;
dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, write,
max_scatter, 0);
@@ -2033,7 +2025,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
return ret;
dev_addr += ssz;
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
@@ -2328,10 +2320,17 @@ static int mmc_test_reset(struct mmc_test_card *test)
int err;
err = mmc_hw_reset(host);
- if (!err)
+ if (!err) {
+ /*
+ * Reset will re-enable the card's command queue, but tests
+ * expect it to be disabled.
+ */
+ if (card->ext_csd.cmdq_en)
+ mmc_cmdq_disable(card);
return RESULT_OK;
- else if (err == -EOPNOTSUPP)
+ } else if (err == -EOPNOTSUPP) {
return RESULT_UNSUP_HOST;
+ }
return RESULT_FAIL;
}
@@ -2356,11 +2355,9 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
struct mmc_test_req *rq = mmc_test_req_alloc();
struct mmc_host *host = test->card->host;
struct mmc_test_area *t = &test->area;
- struct mmc_test_async_req test_areq = { .test = test };
struct mmc_request *mrq;
unsigned long timeout;
bool expired = false;
- enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
int ret = 0, cmd_ret;
u32 status = 0;
int count = 0;
@@ -2373,9 +2370,6 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
mrq->sbc = &rq->sbc;
mrq->cap_cmd_during_tfr = true;
- test_areq.areq.mrq = mrq;
- test_areq.areq.err_check = mmc_test_check_result_async;
-
mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
512, write);
@@ -2388,11 +2382,9 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Start ongoing data request */
if (use_areq) {
- mmc_start_areq(host, &test_areq.areq, &blkstat);
- if (blkstat != MMC_BLK_SUCCESS) {
- ret = RESULT_FAIL;
+ ret = mmc_test_start_areq(test, mrq, NULL);
+ if (ret)
goto out_free;
- }
} else {
mmc_wait_for_req(host, mrq);
}
@@ -2426,9 +2418,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Wait for data request to complete */
if (use_areq) {
- mmc_start_areq(host, NULL, &blkstat);
- if (blkstat != MMC_BLK_SUCCESS)
- ret = RESULT_FAIL;
+ ret = mmc_test_start_areq(test, NULL, mrq);
} else {
mmc_wait_for_req_done(test->card->host, mrq);
}
@@ -3066,10 +3056,9 @@ static int mtf_test_show(struct seq_file *sf, void *data)
seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
list_for_each_entry(tr, &gr->tr_lst, link) {
- seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
+ seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
tr->count, tr->sectors,
- (unsigned long)tr->ts.tv_sec,
- (unsigned long)tr->ts.tv_nsec,
+ (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
tr->rate, tr->iops / 100, tr->iops % 100);
}
}
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 4f33d277b125..421fab7250ac 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -22,100 +22,147 @@
#include "block.h"
#include "core.h"
#include "card.h"
+#include "host.h"
-/*
- * Prepare a MMC request. This just filters out odd stuff.
- */
-static int mmc_prep_request(struct request_queue *q, struct request *req)
+static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
- struct mmc_queue *mq = q->queuedata;
+ /* Allow only 1 DCMD at a time */
+ return mq->in_flight[MMC_ISSUE_DCMD];
+}
- if (mq && mmc_card_removed(mq->card))
- return BLKPREP_KILL;
+void mmc_cqe_check_busy(struct mmc_queue *mq)
+{
+ if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
+ mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
- req->rq_flags |= RQF_DONTPREP;
+ mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
+}
- return BLKPREP_OK;
+static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
+{
+ return host->caps2 & MMC_CAP2_CQE_DCMD;
}
-static int mmc_queue_thread(void *d)
+static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
+ struct request *req)
{
- struct mmc_queue *mq = d;
- struct request_queue *q = mq->queue;
- struct mmc_context_info *cntx = &mq->card->host->context_info;
+ switch (req_op(req)) {
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ return MMC_ISSUE_SYNC;
+ case REQ_OP_FLUSH:
+ return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
+ default:
+ return MMC_ISSUE_ASYNC;
+ }
+}
- current->flags |= PF_MEMALLOC;
+enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_host *host = mq->card->host;
- down(&mq->thread_sem);
- do {
- struct request *req;
+ if (mq->use_cqe)
+ return mmc_cqe_issue_type(host, req);
- spin_lock_irq(q->queue_lock);
- set_current_state(TASK_INTERRUPTIBLE);
- req = blk_fetch_request(q);
- mq->asleep = false;
- cntx->is_waiting_last_req = false;
- cntx->is_new_req = false;
- if (!req) {
- /*
- * Dispatch queue is empty so set flags for
- * mmc_request_fn() to wake us up.
- */
- if (mq->qcnt)
- cntx->is_waiting_last_req = true;
- else
- mq->asleep = true;
- }
- spin_unlock_irq(q->queue_lock);
+ if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
+ return MMC_ISSUE_ASYNC;
- if (req || mq->qcnt) {
- set_current_state(TASK_RUNNING);
- mmc_blk_issue_rq(mq, req);
- cond_resched();
- } else {
- if (kthread_should_stop()) {
- set_current_state(TASK_RUNNING);
- break;
- }
- up(&mq->thread_sem);
- schedule();
- down(&mq->thread_sem);
- }
- } while (1);
- up(&mq->thread_sem);
+ return MMC_ISSUE_SYNC;
+}
- return 0;
+static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
+{
+ if (!mq->recovery_needed) {
+ mq->recovery_needed = true;
+ schedule_work(&mq->recovery_work);
+ }
}
-/*
- * Generic MMC request handler. This is called for any queue on a
- * particular host. When the host is not busy, we look for a request
- * on any queue on this host, and attempt to issue it. This may
- * not be the queue we were asked to process.
- */
-static void mmc_request_fn(struct request_queue *q)
+void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
- struct request *req;
- struct mmc_context_info *cntx;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __mmc_cqe_recovery_notifier(mq);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
- if (!mq) {
- while ((req = blk_fetch_request(q)) != NULL) {
- req->rq_flags |= RQF_QUIET;
- __blk_end_request_all(req, BLK_STS_IOERR);
+static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_queue *mq = req->q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ bool recovery_needed = false;
+
+ switch (issue_type) {
+ case MMC_ISSUE_ASYNC:
+ case MMC_ISSUE_DCMD:
+ if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
+ if (recovery_needed)
+ __mmc_cqe_recovery_notifier(mq);
+ return BLK_EH_RESET_TIMER;
}
- return;
+ /* No timeout */
+ return BLK_EH_HANDLED;
+ default:
+ /* Timeout is handled by mmc core */
+ return BLK_EH_RESET_TIMER;
}
+}
+
+static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
+ bool reserved)
+{
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ unsigned long flags;
+ int ret;
- cntx = &mq->card->host->context_info;
+ spin_lock_irqsave(q->queue_lock, flags);
- if (cntx->is_waiting_last_req) {
- cntx->is_new_req = true;
- wake_up_interruptible(&cntx->wait);
- }
+ if (mq->recovery_needed || !mq->use_cqe)
+ ret = BLK_EH_RESET_TIMER;
+ else
+ ret = mmc_cqe_timed_out(req);
- if (mq->asleep)
- wake_up_process(mq->thread);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return ret;
+}
+
+static void mmc_mq_recovery_handler(struct work_struct *work)
+{
+ struct mmc_queue *mq = container_of(work, struct mmc_queue,
+ recovery_work);
+ struct request_queue *q = mq->queue;
+
+ mmc_get_card(mq->card, &mq->ctx);
+
+ mq->in_recovery = true;
+
+ if (mq->use_cqe)
+ mmc_blk_cqe_recovery(mq);
+ else
+ mmc_blk_mq_recovery(mq);
+
+ mq->in_recovery = false;
+
+ spin_lock_irq(q->queue_lock);
+ mq->recovery_needed = false;
+ spin_unlock_irq(q->queue_lock);
+
+ mmc_put_card(mq->card, &mq->ctx);
+
+ blk_mq_run_hw_queues(q, true);
}
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
@@ -154,11 +201,10 @@ static void mmc_queue_setup_discard(struct request_queue *q,
* @req: the request
* @gfp: memory allocation policy
*/
-static int mmc_init_request(struct request_queue *q, struct request *req,
- gfp_t gfp)
+static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
+ gfp_t gfp)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
- struct mmc_queue *mq = q->queuedata;
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
@@ -177,6 +223,131 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
mq_rq->sg = NULL;
}
+static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx, unsigned int numa_node)
+{
+ return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
+}
+
+static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx)
+{
+ struct mmc_queue *mq = set->driver_data;
+
+ mmc_exit_request(mq->queue, req);
+}
+
+/*
+ * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
+ * will not be dispatched in parallel.
+ */
+static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *req = bd->rq;
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ enum mmc_issue_type issue_type;
+ enum mmc_issued issued;
+ bool get_card, cqe_retune_ok;
+ int ret;
+
+ if (mmc_card_removed(mq->card)) {
+ req->rq_flags |= RQF_QUIET;
+ return BLK_STS_IOERR;
+ }
+
+ issue_type = mmc_issue_type(mq, req);
+
+ spin_lock_irq(q->queue_lock);
+
+ if (mq->recovery_needed) {
+ spin_unlock_irq(q->queue_lock);
+ return BLK_STS_RESOURCE;
+ }
+
+ switch (issue_type) {
+ case MMC_ISSUE_DCMD:
+ if (mmc_cqe_dcmd_busy(mq)) {
+ mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
+ spin_unlock_irq(q->queue_lock);
+ return BLK_STS_RESOURCE;
+ }
+ break;
+ case MMC_ISSUE_ASYNC:
+ break;
+ default:
+ /*
+ * Timeouts are handled by mmc core, and we don't have a host
+ * API to abort requests, so we can't handle the timeout anyway.
+ * However, when the timeout happens, blk_mq_complete_request()
+ * no longer works (to stop the request disappearing under us).
+ * To avoid racing with that, set a large timeout.
+ */
+ req->timeout = 600 * HZ;
+ break;
+ }
+
+ mq->in_flight[issue_type] += 1;
+ get_card = (mmc_tot_in_flight(mq) == 1);
+ cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
+
+ spin_unlock_irq(q->queue_lock);
+
+ if (!(req->rq_flags & RQF_DONTPREP)) {
+ req_to_mmc_queue_req(req)->retries = 0;
+ req->rq_flags |= RQF_DONTPREP;
+ }
+
+ if (get_card)
+ mmc_get_card(card, &mq->ctx);
+
+ if (mq->use_cqe) {
+ host->retune_now = host->need_retune && cqe_retune_ok &&
+ !host->hold_retune;
+ }
+
+ blk_mq_start_request(req);
+
+ issued = mmc_blk_mq_issue_rq(mq, req);
+
+ switch (issued) {
+ case MMC_REQ_BUSY:
+ ret = BLK_STS_RESOURCE;
+ break;
+ case MMC_REQ_FAILED_TO_START:
+ ret = BLK_STS_IOERR;
+ break;
+ default:
+ ret = BLK_STS_OK;
+ break;
+ }
+
+ if (issued != MMC_REQ_STARTED) {
+ bool put_card = false;
+
+ spin_lock_irq(q->queue_lock);
+ mq->in_flight[issue_type] -= 1;
+ if (mmc_tot_in_flight(mq) == 0)
+ put_card = true;
+ spin_unlock_irq(q->queue_lock);
+ if (put_card)
+ mmc_put_card(card, &mq->ctx);
+ }
+
+ return ret;
+}
+
+static const struct blk_mq_ops mmc_mq_ops = {
+ .queue_rq = mmc_mq_queue_rq,
+ .init_request = mmc_mq_init_request,
+ .exit_request = mmc_mq_exit_request,
+ .complete = mmc_blk_mq_complete,
+ .timeout = mmc_mq_timed_out,
+};
+
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -196,124 +367,139 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- /* Initialize thread_sem even if it is not used */
- sema_init(&mq->thread_sem, 1);
+ INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
+ INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
+
+ mutex_init(&mq->complete_lock);
+
+ init_waitqueue_head(&mq->wait);
}
-/**
- * mmc_init_queue - initialise a queue structure.
- * @mq: mmc queue
- * @card: mmc card to attach this queue
- * @lock: queue lock
- * @subname: partition subname
- *
- * Initialise a MMC card request queue.
- */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
- spinlock_t *lock, const char *subname)
+static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
+ const struct blk_mq_ops *mq_ops, spinlock_t *lock)
{
- struct mmc_host *host = card->host;
- int ret = -ENOMEM;
-
- mq->card = card;
- mq->queue = blk_alloc_queue(GFP_KERNEL);
- if (!mq->queue)
- return -ENOMEM;
- mq->queue->queue_lock = lock;
- mq->queue->request_fn = mmc_request_fn;
- mq->queue->init_rq_fn = mmc_init_request;
- mq->queue->exit_rq_fn = mmc_exit_request;
- mq->queue->cmd_size = sizeof(struct mmc_queue_req);
- mq->queue->queuedata = mq;
- mq->qcnt = 0;
- ret = blk_init_allocated_queue(mq->queue);
- if (ret) {
- blk_cleanup_queue(mq->queue);
+ int ret;
+
+ memset(&mq->tag_set, 0, sizeof(mq->tag_set));
+ mq->tag_set.ops = mq_ops;
+ mq->tag_set.queue_depth = q_depth;
+ mq->tag_set.numa_node = NUMA_NO_NODE;
+ mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
+ BLK_MQ_F_BLOCKING;
+ mq->tag_set.nr_hw_queues = 1;
+ mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
+ mq->tag_set.driver_data = mq;
+
+ ret = blk_mq_alloc_tag_set(&mq->tag_set);
+ if (ret)
return ret;
- }
-
- blk_queue_prep_rq(mq->queue, mmc_prep_request);
-
- mmc_setup_queue(mq, card);
- mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
- host->index, subname ? subname : "");
-
- if (IS_ERR(mq->thread)) {
- ret = PTR_ERR(mq->thread);
- goto cleanup_queue;
+ mq->queue = blk_mq_init_queue(&mq->tag_set);
+ if (IS_ERR(mq->queue)) {
+ ret = PTR_ERR(mq->queue);
+ goto free_tag_set;
}
+ mq->queue->queue_lock = lock;
+ mq->queue->queuedata = mq;
+
return 0;
-cleanup_queue:
- blk_cleanup_queue(mq->queue);
+free_tag_set:
+ blk_mq_free_tag_set(&mq->tag_set);
+
return ret;
}
-void mmc_cleanup_queue(struct mmc_queue *mq)
-{
- struct request_queue *q = mq->queue;
- unsigned long flags;
+/* Set queue depth to get a reasonable value for q->nr_requests */
+#define MMC_QUEUE_DEPTH 64
- /* Make sure the queue isn't suspended, as that will deadlock */
- mmc_queue_resume(mq);
+static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock)
+{
+ struct mmc_host *host = card->host;
+ int q_depth;
+ int ret;
+
+ /*
+ * The queue depth for CQE must match the hardware because the request
+ * tag is used to index the hardware queue.
+ */
+ if (mq->use_cqe)
+ q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
+ else
+ q_depth = MMC_QUEUE_DEPTH;
+
+ ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
+ if (ret)
+ return ret;
- /* Then terminate our worker thread */
- kthread_stop(mq->thread);
+ blk_queue_rq_timeout(mq->queue, 60 * HZ);
- /* Empty the queue */
- spin_lock_irqsave(q->queue_lock, flags);
- q->queuedata = NULL;
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ mmc_setup_queue(mq, card);
- mq->card = NULL;
+ return 0;
}
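
The depth choice above is easy to sanity-check in isolation. A minimal userspace sketch (the card and host depths are invented figures, not values from this patch):

/* Sketch of the queue-depth selection done in mmc_mq_init(). */
#include <stdio.h>

#define MMC_QUEUE_DEPTH	64

static int pick_q_depth(int use_cqe, int cmdq_depth, int cqe_qdepth)
{
	/*
	 * CQE: the request tag indexes a hardware slot, so the block
	 * layer depth must not exceed what the hardware provides.
	 */
	if (use_cqe)
		return cmdq_depth < cqe_qdepth ? cmdq_depth : cqe_qdepth;
	return MMC_QUEUE_DEPTH;
}

int main(void)
{
	/* e.g. an eMMC advertising 16 queued tasks on a 32-slot host */
	printf("cqe depth: %d\n", pick_q_depth(1, 16, 32));	/* 16 */
	printf("legacy depth: %d\n", pick_q_depth(0, 0, 0));	/* 64 */
	return 0;
}
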
-EXPORT_SYMBOL(mmc_cleanup_queue);
/**
- * mmc_queue_suspend - suspend a MMC request queue
- * @mq: MMC queue to suspend
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue
+ * @lock: queue lock
+ * @subname: partition subname
*
- * Stop the block request queue, and wait for our thread to
- * complete any outstanding requests. This ensures that we
- * won't suspend while a request is being processed.
+ * Initialise a MMC card request queue.
*/
-void mmc_queue_suspend(struct mmc_queue *mq)
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock, const char *subname)
{
- struct request_queue *q = mq->queue;
- unsigned long flags;
+ struct mmc_host *host = card->host;
- if (!mq->suspended) {
- mq->suspended |= true;
+ mq->card = card;
- spin_lock_irqsave(q->queue_lock, flags);
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ mq->use_cqe = host->cqe_enabled;
- down(&mq->thread_sem);
- }
+ return mmc_mq_init(mq, card, lock);
+}
+
+void mmc_queue_suspend(struct mmc_queue *mq)
+{
+ blk_mq_quiesce_queue(mq->queue);
+
+ /*
+ * The host remains claimed while there are outstanding requests, so
+ * simply claiming and releasing here ensures there are none.
+ */
+ mmc_claim_host(mq->card->host);
+ mmc_release_host(mq->card->host);
}
-/**
- * mmc_queue_resume - resume a previously suspended MMC request queue
- * @mq: MMC queue to resume
- */
void mmc_queue_resume(struct mmc_queue *mq)
{
+ blk_mq_unquiesce_queue(mq->queue);
+}
+
+void mmc_cleanup_queue(struct mmc_queue *mq)
+{
struct request_queue *q = mq->queue;
- unsigned long flags;
- if (mq->suspended) {
- mq->suspended = false;
+ /*
+ * The legacy code handled the possibility of being suspended,
+ * so do that here too.
+ */
+ if (blk_queue_quiesced(q))
+ blk_mq_unquiesce_queue(q);
- up(&mq->thread_sem);
+ blk_cleanup_queue(q);
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
+ /*
+ * A request can be completed before the next request, potentially
+ * leaving a complete_work with nothing to do. Such a work item might
+ * still be queued at this point. Flush it.
+ */
+ flush_work(&mq->complete_work);
+
+ mq->card = NULL;
}
/*
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 547b457c4251..17e59d50b496 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -8,6 +8,20 @@
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
+enum mmc_issued {
+ MMC_REQ_STARTED,
+ MMC_REQ_BUSY,
+ MMC_REQ_FAILED_TO_START,
+ MMC_REQ_FINISHED,
+};
+
+enum mmc_issue_type {
+ MMC_ISSUE_SYNC,
+ MMC_ISSUE_DCMD,
+ MMC_ISSUE_ASYNC,
+ MMC_ISSUE_MAX,
+};
+
static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
{
return blk_mq_rq_to_pdu(rq);
@@ -20,7 +34,6 @@ static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr)
return blk_mq_rq_from_pdu(mqr);
}
-struct task_struct;
struct mmc_blk_data;
struct mmc_blk_ioc_data;
@@ -30,7 +43,6 @@ struct mmc_blk_request {
struct mmc_command cmd;
struct mmc_command stop;
struct mmc_data data;
- int retune_retry_done;
};
/**
@@ -52,28 +64,34 @@ enum mmc_drv_op {
struct mmc_queue_req {
struct mmc_blk_request brq;
struct scatterlist *sg;
- struct mmc_async_req areq;
enum mmc_drv_op drv_op;
int drv_op_result;
void *drv_op_data;
unsigned int ioc_count;
+ int retries;
};
struct mmc_queue {
struct mmc_card *card;
- struct task_struct *thread;
- struct semaphore thread_sem;
- bool suspended;
- bool asleep;
+ struct mmc_ctx ctx;
+ struct blk_mq_tag_set tag_set;
struct mmc_blk_data *blkdata;
struct request_queue *queue;
- /*
- * FIXME: this counter is not a very reliable way of keeping
- * track of how many requests that are ongoing. Switch to just
- * letting the block core keep track of requests and per-request
- * associated mmc_queue_req data.
- */
- int qcnt;
+ int in_flight[MMC_ISSUE_MAX];
+ unsigned int cqe_busy;
+#define MMC_CQE_DCMD_BUSY BIT(0)
+#define MMC_CQE_QUEUE_FULL BIT(1)
+ bool use_cqe;
+ bool recovery_needed;
+ bool in_recovery;
+ bool rw_wait;
+ bool waiting;
+ struct work_struct recovery_work;
+ wait_queue_head_t wait;
+ struct request *recovery_req;
+ struct request *complete_req;
+ struct mutex complete_lock;
+ struct work_struct complete_work;
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -84,4 +102,22 @@ extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *);
+void mmc_cqe_check_busy(struct mmc_queue *mq);
+void mmc_cqe_recovery_notifier(struct mmc_request *mrq);
+
+enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);
+
+static inline int mmc_tot_in_flight(struct mmc_queue *mq)
+{
+ return mq->in_flight[MMC_ISSUE_SYNC] +
+ mq->in_flight[MMC_ISSUE_DCMD] +
+ mq->in_flight[MMC_ISSUE_ASYNC];
+}
+
+static inline int mmc_cqe_qcnt(struct mmc_queue *mq)
+{
+ return mq->in_flight[MMC_ISSUE_DCMD] +
+ mq->in_flight[MMC_ISSUE_ASYNC];
+}
+
#endif
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index f664e9cbc9f8..75d317623852 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -53,6 +53,14 @@ static const struct mmc_fixup mmc_blk_fixups[] = {
MMC_QUIRK_BLK_NO_CMD23),
/*
+ * Some SD cards lock up while using CMD23 multiblock transfers.
+ */
+ MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("APUSD", CID_MANFID_APACER, 0x5048, add_quirk_sd,
+ MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
* Some MMC cards need longer data read timeout than indicated in CSD.
*/
MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 863f1dbbfc1b..3698b0576009 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -121,20 +121,18 @@ EXPORT_SYMBOL(mmc_gpio_request_ro);
void mmc_gpiod_request_cd_irq(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
- int ret, irq;
+ int irq = -EINVAL;
+ int ret;
if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio)
return;
- irq = gpiod_to_irq(ctx->cd_gpio);
-
/*
- * Even if gpiod_to_irq() returns a valid IRQ number, the platform might
- * still prefer to poll, e.g., because that IRQ number is already used
- * by another unit and cannot be shared.
+ * Do not use IRQ if the platform prefers to poll, e.g., because that
+ * IRQ number is already used by another unit and cannot be shared.
*/
- if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL)
- irq = -EINVAL;
+ if (!(host->caps & MMC_CAP_NEEDS_POLL))
+ irq = gpiod_to_irq(ctx->cd_gpio);
if (irq >= 0) {
if (!ctx->cd_gpio_isr)
@@ -307,3 +305,11 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
return 0;
}
EXPORT_SYMBOL(mmc_gpiod_request_ro);
+
+bool mmc_can_gpio_ro(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ return ctx->ro_gpio ? true : false;
+}
+EXPORT_SYMBOL(mmc_can_gpio_ro);
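
mmc_can_gpio_ro() simply reports whether a write-protect GPIO was bound to the slot, so a host driver can decide at probe time whether to keep its own read-only detection. A hedged sketch of a caller (my_host_setup_native_wp is invented for illustration):

/* Hypothetical probe-time check: prefer the slot's WP GPIO if present. */
if (mmc_can_gpio_ro(host->mmc))
	dev_dbg(dev, "write-protect state comes from the slot GPIO\n");
else
	my_host_setup_native_wp(host);	/* invented fallback helper */
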
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 567028c9219a..67bd3344dd03 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -81,6 +81,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
config MMC_SDHCI_PCI
tristate "SDHCI support on PCI bus"
depends on MMC_SDHCI && PCI
+ select MMC_CQHCI
help
This selects the PCI Secure Digital Host Controller Interface.
Most controllers found today are PCI devices.
@@ -132,6 +133,7 @@ config MMC_SDHCI_OF_ARASAN
depends on MMC_SDHCI_PLTFM
depends on OF
depends on COMMON_CLK
+ select MMC_CQHCI
help
This selects the Arasan Secure Digital Host Controller Interface
(SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC.
@@ -320,7 +322,7 @@ config MMC_SDHCI_BCM_KONA
config MMC_SDHCI_F_SDH30
tristate "SDHCI support for Fujitsu Semiconductor F_SDH30"
depends on MMC_SDHCI_PLTFM
- depends on OF
+ depends on OF || ACPI
help
This selects the Secure Digital Host Controller Interface (SDHCI)
Needed by some Fujitsu SoC for MMC / SD / SDIO support.
@@ -595,11 +597,8 @@ config MMC_TMIO
config MMC_SDHI
tristate "Renesas SDHI SD/SDIO controller support"
- depends on SUPERH || ARM || ARM64
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
select MMC_TMIO_CORE
- select MMC_SDHI_SYS_DMAC if (SUPERH || ARM)
- select MMC_SDHI_INTERNAL_DMAC if ARM64
help
This provides support for the SDHI SD/SDIO controller found in
Renesas SuperH, ARM and ARM64 based SoCs
@@ -607,6 +606,7 @@ config MMC_SDHI
config MMC_SDHI_SYS_DMAC
tristate "DMA for SDHI SD/SDIO controllers using SYS-DMAC"
depends on MMC_SDHI
+ default MMC_SDHI if (SUPERH || ARM)
help
This provides DMA support for SDHI SD/SDIO controllers
using SYS-DMAC via DMA Engine. This supports the controllers
@@ -616,6 +616,7 @@ config MMC_SDHI_INTERNAL_DMAC
tristate "DMA for SDHI SD/SDIO controllers using on-chip bus mastering"
depends on ARM64 || COMPILE_TEST
depends on MMC_SDHI
+ default MMC_SDHI if ARM64
help
This provides DMA support for SDHI SD/SDIO controllers
using on-chip bus mastering. This supports the controllers
@@ -838,14 +839,14 @@ config MMC_USDHI6ROL0
config MMC_REALTEK_PCI
tristate "Realtek PCI-E SD/MMC Card Interface Driver"
- depends on MFD_RTSX_PCI
+ depends on MISC_RTSX_PCI
help
Say Y here to include driver code to support SD/MMC card interface
of Realtek PCI-E card reader
config MMC_REALTEK_USB
tristate "Realtek USB SD/MMC Card Interface Driver"
- depends on MFD_RTSX_USB
+ depends on MISC_RTSX_USB
help
Say Y here to include driver code to support SD/MMC card interface
of Realtek RTS5129/39 series card reader
@@ -857,6 +858,19 @@ config MMC_SUNXI
This selects support for the SD/MMC Host Controller on
Allwinner sunxi SoCs.
+config MMC_CQHCI
+ tristate "Command Queue Host Controller Interface support"
+ depends on HAS_DMA
+ help
+ This selects the Command Queue Host Controller Interface (CQHCI)
+ support present in host controllers of Qualcomm Technologies, Inc.,
+ amongst others.
+ It supports eMMC devices that implement command queueing.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_TOSHIBA_PCI
tristate "Toshiba Type A SD/MMC Card Interface Driver"
depends on PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index a43cf0d5a5d3..84cd1388abc3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
-sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o
+sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o sdhci-pci-arasan.o
obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
@@ -39,12 +39,8 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o
-ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_SYS_DMAC)),y)
-obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_sys_dmac.o
-endif
-ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_INTERNAL_DMAC)),y)
-obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_internal_dmac.o
-endif
+obj-$(CONFIG_MMC_SDHI_SYS_DMAC) += renesas_sdhi_sys_dmac.o
+obj-$(CONFIG_MMC_SDHI_INTERNAL_DMAC) += renesas_sdhi_internal_dmac.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
@@ -92,6 +88,7 @@ obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
+obj-$(CONFIG_MMC_CQHCI) += cqhci.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index 63fe5091ca59..63d27589cd89 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -42,13 +42,11 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/clk.h>
-#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/types.h>
-#include <asm/io.h>
#include <linux/uaccess.h>
#define DRIVER_NAME "goldfish_mmc"
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
new file mode 100644
index 000000000000..159270e947cf
--- /dev/null
+++ b/drivers/mmc/host/cqhci.c
@@ -0,0 +1,1150 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/platform_device.h>
+#include <linux/ktime.h>
+
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+
+#include "cqhci.h"
+
+#define DCMD_SLOT 31
+#define NUM_SLOTS 32
+
+struct cqhci_slot {
+ struct mmc_request *mrq;
+ unsigned int flags;
+#define CQHCI_EXTERNAL_TIMEOUT BIT(0)
+#define CQHCI_COMPLETED BIT(1)
+#define CQHCI_HOST_CRC BIT(2)
+#define CQHCI_HOST_TIMEOUT BIT(3)
+#define CQHCI_HOST_OTHER BIT(4)
+};
+
+static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->desc_base + (tag * cq_host->slot_sz);
+}
+
+static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ u8 *desc = get_desc(cq_host, tag);
+
+ return desc + cq_host->task_desc_len;
+}
+
+static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_dma_base +
+ (cq_host->mmc->max_segs * tag *
+ cq_host->trans_desc_len);
+}
+
+static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_base +
+ (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
+}
+
+static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ u8 *link_temp;
+ dma_addr_t trans_temp;
+
+ link_temp = get_link_desc(cq_host, tag);
+ trans_temp = get_trans_desc_dma(cq_host, tag);
+
+ memset(link_temp, 0, cq_host->link_desc_len);
+ if (cq_host->link_desc_len > 8)
+ *(link_temp + 8) = 0;
+
+ if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
+ *link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
+ return;
+ }
+
+ *link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);
+
+ if (cq_host->dma64) {
+ __le64 *data_addr = (__le64 __force *)(link_temp + 4);
+
+ data_addr[0] = cpu_to_le64(trans_temp);
+ } else {
+ __le32 *data_addr = (__le32 __force *)(link_temp + 4);
+
+ data_addr[0] = cpu_to_le32(trans_temp);
+ }
+}
+
+static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
+{
+ cqhci_writel(cq_host, set, CQHCI_ISTE);
+ cqhci_writel(cq_host, set, CQHCI_ISGE);
+}
+
+#define DRV_NAME "cqhci"
+
+#define CQHCI_DUMP(f, x...) \
+ pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)
+
+static void cqhci_dumpregs(struct cqhci_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+
+ CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");
+
+ CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CAP),
+ cqhci_readl(cq_host, CQHCI_VER));
+ CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CFG),
+ cqhci_readl(cq_host, CQHCI_CTL));
+ CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_IS),
+ cqhci_readl(cq_host, CQHCI_ISTE));
+ CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_ISGE),
+ cqhci_readl(cq_host, CQHCI_IC));
+ CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TDLBA),
+ cqhci_readl(cq_host, CQHCI_TDLBAU));
+ CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TDBR),
+ cqhci_readl(cq_host, CQHCI_TCN));
+ CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_DQS),
+ cqhci_readl(cq_host, CQHCI_DPT));
+ CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TCLR),
+ cqhci_readl(cq_host, CQHCI_SSC1));
+ CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_SSC2),
+ cqhci_readl(cq_host, CQHCI_CRDCT));
+ CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_RMEM),
+ cqhci_readl(cq_host, CQHCI_TERRI));
+ CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CRI),
+ cqhci_readl(cq_host, CQHCI_CRA));
+
+ if (cq_host->ops->dumpregs)
+ cq_host->ops->dumpregs(mmc);
+ else
+ CQHCI_DUMP(": ===========================================\n");
+}
+
+/*
+ * The allocated descriptor table for task, link & transfer descriptors
+ * looks like:
+ * |----------|
+ * |task desc | |->|----------|
+ * |----------| | |trans desc|
+ * |link desc-|->| |----------|
+ * |----------| .
+ * . .
+ * no. of slots max-segs
+ * . |----------|
+ * |----------|
+ * The idea here is to create the [task+trans] table and mark and point
+ * each link descriptor at its slot's transfer descriptor table.
+ */
+static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
+{
+ int i;
+
+ /* task descriptor can be 64/128 bit irrespective of arch */
+ if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
+ cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
+ CQHCI_TASK_DESC_SZ, CQHCI_CFG);
+ cq_host->task_desc_len = 16;
+ } else {
+ cq_host->task_desc_len = 8;
+ }
+
+ /*
+ * A transfer descriptor may be 96 bits long instead of 128 bits,
+ * in which case ADMA expects the next valid descriptor at the
+ * 96th bit rather than the 128th.
+ */
+ if (cq_host->dma64) {
+ if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
+ cq_host->trans_desc_len = 12;
+ else
+ cq_host->trans_desc_len = 16;
+ cq_host->link_desc_len = 16;
+ } else {
+ cq_host->trans_desc_len = 8;
+ cq_host->link_desc_len = 8;
+ }
+
+ /* total size of a slot: 1 task & 1 transfer (link) */
+ cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
+
+ cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
+
+ cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
+ (cq_host->num_slots - 1);
+
+ pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
+ mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
+ cq_host->slot_sz);
+
+ /*
+ * Allocate one DMA-mapped chunk of memory for the task/link
+ * descriptors and another for the transfer descriptors, then
+ * point each slot's link descriptor at that slot's offset in
+ * the transfer descriptor table.
+ */
+ cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ cq_host->desc_size,
+ &cq_host->desc_dma_base,
+ GFP_KERNEL);
+ cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ cq_host->data_size,
+ &cq_host->trans_desc_dma_base,
+ GFP_KERNEL);
+ if (!cq_host->desc_base || !cq_host->trans_desc_base)
+ return -ENOMEM;
+
+ pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
+ mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
+ (unsigned long long)cq_host->desc_dma_base,
+ (unsigned long long)cq_host->trans_desc_dma_base);
+
+ for (i = 0; i < cq_host->num_slots; i++)
+ setup_trans_desc(cq_host, i);
+
+ return 0;
+}
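
To get a feel for the sizes computed above, the arithmetic can be replayed standalone. A hedged sketch (the 32-slot, 128-segment, 64-bit-DMA configuration is illustrative, not mandated by the driver):

/* Replays the descriptor-table sizing from cqhci_host_alloc_tdl(). */
#include <stdio.h>

int main(void)
{
	int task_desc_len = 8;		/* CQHCI_TASK_DESC_SZ_128 not set */
	int link_desc_len = 16;		/* 64-bit DMA */
	int trans_desc_len = 16;	/* 64-bit DMA, no short-descriptor quirk */
	int num_slots = 32, max_segs = 128;	/* assumed host limits */

	int slot_sz = task_desc_len + link_desc_len;
	int desc_size = slot_sz * num_slots;
	int data_size = trans_desc_len * max_segs * (num_slots - 1);

	/* prints: slot_sz=24 desc_size=768 data_size=63488 */
	printf("slot_sz=%d desc_size=%d data_size=%d\n",
	       slot_sz, desc_size, data_size);
	return 0;
}
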
+
+static void __cqhci_enable(struct cqhci_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+ u32 cqcfg;
+
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+
+ /* Configuration must not be changed while enabled */
+ if (cqcfg & CQHCI_ENABLE) {
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ }
+
+ cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);
+
+ if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
+ cqcfg |= CQHCI_DCMD;
+
+ if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
+ cqcfg |= CQHCI_TASK_DESC_SZ;
+
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
+ CQHCI_TDLBA);
+ cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
+ CQHCI_TDLBAU);
+
+ cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ cqcfg |= CQHCI_ENABLE;
+
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ mmc->cqe_on = true;
+
+ if (cq_host->ops->enable)
+ cq_host->ops->enable(mmc);
+
+ /* Ensure all writes are done before interrupts are enabled */
+ wmb();
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
+
+ cq_host->activated = true;
+}
+
+static void __cqhci_disable(struct cqhci_host *cq_host)
+{
+ u32 cqcfg;
+
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cq_host->mmc->cqe_on = false;
+
+ cq_host->activated = false;
+}
+
+int cqhci_suspend(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (cq_host->enabled)
+ __cqhci_disable(cq_host);
+
+ return 0;
+}
+EXPORT_SYMBOL(cqhci_suspend);
+
+int cqhci_resume(struct mmc_host *mmc)
+{
+ /* Re-enable is done upon first request */
+ return 0;
+}
+EXPORT_SYMBOL(cqhci_resume);
+
+static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int err;
+
+ if (cq_host->enabled)
+ return 0;
+
+ cq_host->rca = card->rca;
+
+ err = cqhci_host_alloc_tdl(cq_host);
+ if (err)
+ return err;
+
+ __cqhci_enable(cq_host);
+
+ cq_host->enabled = true;
+
+#ifdef DEBUG
+ cqhci_dumpregs(cq_host);
+#endif
+ return 0;
+}
+
+/* CQHCI is idle and should halt immediately, so set a small timeout */
+#define CQHCI_OFF_TIMEOUT 100
+
+static void cqhci_off(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ ktime_t timeout;
+ bool timed_out;
+ u32 reg;
+
+ if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
+ return;
+
+ if (cq_host->ops->disable)
+ cq_host->ops->disable(mmc, false);
+
+ cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
+
+ timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
+ while (1) {
+ timed_out = ktime_compare(ktime_get(), timeout) > 0;
+ reg = cqhci_readl(cq_host, CQHCI_CTL);
+ if ((reg & CQHCI_HALT) || timed_out)
+ break;
+ }
+
+ if (timed_out)
+ pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
+ else
+ pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
+
+ mmc->cqe_on = false;
+}
+
+static void cqhci_disable(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (!cq_host->enabled)
+ return;
+
+ cqhci_off(mmc);
+
+ __cqhci_disable(cq_host);
+
+ dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
+ cq_host->trans_desc_base,
+ cq_host->trans_desc_dma_base);
+
+ dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
+ cq_host->desc_base,
+ cq_host->desc_dma_base);
+
+ cq_host->trans_desc_base = NULL;
+ cq_host->desc_base = NULL;
+
+ cq_host->enabled = false;
+}
+
+static void cqhci_prep_task_desc(struct mmc_request *mrq,
+ u64 *data, bool intr)
+{
+ u32 req_flags = mrq->data->flags;
+
+ *data = CQHCI_VALID(1) |
+ CQHCI_END(1) |
+ CQHCI_INT(intr) |
+ CQHCI_ACT(0x5) |
+ CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
+ CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
+ CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
+ CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
+ CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
+ CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
+ CQHCI_BLK_COUNT(mrq->data->blocks) |
+ CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);
+
+ pr_debug("%s: cqhci: tag %d task descriptor 0x016%llx\n",
+ mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
+}
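
As a sanity check on the attribute encoding, the bitfields can be composed by hand. A hedged sketch for a hypothetical 8-block read at block address 0x1000 (the request parameters are made up; only the macros mirror the header):

/* Hand-rolled composition of a CQHCI task descriptor. */
#include <stdio.h>
#include <stdint.h>

#define CQHCI_VALID(x)		(((uint64_t)(x) & 1) << 0)
#define CQHCI_END(x)		(((uint64_t)(x) & 1) << 1)
#define CQHCI_INT(x)		(((uint64_t)(x) & 1) << 2)
#define CQHCI_ACT(x)		(((uint64_t)(x) & 0x7) << 3)
#define CQHCI_DATA_DIR(x)	(((uint64_t)(x) & 1) << 12)
#define CQHCI_BLK_COUNT(x)	(((uint64_t)(x) & 0xFFFF) << 16)
#define CQHCI_BLK_ADDR(x)	(((uint64_t)(x) & 0xFFFFFFFF) << 32)

int main(void)
{
	uint64_t d = CQHCI_VALID(1) | CQHCI_END(1) | CQHCI_INT(1) |
		     CQHCI_ACT(0x5) |		/* data task */
		     CQHCI_DATA_DIR(1) |	/* read */
		     CQHCI_BLK_COUNT(8) |
		     CQHCI_BLK_ADDR(0x1000);

	/* prints: task desc = 0x000010000008102f */
	printf("task desc = 0x%016llx\n", (unsigned long long)d);
	return 0;
}
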
+
+static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int sg_count;
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return -EINVAL;
+
+ sg_count = dma_map_sg(mmc_dev(host), data->sg,
+ data->sg_len,
+ (data->flags & MMC_DATA_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!sg_count) {
+ pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
+ return -ENOMEM;
+ }
+
+ return sg_count;
+}
+
+static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
+ bool dma64)
+{
+ __le32 *attr = (__le32 __force *)desc;
+
+ *attr = (CQHCI_VALID(1) |
+ CQHCI_END(end ? 1 : 0) |
+ CQHCI_INT(0) |
+ CQHCI_ACT(0x4) |
+ CQHCI_DAT_LENGTH(len));
+
+ if (dma64) {
+ __le64 *dataddr = (__le64 __force *)(desc + 4);
+
+ dataddr[0] = cpu_to_le64(addr);
+ } else {
+ __le32 *dataddr = (__le32 __force *)(desc + 4);
+
+ dataddr[0] = cpu_to_le32(addr);
+ }
+}
+
+static int cqhci_prep_tran_desc(struct mmc_request *mrq,
+ struct cqhci_host *cq_host, int tag)
+{
+ struct mmc_data *data = mrq->data;
+ int i, sg_count, len;
+ bool end = false;
+ bool dma64 = cq_host->dma64;
+ dma_addr_t addr;
+ u8 *desc;
+ struct scatterlist *sg;
+
+ sg_count = cqhci_dma_map(mrq->host, mrq);
+ if (sg_count < 0) {
+ pr_err("%s: %s: unable to map sg lists, %d\n",
+ mmc_hostname(mrq->host), __func__, sg_count);
+ return sg_count;
+ }
+
+ desc = get_trans_desc(cq_host, tag);
+
+ for_each_sg(data->sg, sg, sg_count, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if ((i+1) == sg_count)
+ end = true;
+ cqhci_set_tran_desc(desc, addr, len, end, dma64);
+ desc += cq_host->trans_desc_len;
+ }
+
+ return 0;
+}
+
+static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ u64 *task_desc = NULL;
+ u64 data = 0;
+ u8 resp_type;
+ u8 *desc;
+ __le64 *dataddr;
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u8 timing;
+
+ if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
+ resp_type = 0x0;
+ timing = 0x1;
+ } else {
+ if (mrq->cmd->flags & MMC_RSP_R1B) {
+ resp_type = 0x3;
+ timing = 0x0;
+ } else {
+ resp_type = 0x2;
+ timing = 0x1;
+ }
+ }
+
+ task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
+ memset(task_desc, 0, cq_host->task_desc_len);
+ data |= (CQHCI_VALID(1) |
+ CQHCI_END(1) |
+ CQHCI_INT(1) |
+ CQHCI_QBAR(1) |
+ CQHCI_ACT(0x5) |
+ CQHCI_CMD_INDEX(mrq->cmd->opcode) |
+ CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
+ *task_desc |= data;
+ desc = (u8 *)task_desc;
+ pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
+ mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
+ dataddr = (__le64 __force *)(desc + 4);
+ dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
+}
+
+static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ if (data) {
+ dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+}
+
+static inline int cqhci_tag(struct mmc_request *mrq)
+{
+ return mrq->cmd ? DCMD_SLOT : mrq->tag;
+}
+
+static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ int err = 0;
+ u64 data = 0;
+ u64 *task_desc = NULL;
+ int tag = cqhci_tag(mrq);
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ unsigned long flags;
+
+ if (!cq_host->enabled) {
+ pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
+ return -EINVAL;
+ }
+
+ /* First request after resume has to re-enable */
+ if (!cq_host->activated)
+ __cqhci_enable(cq_host);
+
+ if (!mmc->cqe_on) {
+ cqhci_writel(cq_host, 0, CQHCI_CTL);
+ mmc->cqe_on = true;
+ pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
+ if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
+ pr_err("%s: cqhci: CQE failed to exit halt state\n",
+ mmc_hostname(mmc));
+ }
+ if (cq_host->ops->enable)
+ cq_host->ops->enable(mmc);
+ }
+
+ if (mrq->data) {
+ task_desc = (__le64 __force *)get_desc(cq_host, tag);
+ cqhci_prep_task_desc(mrq, &data, 1);
+ *task_desc = cpu_to_le64(data);
+ err = cqhci_prep_tran_desc(mrq, cq_host, tag);
+ if (err) {
+ pr_err("%s: cqhci: failed to setup tx desc: %d\n",
+ mmc_hostname(mmc), err);
+ return err;
+ }
+ } else {
+ cqhci_prep_dcmd_desc(mmc, mrq);
+ }
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+
+ if (cq_host->recovery_halt) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ cq_host->slot[tag].mrq = mrq;
+ cq_host->slot[tag].flags = 0;
+
+ cq_host->qcnt += 1;
+
+ cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
+ if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
+ pr_debug("%s: cqhci: doorbell not set for tag %d\n",
+ mmc_hostname(mmc), tag);
+out_unlock:
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ if (err)
+ cqhci_post_req(mmc, mrq);
+
+ return err;
+}
+
+static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool notify)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (!cq_host->recovery_halt) {
+ cq_host->recovery_halt = true;
+ pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
+ wake_up(&cq_host->wait_queue);
+ if (notify && mrq->recovery_notifier)
+ mrq->recovery_notifier(mrq);
+ }
+}
+
+static unsigned int cqhci_error_flags(int error1, int error2)
+{
+ int error = error1 ? error1 : error2;
+
+ switch (error) {
+ case -EILSEQ:
+ return CQHCI_HOST_CRC;
+ case -ETIMEDOUT:
+ return CQHCI_HOST_TIMEOUT;
+ default:
+ return CQHCI_HOST_OTHER;
+ }
+}
+
+static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
+ int data_error)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ struct cqhci_slot *slot;
+ u32 terri;
+ int tag;
+
+ spin_lock(&cq_host->lock);
+
+ terri = cqhci_readl(cq_host, CQHCI_TERRI);
+
+ pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
+ mmc_hostname(mmc), status, cmd_error, data_error, terri);
+
+ /* Forget about errors when recovery has already been triggered */
+ if (cq_host->recovery_halt)
+ goto out_unlock;
+
+ if (!cq_host->qcnt) {
+ WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
+ mmc_hostname(mmc), status, cmd_error, data_error,
+ terri);
+ goto out_unlock;
+ }
+
+ if (CQHCI_TERRI_C_VALID(terri)) {
+ tag = CQHCI_TERRI_C_TASK(terri);
+ slot = &cq_host->slot[tag];
+ if (slot->mrq) {
+ slot->flags = cqhci_error_flags(cmd_error, data_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ }
+ }
+
+ if (CQHCI_TERRI_D_VALID(terri)) {
+ tag = CQHCI_TERRI_D_TASK(terri);
+ slot = &cq_host->slot[tag];
+ if (slot->mrq) {
+ slot->flags = cqhci_error_flags(data_error, cmd_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ }
+ }
+
+ if (!cq_host->recovery_halt) {
+ /*
+ * The only way to guarantee forward progress is to mark at
+ * least one task in error, so if none is indicated, pick one.
+ */
+ for (tag = 0; tag < NUM_SLOTS; tag++) {
+ slot = &cq_host->slot[tag];
+ if (!slot->mrq)
+ continue;
+ slot->flags = cqhci_error_flags(data_error, cmd_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ break;
+ }
+ }
+
+out_unlock:
+ spin_unlock(&cq_host->lock);
+}
+
+static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ struct mmc_request *mrq = slot->mrq;
+ struct mmc_data *data;
+
+ if (!mrq) {
+ WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
+ mmc_hostname(mmc), tag);
+ return;
+ }
+
+ /* No completions allowed during recovery */
+ if (cq_host->recovery_halt) {
+ slot->flags |= CQHCI_COMPLETED;
+ return;
+ }
+
+ slot->mrq = NULL;
+
+ cq_host->qcnt -= 1;
+
+ data = mrq->data;
+ if (data) {
+ if (data->error)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = data->blksz * data->blocks;
+ }
+
+ mmc_cqe_request_done(mmc, mrq);
+}
+
+irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
+ int data_error)
+{
+ u32 status;
+ unsigned long tag = 0, comp_status;
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ status = cqhci_readl(cq_host, CQHCI_IS);
+ cqhci_writel(cq_host, status, CQHCI_IS);
+
+ pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
+
+ if ((status & CQHCI_IS_RED) || cmd_error || data_error)
+ cqhci_error_irq(mmc, status, cmd_error, data_error);
+
+ if (status & CQHCI_IS_TCC) {
+ /* read TCN and complete the request */
+ comp_status = cqhci_readl(cq_host, CQHCI_TCN);
+ cqhci_writel(cq_host, comp_status, CQHCI_TCN);
+ pr_debug("%s: cqhci: TCN: 0x%08lx\n",
+ mmc_hostname(mmc), comp_status);
+
+ spin_lock(&cq_host->lock);
+
+ for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
+ /* complete the corresponding mrq */
+ pr_debug("%s: cqhci: completing tag %lu\n",
+ mmc_hostname(mmc), tag);
+ cqhci_finish_mrq(mmc, tag);
+ }
+
+ if (cq_host->waiting_for_idle && !cq_host->qcnt) {
+ cq_host->waiting_for_idle = false;
+ wake_up(&cq_host->wait_queue);
+ }
+
+ spin_unlock(&cq_host->lock);
+ }
+
+ if (status & CQHCI_IS_TCL)
+ wake_up(&cq_host->wait_queue);
+
+ if (status & CQHCI_IS_HAC)
+ wake_up(&cq_host->wait_queue);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(cqhci_irq);
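
cqhci_irq() is meant to be called from the host controller's own interrupt handler once any command/data errors have been decoded. A hedged sketch of the glue (the handler and status accessor names are invented; a real host would also derive cmd_error/data_error from its status bits):

/* Hypothetical host ISR forwarding CQE interrupts to cqhci_irq(). */
static irqreturn_t my_host_irq(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;
	u32 intmask = my_host_read_status(mmc);	/* invented accessor */

	if (mmc->cqe_on)
		return cqhci_irq(mmc, intmask, 0, 0);

	return IRQ_NONE;
}
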
+
+static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
+{
+ unsigned long flags;
+ bool is_idle;
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ is_idle = !cq_host->qcnt || cq_host->recovery_halt;
+ *ret = cq_host->recovery_halt ? -EBUSY : 0;
+ cq_host->waiting_for_idle = !is_idle;
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ return is_idle;
+}
+
+static int cqhci_wait_for_idle(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int ret;
+
+ wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));
+
+ return ret;
+}
+
+static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool *recovery_needed)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int tag = cqhci_tag(mrq);
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ unsigned long flags;
+ bool timed_out;
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ timed_out = slot->mrq == mrq;
+ if (timed_out) {
+ slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
+ cqhci_recovery_needed(mmc, mrq, false);
+ *recovery_needed = cq_host->recovery_halt;
+ }
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ if (timed_out) {
+ pr_err("%s: cqhci: timeout for tag %d\n",
+ mmc_hostname(mmc), tag);
+ cqhci_dumpregs(cq_host);
+ }
+
+ return timed_out;
+}
+
+static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
+{
+ return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
+}
+
+static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ bool ret;
+ u32 ctl;
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_TCL);
+
+ ctl = cqhci_readl(cq_host, CQHCI_CTL);
+ ctl |= CQHCI_CLEAR_ALL_TASKS;
+ cqhci_writel(cq_host, ctl, CQHCI_CTL);
+
+ wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
+ msecs_to_jiffies(timeout) + 1);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ ret = cqhci_tasks_cleared(cq_host);
+
+ if (!ret)
+ pr_debug("%s: cqhci: Failed to clear tasks\n",
+ mmc_hostname(mmc));
+
+ return ret;
+}
+
+static bool cqhci_halted(struct cqhci_host *cq_host)
+{
+ return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
+}
+
+static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ bool ret;
+ u32 ctl;
+
+ if (cqhci_halted(cq_host))
+ return true;
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_HAC);
+
+ ctl = cqhci_readl(cq_host, CQHCI_CTL);
+ ctl |= CQHCI_HALT;
+ cqhci_writel(cq_host, ctl, CQHCI_CTL);
+
+ wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
+ msecs_to_jiffies(timeout) + 1);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ ret = cqhci_halted(cq_host);
+
+ if (!ret)
+ pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+
+ return ret;
+}
+
+/*
+ * After halting we expect to be able to use the command line. We interpret the
+ * failure to halt to mean the data lines might still be in use (and the upper
+ * layers will need to send a STOP command), so we set the timeout based on a
+ * generous command timeout.
+ */
+#define CQHCI_START_HALT_TIMEOUT 5
+
+static void cqhci_recovery_start(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
+
+ WARN_ON(!cq_host->recovery_halt);
+
+ cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);
+
+ if (cq_host->ops->disable)
+ cq_host->ops->disable(mmc, true);
+
+ mmc->cqe_on = false;
+}
+
+static int cqhci_error_from_flags(unsigned int flags)
+{
+ if (!flags)
+ return 0;
+
+ /* CRC errors might indicate re-tuning so prefer to report that */
+ if (flags & CQHCI_HOST_CRC)
+ return -EILSEQ;
+
+ if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
+ return -ETIMEDOUT;
+
+ return -EIO;
+}
+
+static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
+{
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ struct mmc_request *mrq = slot->mrq;
+ struct mmc_data *data;
+
+ if (!mrq)
+ return;
+
+ slot->mrq = NULL;
+
+ cq_host->qcnt -= 1;
+
+ data = mrq->data;
+ if (data) {
+ data->bytes_xfered = 0;
+ data->error = cqhci_error_from_flags(slot->flags);
+ } else {
+ mrq->cmd->error = cqhci_error_from_flags(slot->flags);
+ }
+
+ mmc_cqe_request_done(cq_host->mmc, mrq);
+}
+
+static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
+{
+ int i;
+
+ for (i = 0; i < cq_host->num_slots; i++)
+ cqhci_recover_mrq(cq_host, i);
+}
+
+/*
+ * By now the command and data lines should be unused so there is no reason for
+ * CQHCI to take a long time to halt, but if it doesn't halt there could be
+ * problems clearing tasks, so be generous.
+ */
+#define CQHCI_FINISH_HALT_TIMEOUT 20
+
+/* CQHCI could be expected to clear its internal state pretty quickly */
+#define CQHCI_CLEAR_TIMEOUT 20
+
+static void cqhci_recovery_finish(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ unsigned long flags;
+ u32 cqcfg;
+ bool ok;
+
+ pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
+
+ WARN_ON(!cq_host->recovery_halt);
+
+ ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+
+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+ ok = false;
+
+ /*
+ * The specification contradicts itself: it says tasks cannot be
+ * cleared if CQHCI does not halt, that a CQHCI which does not halt
+ * should be disabled and re-enabled, and yet also that it must not
+ * be disabled before clearing tasks. Have a go anyway.
+ */
+ if (!ok) {
+ pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ cqcfg |= CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ /* Be sure that there are no tasks */
+ ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+ ok = false;
+ WARN_ON(!ok);
+ }
+
+ cqhci_recover_mrqs(cq_host);
+
+ WARN_ON(cq_host->qcnt);
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ cq_host->qcnt = 0;
+ cq_host->recovery_halt = false;
+ mmc->cqe_on = false;
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ /* Ensure all writes are done before interrupts are re-enabled */
+ wmb();
+
+ cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
+
+ pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
+}
+
+static const struct mmc_cqe_ops cqhci_cqe_ops = {
+ .cqe_enable = cqhci_enable,
+ .cqe_disable = cqhci_disable,
+ .cqe_request = cqhci_request,
+ .cqe_post_req = cqhci_post_req,
+ .cqe_off = cqhci_off,
+ .cqe_wait_for_idle = cqhci_wait_for_idle,
+ .cqe_timeout = cqhci_timeout,
+ .cqe_recovery_start = cqhci_recovery_start,
+ .cqe_recovery_finish = cqhci_recovery_finish,
+};
+
+struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
+{
+ struct cqhci_host *cq_host;
+ struct resource *cqhci_memres = NULL;
+
+ /* check and setup CMDQ interface */
+ cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cqhci_mem");
+ if (!cqhci_memres) {
+ dev_dbg(&pdev->dev, "CMDQ not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host)
+ return ERR_PTR(-ENOMEM);
+ cq_host->mmio = devm_ioremap(&pdev->dev,
+ cqhci_memres->start,
+ resource_size(cqhci_memres));
+ if (!cq_host->mmio) {
+ dev_err(&pdev->dev, "failed to remap cqhci regs\n");
+ return ERR_PTR(-EBUSY);
+ }
+ dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");
+
+ return cq_host;
+}
+EXPORT_SYMBOL(cqhci_pltfm_init);
+
+static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
+{
+ return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
+}
+
+static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
+{
+ u32 ver = cqhci_readl(cq_host, CQHCI_VER);
+
+ return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
+}
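
So a CQHCI_VER reading of, say, 0x0510 (an invented value) decodes as major 5 with minor digits 1 and 0, i.e. version 5.10. A quick standalone check:

/* Decode a sample CQHCI_VER register value. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ver = 0x510;	/* hypothetical register reading */
	unsigned int major = (ver >> 8) & 0xf;
	unsigned int minor = ((ver >> 4) & 0xf) * 10 + (ver & 0xf);

	printf("CQHCI version %u.%02u\n", major, minor);	/* 5.10 */
	return 0;
}
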
+
+int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
+ bool dma64)
+{
+ int err;
+
+ cq_host->dma64 = dma64;
+ cq_host->mmc = mmc;
+ cq_host->mmc->cqe_private = cq_host;
+
+ cq_host->num_slots = NUM_SLOTS;
+ cq_host->dcmd_slot = DCMD_SLOT;
+
+ mmc->cqe_ops = &cqhci_cqe_ops;
+
+ mmc->cqe_qdepth = NUM_SLOTS;
+ if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
+ mmc->cqe_qdepth -= 1;
+
+ cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
+ sizeof(*cq_host->slot), GFP_KERNEL);
+ if (!cq_host->slot) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ spin_lock_init(&cq_host->lock);
+
+ init_completion(&cq_host->halt_comp);
+ init_waitqueue_head(&cq_host->wait_queue);
+
+ pr_info("%s: CQHCI version %u.%02u\n",
+ mmc_hostname(mmc), cqhci_ver_major(cq_host),
+ cqhci_ver_minor(cq_host));
+
+ return 0;
+
+out_err:
+ pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
+ mmc_hostname(mmc), cqhci_ver_major(cq_host),
+ cqhci_ver_minor(cq_host), err);
+ return err;
+}
+EXPORT_SYMBOL(cqhci_init);
+
+MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
+MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
new file mode 100644
index 000000000000..9e68286a07b4
--- /dev/null
+++ b/drivers/mmc/host/cqhci.h
@@ -0,0 +1,240 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef LINUX_MMC_CQHCI_H
+#define LINUX_MMC_CQHCI_H
+
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/irqreturn.h>
+#include <asm/io.h>
+
+/* registers */
+/* version */
+#define CQHCI_VER 0x00
+#define CQHCI_VER_MAJOR(x) (((x) & GENMASK(11, 8)) >> 8)
+#define CQHCI_VER_MINOR1(x) (((x) & GENMASK(7, 4)) >> 4)
+#define CQHCI_VER_MINOR2(x) ((x) & GENMASK(3, 0))
+
+/* capabilities */
+#define CQHCI_CAP 0x04
+/* configuration */
+#define CQHCI_CFG 0x08
+#define CQHCI_DCMD 0x00001000
+#define CQHCI_TASK_DESC_SZ 0x00000100
+#define CQHCI_ENABLE 0x00000001
+
+/* control */
+#define CQHCI_CTL 0x0C
+#define CQHCI_CLEAR_ALL_TASKS 0x00000100
+#define CQHCI_HALT 0x00000001
+
+/* interrupt status */
+#define CQHCI_IS 0x10
+#define CQHCI_IS_HAC BIT(0)
+#define CQHCI_IS_TCC BIT(1)
+#define CQHCI_IS_RED BIT(2)
+#define CQHCI_IS_TCL BIT(3)
+
+#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED)
+
+/* interrupt status enable */
+#define CQHCI_ISTE 0x14
+
+/* interrupt signal enable */
+#define CQHCI_ISGE 0x18
+
+/* interrupt coalescing */
+#define CQHCI_IC 0x1C
+#define CQHCI_IC_ENABLE BIT(31)
+#define CQHCI_IC_RESET BIT(16)
+#define CQHCI_IC_ICCTHWEN BIT(15)
+#define CQHCI_IC_ICCTH(x) (((x) & 0x1F) << 8)
+#define CQHCI_IC_ICTOVALWEN BIT(7)
+#define CQHCI_IC_ICTOVAL(x) ((x) & 0x7F)
+
+/* task list base address */
+#define CQHCI_TDLBA 0x20
+
+/* task list base address upper */
+#define CQHCI_TDLBAU 0x24
+
+/* door-bell */
+#define CQHCI_TDBR 0x28
+
+/* task completion notification */
+#define CQHCI_TCN 0x2C
+
+/* device queue status */
+#define CQHCI_DQS 0x30
+
+/* device pending tasks */
+#define CQHCI_DPT 0x34
+
+/* task clear */
+#define CQHCI_TCLR 0x38
+
+/* send status config 1 */
+#define CQHCI_SSC1 0x40
+
+/* send status config 2 */
+#define CQHCI_SSC2 0x44
+
+/* response for dcmd */
+#define CQHCI_CRDCT 0x48
+
+/* response mode error mask */
+#define CQHCI_RMEM 0x50
+
+/* task error info */
+#define CQHCI_TERRI 0x54
+
+#define CQHCI_TERRI_C_INDEX(x) ((x) & GENMASK(5, 0))
+#define CQHCI_TERRI_C_TASK(x) (((x) & GENMASK(12, 8)) >> 8)
+#define CQHCI_TERRI_C_VALID(x) ((x) & BIT(15))
+#define CQHCI_TERRI_D_INDEX(x) (((x) & GENMASK(21, 16)) >> 16)
+#define CQHCI_TERRI_D_TASK(x) (((x) & GENMASK(28, 24)) >> 24)
+#define CQHCI_TERRI_D_VALID(x) ((x) & BIT(31))
+
+/* command response index */
+#define CQHCI_CRI 0x58
+
+/* command response argument */
+#define CQHCI_CRA 0x5C
+
+#define CQHCI_INT_ALL 0xF
+#define CQHCI_IC_DEFAULT_ICCTH 31
+#define CQHCI_IC_DEFAULT_ICTOVAL 1
+
+/* attribute fields */
+#define CQHCI_VALID(x) (((x) & 1) << 0)
+#define CQHCI_END(x) (((x) & 1) << 1)
+#define CQHCI_INT(x) (((x) & 1) << 2)
+#define CQHCI_ACT(x) (((x) & 0x7) << 3)
+
+/* data command task descriptor fields */
+#define CQHCI_FORCED_PROG(x) (((x) & 1) << 6)
+#define CQHCI_CONTEXT(x) (((x) & 0xF) << 7)
+#define CQHCI_DATA_TAG(x) (((x) & 1) << 11)
+#define CQHCI_DATA_DIR(x) (((x) & 1) << 12)
+#define CQHCI_PRIORITY(x) (((x) & 1) << 13)
+#define CQHCI_QBAR(x) (((x) & 1) << 14)
+#define CQHCI_REL_WRITE(x) (((x) & 1) << 15)
+#define CQHCI_BLK_COUNT(x) (((x) & 0xFFFF) << 16)
+#define CQHCI_BLK_ADDR(x) (((x) & 0xFFFFFFFF) << 32)
+
+/* direct command task descriptor fields */
+#define CQHCI_CMD_INDEX(x) (((x) & 0x3F) << 16)
+#define CQHCI_CMD_TIMING(x) (((x) & 1) << 22)
+#define CQHCI_RESP_TYPE(x) (((x) & 0x3) << 23)
+
+/* transfer descriptor fields */
+#define CQHCI_DAT_LENGTH(x) (((x) & 0xFFFF) << 16)
+#define CQHCI_DAT_ADDR_LO(x) (((x) & 0xFFFFFFFF) << 32)
+#define CQHCI_DAT_ADDR_HI(x) (((x) & 0xFFFFFFFF) << 0)
+
+struct cqhci_host_ops;
+struct mmc_host;
+struct cqhci_slot;
+
+struct cqhci_host {
+ const struct cqhci_host_ops *ops;
+ void __iomem *mmio;
+ struct mmc_host *mmc;
+
+ spinlock_t lock;
+
+ /* relative card address of device */
+ unsigned int rca;
+
+ /* 64 bit DMA */
+ bool dma64;
+ int num_slots;
+ int qcnt;
+
+ u32 dcmd_slot;
+ u32 caps;
+#define CQHCI_TASK_DESC_SZ_128 0x1
+
+ u32 quirks;
+#define CQHCI_QUIRK_SHORT_TXFR_DESC_SZ 0x1
+
+ bool enabled;
+ bool halted;
+ bool init_done;
+ bool activated;
+ bool waiting_for_idle;
+ bool recovery_halt;
+
+ size_t desc_size;
+ size_t data_size;
+
+ u8 *desc_base;
+
+ /* total descriptor size */
+ u8 slot_sz;
+
+ /* 64/128 bit depends on CQHCI_CFG */
+ u8 task_desc_len;
+
+ /* 64 bit on 32-bit arch, 128 bit on 64-bit */
+ u8 link_desc_len;
+
+ u8 *trans_desc_base;
+ /* same length as transfer descriptor */
+ u8 trans_desc_len;
+
+ dma_addr_t desc_dma_base;
+ dma_addr_t trans_desc_dma_base;
+
+ struct completion halt_comp;
+ wait_queue_head_t wait_queue;
+ struct cqhci_slot *slot;
+};
+
+struct cqhci_host_ops {
+ void (*dumpregs)(struct mmc_host *mmc);
+ void (*write_l)(struct cqhci_host *host, u32 val, int reg);
+ u32 (*read_l)(struct cqhci_host *host, int reg);
+ void (*enable)(struct mmc_host *mmc);
+ void (*disable)(struct mmc_host *mmc, bool recovery);
+};
+
+static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
+{
+ if (unlikely(host->ops->write_l))
+ host->ops->write_l(host, val, reg);
+ else
+ writel_relaxed(val, host->mmio + reg);
+}
+
+static inline u32 cqhci_readl(struct cqhci_host *host, int reg)
+{
+ if (unlikely(host->ops->read_l))
+ return host->ops->read_l(host, reg);
+ else
+ return readl_relaxed(host->mmio + reg);
+}
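
A host with quirky register behaviour can interpose on every access by filling in read_l/write_l; everything else falls through to the relaxed MMIO accessors. A hedged sketch of an override (the function name and the masked bit are invented for illustration):

/* Hypothetical accessor override for a controller that misreports
 * a capability bit; other registers take the default MMIO path.
 */
static u32 my_cqhci_read_l(struct cqhci_host *host, int reg)
{
	u32 val = readl_relaxed(host->mmio + reg);

	if (reg == CQHCI_CAP)
		val &= ~BIT(28);	/* clear a hypothetically broken bit */
	return val;
}

static const struct cqhci_host_ops my_cqhci_ops = {
	.read_l = my_cqhci_read_l,
};
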
+
+struct platform_device;
+
+irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
+ int data_error);
+int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64);
+struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev);
+int cqhci_suspend(struct mmc_host *mmc);
+int cqhci_resume(struct mmc_host *mmc);
+
+#endif
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 351330dfb954..8e363174f9d6 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -174,7 +174,7 @@ module_param(poll_loopcount, uint, S_IRUGO);
MODULE_PARM_DESC(poll_loopcount,
"Maximum polling loop count. Default = 32");
-static unsigned __initdata use_dma = 1;
+static unsigned use_dma = 1;
module_param(use_dma, uint, 0);
MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
@@ -496,8 +496,7 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
return ret;
}
-static void __init_or_module
-davinci_release_dma_channels(struct mmc_davinci_host *host)
+static void davinci_release_dma_channels(struct mmc_davinci_host *host)
{
if (!host->use_dma)
return;
@@ -506,7 +505,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
dma_release_channel(host->dma_rx);
}
-static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
+static int davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
if (IS_ERR(host->dma_tx)) {
@@ -1201,7 +1200,7 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
return 0;
}
-static int __init davinci_mmcsd_probe(struct platform_device *pdev)
+static int davinci_mmcsd_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct mmc_davinci_host *host = NULL;
@@ -1254,8 +1253,9 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
pdev->id_entry = match->data;
ret = mmc_of_parse(mmc);
if (ret) {
- dev_err(&pdev->dev,
- "could not parse of data: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "could not parse of data: %d\n", ret);
goto parse_fail;
}
} else {
@@ -1414,11 +1414,12 @@ static struct platform_driver davinci_mmcsd_driver = {
.pm = davinci_mmcsd_pm_ops,
.of_match_table = davinci_mmc_dt_ids,
},
+ .probe = davinci_mmcsd_probe,
.remove = __exit_p(davinci_mmcsd_remove),
.id_table = davinci_mmc_devtype,
};
-module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe);
+module_platform_driver(davinci_mmcsd_driver);
MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index e0862d3f65b3..32a6a228cd12 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1208,7 +1208,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
+ if (irq <= 0) {
dev_err(&pdev->dev, "failed to get interrupt resource.\n");
ret = -EINVAL;
goto free_host;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index e8a1bb1ae694..70b0df8b9c78 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -82,6 +82,10 @@ static unsigned int fmax = 515633;
* @qcom_fifo: enables qcom specific fifo pio read logic.
* @qcom_dml: enables qcom specific dma glue for dma transfers.
* @reversed_irq_handling: handle data irq before cmd irq.
+ * @mmcimask1: true if the variant has an MMCIMASK1 register.
+ * @start_err: bitmask identifying the STARTBITERR bit inside the MMCISTATUS
+ * register.
+ * @opendrain: bitmask identifying the OPENDRAIN bit inside the MMCIPOWER
+ * register.
*/
struct variant_data {
unsigned int clkreg;
@@ -111,6 +115,9 @@ struct variant_data {
bool qcom_fifo;
bool qcom_dml;
bool reversed_irq_handling;
+ bool mmcimask1;
+ u32 start_err;
+ u32 opendrain;
};
static struct variant_data variant_arm = {
@@ -120,6 +127,9 @@ static struct variant_data variant_arm = {
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.reversed_irq_handling = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_arm_extended_fifo = {
@@ -128,6 +138,9 @@ static struct variant_data variant_arm_extended_fifo = {
.datalength_bits = 16,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_arm_extended_fifo_hwfc = {
@@ -137,6 +150,9 @@ static struct variant_data variant_arm_extended_fifo_hwfc = {
.datalength_bits = 16,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_u300 = {
@@ -152,6 +168,9 @@ static struct variant_data variant_u300 = {
.signal_direction = true,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_nomadik = {
@@ -168,6 +187,9 @@ static struct variant_data variant_nomadik = {
.signal_direction = true,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_ux500 = {
@@ -190,6 +212,9 @@ static struct variant_data variant_ux500 = {
.busy_detect_flag = MCI_ST_CARDBUSY,
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_ux500v2 = {
@@ -214,6 +239,26 @@ static struct variant_data variant_ux500v2 = {
.busy_detect_flag = MCI_ST_CARDBUSY,
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
+};
+
+static struct variant_data variant_stm32 = {
+ .fifosize = 32 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg = MCI_CLK_ENABLE,
+ .clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .datalength_bits = 24,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
+ .st_sdio = true,
+ .st_clkdiv = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .f_max = 48000000,
+ .pwrreg_clkgate = true,
+ .pwrreg_nopower = true,
};
static struct variant_data variant_qcom = {
@@ -232,6 +277,9 @@ static struct variant_data variant_qcom = {
.explicit_mclk_control = true,
.qcom_fifo = true,
.qcom_dml = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
/* Busy detection for the ST Micro variant */
@@ -396,6 +444,7 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
void __iomem *base = host->base;
+ struct variant_data *variant = host->variant;
if (host->singleirq) {
unsigned int mask0 = readl(base + MMCIMASK0);
@@ -406,7 +455,10 @@ static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
writel(mask0, base + MMCIMASK0);
}
- writel(mask, base + MMCIMASK1);
+ if (variant->mmcimask1)
+ writel(mask, base + MMCIMASK1);
+
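+ /*
+ * Cache the mask so the IRQ path can consult it instead of
+ * reading MMCIMASK1, which not all variants have.
+ */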
+ host->mask1_reg = mask;
}
static void mmci_stop_data(struct mmci_host *host)
@@ -921,8 +973,9 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
return;
/* First check for errors */
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
- MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
+ host->variant->start_err |
+ MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
u32 remain, success;
/* Terminate the DMA transfer */
@@ -1286,7 +1339,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
status = readl(host->base + MMCISTATUS);
if (host->singleirq) {
- if (status & readl(host->base + MMCIMASK1))
+ if (status & host->mask1_reg)
mmci_pio_irq(irq, dev_id);
status &= ~MCI_IRQ1MASK;
@@ -1429,16 +1482,18 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
~MCI_ST_DATA2DIREN);
}
- if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
- if (host->hw_designer != AMBA_VENDOR_ST)
- pwr |= MCI_ROD;
- else {
- /*
- * The ST Micro variant use the ROD bit for something
- * else and only has OD (Open Drain).
- */
- pwr |= MCI_OD;
- }
+ if (variant->opendrain) {
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ pwr |= variant->opendrain;
+ } else {
+ /*
+ * If the variant cannot configure the pads on its own, we
+ * expect pinctrl to be able to do that for us.
+ */
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ pinctrl_select_state(host->pinctrl, host->pins_opendrain);
+ else
+ pinctrl_select_state(host->pinctrl, host->pins_default);
}
/*
@@ -1583,6 +1638,35 @@ static int mmci_probe(struct amba_device *dev,
host = mmc_priv(mmc);
host->mmc = mmc;
+ /*
+ * Some variants (STM32) have no opendrain bit; the pins can
+ * nevertheless be configured accordingly via pinctrl.
+ */
+ if (!variant->opendrain) {
+ host->pinctrl = devm_pinctrl_get(&dev->dev);
+ if (IS_ERR(host->pinctrl)) {
+ dev_err(&dev->dev, "failed to get pinctrl\n");
+ ret = PTR_ERR(host->pinctrl);
+ goto host_free;
+ }
+
+ host->pins_default = pinctrl_lookup_state(host->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(host->pins_default)) {
+ dev_err(mmc_dev(mmc), "Can't select default pins\n");
+ ret = PTR_ERR(host->pins_default);
+ goto host_free;
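+ /* Disable the tuning block whenever a full reset is issued. */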
+ }
+
+ host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
+ MMCI_PINCTRL_STATE_OPENDRAIN);
+ if (IS_ERR(host->pins_opendrain)) {
+ dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
+ ret = PTR_ERR(host->pins_opendrain);
+ goto host_free;
+ }
+ }
+
host->hw_designer = amba_manf(dev);
host->hw_revision = amba_rev(dev);
dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
@@ -1729,7 +1813,10 @@ static int mmci_probe(struct amba_device *dev,
spin_lock_init(&host->lock);
writel(0, host->base + MMCIMASK0);
- writel(0, host->base + MMCIMASK1);
+
+ if (variant->mmcimask1)
+ writel(0, host->base + MMCIMASK1);
+
writel(0xfff, host->base + MMCICLEAR);
/*
@@ -1809,6 +1896,7 @@ static int mmci_remove(struct amba_device *dev)
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
/*
* Undo pm_runtime_put() in probe. We use the _sync
@@ -1819,7 +1907,9 @@ static int mmci_remove(struct amba_device *dev)
mmc_remove_host(mmc);
writel(0, host->base + MMCIMASK0);
- writel(0, host->base + MMCIMASK1);
+
+ if (variant->mmcimask1)
+ writel(0, host->base + MMCIMASK1);
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
@@ -1951,6 +2041,11 @@ static const struct amba_id mmci_ids[] = {
.mask = 0xf0ffffff,
.data = &variant_ux500v2,
},
+ {
+ .id = 0x00880180,
+ .mask = 0x00ffffff,
+ .data = &variant_stm32,
+ },
/* Qualcomm variants */
{
.id = 0x00051180,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 4a8bef1aac8f..f91cdf7f6dae 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -192,6 +192,8 @@
#define NR_SG 128
+#define MMCI_PINCTRL_STATE_OPENDRAIN "opendrain"
+
struct clk;
struct variant_data;
struct dma_chan;
@@ -223,9 +225,13 @@ struct mmci_host {
u32 clk_reg;
u32 datactrl_reg;
u32 busy_status;
+ u32 mask1_reg;
bool vqmmc_enabled;
struct mmci_platform_data *plat;
struct variant_data *variant;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_opendrain;
u8 hw_designer;
u8 hw_revision:4;
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index b9dfea5d8193..f13f798d8506 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -35,6 +35,28 @@ struct renesas_sdhi_of_data {
unsigned short max_segs;
};
+struct tmio_mmc_dma {
+ enum dma_slave_buswidth dma_buswidth;
+ bool (*filter)(struct dma_chan *chan, void *arg);
+ void (*enable)(struct tmio_mmc_host *host, bool enable);
+ struct completion dma_dataend;
+ struct tasklet_struct dma_complete;
+};
+
+struct renesas_sdhi {
+ struct clk *clk;
+ struct clk *clk_cd;
+ struct tmio_mmc_data mmc_data;
+ struct tmio_mmc_dma dma_priv;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default, *pins_uhs;
+ void __iomem *scc_ctl;
+ u32 scc_tappos;
+};
+
+#define host_to_priv(host) \
+ container_of((host)->pdata, struct renesas_sdhi, mmc_data)
+
int renesas_sdhi_probe(struct platform_device *pdev,
const struct tmio_mmc_dma_ops *dma_ops);
int renesas_sdhi_remove(struct platform_device *pdev);
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index fcf7235d5742..80943fa07db6 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
@@ -46,19 +47,6 @@
#define SDHI_VER_GEN3_SD 0xcc10
#define SDHI_VER_GEN3_SDMMC 0xcd10
-#define host_to_priv(host) \
- container_of((host)->pdata, struct renesas_sdhi, mmc_data)
-
-struct renesas_sdhi {
- struct clk *clk;
- struct clk *clk_cd;
- struct tmio_mmc_data mmc_data;
- struct tmio_mmc_dma dma_priv;
- struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default, *pins_uhs;
- void __iomem *scc_ctl;
-};
-
static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
{
u32 val;
@@ -280,7 +268,7 @@ static unsigned int renesas_sdhi_init_tuning(struct tmio_mmc_host *host)
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, host->scc_tappos);
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos);
/* Read TAPNUM */
return (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL) >>
@@ -497,7 +485,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
- goto eprobe;
+ return ret;
}
/*
@@ -523,11 +511,9 @@ int renesas_sdhi_probe(struct platform_device *pdev,
"state_uhs");
}
- host = tmio_mmc_host_alloc(pdev);
- if (!host) {
- ret = -ENOMEM;
- goto eprobe;
- }
+ host = tmio_mmc_host_alloc(pdev, mmc_data);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
if (of_data) {
mmc_data->flags |= of_data->tmio_flags;
@@ -541,18 +527,18 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->bus_shift = of_data->bus_shift;
}
- host->dma = dma_priv;
host->write16_hook = renesas_sdhi_write16_hook;
host->clk_enable = renesas_sdhi_clk_enable;
host->clk_update = renesas_sdhi_clk_update;
host->clk_disable = renesas_sdhi_clk_disable;
host->multi_io_quirk = renesas_sdhi_multi_io_quirk;
+ host->dma_ops = dma_ops;
/* SDR speeds are only available on Gen2+ */
if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) {
/* card_busy caused issues on r8a73a4 (pre-Gen2) CD-less SDHI */
- host->card_busy = renesas_sdhi_card_busy;
- host->start_signal_voltage_switch =
+ host->ops.card_busy = renesas_sdhi_card_busy;
+ host->ops.start_signal_voltage_switch =
renesas_sdhi_start_signal_voltage_switch;
}
@@ -586,10 +572,14 @@ int renesas_sdhi_probe(struct platform_device *pdev,
/* All SDHI have SDIO status bits which must be 1 */
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
- ret = tmio_mmc_host_probe(host, mmc_data, dma_ops);
- if (ret < 0)
+ ret = renesas_sdhi_clk_enable(host);
+ if (ret)
goto efree;
+ ret = tmio_mmc_host_probe(host);
+ if (ret < 0)
+ goto edisclk;
+
/* One Gen2 SDHI incarnation does NOT have a CBSY bit */
if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50)
mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
@@ -606,7 +596,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
for (i = 0; i < of_data->taps_num; i++) {
if (taps[i].clk_rate == 0 ||
taps[i].clk_rate == host->mmc->f_max) {
- host->scc_tappos = taps->tap;
+ priv->scc_tappos = taps->tap;
hit = true;
break;
}
@@ -650,20 +640,24 @@ int renesas_sdhi_probe(struct platform_device *pdev,
eirq:
tmio_mmc_host_remove(host);
+edisclk:
+ renesas_sdhi_clk_disable(host);
efree:
tmio_mmc_host_free(host);
-eprobe:
+
return ret;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
int renesas_sdhi_remove(struct platform_device *pdev)
{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = platform_get_drvdata(pdev);
tmio_mmc_host_remove(host);
+ renesas_sdhi_clk_disable(host);
return 0;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 41cbe84c1d18..7c03cfead6f9 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -103,6 +103,8 @@ renesas_sdhi_internal_dmac_dm_write(struct tmio_mmc_host *host,
static void
renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
if (!host->chan_tx || !host->chan_rx)
return;
@@ -110,8 +112,8 @@ renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1,
INFO1_CLEAR);
- if (host->dma->enable)
- host->dma->enable(host, enable);
+ if (priv->dma_priv.enable)
+ priv->dma_priv.enable(host, enable);
}
static void
@@ -130,7 +132,9 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
static void
renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host) {
- tasklet_schedule(&host->dma_complete);
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ tasklet_schedule(&priv->dma_priv.dma_complete);
}
static void
@@ -220,10 +224,12 @@ static void
renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
/* Each value is set to non-zero to assume "enabling" each DMA */
host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
- tasklet_init(&host->dma_complete,
+ tasklet_init(&priv->dma_priv.dma_complete,
renesas_sdhi_internal_dmac_complete_tasklet_fn,
(unsigned long)host);
tasklet_init(&host->dma_issue,
@@ -255,6 +261,7 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
{ .soc_id = "r8a7795", .revision = "ES1.*" },
{ .soc_id = "r8a7795", .revision = "ES2.0" },
{ .soc_id = "r8a7796", .revision = "ES1.0" },
+ { .soc_id = "r8a77995", .revision = "ES1.0" },
{ /* sentinel */ }
};
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 9ab10436e4b8..82d757c480b2 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -117,11 +117,13 @@ MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host,
bool enable)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
if (!host->chan_tx || !host->chan_rx)
return;
- if (host->dma->enable)
- host->dma->enable(host, enable);
+ if (priv->dma_priv.enable)
+ priv->dma_priv.enable(host, enable);
}
static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
@@ -138,12 +140,15 @@ static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host)
{
- complete(&host->dma_dataend);
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ complete(&priv->dma_priv.dma_dataend);
}
static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
{
struct tmio_mmc_host *host = arg;
+ struct renesas_sdhi *priv = host_to_priv(host);
spin_lock_irq(&host->lock);
@@ -161,7 +166,7 @@ static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
spin_unlock_irq(&host->lock);
- wait_for_completion(&host->dma_dataend);
+ wait_for_completion(&priv->dma_priv.dma_dataend);
spin_lock_irq(&host->lock);
tmio_mmc_do_data_irq(host);
@@ -171,6 +176,7 @@ out:
static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
@@ -214,7 +220,7 @@ static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
DMA_CTRL_ACK);
if (desc) {
- reinit_completion(&host->dma_dataend);
+ reinit_completion(&priv->dma_priv.dma_dataend);
desc->callback = renesas_sdhi_sys_dmac_dma_callback;
desc->callback_param = host;
@@ -245,6 +251,7 @@ pio:
static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
@@ -293,7 +300,7 @@ static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
DMA_CTRL_ACK);
if (desc) {
- reinit_completion(&host->dma_dataend);
+ reinit_completion(&priv->dma_priv.dma_dataend);
desc->callback = renesas_sdhi_sys_dmac_dma_callback;
desc->callback_param = host;
@@ -341,7 +348,7 @@ static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
spin_lock_irq(&host->lock);
- if (host && host->data) {
+ if (host->data) {
if (host->data->flags & MMC_DATA_READ)
chan = host->chan_rx;
else
@@ -359,9 +366,11 @@ static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
/* We can only either use DMA for both Tx and Rx or not use it at all */
- if (!host->dma || (!host->pdev->dev.of_node &&
- (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
+ if (!host->pdev->dev.of_node &&
+ (!pdata->chan_priv_tx || !pdata->chan_priv_rx))
return;
if (!host->chan_tx && !host->chan_rx) {
@@ -378,7 +387,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
dma_cap_set(DMA_SLAVE, mask);
host->chan_tx = dma_request_slave_channel_compat(mask,
- host->dma->filter, pdata->chan_priv_tx,
+ priv->dma_priv.filter, pdata->chan_priv_tx,
&host->pdev->dev, "tx");
dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
@@ -389,7 +398,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = res->start +
(CTL_SD_DATA_PORT << host->bus_shift);
- cfg.dst_addr_width = host->dma->dma_buswidth;
+ cfg.dst_addr_width = priv->dma_priv.dma_buswidth;
if (!cfg.dst_addr_width)
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.src_addr = 0;
@@ -398,7 +407,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
goto ecfgtx;
host->chan_rx = dma_request_slave_channel_compat(mask,
- host->dma->filter, pdata->chan_priv_rx,
+ priv->dma_priv.filter, pdata->chan_priv_rx,
&host->pdev->dev, "rx");
dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
@@ -408,7 +417,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
cfg.direction = DMA_DEV_TO_MEM;
cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
- cfg.src_addr_width = host->dma->dma_buswidth;
+ cfg.src_addr_width = priv->dma_priv.dma_buswidth;
if (!cfg.src_addr_width)
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.dst_addr = 0;
@@ -420,7 +429,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
if (!host->bounce_buf)
goto ebouncebuf;
- init_completion(&host->dma_dataend);
+ init_completion(&priv->dma_priv.dma_dataend);
tasklet_init(&host->dma_issue,
renesas_sdhi_sys_dmac_issue_tasklet_fn,
(unsigned long)host);
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 0848dc0f882e..30bd8081307e 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -30,7 +30,7 @@
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/card.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <asm/unaligned.h>
struct realtek_pci_sdmmc {
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 76da1687ab37..78422079ecfa 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -31,7 +31,7 @@
#include <linux/scatterlist.h>
#include <linux/pm_runtime.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
#include <asm/unaligned.h>
#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index f7f157a62a4a..f77493604312 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1424,7 +1424,9 @@ static const struct file_operations s3cmci_fops_state = {
struct s3cmci_reg {
unsigned short addr;
unsigned char *name;
-} debug_regs[] = {
+};
+
+static const struct s3cmci_reg debug_regs[] = {
DBG_REG(CON),
DBG_REG(PRE),
DBG_REG(CMDARG),
@@ -1446,7 +1448,7 @@ struct s3cmci_reg {
static int s3cmci_regs_show(struct seq_file *seq, void *v)
{
struct s3cmci_host *host = seq->private;
- struct s3cmci_reg *rptr = debug_regs;
+ const struct s3cmci_reg *rptr = debug_regs;
for (; rptr->name; rptr++)
seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
@@ -1658,7 +1660,7 @@ static int s3cmci_probe(struct platform_device *pdev)
}
host->irq = platform_get_irq(pdev, 0);
- if (host->irq == 0) {
+ if (host->irq <= 0) {
dev_err(&pdev->dev, "failed to get interrupt resource.\n");
ret = -EINVAL;
goto probe_iounmap;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index b988997a1e80..4065da58789d 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -76,6 +76,7 @@ struct sdhci_acpi_slot {
size_t priv_size;
int (*probe_slot)(struct platform_device *, const char *, const char *);
int (*remove_slot)(struct platform_device *);
+ int (*setup_host)(struct platform_device *pdev);
};
struct sdhci_acpi_host {
@@ -96,14 +97,21 @@ static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
return c->slot && (c->slot->flags & flag);
}
+#define INTEL_DSM_HS_CAPS_SDR25 BIT(0)
+#define INTEL_DSM_HS_CAPS_DDR50 BIT(1)
+#define INTEL_DSM_HS_CAPS_SDR50 BIT(2)
+#define INTEL_DSM_HS_CAPS_SDR104 BIT(3)
+
enum {
INTEL_DSM_FNS = 0,
INTEL_DSM_V18_SWITCH = 3,
INTEL_DSM_V33_SWITCH = 4,
+ INTEL_DSM_HS_CAPS = 8,
};
struct intel_host {
u32 dsm_fns;
+ u32 hs_caps;
};
static const guid_t intel_dsm_guid =
@@ -152,6 +160,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
{
int err;
+ intel_host->hs_caps = ~0;
+
err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
if (err) {
pr_debug("%s: DSM not supported, error %d\n",
@@ -161,6 +171,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
pr_debug("%s: DSM function mask %#x\n",
mmc_hostname(mmc), intel_host->dsm_fns);
+
+ intel_dsm(intel_host, dev, INTEL_DSM_HS_CAPS, &intel_host->hs_caps);
}
static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
@@ -398,6 +410,26 @@ static int intel_probe_slot(struct platform_device *pdev, const char *hid,
return 0;
}
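+
+/* Clear any UHS caps that the Intel DSM capability mask reports as unsupported. */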
+static int intel_setup_host(struct platform_device *pdev)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR25))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR25;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR50))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR50;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_DDR50))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_DDR50;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR104))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR104;
+
+ return 0;
+}
+
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.chip = &sdhci_acpi_chip_int,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
@@ -409,6 +441,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
SDHCI_QUIRK2_STOP_WITH_TC |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -421,6 +454,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
.flags = SDHCI_ACPI_RUNTIME_PM,
.pm_caps = MMC_PM_KEEP_POWER,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -432,6 +466,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
SDHCI_QUIRK2_STOP_WITH_TC,
.caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -446,6 +481,83 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
.caps = MMC_CAP_NONREMOVABLE,
};
+/* AMD SDHCI DLL reset register. */
+#define SDHCI_AMD_RESET_DLL_REGISTER 0x908
+
+static int amd_select_drive_strength(struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type)
+{
+ return MMC_SET_DRIVER_TYPE_A;
+}
+
+static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host)
+{
+ /* The AMD platform requires this DLL reset sequence. */
+ sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER);
+ usleep_range(10, 20);
+ sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
+}
+
+/*
+ * On the AMD platform, the controller's tuning bit must first be
+ * disabled to bring it from HS200 down to HS mode, and then
+ * re-enabled when tuning up to HS400 mode.
+ */
+static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned int old_timing = host->timing;
+
+ sdhci_set_ios(mmc, ios);
+ if (old_timing == MMC_TIMING_MMC_HS200 &&
+ ios->timing == MMC_TIMING_MMC_HS)
+ sdhci_writew(host, 0x9, SDHCI_HOST_CONTROL2);
+ if (old_timing != MMC_TIMING_MMC_HS400 &&
+ ios->timing == MMC_TIMING_MMC_HS400) {
+ sdhci_writew(host, 0x80, SDHCI_HOST_CONTROL2);
+ sdhci_acpi_amd_hs400_dll(host);
+ }
+}
+
+static const struct sdhci_ops sdhci_acpi_ops_amd = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
+ .ops = &sdhci_acpi_ops_amd,
+};
+
+static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
+ const char *hid, const char *uid)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct sdhci_host *host = c->host;
+
+ sdhci_read_caps(host);
+ if (host->caps1 & SDHCI_SUPPORT_DDR50)
+ host->mmc->caps = MMC_CAP_1_8V_DDR;
+
+ if ((host->caps1 & SDHCI_SUPPORT_SDR104) &&
+ (host->mmc->caps & MMC_CAP_1_8V_DDR))
+ host->mmc->caps2 = MMC_CAP2_HS400_1_8V;
+
+ host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
+ host->mmc_host_ops.set_ios = amd_set_ios;
+ return 0;
+}
+
+static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
+ .chip = &sdhci_acpi_chip_amd,
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .probe_slot = sdhci_acpi_emmc_amd_probe_slot,
+};
+
struct sdhci_acpi_uid_slot {
const char *hid;
const char *uid;
@@ -469,6 +581,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "PNP0D40" },
{ "QCOM8051", NULL, &sdhci_acpi_slot_qcom_sd_3v },
{ "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd },
+ { "AMDI0040", NULL, &sdhci_acpi_slot_amd_emmc },
{ },
};
@@ -485,6 +598,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "PNP0D40" },
{ "QCOM8051" },
{ "QCOM8052" },
+ { "AMDI0040" },
{ },
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
@@ -566,6 +680,10 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
host->hw_name = "ACPI";
host->ops = &sdhci_acpi_ops_dflt;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ err = -EINVAL;
+ goto err_free;
+ }
host->ioaddr = devm_ioremap_nocache(dev, iomem->start,
resource_size(iomem));
@@ -609,10 +727,20 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
}
}
- err = sdhci_add_host(host);
+ err = sdhci_setup_host(host);
if (err)
goto err_free;
+ if (c->slot && c->slot->setup_host) {
+ err = c->slot->setup_host(pdev);
+ if (err)
+ goto err_cleanup;
+ }
+
+ err = __sdhci_add_host(host);
+ if (err)
+ goto err_cleanup;
+
if (c->use_runtime_pm) {
pm_runtime_set_active(dev);
pm_suspend_ignore_children(dev, 1);
@@ -625,6 +753,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
return 0;
+err_cleanup:
+ sdhci_cleanup_host(c->host);
err_free:
sdhci_free_host(c->host);
return err;
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 85140c9af581..cd2b5f643a15 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -193,6 +193,7 @@ struct pltfm_imx_data {
struct clk *clk_ipg;
struct clk *clk_ahb;
struct clk *clk_per;
+ unsigned int actual_clock;
enum {
NO_CMD_PENDING, /* no multiblock command pending */
MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
@@ -687,6 +688,20 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
return;
}
+ /* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS may not be set to 0. */
+ if (is_imx53_esdhc(imx_data)) {
+ /*
+ * According to the i.MX53 reference manual, if DLLCTRL[10] can
+ * be set, then the controller is eSDHCv3, else it is eSDHCv2.
+ */
+ val = readl(host->ioaddr + ESDHC_DLL_CTRL);
+ writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL);
+ temp = readl(host->ioaddr + ESDHC_DLL_CTRL);
+ writel(val, host->ioaddr + ESDHC_DLL_CTRL);
+ if (temp & BIT(10))
+ pre_div = 2;
+ }
+
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
| ESDHC_CLOCK_MASK);
@@ -1389,11 +1404,15 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
int ret;
ret = sdhci_runtime_suspend_host(host);
+ if (ret)
+ return ret;
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
if (!sdhci_sdio_irq_enabled(host)) {
+ imx_data->actual_clock = host->mmc->actual_clock;
+ esdhc_pltfm_set_clock(host, 0);
clk_disable_unprepare(imx_data->clk_per);
clk_disable_unprepare(imx_data->clk_ipg);
}
@@ -1409,31 +1428,34 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int err;
+ err = clk_prepare_enable(imx_data->clk_ahb);
+ if (err)
+ return err;
+
if (!sdhci_sdio_irq_enabled(host)) {
err = clk_prepare_enable(imx_data->clk_per);
if (err)
- return err;
+ goto disable_ahb_clk;
err = clk_prepare_enable(imx_data->clk_ipg);
if (err)
goto disable_per_clk;
+ esdhc_pltfm_set_clock(host, imx_data->actual_clock);
}
- err = clk_prepare_enable(imx_data->clk_ahb);
- if (err)
- goto disable_ipg_clk;
+
err = sdhci_runtime_resume_host(host);
if (err)
- goto disable_ahb_clk;
+ goto disable_ipg_clk;
return 0;
-disable_ahb_clk:
- clk_disable_unprepare(imx_data->clk_ahb);
disable_ipg_clk:
if (!sdhci_sdio_irq_enabled(host))
clk_disable_unprepare(imx_data->clk_ipg);
disable_per_clk:
if (!sdhci_sdio_irq_enabled(host))
clk_disable_unprepare(imx_data->clk_per);
+disable_ahb_clk:
+ clk_disable_unprepare(imx_data->clk_ahb);
return err;
}
#endif
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 0720ea717011..c33a5f7393bd 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -25,11 +25,13 @@
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
-#include "sdhci-pltfm.h"
#include <linux/of.h>
-#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+#include "cqhci.h"
+#include "sdhci-pltfm.h"
+#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
#define VENDOR_ENHANCED_STROBE BIT(0)
#define PHY_CLK_TOO_SLOW_HZ 400000
@@ -90,6 +92,7 @@ struct sdhci_arasan_data {
struct phy *phy;
bool is_phy_on;
+ bool has_cqe;
struct clk_hw sdcardclk_hw;
struct clk *sdcardclk;
@@ -262,6 +265,17 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
return -EINVAL;
}
+static void sdhci_arasan_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ }
+ sdhci_set_power_noreg(host, mode, vdd);
+}
+
static const struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -269,6 +283,7 @@ static const struct sdhci_ops sdhci_arasan_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_arasan_set_power,
};
static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
@@ -278,6 +293,62 @@ static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
+static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static void sdhci_arasan_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static void sdhci_arasan_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ }
+
+ sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops sdhci_arasan_cqhci_ops = {
+ .enable = sdhci_arasan_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_arasan_dumpregs,
+};
+
+static const struct sdhci_ops sdhci_arasan_cqe_ops = {
+ .set_clock = sdhci_arasan_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_arasan_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_arasan_set_power,
+ .irq = sdhci_arasan_cqhci_irq,
+};
+
+static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = {
+ .ops = &sdhci_arasan_cqe_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -297,6 +368,12 @@ static int sdhci_arasan_suspend(struct device *dev)
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
+ if (sdhci_arasan->has_cqe) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
ret = sdhci_suspend_host(host);
if (ret)
return ret;
@@ -353,7 +430,16 @@ static int sdhci_arasan_resume(struct device *dev)
sdhci_arasan->is_phy_on = true;
}
- return sdhci_resume_host(host);
+ ret = sdhci_resume_host(host);
+ if (ret) {
+ dev_err(dev, "Cannot resume host.\n");
+ return ret;
+ }
+
+ if (sdhci_arasan->has_cqe)
+ return cqhci_resume(host->mmc);
+
+ return 0;
}
#endif /* ! CONFIG_PM_SLEEP */
@@ -556,6 +642,49 @@ static void sdhci_arasan_unregister_sdclk(struct device *dev)
of_clk_del_provider(dev->of_node);
}
+static int sdhci_arasan_add_host(struct sdhci_arasan_data *sdhci_arasan)
+{
+ struct sdhci_host *host = sdhci_arasan->host;
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ if (!sdhci_arasan->has_cqe)
+ return sdhci_add_host(host);
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(host->mmc->parent,
+ sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR;
+ cq_host->ops = &sdhci_arasan_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64)
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
+
static int sdhci_arasan_probe(struct platform_device *pdev)
{
int ret;
@@ -566,9 +695,15 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_arasan_data *sdhci_arasan;
struct device_node *np = pdev->dev.of_node;
+ const struct sdhci_pltfm_data *pdata;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-5.1"))
+ pdata = &sdhci_arasan_cqe_pdata;
+ else
+ pdata = &sdhci_arasan_pdata;
+
+ host = sdhci_pltfm_init(pdev, pdata, sizeof(*sdhci_arasan));
- host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata,
- sizeof(*sdhci_arasan));
if (IS_ERR(host))
return PTR_ERR(host);
@@ -663,9 +798,11 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_arasan_hs400_enhanced_strobe;
host->mmc_host_ops.start_signal_voltage_switch =
sdhci_arasan_voltage_switch;
+ sdhci_arasan->has_cqe = true;
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
}
- ret = sdhci_add_host(host);
+ ret = sdhci_arasan_add_host(sdhci_arasan);
if (ret)
goto err_add_host;
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 1f424374bbbb..4ffa6b173a21 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -589,10 +589,18 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
+ u32 val;
+
sdhci_reset(host, mask);
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+ if (mask & SDHCI_RESET_ALL) {
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_EN;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+ }
}
/* The SCFG, Supplemental Configuration Unit, provides SoC specific
diff --git a/drivers/mmc/host/sdhci-pci-arasan.c b/drivers/mmc/host/sdhci-pci-arasan.c
new file mode 100644
index 000000000000..499f3205ec5c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci-arasan.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sdhci-pci-arasan.c - Driver for the Arasan PCI controller with
+ * an integrated PHY.
+ *
+ * Copyright (C) 2017 Arasan Chip Systems Inc.
+ *
+ * Author: Atul Garg <agarg@arasan.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "sdhci.h"
+#include "sdhci-pci.h"
+
+/* Extra registers for Arasan SD/SDIO/MMC Host Controller with PHY */
+#define PHY_ADDR_REG 0x300
+#define PHY_DAT_REG 0x304
+
+#define PHY_WRITE BIT(8)
+#define PHY_BUSY BIT(9)
+#define DATA_MASK 0xFF
+
+/* PHY Specific Registers */
+#define DLL_STATUS 0x00
+#define IPAD_CTRL1 0x01
+#define IPAD_CTRL2 0x02
+#define IPAD_STS 0x03
+#define IOREN_CTRL1 0x06
+#define IOREN_CTRL2 0x07
+#define IOPU_CTRL1 0x08
+#define IOPU_CTRL2 0x09
+#define ITAP_DELAY 0x0C
+#define OTAP_DELAY 0x0D
+#define STRB_SEL 0x0E
+#define CLKBUF_SEL 0x0F
+#define MODE_CTRL 0x11
+#define DLL_TRIM 0x12
+#define CMD_CTRL 0x20
+#define DATA_CTRL 0x21
+#define STRB_CTRL 0x22
+#define CLK_CTRL 0x23
+#define PHY_CTRL 0x24
+
+#define DLL_ENBL BIT(3)
+#define RTRIM_EN BIT(1)
+#define PDB_ENBL BIT(1)
+#define RETB_ENBL BIT(6)
+#define ODEN_CMD BIT(1)
+#define ODEN_DAT 0xFF
+#define REN_STRB BIT(0)
+#define REN_CMND BIT(1)
+#define REN_DATA 0xFF
+#define PU_CMD BIT(1)
+#define PU_DAT 0xFF
+#define ITAPDLY_EN BIT(0)
+#define OTAPDLY_EN BIT(0)
+#define OD_REL_CMD BIT(1)
+#define OD_REL_DAT 0xFF
+#define DLLTRM_ICP 0x8
+#define PDB_CMND BIT(0)
+#define PDB_DATA 0xFF
+#define PDB_STRB BIT(0)
+#define PDB_CLOCK BIT(0)
+#define CALDONE_MASK 0x10
+#define DLL_RDY_MASK 0x10
+#define MAX_CLK_BUF 0x7
+
+/* Mode Controls */
+#define ENHSTRB_MODE BIT(0)
+#define HS400_MODE BIT(1)
+#define LEGACY_MODE BIT(2)
+#define DDR50_MODE BIT(3)
+
+/*
+ * The controller has no dedicated mode bits for HS200/HS, so
+ * BIT(4) and BIT(5) are used for software-side mode tracking.
+ */
+#define HS200_MODE BIT(4)
+#define HISPD_MODE BIT(5)
+
+#define OTAPDLY(x) (((x) << 1) | OTAPDLY_EN)
+#define ITAPDLY(x) (((x) << 1) | ITAPDLY_EN)
+#define FREQSEL(x) (((x) << 5) | DLL_ENBL)
+#define IOPAD(x, y) ((x) | ((y) << 2))
+
+/* Arasan private data */
+struct arasan_host {
+ u32 chg_clk;
+};
+
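+/* Poll PHY_ADDR_REG until the mask bits clear, or time out after ~100us. */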
+static int arasan_phy_addr_poll(struct sdhci_host *host, u32 offset, u32 mask)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), 100);
+ bool failed;
+ u8 val = 0;
+
+ while (1) {
+ failed = ktime_after(ktime_get(), timeout);
+ val = sdhci_readw(host, PHY_ADDR_REG);
+ if (!(val & mask))
+ return 0;
+ if (failed)
+ return -EBUSY;
+ }
+}
+
+static int arasan_phy_write(struct sdhci_host *host, u8 data, u8 offset)
+{
+ sdhci_writew(host, data, PHY_DAT_REG);
+ sdhci_writew(host, (PHY_WRITE | offset), PHY_ADDR_REG);
+ return arasan_phy_addr_poll(host, PHY_ADDR_REG, PHY_BUSY);
+}
+
+static int arasan_phy_read(struct sdhci_host *host, u8 offset, u8 *data)
+{
+ int ret;
+
+ sdhci_writew(host, 0, PHY_DAT_REG);
+ sdhci_writew(host, offset, PHY_ADDR_REG);
+ ret = arasan_phy_addr_poll(host, PHY_ADDR_REG, PHY_BUSY);
+
+ /* Mask off the valid data bits */
+ *data = sdhci_readw(host, PHY_DAT_REG) & DATA_MASK;
+ return ret;
+}
+
+static int arasan_phy_sts_poll(struct sdhci_host *host, u32 offset, u32 mask)
+{
+ int ret;
+ ktime_t timeout = ktime_add_us(ktime_get(), 100);
+ bool failed;
+ u8 val = 0;
+
+ while (1) {
+ failed = ktime_after(ktime_get(), timeout);
+ ret = arasan_phy_read(host, offset, &val);
+ if (ret)
+ return -EBUSY;
+ else if (val & mask)
+ return 0;
+ if (failed)
+ return -EBUSY;
+ }
+}
+
+/* Initialize the Arasan PHY */
+static int arasan_phy_init(struct sdhci_host *host)
+{
+ int ret;
+ u8 val;
+
+ /* Program IOPADs and wait for calibration to be done */
+ if (arasan_phy_read(host, IPAD_CTRL1, &val) ||
+ arasan_phy_write(host, val | RETB_ENBL | PDB_ENBL, IPAD_CTRL1) ||
+ arasan_phy_read(host, IPAD_CTRL2, &val) ||
+ arasan_phy_write(host, val | RTRIM_EN, IPAD_CTRL2))
+ return -EBUSY;
+ ret = arasan_phy_sts_poll(host, IPAD_STS, CALDONE_MASK);
+ if (ret)
+ return -EBUSY;
+
+ /* Program CMD/Data lines */
+ if (arasan_phy_read(host, IOREN_CTRL1, &val) ||
+ arasan_phy_write(host, val | REN_CMND | REN_STRB, IOREN_CTRL1) ||
+ arasan_phy_read(host, IOPU_CTRL1, &val) ||
+ arasan_phy_write(host, val | PU_CMD, IOPU_CTRL1) ||
+ arasan_phy_read(host, CMD_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_CMND, CMD_CTRL) ||
+ arasan_phy_read(host, IOREN_CTRL2, &val) ||
+ arasan_phy_write(host, val | REN_DATA, IOREN_CTRL2) ||
+ arasan_phy_read(host, IOPU_CTRL2, &val) ||
+ arasan_phy_write(host, val | PU_DAT, IOPU_CTRL2) ||
+ arasan_phy_read(host, DATA_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_DATA, DATA_CTRL) ||
+ arasan_phy_read(host, STRB_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_STRB, STRB_CTRL) ||
+ arasan_phy_read(host, CLK_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_CLOCK, CLK_CTRL) ||
+ arasan_phy_read(host, CLKBUF_SEL, &val) ||
+ arasan_phy_write(host, val | MAX_CLK_BUF, CLKBUF_SEL) ||
+ arasan_phy_write(host, LEGACY_MODE, MODE_CTRL))
+ return -EBUSY;
+ return 0;
+}
+
+/* Set Arasan PHY for different modes */
+static int arasan_phy_set(struct sdhci_host *host, u8 mode, u8 otap,
+ u8 drv_type, u8 itap, u8 trim, u8 clk)
+{
+ u8 val;
+ int ret;
+
+ if (mode == HISPD_MODE || mode == HS200_MODE)
+ ret = arasan_phy_write(host, 0x0, MODE_CTRL);
+ else
+ ret = arasan_phy_write(host, mode, MODE_CTRL);
+ if (ret)
+ return ret;
+ if (mode == HS400_MODE || mode == HS200_MODE) {
+ ret = arasan_phy_read(host, IPAD_CTRL1, &val);
+ if (ret)
+ return ret;
+ ret = arasan_phy_write(host, IOPAD(val, drv_type), IPAD_CTRL1);
+ if (ret)
+ return ret;
+ }
+ if (mode == LEGACY_MODE) {
+ ret = arasan_phy_write(host, 0x0, OTAP_DELAY);
+ if (ret)
+ return ret;
+ ret = arasan_phy_write(host, 0x0, ITAP_DELAY);
+ } else {
+ ret = arasan_phy_write(host, OTAPDLY(otap), OTAP_DELAY);
+ if (ret)
+ return ret;
+ if (mode != HS200_MODE)
+ ret = arasan_phy_write(host, ITAPDLY(itap), ITAP_DELAY);
+ else
+ ret = arasan_phy_write(host, 0x0, ITAP_DELAY);
+ }
+ if (ret)
+ return ret;
+ if (mode != LEGACY_MODE) {
+ ret = arasan_phy_write(host, trim, DLL_TRIM);
+ if (ret)
+ return ret;
+ }
+ ret = arasan_phy_write(host, 0, DLL_STATUS);
+ if (ret)
+ return ret;
+ if (mode != LEGACY_MODE) {
+ ret = arasan_phy_write(host, FREQSEL(clk), DLL_STATUS);
+ if (ret)
+ return ret;
+ ret = arasan_phy_sts_poll(host, DLL_STATUS, DLL_RDY_MASK);
+ if (ret)
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int arasan_select_phy_clock(struct sdhci_host *host)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct arasan_host *arasan_host = sdhci_pci_priv(slot);
+ u8 clk;
+
+ if (arasan_host->chg_clk == host->mmc->ios.clock)
+ return 0;
+
+ arasan_host->chg_clk = host->mmc->ios.clock;
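+ /* clk selects the PHY DLL FREQSEL: 0 = 200 MHz, 2 = 100 MHz, 1 = 50 MHz */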
+ if (host->mmc->ios.clock == 200000000)
+ clk = 0x0;
+ else if (host->mmc->ios.clock == 100000000)
+ clk = 0x2;
+ else if (host->mmc->ios.clock == 50000000)
+ clk = 0x1;
+ else
+ clk = 0x0;
+
+ if (host->mmc_host_ops.hs400_enhanced_strobe) {
+ arasan_phy_set(host, ENHSTRB_MODE, 1, 0x0, 0x0,
+ DLLTRM_ICP, clk);
+ } else {
+ switch (host->mmc->ios.timing) {
+ case MMC_TIMING_LEGACY:
+ arasan_phy_set(host, LEGACY_MODE, 0x0, 0x0, 0x0,
+ 0x0, 0x0);
+ break;
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ arasan_phy_set(host, HISPD_MODE, 0x3, 0x0, 0x2,
+ DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_UHS_SDR104:
+ arasan_phy_set(host, HS200_MODE, 0x2,
+ host->mmc->ios.drv_type, 0x0,
+ DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ case MMC_TIMING_UHS_DDR50:
+ arasan_phy_set(host, DDR50_MODE, 0x1, 0x0,
+ 0x0, DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_HS400:
+ arasan_phy_set(host, HS400_MODE, 0x1,
+ host->mmc->ios.drv_type, 0xa,
+ DLLTRM_ICP, clk);
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int arasan_pci_probe_slot(struct sdhci_pci_slot *slot)
+{
+ int err;
+
+ slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | MMC_CAP_8_BIT_DATA;
+ err = arasan_phy_init(slot->host);
+ if (err)
+ return -ENODEV;
+ return 0;
+}
+
+static void arasan_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ sdhci_set_clock(host, clock);
+
+ /* Change phy settings for the new clock */
+ arasan_select_phy_clock(host);
+}
+
+static const struct sdhci_ops arasan_sdhci_pci_ops = {
+ .set_clock = arasan_sdhci_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+const struct sdhci_pci_fixes sdhci_arasan = {
+ .probe_slot = arasan_pci_probe_slot,
+ .ops = &arasan_sdhci_pci_ops,
+ .priv_size = sizeof(struct arasan_host),
+};
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 3e4f04fd5175..6d1a983e6227 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -30,17 +30,37 @@
#include <linux/mmc/sdhci-pci-data.h>
#include <linux/acpi.h>
+#include "cqhci.h"
+
#include "sdhci.h"
#include "sdhci-pci.h"
-static int sdhci_pci_enable_dma(struct sdhci_host *host);
static void sdhci_pci_hw_reset(struct sdhci_host *host);
#ifdef CONFIG_PM_SLEEP
-static int __sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
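+/* Wakeup is enabled only when pm_flags include KEEP_POWER and WAKE_SDIO_IRQ. */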
+static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
+{
+ mmc_pm_flag_t pm_flags = 0;
+ int i;
+
+ for (i = 0; i < chip->num_slots; i++) {
+ struct sdhci_pci_slot *slot = chip->slots[i];
+
+ if (slot)
+ pm_flags |= slot->host->mmc->pm_flags;
+ }
+
+ return device_set_wakeup_enable(&chip->pdev->dev,
+ (pm_flags & MMC_PM_KEEP_POWER) &&
+ (pm_flags & MMC_PM_WAKE_SDIO_IRQ));
+}
+
+static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
{
int i, ret;
+ sdhci_pci_init_wakeup(chip);
+
for (i = 0; i < chip->num_slots; i++) {
struct sdhci_pci_slot *slot = chip->slots[i];
struct sdhci_host *host;
@@ -56,9 +76,6 @@ static int __sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
ret = sdhci_suspend_host(host);
if (ret)
goto err_pci_suspend;
-
- if (host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ)
- sdhci_enable_irq_wakeups(host);
}
return 0;
@@ -69,52 +86,44 @@ err_pci_suspend:
return ret;
}
-static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
+int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
{
- mmc_pm_flag_t pm_flags = 0;
- int i;
+ struct sdhci_pci_slot *slot;
+ int i, ret;
for (i = 0; i < chip->num_slots; i++) {
- struct sdhci_pci_slot *slot = chip->slots[i];
+ slot = chip->slots[i];
+ if (!slot)
+ continue;
- if (slot)
- pm_flags |= slot->host->mmc->pm_flags;
+ ret = sdhci_resume_host(slot->host);
+ if (ret)
+ return ret;
}
- return device_init_wakeup(&chip->pdev->dev,
- (pm_flags & MMC_PM_KEEP_POWER) &&
- (pm_flags & MMC_PM_WAKE_SDIO_IRQ));
+ return 0;
}
-static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
+static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip)
{
int ret;
- ret = __sdhci_pci_suspend_host(chip);
+ ret = cqhci_suspend(chip->slots[0]->host->mmc);
if (ret)
return ret;
- sdhci_pci_init_wakeup(chip);
-
- return 0;
+ return sdhci_pci_suspend_host(chip);
}
-int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
+static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip)
{
- struct sdhci_pci_slot *slot;
- int i, ret;
-
- for (i = 0; i < chip->num_slots; i++) {
- slot = chip->slots[i];
- if (!slot)
- continue;
+ int ret;
- ret = sdhci_resume_host(slot->host);
- if (ret)
- return ret;
- }
+ ret = sdhci_pci_resume_host(chip);
+ if (ret)
+ return ret;
- return 0;
+ return cqhci_resume(chip->slots[0]->host->mmc);
}
#endif
@@ -166,8 +175,48 @@ static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
return 0;
}
+
+static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip)
+{
+ int ret;
+
+ ret = cqhci_suspend(chip->slots[0]->host->mmc);
+ if (ret)
+ return ret;
+
+ return sdhci_pci_runtime_suspend_host(chip);
+}
+
+static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip)
+{
+ int ret;
+
+ ret = sdhci_pci_runtime_resume_host(chip);
+ if (ret)
+ return ret;
+
+ return cqhci_resume(chip->slots[0]->host->mmc);
+}
#endif
+static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static void sdhci_pci_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
/*****************************************************************************\
* *
* Hardware specific quirk handling *
@@ -583,6 +632,18 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
.voltage_switch = sdhci_intel_voltage_switch,
};
+static const struct sdhci_ops sdhci_intel_glk_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_power = sdhci_intel_set_power,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_pci_hw_reset,
+ .voltage_switch = sdhci_intel_voltage_switch,
+ .irq = sdhci_cqhci_irq,
+};
+
static void byt_read_dsm(struct sdhci_pci_slot *slot)
{
struct intel_host *intel_host = sdhci_pci_priv(slot);
@@ -612,12 +673,80 @@ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
int ret = byt_emmc_probe_slot(slot);
+ slot->host->mmc->caps2 |= MMC_CAP2_CQE;
+
if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
slot->host->mmc_host_ops.hs400_enhanced_strobe =
intel_hs400_enhanced_strobe;
+ slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
+ }
+
+ return ret;
+}
+
+static void glk_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ /*
+ * CQE gets stuck if it sees Buffer Read Enable bit set, which can be
+ * the case after tuning, so ensure the buffer is drained.
+ */
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
}
+ sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops glk_cqhci_ops = {
+ .enable = glk_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_pci_dumpregs,
+};
+
+static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
+{
+ struct device *dev = &slot->chip->pdev->dev;
+ struct sdhci_host *host = slot->host;
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
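+ /* The CQE register block sits at a 0x200 offset from the SDHCI registers. */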
+ cq_host->mmio = host->ioaddr + 0x200;
+ cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ cq_host->ops = &glk_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64)
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
return ret;
}
@@ -699,11 +828,20 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
.allow_runtime_pm = true,
.probe_slot = glk_emmc_probe_slot,
+ .add_host = glk_emmc_add_host,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = sdhci_cqhci_suspend,
+ .resume = sdhci_cqhci_resume,
+#endif
+#ifdef CONFIG_PM
+ .runtime_suspend = sdhci_cqhci_runtime_suspend,
+ .runtime_resume = sdhci_cqhci_runtime_resume,
+#endif
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
SDHCI_QUIRK2_STOP_WITH_TC,
- .ops = &sdhci_intel_byt_ops,
+ .ops = &sdhci_intel_glk_ops,
.priv_size = sizeof(struct intel_host),
};
@@ -778,6 +916,8 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
break;
case INTEL_MRFLD_SDIO:
+ /* Advertise 2.0v for compatibility with the SDIO card's OCR */
+ slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
MMC_CAP_POWER_OFF_CARD;
break;
@@ -955,7 +1095,7 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip)
{
int i, ret;
- ret = __sdhci_pci_suspend_host(chip);
+ ret = sdhci_pci_suspend_host(chip);
if (ret)
return ret;
@@ -965,8 +1105,6 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip)
jmicron_enable_mmc(chip->slots[i]->host, 0);
}
- sdhci_pci_init_wakeup(chip);
-
return 0;
}
@@ -1306,6 +1444,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(O2, SDS1, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
+ SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
/* Generic SD host controller */
{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
@@ -1320,7 +1459,7 @@ MODULE_DEVICE_TABLE(pci, pci_ids);
* *
\*****************************************************************************/
-static int sdhci_pci_enable_dma(struct sdhci_host *host)
+int sdhci_pci_enable_dma(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot;
struct pci_dev *pdev;
@@ -1543,10 +1682,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
}
}
- host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+ host->mmc->pm_caps = MMC_PM_KEEP_POWER;
host->mmc->slotno = slotno;
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
+ if (device_can_wakeup(&pdev->dev))
+ host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+
if (slot->cd_idx >= 0) {
ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx,
slot->cd_override_level, 0, NULL);
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 0056f08a29cc..5cbcdc448f98 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -55,6 +55,9 @@
#define PCI_SUBDEVICE_ID_NI_7884 0x7884
+#define PCI_VENDOR_ID_ARASAN 0x16e6
+#define PCI_DEVICE_ID_ARASAN_PHY_EMMC 0x0670
+
/*
* PCI device class and mask
*/
@@ -170,11 +173,13 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
#ifdef CONFIG_PM_SLEEP
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
#endif
-
+int sdhci_pci_enable_dma(struct sdhci_host *host);
int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
#ifdef CONFIG_PM_SLEEP
int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
#endif
+extern const struct sdhci_pci_fixes sdhci_arasan;
+
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 8c0f88428556..14511526a3a8 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -82,6 +82,10 @@ static int sdhci_probe(struct platform_device *pdev)
host->hw_name = "sdhci";
host->ops = &sdhci_pltfm_ops;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ ret = -EINVAL;
+ goto err_host;
+ }
host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
sdhci = sdhci_priv(host);
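
This guard (repeated in the sunxi-mmc hunk further down) treats both zero and negative returns from platform_get_irq() as fatal. A minimal standalone model of the same check, with the IRQ source and error value faked for illustration:

    #include <stdio.h>

    /* Stand-in for platform_get_irq(): >0 is a valid IRQ, <=0 is an error. */
    static int platform_get_irq_model(void)
    {
        return 0;   /* model the failure case the new guards catch */
    }

    int main(void)
    {
        int irq = platform_get_irq_model();

        /* Same guard both hunks add: reject 0 and negatives up front. */
        if (irq <= 0) {
            fprintf(stderr, "no usable irq, bailing out with -EINVAL\n");
            return 22;  /* EINVAL, as the drivers map it */
        }
        printf("using irq %d\n", irq);
        return 0;
    }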
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 0842bbc2d7ad..4d0791f6ec23 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -230,7 +230,14 @@ static void xenon_set_power(struct sdhci_host *host, unsigned char mode,
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
}
+static void xenon_voltage_switch(struct sdhci_host *host)
+{
+ /* Wait for 5ms after setting the 1.8V signal enable bit */
+ usleep_range(5000, 5500);
+}
+
static const struct sdhci_ops sdhci_xenon_ops = {
+ .voltage_switch = xenon_voltage_switch,
.set_clock = sdhci_set_clock,
.set_power = xenon_set_power,
.set_bus_width = sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e9290a3439d5..070aff9c108f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1434,6 +1434,13 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
if (mode != MMC_POWER_OFF) {
switch (1 << vdd) {
case MMC_VDD_165_195:
+ /*
+ * Without a regulator, SDHCI does not support 2.0v
+ * so we only get here if the driver deliberately
+ * added the 2.0v range to ocr_avail. Map it to 1.8v
+ * for the purpose of turning on the power.
+ */
+ case MMC_VDD_20_21:
pwr = SDHCI_POWER_180;
break;
case MMC_VDD_29_30:
@@ -2821,25 +2828,33 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
* sdhci_disable_irq_wakeups() since it will be set by
* sdhci_enable_card_detection() or sdhci_init().
*/
-void sdhci_enable_irq_wakeups(struct sdhci_host *host)
+static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
+ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
+ SDHCI_WAKE_ON_INT;
+ u32 irq_val = 0;
+ u8 wake_val = 0;
u8 val;
- u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
- | SDHCI_WAKE_ON_INT;
- u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
- SDHCI_INT_CARD_INT;
- val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
- val |= mask ;
- /* Avoid fake wake up */
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
- val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
- irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
+ if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) {
+ wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
+ irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
}
+
+ wake_val |= SDHCI_WAKE_ON_INT;
+ irq_val |= SDHCI_INT_CARD_INT;
+
+ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+ val &= ~mask;
+ val |= wake_val;
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+
sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
+
+ host->irq_wake_enabled = !enable_irq_wake(host->irq);
+
+ return host->irq_wake_enabled;
}
-EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
@@ -2850,6 +2865,10 @@ static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
val &= ~mask;
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+
+ disable_irq_wake(host->irq);
+
+ host->irq_wake_enabled = false;
}
int sdhci_suspend_host(struct sdhci_host *host)
@@ -2858,15 +2877,14 @@ int sdhci_suspend_host(struct sdhci_host *host)
mmc_retune_timer_stop(host->mmc);
- if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ if (!device_may_wakeup(mmc_dev(host->mmc)) ||
+ !sdhci_enable_irq_wakeups(host)) {
host->ier = 0;
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
- } else {
- sdhci_enable_irq_wakeups(host);
- enable_irq_wake(host->irq);
}
+
return 0;
}
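
The reworked suspend path above keys everything off whether IRQ wakeups were actually armed: only then is the interrupt left live as a wake source, otherwise the host is fully quiesced. A compact userspace sketch of that decision flow, with device_may_wakeup() and enable_irq_wake() replaced by booleans:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins; the real driver uses device_may_wakeup() and enable_irq_wake(). */
    static bool may_wakeup = true;
    static bool wake_arm_ok = false;   /* pretend enable_irq_wake() failed */

    static bool enable_irq_wakeups(void)
    {
        return wake_arm_ok;            /* records host->irq_wake_enabled */
    }

    static int suspend_host(void)
    {
        /*
         * Same shape as the new sdhci_suspend_host(): if the device may
         * not wake us, or arming the wake IRQ fails, fall back to a full
         * quiesce (mask interrupts, free the handler).
         */
        if (!may_wakeup || !enable_irq_wakeups())
            puts("quiesce: mask IRQs and free the handler");
        else
            puts("leave IRQ armed as a wake source");
        return 0;
    }

    int main(void) { return suspend_host(); }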
@@ -2894,15 +2912,14 @@ int sdhci_resume_host(struct sdhci_host *host)
mmiowb();
}
- if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ if (host->irq_wake_enabled) {
+ sdhci_disable_irq_wakeups(host);
+ } else {
ret = request_threaded_irq(host->irq, sdhci_irq,
sdhci_thread_irq, IRQF_SHARED,
mmc_hostname(host->mmc), host);
if (ret)
return ret;
- } else {
- sdhci_disable_irq_wakeups(host);
- disable_irq_wake(host->irq);
}
sdhci_enable_card_detection(host);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 54bc444c317f..afab26fd70e6 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -484,6 +484,7 @@ struct sdhci_host {
bool bus_on; /* Bus power prevents runtime suspend */
bool preset_enabled; /* Preset is enabled */
bool pending_reset; /* Cmd/data reset is pending */
+ bool irq_wake_enabled; /* IRQ wakeup is enabled */
struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
@@ -718,7 +719,6 @@ void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable);
#ifdef CONFIG_PM
int sdhci_suspend_host(struct sdhci_host *host);
int sdhci_resume_host(struct sdhci_host *host);
-void sdhci_enable_irq_wakeups(struct sdhci_host *host);
int sdhci_runtime_suspend_host(struct sdhci_host *host);
int sdhci_runtime_resume_host(struct sdhci_host *host);
#endif
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 04ca0d33a521..485f7591fae4 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -10,9 +10,11 @@
* the Free Software Foundation, version 2 of the License.
*/
+#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/property.h>
#include <linux/clk.h>
@@ -146,7 +148,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- sdhci_get_of_property(pdev);
host->hw_name = "f_sdh30";
host->ops = &sdhci_f_sdh30_ops;
host->irq = irq;
@@ -158,25 +159,29 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
goto err;
}
- priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(priv->clk_iface)) {
- ret = PTR_ERR(priv->clk_iface);
- goto err;
- }
+ if (dev_of_node(dev)) {
+ sdhci_get_of_property(pdev);
- ret = clk_prepare_enable(priv->clk_iface);
- if (ret)
- goto err;
+ priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(priv->clk_iface)) {
+ ret = PTR_ERR(priv->clk_iface);
+ goto err;
+ }
- priv->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- goto err_clk;
- }
+ ret = clk_prepare_enable(priv->clk_iface);
+ if (ret)
+ goto err;
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- goto err_clk;
+ priv->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto err_clk;
+ }
/* init vendor specific regs */
ctrl = sdhci_readw(host, F_SDH30_AHB_CONFIG);
@@ -226,16 +231,27 @@ static int sdhci_f_sdh30_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
static const struct of_device_id f_sdh30_dt_ids[] = {
{ .compatible = "fujitsu,mb86s70-sdhci-3.0" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, f_sdh30_dt_ids);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id f_sdh30_acpi_ids[] = {
+ { "SCX0002" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, f_sdh30_acpi_ids);
+#endif
static struct platform_driver sdhci_f_sdh30_driver = {
.driver = {
.name = "f_sdh30",
- .of_match_table = f_sdh30_dt_ids,
+ .of_match_table = of_match_ptr(f_sdh30_dt_ids),
+ .acpi_match_table = ACPI_PTR(f_sdh30_acpi_ids),
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_f_sdh30_probe,
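
The f_sdh30 changes above follow a common dual-enumeration pattern: OF and ACPI match tables coexist, and DT-only resources (here, the clocks) are fetched only when an OF node is present. A toy model of that branch, with the device structure reduced to a single flag:

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the f_sdh30 probe split: clocks are DT-only resources, so
     * the driver now fetches them only when it was enumerated via a
     * device tree node; on ACPI there is no node and the block is skipped. */
    struct device_model { bool has_of_node; };

    static int probe_model(struct device_model *dev)
    {
        if (dev->has_of_node) {
            puts("DT path: parse properties, get and enable clocks");
            /* devm_clk_get(... "iface"/"core") + clk_prepare_enable() */
        } else {
            puts("ACPI path: firmware manages the clocks");
        }
        return 0;
    }

    int main(void)
    {
        struct device_model acpi_dev = { .has_of_node = false };
        return probe_model(&acpi_dev);
    }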
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 53fb18bb7bee..7bb00c68a756 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -916,7 +916,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
struct mmc_command *cmd = mrq->cmd;
- u32 opc = cmd->opcode;
+ u32 opc;
u32 mask = 0;
unsigned long flags;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index cc98355dbdb9..bad612d6f879 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -3,7 +3,7 @@
* (C) Copyright 2007-2011 Reuuimlla Technology Co., Ltd.
* (C) Copyright 2007-2011 Aaron Maoye <leafy.myeh@reuuimllatech.com>
* (C) Copyright 2013-2014 O2S GmbH <www.o2s.ch>
- * (C) Copyright 2013-2014 David Lanzend�rfer <david.lanzendoerfer@o2s.ch>
+ * (C) Copyright 2013-2014 David Lanzendörfer <david.lanzendoerfer@o2s.ch>
* (C) Copyright 2013-2014 Hans de Goede <hdegoede@redhat.com>
* (C) Copyright 2017 Sootech SA
*
@@ -1255,6 +1255,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
goto error_assert_reset;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ ret = -EINVAL;
+ goto error_assert_reset;
+ }
+
return devm_request_threaded_irq(&pdev->dev, host->irq, sunxi_mmc_irq,
sunxi_mmc_handle_manual_stop, 0, "sunxi-mmc", host);
@@ -1393,5 +1398,5 @@ module_platform_driver(sunxi_mmc_driver);
MODULE_DESCRIPTION("Allwinner's SD/MMC Card Controller Driver");
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("David Lanzend�rfer <david.lanzendoerfer@o2s.ch>");
+MODULE_AUTHOR("David Lanzendörfer <david.lanzendoerfer@o2s.ch>");
MODULE_ALIAS("platform:sunxi-mmc");
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 64b7e9f18361..43a2ea5cff24 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -92,14 +92,19 @@ static int tmio_mmc_probe(struct platform_device *pdev)
pdata->flags |= TMIO_MMC_HAVE_HIGH_REG;
- host = tmio_mmc_host_alloc(pdev);
- if (!host)
+ host = tmio_mmc_host_alloc(pdev, pdata);
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
goto cell_disable;
+ }
/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
host->bus_shift = resource_size(res) >> 10;
- ret = tmio_mmc_host_probe(host, pdata, NULL);
+ host->mmc->f_max = pdata->hclk;
+ host->mmc->f_min = pdata->hclk / 512;
+
+ ret = tmio_mmc_host_probe(host);
if (ret)
goto host_free;
@@ -128,15 +133,11 @@ out:
static int tmio_mmc_remove(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct tmio_mmc_host *host = platform_get_drvdata(pdev);
- if (mmc) {
- struct tmio_mmc_host *host = mmc_priv(mmc);
-
- tmio_mmc_host_remove(host);
- if (cell->disable)
- cell->disable(pdev);
- }
+ tmio_mmc_host_remove(host);
+ if (cell->disable)
+ cell->disable(pdev);
return 0;
}
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 3e6ff8921440..e7d651352dc9 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -112,12 +112,6 @@
struct tmio_mmc_data;
struct tmio_mmc_host;
-struct tmio_mmc_dma {
- enum dma_slave_buswidth dma_buswidth;
- bool (*filter)(struct dma_chan *chan, void *arg);
- void (*enable)(struct tmio_mmc_host *host, bool enable);
-};
-
struct tmio_mmc_dma_ops {
void (*start)(struct tmio_mmc_host *host, struct mmc_data *data);
void (*enable)(struct tmio_mmc_host *host, bool enable);
@@ -134,6 +128,7 @@ struct tmio_mmc_host {
struct mmc_request *mrq;
struct mmc_data *data;
struct mmc_host *mmc;
+ struct mmc_host_ops ops;
/* Callbacks for clock / power control */
void (*set_pwr)(struct platform_device *host, int state);
@@ -144,18 +139,15 @@ struct tmio_mmc_host {
struct scatterlist *sg_orig;
unsigned int sg_len;
unsigned int sg_off;
- unsigned long bus_shift;
+ unsigned int bus_shift;
struct platform_device *pdev;
struct tmio_mmc_data *pdata;
- struct tmio_mmc_dma *dma;
/* DMA support */
bool force_pio;
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
- struct completion dma_dataend;
- struct tasklet_struct dma_complete;
struct tasklet_struct dma_issue;
struct scatterlist bounce_sg;
u8 *bounce_buf;
@@ -174,7 +166,6 @@ struct tmio_mmc_host {
struct mutex ios_lock; /* protect set_ios() context */
bool native_hotplug;
bool sdio_irq_enabled;
- u32 scc_tappos;
/* Mandatory callback */
int (*clk_enable)(struct tmio_mmc_host *host);
@@ -185,9 +176,6 @@ struct tmio_mmc_host {
void (*clk_disable)(struct tmio_mmc_host *host);
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
- int (*card_busy)(struct mmc_host *mmc);
- int (*start_signal_voltage_switch)(struct mmc_host *mmc,
- struct mmc_ios *ios);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
void (*hw_reset)(struct tmio_mmc_host *host);
void (*prepare_tuning)(struct tmio_mmc_host *host, unsigned long tap);
@@ -207,11 +195,10 @@ struct tmio_mmc_host {
const struct tmio_mmc_dma_ops *dma_ops;
};
-struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
+struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
+ struct tmio_mmc_data *pdata);
void tmio_mmc_host_free(struct tmio_mmc_host *host);
-int tmio_mmc_host_probe(struct tmio_mmc_host *host,
- struct tmio_mmc_data *pdata,
- const struct tmio_mmc_dma_ops *dma_ops);
+int tmio_mmc_host_probe(struct tmio_mmc_host *host);
void tmio_mmc_host_remove(struct tmio_mmc_host *host);
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
@@ -240,26 +227,26 @@ int tmio_mmc_host_runtime_resume(struct device *dev);
static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
- return readw(host->ctl + (addr << host->bus_shift));
+ return ioread16(host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- readsw(host->ctl + (addr << host->bus_shift), buf, count);
+ ioread16_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host,
int addr)
{
- return readw(host->ctl + (addr << host->bus_shift)) |
- readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+ return ioread16(host->ctl + (addr << host->bus_shift)) |
+ ioread16(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr,
u32 *buf, int count)
{
- readsl(host->ctl + (addr << host->bus_shift), buf, count);
+ ioread32_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
@@ -270,26 +257,26 @@ static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
*/
if (host->write16_hook && host->write16_hook(host, addr))
return;
- writew(val, host->ctl + (addr << host->bus_shift));
+ iowrite16(val, host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- writesw(host->ctl + (addr << host->bus_shift), buf, count);
+ iowrite16_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
int addr, u32 val)
{
- writew(val & 0xffff, host->ctl + (addr << host->bus_shift));
- writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+ iowrite16(val & 0xffff, host->ctl + (addr << host->bus_shift));
+ iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
const u32 *buf, int count)
{
- writesl(host->ctl + (addr << host->bus_shift), buf, count);
+ iowrite32_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
#endif
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 583bf3262df5..33494241245a 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -806,7 +806,7 @@ static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (ret == 0)
set_bit(i, host->taps);
- mdelay(1);
+ usleep_range(1000, 1200);
}
ret = host->select_tuning(host);
@@ -926,20 +926,6 @@ static void tmio_mmc_done_work(struct work_struct *work)
tmio_mmc_finish_request(host);
}
-static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
-{
- if (!host->clk_enable)
- return -ENOTSUPP;
-
- return host->clk_enable(host);
-}
-
-static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
-{
- if (host->clk_disable)
- host->clk_disable(host);
-}
-
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
@@ -958,7 +944,7 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
* 100us were not enough. Is this the same 140us delay, as in
* tmio_mmc_set_ios()?
*/
- udelay(200);
+ usleep_range(200, 300);
}
/*
* It seems, VccQ should be switched on after Vcc, this is also what the
@@ -966,7 +952,7 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
*/
if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
ret = regulator_enable(mmc->supply.vqmmc);
- udelay(200);
+ usleep_range(200, 300);
}
if (ret < 0)
@@ -1059,7 +1045,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
/* Let things settle. delay taken from winCE driver */
- udelay(140);
+ usleep_range(140, 200);
if (PTR_ERR(host->mrq) == -EINTR)
dev_dbg(&host->pdev->dev,
"%s.%d: IOS interrupted: clk %u, mode %u",
@@ -1076,15 +1062,9 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = host->pdata;
- int ret = mmc_gpio_get_ro(mmc);
-
- if (ret >= 0)
- return ret;
-
- ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
- (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
- return ret;
+ return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+ (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
static int tmio_multi_io_quirk(struct mmc_card *card,
@@ -1098,7 +1078,7 @@ static int tmio_multi_io_quirk(struct mmc_card *card,
return blk_size;
}
-static struct mmc_host_ops tmio_mmc_ops = {
+static const struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
@@ -1145,19 +1125,45 @@ static void tmio_mmc_of_parse(struct platform_device *pdev,
pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}
-struct tmio_mmc_host*
-tmio_mmc_host_alloc(struct platform_device *pdev)
+struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
+ struct tmio_mmc_data *pdata)
{
struct tmio_mmc_host *host;
struct mmc_host *mmc;
+ struct resource *res;
+ void __iomem *ctl;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctl = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctl))
+ return ERR_CAST(ctl);
mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
if (!mmc)
- return NULL;
+ return ERR_PTR(-ENOMEM);
host = mmc_priv(mmc);
+ host->ctl = ctl;
host->mmc = mmc;
host->pdev = pdev;
+ host->pdata = pdata;
+ host->ops = tmio_mmc_ops;
+ mmc->ops = &host->ops;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret) {
+ host = ERR_PTR(ret);
+ goto free;
+ }
+
+ tmio_mmc_of_parse(pdev, pdata);
+
+ platform_set_drvdata(pdev, host);
+
+ return host;
+free:
+ mmc_free_host(mmc);
return host;
}
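
tmio_mmc_host_alloc() now reports failures as error-encoded pointers (ERR_PTR/ERR_CAST) rather than NULL, so callers can recover the specific errno. A self-contained model of that encoding, assuming the usual convention that the top 4095 address values carry errnos:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Userspace model of the kernel's error-pointer helpers. */
    static inline void *err_ptr(long err) { return (void *)(uintptr_t)err; }
    static inline long ptr_err(const void *p) { return (long)(intptr_t)p; }
    static inline int is_err(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    static void *host_alloc(int fail)
    {
        static int host = 1;           /* stand-in for a real allocation */

        if (fail)
            return err_ptr(-12);       /* -ENOMEM */
        return &host;
    }

    int main(void)
    {
        void *host = host_alloc(1);

        if (is_err(host)) {
            printf("alloc failed: %ld\n", ptr_err(host));
            return 1;
        }
        return 0;
    }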
@@ -1169,32 +1175,24 @@ void tmio_mmc_host_free(struct tmio_mmc_host *host)
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
-int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
- struct tmio_mmc_data *pdata,
- const struct tmio_mmc_dma_ops *dma_ops)
+int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
{
struct platform_device *pdev = _host->pdev;
+ struct tmio_mmc_data *pdata = _host->pdata;
struct mmc_host *mmc = _host->mmc;
- struct resource *res_ctl;
int ret;
u32 irq_mask = TMIO_MASK_CMD;
- tmio_mmc_of_parse(pdev, pdata);
+ /*
+ * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
+ * looping forever...
+ */
+ if (mmc->f_min == 0)
+ return -EINVAL;
if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
_host->write16_hook = NULL;
- res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_ctl)
- return -EINVAL;
-
- ret = mmc_of_parse(mmc);
- if (ret < 0)
- return ret;
-
- _host->pdata = pdata;
- platform_set_drvdata(pdev, mmc);
-
_host->set_pwr = pdata->set_pwr;
_host->set_clk_div = pdata->set_clk_div;
@@ -1202,15 +1200,11 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
if (ret < 0)
return ret;
- _host->ctl = devm_ioremap(&pdev->dev,
- res_ctl->start, resource_size(res_ctl));
- if (!_host->ctl)
- return -ENOMEM;
-
- tmio_mmc_ops.card_busy = _host->card_busy;
- tmio_mmc_ops.start_signal_voltage_switch =
- _host->start_signal_voltage_switch;
- mmc->ops = &tmio_mmc_ops;
+ if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
+ ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
+ if (ret)
+ return ret;
+ }
mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
@@ -1233,7 +1227,10 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
}
mmc->max_seg_size = mmc->max_req_size;
- _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
+ if (mmc_can_gpio_ro(mmc))
+ _host->ops.get_ro = mmc_gpio_get_ro;
+
+ _host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
!mmc_card_is_removable(mmc));
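
Making tmio_mmc_ops const works because each host now carries its own ops copy (host->ops above), so a single hook such as get_ro can be overridden per device without touching the shared template. An illustrative model, with the ops table shrunk to one function pointer:

    #include <stdio.h>

    struct ops { int (*get_ro)(void); };

    static int default_get_ro(void) { puts("register-based RO check"); return 0; }
    static int gpio_get_ro(void)    { puts("GPIO-based RO check");     return 0; }

    /* Shared, immutable template -- analogous to the now-const tmio_mmc_ops. */
    static const struct ops template_ops = { .get_ro = default_get_ro };

    struct host {
        struct ops ops;        /* per-instance copy, like host->ops */
        const struct ops *use; /* what mmc->ops points at */
    };

    int main(void)
    {
        struct host h;

        h.ops = template_ops;  /* copy the template... */
        h.use = &h.ops;        /* ...and point the consumer at the copy */

        /* Override a single hook for this host only (cf. mmc_can_gpio_ro). */
        h.ops.get_ro = gpio_get_ro;

        return h.use->get_ro();
    }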
@@ -1246,18 +1243,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
if (pdata->flags & TMIO_MMC_MIN_RCAR2)
_host->native_hotplug = true;
- if (tmio_mmc_clk_enable(_host) < 0) {
- mmc->f_max = pdata->hclk;
- mmc->f_min = mmc->f_max / 512;
- }
-
- /*
- * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
- * looping forever...
- */
- if (mmc->f_min == 0)
- return -EINVAL;
-
/*
* While using internal tmio hardware logic for card detection, we need
* to ensure it stays powered for it to work.
@@ -1293,7 +1278,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
INIT_WORK(&_host->done, tmio_mmc_done_work);
/* See if we also get DMA */
- _host->dma_ops = dma_ops;
tmio_mmc_request_dma(_host, pdata);
pm_runtime_set_active(&pdev->dev);
@@ -1307,14 +1291,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
- if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
- ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
- if (ret)
- goto remove_host;
-
- mmc_gpiod_request_cd_irq(mmc);
- }
-
return 0;
remove_host:
@@ -1343,16 +1319,27 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- tmio_mmc_clk_disable(host);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
#ifdef CONFIG_PM
+static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
+{
+ if (!host->clk_enable)
+ return -ENOTSUPP;
+
+ return host->clk_enable(host);
+}
+
+static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
+{
+ if (host->clk_disable)
+ host->clk_disable(host);
+}
+
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
@@ -1372,8 +1359,7 @@ static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
int tmio_mmc_host_runtime_resume(struct device *dev)
{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_reset(host);
tmio_mmc_clk_enable(host);
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 0806f72102c0..a85af236b44d 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -904,9 +904,6 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
if (ooblen % DOC_LAYOUT_OOB_SIZE)
return -EINVAL;
- if (from + len > mtd->size)
- return -EINVAL;
-
ops->oobretlen = 0;
ops->retlen = 0;
ret = 0;
@@ -990,36 +987,6 @@ err_in_read:
goto out;
}
-/**
- * doc_read - Read bytes from flash
- * @mtd: the device
- * @from: the offset from first block and first page, in bytes, aligned on page
- * size
- * @len: the number of bytes to read (must be a multiple of 4)
- * @retlen: the number of bytes actually read
- * @buf: the filled in buffer
- *
- * Reads flash memory pages. This function does not read the OOB chunk, but only
- * the page data.
- *
- * Returns 0 if read successful, of -EIO, -EINVAL if an error occurred
- */
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct mtd_oob_ops ops;
- size_t ret;
-
- memset(&ops, 0, sizeof(ops));
- ops.datbuf = buf;
- ops.len = len;
- ops.mode = MTD_OPS_AUTO_OOB;
-
- ret = doc_read_oob(mtd, from, &ops);
- *retlen = ops.retlen;
- return ret;
-}
-
static int doc_reload_bbt(struct docg3 *docg3)
{
int block = DOC_LAYOUT_BLOCK_BBT;
@@ -1471,8 +1438,6 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
if (len && ooblen &&
(len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
return -EINVAL;
- if (ofs + len > mtd->size)
- return -EINVAL;
ops->oobretlen = 0;
ops->retlen = 0;
@@ -1513,39 +1478,6 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
return ret;
}
-/**
- * doc_write - Write a buffer to the chip
- * @mtd: the device
- * @to: the offset from first block and first page, in bytes, aligned on page
- * size
- * @len: the number of bytes to write (must be a full page size, ie. 512)
- * @retlen: the number of bytes actually written (0 or 512)
- * @buf: the buffer to get bytes from
- *
- * Writes data to the chip.
- *
- * Returns 0 if write successful, -EIO if write error
- */
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct docg3 *docg3 = mtd->priv;
- int ret;
- struct mtd_oob_ops ops;
-
- doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len);
- ops.datbuf = (char *)buf;
- ops.len = len;
- ops.mode = MTD_OPS_PLACE_OOB;
- ops.oobbuf = NULL;
- ops.ooblen = 0;
- ops.ooboffs = 0;
-
- ret = doc_write_oob(mtd, to, &ops);
- *retlen = ops.retlen;
- return ret;
-}
-
static struct docg3 *sysfs_dev2docg3(struct device *dev,
struct device_attribute *attr)
{
@@ -1866,8 +1798,6 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
mtd->_erase = doc_erase;
- mtd->_read = doc_read;
- mtd->_write = doc_write;
mtd->_read_oob = doc_read_oob;
mtd->_write_oob = doc_write_oob;
mtd->_block_isbad = doc_block_isbad;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index dbe6a1de2bb8..a4e18f6aaa33 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -307,10 +307,18 @@ static int m25p_remove(struct spi_device *spi)
{
struct m25p *flash = spi_get_drvdata(spi);
+ spi_nor_restore(&flash->spi_nor);
+
/* Clean up MTD stuff. */
return mtd_device_unregister(&flash->spi_nor.mtd);
}
+static void m25p_shutdown(struct spi_device *spi)
+{
+ struct m25p *flash = spi_get_drvdata(spi);
+
+ spi_nor_restore(&flash->spi_nor);
+}
/*
* Do NOT add to this array without reading the following:
*
@@ -386,6 +394,7 @@ static struct spi_driver m25p80_driver = {
.id_table = m25p_ids,
.probe = m25p_probe,
.remove = m25p_remove,
+ .shutdown = m25p_shutdown,
/* REVISIT: many of these chips have deep power-down modes, which
* should clearly be entered on suspend() to minimize power use.
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index 8956b7dcc984..75f71d166fd6 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -68,6 +68,7 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
+ int ret;
spi_message_init(&message);
@@ -84,12 +85,16 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
mutex_lock(&flash->lock);
- spi_sync(flash->spi, &message);
+ ret = spi_sync(flash->spi, &message);
+
+ mutex_unlock(&flash->lock);
+
+ if (ret)
+ return ret;
if (retlen && message.actual_length > sizeof(command))
*retlen += message.actual_length - sizeof(command);
- mutex_unlock(&flash->lock);
return 0;
}
@@ -100,6 +105,7 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
+ int ret;
spi_message_init(&message);
@@ -117,12 +123,16 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
mutex_lock(&flash->lock);
- spi_sync(flash->spi, &message);
+ ret = spi_sync(flash->spi, &message);
+
+ mutex_unlock(&flash->lock);
+
+ if (ret)
+ return ret;
if (retlen && message.actual_length > sizeof(command))
*retlen += message.actual_length - sizeof(command);
- mutex_unlock(&flash->lock);
return 0;
}
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index f80e911b8843..28553c840d32 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -503,6 +503,11 @@ int add_mtd_device(struct mtd_info *mtd)
return -EEXIST;
BUG_ON(mtd->writesize == 0);
+
+ if (WARN_ON((!mtd->erasesize || !mtd->_erase) &&
+ !(mtd->flags & MTD_NO_ERASE)))
+ return -EINVAL;
+
mutex_lock(&mtd_table_mutex);
i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
@@ -1053,7 +1058,20 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
* representing the maximum number of bitflips that were corrected on
* any one ecc region (if applicable; zero otherwise).
*/
- ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ if (mtd->_read) {
+ ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ } else if (mtd->_read_oob) {
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = buf,
+ };
+
+ ret_code = mtd->_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ } else {
+ return -ENOTSUPP;
+ }
+
if (unlikely(ret_code < 0))
return ret_code;
if (mtd->ecc_strength == 0)
@@ -1068,11 +1086,25 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
*retlen = 0;
if (to < 0 || to >= mtd->size || len > mtd->size - to)
return -EINVAL;
- if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
+ if ((!mtd->_write && !mtd->_write_oob) ||
+ !(mtd->flags & MTD_WRITEABLE))
return -EROFS;
if (!len)
return 0;
ledtrig_mtd_activity();
+
+ if (!mtd->_write) {
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = (u8 *)buf,
+ };
+ int ret;
+
+ ret = mtd->_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+ }
+
return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);
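
Both mtd_read() and mtd_write() now fall back to the OOB path when a device only implements _read_oob/_write_oob, wrapping the plain request in an mtd_oob_ops. A standalone model of that dispatch (structures and error values simplified):

    #include <stddef.h>
    #include <stdio.h>

    struct oob_ops { size_t len; unsigned char *datbuf; size_t retlen; };

    struct dev {
        int (*read)(unsigned char *buf, size_t len, size_t *retlen);
        int (*read_oob)(struct oob_ops *ops);
    };

    static int oob_only_read(struct oob_ops *ops)
    {
        ops->retlen = ops->len;   /* pretend everything was read */
        return 0;
    }

    /* Same dispatch as the new mtd_read(): prefer the plain handler, else
     * wrap the request in an oob_ops and use the OOB-capable one. */
    static int dev_read(struct dev *d, unsigned char *buf, size_t len,
                        size_t *retlen)
    {
        if (d->read)
            return d->read(buf, len, retlen);
        if (d->read_oob) {
            struct oob_ops ops = { .len = len, .datbuf = buf };
            int ret = d->read_oob(&ops);

            *retlen = ops.retlen;
            return ret;
        }
        return -95; /* no handler; the patch returns -ENOTSUPP here */
    }

    int main(void)
    {
        struct dev d = { .read = NULL, .read_oob = oob_only_read };
        unsigned char buf[16];
        size_t got = 0;
        int ret = dev_read(&d, buf, sizeof(buf), &got);

        printf("ret=%d got=%zu\n", ret, got);
        return ret;
    }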
@@ -1114,7 +1146,7 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
if (!ops->oobbuf)
ops->ooblen = 0;
- if (offs < 0 || offs + ops->len >= mtd->size)
+ if (offs < 0 || offs + ops->len > mtd->size)
return -EINVAL;
if (ops->ooblen) {
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index be088bccd593..76cd21d1171b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -105,34 +105,17 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct mtd_part *part = mtd_to_part(mtd);
+ struct mtd_ecc_stats stats;
int res;
- if (from >= mtd->size)
- return -EINVAL;
- if (ops->datbuf && from + ops->len > mtd->size)
- return -EINVAL;
-
- /*
- * If OOB is also requested, make sure that we do not read past the end
- * of this partition.
- */
- if (ops->oobbuf) {
- size_t len, pages;
-
- len = mtd_oobavail(mtd, ops);
- pages = mtd_div_by_ws(mtd->size, mtd);
- pages -= mtd_div_by_ws(from, mtd);
- if (ops->ooboffs + ops->ooblen > pages * len)
- return -EINVAL;
- }
-
+ stats = part->parent->ecc_stats;
res = part->parent->_read_oob(part->parent, from + part->offset, ops);
- if (unlikely(res)) {
- if (mtd_is_bitflip(res))
- mtd->ecc_stats.corrected++;
- if (mtd_is_eccerr(res))
- mtd->ecc_stats.failed++;
- }
+ if (unlikely(mtd_is_eccerr(res)))
+ mtd->ecc_stats.failed +=
+ part->parent->ecc_stats.failed - stats.failed;
+ else
+ mtd->ecc_stats.corrected +=
+ part->parent->ecc_stats.corrected - stats.corrected;
return res;
}
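
part_read_oob() now snapshots the parent's ECC counters and propagates the exact delta instead of bumping its own counters by one per error code — important when a single call corrects several bitflips. A minimal model of snapshot-and-delta accounting (the eccerr/bitflip classification is reduced to a sign check here):

    #include <stdio.h>

    struct ecc_stats { unsigned int corrected, failed; };

    static struct ecc_stats parent_stats;

    static int parent_read(void)
    {
        parent_stats.corrected += 2;   /* pretend two bitflips were fixed */
        return 0;
    }

    int main(void)
    {
        struct ecc_stats part = { 0, 0 };

        /* Snapshot, call down, then add only the delta -- the same
         * pattern the new part_read_oob() uses. */
        struct ecc_stats before = parent_stats;
        int res = parent_read();

        if (res < 0 /* mtd_is_eccerr(res) in the real code */)
            part.failed += parent_stats.failed - before.failed;
        else
            part.corrected += parent_stats.corrected - before.corrected;

        printf("partition corrected=%u failed=%u\n",
               part.corrected, part.failed);
        return 0;
    }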
@@ -189,10 +172,6 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
{
struct mtd_part *part = mtd_to_part(mtd);
- if (to >= mtd->size)
- return -EINVAL;
- if (ops->datbuf && to + ops->len > mtd->size)
- return -EINVAL;
return part->parent->_write_oob(part->parent, to + part->offset, ops);
}
@@ -435,8 +414,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
parent->dev.parent;
slave->mtd.dev.of_node = part->of_node;
- slave->mtd._read = part_read;
- slave->mtd._write = part_write;
+ if (parent->_read)
+ slave->mtd._read = part_read;
+ if (parent->_write)
+ slave->mtd._write = part_write;
if (parent->_panic_write)
slave->mtd._panic_write = part_panic_write;
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index f07492c6f4b2..7eb0e1f4f980 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1223,8 +1223,9 @@ static int mtdswap_show(struct seq_file *s, void *data)
unsigned int max[MTDSWAP_TREE_CNT];
unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
uint64_t use_size;
- char *name[] = {"clean", "used", "low", "high", "dirty", "bitflip",
- "failing"};
+ static const char * const name[] = {
+ "clean", "used", "low", "high", "dirty", "bitflip", "failing"
+ };
mutex_lock(&d->mbd_dev->lock);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index bb48aafed9a2..e6b8c59f2c0d 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -315,6 +315,7 @@ config MTD_NAND_ATMEL
config MTD_NAND_PXA3xx
tristate "NAND support on PXA3xx and Armada 370/XP"
+ depends on !MTD_NAND_MARVELL
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
help
@@ -323,6 +324,18 @@ config MTD_NAND_PXA3xx
platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
platforms (7K, 8K) (NFCv2).
+config MTD_NAND_MARVELL
+ tristate "NAND controller support on Marvell boards"
+ depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
+ COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables the NAND flash controller driver for Marvell boards,
+ including:
+ - PXA3xx processors (NFCv1)
+ - 32-bit Armada platforms (XP, 37x, 38x, 39x) (NFCv2)
+ - 64-bit Armada platforms (7K, 8K) (NFCv2)
+
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"
depends on ARCH_LPC32XX
@@ -376,9 +389,7 @@ config MTD_NAND_GPMI_NAND
Enables NAND Flash support for IMX23, IMX28 or IMX6.
The GPMI controller is very powerful, with the help of BCH
module, it can do the hardware ECC. The GPMI supports several
- NAND flashs at the same time. The GPMI may conflicts with other
- block, such as SD card. So pay attention to it when you enable
- the GPMI.
+ NAND flashes at the same time.
config MTD_NAND_BRCMNAND
tristate "Broadcom STB NAND controller"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 118a1349aad3..921634ba400c 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
+obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o
obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index 90a71a56bc23..b2f00b398490 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -841,6 +841,8 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
struct atmel_nand *nand = to_atmel_nand(chip);
int ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
if (ret)
return ret;
@@ -857,7 +859,7 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
@@ -881,6 +883,8 @@ static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
if (ret)
return ret;
@@ -1000,7 +1004,7 @@ static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
* to the non-optimized one.
*/
if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
raw);
@@ -1178,7 +1182,6 @@ static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand)
chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
- chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
return 0;
}
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 5655dca6ce43..87bbd177b3e5 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -572,6 +572,8 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -582,10 +584,10 @@ static int bf5xx_nand_write_page_raw(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/*
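
The recurring conversion in these NAND hunks replaces open-coded cmdfunc()/waitfunc() sequences with nand_prog_page_begin_op()/nand_prog_page_end_op(), where the end op also folds the status byte into an errno. A rough standalone model of that split, with the command transport faked:

    #include <stdio.h>

    /* Begin issues SEQIN + address (optionally with data); end issues
     * PAGEPROG and converts the status byte into an errno. The status
     * bit matches the standard ONFI fail bit; everything else is mocked. */
    #define STATUS_FAIL 0x01

    static unsigned char chip_status = 0x00;  /* set to 0x01 to model failure */

    static void prog_page_begin(int page, const unsigned char *buf, int len)
    {
        printf("SEQIN page %d, %d data bytes\n", page, buf ? len : 0);
    }

    static int prog_page_end(void)
    {
        printf("PAGEPROG + read status\n");
        return (chip_status & STATUS_FAIL) ? -5 /* -EIO */ : 0;
    }

    int main(void)
    {
        unsigned char data[4] = { 0 };

        prog_page_begin(7, data, sizeof(data));
        /* ...drivers push OOB bytes between begin and end, as above... */
        return prog_page_end();
    }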
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index e0eb51d8c012..c28fd2bc1a84 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -1071,7 +1071,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
return;
brcmnand_set_wp(ctrl, wp);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ nand_status_op(chip, NULL);
/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
ret = bcmnand_ctrl_poll_status(ctrl,
NAND_CTRL_RDY |
@@ -1453,7 +1453,7 @@ static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
/* At FC_BYTES boundary, switch to next column */
if (host->last_byte > 0 && offs == 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, addr, -1);
+ nand_change_read_column_op(chip, addr, NULL, 0, false);
ret = ctrl->flash_cache[offs];
break;
@@ -1681,7 +1681,7 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
int ret;
if (!buf) {
- buf = chip->buffers->databuf;
+ buf = chip->data_buf;
/* Invalidate page cache */
chip->pagebuf = -1;
}
@@ -1689,7 +1689,6 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
sas = mtd->oobsize / chip->ecc.steps;
/* read without ecc for verification */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page);
if (ret)
return ret;
@@ -1763,7 +1762,7 @@ try_dmaread:
err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
addr);
/* erased page bitflips corrected */
- if (err > 0)
+ if (err >= 0)
return err;
}
@@ -1793,6 +1792,8 @@ static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct brcmnand_host *host = nand_get_controller_data(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
return brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
}
@@ -1804,6 +1805,8 @@ static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
brcmnand_set_ecc_enabled(host, 0);
ret = brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
@@ -1909,8 +1912,10 @@ static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int brcmnand_write_page_raw(struct mtd_info *mtd,
@@ -1920,10 +1925,12 @@ static int brcmnand_write_page_raw(struct mtd_info *mtd,
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
brcmnand_set_ecc_enabled(host, 0);
brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
@@ -2193,16 +2200,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
if (ctrl->nand_version >= 0x0702)
tmp |= ACC_CONTROL_RD_ERASED;
tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
- if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
- /*
- * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
- * errors
- */
- if (has_flash_dma(ctrl))
- tmp &= ~ACC_CONTROL_PREFETCH;
- else
- tmp |= ACC_CONTROL_PREFETCH;
- }
+ if (ctrl->features & BRCMNAND_HAS_PREFETCH)
+ tmp &= ~ACC_CONTROL_PREFETCH;
+
nand_writereg(ctrl, offs, tmp);
return 0;
@@ -2230,6 +2230,9 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
nand_set_controller_data(chip, host);
mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
host->cs);
+ if (!mtd->name)
+ return -ENOMEM;
+
mtd->owner = THIS_MODULE;
mtd->dev.parent = &pdev->dev;
@@ -2369,12 +2372,11 @@ static int brcmnand_resume(struct device *dev)
list_for_each_entry(host, &ctrl->host_list, node) {
struct nand_chip *chip = &host->chip;
- struct mtd_info *mtd = nand_to_mtd(chip);
brcmnand_save_restore_cs_config(host, 1);
/* Reset the chip, required by some chips after power-up */
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(chip);
}
return 0;
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index bc558c438a57..567ff972d5fc 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -353,23 +353,15 @@ static void cafe_nand_bug(struct mtd_info *mtd)
static int cafe_nand_write_oob(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- int status = 0;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
/* Don't use -- use nand_read_oob_std for now */
static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/**
* cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
@@ -391,7 +383,7 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
cafe_readl(cafe, NAND_ECC_RESULT),
cafe_readl(cafe, NAND_ECC_SYN01));
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
@@ -549,13 +541,13 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
{
struct cafe_priv *cafe = nand_get_controller_data(chip);
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/* Set up ECC autogeneration */
cafe->ctl2 |= (1<<30);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
@@ -613,7 +605,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
uint32_t ctrl;
int err = 0;
int old_dma;
- struct nand_buffers *nbuf;
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
@@ -661,7 +652,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
/* Enable the following for a flash based bad block table */
cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
- cafe->nand.options = NAND_OWN_BUFFERS;
if (skipbbt) {
cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -731,32 +721,20 @@ static int cafe_nand_probe(struct pci_dev *pdev,
if (err)
goto out_irq;
- cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- &cafe->dmaaddr, GFP_KERNEL);
+ cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112,
+ &cafe->dmaaddr, GFP_KERNEL);
if (!cafe->dmabuf) {
err = -ENOMEM;
goto out_irq;
}
- cafe->nand.buffers = nbuf = (void *)cafe->dmabuf + 2112;
/* Set up DMA address */
- cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
- if (sizeof(cafe->dmaaddr) > 4)
- /* Shift in two parts to shut the compiler up */
- cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
- else
- cafe_writel(cafe, 0, NAND_DMA_ADDR1);
+ cafe_writel(cafe, lower_32_bits(cafe->dmaaddr), NAND_DMA_ADDR0);
+ cafe_writel(cafe, upper_32_bits(cafe->dmaaddr), NAND_DMA_ADDR1);
cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
- /* this driver does not need the @ecccalc and @ecccode */
- nbuf->ecccalc = NULL;
- nbuf->ecccode = NULL;
- nbuf->databuf = (uint8_t *)(nbuf + 1);
-
/* Restore the DMA flag */
usedma = old_dma;
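
The cafe_nand cleanup above swaps the hand-rolled double shift for lower_32_bits()/upper_32_bits() when programming the two DMA address registers. Userspace equivalents are trivial; this sketch just demonstrates the split on a sample address:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Userspace equivalents of the kernel's lower_32_bits()/upper_32_bits(). */
    static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
    static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
        uint64_t dmaaddr = 0x0000000123456789ULL;

        /* One register write per half, no compiler-silencing tricks needed. */
        printf("ADDR0=%08" PRIx32 " ADDR1=%08" PRIx32 "\n",
               lower_32(dmaaddr), upper_32(dmaaddr));
        return 0;
    }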
@@ -801,10 +779,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
goto out;
out_free_dma:
- dma_free_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- cafe->dmabuf, cafe->dmaaddr);
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
out_irq:
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
@@ -829,10 +804,7 @@ static void cafe_nand_remove(struct pci_dev *pdev)
nand_release(mtd);
free_rs(cafe->rs);
pci_iounmap(pdev, cafe->mmio);
- dma_free_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- cafe->dmabuf, cafe->dmaaddr);
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
kfree(cafe);
}
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 5124f8ae8c04..313c7f50621b 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -330,16 +330,12 @@ static int denali_check_erased_page(struct mtd_info *mtd,
unsigned long uncor_ecc_flags,
unsigned int max_bitflips)
{
- uint8_t *ecc_code = chip->buffers->ecccode;
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- int i, ret, stat;
-
- ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
- chip->ecc.total);
- if (ret)
- return ret;
+ int i, stat;
for (i = 0; i < ecc_steps; i++) {
if (!(uncor_ecc_flags & BIT(i)))
@@ -645,8 +641,6 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
int page, int write)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
- unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
int writesize = mtd->writesize;
int oobsize = mtd->oobsize;
uint8_t *bufpoi = chip->oob_poi;
@@ -658,11 +652,11 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
int i, pos, len;
/* BBM at the beginning of the OOB area */
- chip->cmdfunc(mtd, start_cmd, writesize, page);
if (write)
- chip->write_buf(mtd, bufpoi, oob_skip);
+ nand_prog_page_begin_op(chip, page, writesize, bufpoi,
+ oob_skip);
else
- chip->read_buf(mtd, bufpoi, oob_skip);
+ nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
bufpoi += oob_skip;
/* OOB ECC */
@@ -675,30 +669,35 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- chip->cmdfunc(mtd, rnd_cmd, pos, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, pos, bufpoi, len,
+ false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, pos, bufpoi, len,
+ false);
bufpoi += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, writesize +
+ oob_skip, bufpoi,
+ len, false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, writesize +
+ oob_skip, bufpoi,
+ len, false);
bufpoi += len;
}
}
/* OOB free */
len = oobsize - (bufpoi - chip->oob_poi);
- chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, size - len, bufpoi, len,
+ false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, size - len, bufpoi, len,
+ false);
}
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
@@ -710,12 +709,12 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- void *dma_buf = denali->buf;
+ void *tmp_buf = denali->buf;
int oob_skip = denali->oob_skip_bytes;
size_t size = writesize + oobsize;
int ret, i, pos, len;
- ret = denali_data_xfer(denali, dma_buf, size, page, 1, 0);
+ ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
if (ret)
return ret;
@@ -730,11 +729,11 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(buf, dma_buf + pos, len);
+ memcpy(buf, tmp_buf + pos, len);
buf += len;
if (len < ecc_size) {
len = ecc_size - len;
- memcpy(buf, dma_buf + writesize + oob_skip,
+ memcpy(buf, tmp_buf + writesize + oob_skip,
len);
buf += len;
}
@@ -745,7 +744,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *oob = chip->oob_poi;
/* BBM at the beginning of the OOB area */
- memcpy(oob, dma_buf + writesize, oob_skip);
+ memcpy(oob, tmp_buf + writesize, oob_skip);
oob += oob_skip;
/* OOB ECC */
@@ -758,11 +757,11 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(oob, dma_buf + pos, len);
+ memcpy(oob, tmp_buf + pos, len);
oob += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- memcpy(oob, dma_buf + writesize + oob_skip,
+ memcpy(oob, tmp_buf + writesize + oob_skip,
len);
oob += len;
}
@@ -770,7 +769,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* OOB free */
len = oobsize - (oob - chip->oob_poi);
- memcpy(oob, dma_buf + size - len, len);
+ memcpy(oob, tmp_buf + size - len, len);
}
return 0;
@@ -788,16 +787,12 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- int status;
denali_reset_irq(denali);
denali_oob_xfer(mtd, chip, page, 1);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -841,7 +836,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- void *dma_buf = denali->buf;
+ void *tmp_buf = denali->buf;
int oob_skip = denali->oob_skip_bytes;
size_t size = writesize + oobsize;
int i, pos, len;
@@ -851,7 +846,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
* This simplifies the logic.
*/
if (!buf || !oob_required)
- memset(dma_buf, 0xff, size);
+ memset(tmp_buf, 0xff, size);
/* Arrange the buffer for syndrome payload/ecc layout */
if (buf) {
@@ -864,11 +859,11 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(dma_buf + pos, buf, len);
+ memcpy(tmp_buf + pos, buf, len);
buf += len;
if (len < ecc_size) {
len = ecc_size - len;
- memcpy(dma_buf + writesize + oob_skip, buf,
+ memcpy(tmp_buf + writesize + oob_skip, buf,
len);
buf += len;
}
@@ -879,7 +874,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *oob = chip->oob_poi;
/* BBM at the beginning of the OOB area */
- memcpy(dma_buf + writesize, oob, oob_skip);
+ memcpy(tmp_buf + writesize, oob, oob_skip);
oob += oob_skip;
/* OOB ECC */
@@ -892,11 +887,11 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(dma_buf + pos, oob, len);
+ memcpy(tmp_buf + pos, oob, len);
oob += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- memcpy(dma_buf + writesize + oob_skip, oob,
+ memcpy(tmp_buf + writesize + oob_skip, oob,
len);
oob += len;
}
@@ -904,10 +899,10 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* OOB free */
len = oobsize - (oob - chip->oob_poi);
- memcpy(dma_buf + size - len, oob, len);
+ memcpy(tmp_buf + size - len, oob, len);
}
- return denali_data_xfer(denali, dma_buf, size, page, 1, 1);
+ return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}
static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -951,7 +946,7 @@ static int denali_erase(struct mtd_info *mtd, int page)
irq_status = denali_wait_for_irq(denali,
INTR__ERASE_COMP | INTR__ERASE_FAIL);
- return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL;
+ return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
}
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
@@ -1359,7 +1354,6 @@ int denali_init(struct denali_nand_info *denali)
chip->read_buf = denali_read_buf;
chip->write_buf = denali_write_buf;
}
- chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
chip->ecc.read_page = denali_read_page;
chip->ecc.read_page_raw = denali_read_page_raw;
chip->ecc.write_page = denali_write_page;
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 2911066dacac..9ad33d237378 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -329,7 +329,7 @@ struct denali_nand_info {
#define DENALI_CAP_DMA_64BIT BIT(1)
int denali_calc_ecc_bytes(int step_size, int strength);
-extern int denali_init(struct denali_nand_info *denali);
-extern void denali_remove(struct denali_nand_info *denali);
+int denali_init(struct denali_nand_info *denali);
+void denali_remove(struct denali_nand_info *denali);
#endif /* __DENALI_H__ */
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 57fb7ae31412..49cb3e1f8bd0 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -125,3 +125,7 @@ static struct pci_driver denali_pci_driver = {
.remove = denali_pci_remove,
};
module_pci_driver(denali_pci_driver);
+
+MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
+MODULE_AUTHOR("Intel Corporation and its suppliers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 72671dc52e2e..6bc93ea66f50 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -448,7 +448,7 @@ static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
int status;
DoC_WaitReady(doc);
- this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ nand_status_op(this, NULL);
DoC_WaitReady(doc);
status = (int)this->read_byte(mtd);
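nand_status_op() can also latch the status byte itself; a minimal usage sketch (semantics as in the raw NAND core, shown only for illustration):

	u8 status;
	int ret;

	/*
	 * Sends NAND_CMD_STATUS; with a non-NULL pointer the helper also
	 * reads the status byte back, making the explicit read_byte()
	 * above unnecessary. Passing NULL only issues the command.
	 */
	ret = nand_status_op(this, &status);
	if (ret)
		return ret;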
@@ -595,7 +595,7 @@ static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
/* Assert ChipEnable and deassert WriteProtect */
WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
- this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(this);
doc->curchip = chip;
doc->curfloor = floor;
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 2436cbc71662..72f1327c4430 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -785,6 +785,8 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
+ nand_read_page_op(nand, page, 0, NULL, 0);
+
writew(DOC_ECCCONF0_READ_MODE |
DOC_ECCCONF0_ECC_ENABLE |
DOC_ECCCONF0_UNKNOWN |
@@ -864,7 +866,7 @@ static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
- docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
+ nand_read_page_op(nand, page, nand->ecc.size, NULL, 0);
writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
write_nop(docptr);
@@ -900,6 +902,7 @@ static int docg4_erase_block(struct mtd_info *mtd, int page)
struct docg4_priv *doc = nand_get_controller_data(nand);
void __iomem *docptr = doc->virtadr;
uint16_t g4_page;
+ int status;
dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
@@ -939,11 +942,15 @@ static int docg4_erase_block(struct mtd_info *mtd, int page)
poll_status(doc);
write_nop(docptr);
- return nand->waitfunc(mtd, nand);
+ status = nand->waitfunc(mtd, nand);
+ if (status < 0)
+ return status;
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
}
static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
- const uint8_t *buf, bool use_ecc)
+ const uint8_t *buf, int page, bool use_ecc)
{
struct docg4_priv *doc = nand_get_controller_data(nand);
void __iomem *docptr = doc->virtadr;
@@ -951,6 +958,8 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s...\n", __func__);
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
+
writew(DOC_ECCCONF0_ECC_ENABLE |
DOC_ECCCONF0_UNKNOWN |
DOCG4_BCH_SIZE,
@@ -995,19 +1004,19 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
writew(0, docptr + DOC_DATAEND);
write_nop(docptr);
- return 0;
+ return nand_prog_page_end_op(nand);
}
static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf, int oob_required, int page)
{
- return write_page(mtd, nand, buf, false);
+ return write_page(mtd, nand, buf, page, false);
}
static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf, int oob_required, int page)
{
- return write_page(mtd, nand, buf, true);
+ return write_page(mtd, nand, buf, page, true);
}
static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 17db2f90aa2c..8b6dcd739ecb 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -713,7 +713,7 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
- fsl_elbc_read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -729,10 +729,10 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
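The same two-step conversion recurs across the drivers below; a generic sketch (the driver name is hypothetical), assuming the core helpers behave as in the raw NAND framework:

	static int example_write_page(struct mtd_info *mtd,
				      struct nand_chip *chip,
				      const uint8_t *buf, int oob_required,
				      int page)
	{
		/* SEQIN + address cycles, then the page payload */
		nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);

		/* controller-specific extra transfers may go here */
		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

		/* PAGEPROG + status poll; returns 0 or a negative errno */
		return nand_prog_page_end_op(chip);
	}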
/* ECC will be calculated automatically, and errors will be detected in
@@ -742,10 +742,10 @@ static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offset, uint32_t data_len,
const uint8_t *buf, int oob_required, int page)
{
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
fsl_elbc_write_buf(mtd, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- return 0;
+ return nand_prog_page_end_op(chip);
}

static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 9e03bac7f34c..4872a7ba6503 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -688,7 +688,7 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
- fsl_ifc_read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -711,10 +711,10 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- fsl_ifc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
@@ -916,6 +916,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
if (ctrl->version >= FSL_IFC_VERSION_1_1_0)
fsl_ifc_sram_init(priv);
+ /*
+ * IFC version 2.0.0 has 16KB of internal SRAM, twice the 8KB of
+ * older versions, so the bufnum mask must be updated accordingly.
+ */
+ if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
+ priv->bufnum_mask = (priv->bufnum_mask * 2) + 1;
+
return 0;
}
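The arithmetic holds because bufnum_mask is a power-of-two buffer count minus one, so doubling the SRAM doubles the buffer count; a worked example with an assumed 4-buffer configuration:

	/* 8KB SRAM, 4 buffers:  bufnum_mask = 4 - 1         = 0x3
	 * 16KB SRAM, 8 buffers: (0x3 * 2) + 1 = 0x7 = 8 - 1       */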
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index eac15d9bf49e..f49ed46fa770 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -684,8 +684,8 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
int off, len, group = 0;
/*
* ecc_oob is intentionally taken as uint16_t. In 16bit devices, we
@@ -697,7 +697,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
unsigned int max_bitflips = 0;
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
+ nand_read_page_op(chip, page, s * eccsize, NULL, 0);
chip->ecc.hwctl(mtd, NAND_ECC_READ);
chip->read_buf(mtd, p, eccsize);
@@ -720,8 +720,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
if (chip->options & NAND_BUSWIDTH_16)
len = roundup(len, 2);
- chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
- chip->read_buf(mtd, oob + j, len);
+ nand_read_oob_op(chip, page, off, oob + j, len);
j += len;
}
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 484f7fbc3f7d..a8bde6665c24 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -253,9 +253,9 @@ static int gpio_nand_probe(struct platform_device *pdev)
goto out_ce;
}
- gpiomtd->nwp = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
- if (IS_ERR(gpiomtd->nwp)) {
- ret = PTR_ERR(gpiomtd->nwp);
+ gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->ale)) {
+ ret = PTR_ERR(gpiomtd->ale);
goto out_ce;
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 50f8d4a1b983..61fdd733492f 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1029,11 +1029,13 @@ static void block_mark_swapping(struct gpmi_nand_data *this,
p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}
-static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int gpmi_ecc_read_page_data(struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
{
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
+ struct mtd_info *mtd = nand_to_mtd(chip);
void *payload_virt;
dma_addr_t payload_phys;
void *auxiliary_virt;
@@ -1067,9 +1069,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return ret;
}
- /* handle the block mark swapping */
- block_mark_swapping(this, payload_virt, auxiliary_virt);
-
/* Loop over status bytes, accumulating ECC status. */
status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
@@ -1097,8 +1096,8 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
offset /= 8;
eccbytes -= offset;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
- chip->read_buf(mtd, eccbuf, eccbytes);
+ nand_change_read_column_op(chip, offset, eccbuf,
+ eccbytes, false);
/*
* ECC data are not byte aligned and we may have
@@ -1158,6 +1157,9 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
max_bitflips = max_t(unsigned int, max_bitflips, *status);
}
+ /* handle the block mark swapping */
+ block_mark_swapping(this, buf, auxiliary_virt);
+
if (oob_required) {
/*
* It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
@@ -1176,6 +1178,14 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return max_bitflips;
}
+static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ return gpmi_ecc_read_page_data(chip, buf, oob_required, page);
+}
+
/* Fake a virtual small page for the subpage read */
static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offs, uint32_t len, uint8_t *buf, int page)
@@ -1220,12 +1230,12 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
meta = geo->metadata_size;
if (first) {
col = meta + (size + ecc_parity_size) * first;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, col, -1);
-
meta = 0;
buf = buf + first * size;
}
+ nand_read_page_op(chip, page, col, NULL, 0);
+
/* Save the old environment */
r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
@@ -1254,7 +1264,7 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
/* Read the subpage now */
this->swap_block_mark = false;
- max_bitflips = gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+ max_bitflips = gpmi_ecc_read_page_data(chip, buf, 0, page);
/* Restore */
writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
@@ -1277,6 +1287,9 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
int ret;
dev_dbg(this->dev, "ecc write page.\n");
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
if (this->swap_block_mark) {
/*
* If control arrives here, we're doing block mark swapping.
@@ -1338,7 +1351,10 @@ exit_auxiliary:
payload_virt, payload_phys);
}
- return 0;
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
}
/*
@@ -1411,7 +1427,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
memset(chip->oob_poi, ~0, mtd->oobsize);
/* Read out the conventional OOB. */
- chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
@@ -1421,7 +1437,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
*/
if (GPMI_IS_MX23(this)) {
/* Read the block mark into the first byte of the OOB buffer. */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
chip->oob_poi[0] = chip->read_byte(mtd);
}
@@ -1432,7 +1448,6 @@ static int
gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
struct mtd_oob_region of = { };
- int status = 0;
/* Do we have available oob area? */
mtd_ooblayout_free(mtd, 0, &of);
@@ -1442,12 +1457,8 @@ gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
if (!nand_is_slc(chip))
return -EPERM;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of.offset, page);
- chip->write_buf(mtd, chip->oob_poi + of.offset, of.length);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
+ chip->oob_poi + of.offset, of.length);
}
/*
@@ -1477,8 +1488,8 @@ static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
uint8_t *oob = chip->oob_poi;
int step;
- chip->read_buf(mtd, tmp_buf,
- mtd->writesize + mtd->oobsize);
+ nand_read_page_op(chip, page, 0, tmp_buf,
+ mtd->writesize + mtd->oobsize);
/*
* If required, swap the bad block marker and the data stored in the
@@ -1487,12 +1498,8 @@ static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
* See the layout description for a detailed explanation on why this
* is needed.
*/
- if (this->swap_block_mark) {
- u8 swap = tmp_buf[0];
-
- tmp_buf[0] = tmp_buf[mtd->writesize];
- tmp_buf[mtd->writesize] = swap;
- }
+ if (this->swap_block_mark)
+ swap(tmp_buf[0], tmp_buf[mtd->writesize]);
/*
* Copy the metadata section into the oob buffer (this section is
@@ -1615,31 +1622,22 @@ static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
* See the layout description for a detailed explanation on why this
* is needed.
*/
- if (this->swap_block_mark) {
- u8 swap = tmp_buf[0];
-
- tmp_buf[0] = tmp_buf[mtd->writesize];
- tmp_buf[mtd->writesize] = swap;
- }
-
- chip->write_buf(mtd, tmp_buf, mtd->writesize + mtd->oobsize);
+ if (this->swap_block_mark)
+ swap(tmp_buf[0], tmp_buf[mtd->writesize]);
- return 0;
+ return nand_prog_page_op(chip, page, 0, tmp_buf,
+ mtd->writesize + mtd->oobsize);
}
static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
}
static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
-
return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page);
}
@@ -1649,7 +1647,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct gpmi_nand_data *this = nand_get_controller_data(chip);
int ret = 0;
uint8_t *block_mark;
- int column, page, status, chipnr;
+ int column, page, chipnr;
chipnr = (int)(ofs >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
@@ -1663,13 +1661,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
/* Shift to get page */
page = (int)(ofs >> chip->page_shift);
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
- chip->write_buf(mtd, block_mark, 1);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- ret = -EIO;
+ ret = nand_prog_page_op(chip, page, column, block_mark, 1);
chip->select_chip(mtd, -1);
@@ -1712,7 +1704,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
unsigned int search_area_size_in_strides;
unsigned int stride;
unsigned int page;
- uint8_t *buffer = chip->buffers->databuf;
+ uint8_t *buffer = chip->data_buf;
int saved_chip_number;
int found_an_ncb_fingerprint = false;
@@ -1737,7 +1729,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
* Read the NCB fingerprint. The fingerprint is four bytes long
* and starts in the 12th byte of the page.
*/
- chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
+ nand_read_page_op(chip, page, 12, NULL, 0);
chip->read_buf(mtd, buffer, strlen(fingerprint));
/* Look for the fingerprint. */
@@ -1771,7 +1763,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
unsigned int block;
unsigned int stride;
unsigned int page;
- uint8_t *buffer = chip->buffers->databuf;
+ uint8_t *buffer = chip->data_buf;
int saved_chip_number;
int status;
@@ -1797,17 +1789,10 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
dev_dbg(dev, "Erasing the search area...\n");
for (block = 0; block < search_area_size_in_blocks; block++) {
- /* Compute the page address. */
- page = block * block_size_in_pages;
-
/* Erase this block. */
dev_dbg(dev, "\tErasing block 0x%x\n", block);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
-
- /* Wait for the erase to finish. */
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
+ status = nand_erase_op(chip, block);
+ if (status)
dev_err(dev, "[%s] Erase failed.\n", __func__);
}
@@ -1823,13 +1808,9 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Write the first page of the current stride. */
dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- /* Wait for the write to finish. */
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
+ status = chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
+ if (status)
dev_err(dev, "[%s] Write failed.\n", __func__);
}
@@ -1884,7 +1865,7 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
/* Send the command to read the conventional block mark. */
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
block_mark = chip->read_byte(mtd);
chip->select_chip(mtd, -1);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index a45e4ce13d10..06c1f993912c 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -268,31 +268,31 @@ struct timing_threshold {
};
/* Common Services */
-extern int common_nfc_set_geometry(struct gpmi_nand_data *);
-extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
-extern void prepare_data_dma(struct gpmi_nand_data *,
- enum dma_data_direction dr);
-extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
- struct dma_async_tx_descriptor *);
-extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
- struct dma_async_tx_descriptor *);
+int common_nfc_set_geometry(struct gpmi_nand_data *);
+struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
+void prepare_data_dma(struct gpmi_nand_data *,
+ enum dma_data_direction dr);
+int start_dma_without_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
+int start_dma_with_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
/* GPMI-NAND helper function library */
-extern int gpmi_init(struct gpmi_nand_data *);
-extern int gpmi_extra_init(struct gpmi_nand_data *);
-extern void gpmi_clear_bch(struct gpmi_nand_data *);
-extern void gpmi_dump_info(struct gpmi_nand_data *);
-extern int bch_set_geometry(struct gpmi_nand_data *);
-extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
-extern int gpmi_send_command(struct gpmi_nand_data *);
-extern void gpmi_begin(struct gpmi_nand_data *);
-extern void gpmi_end(struct gpmi_nand_data *);
-extern int gpmi_read_data(struct gpmi_nand_data *);
-extern int gpmi_send_data(struct gpmi_nand_data *);
-extern int gpmi_send_page(struct gpmi_nand_data *,
- dma_addr_t payload, dma_addr_t auxiliary);
-extern int gpmi_read_page(struct gpmi_nand_data *,
- dma_addr_t payload, dma_addr_t auxiliary);
+int gpmi_init(struct gpmi_nand_data *);
+int gpmi_extra_init(struct gpmi_nand_data *);
+void gpmi_clear_bch(struct gpmi_nand_data *);
+void gpmi_dump_info(struct gpmi_nand_data *);
+int bch_set_geometry(struct gpmi_nand_data *);
+int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
+int gpmi_send_command(struct gpmi_nand_data *);
+void gpmi_begin(struct gpmi_nand_data *);
+void gpmi_end(struct gpmi_nand_data *);
+int gpmi_read_data(struct gpmi_nand_data *);
+int gpmi_send_data(struct gpmi_nand_data *);
+int gpmi_send_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
+int gpmi_read_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
const u8 *src, size_t src_bit_off,
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index 0897261c3e17..cb862793ab6d 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -544,7 +544,7 @@ static int hisi_nand_read_page_hwecc(struct mtd_info *mtd,
int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
int stat_1, stat_2;
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
/* errors which can not be corrected by ECC */
@@ -574,8 +574,7 @@ static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
{
struct hinfc_host *host = nand_get_controller_data(chip);
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
if (host->irq_status & HINFC504_INTS_UE) {
host->irq_status = 0;
@@ -590,11 +589,11 @@ static int hisi_nand_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static void hisi_nfc_host_init(struct hinfc_host *host)
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index ad827d4af3e9..613b00a9604b 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -313,6 +313,7 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
uint32_t ctrl;
struct nand_chip *chip = &nand->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 id[2];
/* Request I/O resource. */
sprintf(res_name, "bank%d", bank);
@@ -335,17 +336,16 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
/* Retrieve the IDs from the first chip. */
chip->select_chip(mtd, 0);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- *nand_maf_id = chip->read_byte(mtd);
- *nand_dev_id = chip->read_byte(mtd);
+ nand_reset_op(chip);
+ nand_readid_op(chip, 0, id, sizeof(id));
+ *nand_maf_id = id[0];
+ *nand_dev_id = id[1];
} else {
/* Detect additional chip. */
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- if (*nand_maf_id != chip->read_byte(mtd)
- || *nand_dev_id != chip->read_byte(mtd)) {
+ nand_reset_op(chip);
+ nand_readid_op(chip, 0, id, sizeof(id));
+ if (*nand_maf_id != id[0] || *nand_dev_id != id[1]) {
ret = -ENODEV;
goto notfound_id;
}
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 5796468db653..e357948a7505 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -461,7 +461,7 @@ static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
}
/* Writing Command and Address */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* For all sub-pages */
for (i = 0; i < host->mlcsubpages; i++) {
@@ -522,6 +522,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
memcpy(dma_buf, buf, mtd->writesize);
}
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
for (i = 0; i < host->mlcsubpages; i++) {
/* Start Encode */
writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
@@ -550,7 +552,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
/* Wait for Controller Ready */
lpc32xx_waitfunc_controller(mtd, chip);
}
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index b61f28a1554d..5f7cc6da0a7f 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -399,10 +399,7 @@ static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int
static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
@@ -411,17 +408,8 @@ static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- /* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
/*
@@ -632,7 +620,7 @@ static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
/* Issue read command */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* Read data and oob, calculate ECC */
status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
@@ -675,7 +663,7 @@ static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
int page)
{
/* Issue read command */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* Raw reads can just use the FIFO interface */
chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
@@ -698,6 +686,8 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
uint8_t *pb;
int error;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
/* Write data, calculate ECC on outbound data */
error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
if (error)
@@ -716,7 +706,8 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
/* Write ECC data to device */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
/*
@@ -729,9 +720,11 @@ static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
int oob_required, int page)
{
/* Raw writes can just use the FIFO interface */
- chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+ nand_prog_page_begin_op(chip, page, 0, buf,
+ chip->ecc.size * chip->ecc.steps);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
new file mode 100644
index 000000000000..2196f2a233d6
--- /dev/null
+++ b/drivers/mtd/nand/marvell_nand.c
@@ -0,0 +1,2896 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell NAND flash controller driver
+ *
+ * Copyright (C) 2017 Marvell
+ * Author: Miquel RAYNAL <miquel.raynal@free-electrons.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_platform.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <asm/unaligned.h>
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
+#include <linux/platform_data/mtd-nand-pxa3xx.h>
+
+/* Data FIFO granularity, FIFO reads/writes must be a multiple of this length */
+#define FIFO_DEPTH 8
+#define FIFO_REP(x) (x / sizeof(u32))
+#define BCH_SEQ_READS (32 / FIFO_DEPTH)
+/* NFC does not support transfers of larger chunks at a time */
+#define MAX_CHUNK_SIZE 2112
+/* NFCv1 cannot read more than 7 bytes of ID */
+#define NFCV1_READID_LEN 7
+/* Polling is done at a pace of POLL_PERIOD us until POLL_TIMEOUT is reached */
+#define POLL_PERIOD 0
+#define POLL_TIMEOUT 100000
+/* Interrupt maximum wait period in ms */
+#define IRQ_TIMEOUT 1000
+/* Latency in clock cycles between SoC pins and NFC logic */
+#define MIN_RD_DEL_CNT 3
+/* Maximum number of contiguous address cycles */
+#define MAX_ADDRESS_CYC_NFCV1 5
+#define MAX_ADDRESS_CYC_NFCV2 7
+/* System control registers/bits to enable the NAND controller on some SoCs */
+#define GENCONF_SOC_DEVICE_MUX 0x208
+#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
+#define GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST BIT(20)
+#define GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST BIT(21)
+#define GENCONF_SOC_DEVICE_MUX_NFC_INT_EN BIT(25)
+#define GENCONF_CLK_GATING_CTRL 0x220
+#define GENCONF_CLK_GATING_CTRL_ND_GATE BIT(2)
+#define GENCONF_ND_CLK_CTRL 0x700
+#define GENCONF_ND_CLK_CTRL_EN BIT(0)
+
+/* NAND controller data flash control register */
+#define NDCR 0x00
+#define NDCR_ALL_INT GENMASK(11, 0)
+#define NDCR_CS1_CMDDM BIT(7)
+#define NDCR_CS0_CMDDM BIT(8)
+#define NDCR_RDYM BIT(11)
+#define NDCR_ND_ARB_EN BIT(12)
+#define NDCR_RA_START BIT(15)
+#define NDCR_RD_ID_CNT(x) (min_t(unsigned int, x, 0x7) << 16)
+#define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0)
+#define NDCR_DWIDTH_M BIT(26)
+#define NDCR_DWIDTH_C BIT(27)
+#define NDCR_ND_RUN BIT(28)
+#define NDCR_DMA_EN BIT(29)
+#define NDCR_ECC_EN BIT(30)
+#define NDCR_SPARE_EN BIT(31)
+#define NDCR_GENERIC_FIELDS_MASK (~(NDCR_RA_START | NDCR_PAGE_SZ(2048) | \
+ NDCR_DWIDTH_M | NDCR_DWIDTH_C))
+
+/* NAND interface timing parameter 0 register */
+#define NDTR0 0x04
+#define NDTR0_TRP(x) ((min_t(unsigned int, x, 0xF) & 0x7) << 0)
+#define NDTR0_TRH(x) (min_t(unsigned int, x, 0x7) << 3)
+#define NDTR0_ETRP(x) ((min_t(unsigned int, x, 0xF) & 0x8) << 3)
+#define NDTR0_SEL_NRE_EDGE BIT(7)
+#define NDTR0_TWP(x) (min_t(unsigned int, x, 0x7) << 8)
+#define NDTR0_TWH(x) (min_t(unsigned int, x, 0x7) << 11)
+#define NDTR0_TCS(x) (min_t(unsigned int, x, 0x7) << 16)
+#define NDTR0_TCH(x) (min_t(unsigned int, x, 0x7) << 19)
+#define NDTR0_RD_CNT_DEL(x) (min_t(unsigned int, x, 0xF) << 22)
+#define NDTR0_SELCNTR BIT(26)
+#define NDTR0_TADL(x) (min_t(unsigned int, x, 0x1F) << 27)
+
+/* NAND interface timing parameter 1 register */
+#define NDTR1 0x0C
+#define NDTR1_TAR(x) (min_t(unsigned int, x, 0xF) << 0)
+#define NDTR1_TWHR(x) (min_t(unsigned int, x, 0xF) << 4)
+#define NDTR1_TRHW(x) (min_t(unsigned int, x / 16, 0x3) << 8)
+#define NDTR1_PRESCALE BIT(14)
+#define NDTR1_WAIT_MODE BIT(15)
+#define NDTR1_TR(x) (min_t(unsigned int, x, 0xFFFF) << 16)
+
+/* NAND controller status register */
+#define NDSR 0x14
+#define NDSR_WRCMDREQ BIT(0)
+#define NDSR_RDDREQ BIT(1)
+#define NDSR_WRDREQ BIT(2)
+#define NDSR_CORERR BIT(3)
+#define NDSR_UNCERR BIT(4)
+#define NDSR_CMDD(cs) BIT(8 - cs)
+#define NDSR_RDY(rb) BIT(11 + rb)
+#define NDSR_ERRCNT(x) ((x >> 16) & 0x1F)
+
+/* NAND ECC control register */
+#define NDECCCTRL 0x28
+#define NDECCCTRL_BCH_EN BIT(0)
+
+/* NAND controller data buffer register */
+#define NDDB 0x40
+
+/* NAND controller command buffer 0 register */
+#define NDCB0 0x48
+#define NDCB0_CMD1(x) ((x & 0xFF) << 0)
+#define NDCB0_CMD2(x) ((x & 0xFF) << 8)
+#define NDCB0_ADDR_CYC(x) ((x & 0x7) << 16)
+#define NDCB0_ADDR_GET_NUM_CYC(x) (((x) >> 16) & 0x7)
+#define NDCB0_DBC BIT(19)
+#define NDCB0_CMD_TYPE(x) ((x & 0x7) << 21)
+#define NDCB0_CSEL BIT(24)
+#define NDCB0_RDY_BYP BIT(27)
+#define NDCB0_LEN_OVRD BIT(28)
+#define NDCB0_CMD_XTYPE(x) ((x & 0x7) << 29)
+
+/* NAND controller command buffer 1 register */
+#define NDCB1 0x4C
+#define NDCB1_COLS(x) ((x & 0xFFFF) << 0)
+#define NDCB1_ADDRS_PAGE(x) (x << 16)
+
+/* NAND controller command buffer 2 register */
+#define NDCB2 0x50
+#define NDCB2_ADDR5_PAGE(x) (((x >> 16) & 0xFF) << 0)
+#define NDCB2_ADDR5_CYC(x) ((x & 0xFF) << 0)
+
+/* NAND controller command buffer 3 register */
+#define NDCB3 0x54
+#define NDCB3_ADDR6_CYC(x) ((x & 0xFF) << 16)
+#define NDCB3_ADDR7_CYC(x) ((x & 0xFF) << 24)
+
+/* NAND controller command buffer 0 register 'type' and 'xtype' fields */
+#define TYPE_READ 0
+#define TYPE_WRITE 1
+#define TYPE_ERASE 2
+#define TYPE_READ_ID 3
+#define TYPE_STATUS 4
+#define TYPE_RESET 5
+#define TYPE_NAKED_CMD 6
+#define TYPE_NAKED_ADDR 7
+#define TYPE_MASK 7
+#define XTYPE_MONOLITHIC_RW 0
+#define XTYPE_LAST_NAKED_RW 1
+#define XTYPE_FINAL_COMMAND 3
+#define XTYPE_READ 4
+#define XTYPE_WRITE_DISPATCH 4
+#define XTYPE_NAKED_RW 5
+#define XTYPE_COMMAND_DISPATCH 6
+#define XTYPE_MASK 7
+
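To make the register interface concrete, here is how these fields combine for a monolithic page read, mirroring the Hamming read path further down (the address cycle count is illustrative):

	u32 ndcb0 = NDCB0_CMD_TYPE(TYPE_READ) |
		    NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |	/* NFCv2 only */
		    NDCB0_ADDR_CYC(5) |			/* 5 address cycles */
		    NDCB0_DBC |				/* two command cycles */
		    NDCB0_CMD1(NAND_CMD_READ0) |
		    NDCB0_CMD2(NAND_CMD_READSTART);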
+/**
+ * The Marvell ECC engine works differently from the others: in order to limit
+ * the size of the IP, hardware engineers chose a fixed strength of 16 bits per
+ * subpage. Depending on the strength needed by the NAND chip, a particular
+ * layout mixing data/spare/ecc is defined, with a possible last chunk smaller
+ * than the others.
+ *
+ * @writesize: Full page size on which the layout applies
+ * @chunk: Desired ECC chunk size on which the layout applies
+ * @strength: Desired ECC strength (per chunk size bytes) on which the
+ * layout applies
+ * @nchunks: Total number of chunks
+ * @full_chunk_cnt: Number of full-sized chunks, which is the number of
+ * repetitions of the pattern:
+ * (data_bytes + spare_bytes + ecc_bytes).
+ * @data_bytes: Number of data bytes per chunk
+ * @spare_bytes: Number of spare bytes per chunk
+ * @ecc_bytes: Number of ecc bytes per chunk
+ * @last_data_bytes: Number of data bytes in the last chunk
+ * @last_spare_bytes: Number of spare bytes in the last chunk
+ * @last_ecc_bytes: Number of ecc bytes in the last chunk
+ */
+struct marvell_hw_ecc_layout {
+ /* Constraints */
+ int writesize;
+ int chunk;
+ int strength;
+ /* Corresponding layout */
+ int nchunks;
+ int full_chunk_cnt;
+ int data_bytes;
+ int spare_bytes;
+ int ecc_bytes;
+ int last_data_bytes;
+ int last_spare_bytes;
+ int last_ecc_bytes;
+};
+
+#define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb) \
+ { \
+ .writesize = ws, \
+ .chunk = dc, \
+ .strength = ds, \
+ .nchunks = nc, \
+ .full_chunk_cnt = fcc, \
+ .data_bytes = db, \
+ .spare_bytes = sb, \
+ .ecc_bytes = eb, \
+ .last_data_bytes = ldb, \
+ .last_spare_bytes = lsb, \
+ .last_ecc_bytes = leb, \
+ }
+
+/* Layouts explained in AN-379_Marvell_SoC_NFC_ECC */
+static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
+ MARVELL_LAYOUT( 512, 512, 1, 1, 1, 512, 8, 8, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 1, 1, 1, 2048, 40, 24, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
+};
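As a sanity check on the table, each row's chunks must add up to the page and OOB sizes; taking the last row (4096 B page, strength 8) for illustration:

	/*
	 * data:  4 * 1024 + 1 * 0  = 4096 bytes (the full page)
	 * spare: 4 * 0    + 1 * 64 =   64 bytes
	 * ecc:   4 * 30   + 1 * 30 =  150 bytes
	 * total on the flash: 4096 + 64 + 150 = 4310 <= writesize + oobsize
	 */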
+
+/**
+ * The Nand Flash Controller has up to 4 CE and 2 RB pins. The CE selection
+ * is made by a field in NDCB0 register, and in another field in NDCB2 register.
+ * The datasheet describes the logic with an error: ADDR5 field is once
+ * declared at the beginning of NDCB2, and another time at its end. Because the
+ * ADDR5 field of NDCB2 may be used by other bytes, it would be more logical
+ * to use the last bit of this field instead of the first ones.
+ *
+ * @cs: Wanted CE lane.
+ * @ndcb0_csel: Value of the NDCB0 register with or without the flag
+ * selecting the wanted CE lane. This is set once when
+ * the Device Tree is probed.
+ * @rb: Ready/Busy pin for the flash chip
+ */
+struct marvell_nand_chip_sel {
+ unsigned int cs;
+ u32 ndcb0_csel;
+ unsigned int rb;
+};
+
+/**
+ * NAND chip structure: stores NAND chip device related information
+ *
+ * @chip: Base NAND chip structure
+ * @node: Used to store NAND chips into a list
+ * @layout: NAND layout when using hardware ECC
+ * @ndcr: Controller register value for this NAND chip
+ * @ndtr0: Timing registers 0 value for this NAND chip
+ * @ndtr1: Timing registers 1 value for this NAND chip
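+ * @addr_cyc: Number of address cycles required when issuing commands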
+ * @selected_die: Current active CS
+ * @nsels: Number of CS lines required by the NAND chip
+ * @sels: Array of CS lines descriptions
+ */
+struct marvell_nand_chip {
+ struct nand_chip chip;
+ struct list_head node;
+ const struct marvell_hw_ecc_layout *layout;
+ u32 ndcr;
+ u32 ndtr0;
+ u32 ndtr1;
+ int addr_cyc;
+ int selected_die;
+ unsigned int nsels;
+ struct marvell_nand_chip_sel sels[0];
+};
+
+static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct marvell_nand_chip, chip);
+}
+
+static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
+ *nand)
+{
+ return &nand->sels[nand->selected_die];
+}
+
+/**
+ * NAND controller capabilities for distinction between compatible strings
+ *
+ * @max_cs_nb: Number of Chip Select lines available
+ * @max_rb_nb: Number of Ready/Busy lines available
+ * @need_system_controller: Indicates if the SoC needs to have access to the
+ * system controller (ie. to enable the NAND controller)
+ * @legacy_of_bindings: Indicates if DT parsing must be done using the
+ * legacy (old-style) bindings
+ * @is_nfcv2: NFCv2 has numerous enhancements compared to NFCv1, ie.
+ * BCH error detection and correction algorithm,
+ * NDCB3 register has been added
+ * @use_dma: Use dma for data transfers
+ */
+struct marvell_nfc_caps {
+ unsigned int max_cs_nb;
+ unsigned int max_rb_nb;
+ bool need_system_controller;
+ bool legacy_of_bindings;
+ bool is_nfcv2;
+ bool use_dma;
+};
+
+/**
+ * NAND controller structure: stores Marvell NAND controller information
+ *
+ * @controller: Base controller structure
+ * @dev: Parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @ecc_clk: ECC block clock, two times the NAND controller clock
+ * @complete: Completion object to wait for NAND controller events
+ * @assigned_cs: Bitmask describing already assigned CS lines
+ * @chips: List containing all the NAND chips attached to
+ * this NAND controller
+ * @caps: NAND controller capabilities for each compatible string
+ * @dma_chan: DMA channel (NFCv1 only)
+ * @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only)
+ */
+struct marvell_nfc {
+ struct nand_hw_control controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *ecc_clk;
+ struct completion complete;
+ unsigned long assigned_cs;
+ struct list_head chips;
+ struct nand_chip *selected_chip;
+ const struct marvell_nfc_caps *caps;
+
+ /* DMA (NFCv1 only) */
+ bool use_dma;
+ struct dma_chan *dma_chan;
+ u8 *dma_buf;
+};
+
+static inline struct marvell_nfc *to_marvell_nfc(struct nand_hw_control *ctrl)
+{
+ return container_of(ctrl, struct marvell_nfc, controller);
+}
+
+/**
+ * NAND controller timings expressed in NAND Controller clock cycles
+ *
+ * @tRP: ND_nRE pulse width
+ * @tRH: ND_nRE high duration
+ * @tWP: ND_nWE pulse time
+ * @tWH: ND_nWE high duration
+ * @tCS: Enable signal setup time
+ * @tCH: Enable signal hold time
+ * @tADL: Address to write data delay
+ * @tAR: ND_ALE low to ND_nRE low delay
+ * @tWHR: ND_nWE high to ND_nRE low for status read
+ * @tRHW: ND_nRE high duration, read to write delay
+ * @tR: ND_nWE high to ND_nRE low for read
+ */
+struct marvell_nfc_timings {
+ /* NDTR0 fields */
+ unsigned int tRP;
+ unsigned int tRH;
+ unsigned int tWP;
+ unsigned int tWH;
+ unsigned int tCS;
+ unsigned int tCH;
+ unsigned int tADL;
+ /* NDTR1 fields */
+ unsigned int tAR;
+ unsigned int tWHR;
+ unsigned int tRHW;
+ unsigned int tR;
+};
+
+/**
+ * Derives a duration in number of clock cycles.
+ *
+ * @ps: Duration in pico-seconds
+ * @period_ns: Clock period in nano-seconds
+ *
+ * Converts the duration to nanoseconds, then divides by the period and
+ * returns the number of clock periods.
+ */
+#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns))
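For instance, assuming a 100 MHz controller clock (period_ns = 10), a 70 ns (70000 ps) timing constraint converts as:

	TO_CYCLES(70000, 10) = DIV_ROUND_UP(70000 / 1000, 10)
			     = DIV_ROUND_UP(70, 10)
			     = 7 clock cycles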
+
+/**
+ * NAND driver structure filled during the parsing of the ->exec_op() subop
+ * subset of instructions.
+ *
+ * @ndcb: Array of values written to NDCBx registers
+ * @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle
+ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
+ * @rdy_delay_ns: Optional delay after waiting for the RB pin
+ * @data_delay_ns: Optional delay after the data xfer
+ * @data_instr_idx: Index of the data instruction in the subop
+ * @data_instr: Pointer to the data instruction in the subop
+ */
+struct marvell_nfc_op {
+ u32 ndcb[4];
+ unsigned int cle_ale_delay_ns;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ unsigned int data_delay_ns;
+ unsigned int data_instr_idx;
+ const struct nand_op_instr *data_instr;
+};
+
+/*
+ * Internal helper to conditionally apply a delay (from the above structure,
+ * most of the time).
+ */
+static void cond_delay(unsigned int ns)
+{
+ if (!ns)
+ return;
+
+ if (ns < 10000)
+ ndelay(ns);
+ else
+ udelay(DIV_ROUND_UP(ns, 1000));
+}
+
+/*
+ * The controller has many flags that could generate interrupts, most of them
+ * are disabled and polling is used. For the very slow signals, using interrupts
+ * may relax the CPU charge.
+ */
+static void marvell_nfc_disable_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ /* Writing 1 disables the interrupt */
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg | int_mask, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ /* Writing 0 enables the interrupt */
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ writel_relaxed(int_mask, nfc->regs + NDSR);
+}
+
+static void marvell_nfc_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr;
+
+ /*
+ * Callers of this function do not verify if the NAND is using a 16-bit
+ * or an 8-bit bus for normal operations, so we need to take care of that
+ * here by leaving the configuration unchanged if the NAND does not have
+ * the NAND_BUSWIDTH_16 flag set.
+ */
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ return;
+
+ ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (force_8bit)
+ ndcr &= ~(NDCR_DWIDTH_M | NDCR_DWIDTH_C);
+ else
+ ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ writel_relaxed(ndcr, nfc->regs + NDCR);
+}
+
+static int marvell_nfc_wait_ndrun(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 val;
+ int ret;
+
+ /*
+ * The command is being processed, wait for the ND_RUN bit to be
+ * cleared by the NFC. If not, we must clear it by hand.
+ */
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDCR, val,
+ (val & NDCR_ND_RUN) == 0,
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Any time a command has to be sent to the controller, the following sequence
+ * has to be followed:
+ * - call marvell_nfc_prepare_cmd()
+ * -> activate the ND_RUN bit that starts the job
+ * -> wait for the signal indicating the NFC is ready for a command
+ * - send the command (cmd and address cycles)
+ * - optionally send or receive the data
+ * - call marvell_nfc_end_cmd() with the corresponding flag
+ * -> wait for the flag to be raised or cancel the job on timeout
+ *
+ * The following helpers are here to factorize the code a bit so that
+ * specialized functions responsible for executing the actual NAND
+ * operations do not have to replicate the same code blocks.
+ */
+static int marvell_nfc_prepare_cmd(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr, val;
+ int ret;
+
+ /* Poll ND_RUN and clear NDSR before issuing any command */
+ ret = marvell_nfc_wait_ndrun(chip);
+ if (ret) {
+ dev_err(nfc->dev, "Last operation did not succeed\n");
+ return ret;
+ }
+
+ ndcr = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(readl(nfc->regs + NDSR), nfc->regs + NDSR);
+
+ /* Assert ND_RUN bit and wait the NFC to be ready */
+ writel_relaxed(ndcr | NDCR_ND_RUN, nfc->regs + NDCR);
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+ val & NDSR_WRCMDREQ,
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on WRCMDREQ\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Command may be written, clear WRCMDREQ status bit */
+ writel_relaxed(NDSR_WRCMDREQ, nfc->regs + NDSR);
+
+ return 0;
+}
+
+static void marvell_nfc_send_cmd(struct nand_chip *chip,
+ struct marvell_nfc_op *nfc_op)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ dev_dbg(nfc->dev, "\nNDCR: 0x%08x\n"
+ "NDCB0: 0x%08x\nNDCB1: 0x%08x\nNDCB2: 0x%08x\nNDCB3: 0x%08x\n",
+ (u32)readl_relaxed(nfc->regs + NDCR), nfc_op->ndcb[0],
+ nfc_op->ndcb[1], nfc_op->ndcb[2], nfc_op->ndcb[3]);
+
+ writel_relaxed(to_nand_sel(marvell_nand)->ndcb0_csel | nfc_op->ndcb[0],
+ nfc->regs + NDCB0);
+ writel_relaxed(nfc_op->ndcb[1], nfc->regs + NDCB0);
+ writel(nfc_op->ndcb[2], nfc->regs + NDCB0);
+
+ /*
+ * Write NDCB0 four times only if LEN_OVRD is set or if ADDR6 or ADDR7
+ * fields are used (only available on NFCv2).
+ */
+ if (nfc_op->ndcb[0] & NDCB0_LEN_OVRD ||
+ NDCB0_ADDR_GET_NUM_CYC(nfc_op->ndcb[0]) >= 6) {
+ if (!WARN_ON_ONCE(!nfc->caps->is_nfcv2))
+ writel(nfc_op->ndcb[3], nfc->regs + NDCB0);
+ }
+}
+
+static int marvell_nfc_end_cmd(struct nand_chip *chip, int flag,
+ const char *label)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+ val & flag,
+ POLL_PERIOD, POLL_TIMEOUT);
+
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on %s (NDSR: 0x%08x)\n",
+ label, val);
+ if (nfc->dma_chan)
+ dmaengine_terminate_all(nfc->dma_chan);
+ return ret;
+ }
+
+ /*
+ * DMA function uses this helper to poll on CMDD bits without wanting
+ * them to be cleared.
+ */
+ if (nfc->use_dma && (readl_relaxed(nfc->regs + NDCR) & NDCR_DMA_EN))
+ return 0;
+
+ writel_relaxed(flag, nfc->regs + NDSR);
+
+ return 0;
+}
+
+static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ int cs_flag = NDSR_CMDD(to_nand_sel(marvell_nand)->ndcb0_csel);
+
+ return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
+}
+
+static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int ret;
+
+ /* Timeout is expressed in ms */
+ if (!timeout_ms)
+ timeout_ms = IRQ_TIMEOUT;
+
+ init_completion(&nfc->complete);
+
+ marvell_nfc_enable_int(nfc, NDCR_RDYM);
+ ret = wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms));
+ marvell_nfc_disable_int(nfc, NDCR_RDYM);
+ marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
+ if (!ret) {
+ dev_err(nfc->dev, "Timeout waiting for RB signal\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_select_chip(struct mtd_info *mtd, int die_nr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr_generic;
+
+ if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
+ return;
+
+ if (die_nr < 0 || die_nr >= marvell_nand->nsels) {
+ nfc->selected_chip = NULL;
+ marvell_nand->selected_die = -1;
+ return;
+ }
+
+ /*
+ * Do not change the timing registers when using the DT property
+ * marvell,nand-keep-config; in that case ->ndtr0 and ->ndtr1 from the
+ * marvell_nand structure are supposedly empty.
+ */
+ writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
+ writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
+
+ /*
+ * Reset the NDCR register to a clean state for this particular chip,
+ * also clear ND_RUN bit.
+ */
+ ndcr_generic = readl_relaxed(nfc->regs + NDCR) &
+ NDCR_GENERIC_FIELDS_MASK & ~NDCR_ND_RUN;
+ writel_relaxed(ndcr_generic | marvell_nand->ndcr, nfc->regs + NDCR);
+
+ /* Also reset the interrupt status register */
+ marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+
+ nfc->selected_chip = chip;
+ marvell_nand->selected_die = die_nr;
+}
+
+static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
+{
+ struct marvell_nfc *nfc = dev_id;
+ u32 st = readl_relaxed(nfc->regs + NDSR);
+ u32 ien = (~readl_relaxed(nfc->regs + NDCR)) & NDCR_ALL_INT;
+
+ /*
+ * RDY interrupt mask is one bit in NDCR while there are two status
+ * bits in NDSR (RDY[cs0/cs2] and RDY[cs1/cs3]).
+ */
+ if (st & NDSR_RDY(1))
+ st |= NDSR_RDY(0);
+
+ if (!(st & ien))
+ return IRQ_NONE;
+
+ marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);
+
+ if (!(st & (NDSR_RDDREQ | NDSR_WRDREQ | NDSR_WRCMDREQ)))
+ complete(&nfc->complete);
+
+ return IRQ_HANDLED;
+}
+
+/* HW ECC related functions */
+static void marvell_nfc_enable_hw_ecc(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (!(ndcr & NDCR_ECC_EN)) {
+ writel_relaxed(ndcr | NDCR_ECC_EN, nfc->regs + NDCR);
+
+ /*
+ * When enabling BCH, set threshold to 0 to always know the
+ * number of corrected bitflips.
+ */
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
+ }
+}
+
+static void marvell_nfc_disable_hw_ecc(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (ndcr & NDCR_ECC_EN) {
+ writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+ }
+}
+
+/* DMA related helpers */
+static void marvell_nfc_enable_dma(struct marvell_nfc *nfc)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg | NDCR_DMA_EN, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_disable_dma(struct marvell_nfc *nfc)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg & ~NDCR_DMA_EN, nfc->regs + NDCR);
+}
+
+/* Read/write PIO/DMA accessors */
+static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
+ enum dma_data_direction direction,
+ unsigned int len)
+{
+ unsigned int dma_len = min_t(int, ALIGN(len, 32), MAX_CHUNK_SIZE);
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+ dma_cookie_t cookie;
+ int ret;
+
+ marvell_nfc_enable_dma(nfc);
+ /* Prepare the DMA transfer */
+ sg_init_one(&sg, nfc->dma_buf, dma_len);
+ dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
+ direction == DMA_FROM_DEVICE ?
+ DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
+ return -ENXIO;
+ }
+
+ /* Do the task and wait for it to finish */
+ cookie = dmaengine_submit(tx);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ return -EIO;
+
+ dma_async_issue_pending(nfc->dma_chan);
+ ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
+ dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ marvell_nfc_disable_dma(nfc);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
+ dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
+ dmaengine_terminate_all(nfc->dma_chan);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
+ unsigned int len)
+{
+ unsigned int last_len = len % FIFO_DEPTH;
+ unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
+ int i;
+
+ for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
+ ioread32_rep(nfc->regs + NDDB, in + i, FIFO_REP(FIFO_DEPTH));
+
+ if (last_len) {
+ u8 tmp_buf[FIFO_DEPTH];
+
+ ioread32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
+ memcpy(in + last_full_offset, tmp_buf, last_len);
+ }
+
+ return 0;
+}
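A worked example of the partial-chunk handling, with an assumed length of 530 bytes and FIFO_DEPTH = 8:

	/*
	 * last_full_offset = round_down(530, 8) = 528
	 *   -> 66 loop iterations of FIFO_REP(8) == 2 32-bit reads each,
	 *      filling bytes 0..527 of the caller's buffer directly;
	 * last_len = 530 % 8 = 2
	 *   -> one final full 8-byte FIFO drain into tmp_buf, of which
	 *      only 2 bytes are copied out, because the FIFO can only be
	 *      read in multiples of its depth.
	 */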
+
+static int marvell_nfc_xfer_data_out_pio(struct marvell_nfc *nfc, const u8 *out,
+ unsigned int len)
+{
+ unsigned int last_len = len % FIFO_DEPTH;
+ unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
+ int i;
+
+ for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
+ iowrite32_rep(nfc->regs + NDDB, out + i, FIFO_REP(FIFO_DEPTH));
+
+ if (last_len) {
+ u8 tmp_buf[FIFO_DEPTH];
+
+ memcpy(tmp_buf, out + last_full_offset, last_len);
+ iowrite32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
+ u8 *data, int data_len,
+ u8 *spare, int spare_len,
+ u8 *ecc, int ecc_len,
+ unsigned int *max_bitflips)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bf;
+
+ /*
+ * Blank pages (all 0xFF) that have not been written may be recognized
+ * as bad if bitflips occur, so whenever an uncorrectable error occurs,
+ * check if the entire page (with ECC bytes) is actually blank or not.
+ */
+ if (!data)
+ data_len = 0;
+ if (!spare)
+ spare_len = 0;
+ if (!ecc)
+ ecc_len = 0;
+
+ bf = nand_check_erased_ecc_chunk(data, data_len, ecc, ecc_len,
+ spare, spare_len, chip->ecc.strength);
+ if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ return;
+ }
+
+ /* Update the stats and max_bitflips */
+ mtd->ecc_stats.corrected += bf;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
+}
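The core helper used above, nand_check_erased_ecc_chunk(), returns the number of bitflips when the chunk is close enough to all-0xFF, or -EBADMSG otherwise; a minimal usage sketch with an assumed 512-byte chunk and 8 ECC bytes:

	int bf = nand_check_erased_ecc_chunk(data, 512, ecc, 8,
					     NULL, 0, chip->ecc.strength);
	if (bf < 0)
		mtd->ecc_stats.failed++;	/* genuinely corrupted */
	else
		mtd->ecc_stats.corrected += bf;	/* erased page, bf bitflips */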
+
+/*
+ * Check whether a chunk is correct according to the hardware ECC engine.
+ * mtd->ecc_stats.corrected is updated, as well as max_bitflips; however,
+ * mtd->ecc_stats.failed is not: the function instead returns a non-zero
+ * value indicating that the emptiness of the subpage must be checked
+ * before declaring the subpage corrupted.
+ */
+static int marvell_nfc_hw_ecc_correct(struct nand_chip *chip,
+ unsigned int *max_bitflips)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int bf = 0;
+ u32 ndsr;
+
+ ndsr = readl_relaxed(nfc->regs + NDSR);
+
+ /* Check uncorrectable error flag */
+ if (ndsr & NDSR_UNCERR) {
+ writel_relaxed(ndsr, nfc->regs + NDSR);
+
+ /*
+ * Do not increment ->ecc_stats.failed now; instead, return a
+ * non-zero value to indicate that this chunk was apparently
+ * bad and should be checked to see if it is empty or not. If
+ * the chunk (with ECC bytes) is not declared empty, the calling
+ * function must increment the failure count.
+ */
+ return -EBADMSG;
+ }
+
+ /* Check correctable error flag */
+ if (ndsr & NDSR_CORERR) {
+ writel_relaxed(ndsr, nfc->regs + NDSR);
+
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ bf = NDSR_ERRCNT(ndsr);
+ else
+ bf = 1;
+ }
+
+ /* Update the stats and max_bitflips */
+ mtd->ecc_stats.corrected += bf;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
+
+ return 0;
+}
+
+/* Hamming read helpers */
+static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
+ u8 *data_buf, u8 *oob_buf,
+ bool raw, int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_DBC |
+ NDCB0_CMD1(NAND_CMD_READ0) |
+ NDCB0_CMD2(NAND_CMD_READSTART),
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ };
+ unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ int ret;
+
+ /* NFCv2 needs more information about the operation being executed */
+ if (nfc->caps->is_nfcv2)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (data/oob)");
+ if (ret)
+ return ret;
+
+ /*
+ * Read the page then the OOB area. Unlike what is shown in the current
+ * documentation, spare bytes are protected by the ECC engine and must
+ * be at the beginning of the OOB area, or running this driver on legacy
+ * systems would prevent the discovery of the BBM/BBT.
+ */
+ if (nfc->use_dma) {
+ marvell_nfc_xfer_data_dma(nfc, DMA_FROM_DEVICE,
+ lt->data_bytes + oob_bytes);
+ memcpy(data_buf, nfc->dma_buf, lt->data_bytes);
+ memcpy(oob_buf, nfc->dma_buf + lt->data_bytes, oob_bytes);
+ } else {
+ marvell_nfc_xfer_data_in_pio(nfc, data_buf, lt->data_bytes);
+ marvell_nfc_xfer_data_in_pio(nfc, oob_buf, oob_bytes);
+ }
+
+ ret = marvell_nfc_wait_cmdd(chip);
+
+ return ret;
+}
+
+static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+static int marvell_nfc_hw_ecc_hmg_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required,
+ int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ unsigned int full_sz = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int max_bitflips = 0, ret;
+ u8 *raw_buf;
+
+ marvell_nfc_enable_hw_ecc(chip);
+ marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
+ page);
+ ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (!ret)
+ return max_bitflips;
+
+ /*
+ * When ECC failures are detected, check whether the full page has
+ * actually been written. Ignore the failure if it is in fact empty.
+ */
+ raw_buf = kmalloc(full_sz, GFP_KERNEL);
+ if (!raw_buf)
+ return -ENOMEM;
+
+ marvell_nfc_hw_ecc_hmg_do_read_page(chip, raw_buf, raw_buf +
+ lt->data_bytes, true, page);
+ marvell_nfc_check_empty_chunk(chip, raw_buf, full_sz, NULL, 0, NULL, 0,
+ &max_bitflips);
+ kfree(raw_buf);
+
+ return max_bitflips;
+}
+
+/*
+ * The spare area in Hamming layouts is not protected by the ECC engine
+ * (even if it appears before the ECC bytes when reading), so the
+ * ->read_oob_raw() function also serves as ->read_oob().
+ */
+static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, chip->data_buf,
+ chip->oob_poi, true, page);
+}
+
+/* Hamming write helpers */
+static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
+ const u8 *data_buf,
+ const u8 *oob_buf, bool raw,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN) |
+ NDCB0_CMD2(NAND_CMD_PAGEPROG) |
+ NDCB0_DBC,
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ };
+ unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ int ret;
+
+ /* NFCv2 needs more information about the operation being executed */
+ if (nfc->caps->is_nfcv2)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
+ "WRDREQ while loading FIFO (data)");
+ if (ret)
+ return ret;
+
+ /* Write the page then the OOB area */
+ if (nfc->use_dma) {
+ memcpy(nfc->dma_buf, data_buf, lt->data_bytes);
+ memcpy(nfc->dma_buf + lt->data_bytes, oob_buf, oob_bytes);
+ marvell_nfc_xfer_data_dma(nfc, DMA_TO_DEVICE, lt->data_bytes +
+ lt->ecc_bytes + lt->spare_bytes);
+ } else {
+ marvell_nfc_xfer_data_out_pio(nfc, data_buf, lt->data_bytes);
+ marvell_nfc_xfer_data_out_pio(nfc, oob_buf, oob_bytes);
+ }
+
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ ret = marvell_nfc_wait_op(chip,
+ chip->data_interface.timings.sdr.tPROG_max);
+ return ret;
+}
+
+static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+static int marvell_nfc_hw_ecc_hmg_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ marvell_nfc_enable_hw_ecc(chip);
+ ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ false, page);
+ marvell_nfc_disable_hw_ecc(chip);
+
+ return ret;
+}
+
+/*
+ * The spare area in Hamming layouts is not protected by the ECC engine
+ * (even if it appears before the ECC bytes when reading), so the
+ * ->write_oob_raw() function also serves as ->write_oob().
+ */
+static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, chip->data_buf,
+ chip->oob_poi, true, page);
+}
+
+/* BCH read helpers */
+static int marvell_nfc_hw_ecc_bch_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ u8 *oob = chip->oob_poi;
+ int chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int ecc_len = lt->ecc_bytes;
+ int chunk;
+
+ if (oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* Update last chunk length */
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ ecc_len = lt->last_ecc_bytes;
+ }
+
+ /* Read data bytes */
+ nand_change_read_column_op(chip, chunk * chunk_size,
+ buf + (lt->data_bytes * chunk),
+ data_len, false);
+
+ /* Read spare bytes */
+ nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
+ spare_len, false);
+
+ /* Read ECC bytes */
+ nand_read_data_op(chip, oob + ecc_offset +
+ (ALIGN(lt->ecc_bytes, 32) * chunk),
+ ecc_len, false);
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
+ u8 *data, unsigned int data_len,
+ u8 *spare, unsigned int spare_len,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int i, ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_LEN_OVRD,
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ .ndcb[3] = data_len + spare_len,
+ };
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return;
+
+ if (chunk == 0)
+ nfc_op.ndcb[0] |= NDCB0_DBC |
+ NDCB0_CMD1(NAND_CMD_READ0) |
+ NDCB0_CMD2(NAND_CMD_READSTART);
+
+ /*
+ * Trigger the naked read operation only on the last chunk.
+ * Otherwise, use monolithic read.
+ */
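+ /*
+ * For instance (purely illustrative), a 3-chunk page read issues
+ * READ0/READSTART with the monolithic xtype for chunk 0, the
+ * monolithic xtype again for chunk 1, and the last-naked xtype for
+ * chunk 2.
+ */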
+ if (lt->nchunks == 1 || (chunk < lt->nchunks - 1))
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+ else
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+
+ /*
+ * According to the datasheet, when reading from NDDB
+ * with BCH enabled, after each 32-byte read, we
+ * have to make sure that the NDSR.RDDREQ bit is set.
+ *
+ * Drain the FIFO, 8 32-bit reads at a time, and skip
+ * the polling on the last read.
+ *
+ * The length is a multiple of 32 bytes, hence it is a multiple of 8 too.
+ */
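+ /*
+ * Worked arithmetic (illustrative, assuming FIFO_DEPTH * BCH_SEQ_READS
+ * equals the 32-byte burst described above): a 2048-byte data area is
+ * drained in 2048 / 32 = 64 iterations, each preceded by a poll on
+ * NDSR.RDDREQ.
+ */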
+ for (i = 0; i < data_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
+ marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (data)");
+ marvell_nfc_xfer_data_in_pio(nfc, data,
+ FIFO_DEPTH * BCH_SEQ_READS);
+ data += FIFO_DEPTH * BCH_SEQ_READS;
+ }
+
+ for (i = 0; i < spare_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
+ marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (OOB)");
+ marvell_nfc_xfer_data_in_pio(nfc, spare,
+ FIFO_DEPTH * BCH_SEQ_READS);
+ spare += FIFO_DEPTH * BCH_SEQ_READS;
+ }
+}
+
+static int marvell_nfc_hw_ecc_bch_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required,
+ int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int data_len = lt->data_bytes, spare_len = lt->spare_bytes, ecc_len;
+ u8 *data = buf, *spare = chip->oob_poi, *ecc;
+ int max_bitflips = 0;
+ u32 failure_mask = 0;
+ int chunk, ecc_offset_in_page, ret;
+
+ /*
+ * With BCH, the OOB area is not fully used (and thus not read
+ * entirely), so unexpected bytes could show up at the end of the OOB
+ * buffer if it is not explicitly erased.
+ */
+ if (oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ marvell_nfc_enable_hw_ecc(chip);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* Update length for the last chunk */
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ }
+
+ /* Read the chunk and detect number of bitflips */
+ marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
+ spare, spare_len, page);
+ ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+ if (ret)
+ failure_mask |= BIT(chunk);
+
+ data += data_len;
+ spare += spare_len;
+ }
+
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (!failure_mask)
+ return max_bitflips;
+
+ /*
+ * Please note that dumping the ECC bytes during a normal read with OOB
+ * area would add a significant overhead as ECC bytes are "consumed" by
+ * the controller in normal mode and must be re-read in raw mode. To
+ * avoid degrading performance, we prefer not to include them. The
+ * user should re-read the page in raw mode if ECC bytes are required.
+ *
+ * However, for any subpage read error reported by ->correct(), the ECC
+ * bytes must be read in raw mode and the full subpage must be checked
+ * to see whether it is entirely empty or whether there was an actual
+ * error.
+ */
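+ /*
+ * Illustrative example of the derivation below (hypothetical figures,
+ * not a layout taken from this driver): with data_bytes = 1024,
+ * spare_bytes = 32 and ecc_bytes = 30, the ECC bytes of full chunk 2
+ * start in the page at 2 * (1024 + 32 + 30) + 1024 + 32 = 3228, and in
+ * the OOB buffer at (full_chunk_cnt * 32) + last_spare_bytes +
+ * 2 * ALIGN(30, 32).
+ */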
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* No failure reported for this chunk, move to the next one */
+ if (!(failure_mask & BIT(chunk)))
+ continue;
+
+ /* Derive ECC bytes positions (in page/buffer) and length */
+ ecc = chip->oob_poi +
+ (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes +
+ (chunk * ALIGN(lt->ecc_bytes, 32));
+ ecc_offset_in_page =
+ (chunk * (lt->data_bytes + lt->spare_bytes +
+ lt->ecc_bytes)) +
+ (chunk < lt->full_chunk_cnt ?
+ lt->data_bytes + lt->spare_bytes :
+ lt->last_data_bytes + lt->last_spare_bytes);
+ ecc_len = chunk < lt->full_chunk_cnt ?
+ lt->ecc_bytes : lt->last_ecc_bytes;
+
+ /* Do the actual raw read of the ECC bytes */
+ nand_change_read_column_op(chip, ecc_offset_in_page,
+ ecc, ecc_len, false);
+
+ /* Derive data/spare bytes positions (in buffer) and length */
+ data = buf + (chunk * lt->data_bytes);
+ data_len = chunk < lt->full_chunk_cnt ?
+ lt->data_bytes : lt->last_data_bytes;
+ spare = chip->oob_poi + (chunk * (lt->spare_bytes +
+ lt->ecc_bytes));
+ spare_len = chunk < lt->full_chunk_cnt ?
+ lt->spare_bytes : lt->last_spare_bytes;
+
+ /* Check the entire chunk (data + spare + ecc) for emptiness */
+ marvell_nfc_check_empty_chunk(chip, data, data_len, spare,
+ spare_len, ecc, ecc_len,
+ &max_bitflips);
+ }
+
+ return max_bitflips;
+}
+
+static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return chip->ecc.read_page_raw(mtd, chip, chip->data_buf, true, page);
+}
+
+static int marvell_nfc_hw_ecc_bch_read_oob(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return chip->ecc.read_page(mtd, chip, chip->data_buf, true, page);
+}
+
+/* BCH write helpers */
+static int marvell_nfc_hw_ecc_bch_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int full_chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int ecc_len = lt->ecc_bytes;
+ int spare_offset = 0;
+ int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes;
+ int chunk;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ ecc_len = lt->last_ecc_bytes;
+ }
+
+ /* Point to the column of the next chunk */
+ nand_change_write_column_op(chip, chunk * full_chunk_size,
+ NULL, 0, false);
+
+ /* Write the data */
+ nand_write_data_op(chip, buf + (chunk * lt->data_bytes),
+ data_len, false);
+
+ if (!oob_required)
+ continue;
+
+ /* Write the spare bytes */
+ if (spare_len)
+ nand_write_data_op(chip, chip->oob_poi + spare_offset,
+ spare_len, false);
+
+ /* Write the ECC bytes */
+ if (ecc_len)
+ nand_write_data_op(chip, chip->oob_poi + ecc_offset,
+ ecc_len, false);
+
+ spare_offset += spare_len;
+ ecc_offset += ALIGN(ecc_len, 32);
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int
+marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
+ const u8 *data, unsigned int data_len,
+ const u8 *spare, unsigned int spare_len,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
+ .ndcb[3] = data_len + spare_len,
+ };
+
+ /*
+ * The first operation dispatches the SEQIN command, issues the address
+ * cycles and asks for the first chunk of data.
+ * All operations in the middle (if any) issue a naked write and
+ * also ask for data.
+ * The last operation (if any) asks for the last chunk of data through
+ * a last naked write.
+ */
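+ /*
+ * For instance (purely illustrative), a 3-chunk page program is issued
+ * as:
+ * chunk 0: WRITE_DISPATCH + SEQIN + address cycles + data
+ * chunk 1: NAKED_RW + data
+ * chunk 2: LAST_NAKED_RW + data + PAGEPROG
+ */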
+ if (chunk == 0) {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN);
+ nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
+ nfc_op.ndcb[2] |= NDCB2_ADDR5_PAGE(page);
+ } else if (chunk < lt->nchunks - 1) {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
+ } else {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ }
+
+ /* Always dispatch the PAGEPROG command on the last chunk */
+ if (chunk == lt->nchunks - 1)
+ nfc_op.ndcb[0] |= NDCB0_CMD2(NAND_CMD_PAGEPROG) | NDCB0_DBC;
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
+ "WRDREQ while loading FIFO (data)");
+ if (ret)
+ return ret;
+
+ /* Transfer the contents */
+ iowrite32_rep(nfc->regs + NDDB, data, FIFO_REP(data_len));
+ iowrite32_rep(nfc->regs + NDDB, spare, FIFO_REP(spare_len));
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ const u8 *data = buf;
+ const u8 *spare = chip->oob_poi;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int chunk, ret;
+
+ /* Spare data will be written anyway, so fill it with 0xFF to avoid garbage */
+ if (!oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ marvell_nfc_enable_hw_ecc(chip);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ }
+
+ marvell_nfc_hw_ecc_bch_write_chunk(chip, chunk, data, data_len,
+ spare, spare_len, page);
+ data += data_len;
+ spare += spare_len;
+
+ /*
+ * Waiting only for CMDD or PAGED is not enough, the ECC bytes
+ * may only be partially written. No flag is set once the
+ * operation is really finished but the ND_RUN bit is cleared,
+ * so wait for it to clear before stepping into the next command.
+ */
+ marvell_nfc_wait_ndrun(chip);
+ }
+
+ ret = marvell_nfc_wait_op(chip,
+ chip->data_interface.timings.sdr.tPROG_max);
+
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return chip->ecc.write_page_raw(mtd, chip, chip->data_buf, true, page);
+}
+
+static int marvell_nfc_hw_ecc_bch_write_oob(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return chip->ecc.write_page(mtd, chip, chip->data_buf, true, page);
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static void marvell_nfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct marvell_nfc_op *nfc_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ bool first_cmd = true;
+ unsigned int op_id;
+ int i;
+
+ /* Reset the input structure as most of its fields will be OR'ed */
+ memset(nfc_op, 0, sizeof(struct marvell_nfc_op));
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+ int len = nand_subop_get_data_len(subop, op_id);
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (first_cmd)
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD1(instr->ctx.cmd.opcode);
+ else
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD2(instr->ctx.cmd.opcode) |
+ NDCB0_DBC;
+
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ first_cmd = false;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ nfc_op->ndcb[0] |= NDCB0_ADDR_CYC(naddrs);
+
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ nfc_op->ndcb[1] |= addrs[i] << (8 * i);
+
+ if (naddrs >= 5)
+ nfc_op->ndcb[2] |= NDCB2_ADDR5_CYC(addrs[4]);
+ if (naddrs >= 6)
+ nfc_op->ndcb[3] |= NDCB3_ADDR6_CYC(addrs[5]);
+ if (naddrs == 7)
+ nfc_op->ndcb[3] |= NDCB3_ADDR7_CYC(addrs[6]);
+
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ);
+ if (nfc->caps->is_nfcv2) {
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ NDCB0_LEN_OVRD;
+ nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
+ }
+ nfc_op->data_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE);
+ if (nfc->caps->is_nfcv2) {
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ NDCB0_LEN_OVRD;
+ nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
+ }
+ nfc_op->data_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ nfc_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+}
+
+static int marvell_nfc_xfer_data_pio(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct marvell_nfc_op *nfc_op)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct nand_op_instr *instr = nfc_op->data_instr;
+ unsigned int op_id = nfc_op->data_instr_idx;
+ unsigned int len = nand_subop_get_data_len(subop, op_id);
+ unsigned int offset = nand_subop_get_data_start_off(subop, op_id);
+ bool reading = (instr->type == NAND_OP_DATA_IN_INSTR);
+ int ret;
+
+ if (instr->ctx.data.force_8bit)
+ marvell_nfc_force_byte_access(chip, true);
+
+ if (reading) {
+ u8 *in = instr->ctx.data.buf.in + offset;
+
+ ret = marvell_nfc_xfer_data_in_pio(nfc, in, len);
+ } else {
+ const u8 *out = instr->ctx.data.buf.out + offset;
+
+ ret = marvell_nfc_xfer_data_out_pio(nfc, out, len);
+ }
+
+ if (instr->ctx.data.force_8bit)
+ marvell_nfc_force_byte_access(chip, false);
+
+ return ret;
+}
+
+static int marvell_nfc_monolithic_access_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ bool reading;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
+ "RDDREQ/WRDREQ while draining raw data");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (reading) {
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ if (!reading) {
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+
+ /*
+ * The NDCR ND_RUN bit should be cleared automatically at the end of
+ * each operation but experience shows that the behavior is buggy when
+ * it comes to writes (with LEN_OVRD). Clear it by hand in this case.
+ */
+ if (!reading) {
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_naked_access_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+
+ /*
+ * Naked accesses are different in that they need to be flagged as
+ * naked by the controller. Reset the controller register fields that
+ * encode the operation type and refill them according to the ongoing
+ * operation.
+ */
+ nfc_op.ndcb[0] &= ~(NDCB0_CMD_TYPE(TYPE_MASK) |
+ NDCB0_CMD_XTYPE(XTYPE_MASK));
+ switch (subop->instrs[0].type) {
+ case NAND_OP_CMD_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_CMD);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_ADDR);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE) |
+ NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ break;
+ default:
+ /* This should never happen */
+ break;
+ }
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+
+ if (!nfc_op.data_instr) {
+ ret = marvell_nfc_wait_cmdd(chip);
+ cond_delay(nfc_op.cle_ale_delay_ns);
+ return ret;
+ }
+
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
+ "RDDREQ/WRDREQ while draining raw data");
+ if (ret)
+ return ret;
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ /*
+ * The NDCR ND_RUN bit should be cleared automatically at the end of
+ * each operation but experience shows that the behavior is buggy when
+ * it comes to writes (with LEN_OVRD). Clear it by hand in this case.
+ */
+ if (subop->instrs[0].type == NAND_OP_DATA_OUT_INSTR) {
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_naked_waitrdy_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return ret;
+}
+
+static int marvell_nfc_read_id_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ_ID);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while reading ID");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_STATUS);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while reading status");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_reset_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_RESET);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_erase_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_ERASE);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return 0;
+}
+
+static const struct nand_op_parser marvell_nfcv2_op_parser = NAND_OP_PARSER(
+ /* Monolithic reads/writes */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_monolithic_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC_NFCV2),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_monolithic_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ /* Naked commands */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_waitrdy_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static const struct nand_op_parser marvell_nfcv1_op_parser = NAND_OP_PARSER(
+ /* Naked commands are not supported, use a function for each pattern */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_erase_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_reset_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_waitrdy_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static int marvell_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ if (nfc->caps->is_nfcv2)
+ return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
+ op, check_only);
+ else
+ return nand_op_parser_exec_op(chip, &marvell_nfcv1_op_parser,
+ op, check_only);
+}
+
+/*
+ * Layouts were broken in the old pxa3xx_nand driver; these are supposed
+ * to be usable.
+ */
+static int marvell_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = (lt->full_chunk_cnt * lt->ecc_bytes) +
+ lt->last_ecc_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int marvell_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+
+ if (section)
+ return -ERANGE;
+
+ /*
+ * The bootrom looks at bytes 0 and 5 for bad block markers in the
+ * 4 KiB page / 4-bit BCH combination.
+ */
+ if (mtd->writesize == SZ_4K && lt->data_bytes == SZ_2K)
+ oobregion->offset = 6;
+ else
+ oobregion->offset = 2;
+
+ oobregion->length = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = {
+ .ecc = marvell_nand_ooblayout_ecc,
+ .free = marvell_nand_ooblayout_free,
+};
+
+static int marvell_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *l;
+ int i;
+
+ if (!nfc->caps->is_nfcv2 &&
+ (mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) {
+ dev_err(nfc->dev,
+ "NFCv1: writesize (%d) cannot be bigger than a chunk (%d)\n",
+ mtd->writesize, MAX_CHUNK_SIZE - mtd->oobsize);
+ return -ENOTSUPP;
+ }
+
+ to_marvell_nand(chip)->layout = NULL;
+ for (i = 0; i < ARRAY_SIZE(marvell_nfc_layouts); i++) {
+ l = &marvell_nfc_layouts[i];
+ if (mtd->writesize == l->writesize &&
+ ecc->size == l->chunk && ecc->strength == l->strength) {
+ to_marvell_nand(chip)->layout = l;
+ break;
+ }
+ }
+
+ if (!to_marvell_nand(chip)->layout ||
+ (!nfc->caps->is_nfcv2 && ecc->strength > 1)) {
+ dev_err(nfc->dev,
+ "ECC strength %d at page size %d is not supported\n",
+ ecc->strength, mtd->writesize);
+ return -ENOTSUPP;
+ }
+
+ mtd_set_ooblayout(mtd, &marvell_nand_ooblayout_ops);
+ ecc->steps = l->nchunks;
+ ecc->size = l->data_bytes;
+
+ if (ecc->strength == 1) {
+ chip->ecc.algo = NAND_ECC_HAMMING;
+ ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw;
+ ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page;
+ ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw;
+ ecc->read_oob = ecc->read_oob_raw;
+ ecc->write_page_raw = marvell_nfc_hw_ecc_hmg_write_page_raw;
+ ecc->write_page = marvell_nfc_hw_ecc_hmg_write_page;
+ ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw;
+ ecc->write_oob = ecc->write_oob_raw;
+ } else {
+ chip->ecc.algo = NAND_ECC_BCH;
+ ecc->strength = 16;
+ ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw;
+ ecc->read_page = marvell_nfc_hw_ecc_bch_read_page;
+ ecc->read_oob_raw = marvell_nfc_hw_ecc_bch_read_oob_raw;
+ ecc->read_oob = marvell_nfc_hw_ecc_bch_read_oob;
+ ecc->write_page_raw = marvell_nfc_hw_ecc_bch_write_page_raw;
+ ecc->write_page = marvell_nfc_hw_ecc_bch_write_page;
+ ecc->write_oob_raw = marvell_nfc_hw_ecc_bch_write_oob_raw;
+ ecc->write_oob = marvell_nfc_hw_ecc_bch_write_oob;
+ }
+
+ return 0;
+}
+
+static int marvell_nand_ecc_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int ret;
+
+ if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) {
+ if (chip->ecc_step_ds && chip->ecc_strength_ds) {
+ ecc->size = chip->ecc_step_ds;
+ ecc->strength = chip->ecc_strength_ds;
+ } else {
+ dev_info(nfc->dev,
+ "No minimum ECC strength, using 1b/512B\n");
+ ecc->size = 512;
+ ecc->strength = 1;
+ }
+ }
+
+ switch (ecc->mode) {
+ case NAND_ECC_HW:
+ ret = marvell_nand_hw_ecc_ctrl_init(mtd, ecc);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_NONE:
+ case NAND_ECC_SOFT:
+ if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
+ mtd->writesize != SZ_2K) {
+ dev_err(nfc->dev, "NFCv1 cannot write %d bytes pages\n",
+ mtd->writesize);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_mirror_pattern
+};
+
+static int marvell_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr,
+ const struct nand_data_interface
+ *conf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ unsigned int period_ns = 1000000000 / clk_get_rate(nfc->ecc_clk) * 2;
+ const struct nand_sdr_timings *sdr;
+ struct marvell_nfc_timings nfc_tmg;
+ int read_delay;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ /*
+ * SDR timings are given in pico-seconds while NFC timings must be
+ * expressed in NAND controller clock cycles; the controller clock runs
+ * at half the frequency of the accessible ECC clock retrieved by
+ * clk_get_rate(). This is not written anywhere in the datasheet but
+ * was observed with an oscilloscope.
+ *
+ * The NFC datasheet gives equations from which those calculations
+ * are derived. They tend to be slightly more restrictive than the
+ * given core timings and may improve the overall speed.
+ */
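+ /*
+ * Worked example (illustrative figures only, and assuming TO_CYCLES()
+ * rounds a picosecond value up to whole clock cycles of the given
+ * period): if clk_get_rate() returns 250 MHz, period_ns above is
+ * (1000000000 / 250000000) * 2 = 8 ns. With tRC_min = 20000 ps,
+ * DIV_ROUND_UP(20000, 2) = 10000 ps, i.e. ceil(10 ns / 8 ns) = 2
+ * cycles, hence tRP = 2 - 1 = 1.
+ */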
+ nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1;
+ nfc_tmg.tRH = nfc_tmg.tRP;
+ nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1;
+ nfc_tmg.tWH = nfc_tmg.tWP;
+ nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns);
+ nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1;
+ nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns);
+ /*
+ * Read delay is the time of propagation from SoC pins to NFC internal
+ * logic. With non-EDO timings, this is MIN_RD_DEL_CNT clock cycles. In
+ * EDO mode, an additional delay of tRH must be taken into account so
+ * the data is sampled on the falling edge instead of the rising edge.
+ */
+ read_delay = sdr->tRC_min >= 30000 ?
+ MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH;
+
+ nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns);
+ /*
+ * tWHR and tRHW are supposed to be read-to-write delays (and vice
+ * versa) but in some cases, e.g. when doing a change of column, they
+ * must be greater than that to make sure the tCCS delay is respected.
+ */
+ nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min),
+ period_ns) - 2;
+ nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min),
+ period_ns);
+
+ /* Use WAIT_MODE (wait for RB line) instead of only relying on delays */
+ nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns);
+
+ if (chipnr < 0)
+ return 0;
+
+ marvell_nand->ndtr0 =
+ NDTR0_TRP(nfc_tmg.tRP) |
+ NDTR0_TRH(nfc_tmg.tRH) |
+ NDTR0_ETRP(nfc_tmg.tRP) |
+ NDTR0_TWP(nfc_tmg.tWP) |
+ NDTR0_TWH(nfc_tmg.tWH) |
+ NDTR0_TCS(nfc_tmg.tCS) |
+ NDTR0_TCH(nfc_tmg.tCH) |
+ NDTR0_RD_CNT_DEL(read_delay) |
+ NDTR0_SELCNTR |
+ NDTR0_TADL(nfc_tmg.tADL);
+
+ marvell_nand->ndtr1 =
+ NDTR1_TAR(nfc_tmg.tAR) |
+ NDTR1_TWHR(nfc_tmg.tWHR) |
+ NDTR1_TRHW(nfc_tmg.tRHW) |
+ NDTR1_WAIT_MODE |
+ NDTR1_TR(nfc_tmg.tR);
+
+ return 0;
+}
+
+static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
+ struct device_node *np)
+{
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(dev);
+ struct marvell_nand_chip *marvell_nand;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs, rb;
+
+ /*
+ * The legacy "num-cs" property indicates the number of CS on the only
+ * chip connected to the controller (legacy bindings do not support
+ * more than one chip). CS lines are simply numbered one by one, while
+ * the RB pin is always #0.
+ *
+ * When not using legacy bindings, a pair of "reg" and "nand-rb"
+ * properties must be filled in. For each chip, expressed as a subnode,
+ * "reg" points to the CS lines and "nand-rb" to the RB line.
+ */
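+ /*
+ * For instance (an illustrative sketch only, using the property names
+ * of the non-legacy bindings described above), a chip subnode could
+ * look like:
+ *
+ * nand@0 {
+ * reg = <0>;
+ * nand-rb = <0>;
+ * };
+ */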
+ if (pdata) {
+ nsels = 1;
+ } else if (nfc->caps->legacy_of_bindings &&
+ !of_get_property(np, "num-cs", &nsels)) {
+ dev_err(dev, "missing num-cs property\n");
+ return -EINVAL;
+ } else if (!of_get_property(np, "reg", &nsels)) {
+ dev_err(dev, "missing reg property\n");
+ return -EINVAL;
+ }
+
+ if (!pdata)
+ nsels /= sizeof(u32);
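+ /*
+ * Illustrative example: with the non-legacy bindings, a property such
+ * as reg = <0 2>; is 8 bytes long, hence nsels = 2 chip selects.
+ */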
+ if (!nsels) {
+ dev_err(dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ /* Alloc the nand chip structure */
+ marvell_nand = devm_kzalloc(dev, sizeof(*marvell_nand) +
+ (nsels *
+ sizeof(struct marvell_nand_chip_sel)),
+ GFP_KERNEL);
+ if (!marvell_nand) {
+ dev_err(dev, "could not allocate chip structure\n");
+ return -ENOMEM;
+ }
+
+ marvell_nand->nsels = nsels;
+ marvell_nand->selected_die = -1;
+
+ for (i = 0; i < nsels; i++) {
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * Legacy bindings use the CS lines in natural
+ * order (0, 1, ...)
+ */
+ cs = i;
+ } else {
+ /* Retrieve CS id */
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (cs >= nfc->caps->max_cs_nb) {
+ dev_err(dev, "invalid reg value: %u (max CS = %d)\n",
+ cs, nfc->caps->max_cs_nb);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ /*
+ * The cs variable represents the chip select id, which must be
+ * converted into bit fields for NDCB0 and NDCB2 to select the
+ * right chip. Unfortunately, due to a lack of information on
+ * the subject and incoherent documentation, the user should not
+ * use CS1 and CS3 at all, as asserting them is not supported in
+ * a reliable way (due to multiplexing inside the ADDR5 field).
+ */
+ marvell_nand->sels[i].cs = cs;
+ switch (cs) {
+ case 0:
+ case 2:
+ marvell_nand->sels[i].ndcb0_csel = 0;
+ break;
+ case 1:
+ case 3:
+ marvell_nand->sels[i].ndcb0_csel = NDCB0_CSEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Retrieve RB id */
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /* Legacy bindings always use RB #0 */
+ rb = 0;
+ } else {
+ ret = of_property_read_u32_index(np, "nand-rb", i,
+ &rb);
+ if (ret) {
+ dev_err(dev,
+ "could not retrieve RB property: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (rb >= nfc->caps->max_rb_nb) {
+ dev_err(dev, "invalid reg value: %u (max RB = %d)\n",
+ rb, nfc->caps->max_rb_nb);
+ return -EINVAL;
+ }
+
+ marvell_nand->sels[i].rb = rb;
+ }
+
+ chip = &marvell_nand->chip;
+ chip->controller = &nfc->controller;
+ nand_set_flash_node(chip, np);
+
+ chip->exec_op = marvell_nfc_exec_op;
+ chip->select_chip = marvell_nfc_select_chip;
+ if (nfc->caps->is_nfcv2 &&
+ !of_property_read_bool(np, "marvell,nand-keep-config"))
+ chip->setup_data_interface = marvell_nfc_setup_data_interface;
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = dev;
+
+ /*
+ * Default to HW ECC engine mode. If the nand-ecc-mode property is given
+ * in the DT node, this entry will be overwritten in nand_scan_ident().
+ */
+ chip->ecc.mode = NAND_ECC_HW;
+
+ /*
+ * Save a reference value for timing registers before
+ * ->setup_data_interface() is called.
+ */
+ marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0);
+ marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);
+
+ chip->options |= NAND_BUSWIDTH_AUTO;
+ ret = nand_scan_ident(mtd, marvell_nand->nsels, NULL);
+ if (ret) {
+ dev_err(dev, "could not identify the nand chip\n");
+ return ret;
+ }
+
+ if (pdata && pdata->flash_bbt)
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ /*
+ * We'll use a bad block table stored in-flash and don't
+ * allow writing the bad block marker to the flash.
+ */
+ chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /* Save the chip-specific fields of NDCR */
+ marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
+ if (chip->options & NAND_BUSWIDTH_16)
+ marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ /*
+ * On small page NANDs, only one cycle is needed to pass the
+ * column address.
+ */
+ if (mtd->writesize <= 512) {
+ marvell_nand->addr_cyc = 1;
+ } else {
+ marvell_nand->addr_cyc = 2;
+ marvell_nand->ndcr |= NDCR_RA_START;
+ }
+
+ /*
+ * Now add the number of cycles needed to pass the row
+ * address.
+ *
+ * Addressing a chip using CS 2 or 3 should also need a third row
+ * cycle, but due to inconsistencies in the documentation and the lack
+ * of hardware to test this situation, this case is not supported.
+ */
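+ /*
+ * E.g. (illustrative): a large-page device needing three row cycles
+ * ends up with addr_cyc = 2 (column) + 3 (row) = 5 address cycles.
+ */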
+ if (chip->options & NAND_ROW_ADDR_3)
+ marvell_nand->addr_cyc += 3;
+ else
+ marvell_nand->addr_cyc += 2;
+
+ if (pdata) {
+ chip->ecc.size = pdata->ecc_step_size;
+ chip->ecc.strength = pdata->ecc_strength;
+ }
+
+ ret = marvell_nand_ecc_init(mtd, &chip->ecc);
+ if (ret) {
+ dev_err(dev, "ECC init failed: %d\n", ret);
+ return ret;
+ }
+
+ if (chip->ecc.mode == NAND_ECC_HW) {
+ /*
+ * Subpage write is not available with hardware ECC; also prohibit
+ * subpage read, as otherwise subpage access from userspace would
+ * still be allowed and subpage writes, if used, would lead to
+ * numerous uncorrectable ECC errors.
+ */
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ }
+
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * We keep the MTD name unchanged to avoid breaking platforms
+ * where the MTD cmdline parser is used and the bootloader
+ * has not been updated to use the new naming scheme.
+ */
+ mtd->name = "pxa3xx_nand-0";
+ } else if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+ * should define the following property in your NAND node, e.g.:
+ *
+ * label = "main-storage";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s:nand.%d", dev_name(nfc->dev),
+ marvell_nand->sels[0].cs);
+ if (!mtd->name) {
+ dev_err(nfc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+ return ret;
+ }
+
+ if (pdata)
+ /* Legacy bindings support only one chip */
+ ret = mtd_device_register(mtd, pdata->parts[0],
+ pdata->nr_parts[0]);
+ else
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register mtd device: %d\n", ret);
+ nand_release(mtd);
+ return ret;
+ }
+
+ list_add_tail(&marvell_nand->node, &nfc->chips);
+
+ return 0;
+}
+
+static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int max_cs = nfc->caps->max_cs_nb;
+ int nchips;
+ int ret;
+
+ if (!np)
+ nchips = 1;
+ else
+ nchips = of_get_child_count(np);
+
+ if (nchips > max_cs) {
+ dev_err(dev, "too many NAND chips: %d (max = %d CS)\n", nchips,
+ max_cs);
+ return -EINVAL;
+ }
+
+ /*
+ * Legacy bindings do not use child nodes to describe NAND chip
+ * properties and layout. Instead, NAND properties are mixed with the
+ * controller ones, and partitions are defined as direct subnodes of
+ * the NAND controller node.
+ */
+ if (nfc->caps->legacy_of_bindings) {
+ ret = marvell_nand_chip_init(dev, nfc, np);
+ return ret;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = marvell_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
+{
+ struct marvell_nand_chip *entry, *temp;
+
+ list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
+ nand_release(nand_to_mtd(&entry->chip));
+ list_del(&entry->node);
+ }
+}
+
+static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
+{
+ struct platform_device *pdev = container_of(nfc->dev,
+ struct platform_device,
+ dev);
+ struct dma_slave_config config = {};
+ struct resource *r;
+ dma_cap_mask_t mask;
+ struct pxad_param param;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_PXA_DMA)) {
+ dev_warn(nfc->dev,
+ "DMA not enabled in configuration\n");
+ return -ENOTSUPP;
+ }
+
+ ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!r) {
+ dev_err(nfc->dev, "No resource defined for data DMA\n");
+ return -ENXIO;
+ }
+
+ param.drcmr = r->start;
+ param.prio = PXAD_PRIO_LOWEST;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ nfc->dma_chan =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, nfc->dev,
+ "data");
+ if (!nfc->dma_chan) {
+ dev_err(nfc->dev,
+ "Unable to request data DMA channel\n");
+ return -ENODEV;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENXIO;
+
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.src_addr = r->start + NDDB;
+ config.dst_addr = r->start + NDDB;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+ ret = dmaengine_slave_config(nfc->dma_chan, &config);
+ if (ret < 0) {
+ dev_err(nfc->dev, "Failed to configure DMA channel\n");
+ return ret;
+ }
+
+ /*
+ * DMA must act on a length that is a multiple of 32 and this length
+ * may be bigger than the destination buffer. Use this bounce buffer
+ * for DMA transfers instead, then copy the desired amount of data to
+ * the provided buffer.
+ */
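+ /*
+ * E.g. (illustrative figures): a 30-byte OOB transfer is performed as
+ * a 32-byte DMA into this bounce buffer, after which only the 30
+ * requested bytes are copied to the caller's buffer.
+ */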
+ nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!nfc->dma_buf)
+ return -ENOMEM;
+
+ nfc->use_dma = true;
+
+ return 0;
+}
+
+static int marvell_nfc_init(struct marvell_nfc *nfc)
+{
+ struct device_node *np = nfc->dev->of_node;
+
+ /*
+ * Some SoCs like A7k/A8k need to manually enable the NAND
+ * controller, gated clocks and reset bits to avoid being bootloader
+ * dependent. This is done through the use of the System Functions
+ * registers.
+ */
+ if (nfc->caps->need_system_controller) {
+ struct regmap *sysctrl_base =
+ syscon_regmap_lookup_by_phandle(np,
+ "marvell,system-controller");
+ u32 reg;
+
+ if (IS_ERR(sysctrl_base))
+ return PTR_ERR(sysctrl_base);
+
+ reg = GENCONF_SOC_DEVICE_MUX_NFC_EN |
+ GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST |
+ GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST |
+ GENCONF_SOC_DEVICE_MUX_NFC_INT_EN;
+ regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
+
+ regmap_read(sysctrl_base, GENCONF_CLK_GATING_CTRL, &reg);
+ reg |= GENCONF_CLK_GATING_CTRL_ND_GATE;
+ regmap_write(sysctrl_base, GENCONF_CLK_GATING_CTRL, reg);
+
+ regmap_read(sysctrl_base, GENCONF_ND_CLK_CTRL, &reg);
+ reg |= GENCONF_ND_CLK_CTRL_EN;
+ regmap_write(sysctrl_base, GENCONF_ND_CLK_CTRL, reg);
+ }
+
+ /* Configure the DMA if appropriate */
+ if (!nfc->caps->is_nfcv2)
+ marvell_nfc_init_dma(nfc);
+
+ /*
+ * ECC operations and interrupts are only enabled when specifically
+ * needed. ECC shall not be activated in the early stages (it makes
+ * probe fail). The arbiter flag, even if marked as "reserved", must be
+ * set (empirical). The SPARE_EN bit must always be set or the ECC
+ * bytes will not be at the same offset in the read page, which would
+ * defeat the protection.
+ */
+ writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
+ NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
+ writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+
+ return 0;
+}
+
+static int marvell_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ struct marvell_nfc *nfc;
+ int ret;
+ int irq;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(struct marvell_nfc),
+ GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = dev;
+ nand_hw_control_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->regs = devm_ioremap_resource(dev, r);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to retrieve irq\n");
+ return irq;
+ }
+
+ nfc->ecc_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nfc->ecc_clk))
+ return PTR_ERR(nfc->ecc_clk);
+
+ ret = clk_prepare_enable(nfc->ecc_clk);
+ if (ret)
+ return ret;
+
+ marvell_nfc_disable_int(nfc, NDCR_ALL_INT);
+ marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+ ret = devm_request_irq(dev, irq, marvell_nfc_isr,
+ 0, "marvell-nfc", nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ /* Get NAND controller capabilities */
+ if (pdev->id_entry)
+ nfc->caps = (void *)pdev->id_entry->driver_data;
+ else
+ nfc->caps = of_device_get_match_data(&pdev->dev);
+
+ if (!nfc->caps) {
+ dev_err(dev, "Could not retrieve NFC caps\n");
+ ret = -EINVAL;
+ goto unprepare_clk;
+ }
+
+ /* Init the controller and then probe the chips */
+ ret = marvell_nfc_init(nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = marvell_nand_chips_init(dev, nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ return 0;
+
+unprepare_clk:
+ clk_disable_unprepare(nfc->ecc_clk);
+
+ return ret;
+}
+
+static int marvell_nfc_remove(struct platform_device *pdev)
+{
+ struct marvell_nfc *nfc = platform_get_drvdata(pdev);
+
+ marvell_nand_chips_cleanup(nfc);
+
+ if (nfc->use_dma) {
+ dmaengine_terminate_all(nfc->dma_chan);
+ dma_release_channel(nfc->dma_chan);
+ }
+
+ clk_disable_unprepare(nfc->ecc_clk);
+
+ return 0;
+}
+
+static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .need_system_controller = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_pxa3xx_nfc_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .use_dma = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada_8k_nfc_legacy_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .need_system_controller = true,
+ .legacy_of_bindings = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada370_nfc_legacy_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .legacy_of_bindings = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_pxa3xx_nfc_legacy_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .legacy_of_bindings = true,
+ .use_dma = true,
+};
+
+static const struct platform_device_id marvell_nfc_platform_ids[] = {
+ {
+ .name = "pxa3xx-nand",
+ .driver_data = (kernel_ulong_t)&marvell_pxa3xx_nfc_legacy_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, marvell_nfc_platform_ids);
+
+static const struct of_device_id marvell_nfc_of_ids[] = {
+ {
+ .compatible = "marvell,armada-8k-nand-controller",
+ .data = &marvell_armada_8k_nfc_caps,
+ },
+ {
+ .compatible = "marvell,armada370-nand-controller",
+ .data = &marvell_armada370_nfc_caps,
+ },
+ {
+ .compatible = "marvell,pxa3xx-nand-controller",
+ .data = &marvell_pxa3xx_nfc_caps,
+ },
+ /* Support for old/deprecated bindings: */
+ {
+ .compatible = "marvell,armada-8k-nand",
+ .data = &marvell_armada_8k_nfc_legacy_caps,
+ },
+ {
+ .compatible = "marvell,armada370-nand",
+ .data = &marvell_armada370_nfc_legacy_caps,
+ },
+ {
+ .compatible = "marvell,pxa3xx-nand",
+ .data = &marvell_pxa3xx_nfc_legacy_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, marvell_nfc_of_ids);
+
+static struct platform_driver marvell_nfc_driver = {
+ .driver = {
+ .name = "marvell-nfc",
+ .of_match_table = marvell_nfc_of_ids,
+ },
+ .id_table = marvell_nfc_platform_ids,
+ .probe = marvell_nfc_probe,
+ .remove = marvell_nfc_remove,
+};
+module_platform_driver(marvell_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Marvell NAND controller driver");
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index c51d214d169e..40d86a861a70 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -34,34 +34,28 @@
#define ECC_ENCCON (0x00)
#define ECC_ENCCNFG (0x04)
-#define ECC_MODE_SHIFT (5)
#define ECC_MS_SHIFT (16)
#define ECC_ENCDIADDR (0x08)
#define ECC_ENCIDLE (0x0C)
-#define ECC_ENCIRQ_EN (0x80)
-#define ECC_ENCIRQ_STA (0x84)
#define ECC_DECCON (0x100)
#define ECC_DECCNFG (0x104)
#define DEC_EMPTY_EN BIT(31)
#define DEC_CNFG_CORRECT (0x3 << 12)
#define ECC_DECIDLE (0x10C)
#define ECC_DECENUM0 (0x114)
-#define ECC_DECDONE (0x124)
-#define ECC_DECIRQ_EN (0x200)
-#define ECC_DECIRQ_STA (0x204)
#define ECC_TIMEOUT (500000)
#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
-#define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \
- ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
struct mtk_ecc_caps {
u32 err_mask;
const u8 *ecc_strength;
+ const u32 *ecc_regs;
u8 num_ecc_strength;
- u32 encode_parity_reg0;
+ u8 ecc_mode_shift;
+ u32 parity_bits;
int pg_irq_sel;
};
@@ -89,6 +83,46 @@ static const u8 ecc_strength_mt2712[] = {
40, 44, 48, 52, 56, 60, 68, 72, 80
};
+static const u8 ecc_strength_mt7622[] = {
+ 4, 6, 8, 10, 12, 14, 16
+};
+
+enum mtk_ecc_regs {
+ ECC_ENCPAR00,
+ ECC_ENCIRQ_EN,
+ ECC_ENCIRQ_STA,
+ ECC_DECDONE,
+ ECC_DECIRQ_EN,
+ ECC_DECIRQ_STA,
+};
+
+static int mt2701_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x10,
+ [ECC_ENCIRQ_EN] = 0x80,
+ [ECC_ENCIRQ_STA] = 0x84,
+ [ECC_DECDONE] = 0x124,
+ [ECC_DECIRQ_EN] = 0x200,
+ [ECC_DECIRQ_STA] = 0x204,
+};
+
+static int mt2712_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x300,
+ [ECC_ENCIRQ_EN] = 0x80,
+ [ECC_ENCIRQ_STA] = 0x84,
+ [ECC_DECDONE] = 0x124,
+ [ECC_DECIRQ_EN] = 0x200,
+ [ECC_DECIRQ_STA] = 0x204,
+};
+
+static int mt7622_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x10,
+ [ECC_ENCIRQ_EN] = 0x30,
+ [ECC_ENCIRQ_STA] = 0x34,
+ [ECC_DECDONE] = 0x11c,
+ [ECC_DECIRQ_EN] = 0x140,
+ [ECC_DECIRQ_STA] = 0x144,
+};
+
static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
enum mtk_ecc_operation op)
{
@@ -107,32 +141,30 @@ static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
static irqreturn_t mtk_ecc_irq(int irq, void *id)
{
struct mtk_ecc *ecc = id;
- enum mtk_ecc_operation op;
u32 dec, enc;
- dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN;
+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA])
+ & ECC_IRQ_EN;
if (dec) {
- op = ECC_DECODE;
- dec = readw(ecc->regs + ECC_DECDONE);
+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
if (dec & ecc->sectors) {
/*
* Clear decode IRQ status once again to ensure that
* there will be no extra IRQ.
*/
- readw(ecc->regs + ECC_DECIRQ_STA);
+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
ecc->sectors = 0;
complete(&ecc->done);
} else {
return IRQ_HANDLED;
}
} else {
- enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN;
- if (enc) {
- op = ECC_ENCODE;
+ enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA])
+ & ECC_IRQ_EN;
+ if (enc)
complete(&ecc->done);
- } else {
+ else
return IRQ_NONE;
- }
}
return IRQ_HANDLED;
@@ -160,7 +192,7 @@ static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
/* configure ECC encoder (in bits) */
enc_sz = config->len << 3;
- reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
reg |= (enc_sz << ECC_MS_SHIFT);
writel(reg, ecc->regs + ECC_ENCCNFG);
@@ -171,9 +203,9 @@ static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
} else {
/* configure ECC decoder (in bits) */
dec_sz = (config->len << 3) +
- config->strength * ECC_PARITY_BITS;
+ config->strength * ecc->caps->parity_bits;
- reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
reg |= DEC_EMPTY_EN;
writel(reg, ecc->regs + ECC_DECCNFG);
@@ -291,7 +323,12 @@ int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
*/
if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE)
reg_val |= ECC_PG_IRQ_SEL;
- writew(reg_val, ecc->regs + ECC_IRQ_REG(op));
+ if (op == ECC_ENCODE)
+ writew(reg_val, ecc->regs +
+ ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
+ else
+ writew(reg_val, ecc->regs +
+ ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
}
writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
@@ -310,13 +347,17 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
/* disable it */
mtk_ecc_wait_idle(ecc, op);
- if (op == ECC_DECODE)
+ if (op == ECC_DECODE) {
/*
* Clear decode IRQ status in case there is a timeout to wait
* decode IRQ.
*/
- readw(ecc->regs + ECC_DECIRQ_STA);
- writew(0, ecc->regs + ECC_IRQ_REG(op));
+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
+ } else {
+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
+ }
+
writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
mutex_unlock(&ecc->lock);
@@ -367,11 +408,11 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
mtk_ecc_wait_idle(ecc, ECC_ENCODE);
/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
- len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
+ len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
/* write the parity bytes generated by the ECC back to temp buffer */
__ioread32_copy(ecc->eccdata,
- ecc->regs + ecc->caps->encode_parity_reg0,
+ ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00],
round_up(len, 4));
/* copy into possibly unaligned OOB region with actual length */
@@ -404,22 +445,42 @@ void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
}
EXPORT_SYMBOL(mtk_ecc_adjust_strength);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc)
+{
+ return ecc->caps->parity_bits;
+}
+EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
+
static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
.err_mask = 0x3f,
.ecc_strength = ecc_strength_mt2701,
+ .ecc_regs = mt2701_ecc_regs,
.num_ecc_strength = 20,
- .encode_parity_reg0 = 0x10,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
.pg_irq_sel = 0,
};
static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
.err_mask = 0x7f,
.ecc_strength = ecc_strength_mt2712,
+ .ecc_regs = mt2712_ecc_regs,
.num_ecc_strength = 23,
- .encode_parity_reg0 = 0x300,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
.pg_irq_sel = 1,
};
+static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
+ .err_mask = 0x3f,
+ .ecc_strength = ecc_strength_mt7622,
+ .ecc_regs = mt7622_ecc_regs,
+ .num_ecc_strength = 7,
+ .ecc_mode_shift = 4,
+ .parity_bits = 13,
+ .pg_irq_sel = 0,
+};
+
static const struct of_device_id mtk_ecc_dt_match[] = {
{
.compatible = "mediatek,mt2701-ecc",
@@ -427,6 +488,9 @@ static const struct of_device_id mtk_ecc_dt_match[] = {
}, {
.compatible = "mediatek,mt2712-ecc",
.data = &mtk_ecc_caps_mt2712,
+ }, {
+ .compatible = "mediatek,mt7622-ecc",
+ .data = &mtk_ecc_caps_mt7622,
},
{},
};
@@ -452,7 +516,7 @@ static int mtk_ecc_probe(struct platform_device *pdev)
max_eccdata_size = ecc->caps->num_ecc_strength - 1;
max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
- max_eccdata_size = (max_eccdata_size * ECC_PARITY_BITS + 7) >> 3;
+ max_eccdata_size = (max_eccdata_size * ecc->caps->parity_bits + 7) >> 3;
max_eccdata_size = round_up(max_eccdata_size, 4);
ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL);
if (!ecc->eccdata)
diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h
index d245c14f1b80..a455df080952 100644
--- a/drivers/mtd/nand/mtk_ecc.h
+++ b/drivers/mtd/nand/mtk_ecc.h
@@ -14,8 +14,6 @@
#include <linux/types.h>
-#define ECC_PARITY_BITS (14)
-
enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
@@ -43,6 +41,7 @@ int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
void mtk_ecc_disable(struct mtk_ecc *);
void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
void mtk_ecc_release(struct mtk_ecc *);
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index d86a7d131cc0..6977da3a26aa 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -97,7 +97,6 @@
#define MTK_TIMEOUT (500000)
#define MTK_RESET_TIMEOUT (1000000)
-#define MTK_MAX_SECTOR (16)
#define MTK_NAND_MAX_NSELS (2)
#define MTK_NFC_MIN_SPARE (16)
#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
@@ -109,6 +108,8 @@ struct mtk_nfc_caps {
u8 num_spare_size;
u8 pageformat_spare_shift;
u8 nfi_clk_div;
+ u8 max_sector;
+ u32 max_sector_size;
};
struct mtk_nfc_bad_mark_ctl {
@@ -173,6 +174,10 @@ static const u8 spare_size_mt2712[] = {
74
};
+static const u8 spare_size_mt7622[] = {
+ 16, 26, 27, 28
+};
+
static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
{
return container_of(nand, struct mtk_nfc_nand_chip, nand);
@@ -450,7 +455,7 @@ static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
* set to max sector to allow the HW to continue reading over
* unaligned accesses
*/
- reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
+ reg = (nfc->caps->max_sector << CON_SEC_SHIFT) | CON_BRD;
nfi_writel(nfc, reg, NFI_CON);
/* trigger to fetch data */
@@ -481,7 +486,7 @@ static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
nfi_writew(nfc, reg, NFI_CNFG);
- reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
+ reg = nfc->caps->max_sector << CON_SEC_SHIFT | CON_BWR;
nfi_writel(nfc, reg, NFI_CON);
nfi_writew(nfc, STAR_EN, NFI_STRDATA);
@@ -761,6 +766,8 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
u32 reg;
int ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
if (!raw) {
/* OOB => FDM: from register, ECC: from HW */
reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
@@ -794,7 +801,10 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (!raw)
mtk_ecc_disable(nfc->ecc);
- return ret;
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
}
static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
@@ -832,18 +842,7 @@ static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- int ret;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
- ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
- if (ret < 0)
- return -EIO;
-
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- ret = chip->waitfunc(mtd, chip);
-
- return ret & NAND_STATUS_FAIL ? -EIO : 0;
+ return mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
}
static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
@@ -892,8 +891,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
buf = bufpoi + start * chip->ecc.size;
- if (column != 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
+ nand_read_page_op(chip, page, column, NULL, 0);
addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
rc = dma_mapping_error(nfc->dev, addr);
@@ -1016,8 +1014,6 @@ static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
}
@@ -1126,9 +1122,11 @@ static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
u32 ecc_bytes;
- ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
+ ecc_bytes = DIV_ROUND_UP(nand->ecc.strength *
+ mtk_ecc_get_parity_bits(nfc->ecc), 8);
fdm->reg_size = chip->spare_per_sector - ecc_bytes;
if (fdm->reg_size > NFI_FDM_MAX_SIZE)
@@ -1208,7 +1206,8 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
* this controller only supports 512 and 1024 sizes
*/
if (nand->ecc.size < 1024) {
- if (mtd->writesize > 512) {
+ if (mtd->writesize > 512 &&
+ nfc->caps->max_sector_size > 512) {
nand->ecc.size = 1024;
nand->ecc.strength <<= 1;
} else {
@@ -1223,7 +1222,8 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
return ret;
/* calculate oob bytes except ecc parity data */
- free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
+ free = (nand->ecc.strength * mtk_ecc_get_parity_bits(nfc->ecc)
+ + 7) >> 3;
free = spare - free;
/*
@@ -1233,10 +1233,12 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
*/
if (free > NFI_FDM_MAX_SIZE) {
spare -= NFI_FDM_MAX_SIZE;
- nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+ nand->ecc.strength = (spare << 3) /
+ mtk_ecc_get_parity_bits(nfc->ecc);
} else if (free < 0) {
spare -= NFI_FDM_MIN_SIZE;
- nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+ nand->ecc.strength = (spare << 3) /
+ mtk_ecc_get_parity_bits(nfc->ecc);
}
}
@@ -1389,6 +1391,8 @@ static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = {
.num_spare_size = 16,
.pageformat_spare_shift = 4,
.nfi_clk_div = 1,
+ .max_sector = 16,
+ .max_sector_size = 1024,
};
static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
@@ -1396,6 +1400,17 @@ static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
.num_spare_size = 19,
.pageformat_spare_shift = 16,
.nfi_clk_div = 2,
+ .max_sector = 16,
+ .max_sector_size = 1024,
+};
+
+static const struct mtk_nfc_caps mtk_nfc_caps_mt7622 = {
+ .spare_size = spare_size_mt7622,
+ .num_spare_size = 4,
+ .pageformat_spare_shift = 4,
+ .nfi_clk_div = 1,
+ .max_sector = 8,
+ .max_sector_size = 512,
};
static const struct of_device_id mtk_nfc_id_table[] = {
@@ -1405,6 +1420,9 @@ static const struct of_device_id mtk_nfc_id_table[] = {
}, {
.compatible = "mediatek,mt2712-nfc",
.data = &mtk_nfc_caps_mt2712,
+ }, {
+ .compatible = "mediatek,mt7622-nfc",
+ .data = &mtk_nfc_caps_mt7622,
},
{}
};
@@ -1540,7 +1558,6 @@ static int mtk_nfc_resume(struct device *dev)
struct mtk_nfc *nfc = dev_get_drvdata(dev);
struct mtk_nfc_nand_chip *chip;
struct nand_chip *nand;
- struct mtd_info *mtd;
int ret;
u32 i;
@@ -1553,11 +1570,8 @@ static int mtk_nfc_resume(struct device *dev)
/* reset NAND chip if VCC was powered off */
list_for_each_entry(chip, &nfc->chips, node) {
nand = &chip->nand;
- mtd = nand_to_mtd(nand);
- for (i = 0; i < chip->nsels; i++) {
- nand->select_chip(mtd, i);
- nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- }
+ for (i = 0; i < chip->nsels; i++)
+ nand_reset(nand, i);
}
return 0;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 6135d007a068..e70ca16a5118 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -561,14 +561,19 @@ static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
static int nand_check_wp(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ u8 status;
+ int ret;
/* Broken xD cards report WP despite being writable */
if (chip->options & NAND_BROKEN_XD)
return 0;
/* Check the WP bit */
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_WP ? 0 : 1;
}
/**
@@ -667,16 +672,83 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
{
register struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret;
timeo = jiffies + msecs_to_jiffies(timeo);
do {
- if ((chip->read_byte(mtd) & NAND_STATUS_READY))
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
break;
touch_softlockup_watchdog();
} while (time_before(jiffies, timeo));
};
/**
+ * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
+ * @chip: NAND chip structure
+ * @timeout_ms: Timeout in ms
+ *
+ * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
+ * If that does not happen within the specified timeout, -ETIMEDOUT is
+ * returned.
+ *
+ * This helper is intended to be used when the controller does not have access
+ * to the NAND R/B pin.
+ *
+ * Be aware that calling this helper from an ->exec_op() implementation means
+ * ->exec_op() must be re-entrant.
+ *
+ * Return 0 if the NAND chip is ready, a negative error otherwise.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+{
+ u8 status = 0;
+ int ret;
+
+ if (!chip->exec_op)
+ return -ENOTSUPP;
+
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
+
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ break;
+
+ if (status & NAND_STATUS_READY)
+ break;
+
+ /*
+ * Typical lowest execution time for a tR on most NANDs is 10us,
+ * use this as polling delay before doing something smarter (ie.
+ * deriving a delay from the timeout value, timeout_ms/ratio).
+ */
+ udelay(10);
+ } while (time_before(jiffies, timeout_ms));
+
+ /*
+ * We have to exit READ_STATUS mode in order to read real data on the
+ * bus in case the WAITRDY instruction is preceding a DATA_IN
+ * instruction.
+ */
+ nand_exit_status_op(chip);
+
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
+};
+EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
+
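A sketch of how a controller driver without access to the R/B pin might
service a WAITRDY instruction from its ->exec_op() implementation (the driver
name and hook are hypothetical):

/*
 * Hypothetical driver hook: fall back to polling the STATUS register
 * when no R/B pin is wired. nand_soft_waitrdy() re-enters ->exec_op(),
 * so the implementation must be re-entrant.
 */
static int foo_nfc_wait_rdy(struct nand_chip *chip,
			    const struct nand_op_instr *instr)
{
	return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
}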
+/**
* nand_command - [DEFAULT] Send command to NAND device
* @mtd: MTD device structure
* @command: the command to be sent
@@ -710,7 +782,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, readcmd, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
}
- chip->cmd_ctrl(mtd, command, ctrl);
+ if (command != NAND_CMD_NONE)
+ chip->cmd_ctrl(mtd, command, ctrl);
/* Address cycle, when necessary */
ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
@@ -738,6 +811,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
+ case NAND_CMD_NONE:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
@@ -802,8 +876,8 @@ static void nand_ccs_delay(struct nand_chip *chip)
* Wait tCCS_min if it is correctly defined, otherwise wait 500ns
* (which should be safe for all NANDs).
*/
- if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
- ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
+ if (chip->setup_data_interface)
+ ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
else
ndelay(500);
}
@@ -831,7 +905,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
/* Command latch cycle */
- chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ if (command != NAND_CMD_NONE)
+ chip->cmd_ctrl(mtd, command,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
if (column != -1 || page_addr != -1) {
int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@@ -866,6 +942,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
+ case NAND_CMD_NONE:
case NAND_CMD_CACHEDPROG:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
@@ -1014,7 +1091,15 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
if (chip->dev_ready(mtd))
break;
} else {
- if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ int ret;
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status),
+ true);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
break;
}
mdelay(1);
@@ -1031,8 +1116,9 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
- int status;
unsigned long timeo = 400;
+ u8 status;
+ int ret;
/*
* Apply this short delay always to ensure that we do wait tWB in any
@@ -1040,7 +1126,9 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
*/
ndelay(100);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
if (in_interrupt() || oops_in_progress)
panic_nand_wait(mtd, chip, timeo);
@@ -1051,14 +1139,22 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
if (chip->dev_ready(mtd))
break;
} else {
- if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ ret = nand_read_data_op(chip, &status,
+ sizeof(status), true);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_READY)
break;
}
cond_resched();
} while (time_before(jiffies, timeo));
}
- status = (int)chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ return ret;
+
/* This can happen if in case of timeout or buggy dev_ready */
WARN_ON(!(status & NAND_STATUS_READY));
return status;
@@ -1076,7 +1172,6 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- const struct nand_data_interface *conf;
int ret;
if (!chip->setup_data_interface)
@@ -1096,8 +1191,8 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
* timings to timing mode 0.
*/
- conf = nand_get_default_data_interface();
- ret = chip->setup_data_interface(mtd, chipnr, conf);
+ onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
+ ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
if (ret)
pr_err("Failed to configure data interface to SDR timing mode 0\n");
@@ -1122,7 +1217,7 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
- if (!chip->setup_data_interface || !chip->data_interface)
+ if (!chip->setup_data_interface)
return 0;
/*
@@ -1143,7 +1238,7 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
goto err;
}
- ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface);
+ ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
err:
return ret;
}
@@ -1183,21 +1278,19 @@ static int nand_init_data_interface(struct nand_chip *chip)
modes = GENMASK(chip->onfi_timing_mode_default, 0);
}
- chip->data_interface = kzalloc(sizeof(*chip->data_interface),
- GFP_KERNEL);
- if (!chip->data_interface)
- return -ENOMEM;
for (mode = fls(modes) - 1; mode >= 0; mode--) {
- ret = onfi_init_data_interface(chip, chip->data_interface,
- NAND_SDR_IFACE, mode);
+ ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
if (ret)
continue;
- /* Pass -1 to only */
+ /*
+ * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
+ * controller supports the requested timings.
+ */
ret = chip->setup_data_interface(mtd,
NAND_DATA_IFACE_CHECK_ONLY,
- chip->data_interface);
+ &chip->data_interface);
if (!ret) {
chip->onfi_timing_mode_default = mode;
break;
@@ -1207,21 +1300,1429 @@ static int nand_init_data_interface(struct nand_chip *chip)
return 0;
}
-static void nand_release_data_interface(struct nand_chip *chip)
+/**
+ * nand_fill_column_cycles - fill the column cycles of an address
+ * @chip: The NAND chip
+ * @addrs: Array of address cycles to fill
+ * @offset_in_page: The offset in the page
+ *
+ * Fills the first one or two bytes of the @addrs field, depending on the
+ * NAND bus width and the page size.
+ *
+ * Returns the number of cycles needed to encode the column, or a negative
+ * error code in case one of the arguments is invalid.
+ */
+static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ unsigned int offset_in_page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Make sure the offset stays within the page, OOB area included. */
+ if (offset_in_page > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /*
+ * On small page NANDs, there's a dedicated command to access the OOB
+ * area, and the column address is relative to the start of the OOB
+ * area, not the start of the page. Adjust the address accordingly.
+ */
+ if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+ offset_in_page -= mtd->writesize;
+
+ /*
+ * The offset in page is expressed in bytes; if the NAND bus is 16-bit
+ * wide, it must be divided by 2.
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ if (WARN_ON(offset_in_page % 2))
+ return -EINVAL;
+
+ offset_in_page /= 2;
+ }
+
+ addrs[0] = offset_in_page;
+
+ /*
+ * Small page NANDs use 1 cycle for the columns, while large page NANDs
+ * need 2
+ */
+ if (mtd->writesize <= 512)
+ return 1;
+
+ addrs[1] = offset_in_page >> 8;
+
+ return 2;
+}
+
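A quick worked example of the column encoding, with illustrative geometry:

/*
 * Illustration: on an 8-bit bus with a 2048+64 byte page,
 * offset_in_page = 2050 (0x802) yields addrs[0] = 0x02, addrs[1] = 0x08
 * and a return value of 2 cycles. On a 16-bit bus the offset is first
 * halved to 1025 (0x401), giving addrs[0] = 0x01, addrs[1] = 0x04.
 */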
+static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[4];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[1] = page;
+ addrs[2] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[3] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[5];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(4, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[2] = page;
+ addrs[3] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[4] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+/**
+ * nand_read_page_op - Do a READ PAGE operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ if (mtd->writesize > 512)
+ return nand_lp_exec_read_page_op(chip, page,
+ offset_in_page, buf,
+ len);
+
+ return nand_sp_exec_read_page_op(chip, page, offset_in_page,
+ buf, len);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_page_op);
+
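For example, a caller could fetch the first ECC chunk of a page like this
(the page number and chunk size are illustrative):

/* Illustration only: read 512 bytes from the start of page 42. */
u8 chunk[512];
int ret;

ret = nand_read_page_op(chip, 42, 0, chunk, sizeof(chunk));
if (ret)
	return ret;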
+/**
+ * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
+ * @chip: The NAND chip
+ * @page: parameter page to read
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PARAMETER PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int i;
+ u8 *p = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PARAM, 0),
+ NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
+ for (i = 0; i < len; i++)
+ p[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+
+/**
+ * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE READ COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[2] = {};
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
+ PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ instrs[3].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_read_column_op);
+
+/**
+ * nand_read_oob_op - Do a READ OOB operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_oob: offset within the OOB area
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ OOB operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_oob, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_oob + len > mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op)
+ return nand_read_page_op(chip, page,
+ mtd->writesize + offset_in_oob,
+ buf, len);
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_oob_op);
+
+static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool prog)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[5] = {};
+ struct nand_op_instr instrs[] = {
+ /*
+ * The first instruction will be dropped if we're dealing
+ * with a large page NAND and adjusted if we're dealing
+ * with a small page NAND and the page offset is > 255.
+ */
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_CMD(NAND_CMD_SEQIN, 0),
+ NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ int ret;
+ u8 status;
+
+ if (naddrs < 0)
+ return naddrs;
+
+ addrs[naddrs++] = page;
+ addrs[naddrs++] = page >> 8;
+ if (chip->options & NAND_ROW_ADDR_3)
+ addrs[naddrs++] = page >> 16;
+
+ instrs[2].ctx.addr.naddrs = naddrs;
+
+ /* Drop the last two instructions if we're not programming the page. */
+ if (!prog) {
+ op.ninstrs -= 2;
+ /* Also drop the DATA_OUT instruction if empty. */
+ if (!len)
+ op.ninstrs--;
+ }
+
+ if (mtd->writesize <= 512) {
+ /*
+ * Small pages need some more tweaking: we have to adjust the
+ * first instruction depending on the page offset we're trying
+ * to access.
+ */
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+ } else {
+ /*
+ * Drop the first command if we're dealing with a large page
+ * NAND.
+ */
+ op.instrs++;
+ op.ninstrs--;
+ }
+
+ ret = nand_exec_op(chip, &op);
+ if (!prog || ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status;
+}
+
+/**
+ * nand_prog_page_begin_op - starts a PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues the first half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op)
+ return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, false);
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
+
+ if (buf)
+ chip->write_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
+
+/**
+ * nand_prog_page_end_op - ends a PROG PAGE operation
+ * @chip: The NAND chip
+ *
+ * This function issues the second half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_end_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PAGEPROG,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
+
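The begin/end split is what lets drivers such as mtk_nand.c (converted
earlier in this patch) push the page data themselves, e.g. through DMA,
between the two halves. A sketch, with controller specifics elided:

/* Start the PROG PAGE sequence without sending any data. */
ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
if (ret)
	return ret;

/* ... let the controller/DMA engine transfer the page ... */

/* Issue PAGEPROG, wait for completion and check the status. */
return nand_prog_page_end_op(chip);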
+/**
+ * nand_prog_page_op - Do a full PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues a full PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
{
- kfree(chip->data_interface);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status;
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, true);
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
+ chip->write_buf(mtd, buf, len);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(nand_prog_page_op);
+
+/**
+ * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to send to the NAND
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE WRITE COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[2];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDIN, 0),
+ NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ instrs[2].ctx.data.force_8bit = force_8bit;
+
+ /* Drop the DATA_OUT instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
+ if (len)
+ chip->write_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_write_column_op);
+
+/**
+ * nand_readid_op - Do a READID operation
+ * @chip: The NAND chip
+ * @addr: address cycle to pass after the READID command
+ * @buf: buffer used to store the ID
+ * @len: length of the buffer
+ *
+ * This function sends a READID command and reads back the ID returned by the
+ * NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int i;
+ u8 *id = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READID, 0),
+ NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);
+
+ for (i = 0; i < len; i++)
+ id[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_readid_op);
+
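Typical usage at identification time might look like this (the buffer length
is illustrative):

/* Illustration only: read the manufacturer and device ID bytes. */
u8 id[2];
int ret;

ret = nand_readid_op(chip, 0, id, sizeof(id));
if (ret)
	return ret;

pr_debug("NAND ID: %*ph\n", (int)sizeof(id), id);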
+/**
+ * nand_status_op - Do a STATUS operation
+ * @chip: The NAND chip
+ * @status: out variable to store the NAND status
+ *
+ * This function sends a STATUS command and reads back the status returned by
+ * the NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_status_op(struct nand_chip *chip, u8 *status)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_STATUS,
+ PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(1, status, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ if (!status)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ if (status)
+ *status = chip->read_byte(mtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_status_op);
+
+/**
+ * nand_exit_status_op - Exit a STATUS operation
+ * @chip: The NAND chip
+ *
+ * This function sends a READ0 command to cancel the effect of the STATUS
+ * command; otherwise the bus would keep returning the status byte instead of
+ * real data until a new read command is sent.
+ *
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_exit_status_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_exit_status_op);
+
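nand_status_op() pairs with nand_exit_status_op() when raw data cycles
follow, as in nand_soft_waitrdy() above; for a one-shot query such as the
write-protect check in nand_check_wp(), reading the status byte is enough.
A condensed sketch:

/* Illustration only: query the write-protect state of the array. */
u8 status;
int ret;

ret = nand_status_op(chip, &status);
if (ret)
	return ret;

/* NAND_STATUS_WP reads 1 when the array is writable. */
if (!(status & NAND_STATUS_WP))
	pr_debug("NAND array is write-protected\n");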
+/**
+ * nand_erase_op - Do an erase operation
+ * @chip: The NAND chip
+ * @eraseblock: block to erase
+ *
+ * This function sends an ERASE command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int page = eraseblock <<
+ (chip->phys_erase_shift - chip->page_shift);
+ int ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[3] = { page, page >> 8, page >> 16 };
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_ERASE1, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_ERASE2,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ instrs[1].ctx.addr.naddrs++;
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_erase_op);
+
+/**
+ * nand_set_features_op - Do a SET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a SET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_set_features_op(struct nand_chip *chip, u8 feature,
+ const void *data)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const u8 *params = data;
+ int i, ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->write_byte(mtd, params[i]);
+
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * nand_get_features_op - Do a GET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a GET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_get_features_op(struct nand_chip *chip, u8 feature,
+ void *data)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *params = data;
+ int i;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
+ data, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ params[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+
+/**
+ * nand_reset_op - Do a reset operation
+ * @chip: The NAND chip
+ *
+ * This function sends a RESET command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_reset_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_reset_op);
+
+/**
+ * nand_read_data_op - Read data from the NAND
+ * @chip: The NAND chip
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data read on the bus. Usually used after launching
+ * another NAND operation like nand_read_page_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ instrs[0].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ if (force_8bit) {
+ u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ p[i] = chip->read_byte(mtd);
+ } else {
+ chip->read_buf(mtd, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_data_op);
+
+/**
+ * nand_write_data_op - Write data to the NAND
+ * @chip: The NAND chip
+ * @buf: buffer containing the data to send on the bus
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data write on the bus. Usually used after launching
+ * another NAND operation like nand_prog_page_begin_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ instrs[0].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ if (force_8bit) {
+ const u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ chip->write_byte(mtd, p[i]);
+ } else {
+ chip->write_buf(mtd, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_write_data_op);
+
+/**
+ * struct nand_op_parser_ctx - Context used by the parser
+ * @instrs: array of all the instructions that must be addressed
+ * @ninstrs: length of the @instrs array
+ * @subop: Sub-operation to be passed to the NAND controller
+ *
+ * This structure is used by the core to split NAND operations into
+ * sub-operations that can be handled by the NAND controller.
+ */
+struct nand_op_parser_ctx {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ struct nand_subop subop;
+};
+
+/**
+ * nand_op_parser_must_split_instr - Checks if an instruction must be split
+ * @pat: the parser pattern element that matches @instr
+ * @instr: pointer to the instruction to check
+ * @start_offset: this is an in/out parameter. If @instr has already been
+ * split, then @start_offset is the offset from which to start
+ * (either an address cycle or an offset in the data buffer).
+ * Conversely, if the function returns true (ie. instr must be
+ * split), this parameter is updated to point to the first
+ * data/address cycle that has not been taken care of.
+ *
+ * Some NAND controllers are limited and cannot send X address cycles in a
+ * single operation, or cannot read/write more than Y bytes at the same time.
+ * In this case, split the instruction that does not fit in a single
+ * controller-operation into two or more chunks.
+ *
+ * Returns true if the instruction must be split, false otherwise.
+ * The @start_offset parameter is also updated to the offset at which the next
+ * bundle of instructions must start (for address and data instructions).
+ */
+static bool
+nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
+ const struct nand_op_instr *instr,
+ unsigned int *start_offset)
+{
+ switch (pat->type) {
+ case NAND_OP_ADDR_INSTR:
+ if (!pat->ctx.addr.maxcycles)
+ break;
+
+ if (instr->ctx.addr.naddrs - *start_offset >
+ pat->ctx.addr.maxcycles) {
+ *start_offset += pat->ctx.addr.maxcycles;
+ return true;
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ if (!pat->ctx.data.maxlen)
+ break;
+
+ if (instr->ctx.data.len - *start_offset >
+ pat->ctx.data.maxlen) {
+ *start_offset += pat->ctx.data.maxlen;
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
+ * nand_op_parser_match_pat - Checks if a pattern matches the instructions
+ * remaining in the parser context
+ * @pat: the pattern to test
+ * @ctx: the parser context structure to match with the pattern @pat
+ *
+ * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
+ * Returns true if this is the case, false otherwise. When true is returned,
+ * @ctx->subop is updated with the set of instructions to be passed to the
+ * controller driver.
+ */
+static bool
+nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
+ struct nand_op_parser_ctx *ctx)
+{
+ unsigned int instr_offset = ctx->subop.first_instr_start_off;
+ const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
+ const struct nand_op_instr *instr = ctx->subop.instrs;
+ unsigned int i, ninstrs;
+
+ for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
+ /*
+ * The pattern instruction does not match the operation
+ * instruction. If the instruction is marked optional in the
+ * pattern definition, we skip the pattern element and continue
+ * to the next one. If the element is mandatory, there's no
+ * match and we can return false directly.
+ */
+ if (instr->type != pat->elems[i].type) {
+ if (!pat->elems[i].optional)
+ return false;
+
+ continue;
+ }
+
+ /*
+ * Now check the pattern element constraints. If the pattern is
+ * not able to handle the whole instruction in a single step,
+ * we have to split it.
+ * The last_instr_end_off value comes back updated to point to
+ * the position where we have to split the instruction (the
+ * start of the next subop chunk).
+ */
+ if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
+ &instr_offset)) {
+ ninstrs++;
+ i++;
+ break;
+ }
+
+ instr++;
+ ninstrs++;
+ instr_offset = 0;
+ }
+
+ /*
+ * This can happen if all instructions of a pattern are optional.
+ * Still, if there's not at least one instruction handled by this
+ * pattern, this is not a match, and we should try the next one (if
+ * any).
+ */
+ if (!ninstrs)
+ return false;
+
+ /*
+ * We had a match on the pattern head, but the pattern may be longer
+ * than the instructions we're asked to execute. We need to make sure
+ * there are no mandatory elements in the pattern tail.
+ */
+ for (; i < pat->nelems; i++) {
+ if (!pat->elems[i].optional)
+ return false;
+ }
+
+ /*
+ * We have a match: update the subop structure accordingly and return
+ * true.
+ */
+ ctx->subop.ninstrs = ninstrs;
+ ctx->subop.last_instr_end_off = instr_offset;
+
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ const struct nand_op_instr *instr;
+ char *prefix = " ";
+ unsigned int i;
+
+ pr_debug("executing subop:\n");
+
+ for (i = 0; i < ctx->ninstrs; i++) {
+ instr = &ctx->instrs[i];
+
+ if (instr == &ctx->subop.instrs[0])
+ prefix = " ->";
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ pr_debug("%sCMD [0x%02x]\n", prefix,
+ instr->ctx.cmd.opcode);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
+ instr->ctx.addr.naddrs,
+ instr->ctx.addr.naddrs < 64 ?
+ instr->ctx.addr.naddrs : 64,
+ instr->ctx.addr.addrs);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ pr_debug("%sDATA_IN [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ pr_debug("%sWAITRDY [max %d ms]\n", prefix,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+
+ if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
+ prefix = " ";
+ }
+}
+#else
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ /* NOP */
+}
+#endif
+
+/**
+ * nand_op_parser_exec_op - exec_op parser
+ * @chip: the NAND chip
+ * @parser: patterns description provided by the controller driver
+ * @op: the NAND operation to address
+ * @check_only: when true, the function only checks if @op can be handled but
+ * does not execute the operation
+ *
+ * Helper function designed to ease integration of NAND controller drivers that
+ * only support a limited set of instruction sequences. The supported sequences
+ * are described in @parser, and the framework takes care of splitting @op into
+ * multiple sub-operations (if required), passing them back to the ->exec()
+ * callback of the matching pattern if @check_only is set to false.
+ *
+ * NAND controller drivers should call this function from their own ->exec_op()
+ * implementation.
+ *
+ * Returns 0 on success, a negative error code otherwise. A failure can be
+ * caused by an unsupported operation (none of the supported patterns is able
+ * to handle the requested operation), or an error returned by the matching
+ * pattern's ->exec() hook.
+ */
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only)
+{
+ struct nand_op_parser_ctx ctx = {
+ .subop.instrs = op->instrs,
+ .instrs = op->instrs,
+ .ninstrs = op->ninstrs,
+ };
+ unsigned int i;
+
+ while (ctx.subop.instrs < op->instrs + op->ninstrs) {
+ int ret;
+
+ for (i = 0; i < parser->npatterns; i++) {
+ const struct nand_op_parser_pattern *pattern;
+
+ pattern = &parser->patterns[i];
+ if (!nand_op_parser_match_pat(pattern, &ctx))
+ continue;
+
+ nand_op_parser_trace(&ctx);
+
+ if (check_only)
+ break;
+
+ ret = pattern->exec(chip, &ctx.subop);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
+ if (i == parser->npatterns) {
+ pr_debug("->exec_op() parser: pattern not found!\n");
+ return -ENOTSUPP;
+ }
+
+ /*
+ * Update the context structure by pointing to the start of the
+ * next subop.
+ */
+ ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
+ if (ctx.subop.last_instr_end_off)
+ ctx.subop.instrs -= 1;
+
+ ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
+
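A sketch of a hypothetical controller driver wiring its supported sequence
into nand_op_parser_exec_op(); the NAND_OP_PARSER_* macros come from the same
series (not from this hunk), and the 5-cycle/512-byte limits are illustrative.
foo_nfc_exec_subop() is sketched below, after the nand_subop_get_*() helpers:

static int foo_nfc_exec_subop(struct nand_chip *chip,
			      const struct nand_subop *subop);

/*
 * Hypothetical pattern: one CMD, up to 5 optional address cycles, an
 * optional second CMD, an optional wait and up to 512 bytes of optional
 * data input per controller operation.
 */
static const struct nand_op_parser foo_nfc_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(foo_nfc_exec_subop,
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 512)));

static int foo_nfc_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	return nand_op_parser_exec_op(chip, &foo_nfc_op_parser,
				      op, check_only);
}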
+static bool nand_instr_is_data(const struct nand_op_instr *instr)
+{
+ return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
+ instr->type == NAND_OP_DATA_OUT_INSTR);
+}
+
+static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ return subop && instr_idx < subop->ninstrs;
+}
+
+static int nand_subop_get_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (instr_idx)
+ return 0;
+
+ return subop->first_instr_start_off;
+}
+
+/**
+ * nand_subop_get_addr_start_off - Get the start offset in an address array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr.addrs field of address instructions. This is wrong as address
+ * instructions might be split.
+ *
+ * Given an address instruction, returns the offset of the first cycle to issue.
+ */
+int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
+ return -EINVAL;
+
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
+
+/**
+ * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr->naddrs field of an address instruction. This is wrong as instructions
+ * might be split.
+ *
+ * Given an address instruction, returns the number of address cycles to issue.
+ */
+int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off, end_off;
+
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
+ return -EINVAL;
+
+ start_off = nand_subop_get_addr_start_off(subop, instr_idx);
+
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
+
+ return end_off - start_off;
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
+
+/**
+ * nand_subop_get_data_start_off - Get the start offset in a data array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->data.buf.{in,out} field of data instructions. This is wrong as data
+ * instructions might be split.
+ *
+ * Given a data instruction, returns the offset to start from.
+ */
+int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx]))
+ return -EINVAL;
+
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
+
+/**
+ * nand_subop_get_data_len - Get the number of bytes to retrieve
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->data.len field of a data instruction. This is wrong as data instructions
+ * might be split.
+ *
+ * Returns the length of the chunk of data to send/receive.
+ */
+int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off = 0, end_off;
+
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx]))
+ return -EINVAL;
+
+ start_off = nand_subop_get_data_start_off(subop, instr_idx);
+
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.data.len;
+
+ return end_off - start_off;
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
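[Editor's sketch, same caveats as above.] The data-instruction counterpart: transfer exactly the chunk delimited by the two helpers above, never the instruction's full buffer. my_ctrl_read_buf() is a hypothetical PIO/DMA primitive.

static int my_ctrl_read_buf(struct nand_chip *chip, u8 *buf, int len);

static int my_ctrl_exec_data_in(struct nand_chip *chip,
				const struct nand_subop *subop,
				unsigned int instr_idx)
{
	const struct nand_op_instr *instr = &subop->instrs[instr_idx];
	int start = nand_subop_get_data_start_off(subop, instr_idx);
	int len = nand_subop_get_data_len(subop, instr_idx);

	/* Transfer only this sub-op's chunk of the data buffer. */
	return my_ctrl_read_buf(chip,
				(u8 *)instr->ctx.data.buf.in + start, len);
}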
/**
* nand_reset - Reset and initialize a NAND device
* @chip: The NAND chip
* @chipnr: Internal die id
*
- * Returns 0 for success or negative error code otherwise
+ * Save the data interface configuration, apply SDR timing mode 0 (see
+ * nand_reset_data_interface() for details), perform the reset operation,
+ * and restore the saved timings afterwards.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
int nand_reset(struct nand_chip *chip, int chipnr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_data_interface saved_data_intf = chip->data_interface;
int ret;
ret = nand_reset_data_interface(chip, chipnr);
@@ -1233,10 +2734,13 @@ int nand_reset(struct nand_chip *chip, int chipnr)
* interface settings, hence this weird ->select_chip() dance.
*/
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ ret = nand_reset_op(chip);
chip->select_chip(mtd, -1);
+ if (ret)
+ return ret;
chip->select_chip(mtd, chipnr);
+ chip->data_interface = saved_data_intf;
ret = nand_setup_data_interface(chip, chipnr);
chip->select_chip(mtd, -1);
if (ret)
@@ -1390,9 +2894,19 @@ EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- chip->read_buf(mtd, buf, mtd->writesize);
- if (oob_required)
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ int ret;
+
+ ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
EXPORT_SYMBOL(nand_read_page_raw);
@@ -1414,29 +2928,50 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
uint8_t *oob = chip->oob_poi;
- int steps, size;
+ int steps, size, ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
for (steps = chip->ecc.steps; steps > 0; steps--) {
- chip->read_buf(mtd, buf, eccsize);
+ ret = nand_read_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
buf += eccsize;
if (chip->ecc.prepad) {
- chip->read_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
- chip->read_buf(mtd, oob, eccbytes);
+ ret = nand_read_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->read_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
size = mtd->oobsize - (oob - chip->oob_poi);
- if (size)
- chip->read_buf(mtd, oob, size);
+ if (size) {
+ ret = nand_read_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -1456,8 +2991,8 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
@@ -1521,15 +3056,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
data_col_addr = start_step * chip->ecc.size;
/* If the data we read is not page aligned */
- if (data_col_addr != 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
-
p = bufpoi + data_col_addr;
- chip->read_buf(mtd, p, datafrag_len);
+ ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
+ if (ret)
+ return ret;
/* Calculate ECC */
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
- chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
+ chip->ecc.calculate(mtd, p, &chip->ecc.calc_buf[i]);
/*
* The performance is faster if we position offsets according to
@@ -1543,8 +3077,11 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
gaps = 1;
if (gaps) {
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
} else {
/*
* Send the command to read the particular ECC bytes take care
@@ -1558,12 +3095,15 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
(busw - 1))
aligned_len++;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + aligned_pos, -1);
- chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
+ ret = nand_change_read_column_op(chip,
+ mtd->writesize + aligned_pos,
+ &chip->oob_poi[aligned_pos],
+ aligned_len, false);
+ if (ret)
+ return ret;
}
- ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
+ ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
chip->oob_poi, index, eccfrag_len);
if (ret)
return ret;
@@ -1572,13 +3112,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
int stat;
- stat = chip->ecc.correct(mtd, p,
- &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
+ stat = chip->ecc.correct(mtd, p, &chip->ecc.code_buf[i],
+ &chip->ecc.calc_buf[i]);
if (stat == -EBADMSG &&
(chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
/* check for empty pages with bitflips */
stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
- &chip->buffers->ecccode[i],
+ &chip->ecc.code_buf[i],
chip->ecc.bytes,
NULL, 0,
chip->ecc.strength);
@@ -1611,16 +3151,27 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
}
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1674,14 +3225,18 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_code = chip->buffers->ecccode;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
unsigned int max_bitflips = 0;
/* Read the OOB area first */
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ if (ret)
+ return ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1692,7 +3247,11 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
int stat;
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
@@ -1729,7 +3288,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- int i, eccsize = chip->ecc.size;
+ int ret, i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
@@ -1737,25 +3296,44 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *oob = chip->oob_poi;
unsigned int max_bitflips = 0;
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
if (chip->ecc.prepad) {
- chip->read_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
- chip->read_buf(mtd, oob, eccbytes);
+
+ ret = nand_read_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
stat = chip->ecc.correct(mtd, p, oob, NULL);
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->read_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
@@ -1779,8 +3357,11 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
/* Calculate remaining oob bytes */
i = mtd->oobsize - (oob - chip->oob_poi);
- if (i)
- chip->read_buf(mtd, oob, i);
+ if (i) {
+ ret = nand_read_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
return max_bitflips;
}
@@ -1894,16 +3475,13 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
/* Is the current page in the buffer? */
if (realpage != chip->pagebuf || oob) {
- bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
+ bufpoi = use_bufpoi ? chip->data_buf : buf;
if (use_bufpoi && aligned)
pr_debug("%s: using read bounce buffer for buf@%p\n",
__func__, buf);
read_retry:
- if (nand_standard_page_accessors(&chip->ecc))
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
-
/*
* Now read the page into the buffer. Absent an error,
* the read methods return max bitflips per ecc step.
@@ -1938,7 +3516,7 @@ read_retry:
/* Invalidate page cache */
chip->pagebuf = -1;
}
- memcpy(buf, chip->buffers->databuf + col, bytes);
+ memcpy(buf, chip->data_buf + col, bytes);
}
if (unlikely(oob)) {
@@ -1979,7 +3557,7 @@ read_retry:
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips, ret);
} else {
- memcpy(buf, chip->buffers->databuf + col, bytes);
+ memcpy(buf, chip->data_buf + col, bytes);
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips,
chip->pagebuf_bitflips);
@@ -2027,33 +3605,6 @@ read_retry:
}
/**
- * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
- * @mtd: MTD device structure
- * @from: offset to read from
- * @len: number of bytes to read
- * @retlen: pointer to variable to store the number of read bytes
- * @buf: the databuffer to put data
- *
- * Get hold of the chip and call nand_do_read.
- */
-static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, uint8_t *buf)
-{
- struct mtd_oob_ops ops;
- int ret;
-
- nand_get_device(mtd, FL_READING);
- memset(&ops, 0, sizeof(ops));
- ops.len = len;
- ops.datbuf = buf;
- ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_read_ops(mtd, from, &ops);
- *retlen = ops.retlen;
- nand_release_device(mtd);
- return ret;
-}
-
-/**
* nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -2061,9 +3612,7 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
*/
int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);
@@ -2081,25 +3630,43 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size;
uint8_t *bufpoi = chip->oob_poi;
- int i, toread, sndrnd = 0, pos;
+ int i, toread, sndrnd = 0, pos, ret;
+
+ ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
+ if (ret)
+ return ret;
- chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
for (i = 0; i < chip->ecc.steps; i++) {
if (sndrnd) {
+ int ret;
+
pos = eccsize + i * (eccsize + chunk);
if (mtd->writesize > 512)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
+ ret = nand_change_read_column_op(chip, pos,
+ NULL, 0,
+ false);
else
- chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
+ ret = nand_read_page_op(chip, page, pos, NULL,
+ 0);
+
+ if (ret)
+ return ret;
} else
sndrnd = 1;
toread = min_t(int, length, chunk);
- chip->read_buf(mtd, bufpoi, toread);
+
+ ret = nand_read_data_op(chip, bufpoi, toread, false);
+ if (ret)
+ return ret;
+
bufpoi += toread;
length -= toread;
}
- if (length > 0)
- chip->read_buf(mtd, bufpoi, length);
+ if (length > 0) {
+ ret = nand_read_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -2113,18 +3680,8 @@ EXPORT_SYMBOL(nand_read_oob_syndrome);
*/
int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
- int status = 0;
- const uint8_t *buf = chip->oob_poi;
- int length = mtd->oobsize;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, buf, length);
- /* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);
@@ -2140,7 +3697,7 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
{
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size, length = mtd->oobsize;
- int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
+ int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
const uint8_t *bufpoi = chip->oob_poi;
/*
@@ -2154,7 +3711,10 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
} else
pos = eccsize;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+ ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; i < steps; i++) {
if (sndcmd) {
if (mtd->writesize <= 512) {
@@ -2163,28 +3723,40 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
len = eccsize;
while (len > 0) {
int num = min_t(int, len, 4);
- chip->write_buf(mtd, (uint8_t *)&fill,
- num);
+
+ ret = nand_write_data_op(chip, &fill,
+ num, false);
+ if (ret)
+ return ret;
+
len -= num;
}
} else {
pos = eccsize + i * (eccsize + chunk);
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
+ ret = nand_change_write_column_op(chip, pos,
+ NULL, 0,
+ false);
+ if (ret)
+ return ret;
}
} else
sndcmd = 1;
len = min_t(int, length, chunk);
- chip->write_buf(mtd, bufpoi, len);
+
+ ret = nand_write_data_op(chip, bufpoi, len, false);
+ if (ret)
+ return ret;
+
bufpoi += len;
length -= len;
}
- if (length > 0)
- chip->write_buf(mtd, bufpoi, length);
-
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
+ if (length > 0) {
+ ret = nand_write_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_oob_syndrome);
@@ -2199,6 +3771,7 @@ EXPORT_SYMBOL(nand_write_oob_syndrome);
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ unsigned int max_bitflips = 0;
int page, realpage, chipnr;
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtd_ecc_stats stats;
@@ -2214,21 +3787,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
len = mtd_oobavail(mtd, ops);
- if (unlikely(ops->ooboffs >= len)) {
- pr_debug("%s: attempt to start read outside oob\n",
- __func__);
- return -EINVAL;
- }
-
- /* Do not allow reads past end of device */
- if (unlikely(from >= mtd->size ||
- ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
- (from >> chip->page_shift)) * len)) {
- pr_debug("%s: attempt to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
chipnr = (int)(from >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
@@ -2256,6 +3814,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
nand_wait_ready(mtd);
}
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
readlen -= len;
if (!readlen)
break;
@@ -2281,7 +3841,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ return max_bitflips;
}
/**
@@ -2299,13 +3859,6 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
ops->retlen = 0;
- /* Do not allow reads past end of device */
- if (ops->datbuf && (from + ops->len) > mtd->size) {
- pr_debug("%s: attempt to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
if (ops->mode != MTD_OPS_PLACE_OOB &&
ops->mode != MTD_OPS_AUTO_OOB &&
ops->mode != MTD_OPS_RAW)
@@ -2336,11 +3889,20 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
- if (oob_required)
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ int ret;
- return 0;
+ ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);
@@ -2362,31 +3924,52 @@ static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
uint8_t *oob = chip->oob_poi;
- int steps, size;
+ int steps, size, ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
for (steps = chip->ecc.steps; steps > 0; steps--) {
- chip->write_buf(mtd, buf, eccsize);
+ ret = nand_write_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
buf += eccsize;
if (chip->ecc.prepad) {
- chip->write_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
- chip->write_buf(mtd, oob, eccbytes);
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->write_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
size = mtd->oobsize - (oob - chip->oob_poi);
- if (size)
- chip->write_buf(mtd, oob, size);
+ if (size) {
+ ret = nand_write_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
* nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
@@ -2403,7 +3986,7 @@ static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
const uint8_t *p = buf;
/* Software ECC calculation */
@@ -2433,12 +4016,20 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
const uint8_t *p = buf;
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
- chip->write_buf(mtd, p, eccsize);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
}
@@ -2447,9 +4038,11 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
if (ret)
return ret;
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
- return 0;
+ return nand_prog_page_end_op(chip);
}
@@ -2469,7 +4062,7 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
int oob_required, int page)
{
uint8_t *oob_buf = chip->oob_poi;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
@@ -2478,12 +4071,18 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
int oob_bytes = mtd->oobsize / ecc_steps;
int step, ret;
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (step = 0; step < ecc_steps; step++) {
/* configure controller for WRITE access */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
/* write data (untouched subpages already masked by 0xFF) */
- chip->write_buf(mtd, buf, ecc_size);
+ ret = nand_write_data_op(chip, buf, ecc_size, false);
+ if (ret)
+ return ret;
/* mask ECC of un-touched subpages by padding 0xFF */
if ((step < start_step) || (step > end_step))
@@ -2503,16 +4102,18 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
/* Copy calculated ECC for the whole page to chip->oob_poi; */
/* this includes the masked value (0xFF) for unwritten subpages. */
- ecc_calc = chip->buffers->ecccalc;
+ ecc_calc = chip->ecc.calc_buf;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
/* write OOB buffer to NAND device */
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
- return 0;
+ return nand_prog_page_end_op(chip);
}
@@ -2537,33 +4138,55 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
int eccsteps = chip->ecc.steps;
const uint8_t *p = buf;
uint8_t *oob = chip->oob_poi;
+ int ret;
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
- chip->write_buf(mtd, p, eccsize);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
if (chip->ecc.prepad) {
- chip->write_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
chip->ecc.calculate(mtd, p, oob);
- chip->write_buf(mtd, oob, eccbytes);
+
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->write_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
/* Calculate remaining oob bytes */
i = mtd->oobsize - (oob - chip->oob_poi);
- if (i)
- chip->write_buf(mtd, oob, i);
+ if (i) {
+ ret = nand_write_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
@@ -2589,9 +4212,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
else
subpage = 0;
- if (nand_standard_page_accessors(&chip->ecc))
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
if (unlikely(raw))
status = chip->ecc.write_page_raw(mtd, chip, buf,
oob_required, page);
@@ -2605,14 +4225,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (status < 0)
return status;
- if (nand_standard_page_accessors(&chip->ecc)) {
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
- }
-
return 0;
}
@@ -2737,9 +4349,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (part_pagewr)
bytes = min_t(int, bytes - column, writelen);
chip->pagebuf = -1;
- memset(chip->buffers->databuf, 0xff, mtd->writesize);
- memcpy(&chip->buffers->databuf[column], buf, bytes);
- wbuf = chip->buffers->databuf;
+ memset(chip->data_buf, 0xff, mtd->writesize);
+ memcpy(&chip->data_buf[column], buf, bytes);
+ wbuf = chip->data_buf;
}
if (unlikely(oob)) {
@@ -2822,33 +4434,6 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
}
/**
- * nand_write - [MTD Interface] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
- *
- * NAND write with ECC.
- */
-static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const uint8_t *buf)
-{
- struct mtd_oob_ops ops;
- int ret;
-
- nand_get_device(mtd, FL_WRITING);
- memset(&ops, 0, sizeof(ops));
- ops.len = len;
- ops.datbuf = (uint8_t *)buf;
- ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_write_ops(mtd, to, &ops);
- *retlen = ops.retlen;
- nand_release_device(mtd);
- return ret;
-}
-
-/**
* nand_do_write_oob - [MTD Interface] NAND write out-of-band
* @mtd: MTD device structure
* @to: offset to write to
@@ -2874,22 +4459,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- if (unlikely(ops->ooboffs >= len)) {
- pr_debug("%s: attempt to start write outside oob\n",
- __func__);
- return -EINVAL;
- }
-
- /* Do not allow write past end of device */
- if (unlikely(to >= mtd->size ||
- ops->ooboffs + ops->ooblen >
- ((mtd->size >> chip->page_shift) -
- (to >> chip->page_shift)) * len)) {
- pr_debug("%s: attempt to write beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
chipnr = (int)(to >> chip->chip_shift);
/*
@@ -2945,13 +4514,6 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
ops->retlen = 0;
- /* Do not allow writes past end of device */
- if (ops->datbuf && (to + ops->len) > mtd->size) {
- pr_debug("%s: attempt to write beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
nand_get_device(mtd, FL_WRITING);
switch (ops->mode) {
@@ -2984,11 +4546,12 @@ out:
static int single_erase(struct mtd_info *mtd, int page)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ unsigned int eraseblock;
+
/* Send commands to erase a block */
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+ eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
- return chip->waitfunc(mtd, chip);
+ return nand_erase_op(chip, eraseblock);
}
/**
@@ -3072,7 +4635,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
status = chip->erase(mtd, page & chip->pagemask);
/* See if block erase succeeded */
- if (status & NAND_STATUS_FAIL) {
+ if (status) {
pr_debug("%s: failed erase, page 0x%08x\n",
__func__, page);
instr->state = MTD_ERASE_FAILED;
@@ -3215,22 +4778,12 @@ static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
- int status;
- int i;
-
if (!chip->onfi_version ||
!(le16_to_cpu(chip->onfi_params.opt_cmd)
& ONFI_OPT_CMD_SET_GET_FEATURES))
return -EINVAL;
- chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- chip->write_byte(mtd, subfeature_param[i]);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
- return 0;
+ return nand_set_features_op(chip, addr, subfeature_param);
}
/**
@@ -3243,17 +4796,12 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
- int i;
-
if (!chip->onfi_version ||
!(le16_to_cpu(chip->onfi_params.opt_cmd)
& ONFI_OPT_CMD_SET_GET_FEATURES))
return -EINVAL;
- chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- *subfeature_param++ = chip->read_byte(mtd);
- return 0;
+ return nand_get_features_op(chip, addr, subfeature_param);
}
/**
@@ -3319,7 +4867,7 @@ static void nand_set_defaults(struct nand_chip *chip)
chip->chip_delay = 20;
/* Check if a user-supplied command function was given */
- if (chip->cmdfunc == NULL)
+ if (!chip->cmdfunc && !chip->exec_op)
chip->cmdfunc = nand_command;
/* Check if a user-supplied wait function was given */
@@ -3396,12 +4944,11 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
struct nand_onfi_params *p)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
struct onfi_ext_param_page *ep;
struct onfi_ext_section *s;
struct onfi_ext_ecc_info *ecc;
uint8_t *cursor;
- int ret = -EINVAL;
+ int ret;
int len;
int i;
@@ -3411,14 +4958,18 @@ static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
return -ENOMEM;
/* Send our own NAND_CMD_PARAM. */
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+ ret = nand_read_param_page_op(chip, 0, NULL, 0);
+ if (ret)
+ goto ext_out;
/* Use the Change Read Column command to skip the ONFI param pages. */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- sizeof(*p) * p->num_of_param_pages , -1);
+ ret = nand_change_read_column_op(chip,
+ sizeof(*p) * p->num_of_param_pages,
+ ep, len, true);
+ if (ret)
+ goto ext_out;
- /* Read out the Extended Parameter Page. */
- chip->read_buf(mtd, (uint8_t *)ep, len);
+ ret = -EINVAL;
if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
!= le16_to_cpu(ep->crc))) {
pr_debug("fail in the CRC.\n");
@@ -3471,19 +5022,23 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_onfi_params *p = &chip->onfi_params;
- int i, j;
- int val;
+ char id[4];
+ int i, ret, val;
/* Try ONFI for unknown chip or LP */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
- if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
- chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
+ ret = nand_readid_op(chip, 0x20, id, sizeof(id));
+ if (ret || strncmp(id, "ONFI", 4))
+ return 0;
+
+ ret = nand_read_param_page_op(chip, 0, NULL, 0);
+ if (ret)
return 0;
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
for (i = 0; i < 3; i++) {
- for (j = 0; j < sizeof(*p); j++)
- ((uint8_t *)p)[j] = chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ if (ret)
+ return 0;
+
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
le16_to_cpu(p->crc)) {
break;
@@ -3574,20 +5129,22 @@ static int nand_flash_detect_jedec(struct nand_chip *chip)
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_jedec_params *p = &chip->jedec_params;
struct jedec_ecc_info *ecc;
- int val;
- int i, j;
+ char id[5];
+ int i, val, ret;
/* Try JEDEC for unknown chip or LP */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
- if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
- chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
- chip->read_byte(mtd) != 'C')
+ ret = nand_readid_op(chip, 0x40, id, sizeof(id));
+ if (ret || strncmp(id, "JEDEC", sizeof(id)))
+ return 0;
+
+ ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
+ if (ret)
return 0;
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
for (i = 0; i < 3; i++) {
- for (j = 0; j < sizeof(*p); j++)
- ((uint8_t *)p)[j] = chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ if (ret)
+ return 0;
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
le16_to_cpu(p->crc))
@@ -3866,8 +5423,7 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
const struct nand_manufacturer *manufacturer;
struct mtd_info *mtd = nand_to_mtd(chip);
- int busw;
- int i;
+ int busw, ret;
u8 *id_data = chip->id.data;
u8 maf_id, dev_id;
@@ -3875,17 +5431,21 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
* after power-up.
*/
- nand_reset(chip, 0);
+ ret = nand_reset(chip, 0);
+ if (ret)
+ return ret;
/* Select the device */
chip->select_chip(mtd, 0);
/* Send the command for reading device ID */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ ret = nand_readid_op(chip, 0, id_data, 2);
+ if (ret)
+ return ret;
/* Read manufacturer and device IDs */
- maf_id = chip->read_byte(mtd);
- dev_id = chip->read_byte(mtd);
+ maf_id = id_data[0];
+ dev_id = id_data[1];
/*
* Try again to make sure, as on some systems the bus-hold or other
@@ -3894,11 +5454,10 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
* not match, ignore the device completely.
*/
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
/* Read entire ID string */
- for (i = 0; i < ARRAY_SIZE(chip->id.data); i++)
- id_data[i] = chip->read_byte(mtd);
+ ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
+ if (ret)
+ return ret;
if (id_data[0] != maf_id || id_data[1] != dev_id) {
pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
@@ -4190,6 +5749,9 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
+ /* Enforce the right timings for reset/detection */
+ onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
+
ret = nand_dt_init(chip);
if (ret)
return ret;
@@ -4197,15 +5759,21 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
if (!mtd->name && mtd->dev.parent)
mtd->name = dev_name(mtd->dev.parent);
- if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
+ /*
+ * ->cmdfunc() is legacy and will only be used if ->exec_op() is not
+ * populated.
+ */
+ if (!chip->exec_op) {
/*
- * Default functions assigned for chip_select() and
- * cmdfunc() both expect cmd_ctrl() to be populated,
- * so we need to check that that's the case
+ * Default functions assigned for ->cmdfunc() and
+ * ->select_chip() both expect ->cmd_ctrl() to be populated.
*/
- pr_err("chip.cmd_ctrl() callback is not provided");
- return -EINVAL;
+ if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
+ pr_err("->cmd_ctrl() should be provided\n");
+ return -EINVAL;
+ }
}
+
/* Set the default functions */
nand_set_defaults(chip);
@@ -4225,15 +5793,16 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
+ u8 id[2];
+
/* See comment in nand_get_flash_type for reset */
nand_reset(chip, i);
chip->select_chip(mtd, i);
/* Send the command for reading device ID */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ nand_readid_op(chip, 0, id, sizeof(id));
/* Read manufacturer and device IDs */
- if (nand_maf_id != chip->read_byte(mtd) ||
- nand_dev_id != chip->read_byte(mtd)) {
+ if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
chip->select_chip(mtd, -1);
break;
}
@@ -4600,26 +6169,6 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd)
return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
}
-static bool invalid_ecc_page_accessors(struct nand_chip *chip)
-{
- struct nand_ecc_ctrl *ecc = &chip->ecc;
-
- if (nand_standard_page_accessors(ecc))
- return false;
-
- /*
- * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
- * controller driver implements all the page accessors because
- * default helpers are not suitable when the core does not
- * send the READ0/PAGEPROG commands.
- */
- return (!ecc->read_page || !ecc->write_page ||
- !ecc->read_page_raw || !ecc->write_page_raw ||
- (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
- (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
- ecc->hwctl && ecc->calculate));
-}
-
/**
* nand_scan_tail - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
@@ -4632,7 +6181,6 @@ int nand_scan_tail(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- struct nand_buffers *nbuf = NULL;
int ret, i;
/* New bad blocks should be marked in OOB, flash-based BBT, or both */
@@ -4641,39 +6189,9 @@ int nand_scan_tail(struct mtd_info *mtd)
return -EINVAL;
}
- if (invalid_ecc_page_accessors(chip)) {
- pr_err("Invalid ECC page accessors setup\n");
- return -EINVAL;
- }
-
- if (!(chip->options & NAND_OWN_BUFFERS)) {
- nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
- if (!nbuf)
- return -ENOMEM;
-
- nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
- if (!nbuf->ecccalc) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
- if (!nbuf->ecccode) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
- GFP_KERNEL);
- if (!nbuf->databuf) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- chip->buffers = nbuf;
- } else if (!chip->buffers) {
+ chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+ if (!chip->data_buf)
return -ENOMEM;
- }
/*
* FIXME: some NAND manufacturer drivers expect the first die to be
@@ -4685,10 +6203,10 @@ int nand_scan_tail(struct mtd_info *mtd)
ret = nand_manufacturer_init(chip);
chip->select_chip(mtd, -1);
if (ret)
- goto err_free_nbuf;
+ goto err_free_buf;
/* Set the internal oob buffer location, just after the page data */
- chip->oob_poi = chip->buffers->databuf + mtd->writesize;
+ chip->oob_poi = chip->data_buf + mtd->writesize;
/*
* If no default placement scheme is given, select an appropriate one.
@@ -4836,6 +6354,15 @@ int nand_scan_tail(struct mtd_info *mtd)
goto err_nand_manuf_cleanup;
}
+ if (ecc->correct || ecc->calculate) {
+ ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ if (!ecc->calc_buf || !ecc->code_buf) {
+ ret = -ENOMEM;
+ goto err_nand_manuf_cleanup;
+ }
+ }
+
/* For many systems, the standard OOB write also works for raw */
if (!ecc->read_oob_raw)
ecc->read_oob_raw = ecc->read_oob;
@@ -4917,8 +6444,6 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->_erase = nand_erase;
mtd->_point = NULL;
mtd->_unpoint = NULL;
- mtd->_read = nand_read;
- mtd->_write = nand_write;
mtd->_panic_write = panic_nand_write;
mtd->_read_oob = nand_read_oob;
mtd->_write_oob = nand_write_oob;
@@ -4954,7 +6479,7 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->select_chip(mtd, -1);
if (ret)
- goto err_nand_data_iface_cleanup;
+ goto err_nand_manuf_cleanup;
}
/* Check, if we should skip the bad block table scan */
@@ -4964,23 +6489,18 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Build bad block table */
ret = chip->scan_bbt(mtd);
if (ret)
- goto err_nand_data_iface_cleanup;
+ goto err_nand_manuf_cleanup;
return 0;
-err_nand_data_iface_cleanup:
- nand_release_data_interface(chip);
err_nand_manuf_cleanup:
nand_manufacturer_cleanup(chip);
-err_free_nbuf:
- if (nbuf) {
- kfree(nbuf->databuf);
- kfree(nbuf->ecccode);
- kfree(nbuf->ecccalc);
- kfree(nbuf);
- }
+err_free_buf:
+ kfree(chip->data_buf);
+ kfree(ecc->code_buf);
+ kfree(ecc->calc_buf);
return ret;
}
@@ -5028,16 +6548,11 @@ void nand_cleanup(struct nand_chip *chip)
chip->ecc.algo == NAND_ECC_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
- nand_release_data_interface(chip);
-
/* Free bad block table memory */
kfree(chip->bbt);
- if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) {
- kfree(chip->buffers->databuf);
- kfree(chip->buffers->ecccode);
- kfree(chip->buffers->ecccalc);
- kfree(chip->buffers);
- }
+ kfree(chip->data_buf);
+ kfree(chip->ecc.code_buf);
+ kfree(chip->ecc.calc_buf);
/* Free bad block descriptor memory */
if (chip->badblock_pattern && chip->badblock_pattern->options
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 2915b6739bf8..36092850be2c 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -898,7 +898,7 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
{
struct nand_chip *this = mtd_to_nand(mtd);
- return create_bbt(mtd, this->buffers->databuf, bd, -1);
+ return create_bbt(mtd, this->data_buf, bd, -1);
}
/**
diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c
index 985751eda317..d542908a0ebb 100644
--- a/drivers/mtd/nand/nand_hynix.c
+++ b/drivers/mtd/nand/nand_hynix.c
@@ -67,15 +67,43 @@ struct hynix_read_retry_otp {
static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
{
+ u8 jedecid[5] = { };
+ int ret;
+
+ ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
+ if (ret)
+ return false;
+
+ return !strncmp("JEDEC", jedecid, sizeof(jedecid));
+}
+
+static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(cmd, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, cmd, -1, -1);
+
+ return 0;
+}
+
+static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
+{
struct mtd_info *mtd = nand_to_mtd(chip);
- u8 jedecid[6] = { };
- int i = 0;
+ u16 column = ((u16)addr << 8) | addr;
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
- for (i = 0; i < 5; i++)
- jedecid[i] = chip->read_byte(mtd);
+ chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
+ chip->write_byte(mtd, val);
- return !strcmp("JEDEC", jedecid);
+ return 0;
}
static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
@@ -83,14 +111,15 @@ static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
struct nand_chip *chip = mtd_to_nand(mtd);
struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
const u8 *values;
- int status;
- int i;
+ int i, ret;
values = hynix->read_retry->values +
(retry_mode * hynix->read_retry->nregs);
/* Enter 'Set Hynix Parameters' mode */
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
/*
* Configure the NAND in the requested read-retry mode.
@@ -102,21 +131,14 @@ static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
* probably tweaked at production in this case).
*/
for (i = 0; i < hynix->read_retry->nregs; i++) {
- int column = hynix->read_retry->regs[i];
-
- column |= column << 8;
- chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
- chip->write_byte(mtd, values[i]);
+ ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
+ values[i]);
+ if (ret)
+ return ret;
}
/* Apply the new settings. */
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
-
- return 0;
+ return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
}
/**
@@ -172,40 +194,63 @@ static int hynix_read_rr_otp(struct nand_chip *chip,
const struct hynix_read_retry_otp *info,
void *buf)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int i;
+ int i, ret;
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
for (i = 0; i < info->nregs; i++) {
- int column = info->regs[i];
-
- column |= column << 8;
- chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
- chip->write_byte(mtd, info->values[i]);
+ ret = hynix_nand_reg_write_op(chip, info->regs[i],
+ info->values[i]);
+ if (ret)
+ return ret;
}
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
/* Sequence to enter OTP mode? */
- chip->cmdfunc(mtd, 0x17, -1, -1);
- chip->cmdfunc(mtd, 0x04, -1, -1);
- chip->cmdfunc(mtd, 0x19, -1, -1);
+ ret = hynix_nand_cmd_op(chip, 0x17);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x4);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x19);
+ if (ret)
+ return ret;
/* Now read the page */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, info->page);
- chip->read_buf(mtd, buf, info->size);
+ ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
+ if (ret)
+ return ret;
/* Put everything back to normal */
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, 0x38, -1);
- chip->write_byte(mtd, 0x0);
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, -1);
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
- return 0;
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_reg_write_op(chip, 0x38, 0);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
+
+ return nand_read_page_op(chip, 0, 0, NULL, 0);
}
#define NAND_HYNIX_1XNM_RR_COUNT_OFFS 0
diff --git a/drivers/mtd/nand/nand_micron.c b/drivers/mtd/nand/nand_micron.c
index abf6a3c376e8..02e109ae73f1 100644
--- a/drivers/mtd/nand/nand_micron.c
+++ b/drivers/mtd/nand/nand_micron.c
@@ -117,16 +117,28 @@ micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required,
int page)
{
- int status;
- int max_bitflips = 0;
+ u8 status;
+ int ret, max_bitflips = 0;
- micron_nand_on_die_ecc_setup(chip, true);
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ goto out;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ goto out;
+
+ ret = nand_exit_status_op(chip);
+ if (ret)
+ goto out;
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- status = chip->read_byte(mtd);
if (status & NAND_STATUS_FAIL)
mtd->ecc_stats.failed++;
+
/*
* The internal ECC doesn't tell us the number of bitflips
* that have been corrected, but tells us if it recommends to
@@ -137,13 +149,15 @@ micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
else if (status & NAND_STATUS_WRITE_RECOMMENDED)
max_bitflips = chip->ecc.strength;
- chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
-
- nand_read_page_raw(mtd, chip, buf, oob_required, page);
+ ret = nand_read_data_op(chip, buf, mtd->writesize, false);
+ if (!ret && oob_required)
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+out:
micron_nand_on_die_ecc_setup(chip, false);
- return max_bitflips;
+ return ret ? ret : max_bitflips;
}
static int
@@ -151,46 +165,16 @@ micron_nand_write_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
- int status;
-
- micron_nand_on_die_ecc_setup(chip, true);
+ int ret;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- nand_write_page_raw(mtd, chip, buf, oob_required, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+ ret = nand_write_page_raw(mtd, chip, buf, oob_required, page);
micron_nand_on_die_ecc_setup(chip, false);
- return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-
-static int
-micron_nand_read_page_raw_on_die_ecc(struct mtd_info *mtd,
- struct nand_chip *chip,
- uint8_t *buf, int oob_required,
- int page)
-{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- nand_read_page_raw(mtd, chip, buf, oob_required, page);
-
- return 0;
-}
-
-static int
-micron_nand_write_page_raw_on_die_ecc(struct mtd_info *mtd,
- struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
- int page)
-{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- nand_write_page_raw(mtd, chip, buf, oob_required, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return ret;
}
enum {
@@ -285,17 +269,14 @@ static int micron_nand_init(struct nand_chip *chip)
return -EINVAL;
}
- chip->ecc.options = NAND_ECC_CUSTOM_PAGE_ACCESS;
chip->ecc.bytes = 8;
chip->ecc.size = 512;
chip->ecc.strength = 4;
chip->ecc.algo = NAND_ECC_BCH;
chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
- chip->ecc.read_page_raw =
- micron_nand_read_page_raw_on_die_ecc;
- chip->ecc.write_page_raw =
- micron_nand_write_page_raw_on_die_ecc;
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
mtd_set_ooblayout(mtd, &micron_nand_on_die_ooblayout_ops);
}
diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c
index d348f0129ae7..ef022f62f74c 100644
--- a/drivers/mtd/nand/nand_samsung.c
+++ b/drivers/mtd/nand/nand_samsung.c
@@ -91,6 +91,25 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
}
} else {
nand_decode_ext_id(chip);
+
+ if (nand_is_slc(chip)) {
+ switch (chip->id.data[1]) {
+ /* K9F4G08U0D-S[I|C]B0(T00) */
+ case 0xDC:
+ chip->ecc_step_ds = 512;
+ chip->ecc_strength_ds = 1;
+ break;
+
+ /* K9F1G08U0E 21nm chips do not support subpage write */
+ case 0xF1:
+ if (chip->id.len > 4 &&
+ (chip->id.data[4] & GENMASK(1, 0)) == 0x1)
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ break;
+ default:
+ break;
+ }
+ }
}
}
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index 5d1533bcc5bd..9400d039ddbd 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -283,16 +283,16 @@ const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
/**
- * onfi_init_data_interface - [NAND Interface] Initialize a data interface from
+ * onfi_fill_data_interface - [NAND Interface] Initialize a data interface from
* given ONFI mode
- * @iface: The data interface to be initialized
* @mode: The ONFI timing mode
*/
-int onfi_init_data_interface(struct nand_chip *chip,
- struct nand_data_interface *iface,
+int onfi_fill_data_interface(struct nand_chip *chip,
enum nand_data_interface_type type,
int timing_mode)
{
+ struct nand_data_interface *iface = &chip->data_interface;
+
if (type != NAND_SDR_IFACE)
return -EINVAL;
@@ -321,15 +321,4 @@ int onfi_init_data_interface(struct nand_chip *chip,
return 0;
}
-EXPORT_SYMBOL(onfi_init_data_interface);
-
-/**
- * nand_get_default_data_interface - [NAND Interface] Retrieve NAND
- * data interface for mode 0. This is used as default timing after
- * reset.
- */
-const struct nand_data_interface *nand_get_default_data_interface(void)
-{
- return &onfi_sdr_timings[0];
-}
-EXPORT_SYMBOL(nand_get_default_data_interface);
+EXPORT_SYMBOL(onfi_fill_data_interface);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index dad438c4906a..8cdf7d3d8fa7 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1530,7 +1530,9 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
int ret;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Enable GPMC ecc engine */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -1548,7 +1550,8 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
/* Write ecc vector to OOB area */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
/**
@@ -1568,7 +1571,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
u32 data_len, const u8 *buf,
int oob_required, int page)
{
- u8 *ecc_calc = chip->buffers->ecccalc;
+ u8 *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
@@ -1582,6 +1585,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
* ECC is calculated for all subpages but we choose
* only what we want.
*/
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Enable GPMC ECC engine */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -1605,7 +1609,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
/* Copy calculated ECC for the whole page to chip->oob_poi; */
/* this includes the masked value (0xFF) for unwritten subpages. */
- ecc_calc = chip->buffers->ecccalc;
+ ecc_calc = chip->ecc.calc_buf;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
@@ -1614,7 +1618,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
/* write OOB buffer to NAND device */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
@@ -1635,11 +1639,13 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
int stat, ret;
unsigned int max_bitflips = 0;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
/* Enable GPMC ecc engine */
chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1647,10 +1653,10 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->read_buf(mtd, buf, mtd->writesize);
/* Read oob bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + BADBLOCK_MARKER_LENGTH, -1);
- chip->read_buf(mtd, chip->oob_poi + BADBLOCK_MARKER_LENGTH,
- chip->ecc.total);
+ nand_change_read_column_op(chip,
+ mtd->writesize + BADBLOCK_MARKER_LENGTH,
+ chip->oob_poi + BADBLOCK_MARKER_LENGTH,
+ chip->ecc.total, false);
/* Calculate ecc bytes */
omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 90b9a9ccbe60..d1979c7dbe7e 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -520,15 +520,13 @@ static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
struct nand_chip *chip = &host->chip;
struct pxa3xx_nand_info *info = host->info_data;
const struct pxa3xx_nand_flash *f = NULL;
- struct mtd_info *mtd = nand_to_mtd(&host->chip);
int i, id, ntypes;
+ u8 idbuf[2];
ntypes = ARRAY_SIZE(builtin_flash_types);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
- id = chip->read_byte(mtd);
- id |= chip->read_byte(mtd) << 0x8;
+ nand_readid_op(chip, 0, idbuf, sizeof(idbuf));
+ id = idbuf[0] | (idbuf[1] << 8);
for (i = 0; i < ntypes; i++) {
f = &builtin_flash_types[i];
@@ -963,6 +961,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
switch (command) {
case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
case NAND_CMD_PAGEPROG:
info->use_ecc = 1;
break;
@@ -1350,10 +1349,10 @@ static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
@@ -1363,7 +1362,7 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
struct pxa3xx_nand_info *info = host->info_data;
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (info->retcode == ERR_CORERR && info->use_ecc) {
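
The pxa3xx READID change above is the same idea applied to identification: a single nand_readid_op() call replaces the NAND_CMD_READID command plus two read_byte() calls. A sketch with a hypothetical helper user, packing the ID bytes exactly as the hunk does:

	#include <linux/mtd/rawnand.h>

	static int foo_read_id(struct nand_chip *chip, u16 *id)
	{
		u8 buf[2];
		int ret;

		/* Read the ID bytes starting at READID address 0. */
		ret = nand_readid_op(chip, 0, buf, sizeof(buf));
		if (ret)
			return ret;

		*id = buf[0] | (buf[1] << 8);
		return 0;
	}
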
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 2656c1ac5646..563b759ffca6 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -23,6 +23,7 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>
+#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
@@ -1725,6 +1726,7 @@ static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
u8 *data_buf, *oob_buf = NULL;
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
@@ -1750,6 +1752,7 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
int i, ret;
int read_loc;
+ nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = chip->oob_poi;
@@ -1850,6 +1853,8 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
u8 *data_buf, *oob_buf;
int i, ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -1902,6 +1907,9 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
free_descs(nandc);
+ if (!ret)
+ ret = nand_prog_page_end_op(chip);
+
return ret;
}
@@ -1916,6 +1924,7 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
u8 *data_buf, *oob_buf;
int i, ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -1970,6 +1979,9 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
free_descs(nandc);
+ if (!ret)
+ ret = nand_prog_page_end_op(chip);
+
return ret;
}
@@ -1990,7 +2002,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *oob = chip->oob_poi;
int data_size, oob_size;
- int ret, status = 0;
+ int ret;
host->use_ecc = true;
@@ -2027,11 +2039,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
return -EIO;
}
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
@@ -2081,7 +2089,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- int page, ret, status = 0;
+ int page, ret;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2114,11 +2122,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
return -EIO;
}
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
/*
@@ -2636,6 +2640,9 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
nand_set_flash_node(chip, dn);
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
+ if (!mtd->name)
+ return -ENOMEM;
+
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
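
The deleted qcom lines make the contract of nand_prog_page_end_op() visible: issue PAGEPROG, wait for the chip, and map NAND_STATUS_FAIL to -EIO. A conceptual equivalent built from the removed code (not the actual core implementation):

	#include <linux/errno.h>
	#include <linux/mtd/rawnand.h>

	static int prog_page_end_equiv(struct mtd_info *mtd, struct nand_chip *chip)
	{
		int status;

		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
		status = chip->waitfunc(mtd, chip);

		return status & NAND_STATUS_FAIL ? -EIO : 0;
	}
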
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index fc9287af4614..595635b9e9de 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -364,7 +364,7 @@ static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
struct r852_device *dev = nand_get_controller_data(chip);
unsigned long timeout;
- int status;
+ u8 status;
timeout = jiffies + (chip->state == FL_ERASING ?
msecs_to_jiffies(400) : msecs_to_jiffies(20));
@@ -373,8 +373,7 @@ static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
if (chip->dev_ready(mtd))
break;
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- status = (int)chip->read_byte(mtd);
+ nand_status_op(chip, &status);
/* Unfortunately, no way to send detailed error status... */
if (dev->dma_error) {
@@ -522,9 +521,7 @@ exit:
static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
@@ -1046,7 +1043,7 @@ static int r852_resume(struct device *device)
if (dev->card_registred) {
r852_engine_enable(dev);
dev->chip->select_chip(mtd, 0);
- dev->chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(dev->chip);
dev->chip->select_chip(mtd, -1);
}
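
r852_wait() (and tmio_nand_wait() further down) now read the status byte through nand_status_op(); the removed lines show the operation it stands for. Equivalent of the old sequence, for reference:

	#include <linux/mtd/rawnand.h>

	static u8 read_status_equiv(struct mtd_info *mtd, struct nand_chip *chip)
	{
		chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
		return chip->read_byte(mtd);
	}
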
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 3c5008a4f5f3..c4e7755448e6 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -614,7 +614,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
@@ -624,9 +624,9 @@ static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
index d3e028e58b0f..1581671b05ae 100644
--- a/drivers/mtd/nand/sm_common.h
+++ b/drivers/mtd/nand/sm_common.h
@@ -36,7 +36,7 @@ struct sm_oob {
#define SM_SMALL_OOB_SIZE 8
-extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
+int sm_register_device(struct mtd_info *mtd, int smartmedia);
static inline int sm_sector_valid(struct sm_oob *oob)
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 82244be3e766..f5a55c63935c 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -958,12 +958,12 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
int ret;
if (*cur_off != data_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
+ nand_change_read_column_op(nand, data_off, NULL, 0, false);
sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ nand_change_read_column_op(nand, oob_off, NULL, 0, false);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
@@ -991,16 +991,15 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
* Re-read the data with the randomizer disabled to identify
* bitflips in erased pages.
*/
- if (nand->options & NAND_NEED_SCRAMBLING) {
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
- nand->read_buf(mtd, data, ecc->size);
- } else {
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ nand_change_read_column_op(nand, data_off, data,
+ ecc->size, false);
+ else
memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
ecc->size);
- }
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand, oob_off, oob, ecc->bytes + 4,
+ false);
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 4,
@@ -1011,7 +1010,8 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
if (oob_required) {
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ nand_change_read_column_op(nand, oob_off, NULL, 0,
+ false);
sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
true, page);
@@ -1038,8 +1038,8 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
return;
if (!cur_off || *cur_off != offset)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- offset + mtd->writesize, -1);
+ nand_change_read_column_op(nand, mtd->writesize, NULL, 0,
+ false);
if (!randomize)
sunxi_nfc_read_buf(mtd, oob + offset, len);
@@ -1116,9 +1116,9 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
if (oob_required && !erased) {
/* TODO: use DMA to retrieve OOB */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand,
+ mtd->writesize + oob_off,
+ oob, ecc->bytes + 4, false);
sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
!i, page);
@@ -1143,18 +1143,17 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
/*
* Re-read the data with the randomizer disabled to
* identify bitflips in erased pages.
+ * TODO: use DMA to read page in raw mode
*/
- if (randomized) {
- /* TODO: use DMA to read page in raw mode */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- data_off, -1);
- nand->read_buf(mtd, data, ecc->size);
- }
+ if (randomized)
+ nand_change_read_column_op(nand, data_off,
+ data, ecc->size,
+ false);
/* TODO: use DMA to retrieve OOB */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand,
+ mtd->writesize + oob_off,
+ oob, ecc->bytes + 4, false);
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 4,
@@ -1187,12 +1186,12 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
int ret;
if (data_off != *cur_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
+ nand_change_write_column_op(nand, data_off, NULL, 0, false);
sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
+ nand_change_write_column_op(nand, oob_off, NULL, 0, false);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
@@ -1228,8 +1227,8 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
return;
if (!cur_off || *cur_off != offset)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN,
- offset + mtd->writesize, -1);
+ nand_change_write_column_op(nand, offset + mtd->writesize,
+ NULL, 0, false);
sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
@@ -1246,6 +1245,8 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
int ret, i, cur_off = 0;
bool raw_mode = false;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1279,14 +1280,14 @@ static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
{
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
chip->ecc.steps);
if (ret >= 0)
return ret;
/* Fall back to PIO mode */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
-
return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
}
@@ -1299,6 +1300,8 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
int ret, i, cur_off = 0;
unsigned int max_bitflips = 0;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = data_offs / ecc->size;
@@ -1330,13 +1333,13 @@ static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
if (ret >= 0)
return ret;
/* Fall back to PIO mode */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
-
return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
buf, page);
}
@@ -1349,6 +1352,8 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1370,7 +1375,7 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
@@ -1382,6 +1387,8 @@ static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = data_offs / ecc->size;
@@ -1400,7 +1407,7 @@ static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
@@ -1430,6 +1437,8 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
}
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
sunxi_nfc_randomizer_config(mtd, page, false);
sunxi_nfc_randomizer_enable(mtd);
@@ -1460,7 +1469,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
NULL, page);
- return 0;
+ return nand_prog_page_end_op(chip);
pio_fallback:
return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
@@ -1476,6 +1485,8 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
int ret, i, cur_off = 0;
bool raw_mode = false;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1512,6 +1523,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1533,41 +1546,33 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd,
struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
chip->pagebuf = -1;
- return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page);
+ return chip->ecc.read_page(mtd, chip, chip->data_buf, 1, page);
}
static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd,
struct nand_chip *chip,
int page)
{
- int ret, status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ int ret;
chip->pagebuf = -1;
- memset(chip->buffers->databuf, 0xff, mtd->writesize);
- ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page);
+ memset(chip->data_buf, 0xff, mtd->writesize);
+ ret = chip->ecc.write_page(mtd, chip, chip->data_buf, 1, page);
if (ret)
return ret;
/* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
static const s32 tWB_lut[] = {6, 12, 16, 20};
@@ -1853,8 +1858,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
/* Add ECC info retrieval from DT */
for (i = 0; i < ARRAY_SIZE(strengths); i++) {
- if (ecc->strength <= strengths[i])
+ if (ecc->strength <= strengths[i]) {
+ /*
+ * Update ecc->strength value with the actual strength
+ * that will be used by the ECC engine.
+ */
+ ecc->strength = strengths[i];
break;
+ }
}
if (i >= ARRAY_SIZE(strengths)) {
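
The strengths[] loop above now writes the selected value back, so ecc->strength reports what the engine will actually apply rather than what was requested. A standalone sketch with hypothetical table values: a request of 20 comes back as 24.

	#include <linux/kernel.h>
	#include <linux/errno.h>

	static const int strengths[] = { 16, 24, 32, 40 };	/* hypothetical */

	static int pick_strength(int requested)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(strengths); i++)
			if (requested <= strengths[i])
				return strengths[i];	/* round up to engine value */

		return -ENOTSUPP;	/* stronger than the engine supports */
	}
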
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 766906f03943..c5bee00b7f5e 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -329,7 +329,7 @@ static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
if (!*buf) {
/* skip over "len" bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, *pos, -1);
+ nand_change_read_column_op(chip, *pos, NULL, 0, false);
} else {
tango_read_buf(mtd, *buf, len);
*buf += len;
@@ -344,7 +344,7 @@ static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos)
if (!*buf) {
/* skip over "len" bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, *pos, -1);
+ nand_change_write_column_op(chip, *pos, NULL, 0, false);
} else {
tango_write_buf(mtd, *buf, len);
*buf += len;
@@ -427,7 +427,7 @@ static void raw_write(struct nand_chip *chip, const u8 *buf, const u8 *oob)
static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
u8 *buf, int oob_required, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
raw_read(chip, buf, chip->oob_poi);
return 0;
}
@@ -435,23 +435,15 @@ static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
static int tango_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const u8 *buf, int oob_required, int page)
{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
raw_write(chip, buf, chip->oob_poi);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
-
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
raw_read(chip, NULL, chip->oob_poi);
return 0;
}
@@ -459,11 +451,9 @@ static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
static int tango_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
raw_write(chip, NULL, chip->oob_poi);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- chip->waitfunc(mtd, chip);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int oob_ecc(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
@@ -590,7 +580,6 @@ static int chip_init(struct device *dev, struct device_node *np)
ecc->write_page = tango_write_page;
ecc->read_oob = tango_read_oob;
ecc->write_oob = tango_write_oob;
- ecc->options = NAND_ECC_CUSTOM_PAGE_ACCESS;
err = nand_scan_tail(mtd);
if (err)
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 84dbf32332e1..dcaa924502de 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -192,6 +192,7 @@ tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
{
struct tmio_nand *tmio = mtd_to_tmio(mtd);
long timeout;
+ u8 status;
/* enable RDYREQ interrupt */
tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
@@ -212,8 +213,8 @@ tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
}
- nand_chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- return nand_chip->read_byte(mtd);
+ nand_status_op(nand_chip, &status);
+ return status;
}
/*
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 8037d4b48a05..80d31a58e558 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -560,7 +560,7 @@ static int vf610_nfc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
int eccsize = chip->ecc.size;
int stat;
- vf610_nfc_read_buf(mtd, buf, eccsize);
+ nand_read_page_op(chip, page, 0, buf, eccsize);
if (oob_required)
vf610_nfc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -580,7 +580,7 @@ static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
{
struct vf610_nfc *nfc = mtd_to_nfc(mtd);
- vf610_nfc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
vf610_nfc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -588,7 +588,7 @@ static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
nfc->use_hw_ecc = true;
nfc->write_sz = mtd->writesize + mtd->oobsize;
- return 0;
+ return nand_prog_page_end_op(chip);
}
static const struct of_device_id vf610_nfc_dt_ids[] = {
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index dcae2f6a2b11..9dc15748947b 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -4,8 +4,7 @@ menuconfig MTD_ONENAND
depends on HAS_IOMEM
help
This enables support for accessing all types of OneNAND flash
- devices. For further information see
- <http://www.samsung.com/Products/Semiconductor/OneNAND/index.htm>
+ devices.
if MTD_ONENAND
@@ -26,9 +25,11 @@ config MTD_ONENAND_GENERIC
config MTD_ONENAND_OMAP2
tristate "OneNAND on OMAP2/OMAP3 support"
depends on ARCH_OMAP2 || ARCH_OMAP3
+ depends on OF || COMPILE_TEST
help
- Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU
+ Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC
via the GPMC memory controller.
+ Enable dmaengine and gpiolib for better performance.
config MTD_ONENAND_SAMSUNG
tristate "OneNAND on Samsung SOC controller support"
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 24a1388d3031..87c34f607a75 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -28,19 +28,18 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
+#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <asm/mach/flash.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
-
-#include <linux/omap-dma.h>
#define DRIVER_NAME "omap2-onenand"
@@ -50,24 +49,17 @@ struct omap2_onenand {
struct platform_device *pdev;
int gpmc_cs;
unsigned long phys_base;
- unsigned int mem_size;
- int gpio_irq;
+ struct gpio_desc *int_gpiod;
struct mtd_info mtd;
struct onenand_chip onenand;
struct completion irq_done;
struct completion dma_done;
- int dma_channel;
- int freq;
- int (*setup)(void __iomem *base, int *freq_ptr);
- struct regulator *regulator;
- u8 flags;
+ struct dma_chan *dma_chan;
};
-static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
+static void omap2_onenand_dma_complete_func(void *completion)
{
- struct omap2_onenand *c = data;
-
- complete(&c->dma_done);
+ complete(completion);
}
static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
@@ -90,6 +82,65 @@ static inline void write_reg(struct omap2_onenand *c, unsigned short value,
writew(value, c->onenand.base + reg);
}
+static int omap2_onenand_set_cfg(struct omap2_onenand *c,
+ bool sr, bool sw,
+ int latency, int burst_len)
+{
+ unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
+
+ reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;
+
+ switch (burst_len) {
+ case 0: /* continuous */
+ break;
+ case 4:
+ reg |= ONENAND_SYS_CFG1_BL_4;
+ break;
+ case 8:
+ reg |= ONENAND_SYS_CFG1_BL_8;
+ break;
+ case 16:
+ reg |= ONENAND_SYS_CFG1_BL_16;
+ break;
+ case 32:
+ reg |= ONENAND_SYS_CFG1_BL_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (latency > 5)
+ reg |= ONENAND_SYS_CFG1_HF;
+ if (latency > 7)
+ reg |= ONENAND_SYS_CFG1_VHF;
+ if (sr)
+ reg |= ONENAND_SYS_CFG1_SYNC_READ;
+ if (sw)
+ reg |= ONENAND_SYS_CFG1_SYNC_WRITE;
+
+ write_reg(c, reg, ONENAND_REG_SYS_CFG1);
+
+ return 0;
+}
+
+static int omap2_onenand_get_freq(int ver)
+{
+ switch ((ver >> 4) & 0xf) {
+ case 0:
+ return 40;
+ case 1:
+ return 54;
+ case 2:
+ return 66;
+ case 3:
+ return 83;
+ case 4:
+ return 104;
+ }
+
+ return -EINVAL;
+}
+
static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
@@ -153,28 +204,22 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
syscfg |= ONENAND_SYS_CFG1_IOBE;
write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
- if (c->flags & ONENAND_IN_OMAP34XX)
- /* Add a delay to let GPIO settle */
- syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+ /* Add a delay to let GPIO settle */
+ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
}
reinit_completion(&c->irq_done);
- if (c->gpio_irq) {
- result = gpio_get_value(c->gpio_irq);
- if (result == -1) {
- ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
- intr = read_reg(c, ONENAND_REG_INTERRUPT);
- wait_err("gpio error", state, ctrl, intr);
- return -EIO;
- }
- } else
- result = 0;
- if (result == 0) {
+ result = gpiod_get_value(c->int_gpiod);
+ if (result < 0) {
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ wait_err("gpio error", state, ctrl, intr);
+ return result;
+ } else if (result == 0) {
int retry_cnt = 0;
retry:
- result = wait_for_completion_timeout(&c->irq_done,
- msecs_to_jiffies(20));
- if (result == 0) {
+ if (!wait_for_completion_io_timeout(&c->irq_done,
+ msecs_to_jiffies(20))) {
/* Timeout after 20ms */
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (ctrl & ONENAND_CTRL_ONGO &&
@@ -291,9 +336,42 @@ static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
return 0;
}
-#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)
+static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
+ dma_addr_t src, dma_addr_t dst,
+ size_t count)
+{
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+
+ tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0);
+ if (!tx) {
+ dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
+ return -EIO;
+ }
+
+ reinit_completion(&c->dma_done);
+
+ tx->callback = omap2_onenand_dma_complete_func;
+ tx->callback_param = &c->dma_done;
+
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
+ return -EIO;
+ }
+
+ dma_async_issue_pending(c->dma_chan);
+
+ if (!wait_for_completion_io_timeout(&c->dma_done,
+ msecs_to_jiffies(20))) {
+ dmaengine_terminate_sync(c->dma_chan);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
-static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
+static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset,
size_t count)
{
@@ -301,10 +379,9 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
- unsigned long timeout;
void *buf = (void *)buffer;
size_t xtra;
- volatile unsigned *done;
+ int ret;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
@@ -341,25 +418,10 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
}
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count >> 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
-
- timeout = jiffies + msecs_to_jiffies(20);
- done = &c->dma_done.done;
- while (time_before(jiffies, timeout))
- if (*done)
- break;
-
+ ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
- if (!*done) {
+ if (ret) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
goto out_copy;
}
@@ -371,7 +433,7 @@ out_copy:
return 0;
}
-static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
+static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
const unsigned char *buffer,
int offset, size_t count)
{
@@ -379,9 +441,8 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
- unsigned long timeout;
void *buf = (void *)buffer;
- volatile unsigned *done;
+ int ret;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
@@ -412,25 +473,10 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
return -1;
}
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count >> 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
-
- timeout = jiffies + msecs_to_jiffies(20);
- done = &c->dma_done.done;
- while (time_before(jiffies, timeout))
- if (*done)
- break;
-
+ ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
- if (!*done) {
+ if (ret) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
goto out_copy;
}
@@ -442,136 +488,6 @@ out_copy:
return 0;
}
-#else
-
-static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- return -ENOSYS;
-}
-
-static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- return -ENOSYS;
-}
-
-#endif
-
-#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
-
-static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
- struct onenand_chip *this = mtd->priv;
- dma_addr_t dma_src, dma_dst;
- int bram_offset;
-
- bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
- /* DMA is not used. Revisit PM requirements before enabling it. */
- if (1 || (c->dma_channel < 0) ||
- ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
- (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
- memcpy(buffer, (__force void *)(this->base + bram_offset),
- count);
- return 0;
- }
-
- dma_src = c->phys_base + bram_offset;
- dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
- dev_err(&c->pdev->dev,
- "Couldn't DMA map a %d byte buffer\n",
- count);
- return -1;
- }
-
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count / 4, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
- wait_for_completion(&c->dma_done);
-
- dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
-
- return 0;
-}
-
-static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
- struct onenand_chip *this = mtd->priv;
- dma_addr_t dma_src, dma_dst;
- int bram_offset;
-
- bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
- /* DMA is not used. Revisit PM requirements before enabling it. */
- if (1 || (c->dma_channel < 0) ||
- ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
- (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
- memcpy((__force void *)(this->base + bram_offset), buffer,
- count);
- return 0;
- }
-
- dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
- DMA_TO_DEVICE);
- dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(&c->pdev->dev, dma_src)) {
- dev_err(&c->pdev->dev,
- "Couldn't DMA map a %d byte buffer\n",
- count);
- return -1;
- }
-
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
- count / 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
- wait_for_completion(&c->dma_done);
-
- dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
-
- return 0;
-}
-
-#else
-
-static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- return -ENOSYS;
-}
-
-static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- return -ENOSYS;
-}
-
-#endif
-
-static struct platform_driver omap2_onenand_driver;
-
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -583,168 +499,117 @@ static void omap2_onenand_shutdown(struct platform_device *pdev)
memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
-static int omap2_onenand_enable(struct mtd_info *mtd)
-{
- int ret;
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
-
- ret = regulator_enable(c->regulator);
- if (ret != 0)
- dev_err(&c->pdev->dev, "can't enable regulator\n");
-
- return ret;
-}
-
-static int omap2_onenand_disable(struct mtd_info *mtd)
-{
- int ret;
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
-
- ret = regulator_disable(c->regulator);
- if (ret != 0)
- dev_err(&c->pdev->dev, "can't disable regulator\n");
-
- return ret;
-}
-
static int omap2_onenand_probe(struct platform_device *pdev)
{
- struct omap_onenand_platform_data *pdata;
- struct omap2_onenand *c;
- struct onenand_chip *this;
- int r;
+ u32 val;
+ dma_cap_mask_t mask;
+ int freq, latency, r;
struct resource *res;
+ struct omap2_onenand *c;
+ struct gpmc_onenand_info info;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "error getting memory resource\n");
+ return -EINVAL;
+ }
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata == NULL) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -ENODEV;
+ r = of_property_read_u32(np, "reg", &val);
+ if (r) {
+ dev_err(dev, "reg not found in DT\n");
+ return r;
}
- c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
+ c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
if (!c)
return -ENOMEM;
init_completion(&c->irq_done);
init_completion(&c->dma_done);
- c->flags = pdata->flags;
- c->gpmc_cs = pdata->cs;
- c->gpio_irq = pdata->gpio_irq;
- c->dma_channel = pdata->dma_channel;
- if (c->dma_channel < 0) {
- /* if -1, don't use DMA */
- c->gpio_irq = 0;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- r = -EINVAL;
- dev_err(&pdev->dev, "error getting memory resource\n");
- goto err_kfree;
- }
-
+ c->gpmc_cs = val;
c->phys_base = res->start;
- c->mem_size = resource_size(res);
-
- if (request_mem_region(c->phys_base, c->mem_size,
- pdev->dev.driver->name) == NULL) {
- dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
- c->phys_base, c->mem_size);
- r = -EBUSY;
- goto err_kfree;
- }
- c->onenand.base = ioremap(c->phys_base, c->mem_size);
- if (c->onenand.base == NULL) {
- r = -ENOMEM;
- goto err_release_mem_region;
- }
- if (pdata->onenand_setup != NULL) {
- r = pdata->onenand_setup(c->onenand.base, &c->freq);
- if (r < 0) {
- dev_err(&pdev->dev, "Onenand platform setup failed: "
- "%d\n", r);
- goto err_iounmap;
- }
- c->setup = pdata->onenand_setup;
+ c->onenand.base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(c->onenand.base))
+ return PTR_ERR(c->onenand.base);
+
+ c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
+ if (IS_ERR(c->int_gpiod)) {
+ r = PTR_ERR(c->int_gpiod);
+ /* Just try again if this happens */
+ if (r != -EPROBE_DEFER)
+ dev_err(dev, "error getting gpio: %d\n", r);
+ return r;
}
- if (c->gpio_irq) {
- if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
- dev_err(&pdev->dev, "Failed to request GPIO%d for "
- "OneNAND\n", c->gpio_irq);
- goto err_iounmap;
- }
- gpio_direction_input(c->gpio_irq);
+ if (c->int_gpiod) {
+ r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
+ omap2_onenand_interrupt,
+ IRQF_TRIGGER_RISING, "onenand", c);
+ if (r)
+ return r;
- if ((r = request_irq(gpio_to_irq(c->gpio_irq),
- omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
- pdev->dev.driver->name, c)) < 0)
- goto err_release_gpio;
+ c->onenand.wait = omap2_onenand_wait;
}
- if (c->dma_channel >= 0) {
- r = omap_request_dma(0, pdev->dev.driver->name,
- omap2_onenand_dma_cb, (void *) c,
- &c->dma_channel);
- if (r == 0) {
- omap_set_dma_write_mode(c->dma_channel,
- OMAP_DMA_WRITE_NON_POSTED);
- omap_set_dma_src_data_pack(c->dma_channel, 1);
- omap_set_dma_src_burst_mode(c->dma_channel,
- OMAP_DMA_DATA_BURST_8);
- omap_set_dma_dest_data_pack(c->dma_channel, 1);
- omap_set_dma_dest_burst_mode(c->dma_channel,
- OMAP_DMA_DATA_BURST_8);
- } else {
- dev_info(&pdev->dev,
- "failed to allocate DMA for OneNAND, "
- "using PIO instead\n");
- c->dma_channel = -1;
- }
- }
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
- dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
- "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
- c->onenand.base, c->freq);
+ c->dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (c->dma_chan) {
+ c->onenand.read_bufferram = omap2_onenand_read_bufferram;
+ c->onenand.write_bufferram = omap2_onenand_write_bufferram;
+ }
c->pdev = pdev;
c->mtd.priv = &c->onenand;
+ c->mtd.dev.parent = dev;
+ mtd_set_of_node(&c->mtd, dev->of_node);
- c->mtd.dev.parent = &pdev->dev;
- mtd_set_of_node(&c->mtd, pdata->of_node);
-
- this = &c->onenand;
- if (c->dma_channel >= 0) {
- this->wait = omap2_onenand_wait;
- if (c->flags & ONENAND_IN_OMAP34XX) {
- this->read_bufferram = omap3_onenand_read_bufferram;
- this->write_bufferram = omap3_onenand_write_bufferram;
- } else {
- this->read_bufferram = omap2_onenand_read_bufferram;
- this->write_bufferram = omap2_onenand_write_bufferram;
- }
- }
+ dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
+ c->gpmc_cs, c->phys_base, c->onenand.base,
+ c->dma_chan ? "DMA" : "PIO");
- if (pdata->regulator_can_sleep) {
- c->regulator = regulator_get(&pdev->dev, "vonenand");
- if (IS_ERR(c->regulator)) {
- dev_err(&pdev->dev, "Failed to get regulator\n");
- r = PTR_ERR(c->regulator);
- goto err_release_dma;
+ if ((r = onenand_scan(&c->mtd, 1)) < 0)
+ goto err_release_dma;
+
+ freq = omap2_onenand_get_freq(c->onenand.version_id);
+ if (freq > 0) {
+ switch (freq) {
+ case 104:
+ latency = 7;
+ break;
+ case 83:
+ latency = 6;
+ break;
+ case 66:
+ latency = 5;
+ break;
+ case 56:
+ latency = 4;
+ break;
+ default: /* 40 MHz or lower */
+ latency = 3;
+ break;
}
- c->onenand.enable = omap2_onenand_enable;
- c->onenand.disable = omap2_onenand_disable;
- }
- if (pdata->skip_initial_unlocking)
- this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;
+ r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
+ freq, latency, &info);
+ if (r)
+ goto err_release_onenand;
- if ((r = onenand_scan(&c->mtd, 1)) < 0)
- goto err_release_regulator;
+ r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
+ latency, info.burst_len);
+ if (r)
+ goto err_release_onenand;
- r = mtd_device_register(&c->mtd, pdata ? pdata->parts : NULL,
- pdata ? pdata->nr_parts : 0);
+ if (info.sync_read || info.sync_write)
+ dev_info(dev, "optimized timings for %d MHz\n", freq);
+ }
+
+ r = mtd_device_register(&c->mtd, NULL, 0);
if (r)
goto err_release_onenand;
@@ -754,22 +619,9 @@ static int omap2_onenand_probe(struct platform_device *pdev)
err_release_onenand:
onenand_release(&c->mtd);
-err_release_regulator:
- regulator_put(c->regulator);
err_release_dma:
- if (c->dma_channel != -1)
- omap_free_dma(c->dma_channel);
- if (c->gpio_irq)
- free_irq(gpio_to_irq(c->gpio_irq), c);
-err_release_gpio:
- if (c->gpio_irq)
- gpio_free(c->gpio_irq);
-err_iounmap:
- iounmap(c->onenand.base);
-err_release_mem_region:
- release_mem_region(c->phys_base, c->mem_size);
-err_kfree:
- kfree(c);
+ if (c->dma_chan)
+ dma_release_channel(c->dma_chan);
return r;
}
@@ -779,27 +631,26 @@ static int omap2_onenand_remove(struct platform_device *pdev)
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
onenand_release(&c->mtd);
- regulator_put(c->regulator);
- if (c->dma_channel != -1)
- omap_free_dma(c->dma_channel);
+ if (c->dma_chan)
+ dma_release_channel(c->dma_chan);
omap2_onenand_shutdown(pdev);
- if (c->gpio_irq) {
- free_irq(gpio_to_irq(c->gpio_irq), c);
- gpio_free(c->gpio_irq);
- }
- iounmap(c->onenand.base);
- release_mem_region(c->phys_base, c->mem_size);
- kfree(c);
return 0;
}
+static const struct of_device_id omap2_onenand_id_table[] = {
+ { .compatible = "ti,omap2-onenand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);
+
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
.remove = omap2_onenand_remove,
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
+ .of_match_table = omap2_onenand_id_table,
},
};
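
The omap2 OneNAND conversion above drops the OMAP-private DMA API in favour of dmaengine. Channel acquisition reduces to a capability-mask request; when no memcpy-capable channel exists the driver simply stays in PIO mode (read_bufferram/write_bufferram left unset). A sketch of the request, with a hypothetical caller:

	#include <linux/dmaengine.h>

	static struct dma_chan *foo_request_memcpy_chan(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		/* No filter function: any channel advertising DMA_MEMCPY will do. */
		return dma_request_channel(mask, NULL, NULL);
	}
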
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 1a6d0e367b89..979f4031f23c 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1383,15 +1383,6 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- /* Do not allow reads past end of device */
- if (unlikely(from >= mtd->size ||
- column + len > ((mtd->size >> this->page_shift) -
- (from >> this->page_shift)) * oobsize)) {
- printk(KERN_ERR "%s: Attempted to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
stats = mtd->ecc_stats;
readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
@@ -1448,38 +1439,6 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
}
/**
- * onenand_read - [MTD Interface] Read data from flash
- * @param mtd MTD device structure
- * @param from offset to read from
- * @param len number of bytes to read
- * @param retlen pointer to variable to store the number of read bytes
- * @param buf the databuffer to put data
- *
- * Read with ecc
-*/
-static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct onenand_chip *this = mtd->priv;
- struct mtd_oob_ops ops = {
- .len = len,
- .ooblen = 0,
- .datbuf = buf,
- .oobbuf = NULL,
- };
- int ret;
-
- onenand_get_device(mtd, FL_READING);
- ret = ONENAND_IS_4KB_PAGE(this) ?
- onenand_mlc_read_ops_nolock(mtd, from, &ops) :
- onenand_read_ops_nolock(mtd, from, &ops);
- onenand_release_device(mtd);
-
- *retlen = ops.retlen;
- return ret;
-}
-
-/**
* onenand_read_oob - [MTD Interface] Read main and/or out-of-band
* @param mtd: MTD device structure
* @param from: offset to read from
@@ -2056,15 +2015,6 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- /* Do not allow reads past end of device */
- if (unlikely(to >= mtd->size ||
- column + len > ((mtd->size >> this->page_shift) -
- (to >> this->page_shift)) * oobsize)) {
- printk(KERN_ERR "%s: Attempted to write past end of device\n",
- __func__);
- return -EINVAL;
- }
-
oobbuf = this->oob_buf;
oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
@@ -2129,35 +2079,6 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
}
/**
- * onenand_write - [MTD Interface] write buffer to FLASH
- * @param mtd MTD device structure
- * @param to offset to write to
- * @param len number of bytes to write
- * @param retlen pointer to variable to store the number of written bytes
- * @param buf the data to write
- *
- * Write with ECC
- */
-static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct mtd_oob_ops ops = {
- .len = len,
- .ooblen = 0,
- .datbuf = (u_char *) buf,
- .oobbuf = NULL,
- };
- int ret;
-
- onenand_get_device(mtd, FL_WRITING);
- ret = onenand_write_ops_nolock(mtd, to, &ops);
- onenand_release_device(mtd);
-
- *retlen = ops.retlen;
- return ret;
-}
-
-/**
* onenand_write_oob - [MTD Interface] NAND write data and/or out-of-band
* @param mtd: MTD device structure
* @param to: offset to write
@@ -4038,8 +3959,6 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->_erase = onenand_erase;
mtd->_point = NULL;
mtd->_unpoint = NULL;
- mtd->_read = onenand_read;
- mtd->_write = onenand_write;
mtd->_read_oob = onenand_read_oob;
mtd->_write_oob = onenand_write_oob;
mtd->_panic_write = onenand_panic_write;
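
Dropping mtd->_read/_write here presumably relies on the MTD core falling back to the ->_read_oob()/->_write_oob() entry points when the plain hooks are absent; the deleted wrappers did exactly that translation by hand. A conceptual sketch of the fallback dispatch (assumed core behaviour, not the literal mtdcore code):

	#include <linux/mtd/mtd.h>

	static int mtd_read_fallback(struct mtd_info *mtd, loff_t from, size_t len,
				     size_t *retlen, u_char *buf)
	{
		struct mtd_oob_ops ops = {
			.len = len,
			.datbuf = buf,
			/* no OOB buffer: a plain data read */
		};
		int ret;

		if (mtd->_read)
			return mtd->_read(mtd, from, len, retlen, buf);

		ret = mtd->_read_oob(mtd, from, &ops);
		*retlen = ops.retlen;
		return ret;
	}
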
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index af0ac1a7bf8f..2e9d076e445a 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -25,8 +25,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <asm/mach/flash.h>
-
#include "samsung.h"
enum soc_type {
@@ -129,16 +127,13 @@ struct s3c_onenand {
struct platform_device *pdev;
enum soc_type type;
void __iomem *base;
- struct resource *base_res;
void __iomem *ahb_addr;
- struct resource *ahb_res;
int bootram_command;
- void __iomem *page_buf;
- void __iomem *oob_buf;
+ void *page_buf;
+ void *oob_buf;
unsigned int (*mem_addr)(int fba, int fpa, int fsa);
unsigned int (*cmd_map)(unsigned int type, unsigned int val);
void __iomem *dma_addr;
- struct resource *dma_res;
unsigned long phys_base;
struct completion complete;
};
@@ -413,8 +408,8 @@ static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
/*
* Emulate two BufferRAMs and access them with 4-byte pointers
*/
- m = (unsigned int *) onenand->page_buf;
- s = (unsigned int *) onenand->oob_buf;
+ m = onenand->page_buf;
+ s = onenand->oob_buf;
if (index) {
m += (this->writesize >> 2);
@@ -486,11 +481,11 @@ static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
unsigned char *p;
if (area == ONENAND_DATARAM) {
- p = (unsigned char *) onenand->page_buf;
+ p = onenand->page_buf;
if (index == 1)
p += this->writesize;
} else {
- p = (unsigned char *) onenand->oob_buf;
+ p = onenand->oob_buf;
if (index == 1)
p += mtd->oobsize;
}
@@ -851,15 +846,14 @@ static int s3c_onenand_probe(struct platform_device *pdev)
/* No need to check pdata. The platform data is optional. */
size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
- mtd = kzalloc(size, GFP_KERNEL);
+ mtd = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (!mtd)
return -ENOMEM;
- onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
- if (!onenand) {
- err = -ENOMEM;
- goto onenand_fail;
- }
+ onenand = devm_kzalloc(&pdev->dev, sizeof(struct s3c_onenand),
+ GFP_KERNEL);
+ if (!onenand)
+ return -ENOMEM;
this = (struct onenand_chip *) &mtd[1];
mtd->priv = this;
@@ -870,26 +864,12 @@ static int s3c_onenand_probe(struct platform_device *pdev)
s3c_onenand_setup(mtd);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- return -ENOENT;
- goto ahb_resource_failed;
- }
+ onenand->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->base))
+ return PTR_ERR(onenand->base);
- onenand->base_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->base_res) {
- dev_err(&pdev->dev, "failed to request memory resource\n");
- err = -EBUSY;
- goto resource_failed;
- }
+ onenand->phys_base = r->start;
- onenand->base = ioremap(r->start, resource_size(r));
- if (!onenand->base) {
- dev_err(&pdev->dev, "failed to map memory resource\n");
- err = -EFAULT;
- goto ioremap_failed;
- }
/* Set onenand_chip also */
this->base = onenand->base;
@@ -898,40 +878,20 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (onenand->type != TYPE_S5PC110) {
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!r) {
- dev_err(&pdev->dev, "no buffer memory resource defined\n");
- err = -ENOENT;
- goto ahb_resource_failed;
- }
-
- onenand->ahb_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->ahb_res) {
- dev_err(&pdev->dev, "failed to request buffer memory resource\n");
- err = -EBUSY;
- goto ahb_resource_failed;
- }
-
- onenand->ahb_addr = ioremap(r->start, resource_size(r));
- if (!onenand->ahb_addr) {
- dev_err(&pdev->dev, "failed to map buffer memory resource\n");
- err = -EINVAL;
- goto ahb_ioremap_failed;
- }
+ onenand->ahb_addr = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->ahb_addr))
+ return PTR_ERR(onenand->ahb_addr);
/* Allocate 4KiB BufferRAM */
- onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL);
- if (!onenand->page_buf) {
- err = -ENOMEM;
- goto page_buf_fail;
- }
+ onenand->page_buf = devm_kzalloc(&pdev->dev, SZ_4K,
+ GFP_KERNEL);
+ if (!onenand->page_buf)
+ return -ENOMEM;
/* Allocate 128-byte SpareRAM */
- onenand->oob_buf = kzalloc(128, GFP_KERNEL);
- if (!onenand->oob_buf) {
- err = -ENOMEM;
- goto oob_buf_fail;
- }
+ onenand->oob_buf = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
+ if (!onenand->oob_buf)
+ return -ENOMEM;
/* S3C doesn't handle subpage write */
mtd->subpage_sft = 0;
@@ -939,28 +899,9 @@ static int s3c_onenand_probe(struct platform_device *pdev)
} else { /* S5PC110 */
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!r) {
- dev_err(&pdev->dev, "no dma memory resource defined\n");
- err = -ENOENT;
- goto dma_resource_failed;
- }
-
- onenand->dma_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->dma_res) {
- dev_err(&pdev->dev, "failed to request dma memory resource\n");
- err = -EBUSY;
- goto dma_resource_failed;
- }
-
- onenand->dma_addr = ioremap(r->start, resource_size(r));
- if (!onenand->dma_addr) {
- dev_err(&pdev->dev, "failed to map dma memory resource\n");
- err = -EINVAL;
- goto dma_ioremap_failed;
- }
-
- onenand->phys_base = onenand->base_res->start;
+ onenand->dma_addr = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->dma_addr))
+ return PTR_ERR(onenand->dma_addr);
s5pc110_dma_ops = s5pc110_dma_poll;
/* Interrupt support */
@@ -968,19 +909,20 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (r) {
init_completion(&onenand->complete);
s5pc110_dma_ops = s5pc110_dma_irq;
- err = request_irq(r->start, s5pc110_onenand_irq,
- IRQF_SHARED, "onenand", &onenand);
+ err = devm_request_irq(&pdev->dev, r->start,
+ s5pc110_onenand_irq,
+ IRQF_SHARED, "onenand",
+ &onenand);
if (err) {
dev_err(&pdev->dev, "failed to get irq\n");
- goto scan_failed;
+ return err;
}
}
}
- if (onenand_scan(mtd, 1)) {
- err = -EFAULT;
- goto scan_failed;
- }
+ err = onenand_scan(mtd, 1);
+ if (err)
+ return err;
if (onenand->type != TYPE_S5PC110) {
/* S3C doesn't handle subpage write */
@@ -994,40 +936,15 @@ static int s3c_onenand_probe(struct platform_device *pdev)
err = mtd_device_parse_register(mtd, NULL, NULL,
pdata ? pdata->parts : NULL,
pdata ? pdata->nr_parts : 0);
+ if (err) {
+ dev_err(&pdev->dev, "failed to parse partitions and register the MTD device\n");
+ onenand_release(mtd);
+ return err;
+ }
platform_set_drvdata(pdev, mtd);
return 0;
-
-scan_failed:
- if (onenand->dma_addr)
- iounmap(onenand->dma_addr);
-dma_ioremap_failed:
- if (onenand->dma_res)
- release_mem_region(onenand->dma_res->start,
- resource_size(onenand->dma_res));
- kfree(onenand->oob_buf);
-oob_buf_fail:
- kfree(onenand->page_buf);
-page_buf_fail:
- if (onenand->ahb_addr)
- iounmap(onenand->ahb_addr);
-ahb_ioremap_failed:
- if (onenand->ahb_res)
- release_mem_region(onenand->ahb_res->start,
- resource_size(onenand->ahb_res));
-dma_resource_failed:
-ahb_resource_failed:
- iounmap(onenand->base);
-ioremap_failed:
- if (onenand->base_res)
- release_mem_region(onenand->base_res->start,
- resource_size(onenand->base_res));
-resource_failed:
- kfree(onenand);
-onenand_fail:
- kfree(mtd);
- return err;
}
static int s3c_onenand_remove(struct platform_device *pdev)
@@ -1035,25 +952,7 @@ static int s3c_onenand_remove(struct platform_device *pdev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
onenand_release(mtd);
- if (onenand->ahb_addr)
- iounmap(onenand->ahb_addr);
- if (onenand->ahb_res)
- release_mem_region(onenand->ahb_res->start,
- resource_size(onenand->ahb_res));
- if (onenand->dma_addr)
- iounmap(onenand->dma_addr);
- if (onenand->dma_res)
- release_mem_region(onenand->dma_res->start,
- resource_size(onenand->dma_res));
-
- iounmap(onenand->base);
- release_mem_region(onenand->base_res->start,
- resource_size(onenand->base_res));
-
- kfree(onenand->oob_buf);
- kfree(onenand->page_buf);
- kfree(onenand);
- kfree(mtd);
+
return 0;
}
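
The s3c_onenand cleanups above are a straight conversion to device-managed resources: devm_ allocations and mappings are released automatically on probe failure or removal, which is why both goto ladders disappear. A minimal sketch of the pattern with a hypothetical driver:

	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <linux/sizes.h>
	#include <linux/slab.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *r;
		void __iomem *base;
		void *buf;

		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, r);	/* also checks !r */
		if (IS_ERR(base))
			return PTR_ERR(base);			/* nothing to undo */

		buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;				/* base auto-unmapped */

		return 0;
	}
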
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
index 5fe0079ea5ed..8893dc82a5c8 100644
--- a/drivers/mtd/parsers/sharpslpart.c
+++ b/drivers/mtd/parsers/sharpslpart.c
@@ -192,7 +192,7 @@ static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
/* create physical-logical table */
for (block_num = 0; block_num < phymax; block_num++) {
- block_adr = block_num * mtd->erasesize;
+ block_adr = (loff_t)block_num * mtd->erasesize;
if (mtd_block_isbad(mtd, block_adr))
continue;
@@ -219,7 +219,7 @@ exit:
return ret;
}
-void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
+static void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
{
kfree(ftl->log2phy);
}
@@ -244,7 +244,7 @@ static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
return -EINVAL;
block_num = ftl->log2phy[log_num];
- block_adr = block_num * mtd->erasesize;
+ block_adr = (loff_t)block_num * mtd->erasesize;
block_ofs = mtd_mod_by_eb((u32)from, mtd);
err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);
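
The (loff_t) casts above fix a 32-bit overflow: block_num and mtd->erasesize are both 32-bit, so without the cast the multiply wraps before being widened to loff_t. Worked example with hypothetical numbers: a 128 KiB erase block puts block 40000 at 5,242,880,000 bytes, past the 4 GiB wrap point of a 32-bit product.

	#include <linux/types.h>

	static loff_t block_addr(u32 block_num, u32 erasesize)
	{
		/* wrong: 32-bit multiply, wraps past 4 GiB before widening */
		/* return block_num * erasesize; */

		/* right: casting one operand forces a 64-bit multiply */
		return (loff_t)block_num * erasesize;
	}
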
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 75a2bc447a99..4b8e9183489a 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -58,6 +58,7 @@ struct cqspi_flash_pdata {
u8 data_width;
u8 cs;
bool registered;
+ bool use_direct_mode;
};
struct cqspi_st {
@@ -68,6 +69,7 @@ struct cqspi_st {
void __iomem *iobase;
void __iomem *ahb_base;
+ resource_size_t ahb_size;
struct completion transfer_complete;
struct mutex bus_mutex;
@@ -103,6 +105,7 @@ struct cqspi_st {
/* Register map */
#define CQSPI_REG_CONFIG 0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
+#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
@@ -450,8 +453,7 @@ static int cqspi_command_write_addr(struct spi_nor *nor,
return cqspi_exec_flash_cmd(cqspi, reg);
}
-static int cqspi_indirect_read_setup(struct spi_nor *nor,
- const unsigned int from_addr)
+static int cqspi_read_setup(struct spi_nor *nor)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
@@ -459,8 +461,6 @@ static int cqspi_indirect_read_setup(struct spi_nor *nor,
unsigned int dummy_clk = 0;
unsigned int reg;
- writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
-
reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
@@ -493,8 +493,8 @@ static int cqspi_indirect_read_setup(struct spi_nor *nor,
return 0;
}
-static int cqspi_indirect_read_execute(struct spi_nor *nor,
- u8 *rxbuf, const unsigned n_rx)
+static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
+ loff_t from_addr, const size_t n_rx)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
@@ -504,6 +504,7 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor,
unsigned int bytes_to_read = 0;
int ret = 0;
+ writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
/* Clear all interrupts. */
@@ -570,8 +571,7 @@ failrd:
return ret;
}
-static int cqspi_indirect_write_setup(struct spi_nor *nor,
- const unsigned int to_addr)
+static int cqspi_write_setup(struct spi_nor *nor)
{
unsigned int reg;
struct cqspi_flash_pdata *f_pdata = nor->priv;
@@ -584,8 +584,6 @@ static int cqspi_indirect_write_setup(struct spi_nor *nor,
reg = cqspi_calc_rdreg(nor, nor->program_opcode);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
- writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
-
reg = readl(reg_base + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
reg |= (nor->addr_width - 1);
@@ -593,8 +591,8 @@ static int cqspi_indirect_write_setup(struct spi_nor *nor,
return 0;
}
-static int cqspi_indirect_write_execute(struct spi_nor *nor,
- const u8 *txbuf, const unsigned n_tx)
+static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
+ const u8 *txbuf, const size_t n_tx)
{
const unsigned int page_size = nor->page_size;
struct cqspi_flash_pdata *f_pdata = nor->priv;
@@ -604,6 +602,7 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
unsigned int write_bytes;
int ret;
+ writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
/* Clear all interrupts. */
@@ -894,17 +893,22 @@ static int cqspi_set_protocol(struct spi_nor *nor, const int read)
static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
size_t len, const u_char *buf)
{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
ret = cqspi_set_protocol(nor, 0);
if (ret)
return ret;
- ret = cqspi_indirect_write_setup(nor, to);
+ ret = cqspi_write_setup(nor);
if (ret)
return ret;
- ret = cqspi_indirect_write_execute(nor, buf, len);
+ if (f_pdata->use_direct_mode)
+ memcpy_toio(cqspi->ahb_base + to, buf, len);
+ else
+ ret = cqspi_indirect_write_execute(nor, to, buf, len);
if (ret)
return ret;
@@ -914,17 +918,22 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
size_t len, u_char *buf)
{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
ret = cqspi_set_protocol(nor, 1);
if (ret)
return ret;
- ret = cqspi_indirect_read_setup(nor, from);
+ ret = cqspi_read_setup(nor);
if (ret)
return ret;
- ret = cqspi_indirect_read_execute(nor, buf, len);
+ if (f_pdata->use_direct_mode)
+ memcpy_fromio(buf, cqspi->ahb_base + from, len);
+ else
+ ret = cqspi_indirect_read_execute(nor, buf, from, len);
if (ret)
return ret;
@@ -1059,6 +1068,8 @@ static int cqspi_of_get_pdata(struct platform_device *pdev)
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
+ u32 reg;
+
cqspi_controller_enable(cqspi, 0);
/* Configure the remap address register, no remap */
@@ -1081,6 +1092,11 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
+ /* Enable Direct Access Controller */
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
cqspi_controller_enable(cqspi, 1);
}
@@ -1156,6 +1172,12 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
goto err;
f_pdata->registered = true;
+
+ if (mtd->size <= cqspi->ahb_size) {
+ f_pdata->use_direct_mode = true;
+ dev_dbg(nor->dev, "using direct mode for %s\n",
+ mtd->name);
+ }
}
return 0;
@@ -1215,6 +1237,7 @@ static int cqspi_probe(struct platform_device *pdev)
dev_err(dev, "Cannot remap AHB address.\n");
return PTR_ERR(cqspi->ahb_base);
}
+ cqspi->ahb_size = resource_size(res_ahb);
init_completion(&cqspi->transfer_complete);
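A condensed, hedged sketch of the shape of this change: direct access is only enabled when the whole flash fits inside the remapped AHB window, and each transfer then prefers a plain memcpy_fromio()/memcpy_toio() over the indirect engine.

	/* Probe time: the window must cover the whole device (sketch). */
	if (mtd->size <= cqspi->ahb_size)
		f_pdata->use_direct_mode = true;

	/* Per read: copy through the window, or fall back to indirect mode. */
	if (f_pdata->use_direct_mode)
		memcpy_fromio(buf, cqspi->ahb_base + from, len);
	else
		ret = cqspi_indirect_read_execute(nor, buf, from, len);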
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index f17d22435bfc..2901c7bd9e30 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -801,10 +801,10 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
}
static const struct of_device_id fsl_qspi_dt_ids[] = {
- { .compatible = "fsl,vf610-qspi", .data = (void *)&vybrid_data, },
- { .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
- { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
- { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
+ { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
+ { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
+ { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
+ { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
{ .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
{ /* sentinel */ }
};
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index ef034d898a23..699951523179 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -138,7 +138,6 @@
* @erase_64k: 64k erase supported
* @opcodes: Opcodes which are supported. These are programmed by the BIOS
* before it locks down the controller.
- * @preopcodes: Preopcodes which are supported.
*/
struct intel_spi {
struct device *dev;
@@ -155,7 +154,6 @@ struct intel_spi {
bool swseq_erase;
bool erase_64k;
u8 opcodes[8];
- u8 preopcodes[2];
};
static bool writeable;
@@ -400,10 +398,6 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->opcodes[i] = opmenu0 >> i * 8;
ispi->opcodes[i + 4] = opmenu1 >> i * 8;
}
-
- val = readl(ispi->sregs + PREOP_OPTYPE);
- ispi->preopcodes[0] = val;
- ispi->preopcodes[1] = val >> 8;
}
}
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index abe455ccd68b..5442993b71ff 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -110,7 +110,7 @@
#define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n))
#define MTK_NOR_SHREG(n) (MTK_NOR_SHREG0_REG + 4 * (n))
-struct mt8173_nor {
+struct mtk_nor {
struct spi_nor nor;
struct device *dev;
void __iomem *base; /* nor flash base address */
@@ -118,48 +118,48 @@ struct mt8173_nor {
struct clk *nor_clk;
};
-static void mt8173_nor_set_read_mode(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_set_read_mode(struct mtk_nor *mtk_nor)
{
- struct spi_nor *nor = &mt8173_nor->nor;
+ struct spi_nor *nor = &mtk_nor->nor;
switch (nor->read_proto) {
case SNOR_PROTO_1_1_1:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA3_REG);
- writeb(MTK_NOR_FAST_READ, mt8173_nor->base +
+ writeb(MTK_NOR_FAST_READ, mtk_nor->base +
MTK_NOR_CFG1_REG);
break;
case SNOR_PROTO_1_1_2:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA3_REG);
- writeb(MTK_NOR_DUAL_READ_EN, mt8173_nor->base +
+ writeb(MTK_NOR_DUAL_READ_EN, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
case SNOR_PROTO_1_1_4:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA4_REG);
- writeb(MTK_NOR_QUAD_READ_EN, mt8173_nor->base +
+ writeb(MTK_NOR_QUAD_READ_EN, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
default:
- writeb(MTK_NOR_DUAL_DISABLE, mt8173_nor->base +
+ writeb(MTK_NOR_DUAL_DISABLE, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
}
}
-static int mt8173_nor_execute_cmd(struct mt8173_nor *mt8173_nor, u8 cmdval)
+static int mtk_nor_execute_cmd(struct mtk_nor *mtk_nor, u8 cmdval)
{
int reg;
u8 val = cmdval & 0x1f;
- writeb(cmdval, mt8173_nor->base + MTK_NOR_CMD_REG);
- return readl_poll_timeout(mt8173_nor->base + MTK_NOR_CMD_REG, reg,
+ writeb(cmdval, mtk_nor->base + MTK_NOR_CMD_REG);
+ return readl_poll_timeout(mtk_nor->base + MTK_NOR_CMD_REG, reg,
!(reg & val), 100, 10000);
}
-static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
- u8 *tx, int txlen, u8 *rx, int rxlen)
+static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op,
+ u8 *tx, int txlen, u8 *rx, int rxlen)
{
int len = 1 + txlen + rxlen;
int i, ret, idx;
@@ -167,26 +167,26 @@ static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
if (len > MTK_NOR_MAX_SHIFT)
return -EINVAL;
- writeb(len * 8, mt8173_nor->base + MTK_NOR_CNT_REG);
+ writeb(len * 8, mtk_nor->base + MTK_NOR_CNT_REG);
/* start at PRGDATA5, go down to PRGDATA0 */
idx = MTK_NOR_MAX_RX_TX_SHIFT - 1;
/* opcode */
- writeb(op, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(op, mtk_nor->base + MTK_NOR_PRG_REG(idx));
idx--;
/* program TX data */
for (i = 0; i < txlen; i++, idx--)
- writeb(tx[i], mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(tx[i], mtk_nor->base + MTK_NOR_PRG_REG(idx));
/* clear out rest of TX registers */
while (idx >= 0) {
- writeb(0, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(0, mtk_nor->base + MTK_NOR_PRG_REG(idx));
idx--;
}
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PRG_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PRG_CMD);
if (ret)
return ret;
@@ -195,20 +195,20 @@ static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
/* read out RX data */
for (i = 0; i < rxlen; i++, idx--)
- rx[i] = readb(mt8173_nor->base + MTK_NOR_SHREG(idx));
+ rx[i] = readb(mtk_nor->base + MTK_NOR_SHREG(idx));
return 0;
}
/* Do a WRSR (Write Status Register) command */
-static int mt8173_nor_wr_sr(struct mt8173_nor *mt8173_nor, u8 sr)
+static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, u8 sr)
{
- writeb(sr, mt8173_nor->base + MTK_NOR_PRGDATA5_REG);
- writeb(8, mt8173_nor->base + MTK_NOR_CNT_REG);
- return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WRSR_CMD);
+ writeb(sr, mtk_nor->base + MTK_NOR_PRGDATA5_REG);
+ writeb(8, mtk_nor->base + MTK_NOR_CNT_REG);
+ return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WRSR_CMD);
}
-static int mt8173_nor_write_buffer_enable(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_write_buffer_enable(struct mtk_nor *mtk_nor)
{
u8 reg;
@@ -216,27 +216,27 @@ static int mt8173_nor_write_buffer_enable(struct mt8173_nor *mt8173_nor)
* 0: pre-fetch buffer used for read
* 1: pre-fetch buffer used for page program
*/
- writel(MTK_NOR_WR_BUF_ENABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
- return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
+ writel(MTK_NOR_WR_BUF_ENABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
+ return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
0x01 == (reg & 0x01), 100, 10000);
}
-static int mt8173_nor_write_buffer_disable(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_write_buffer_disable(struct mtk_nor *mtk_nor)
{
u8 reg;
- writel(MTK_NOR_WR_BUF_DISABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
- return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
+ writel(MTK_NOR_WR_BUF_DISABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
+ return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
MTK_NOR_WR_BUF_DISABLE == (reg & 0x1), 100,
10000);
}
-static void mt8173_nor_set_addr_width(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_set_addr_width(struct mtk_nor *mtk_nor)
{
u8 val;
- struct spi_nor *nor = &mt8173_nor->nor;
+ struct spi_nor *nor = &mtk_nor->nor;
- val = readb(mt8173_nor->base + MTK_NOR_DUAL_REG);
+ val = readb(mtk_nor->base + MTK_NOR_DUAL_REG);
switch (nor->addr_width) {
case 3:
@@ -246,115 +246,115 @@ static void mt8173_nor_set_addr_width(struct mt8173_nor *mt8173_nor)
val |= MTK_NOR_4B_ADDR_EN;
break;
default:
- dev_warn(mt8173_nor->dev, "Unexpected address width %u.\n",
+ dev_warn(mtk_nor->dev, "Unexpected address width %u.\n",
nor->addr_width);
break;
}
- writeb(val, mt8173_nor->base + MTK_NOR_DUAL_REG);
+ writeb(val, mtk_nor->base + MTK_NOR_DUAL_REG);
}
-static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr)
+static void mtk_nor_set_addr(struct mtk_nor *mtk_nor, u32 addr)
{
int i;
- mt8173_nor_set_addr_width(mt8173_nor);
+ mtk_nor_set_addr_width(mtk_nor);
for (i = 0; i < 3; i++) {
- writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR0_REG + i * 4);
+ writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR0_REG + i * 4);
addr >>= 8;
}
/* Last register is non-contiguous */
- writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG);
+ writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR3_REG);
}
-static ssize_t mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
- u_char *buffer)
+static ssize_t mtk_nor_read(struct spi_nor *nor, loff_t from, size_t length,
+ u_char *buffer)
{
int i, ret;
int addr = (int)from;
u8 *buf = (u8 *)buffer;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
/* set mode for fast read mode, dual mode or quad mode */
- mt8173_nor_set_read_mode(mt8173_nor);
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_read_mode(mtk_nor);
+ mtk_nor_set_addr(mtk_nor, addr);
for (i = 0; i < length; i++) {
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_READ_CMD);
if (ret < 0)
return ret;
- buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG);
+ buf[i] = readb(mtk_nor->base + MTK_NOR_RDATA_REG);
}
return length;
}
-static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor,
- int addr, int length, u8 *data)
+static int mtk_nor_write_single_byte(struct mtk_nor *mtk_nor,
+ int addr, int length, u8 *data)
{
int i, ret;
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_addr(mtk_nor, addr);
for (i = 0; i < length; i++) {
- writeb(*data++, mt8173_nor->base + MTK_NOR_WDATA_REG);
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_WR_CMD);
+ writeb(*data++, mtk_nor->base + MTK_NOR_WDATA_REG);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_WR_CMD);
if (ret < 0)
return ret;
}
return 0;
}
-static int mt8173_nor_write_buffer(struct mt8173_nor *mt8173_nor, int addr,
- const u8 *buf)
+static int mtk_nor_write_buffer(struct mtk_nor *mtk_nor, int addr,
+ const u8 *buf)
{
int i, bufidx, data;
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_addr(mtk_nor, addr);
bufidx = 0;
for (i = 0; i < SFLASH_WRBUF_SIZE; i += 4) {
data = buf[bufidx + 3]<<24 | buf[bufidx + 2]<<16 |
buf[bufidx + 1]<<8 | buf[bufidx];
bufidx += 4;
- writel(data, mt8173_nor->base + MTK_NOR_PP_DATA_REG);
+ writel(data, mtk_nor->base + MTK_NOR_PP_DATA_REG);
}
- return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD);
+ return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WR_CMD);
}
-static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
- const u_char *buf)
+static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
size_t i;
- ret = mt8173_nor_write_buffer_enable(mt8173_nor);
+ ret = mtk_nor_write_buffer_enable(mtk_nor);
if (ret < 0) {
- dev_warn(mt8173_nor->dev, "write buffer enable failed!\n");
+ dev_warn(mtk_nor->dev, "write buffer enable failed!\n");
return ret;
}
for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) {
- ret = mt8173_nor_write_buffer(mt8173_nor, to, buf);
+ ret = mtk_nor_write_buffer(mtk_nor, to, buf);
if (ret < 0) {
- dev_err(mt8173_nor->dev, "write buffer failed!\n");
+ dev_err(mtk_nor->dev, "write buffer failed!\n");
return ret;
}
to += SFLASH_WRBUF_SIZE;
buf += SFLASH_WRBUF_SIZE;
}
- ret = mt8173_nor_write_buffer_disable(mt8173_nor);
+ ret = mtk_nor_write_buffer_disable(mtk_nor);
if (ret < 0) {
- dev_warn(mt8173_nor->dev, "write buffer disable failed!\n");
+ dev_warn(mtk_nor->dev, "write buffer disable failed!\n");
return ret;
}
if (i < len) {
- ret = mt8173_nor_write_single_byte(mt8173_nor, to,
- (int)(len - i), (u8 *)buf);
+ ret = mtk_nor_write_single_byte(mtk_nor, to,
+ (int)(len - i), (u8 *)buf);
if (ret < 0) {
- dev_err(mt8173_nor->dev, "write single byte failed!\n");
+ dev_err(mtk_nor->dev, "write single byte failed!\n");
return ret;
}
}
@@ -362,72 +362,72 @@ static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
return len;
}
-static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
switch (opcode) {
case SPINOR_OP_RDSR:
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_RDSR_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_RDSR_CMD);
if (ret < 0)
return ret;
if (len == 1)
- *buf = readb(mt8173_nor->base + MTK_NOR_RDSR_REG);
+ *buf = readb(mtk_nor->base + MTK_NOR_RDSR_REG);
else
- dev_err(mt8173_nor->dev, "len should be 1 for read status!\n");
+ dev_err(mtk_nor->dev, "len should be 1 for read status!\n");
break;
default:
- ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, NULL, 0, buf, len);
+ ret = mtk_nor_do_tx_rx(mtk_nor, opcode, NULL, 0, buf, len);
break;
}
return ret;
}
-static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- int len)
+static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ int len)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
switch (opcode) {
case SPINOR_OP_WRSR:
/* We only handle 1 byte */
- ret = mt8173_nor_wr_sr(mt8173_nor, *buf);
+ ret = mtk_nor_wr_sr(mtk_nor, *buf);
break;
default:
- ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, buf, len, NULL, 0);
+ ret = mtk_nor_do_tx_rx(mtk_nor, opcode, buf, len, NULL, 0);
if (ret)
- dev_warn(mt8173_nor->dev, "write reg failure!\n");
+ dev_warn(mtk_nor->dev, "write reg failure!\n");
break;
}
return ret;
}
-static void mt8173_nor_disable_clk(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_disable_clk(struct mtk_nor *mtk_nor)
{
- clk_disable_unprepare(mt8173_nor->spi_clk);
- clk_disable_unprepare(mt8173_nor->nor_clk);
+ clk_disable_unprepare(mtk_nor->spi_clk);
+ clk_disable_unprepare(mtk_nor->nor_clk);
}
-static int mt8173_nor_enable_clk(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_enable_clk(struct mtk_nor *mtk_nor)
{
int ret;
- ret = clk_prepare_enable(mt8173_nor->spi_clk);
+ ret = clk_prepare_enable(mtk_nor->spi_clk);
if (ret)
return ret;
- ret = clk_prepare_enable(mt8173_nor->nor_clk);
+ ret = clk_prepare_enable(mtk_nor->nor_clk);
if (ret) {
- clk_disable_unprepare(mt8173_nor->spi_clk);
+ clk_disable_unprepare(mtk_nor->spi_clk);
return ret;
}
return 0;
}
-static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
+static int mtk_nor_init(struct mtk_nor *mtk_nor,
struct device_node *flash_node)
{
const struct spi_nor_hwcaps hwcaps = {
@@ -439,18 +439,18 @@ static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
struct spi_nor *nor;
/* initialize controller to accept commands */
- writel(MTK_NOR_ENABLE_SF_CMD, mt8173_nor->base + MTK_NOR_WRPROT_REG);
+ writel(MTK_NOR_ENABLE_SF_CMD, mtk_nor->base + MTK_NOR_WRPROT_REG);
- nor = &mt8173_nor->nor;
- nor->dev = mt8173_nor->dev;
- nor->priv = mt8173_nor;
+ nor = &mtk_nor->nor;
+ nor->dev = mtk_nor->dev;
+ nor->priv = mtk_nor;
spi_nor_set_flash_node(nor, flash_node);
/* fill the hooks to spi nor */
- nor->read = mt8173_nor_read;
- nor->read_reg = mt8173_nor_read_reg;
- nor->write = mt8173_nor_write;
- nor->write_reg = mt8173_nor_write_reg;
+ nor->read = mtk_nor_read;
+ nor->read_reg = mtk_nor_read_reg;
+ nor->write = mtk_nor_write;
+ nor->write_reg = mtk_nor_write_reg;
nor->mtd.name = "mtk_nor";
/* initialized with NULL */
ret = spi_nor_scan(nor, NULL, &hwcaps);
@@ -465,34 +465,34 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
struct device_node *flash_np;
struct resource *res;
int ret;
- struct mt8173_nor *mt8173_nor;
+ struct mtk_nor *mtk_nor;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No DT found\n");
return -EINVAL;
}
- mt8173_nor = devm_kzalloc(&pdev->dev, sizeof(*mt8173_nor), GFP_KERNEL);
- if (!mt8173_nor)
+ mtk_nor = devm_kzalloc(&pdev->dev, sizeof(*mtk_nor), GFP_KERNEL);
+ if (!mtk_nor)
return -ENOMEM;
- platform_set_drvdata(pdev, mt8173_nor);
+ platform_set_drvdata(pdev, mtk_nor);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mt8173_nor->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mt8173_nor->base))
- return PTR_ERR(mt8173_nor->base);
+ mtk_nor->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mtk_nor->base))
+ return PTR_ERR(mtk_nor->base);
- mt8173_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
- if (IS_ERR(mt8173_nor->spi_clk))
- return PTR_ERR(mt8173_nor->spi_clk);
+ mtk_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(mtk_nor->spi_clk))
+ return PTR_ERR(mtk_nor->spi_clk);
- mt8173_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
- if (IS_ERR(mt8173_nor->nor_clk))
- return PTR_ERR(mt8173_nor->nor_clk);
+ mtk_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
+ if (IS_ERR(mtk_nor->nor_clk))
+ return PTR_ERR(mtk_nor->nor_clk);
- mt8173_nor->dev = &pdev->dev;
+ mtk_nor->dev = &pdev->dev;
- ret = mt8173_nor_enable_clk(mt8173_nor);
+ ret = mtk_nor_enable_clk(mtk_nor);
if (ret)
return ret;
@@ -503,20 +503,20 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
ret = -ENODEV;
goto nor_free;
}
- ret = mtk_nor_init(mt8173_nor, flash_np);
+ ret = mtk_nor_init(mtk_nor, flash_np);
nor_free:
if (ret)
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return ret;
}
static int mtk_nor_drv_remove(struct platform_device *pdev)
{
- struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
+ struct mtk_nor *mtk_nor = platform_get_drvdata(pdev);
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return 0;
}
@@ -524,18 +524,18 @@ static int mtk_nor_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int mtk_nor_suspend(struct device *dev)
{
- struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+ struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return 0;
}
static int mtk_nor_resume(struct device *dev)
{
- struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+ struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
- return mt8173_nor_enable_clk(mt8173_nor);
+ return mtk_nor_enable_clk(mtk_nor);
}
static const struct dev_pm_ops mtk_nor_dev_pm_ops = {
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index bc266f70a15b..d445a4d3b770 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -330,8 +330,22 @@ static inline int spi_nor_fsr_ready(struct spi_nor *nor)
int fsr = read_fsr(nor);
if (fsr < 0)
return fsr;
- else
- return fsr & FSR_READY;
+
+ if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
+ if (fsr & FSR_E_ERR)
+ dev_err(nor->dev, "Erase operation failed.\n");
+ else
+ dev_err(nor->dev, "Program operation failed.\n");
+
+ if (fsr & FSR_PT_ERR)
+ dev_err(nor->dev,
+ "Attempted to modify a protected sector.\n");
+
+ nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
+ return -EIO;
+ }
+
+ return fsr & FSR_READY;
}
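For reference, the flag status register bits tested above, following the usual Micron FSR layout; treat the exact bit positions as an assumption here rather than something this hunk defines.

	#define FSR_READY	BIT(7)	/* erase/program controller ready */
	#define FSR_E_ERR	BIT(5)	/* erase operation failed */
	#define FSR_P_ERR	BIT(4)	/* program operation failed */
	#define FSR_PT_ERR	BIT(1)	/* erase/program hit a protected sector */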
static int spi_nor_ready(struct spi_nor *nor)
@@ -552,6 +566,27 @@ erase_err:
return ret;
}
+/* Write status register and ensure bits in mask match written values */
+static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
+{
+ int ret;
+
+ write_enable(nor);
+ ret = write_sr(nor, status_new);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ ret = read_sr(nor);
+ if (ret < 0)
+ return ret;
+
+ return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
+}
+
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
uint64_t *len)
{
@@ -650,7 +685,6 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- int ret;
status_old = read_sr(nor);
if (status_old < 0)
@@ -714,11 +748,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) < (status_old & mask))
return -EINVAL;
- write_enable(nor);
- ret = write_sr(nor, status_new);
- if (ret)
- return ret;
- return spi_nor_wait_till_ready(nor);
+ return write_sr_and_check(nor, status_new, mask);
}
/*
@@ -735,7 +765,6 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- int ret;
status_old = read_sr(nor);
if (status_old < 0)
@@ -802,11 +831,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) > (status_old & mask))
return -EINVAL;
- write_enable(nor);
- ret = write_sr(nor, status_new);
- if (ret)
- return ret;
- return spi_nor_wait_till_ready(nor);
+ return write_sr_and_check(nor, status_new, mask);
}
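Both lock and unlock now share the read-back verification; a hedged sketch of what a failed verification looks like to the caller:

	ret = write_sr_and_check(nor, status_new, mask);
	if (ret == -EIO)
		/* flash accepted the command but kept the old protection bits */
		dev_err(nor->dev, "status register write not taken\n");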
/*
@@ -1020,7 +1045,13 @@ static const struct flash_info spi_nor_ids[] = {
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
/* ISSI */
- { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ) },
/* Macronix */
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
@@ -1065,7 +1096,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
- /* Spansion -- single (large) sector size only, at least
+ /* Spansion/Cypress -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
@@ -1094,6 +1125,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -2713,6 +2746,16 @@ static void spi_nor_resume(struct mtd_info *mtd)
dev_err(dev, "resume() failed\n");
}
+void spi_nor_restore(struct spi_nor *nor)
+{
+ /* restore the addressing mode */
+ if ((nor->addr_width == 4) &&
+ (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+ !(nor->info->flags & SPI_NOR_4B_OPCODES))
+ set_4byte(nor, nor->info, 0);
+}
+EXPORT_SYMBOL_GPL(spi_nor_restore);
+
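A hedged sketch of how a controller driver might use the new hook: put the flash back into 3-byte addressing on shutdown or remove, so a 3-byte-only boot ROM can still read it after reset. Names other than spi_nor_restore() are illustrative.

	static void example_qspi_shutdown(struct platform_device *pdev)
	{
		struct example_qspi *q = platform_get_drvdata(pdev);

		spi_nor_restore(&q->nor);	/* back to 3-byte addressing */
	}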
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 5f03b8c885a9..cde19c99e77b 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -151,7 +151,7 @@ static int read_page(int log)
memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer);
- if (err == -EUCLEAN)
+ if (!err || err == -EUCLEAN)
err = mtd->ecc_stats.corrected - oldstats.corrected;
if (err < 0 || read != mtd->writesize) {
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 1cb3f7758fb6..766b2c385682 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -193,6 +193,9 @@ static int verify_eraseblock(int ebnum)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != use_len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -227,6 +230,9 @@ static int verify_eraseblock(int ebnum)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != mtd->oobavail) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -286,6 +292,9 @@ static int verify_eraseblock_in_one_go(int ebnum)
/* read entire block's OOB at one go */
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -527,6 +536,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to start read past end of OOB\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, addr0, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -571,6 +583,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -615,6 +630,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -684,6 +702,9 @@ static int __init mtd_oobtest_init(void)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err)
goto out;
if (memcmpshow(addr, readbuf, writebuf,
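The helper used throughout these test hunks is a one-liner from include/linux/mtd/mtd.h; -EUCLEAN signals bitflips that ECC already corrected, so the read data is still valid and should not count as a failure:

	static inline int mtd_is_bitflip(int err)
	{
		return err == -EUCLEAN;
	}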
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index b210fdb31c98..b1fc28f63882 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -99,6 +99,8 @@ struct ubiblock {
/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
+static DEFINE_IDR(ubiblock_minor_idr);
+/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;
@@ -351,8 +353,6 @@ static const struct blk_mq_ops ubiblock_mq_ops = {
.init_request = ubiblock_init_request,
};
-static DEFINE_IDR(ubiblock_minor_idr);
-
int ubiblock_create(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
@@ -365,14 +365,15 @@ int ubiblock_create(struct ubi_volume_info *vi)
/* Check that the volume isn't already handled */
mutex_lock(&devices_mutex);
if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
- mutex_unlock(&devices_mutex);
- return -EEXIST;
+ ret = -EEXIST;
+ goto out_unlock;
}
- mutex_unlock(&devices_mutex);
dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
+ if (!dev) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
mutex_init(&dev->dev_mutex);
@@ -437,14 +438,13 @@ int ubiblock_create(struct ubi_volume_info *vi)
goto out_free_queue;
}
- mutex_lock(&devices_mutex);
list_add_tail(&dev->list, &ubiblock_devices);
- mutex_unlock(&devices_mutex);
/* Must be the last step: anyone can call file ops from now on */
add_disk(dev->gd);
dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
dev->ubi_num, dev->vol_id, vi->name);
+ mutex_unlock(&devices_mutex);
return 0;
out_free_queue:
@@ -457,6 +457,8 @@ out_put_disk:
put_disk(dev->gd);
out_free_dev:
kfree(dev);
+out_unlock:
+ mutex_unlock(&devices_mutex);
return ret;
}
@@ -478,30 +480,36 @@ static void ubiblock_cleanup(struct ubiblock *dev)
int ubiblock_remove(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
+ int ret;
mutex_lock(&devices_mutex);
dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
if (!dev) {
- mutex_unlock(&devices_mutex);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_unlock;
}
/* Found a device, let's lock it so we can check if it's busy */
mutex_lock(&dev->dev_mutex);
if (dev->refcnt > 0) {
- mutex_unlock(&dev->dev_mutex);
- mutex_unlock(&devices_mutex);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_unlock_dev;
}
/* Remove from device list */
list_del(&dev->list);
- mutex_unlock(&devices_mutex);
-
ubiblock_cleanup(dev);
mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+
kfree(dev);
return 0;
+
+out_unlock_dev:
+ mutex_unlock(&dev->dev_mutex);
+out_unlock:
+ mutex_unlock(&devices_mutex);
+ return ret;
}
static int ubiblock_resize(struct ubi_volume_info *vi)
@@ -630,6 +638,7 @@ static void ubiblock_remove_all(void)
struct ubiblock *next;
struct ubiblock *dev;
+ mutex_lock(&devices_mutex);
list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
/* The module is being forcefully removed */
WARN_ON(dev->desc);
@@ -638,6 +647,7 @@ static void ubiblock_remove_all(void)
ubiblock_cleanup(dev);
kfree(dev);
}
+ mutex_unlock(&devices_mutex);
}
int __init ubiblock_init(void)
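The net effect, as a hedged sketch (guarded_create() is illustrative): creation, removal, and cleanup of ubiblock devices now run under devices_mutex end to end, so a concurrent create can no longer slip in between the duplicate check and the list insertion.

	static int guarded_create(struct ubi_volume_info *vi)	/* illustrative */
	{
		int ret = 0;

		mutex_lock(&devices_mutex);
		if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
			ret = -EEXIST;
			goto out_unlock;
		}
		/* allocate, register and add to the list while still locked */
	out_unlock:
		mutex_unlock(&devices_mutex);
		return ret;
	}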
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 136ce05d2328..e941395de3ae 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -535,8 +535,17 @@ static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
int limit, device_pebs;
uint64_t device_size;
- if (!max_beb_per1024)
- return 0;
+ if (!max_beb_per1024) {
+ /*
+ * Since max_beb_per1024 has not been set by the user in either
+ * the cmdline or Kconfig, use mtd_max_bad_blocks to set the
+ * limit if it is supported by the device.
+ */
+ limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size);
+ if (limit < 0)
+ return 0;
+ return limit;
+ }
/*
* Here we are using size of the entire flash chip and
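For the case where max_beb_per1024 is set, the limit computed below scales with device size; a worked sketch under illustrative numbers (the helpers shown exist in the MTD API, but the exact code may differ):

	/* 1 GiB device, 128 KiB PEBs => 8192 PEBs; with max_beb_per1024 = 20
	 * (a common default) the bad-PEB limit is 8192 * 20 / 1024 = 160.
	 */
	uint64_t device_size = ubi->mtd->size;			/* e.g. 1 GiB */
	int device_pebs = mtd_div_by_eb(device_size, ubi->mtd);	/* e.g. 8192 */
	int limit = mult_frac(device_pebs, max_beb_per1024, 1024); /* e.g. 160 */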
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 388e46be6ad9..250e30fac61b 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -384,7 +384,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
}
/**
- * leb_write_lock - lock logical eraseblock for writing.
+ * leb_write_trylock - try to lock logical eraseblock for writing.
* @ubi: UBI device description object
* @vol_id: volume ID
* @lnum: logical eraseblock number
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 4f0bd6b4422a..590d967011bb 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -66,7 +66,7 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
}
}
-static int anchor_pebs_avalible(struct rb_root *root)
+static int anchor_pebs_available(struct rb_root *root)
{
struct rb_node *p;
struct ubi_wl_entry *e;
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 5a832bc79b1b..91705962ba73 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -214,9 +214,8 @@ static void assign_aeb_to_av(struct ubi_attach_info *ai,
struct ubi_ainf_volume *av)
{
struct ubi_ainf_peb *tmp_aeb;
- struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+ struct rb_node **p = &av->root.rb_node, *parent = NULL;
- p = &av->root.rb_node;
while (*p) {
parent = *p;
@@ -1063,7 +1062,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e) {
while (i--)
- kfree(fm->e[i]);
+ kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
ret = -ENOMEM;
goto free_hdr;
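The underlying rule, sketched: memory obtained from a slab cache must go back to that cache; passing a kmem_cache_alloc()'d object to kfree() is not guaranteed to be valid.

	e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;
	/* ... later, on the error path ... */
	kmem_cache_free(ubi_wl_entry_slab, e);	/* not kfree(e) */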
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 85237cf661f9..3fd8d7ff7a02 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -270,6 +270,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->last_eb_bytes = vol->usable_leb_size;
}
+ /* Make volume "available" before it becomes accessible via sysfs */
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+ ubi->vol_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
@@ -298,11 +304,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (err)
goto out_sysfs;
- spin_lock(&ubi->volumes_lock);
- ubi->volumes[vol_id] = vol;
- ubi->vol_count += 1;
- spin_unlock(&ubi->volumes_lock);
-
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
self_check_volumes(ubi);
return err;
@@ -315,6 +316,10 @@ out_sysfs:
*/
cdev_device_del(&vol->cdev, &vol->dev);
out_mapping:
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = NULL;
+ ubi->vol_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index b5b8cd6f481c..2052a647220e 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -692,7 +692,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
#ifdef CONFIG_MTD_UBI_FASTMAP
/* Check whether we need to produce an anchor PEB */
if (!anchor)
- anchor = !anchor_pebs_avalible(&ubi->free);
+ anchor = !anchor_pebs_available(&ubi->free);
if (anchor) {
e1 = find_anchor_wl_entry(&ubi->used);
@@ -1529,6 +1529,46 @@ static void shutdown_work(struct ubi_device *ubi)
}
/**
+ * erase_aeb - erase a PEB given by a UBI attach info PEB
+ * @ubi: UBI device description object
+ * @aeb: UBI attach info PEB
+ * @sync: If true, erase synchronously. Otherwise schedule for erasure
+ */
+static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
+{
+ struct ubi_wl_entry *e;
+ int err;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+
+ if (sync) {
+ err = sync_erase(ubi, e, false);
+ if (err)
+ goto out_free;
+
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ } else {
+ err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
+ if (err)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ wl_entry_destroy(ubi, e);
+
+ return err;
+}
+
+/**
* ubi_wl_init - initialize the WL sub-system using attaching information.
* @ubi: UBI device description object
* @ai: attaching information
@@ -1566,17 +1606,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
- goto out_free;
-
- e->pnum = aeb->pnum;
- e->ec = aeb->ec;
- ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
- wl_entry_destroy(ubi, e);
+ err = erase_aeb(ubi, aeb, false);
+ if (err)
goto out_free;
- }
found_pebs++;
}
@@ -1585,8 +1617,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
+ if (!e) {
+ err = -ENOMEM;
goto out_free;
+ }
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1605,8 +1639,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
+ if (!e) {
+ err = -ENOMEM;
goto out_free;
+ }
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1635,6 +1671,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
ubi_assert(!ubi->lookuptbl[e->pnum]);
ubi->lookuptbl[e->pnum] = e;
} else {
+ bool sync = false;
+
/*
* Usually old Fastmap PEBs are scheduled for erasure
* and we don't have to care about them but if we face
@@ -1644,18 +1682,21 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (ubi->lookuptbl[aeb->pnum])
continue;
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
- goto out_free;
+ /*
+ * The fastmap update code might not find a free PEB for
+ * writing the fastmap anchor to and then reuses the
+ * current fastmap anchor PEB. When this PEB gets erased
+ * and a power cut happens before it is written again we
+ * must make sure that the fastmap attach code doesn't
+ * find any outdated fastmap anchors, hence we erase the
+ * outdated fastmap anchor PEBs synchronously here.
+ */
+ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
+ sync = true;
- e->pnum = aeb->pnum;
- e->ec = aeb->ec;
- ubi_assert(!ubi->lookuptbl[e->pnum]);
- ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
- wl_entry_destroy(ubi, e);
+ err = erase_aeb(ubi, aeb, sync);
+ if (err)
goto out_free;
- }
}
found_pebs++;
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index 2aaa3f7f2ba9..a9e2d669acd8 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -2,7 +2,7 @@
#ifndef UBI_WL_H
#define UBI_WL_H
#ifdef CONFIG_MTD_UBI_FASTMAP
-static int anchor_pebs_avalible(struct rb_root *root);
+static int anchor_pebs_available(struct rb_root *root);
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
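Condensed call pattern from the wl.c hunks above, hedged: only stale fastmap anchor PEBs are erased synchronously, so a power cut can never leave an outdated anchor behind for the next attach; everything else stays on the erase queue.

	bool sync = (aeb->vol_id == UBI_FM_SB_VOLUME_ID);

	err = erase_aeb(ubi, aeb, sync);	/* sync_erase() vs. schedule_erase() */
	if (err)
		goto out_free;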
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 2260063b0ea8..6e5cf9d9cd99 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -413,6 +413,7 @@ static int of_dev_node_match(struct device *dev, const void *data)
return dev->of_node == data;
}
+/* Note this function returns a reference to the mux_chip dev. */
static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
{
struct device *dev;
@@ -466,6 +467,7 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
(!args.args_count && (mux_chip->controllers > 1))) {
dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n",
np, args.np);
+ put_device(&mux_chip->dev);
return ERR_PTR(-EINVAL);
}
@@ -476,10 +478,10 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
if (controller >= mux_chip->controllers) {
dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n",
np, controller, args.np);
+ put_device(&mux_chip->dev);
return ERR_PTR(-EINVAL);
}
- get_device(&mux_chip->dev);
return &mux_chip->mux[controller];
}
EXPORT_SYMBOL_GPL(mux_control_get);
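The general shape of the leak fix, as a sketch (mux_class and to_mux_chip() per the driver; bad_cells is illustrative): once the lookup hands back a referenced device, every early return must drop that reference.

	dev = class_find_device(&mux_class, NULL, np, of_dev_node_match);
	if (!dev)
		return ERR_PTR(-EPROBE_DEFER);

	mux_chip = to_mux_chip(dev);	/* holds a reference from here on */
	if (bad_cells) {		/* any validation failure */
		put_device(&mux_chip->dev);
		return ERR_PTR(-EINVAL);
	}
	return &mux_chip->mux[controller];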
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0936da592e12..944ec3c9282c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -497,4 +497,15 @@ config THUNDERBOLT_NET
source "drivers/net/hyperv/Kconfig"
+config NETDEVSIM
+ tristate "Simulated networking device"
+ depends on DEBUG_FS
+ help
+ This driver is a developer testing tool and software model that can
+ be used to test various control path networking APIs, especially
+ HW-offload related ones.
+
+ To compile this driver as a module, choose M here: the module
+ will be called netdevsim.
+
endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 766f62d02a0b..04c3b747812c 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -78,3 +78,4 @@ obj-$(CONFIG_FUJITSU_ES) += fjes/
thunderbolt-net-y += thunderbolt.o
obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o
+obj-$(CONFIG_NETDEVSIM) += netdevsim/
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 8a9b085c2a98..58c705f24f96 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1431,13 +1431,9 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
{
u8 macaddr[ETH_ALEN];
u8 *mac;
- int i;
if (newval->string) {
- i = sscanf(newval->string, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
- &macaddr[0], &macaddr[1], &macaddr[2],
- &macaddr[3], &macaddr[4], &macaddr[5]);
- if (i != ETH_ALEN)
+ if (!mac_pton(newval->string, macaddr))
goto err;
mac = macaddr;
} else {
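mac_pton() (lib/net_utils.c) does exactly what the open-coded sscanf did: parse a colon-separated MAC string into a 6-byte array, returning false on malformed input. A minimal usage sketch:

	u8 mac[ETH_ALEN];

	if (!mac_pton("02:00:00:12:34:56", mac))
		return -EINVAL;	/* not a well-formed MAC address */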
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index b8029ea03307..433a14b9f731 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -264,7 +264,6 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
}
/* Create payload CAIF frames. */
- pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
while (nfrms < CFHSI_MAX_PKTS) {
struct caif_payload_info *info;
int hpad;
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index d065c0e2d18e..406b4847e5dc 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -251,14 +251,14 @@ static void c_can_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct c_can_pci_data c_can_sta2x11= {
+static const struct c_can_pci_data c_can_sta2x11 = {
.type = BOSCH_C_CAN,
.reg_align = C_CAN_REG_ALIGN_32,
.freq = 52000000, /* 52 MHz */
.bar = 0,
};
-static struct c_can_pci_data c_can_pch = {
+static const struct c_can_pci_data c_can_pch = {
.type = BOSCH_C_CAN,
.reg_align = C_CAN_REG_32,
.freq = 50000000, /* 50 MHz */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 365a8cc62405..b1779566c5bb 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -27,6 +27,7 @@
#include <linux/can/skb.h>
#include <linux/can/netlink.h>
#include <linux/can/led.h>
+#include <linux/of.h>
#include <net/rtnetlink.h>
#define MOD_DESC "CAN device driver interface"
@@ -411,7 +412,7 @@ EXPORT_SYMBOL_GPL(can_change_state);
* Local echo of CAN messages
*
* CAN network devices *should* support a local echo functionality
- * (see Documentation/networking/can.txt). To test the handling of CAN
+ * (see Documentation/networking/can.rst). To test the handling of CAN
* interfaces that do not support the local echo both driver types are
* implemented. In the case that the driver does not support the echo
* the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
@@ -814,6 +815,29 @@ int open_candev(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(open_candev);
+#ifdef CONFIG_OF
+/* Common function that can be used to understand the limitations of
+ * a transceiver when it provides no means to determine them at
+ * runtime.
+ */
+void of_can_transceiver(struct net_device *dev)
+{
+ struct device_node *dn;
+ struct can_priv *priv = netdev_priv(dev);
+ struct device_node *np = dev->dev.parent->of_node;
+ int ret;
+
+ dn = of_get_child_by_name(np, "can-transceiver");
+ if (!dn)
+ return;
+
+ ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
+ if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
+ netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
+}
+EXPORT_SYMBOL_GPL(of_can_transceiver);
+#endif
+
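A hedged sketch of the intended call site: a driver invokes the helper between allocating and registering the candev, so the cap from the "can-transceiver" DT child node is in place before any bittiming is configured.

	struct net_device *dev;

	dev = alloc_candev(sizeof(struct can_priv), 1);	/* echo_skb_max = 1 */
	if (!dev)
		return -ENOMEM;
	of_can_transceiver(dev);	/* may set priv->bitrate_max from DT */
	return register_candev(dev);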
/*
* Common close function for cleanup before the device gets closed.
*
@@ -913,6 +937,13 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
priv->bitrate_const_cnt);
if (err)
return err;
+
+ if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
+ netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
+ priv->bitrate_max);
+ return -EINVAL;
+ }
+
memcpy(&priv->bittiming, &bt, sizeof(bt));
if (priv->do_set_bittiming) {
@@ -997,6 +1028,13 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
priv->data_bitrate_const_cnt);
if (err)
return err;
+
+ if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+ netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
+ priv->bitrate_max);
+ return -EINVAL;
+ }
+
memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
if (priv->do_set_data_bittiming) {
@@ -1064,6 +1102,7 @@ static size_t can_get_size(const struct net_device *dev)
if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
size += nla_total_size(sizeof(*priv->data_bitrate_const) *
priv->data_bitrate_const_cnt);
+ size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
return size;
}
@@ -1121,7 +1160,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
sizeof(*priv->data_bitrate_const) *
priv->data_bitrate_const_cnt,
- priv->data_bitrate_const))
+ priv->data_bitrate_const)) ||
+
+ (nla_put(skb, IFLA_CAN_BITRATE_MAX,
+ sizeof(priv->bitrate_max),
+ &priv->bitrate_max))
)
return -EMSGSIZE;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 0626dcfd1f3d..634c51e6b8ae 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -190,6 +190,7 @@
* MX53 FlexCAN2 03.00.00.00 yes no no no no
* MX6s FlexCAN3 10.00.12.00 yes yes no no yes
* VF610 FlexCAN3 ? no yes no yes yes?
+ * LS1021A FlexCAN2 03.00.04.00 no yes no no yes
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
@@ -279,6 +280,10 @@ struct flexcan_priv {
struct clk *clk_per;
const struct flexcan_devtype_data *devtype_data;
struct regulator *reg_xceiver;
+
+ /* Read and Write APIs */
+ u32 (*read)(void __iomem *addr);
+ void (*write)(u32 val, void __iomem *addr);
};
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
@@ -301,6 +306,12 @@ static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
FLEXCAN_QUIRK_BROKEN_PERR_STATE,
};
+static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+};
+
static const struct can_bittiming_const flexcan_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 4,
@@ -313,39 +324,45 @@ static const struct can_bittiming_const flexcan_bittiming_const = {
.brp_inc = 1,
};
-/* Abstract off the read/write for arm versus ppc. This
- * assumes that PPC uses big-endian registers and everything
- * else uses little-endian registers, independent of CPU
- * endianness.
+/* FlexCAN module is essentially modelled as a little-endian IP in most
+ * SoCs, i.e. the registers as well as the message buffer areas are
+ * implemented in a little-endian fashion.
+ *
+ * However there are some SoCs (e.g. LS1021A) which implement the FlexCAN
+ * module in a big-endian fashion (i.e. the registers as well as the
+ * message buffer areas are implemented in a big-endian way).
+ *
+ * In addition, the FlexCAN module can be found on SoCs having ARM or
+ * PPC cores. So, we need to abstract off the register read/write
+ * functions, ensuring that these cater to all the combinations of module
+ * endianness and underlying CPU endianness.
*/
-#if defined(CONFIG_PPC)
-static inline u32 flexcan_read(void __iomem *addr)
+static inline u32 flexcan_read_be(void __iomem *addr)
{
- return in_be32(addr);
+ return ioread32be(addr);
}
-static inline void flexcan_write(u32 val, void __iomem *addr)
+static inline void flexcan_write_be(u32 val, void __iomem *addr)
{
- out_be32(addr, val);
+ iowrite32be(val, addr);
}
-#else
-static inline u32 flexcan_read(void __iomem *addr)
+
+static inline u32 flexcan_read_le(void __iomem *addr)
{
- return readl(addr);
+ return ioread32(addr);
}
-static inline void flexcan_write(u32 val, void __iomem *addr)
+static inline void flexcan_write_le(u32 val, void __iomem *addr)
{
- writel(val, addr);
+ iowrite32(val, addr);
}
-#endif
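A hedged probe-time sketch of how the helpers get bound; the big-endian device-tree test here is illustrative only — the patch itself keys the choice off the per-SoC devtype data.

	if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
		priv->read = flexcan_read_be;
		priv->write = flexcan_write_be;
	} else {
		priv->read = flexcan_read_le;
		priv->write = flexcan_write_le;
	}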
static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
- flexcan_write(reg_ctrl, &regs->ctrl);
+ priv->write(reg_ctrl, &regs->ctrl);
}
static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
@@ -353,7 +370,7 @@ static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
- flexcan_write(reg_ctrl, &regs->ctrl);
+ priv->write(reg_ctrl, &regs->ctrl);
}
static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
@@ -378,14 +395,14 @@ static int flexcan_chip_enable(struct flexcan_priv *priv)
unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
- reg = flexcan_read(&regs->mcr);
+ reg = priv->read(&regs->mcr);
reg &= ~FLEXCAN_MCR_MDIS;
- flexcan_write(reg, &regs->mcr);
+ priv->write(reg, &regs->mcr);
- while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
udelay(10);
- if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+ if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
return -ETIMEDOUT;
return 0;
@@ -397,14 +414,14 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
- reg = flexcan_read(&regs->mcr);
+ reg = priv->read(&regs->mcr);
reg |= FLEXCAN_MCR_MDIS;
- flexcan_write(reg, &regs->mcr);
+ priv->write(reg, &regs->mcr);
- while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
udelay(10);
- if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
return -ETIMEDOUT;
return 0;
@@ -416,14 +433,14 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv)
unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
u32 reg;
- reg = flexcan_read(&regs->mcr);
+ reg = priv->read(&regs->mcr);
reg |= FLEXCAN_MCR_HALT;
- flexcan_write(reg, &regs->mcr);
+ priv->write(reg, &regs->mcr);
- while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
udelay(100);
- if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
return -ETIMEDOUT;
return 0;
@@ -435,14 +452,14 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
- reg = flexcan_read(&regs->mcr);
+ reg = priv->read(&regs->mcr);
reg &= ~FLEXCAN_MCR_HALT;
- flexcan_write(reg, &regs->mcr);
+ priv->write(reg, &regs->mcr);
- while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
udelay(10);
- if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
+ if (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
return -ETIMEDOUT;
return 0;
@@ -453,11 +470,11 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv)
struct flexcan_regs __iomem *regs = priv->regs;
unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
- flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
- while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
+ priv->write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+ while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
udelay(10);
- if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
+ if (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
return -ETIMEDOUT;
return 0;
@@ -468,7 +485,7 @@ static int __flexcan_get_berr_counter(const struct net_device *dev,
{
const struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
- u32 reg = flexcan_read(&regs->ecr);
+ u32 reg = priv->read(&regs->ecr);
bec->txerr = (reg >> 0) & 0xff;
bec->rxerr = (reg >> 8) & 0xff;
@@ -524,24 +541,24 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (cf->can_dlc > 0) {
data = be32_to_cpup((__be32 *)&cf->data[0]);
- flexcan_write(data, &priv->tx_mb->data[0]);
+ priv->write(data, &priv->tx_mb->data[0]);
}
- if (cf->can_dlc > 3) {
+ if (cf->can_dlc > 4) {
data = be32_to_cpup((__be32 *)&cf->data[4]);
- flexcan_write(data, &priv->tx_mb->data[1]);
+ priv->write(data, &priv->tx_mb->data[1]);
}
can_put_echo_skb(skb, dev, 0);
- flexcan_write(can_id, &priv->tx_mb->can_id);
- flexcan_write(ctrl, &priv->tx_mb->can_ctrl);
+ priv->write(can_id, &priv->tx_mb->can_id);
+ priv->write(ctrl, &priv->tx_mb->can_ctrl);
/* Errata ERR005829 step8:
* Write twice INACTIVE(0x8) code to first MB.
*/
- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
&priv->tx_mb_reserved->can_ctrl);
- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
&priv->tx_mb_reserved->can_ctrl);
return NETDEV_TX_OK;
@@ -660,7 +677,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
u32 code;
do {
- reg_ctrl = flexcan_read(&mb->can_ctrl);
+ reg_ctrl = priv->read(&mb->can_ctrl);
} while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT);
/* is this MB empty? */
@@ -675,17 +692,17 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
offload->dev->stats.rx_errors++;
}
} else {
- reg_iflag1 = flexcan_read(&regs->iflag1);
+ reg_iflag1 = priv->read(&regs->iflag1);
if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
return 0;
- reg_ctrl = flexcan_read(&mb->can_ctrl);
+ reg_ctrl = priv->read(&mb->can_ctrl);
}
/* increase timestamp to full 32 bit */
*timestamp = reg_ctrl << 16;
- reg_id = flexcan_read(&mb->can_id);
+ reg_id = priv->read(&mb->can_id);
if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
@@ -695,19 +712,19 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
cf->can_id |= CAN_RTR_FLAG;
cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
- *(__be32 *)(cf->data + 0) = cpu_to_be32(flexcan_read(&mb->data[0]));
- *(__be32 *)(cf->data + 4) = cpu_to_be32(flexcan_read(&mb->data[1]));
+ *(__be32 *)(cf->data + 0) = cpu_to_be32(priv->read(&mb->data[0]));
+ *(__be32 *)(cf->data + 4) = cpu_to_be32(priv->read(&mb->data[1]));
/* mark as read */
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
/* Clear IRQ */
if (n < 32)
- flexcan_write(BIT(n), &regs->iflag1);
+ priv->write(BIT(n), &regs->iflag1);
else
- flexcan_write(BIT(n - 32), &regs->iflag2);
+ priv->write(BIT(n - 32), &regs->iflag2);
} else {
- flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
- flexcan_read(&regs->timer);
+ priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
+ priv->read(&regs->timer);
}
return 1;
@@ -719,8 +736,8 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
struct flexcan_regs __iomem *regs = priv->regs;
u32 iflag1, iflag2;
- iflag2 = flexcan_read(&regs->iflag2) & priv->reg_imask2_default;
- iflag1 = flexcan_read(&regs->iflag1) & priv->reg_imask1_default &
+ iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default;
+ iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default &
~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
return (u64)iflag2 << 32 | iflag1;
@@ -736,7 +753,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
u32 reg_iflag1, reg_esr;
enum can_state last_state = priv->can.state;
- reg_iflag1 = flexcan_read(&regs->iflag1);
+ reg_iflag1 = priv->read(&regs->iflag1);
/* reception interrupt */
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
@@ -759,7 +776,8 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
/* FIFO overflow interrupt */
if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
handled = IRQ_HANDLED;
- flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
+ priv->write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW,
+ &regs->iflag1);
dev->stats.rx_over_errors++;
dev->stats.rx_errors++;
}
@@ -773,18 +791,18 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
can_led_event(dev, CAN_LED_EVENT_TX);
/* after sending a RTR frame MB is in RX mode */
- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &priv->tx_mb->can_ctrl);
- flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ &priv->tx_mb->can_ctrl);
+ priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
netif_wake_queue(dev);
}
- reg_esr = flexcan_read(&regs->esr);
+ reg_esr = priv->read(&regs->esr);
/* ACK all bus error and state change IRQ sources */
if (reg_esr & FLEXCAN_ESR_ALL_INT) {
handled = IRQ_HANDLED;
- flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
+ priv->write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
}
/* state change interrupt or broken error state quirk fix is enabled */
@@ -846,7 +864,7 @@ static void flexcan_set_bittiming(struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg;
- reg = flexcan_read(&regs->ctrl);
+ reg = priv->read(&regs->ctrl);
reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) |
FLEXCAN_CTRL_RJW(0x3) |
FLEXCAN_CTRL_PSEG1(0x7) |
@@ -870,11 +888,11 @@ static void flexcan_set_bittiming(struct net_device *dev)
reg |= FLEXCAN_CTRL_SMP;
netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
- flexcan_write(reg, &regs->ctrl);
+ priv->write(reg, &regs->ctrl);
/* print chip status */
netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
- flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+ priv->read(&regs->mcr), priv->read(&regs->ctrl));
}
/* flexcan_chip_start
@@ -913,7 +931,7 @@ static int flexcan_chip_start(struct net_device *dev)
* choose format C
* set max mailbox number
*/
- reg_mcr = flexcan_read(&regs->mcr);
+ reg_mcr = priv->read(&regs->mcr);
reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
@@ -927,7 +945,7 @@ static int flexcan_chip_start(struct net_device *dev)
FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
}
netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
- flexcan_write(reg_mcr, &regs->mcr);
+ priv->write(reg_mcr, &regs->mcr);
/* CTRL
*
@@ -940,7 +958,7 @@ static int flexcan_chip_start(struct net_device *dev)
* enable bus off interrupt
* (== FLEXCAN_CTRL_ERR_STATE)
*/
- reg_ctrl = flexcan_read(&regs->ctrl);
+ reg_ctrl = priv->read(&regs->ctrl);
reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
FLEXCAN_CTRL_ERR_STATE;
@@ -960,45 +978,45 @@ static int flexcan_chip_start(struct net_device *dev)
/* leave interrupts disabled for now */
reg_ctrl &= ~FLEXCAN_CTRL_ERR_ALL;
netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
- flexcan_write(reg_ctrl, &regs->ctrl);
+ priv->write(reg_ctrl, &regs->ctrl);
if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
- reg_ctrl2 = flexcan_read(&regs->ctrl2);
+ reg_ctrl2 = priv->read(&regs->ctrl2);
reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
- flexcan_write(reg_ctrl2, &regs->ctrl2);
+ priv->write(reg_ctrl2, &regs->ctrl2);
}
/* clear and invalidate all mailboxes first */
for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
- flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
- &regs->mb[i].can_ctrl);
+ priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
+ &regs->mb[i].can_ctrl);
}
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++)
- flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY,
- &regs->mb[i].can_ctrl);
+ priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
+ &regs->mb[i].can_ctrl);
}
/* Errata ERR005829: mark first TX mailbox as INACTIVE */
- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &priv->tx_mb_reserved->can_ctrl);
+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ &priv->tx_mb_reserved->can_ctrl);
/* mark TX mailbox as INACTIVE */
- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &priv->tx_mb->can_ctrl);
+ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ &priv->tx_mb->can_ctrl);
/* acceptance mask/acceptance code (accept everything) */
- flexcan_write(0x0, &regs->rxgmask);
- flexcan_write(0x0, &regs->rx14mask);
- flexcan_write(0x0, &regs->rx15mask);
+ priv->write(0x0, &regs->rxgmask);
+ priv->write(0x0, &regs->rx14mask);
+ priv->write(0x0, &regs->rx15mask);
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
- flexcan_write(0x0, &regs->rxfgmask);
+ priv->write(0x0, &regs->rxfgmask);
/* clear acceptance filters */
for (i = 0; i < ARRAY_SIZE(regs->mb); i++)
- flexcan_write(0, &regs->rximr[i]);
+ priv->write(0, &regs->rximr[i]);
/* On Vybrid, disable memory error detection interrupts
* and freeze mode.
@@ -1011,16 +1029,16 @@ static int flexcan_chip_start(struct net_device *dev)
* and Correction of Memory Errors" to write to
* MECR register
*/
- reg_ctrl2 = flexcan_read(&regs->ctrl2);
+ reg_ctrl2 = priv->read(&regs->ctrl2);
reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
- flexcan_write(reg_ctrl2, &regs->ctrl2);
+ priv->write(reg_ctrl2, &regs->ctrl2);
- reg_mecr = flexcan_read(&regs->mecr);
+ reg_mecr = priv->read(&regs->mecr);
reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
- flexcan_write(reg_mecr, &regs->mecr);
+ priv->write(reg_mecr, &regs->mecr);
reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
FLEXCAN_MECR_FANCEI_MSK);
- flexcan_write(reg_mecr, &regs->mecr);
+ priv->write(reg_mecr, &regs->mecr);
}
err = flexcan_transceiver_enable(priv);
@@ -1036,14 +1054,14 @@ static int flexcan_chip_start(struct net_device *dev)
/* enable interrupts atomically */
disable_irq(dev->irq);
- flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
- flexcan_write(priv->reg_imask1_default, &regs->imask1);
- flexcan_write(priv->reg_imask2_default, &regs->imask2);
+ priv->write(priv->reg_ctrl_default, &regs->ctrl);
+ priv->write(priv->reg_imask1_default, &regs->imask1);
+ priv->write(priv->reg_imask2_default, &regs->imask2);
enable_irq(dev->irq);
/* print chip status */
netdev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__,
- flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+ priv->read(&regs->mcr), priv->read(&regs->ctrl));
return 0;
@@ -1068,10 +1086,10 @@ static void flexcan_chip_stop(struct net_device *dev)
flexcan_chip_disable(priv);
/* Disable all interrupts */
- flexcan_write(0, &regs->imask2);
- flexcan_write(0, &regs->imask1);
- flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
- &regs->ctrl);
+ priv->write(0, &regs->imask2);
+ priv->write(0, &regs->imask1);
+ priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+ &regs->ctrl);
flexcan_transceiver_disable(priv);
priv->can.state = CAN_STATE_STOPPED;
@@ -1186,26 +1204,26 @@ static int register_flexcandev(struct net_device *dev)
err = flexcan_chip_disable(priv);
if (err)
goto out_disable_per;
- reg = flexcan_read(&regs->ctrl);
+ reg = priv->read(&regs->ctrl);
reg |= FLEXCAN_CTRL_CLK_SRC;
- flexcan_write(reg, &regs->ctrl);
+ priv->write(reg, &regs->ctrl);
err = flexcan_chip_enable(priv);
if (err)
goto out_chip_disable;
/* set freeze, halt and activate FIFO, restrict register access */
- reg = flexcan_read(&regs->mcr);
+ reg = priv->read(&regs->mcr);
reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
- flexcan_write(reg, &regs->mcr);
+ priv->write(reg, &regs->mcr);
/* Currently we only support newer versions of this core
* featuring a RX hardware FIFO (although this driver doesn't
* make use of it on some cores). Older cores, found on some
* Coldfire derivatives, are not tested.
*/
- reg = flexcan_read(&regs->mcr);
+ reg = priv->read(&regs->mcr);
if (!(reg & FLEXCAN_MCR_FEN)) {
netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
err = -ENODEV;
@@ -1233,8 +1251,12 @@ static void unregister_flexcandev(struct net_device *dev)
static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+ { .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, },
+ { .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, },
+ { .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, },
{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
+ { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -1314,6 +1336,21 @@ static int flexcan_probe(struct platform_device *pdev)
dev->flags |= IFF_ECHO;
priv = netdev_priv(dev);
+
+ if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
+ priv->read = flexcan_read_be;
+ priv->write = flexcan_write_be;
+ } else {
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "fsl,p1010-flexcan")) {
+ priv->read = flexcan_read_be;
+ priv->write = flexcan_write_be;
+ } else {
+ priv->read = flexcan_read_le;
+ priv->write = flexcan_write_le;
+ }
+ }
+
priv->can.clock.freq = clock_freq;
priv->can.bittiming_const = &flexcan_bittiming_const;
priv->can.do_set_mode = flexcan_set_mode;
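The probe hunk above selects the register accessors once, based on the "big-endian" device-tree property or the per-SoC default. A standalone sketch of the same function-pointer dispatch; the helper names are illustrative and the example assumes a little-endian host, so the big-endian accessors simply byte-swap.

#include <stdint.h>
#include <stdio.h>

struct priv {
	uint32_t (*read)(const volatile uint32_t *addr);
	void (*write)(uint32_t val, volatile uint32_t *addr);
};

static uint32_t swab32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0xff00) |
	       ((v << 8) & 0xff0000) | (v << 24);
}

static uint32_t read_le(const volatile uint32_t *a) { return *a; }
static void write_le(uint32_t v, volatile uint32_t *a) { *a = v; }
static uint32_t read_be(const volatile uint32_t *a) { return swab32(*a); }
static void write_be(uint32_t v, volatile uint32_t *a) { *a = swab32(v); }

int main(void)
{
	volatile uint32_t mcr = 0;
	int big_endian = 1;	/* as if of_property_read_bool() matched */
	struct priv p;

	p.read = big_endian ? read_be : read_le;
	p.write = big_endian ? write_be : write_le;

	p.write(0x12345678, &mcr);	/* stored byte-swapped */
	printf("raw 0x%08x, read back 0x%08x\n", mcr, p.read(&mcr));
	return 0;
}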
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index f4947a74b65f..2594f7779c6f 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -23,6 +23,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/iopoll.h>
#include <linux/can/dev.h>
@@ -126,6 +127,12 @@ enum m_can_mram_cfg {
#define DBTP_DSJW_SHIFT 0
#define DBTP_DSJW_MASK (0xf << DBTP_DSJW_SHIFT)
+/* Transmitter Delay Compensation Register (TDCR) */
+#define TDCR_TDCO_SHIFT 8
+#define TDCR_TDCO_MASK (0x7F << TDCR_TDCO_SHIFT)
+#define TDCR_TDCF_SHIFT 0
+#define TDCR_TDCF_MASK (0x7F << TDCR_TDCF_SHIFT)
+
/* Test Register (TEST) */
#define TEST_LBCK BIT(4)
@@ -625,21 +632,16 @@ static int m_can_clk_start(struct m_can_priv *priv)
{
int err;
- err = clk_prepare_enable(priv->hclk);
+ err = pm_runtime_get_sync(priv->device);
if (err)
- return err;
-
- err = clk_prepare_enable(priv->cclk);
- if (err)
- clk_disable_unprepare(priv->hclk);
+ pm_runtime_put_noidle(priv->device);
return err;
}
static void m_can_clk_stop(struct m_can_priv *priv)
{
- clk_disable_unprepare(priv->cclk);
- clk_disable_unprepare(priv->hclk);
+ pm_runtime_put_sync(priv->device);
}
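A toy model of the pm_runtime reference counting that clock handling is now delegated to: the device powers up on the 0-to-1 transition and down on 1-to-0. The function names mimic the kernel API for readability, but this is only an illustration, not the kernel implementation (which dispatches to the runtime_suspend/resume callbacks added further below).

#include <stdio.h>

static int usage_count;

static int pm_runtime_get_sync(const char *dev)
{
	if (usage_count++ == 0)
		printf("%s: runtime resume -> clk_prepare_enable()\n", dev);
	return 0;
}

static void pm_runtime_put_sync(const char *dev)
{
	if (--usage_count == 0)
		printf("%s: runtime suspend -> clk_disable_unprepare()\n", dev);
}

int main(void)
{
	pm_runtime_get_sync("m_can");	/* m_can_clk_start() */
	pm_runtime_get_sync("m_can");	/* nested user: no extra resume */
	pm_runtime_put_sync("m_can");
	pm_runtime_put_sync("m_can");	/* m_can_clk_stop(): last user */
	return 0;
}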
static int m_can_get_berr_counter(const struct net_device *dev,
@@ -987,13 +989,47 @@ static int m_can_set_bittiming(struct net_device *dev)
m_can_write(priv, M_CAN_NBTP, reg_btp);
if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ reg_btp = 0;
brp = dbt->brp - 1;
sjw = dbt->sjw - 1;
tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
tseg2 = dbt->phase_seg2 - 1;
- reg_btp = (brp << DBTP_DBRP_SHIFT) | (sjw << DBTP_DSJW_SHIFT) |
- (tseg1 << DBTP_DTSEG1_SHIFT) |
- (tseg2 << DBTP_DTSEG2_SHIFT);
+
+ /* TDC is only needed for bitrates beyond 2.5 MBit/s.
+ * This is mentioned in the "Bit Time Requirements for CAN FD"
+ * paper presented at the International CAN Conference 2013
+ */
+ if (dbt->bitrate > 2500000) {
+ u32 tdco, ssp;
+
+ /* Use the same value of secondary sampling point
+ * as the data sampling point
+ */
+ ssp = dbt->sample_point;
+
+ /* Equation based on Bosch's M_CAN User Manual's
+ * Transmitter Delay Compensation Section
+ */
+ tdco = (priv->can.clock.freq / 1000) *
+ ssp / dbt->bitrate;
+
+ /* Max valid TDCO value is 127 */
+ if (tdco > 127) {
+ netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n",
+ tdco);
+ tdco = 127;
+ }
+
+ reg_btp |= DBTP_TDC;
+ m_can_write(priv, M_CAN_TDCR,
+ tdco << TDCR_TDCO_SHIFT);
+ }
+
+ reg_btp |= (brp << DBTP_DBRP_SHIFT) |
+ (sjw << DBTP_DSJW_SHIFT) |
+ (tseg1 << DBTP_DTSEG1_SHIFT) |
+ (tseg2 << DBTP_DTSEG2_SHIFT);
+
m_can_write(priv, M_CAN_DBTP, reg_btp);
}
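A worked example of the TDCO computation, as a self-contained sketch. dbt->sample_point is expressed in tenths of a percent (750 == 75.0%), so (clock / 1000) * sample_point / bitrate yields the offset in CAN clock periods; the numbers below are illustrative.

#include <stdint.h>
#include <stdio.h>

static uint32_t compute_tdco(uint32_t clock_freq, uint32_t sample_point,
			     uint32_t bitrate)
{
	uint32_t tdco = (clock_freq / 1000) * sample_point / bitrate;

	if (tdco > 127)		/* TDCR.TDCO is a 7-bit field */
		tdco = 127;
	return tdco;
}

int main(void)
{
	/* 40 MHz CAN clock, 75.0% secondary sample point, 4 Mbit/s data
	 * bitrate: 40000 * 750 / 4000000 = 7 clock periods
	 */
	printf("tdco = %u\n", compute_tdco(40000000, 750, 4000000));
	return 0;
}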
@@ -1143,11 +1179,6 @@ static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
return 0;
}
-static void free_m_can_dev(struct net_device *dev)
-{
- free_candev(dev);
-}
-
/* Checks core release number of M_CAN
* returns 0 if an unsupported device is detected
* else it returns the release and step coded as:
@@ -1207,31 +1238,20 @@ static bool m_can_niso_supported(const struct m_can_priv *priv)
return !niso_timeout;
}
-static struct net_device *alloc_m_can_dev(struct platform_device *pdev,
- void __iomem *addr, u32 tx_fifo_size)
+static int m_can_dev_setup(struct platform_device *pdev, struct net_device *dev,
+ void __iomem *addr)
{
- struct net_device *dev;
struct m_can_priv *priv;
int m_can_version;
- unsigned int echo_buffer_count;
m_can_version = m_can_check_core_release(addr);
/* return if unsupported version */
if (!m_can_version) {
- dev = NULL;
- goto return_dev;
+ dev_err(&pdev->dev, "Unsupported version number: %2d",
+ m_can_version);
+ return -EINVAL;
}
- /* If version < 3.1.x, then only one echo buffer is used */
- echo_buffer_count = ((m_can_version == 30)
- ? 1U
- : (unsigned int)tx_fifo_size);
-
- dev = alloc_candev(sizeof(*priv), echo_buffer_count);
- if (!dev) {
- dev = NULL;
- goto return_dev;
- }
priv = netdev_priv(dev);
netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT);
@@ -1273,16 +1293,12 @@ static struct net_device *alloc_m_can_dev(struct platform_device *pdev,
: 0);
break;
default:
- /* Unsupported device: free candev */
- free_m_can_dev(dev);
dev_err(&pdev->dev, "Unsupported version number: %2d",
priv->version);
- dev = NULL;
- break;
+ return -EINVAL;
}
-return_dev:
- return dev;
+ return 0;
}
static int m_can_open(struct net_device *dev)
@@ -1574,37 +1590,26 @@ static int m_can_plat_probe(struct platform_device *pdev)
goto failed_ret;
}
- /* Enable clocks. Necessary to read Core Release in order to determine
- * M_CAN version
- */
- ret = clk_prepare_enable(hclk);
- if (ret)
- goto disable_hclk_ret;
-
- ret = clk_prepare_enable(cclk);
- if (ret)
- goto disable_cclk_ret;
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
addr = devm_ioremap_resource(&pdev->dev, res);
irq = platform_get_irq_byname(pdev, "int0");
if (IS_ERR(addr) || irq < 0) {
ret = -EINVAL;
- goto disable_cclk_ret;
+ goto failed_ret;
}
/* message ram could be shared */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
if (!res) {
ret = -ENODEV;
- goto disable_cclk_ret;
+ goto failed_ret;
}
mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!mram_addr) {
ret = -ENOMEM;
- goto disable_cclk_ret;
+ goto failed_ret;
}
/* get message ram configuration */
@@ -1613,7 +1618,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
sizeof(mram_config_vals) / 4);
if (ret) {
dev_err(&pdev->dev, "Could not get Message RAM configuration.");
- goto disable_cclk_ret;
+ goto failed_ret;
}
/* Get TX FIFO size
@@ -1622,11 +1627,12 @@ static int m_can_plat_probe(struct platform_device *pdev)
tx_fifo_size = mram_config_vals[7];
/* allocate the m_can device */
- dev = alloc_m_can_dev(pdev, addr, tx_fifo_size);
+ dev = alloc_candev(sizeof(*priv), tx_fifo_size);
if (!dev) {
ret = -ENOMEM;
- goto disable_cclk_ret;
+ goto failed_ret;
}
+
priv = netdev_priv(dev);
dev->irq = irq;
priv->device = &pdev->dev;
@@ -1640,30 +1646,42 @@ static int m_can_plat_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
+ /* Enable clocks. Necessary to read Core Release in order to determine
+ * M_CAN version
+ */
+ pm_runtime_enable(&pdev->dev);
+ ret = m_can_clk_start(priv);
+ if (ret)
+ goto pm_runtime_fail;
+
+ ret = m_can_dev_setup(pdev, dev, addr);
+ if (ret)
+ goto clk_disable;
+
ret = register_m_can_dev(dev);
if (ret) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
KBUILD_MODNAME, ret);
- goto failed_free_dev;
+ goto clk_disable;
}
devm_can_led_init(dev);
+ of_can_transceiver(dev);
+
dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n",
KBUILD_MODNAME, dev->irq, priv->version);
/* Probe finished
* Stop clocks. They will be reactivated once the M_CAN device is opened
*/
-
- goto disable_cclk_ret;
-
-failed_free_dev:
- free_m_can_dev(dev);
-disable_cclk_ret:
- clk_disable_unprepare(cclk);
-disable_hclk_ret:
- clk_disable_unprepare(hclk);
+clk_disable:
+ m_can_clk_stop(priv);
+pm_runtime_fail:
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ free_candev(dev);
+ }
failed_ret:
return ret;
}
@@ -1721,14 +1739,47 @@ static int m_can_plat_remove(struct platform_device *pdev)
struct net_device *dev = platform_get_drvdata(pdev);
unregister_m_can_dev(dev);
+
+ pm_runtime_disable(&pdev->dev);
+
platform_set_drvdata(pdev, NULL);
- free_m_can_dev(dev);
+ free_candev(dev);
return 0;
}
+static int __maybe_unused m_can_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct m_can_priv *priv = netdev_priv(ndev);
+
+ clk_disable_unprepare(priv->cclk);
+ clk_disable_unprepare(priv->hclk);
+
+ return 0;
+}
+
+static int __maybe_unused m_can_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct m_can_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = clk_prepare_enable(priv->hclk);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(priv->cclk);
+ if (err)
+ clk_disable_unprepare(priv->hclk);
+
+ return err;
+}
+
static const struct dev_pm_ops m_can_pmops = {
+ SET_RUNTIME_PM_OPS(m_can_runtime_suspend,
+ m_can_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
};
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index f394f77d7528..d94dae216820 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -256,7 +256,7 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
weight = offload->mb_first - offload->mb_last;
}
- return can_rx_offload_init_queue(dev, offload, weight);;
+ return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 5d067c1b987f..89d60d8e467c 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -508,7 +508,7 @@ static void slc_sync(void)
}
/* Find a free SLCAN channel, and link in this `tty' line. */
-static struct slcan *slc_alloc(dev_t line)
+static struct slcan *slc_alloc(void)
{
int i;
char name[IFNAMSIZ];
@@ -583,7 +583,7 @@ static int slcan_open(struct tty_struct *tty)
/* OK. Find a free SLCAN channel to use. */
err = -ENFILE;
- sl = slc_alloc(tty_devnum(tty));
+ sl = slc_alloc();
if (sl == NULL)
goto err_exit;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index f3f05fea8e1f..98d118b3aaf4 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -612,8 +612,7 @@ static int mcp251x_do_set_bittiming(struct net_device *net)
return 0;
}
-static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
- struct spi_device *spi)
+static int mcp251x_setup(struct net_device *net, struct spi_device *spi)
{
mcp251x_do_set_bittiming(net);
@@ -775,7 +774,7 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
mutex_lock(&priv->mcp_lock);
if (priv->after_suspend) {
mcp251x_hw_reset(spi);
- mcp251x_setup(net, priv, spi);
+ mcp251x_setup(net, spi);
if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
mcp251x_set_normal_mode(spi);
} else if (priv->after_suspend & AFTER_SUSPEND_UP) {
@@ -971,7 +970,7 @@ static int mcp251x_open(struct net_device *net)
mcp251x_open_clean(net);
goto open_unlock;
}
- ret = mcp251x_setup(net, priv, spi);
+ ret = mcp251x_setup(net, spi);
if (ret) {
mcp251x_open_clean(net);
goto open_unlock;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index b00358297424..12ff0020ecd6 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -395,6 +395,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
if (dev->can.state == CAN_STATE_ERROR_WARNING ||
dev->can.state == CAN_STATE_ERROR_PASSIVE) {
+ cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (txerr > rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
}
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 68ac3e88a8ce..17c21ad3b95e 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -243,7 +243,7 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
return NULL;
}
-static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
+static int gs_cmd_reset(struct gs_can *gsdev)
{
struct gs_device_mode *dm;
struct usb_interface *intf = gsdev->iface;
@@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
rc);
- return rc;
+ return (rc > 0) ? 0 : rc;
}
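The one-line fix above exists because the USB helper returns the number of bytes transferred on success, while a netdev callback must return 0 or a negative errno. A minimal sketch of the normalization, with fake_control_msg() standing in for usb_control_msg():

#include <stdio.h>

static int fake_control_msg(int len)
{
	return len;	/* success: bytes transferred, like usb_control_msg() */
}

static int set_bittiming(int len)
{
	int rc = fake_control_msg(len);

	return (rc > 0) ? 0 : rc;	/* positive byte count -> 0 */
}

int main(void)
{
	printf("callback returns %d\n", set_bittiming(20));
	return 0;
}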
static void gs_usb_xmit_callback(struct urb *urb)
@@ -709,7 +709,7 @@ static int gs_can_close(struct net_device *netdev)
atomic_set(&dev->active_tx_urbs, 0);
/* reset the device */
- rc = gs_cmd_reset(parent, dev);
+ rc = gs_cmd_reset(dev);
if (rc < 0)
netdev_warn(netdev, "Couldn't shutdown device (err=%d)", rc);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 25a9b79cc42d..f530a80f5051 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -408,7 +408,6 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
{
struct sk_buff *skb;
struct can_frame *cf;
- struct timeval tv;
enum can_state new_state;
/* ignore this error until 1st ts received */
@@ -525,8 +524,8 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
- peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
- hwts->hwtstamp = timeval_to_ktime(tv);
+ peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16,
+ &hwts->hwtstamp);
}
mc->netdev->stats.rx_packets++;
@@ -610,7 +609,6 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
u8 rec_len = status_len & PCAN_USB_STATUSLEN_DLC;
struct sk_buff *skb;
struct can_frame *cf;
- struct timeval tv;
struct skb_shared_hwtstamps *hwts;
skb = alloc_can_skb(mc->netdev, &cf);
@@ -658,9 +656,8 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
}
/* convert timestamp into kernel time */
- peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
hwts = skb_hwtstamps(skb);
- hwts->hwtstamp = timeval_to_ktime(tv);
+ peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp);
/* update statistics */
mc->netdev->stats.rx_packets++;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 1ca76e03e965..50e911428638 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -80,21 +80,6 @@ void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
}
}
-static void peak_usb_add_us(struct timeval *tv, u32 delta_us)
-{
- /* number of s. to add to final time */
- u32 delta_s = delta_us / 1000000;
-
- delta_us -= delta_s * 1000000;
-
- tv->tv_usec += delta_us;
- if (tv->tv_usec >= 1000000) {
- tv->tv_usec -= 1000000;
- delta_s++;
- }
- tv->tv_sec += delta_s;
-}
-
/*
* sometimes, another now may be more recent than current one...
*/
@@ -103,7 +88,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
time_ref->ts_dev_2 = ts_now;
/* should wait at least two passes before computing */
- if (time_ref->tv_host.tv_sec > 0) {
+ if (ktime_to_ns(time_ref->tv_host) > 0) {
u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1;
if (time_ref->ts_dev_2 < time_ref->ts_dev_1)
@@ -118,26 +103,26 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
*/
void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
{
- if (time_ref->tv_host_0.tv_sec == 0) {
+ if (ktime_to_ns(time_ref->tv_host_0) == 0) {
/* use monotonic clock to correctly compute further deltas */
- time_ref->tv_host_0 = ktime_to_timeval(ktime_get());
- time_ref->tv_host.tv_sec = 0;
+ time_ref->tv_host_0 = ktime_get();
+ time_ref->tv_host = ktime_set(0, 0);
} else {
/*
- * delta_us should not be >= 2^32 => delta_s should be < 4294
+ * delta_us should not be >= 2^32 => delta should be < 4294s
* handle 32-bit wrapping here: if the count of seconds reaches 4200,
* reset counters and change time base
*/
- if (time_ref->tv_host.tv_sec != 0) {
- u32 delta_s = time_ref->tv_host.tv_sec
- - time_ref->tv_host_0.tv_sec;
- if (delta_s > 4200) {
+ if (ktime_to_ns(time_ref->tv_host)) {
+ ktime_t delta = ktime_sub(time_ref->tv_host,
+ time_ref->tv_host_0);
+ if (ktime_to_ns(delta) > (4200ull * NSEC_PER_SEC)) {
time_ref->tv_host_0 = time_ref->tv_host;
time_ref->ts_total = 0;
}
}
- time_ref->tv_host = ktime_to_timeval(ktime_get());
+ time_ref->tv_host = ktime_get();
time_ref->tick_count++;
}
@@ -146,13 +131,12 @@ void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
}
/*
- * compute timeval according to current ts and time_ref data
+ * compute time according to current ts and time_ref data
*/
-void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
- struct timeval *tv)
+void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
{
- /* protect from getting timeval before setting now */
- if (time_ref->tv_host.tv_sec > 0) {
+ /* protect from getting time before setting now */
+ if (ktime_to_ns(time_ref->tv_host)) {
u64 delta_us;
delta_us = ts - time_ref->ts_dev_2;
@@ -164,10 +148,9 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
delta_us *= time_ref->adapter->us_per_ts_scale;
delta_us >>= time_ref->adapter->us_per_ts_shift;
- *tv = time_ref->tv_host_0;
- peak_usb_add_us(tv, (u32)delta_us);
+ *time = ktime_add_us(time_ref->tv_host_0, delta_us);
} else {
- *tv = ktime_to_timeval(ktime_get());
+ *time = ktime_get();
}
}
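With a 64-bit nanosecond ktime_t, "base time plus device delta" collapses into one addition, which is what made the old peak_usb_add_us() carry handling removable. A userspace sketch under that assumption; the values are made up.

#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;	/* nanoseconds, as in the kernel */
#define NSEC_PER_USEC 1000LL

static ktime_t ktime_add_us(ktime_t t, uint64_t delta_us)
{
	return t + (ktime_t)(delta_us * NSEC_PER_USEC);
}

int main(void)
{
	ktime_t host0 = 1500000000LL;	/* made-up base time, ns */
	uint64_t delta_us = 2500;	/* device timestamp delta, us */

	printf("event time: %lld ns\n",
	       (long long)ktime_add_us(host0, delta_us));
	return 0;
}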
@@ -175,13 +158,11 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
* post received skb after having set any hw timestamp
*/
int peak_usb_netif_rx(struct sk_buff *skb,
- struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high)
+ struct peak_time_ref *time_ref, u32 ts_low)
{
struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
- struct timeval tv;
- peak_usb_get_ts_tv(time_ref, ts_low, &tv);
- hwts->hwtstamp = timeval_to_ktime(tv);
+ peak_usb_get_ts_time(time_ref, ts_low, &hwts->hwtstamp);
return netif_rx(skb);
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index c01316cac354..fb23489e1cb8 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -96,7 +96,7 @@ extern const struct peak_usb_adapter pcan_usb_pro_fd;
extern const struct peak_usb_adapter pcan_usb_x6;
struct peak_time_ref {
- struct timeval tv_host_0, tv_host;
+ ktime_t tv_host_0, tv_host;
u32 ts_dev_1, ts_dev_2;
u64 ts_total;
u32 tick_count;
@@ -151,10 +151,9 @@ void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
const struct peak_usb_adapter *adapter);
void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
-void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
- struct timeval *tv);
+void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv);
int peak_usb_netif_rx(struct sk_buff *skb,
- struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high);
+ struct peak_time_ref *time_ref, u32 ts_low);
void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 7ccdc3e30c98..dd161c5eea8e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
int err = 0;
u8 *packet_ptr;
- int i, n = 1, packet_len;
+ int packet_len;
ptrdiff_t cmd_len;
/* usb device unregistered? */
@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
}
packet_ptr = cmd_head;
+ packet_len = cmd_len;
/* firmware is not able to re-assemble 512 bytes buffer in full-speed */
- if ((dev->udev->speed != USB_SPEED_HIGH) &&
- (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
- packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
- n += cmd_len / packet_len;
- } else {
- packet_len = cmd_len;
- }
+ if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
+ packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
- for (i = 0; i < n; i++) {
+ do {
err = usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev,
PCAN_USBPRO_EP_CMDOUT),
@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
}
packet_ptr += packet_len;
- }
+ cmd_len -= packet_len;
+
+ if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
+ packet_len = cmd_len;
+
+ } while (packet_len > 0);
return err;
}
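A compact sketch of the rewritten transmit loop: send the buffer in packet_len-sized pieces, shrink the final piece, and stop once a zero-length chunk is reached. send_chunk() stands in for usb_bulk_msg(), and the sizes are illustrative.

#include <stdio.h>

#define PKT_SIZE 64	/* stand-in for PCAN_UFD_LOSPD_PKT_SIZE */

static void send_chunk(const char *ptr, int len)
{
	printf("sending %d bytes\n", len);
}

int main(void)
{
	char buf[150] = { 0 };
	const char *ptr = buf;
	int cmd_len = (int)sizeof(buf);
	int packet_len = cmd_len;
	int full_speed = 1;	/* as if udev->speed != USB_SPEED_HIGH */

	if (full_speed && packet_len > PKT_SIZE)
		packet_len = PKT_SIZE;

	do {
		send_chunk(ptr, packet_len);	/* prints 64, 64, 22 */
		ptr += packet_len;
		cmd_len -= packet_len;
		if (cmd_len < PKT_SIZE)
			packet_len = cmd_len;
	} while (packet_len > 0);

	return 0;
}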
@@ -513,8 +514,7 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
else
memcpy(cfd->data, rm->d, cfd->len);
- peak_usb_netif_rx(skb, &usb_if->time_ref,
- le32_to_cpu(rm->ts_low), le32_to_cpu(rm->ts_high));
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cfd->len;
@@ -574,8 +574,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
if (!skb)
return -ENOMEM;
- peak_usb_netif_rx(skb, &usb_if->time_ref,
- le32_to_cpu(sm->ts_low), le32_to_cpu(sm->ts_high));
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cf->can_dlc;
@@ -617,8 +616,7 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
- peak_usb_netif_rx(skb, &usb_if->time_ref,
- le32_to_cpu(ov->ts_low), le32_to_cpu(ov->ts_high));
+ peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(ov->ts_low));
netdev->stats.rx_over_errors++;
netdev->stats.rx_errors++;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index bbdd6058cd2f..0105fbfea273 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -531,7 +531,6 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
struct net_device *netdev = dev->netdev;
struct can_frame *can_frame;
struct sk_buff *skb;
- struct timeval tv;
struct skb_shared_hwtstamps *hwts;
skb = alloc_can_skb(netdev, &can_frame);
@@ -549,9 +548,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
else
memcpy(can_frame->data, rx->data, can_frame->can_dlc);
- peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv);
hwts = skb_hwtstamps(skb);
- hwts->hwtstamp = timeval_to_ktime(tv);
+ peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(rx->ts32),
+ &hwts->hwtstamp);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += can_frame->can_dlc;
@@ -571,7 +570,6 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
u8 err_mask = 0;
struct sk_buff *skb;
- struct timeval tv;
struct skb_shared_hwtstamps *hwts;
/* nothing should be sent while in BUS_OFF state */
@@ -667,9 +665,8 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
dev->can.state = new_state;
- peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
hwts = skb_hwtstamps(skb);
- hwts->hwtstamp = timeval_to_ktime(tv);
+ peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += can_frame->can_dlc;
netif_rx(skb);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index a8cb33264ff1..c2b04f505e16 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -61,7 +61,7 @@ MODULE_ALIAS_RTNL_LINK(DRV_NAME);
/*
* CAN test feature:
* Enable the echo on driver level for testing the CAN core echo modes.
- * See Documentation/networking/can.txt for details.
+ * See Documentation/networking/can.rst for details.
*/
static bool echo; /* echo testing. Default: 0 (Off) */
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 8404e8852a0f..ed6828821fbd 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
tbp = peer_tb;
}
- if (tbp[IFLA_IFNAME]) {
+ if (ifmp && tbp[IFLA_IFNAME]) {
nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
name_assign_type = NET_NAME_USER;
} else {
@@ -227,10 +227,8 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
netif_carrier_off(peer);
err = rtnl_configure_link(peer, ifmp);
- if (err < 0) {
- unregister_netdevice(peer);
- return err;
- }
+ if (err < 0)
+ goto unregister_network_device;
/* register first device */
if (tb[IFLA_IFNAME])
@@ -239,10 +237,8 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
err = register_netdevice(dev);
- if (err < 0) {
- unregister_netdevice(peer);
- return err;
- }
+ if (err < 0)
+ goto unregister_network_device;
netif_carrier_off(dev);
@@ -254,6 +250,10 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
rcu_assign_pointer(priv->peer, dev);
return 0;
+
+unregister_network_device:
+ unregister_netdevice(peer);
+ return err;
}
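The change above is a straight goto consolidation: both failure points now funnel into one unregister label instead of duplicating the cleanup. A reduced sketch of the pattern with stubbed calls:

#include <stdio.h>

static int configure_link(int fail) { return fail ? -1 : 0; }
static int register_dev(int fail) { return fail ? -1 : 0; }
static void unregister_peer(void) { printf("peer unregistered\n"); }

static int newlink(int fail_cfg, int fail_reg)
{
	int err;

	err = configure_link(fail_cfg);
	if (err < 0)
		goto unregister_network_device;

	err = register_dev(fail_reg);
	if (err < 0)
		goto unregister_network_device;

	return 0;

unregister_network_device:
	unregister_peer();	/* single cleanup path for both errors */
	return err;
}

int main(void)
{
	printf("ok: %d\n", newlink(0, 0));
	printf("register failure: %d\n", newlink(0, 1));
	return 0;
}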
static void vxcan_dellink(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 83a9bc892a3b..2b81b97e994f 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -33,7 +33,7 @@ config NET_DSA_MT7530
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
- depends on NET_DSA
+ depends on NET_DSA && NET_DSA_LEGACY
select NET_DSA_TAG_TRAILER
---help---
This enables support for the Marvell 88E6060 ethernet switch
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index f5a8dd96fd75..db830a1141d9 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1029,8 +1029,7 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
EXPORT_SYMBOL(b53_vlan_filtering);
int b53_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct b53_device *dev = ds->priv;
@@ -1047,8 +1046,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_vlan_prepare);
void b53_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1495,15 +1493,17 @@ static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
return false;
}
-static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
- int port)
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
- /* Older models support a different tag format that we do not
- * support in net/dsa/tag_brcm.c yet.
+ /* Older models (5325, 5365) support a different tag format that we do
+ * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
+ * mode to be turned on which means we need to specifically manage ARL
+ * misses on multicast addresses (TBD).
*/
- if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port))
+ if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
+ !b53_can_enable_brcm_tags(ds, port))
return DSA_TAG_PROTO_NONE;
/* Broadcom BCM58xx chips have a flow accelerator on Port 8
@@ -1514,6 +1514,7 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_BRCM;
}
+EXPORT_SYMBOL(b53_get_tag_protocol);
int b53_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index daaaa1ecb996..d954cf36ecd8 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -295,11 +295,9 @@ void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
int b53_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans);
+ const struct switchdev_obj_port_vlan *vlan);
void b53_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans);
+ const struct switchdev_obj_port_vlan *vlan);
int b53_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
int b53_fdb_add(struct dsa_switch *ds, int port,
@@ -310,6 +308,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
int b53_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port);
void b53_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index b62d47210db8..0378eded31f2 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -34,12 +34,6 @@
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"
-static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds,
- int port)
-{
- return DSA_TAG_PROTO_BRCM;
-}
-
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@ -860,7 +854,7 @@ static const struct b53_io_ops bcm_sf2_io_ops = {
};
static const struct dsa_switch_ops bcm_sf2_ops = {
- .get_tag_protocol = bcm_sf2_sw_get_tag_protocol,
+ .get_tag_protocol = b53_get_tag_protocol,
.setup = bcm_sf2_sw_setup,
.get_strings = b53_get_strings,
.get_ethtool_stats = b53_get_ethtool_stats,
@@ -954,6 +948,9 @@ static const struct of_device_id bcm_sf2_of_match[] = {
{ .compatible = "brcm,bcm7278-switch-v4.0",
.data = &bcm_sf2_7278_data
},
+ { .compatible = "brcm,bcm7278-switch-v4.8",
+ .data = &bcm_sf2_7278_data
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index bb71d3d6f65b..7aa84ee4e771 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -174,9 +174,9 @@ static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static int dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+static int
+dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct dsa_loop_priv *ps = ds->priv;
struct mii_bus *bus = ps->bus;
@@ -193,8 +193,7 @@ static int dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
}
static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index b24566bb74d2..6171c0853ff1 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -249,6 +249,28 @@ static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg)
return -EIO;
}
+static int lan9303_read_wait(struct lan9303 *chip, int offset, u32 mask)
+{
+ int i;
+
+ for (i = 0; i < 25; i++) {
+ u32 reg;
+ int ret;
+
+ ret = lan9303_read(chip->regmap, offset, &reg);
+ if (ret) {
+ dev_err(chip->dev, "%s failed to read offset %d: %d\n",
+ __func__, offset, ret);
+ return ret;
+ }
+ if (!(reg & mask))
+ return 0;
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
static int lan9303_virt_phy_reg_read(struct lan9303 *chip, int regnum)
{
int ret;
@@ -274,22 +296,8 @@ static int lan9303_virt_phy_reg_write(struct lan9303 *chip, int regnum, u16 val)
static int lan9303_indirect_phy_wait_for_completion(struct lan9303 *chip)
{
- int ret, i;
- u32 reg;
-
- for (i = 0; i < 25; i++) {
- ret = lan9303_read(chip->regmap, LAN9303_PMI_ACCESS, &reg);
- if (ret) {
- dev_err(chip->dev,
- "Failed to read pmi access status: %d\n", ret);
- return ret;
- }
- if (!(reg & LAN9303_PMI_ACCESS_MII_BUSY))
- return 0;
- usleep_range(1000, 2000);
- }
-
- return -EIO;
+ return lan9303_read_wait(chip, LAN9303_PMI_ACCESS,
+ LAN9303_PMI_ACCESS_MII_BUSY);
}
static int lan9303_indirect_phy_read(struct lan9303 *chip, int addr, int regnum)
@@ -366,22 +374,8 @@ EXPORT_SYMBOL_GPL(lan9303_indirect_phy_ops);
static int lan9303_switch_wait_for_completion(struct lan9303 *chip)
{
- int ret, i;
- u32 reg;
-
- for (i = 0; i < 25; i++) {
- ret = lan9303_read(chip->regmap, LAN9303_SWITCH_CSR_CMD, &reg);
- if (ret) {
- dev_err(chip->dev,
- "Failed to read csr command status: %d\n", ret);
- return ret;
- }
- if (!(reg & LAN9303_SWITCH_CSR_CMD_BUSY))
- return 0;
- usleep_range(1000, 2000);
- }
-
- return -EIO;
+ return lan9303_read_wait(chip, LAN9303_SWITCH_CSR_CMD,
+ LAN9303_SWITCH_CSR_CMD_BUSY);
}
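Both wait loops above now share one helper; the shape is the classic bounded busy-poll. A self-contained sketch with a fake register that reports busy for three reads (the retry count mirrors the helper, the rest is illustrative):

#include <stdint.h>
#include <stdio.h>

#define BUSY_BIT 0x1u

static unsigned int busy_reads = 3;	/* fake hardware: busy for 3 reads */

static int read_reg(uint32_t *reg)
{
	*reg = busy_reads ? BUSY_BIT : 0;
	if (busy_reads)
		busy_reads--;
	return 0;
}

static int read_wait(uint32_t mask)
{
	int i;

	for (i = 0; i < 25; i++) {
		uint32_t reg;

		if (read_reg(&reg))
			return -1;	/* bus error from the accessor */
		if (!(reg & mask))
			return 0;	/* busy bit cleared */
		/* the driver sleeps 1-2 ms here via usleep_range() */
	}
	return -2;	/* like -ETIMEDOUT */
}

int main(void)
{
	printf("read_wait: %d\n", read_wait(BUSY_BIT));
	return 0;
}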
static int lan9303_write_switch_reg(struct lan9303 *chip, u16 regnum, u32 val)
@@ -485,7 +479,8 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip)
{
int reg;
- /* depending on the 'phy_addr_sel_strap' setting, the three phys are
+ /* Calculate chip->phy_addr_base:
+ * Depending on the 'phy_addr_sel_strap' setting, the three phys are
* using IDs 0-1-2 or IDs 1-2-3. We cannot read back the
* 'phy_addr_sel_strap' setting directly, so we need a test to
* determine which configuration is active:
@@ -500,13 +495,10 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip)
return reg;
}
- if ((reg != 0) && (reg != 0xffff))
- chip->phy_addr_sel_strap = 1;
- else
- chip->phy_addr_sel_strap = 0;
+ chip->phy_addr_base = reg != 0 && reg != 0xffff;
dev_dbg(chip->dev, "Phy setup '%s' detected\n",
- chip->phy_addr_sel_strap ? "1-2-3" : "0-1-2");
+ chip->phy_addr_base ? "1-2-3" : "0-1-2");
return 0;
}
@@ -546,20 +538,19 @@ lan9303_alr_cache_find_mac(struct lan9303 *chip, const u8 *mac_addr)
return NULL;
}
-/* Wait a while until mask & reg == value. Otherwise return timeout. */
-static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno,
- int mask, char value)
+static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno, u32 mask)
{
int i;
- for (i = 0; i < 0x1000; i++) {
+ for (i = 0; i < 25; i++) {
u32 reg;
lan9303_read_switch_reg(chip, regno, &reg);
- if ((reg & mask) == value)
+ if (!(reg & mask))
return 0;
usleep_range(1000, 2000);
}
+
return -ETIMEDOUT;
}
@@ -569,8 +560,7 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_WR_DAT_1, dat1);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
LAN9303_ALR_CMD_MAKE_ENTRY);
- lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND,
- 0);
+ lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
return 0;
@@ -583,6 +573,7 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
{
int i;
+ mutex_lock(&chip->alr_mutex);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
LAN9303_ALR_CMD_GET_FIRST);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
@@ -606,6 +597,7 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
LAN9303_ALR_CMD_GET_NEXT);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
}
+ mutex_unlock(&chip->alr_mutex);
}
static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
@@ -694,16 +686,20 @@ static int lan9303_alr_add_port(struct lan9303 *chip, const u8 *mac, int port,
{
struct lan9303_alr_cache_entry *entr;
+ mutex_lock(&chip->alr_mutex);
entr = lan9303_alr_cache_find_mac(chip, mac);
if (!entr) { /*New entry */
entr = lan9303_alr_cache_find_free(chip);
- if (!entr)
+ if (!entr) {
+ mutex_unlock(&chip->alr_mutex);
return -ENOSPC;
+ }
ether_addr_copy(entr->mac_addr, mac);
}
entr->port_map |= BIT(port);
entr->stp_override = stp_override;
lan9303_alr_set_entry(chip, mac, entr->port_map, stp_override);
+ mutex_unlock(&chip->alr_mutex);
return 0;
}
@@ -713,15 +709,18 @@ static int lan9303_alr_del_port(struct lan9303 *chip, const u8 *mac, int port)
{
struct lan9303_alr_cache_entry *entr;
+ mutex_lock(&chip->alr_mutex);
entr = lan9303_alr_cache_find_mac(chip, mac);
if (!entr)
- return 0; /* no static entry found */
+ goto out; /* no static entry found */
entr->port_map &= ~BIT(port);
if (entr->port_map == 0) /* zero means its free again */
eth_zero_addr(entr->mac_addr);
lan9303_alr_set_entry(chip, mac, entr->port_map, entr->stp_override);
+out:
+ mutex_unlock(&chip->alr_mutex);
return 0;
}
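The new alr_mutex serializes every lookup and update of the shared ALR cache, with early exits funneled through an unlock label as in lan9303_alr_del_port() above. A sketch of the pattern, using pthreads as a stand-in for the kernel mutex API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t alr_mutex = PTHREAD_MUTEX_INITIALIZER;
static int cache_port_map;	/* toy stand-in for one cache entry */

static int alr_del_port(int port)
{
	pthread_mutex_lock(&alr_mutex);
	if (!cache_port_map)
		goto out;	/* no entry found: still unlock */

	cache_port_map &= ~(1 << port);
out:
	pthread_mutex_unlock(&alr_mutex);
	return 0;
}

int main(void)
{
	cache_port_map = 0x6;	/* ports 1 and 2 */
	alr_del_port(1);
	printf("port map now 0x%x\n", cache_port_map);	/* 0x4 */
	return 0;
}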
@@ -818,18 +817,16 @@ static void lan9303_bridge_ports(struct lan9303 *chip)
lan9303_alr_add_port(chip, eth_stp_addr, 0, true);
}
-static int lan9303_handle_reset(struct lan9303 *chip)
+static void lan9303_handle_reset(struct lan9303 *chip)
{
if (!chip->reset_gpio)
- return 0;
+ return;
if (chip->reset_duration != 0)
msleep(chip->reset_duration);
/* release (deassert) reset and activate the device */
gpiod_set_value_cansleep(chip->reset_gpio, 0);
-
- return 0;
}
/* stop processing packets for all ports */
@@ -866,7 +863,7 @@ static int lan9303_check_device(struct lan9303 *chip)
if ((reg >> 16) != LAN9303_CHIP_ID) {
dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n",
reg >> 16);
- return ret;
+ return -ENODEV;
}
/* The default state of the LAN9303 device is to forward packets between
@@ -1018,7 +1015,7 @@ static int lan9303_get_sset_count(struct dsa_switch *ds)
static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
struct lan9303 *chip = ds->priv;
- int phy_base = chip->phy_addr_sel_strap;
+ int phy_base = chip->phy_addr_base;
if (phy == phy_base)
return lan9303_virt_phy_reg_read(chip, regnum);
@@ -1032,7 +1029,7 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
u16 val)
{
struct lan9303 *chip = ds->priv;
- int phy_base = chip->phy_addr_sel_strap;
+ int phy_base = chip->phy_addr_base;
if (phy == phy_base)
return lan9303_virt_phy_reg_write(chip, regnum, val);
@@ -1069,7 +1066,7 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
res = lan9303_phy_write(ds, port, MII_BMCR, ctl);
- if (port == chip->phy_addr_sel_strap) {
+ if (port == chip->phy_addr_base) {
/* Virtual Phy: Remove Turbo 200Mbit mode */
lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl);
@@ -1093,8 +1090,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port,
struct lan9303 *chip = ds->priv;
lan9303_disable_processing_port(chip, port);
- lan9303_phy_write(ds, chip->phy_addr_sel_strap + port,
- MII_BMCR, BMCR_PDOWN);
+ lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
}
static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
@@ -1217,8 +1213,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
}
static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct lan9303 *chip = ds->priv;
@@ -1235,8 +1230,7 @@ static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
}
static void lan9303_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct lan9303 *chip = ds->priv;
@@ -1284,26 +1278,31 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
static int lan9303_register_switch(struct lan9303 *chip)
{
+ int base;
+
chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS);
if (!chip->ds)
return -ENOMEM;
chip->ds->priv = chip;
chip->ds->ops = &lan9303_switch_ops;
- chip->ds->phys_mii_mask = chip->phy_addr_sel_strap ? 0xe : 0x7;
+ base = chip->phy_addr_base;
+ chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
return dsa_register_switch(chip->ds);
}
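The GENMASK() form replaces the two hard-coded masks with arithmetic over the detected PHY base address. A quick check of the values it produces; the macro expansion below matches the kernel's for these small arguments.

#include <stdio.h>

#define GENMASK(h, l) (((1u << ((h) - (l) + 1)) - 1) << (l))
#define NUM_PORTS 3

int main(void)
{
	int base;

	/* base 0 -> 0x7 (PHYs 0-1-2), base 1 -> 0xe (PHYs 1-2-3) */
	for (base = 0; base <= 1; base++)
		printf("base %d -> phys_mii_mask 0x%x\n",
		       base, GENMASK(NUM_PORTS - 1 + base, base));
	return 0;
}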
-static void lan9303_probe_reset_gpio(struct lan9303 *chip,
+static int lan9303_probe_reset_gpio(struct lan9303 *chip,
struct device_node *np)
{
chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
GPIOD_OUT_LOW);
+ if (IS_ERR(chip->reset_gpio))
+ return PTR_ERR(chip->reset_gpio);
- if (IS_ERR(chip->reset_gpio)) {
+ if (!chip->reset_gpio) {
dev_dbg(chip->dev, "No reset GPIO defined\n");
- return;
+ return 0;
}
chip->reset_duration = 200;
@@ -1318,6 +1317,8 @@ static void lan9303_probe_reset_gpio(struct lan9303 *chip,
/* A sane reset duration should not be longer than 1s */
if (chip->reset_duration > 1000)
chip->reset_duration = 1000;
+
+ return 0;
}
int lan9303_probe(struct lan9303 *chip, struct device_node *np)
@@ -1325,13 +1326,14 @@ int lan9303_probe(struct lan9303 *chip, struct device_node *np)
int ret;
mutex_init(&chip->indirect_mutex);
+ mutex_init(&chip->alr_mutex);
- lan9303_probe_reset_gpio(chip, np);
-
- ret = lan9303_handle_reset(chip);
+ ret = lan9303_probe_reset_gpio(chip, np);
if (ret)
return ret;
+ lan9303_handle_reset(chip);
+
ret = lan9303_check_device(chip);
if (ret)
return ret;
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index b5be93a1e0df..663b0d5b982b 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -559,8 +559,7 @@ static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag)
}
static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
/* nothing needed */
@@ -568,8 +567,7 @@ static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
}
static void ksz_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct ksz_device *dev = ds->priv;
u32 vlan_table[3];
@@ -858,16 +856,14 @@ exit:
}
static int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
/* nothing to do */
return 0;
}
static void ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ksz_device *dev = ds->priv;
u32 static_table[4];
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 2820d69810b3..8a0bb000d056 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -805,6 +805,69 @@ mt7530_port_bridge_join(struct dsa_switch *ds, int port,
}
static void
+mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+ bool all_user_ports_removed = true;
+ int i;
+
+ /* When a port is removed from the bridge, it is set back to its
+ * default state from initial boot, i.e. a VLAN-unaware port.
+ */
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_MATRIX_MODE);
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+ VLAN_ATTR(MT7530_VLAN_TRANSPARENT));
+
+ priv->ports[port].vlan_filtering = false;
+
+ for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ if (dsa_is_user_port(ds, i) &&
+ priv->ports[i].vlan_filtering) {
+ all_user_ports_removed = false;
+ break;
+ }
+ }
+
+ /* The CPU port does the same until all user ports belonging to it
+ * have left VLAN filtering mode.
+ */
+ if (all_user_ports_removed) {
+ mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT),
+ PCR_MATRIX(dsa_user_ports(priv->ds)));
+ mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT),
+ PORT_SPEC_TAG);
+ }
+}
+
+static void
+mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ /* The real fabric path is decided by the membership in the VLAN
+ * table entry. Setting PCR_MATRIX to ALL_MEMBERS here means a
+ * potential VLAN can consist of any subset of all ports.
+ */
+ mt7530_rmw(priv, MT7530_PCR_P(port),
+ PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
+
+ /* Trapping the port into security mode allows packet forwarding
+ * through VLAN table lookups.
+ */
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+ MT7530_PORT_SECURITY_MODE);
+
+ /* Set the port as a user port so it can recognize the VID of
+ * incoming packets before fetching the entry in the VLAN table.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER));
+}
+
+static void
mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
@@ -817,8 +880,11 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
/* Remove this port from the port matrix of the other ports
* in the same bridge. If the port is disabled, port matrix
* is kept and not set up until the port becomes enabled.
+ * Also, another port's port matrix must not be broken while that
+ * port is still VLAN-aware.
*/
- if (dsa_is_user_port(ds, i) && i != port) {
+ if (!priv->ports[i].vlan_filtering &&
+ dsa_is_user_port(ds, i) && i != port) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)
@@ -836,6 +902,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
PCR_MATRIX(BIT(MT7530_CPU_PORT)));
priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));
+ mt7530_port_set_vlan_unaware(ds, port);
+
mutex_unlock(&priv->reg_mutex);
}
@@ -906,6 +974,220 @@ err:
return 0;
}
+static int
+mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
+{
+ struct mt7530_dummy_poll p;
+ u32 val;
+ int ret;
+
+ val = VTCR_BUSY | VTCR_FUNC(cmd) | vid;
+ mt7530_write(priv, MT7530_VTCR, val);
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR);
+ ret = readx_poll_timeout(_mt7530_read, &p, val,
+ !(val & VTCR_BUSY), 20, 20000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ return ret;
+ }
+
+ val = mt7530_read(priv, MT7530_VTCR);
+ if (val & VTCR_INVALID) {
+ dev_err(priv->dev, "read VTCR invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ priv->ports[port].vlan_filtering = vlan_filtering;
+
+ if (vlan_filtering) {
+ /* The port is kept VLAN-unaware when the bridge is set up
+ * without vlan_filtering. Otherwise, the port and the
+ * corresponding CPU port must be set up to become VLAN-aware.
+ */
+ mt7530_port_set_vlan_aware(ds, port);
+ mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT);
+ }
+
+ return 0;
+}
+
+static int
+mt7530_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ /* nothing needed */
+
+ return 0;
+}
+
+static void
+mt7530_hw_vlan_add(struct mt7530_priv *priv,
+ struct mt7530_hw_vlan_entry *entry)
+{
+ u8 new_members;
+ u32 val;
+
+ new_members = entry->old_members | BIT(entry->port) |
+ BIT(MT7530_CPU_PORT);
+
+ /* Validate the entry with independent learning, create an egress tag
+ * per VLAN, and join the port as one of the members.
+ */
+ val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | VLAN_VALID;
+ mt7530_write(priv, MT7530_VAWD1, val);
+
+ /* Decide whether to tag outgoing packets from the port inside the
+ * VLAN.
+ */
+ val = entry->untagged ? MT7530_VLAN_EGRESS_UNTAG :
+ MT7530_VLAN_EGRESS_TAG;
+ mt7530_rmw(priv, MT7530_VAWD2,
+ ETAG_CTRL_P_MASK(entry->port),
+ ETAG_CTRL_P(entry->port, val));
+
+ /* The CPU port is always treated as a tagged port since it serves
+ * more than one VLAN, and it uses the stack egress mode so that VLAN
+ * tags are appended after the hardware special tag used as the DSA
+ * tag.
+ */
+ mt7530_rmw(priv, MT7530_VAWD2,
+ ETAG_CTRL_P_MASK(MT7530_CPU_PORT),
+ ETAG_CTRL_P(MT7530_CPU_PORT,
+ MT7530_VLAN_EGRESS_STACK));
+}
+
+static void
+mt7530_hw_vlan_del(struct mt7530_priv *priv,
+ struct mt7530_hw_vlan_entry *entry)
+{
+ u8 new_members;
+ u32 val;
+
+ new_members = entry->old_members & ~BIT(entry->port);
+
+ val = mt7530_read(priv, MT7530_VAWD1);
+ if (!(val & VLAN_VALID)) {
+ dev_err(priv->dev,
+ "Cannot be deleted due to invalid entry\n");
+ return;
+ }
+
+	/* If any member other than the CPU port is still present in the
+	 * VLAN, keep the entry valid. Otherwise, disable the entry.
+ */
+ if (new_members && new_members != BIT(MT7530_CPU_PORT)) {
+ val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) |
+ VLAN_VALID;
+ mt7530_write(priv, MT7530_VAWD1, val);
+ } else {
+ mt7530_write(priv, MT7530_VAWD1, 0);
+ mt7530_write(priv, MT7530_VAWD2, 0);
+ }
+}
+
+static void
+mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid,
+ struct mt7530_hw_vlan_entry *entry,
+ mt7530_vlan_op vlan_op)
+{
+ u32 val;
+
+ /* Fetch entry */
+ mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid);
+
+ val = mt7530_read(priv, MT7530_VAWD1);
+
+ entry->old_members = (val >> PORT_MEM_SHFT) & PORT_MEM_MASK;
+
+ /* Manipulate entry */
+ vlan_op(priv, entry);
+
+ /* Flush result to hardware */
+ mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid);
+}
+
+static void
+mt7530_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct mt7530_hw_vlan_entry new_entry;
+ struct mt7530_priv *priv = ds->priv;
+ u16 vid;
+
+	/* The port is kept VLAN-unaware when the bridge is set up without
+	 * vlan_filtering.
+ */
+ if (!priv->ports[port].vlan_filtering)
+ return;
+
+ mutex_lock(&priv->reg_mutex);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
+ mt7530_hw_vlan_update(priv, vid, &new_entry,
+ mt7530_hw_vlan_add);
+ }
+
+ if (pvid) {
+ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
+ G0_PORT_VID(vlan->vid_end));
+ priv->ports[port].pvid = vlan->vid_end;
+ }
+
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+mt7530_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct mt7530_hw_vlan_entry target_entry;
+ struct mt7530_priv *priv = ds->priv;
+ u16 vid, pvid;
+
+	/* The port is kept VLAN-unaware when the bridge is set up without
+	 * vlan_filtering.
+ */
+ if (!priv->ports[port].vlan_filtering)
+ return 0;
+
+ mutex_lock(&priv->reg_mutex);
+
+ pvid = priv->ports[port].pvid;
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ mt7530_hw_vlan_entry_init(&target_entry, port, 0);
+ mt7530_hw_vlan_update(priv, vid, &target_entry,
+ mt7530_hw_vlan_del);
+
+		/* Restore the PVID to the default whenever the PVID port
+		 * is removed from the VLAN.
+ */
+ if (pvid == vid)
+ pvid = G0_PORT_VID_DEF;
+ }
+
+ mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, pvid);
+ priv->ports[port].pvid = pvid;
+
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
static enum dsa_tag_protocol
mtk_get_tag_protocol(struct dsa_switch *ds, int port)
{
@@ -1035,6 +1317,10 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_fdb_add = mt7530_port_fdb_add,
.port_fdb_del = mt7530_port_fdb_del,
.port_fdb_dump = mt7530_port_fdb_dump,
+ .port_vlan_filtering = mt7530_port_vlan_filtering,
+ .port_vlan_prepare = mt7530_port_vlan_prepare,
+ .port_vlan_add = mt7530_port_vlan_add,
+ .port_vlan_del = mt7530_port_vlan_del,
};
static int
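A minimal sketch, not part of the patch, of how the helpers above compose:
mt7530_hw_vlan_update() fetches the entry for a VID through VTCR, lets a
callback such as mt7530_hw_vlan_add() rewrite the VAWD registers, then
flushes the result back. The port number and VID here are illustrative only.

static void example_add_untagged_member(struct mt7530_priv *priv)
{
	struct mt7530_hw_vlan_entry entry;

	mutex_lock(&priv->reg_mutex);

	/* Seed the entry: port 3 joins VID 100 as an egress-untagged member */
	mt7530_hw_vlan_entry_init(&entry, 3, true);

	/* Read the entry (VTCR_RD_VID), recompute the member set and the
	 * egress tagging in mt7530_hw_vlan_add(), write it back (VTCR_WR_VID).
	 */
	mt7530_hw_vlan_update(priv, 100, &entry, mt7530_hw_vlan_add);

	mutex_unlock(&priv->reg_mutex);
}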
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 74db9822eb40..d9b407a22a58 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -17,6 +17,7 @@
#define MT7530_NUM_PORTS 7
#define MT7530_CPU_PORT 6
#define MT7530_NUM_FDB_RECORDS 2048
+#define MT7530_ALL_MEMBERS 0xff
#define NUM_TRGMII_CTRL 5
@@ -88,21 +89,42 @@ enum mt7530_fdb_cmd {
/* Register for vlan table control */
#define MT7530_VTCR 0x90
#define VTCR_BUSY BIT(31)
-#define VTCR_FUNC (((x) & 0xf) << 12)
-#define VTCR_FUNC_RD_VID 0x1
-#define VTCR_FUNC_WR_VID 0x2
-#define VTCR_FUNC_INV_VID 0x3
-#define VTCR_FUNC_VAL_VID 0x4
+#define VTCR_INVALID BIT(16)
+#define VTCR_FUNC(x) (((x) & 0xf) << 12)
#define VTCR_VID ((x) & 0xfff)
+enum mt7530_vlan_cmd {
+	/* Read/Write the specified VID entry from/to the VAWD registers
+	 * based on VID.
+ */
+ MT7530_VTCR_RD_VID = 0,
+ MT7530_VTCR_WR_VID = 1,
+};
+
/* Register for setup vlan and acl write data */
#define MT7530_VAWD1 0x94
#define PORT_STAG BIT(31)
+/* Independent VLAN Learning */
#define IVL_MAC BIT(30)
+/* Per VLAN Egress Tag Control */
+#define VTAG_EN BIT(28)
+/* VLAN Member Control */
#define PORT_MEM(x) (((x) & 0xff) << 16)
-#define VALID BIT(1)
+/* VLAN Entry Valid */
+#define VLAN_VALID BIT(0)
+#define PORT_MEM_SHFT 16
+#define PORT_MEM_MASK 0xff
#define MT7530_VAWD2 0x98
+/* Egress Tag Control */
+#define ETAG_CTRL_P(p, x) (((x) & 0x3) << ((p) << 1))
+#define ETAG_CTRL_P_MASK(p) ETAG_CTRL_P(p, 3)
+
+enum mt7530_vlan_egress_attr {
+ MT7530_VLAN_EGRESS_UNTAG = 0,
+ MT7530_VLAN_EGRESS_TAG = 2,
+ MT7530_VLAN_EGRESS_STACK = 3,
+};
/* Register for port STP state control */
#define MT7530_SSP_P(x) (0x2000 + ((x) * 0x100))
@@ -120,11 +142,23 @@ enum mt7530_stp_state {
/* Register for port control */
#define MT7530_PCR_P(x) (0x2004 + ((x) * 0x100))
#define PORT_VLAN(x) ((x) & 0x3)
+
+enum mt7530_port_mode {
+ /* Port Matrix Mode: Frames are forwarded by the PCR_MATRIX members. */
+ MT7530_PORT_MATRIX_MODE = PORT_VLAN(0),
+
+	/* Security Mode: Discard any frame that violates ingress membership
+	 * or whose VID is missing from the VLAN table.
+ */
+ MT7530_PORT_SECURITY_MODE = PORT_VLAN(3),
+};
+
#define PCR_MATRIX(x) (((x) & 0xff) << 16)
#define PORT_PRI(x) (((x) & 0x7) << 24)
#define EG_TAG(x) (((x) & 0x3) << 28)
#define PCR_MATRIX_MASK PCR_MATRIX(0xff)
#define PCR_MATRIX_CLR PCR_MATRIX(0)
+#define PCR_PORT_VLAN_MASK PORT_VLAN(3)
/* Register for port security control */
#define MT7530_PSC_P(x) (0x200c + ((x) * 0x100))
@@ -134,10 +168,20 @@ enum mt7530_stp_state {
#define MT7530_PVC_P(x) (0x2010 + ((x) * 0x100))
#define PORT_SPEC_TAG BIT(5)
#define VLAN_ATTR(x) (((x) & 0x3) << 6)
+#define VLAN_ATTR_MASK VLAN_ATTR(3)
+
+enum mt7530_vlan_port_attr {
+ MT7530_VLAN_USER = 0,
+ MT7530_VLAN_TRANSPARENT = 3,
+};
+
#define STAG_VPID (((x) & 0xffff) << 16)
/* Register for port port-and-protocol based vlan 1 control */
#define MT7530_PPBV1_P(x) (0x2014 + ((x) * 0x100))
+#define G0_PORT_VID(x) (((x) & 0xfff) << 0)
+#define G0_PORT_VID_MASK G0_PORT_VID(0xfff)
+#define G0_PORT_VID_DEF G0_PORT_VID(1)
/* Register for port MAC control register */
#define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100))
@@ -345,9 +389,20 @@ struct mt7530_fdb {
bool noarp;
};
+/* struct mt7530_port - This is the main data structure for holding the state
+ * of the port.
+ * @enable: Whether the port is enabled.
+ * @pm: The matrix showing all connections with the port.
+ * @pvid: The VLAN to be considered the PVID at ingress. Any untagged frame
+ *	  is assigned to this VLAN.
+ * @vlan_filtering: Whether the port recognizes VLAN-tagged frames.
+ */
struct mt7530_port {
bool enable;
u32 pm;
+ u16 pvid;
+ bool vlan_filtering;
};
/* struct mt7530_priv - This is the main data structure for holding the state
@@ -382,6 +437,22 @@ struct mt7530_priv {
struct mutex reg_mutex;
};
+struct mt7530_hw_vlan_entry {
+ int port;
+ u8 old_members;
+ bool untagged;
+};
+
+static inline void mt7530_hw_vlan_entry_init(struct mt7530_hw_vlan_entry *e,
+ int port, bool untagged)
+{
+ e->port = port;
+ e->untagged = untagged;
+}
+
+typedef void (*mt7530_vlan_op)(struct mt7530_priv *,
+ struct mt7530_hw_vlan_entry *);
+
struct mt7530_hw_stats {
const char *string;
u16 reg;
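A quick sketch, with illustrative values, of the ETAG_CTRL_P() packing
defined above: each port owns a two-bit egress-tag attribute in
MT7530_VAWD2 at bit offset port * 2.

static u32 example_vawd2_field(void)
{
	/* Port 5's attribute occupies bits 11:10 of MT7530_VAWD2 */
	u32 mask = ETAG_CTRL_P_MASK(5);		/* 0x3 << 10 == 0x00000c00 */
	u32 val = ETAG_CTRL_P(5, MT7530_VLAN_EGRESS_TAG); /* 0x00000800 */

	return val & mask;
}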
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 66d33e97cbc5..eb328bade225 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1185,8 +1185,7 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
static int
mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -1295,8 +1294,7 @@ static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
}
static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_chip *chip = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1725,9 +1723,11 @@ static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port)
static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
{
- bool flood = port == dsa_upstream_port(chip->ds);
+ struct dsa_switch *ds = chip->ds;
+ bool flood;
/* Upstream ports flood frames with unknown unicast or multicast DA */
+ flood = dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port);
if (chip->info->ops->port_set_egress_floods)
return chip->info->ops->port_set_egress_floods(chip, port,
flood, flood);
@@ -1744,6 +1744,39 @@ static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port,
return 0;
}
+static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
+{
+ struct dsa_switch *ds = chip->ds;
+ int upstream_port;
+ int err;
+
+ upstream_port = dsa_upstream_port(ds, port);
+ if (chip->info->ops->port_set_upstream_port) {
+ err = chip->info->ops->port_set_upstream_port(chip, port,
+ upstream_port);
+ if (err)
+ return err;
+ }
+
+ if (port == upstream_port) {
+ if (chip->info->ops->set_cpu_port) {
+ err = chip->info->ops->set_cpu_port(chip,
+ upstream_port);
+ if (err)
+ return err;
+ }
+
+ if (chip->info->ops->set_egress_port) {
+ err = chip->info->ops->set_egress_port(chip,
+ upstream_port);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_switch *ds = chip->ds;
@@ -1814,13 +1847,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- reg = 0;
- if (chip->info->ops->port_set_upstream_port) {
- err = chip->info->ops->port_set_upstream_port(
- chip, port, dsa_upstream_port(ds));
- if (err)
- return err;
- }
+ err = mv88e6xxx_setup_upstream_port(chip, port);
+ if (err)
+ return err;
err = mv88e6xxx_port_set_8021q_mode(chip, port,
MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED);
@@ -1946,21 +1975,8 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
{
struct dsa_switch *ds = chip->ds;
- u32 upstream_port = dsa_upstream_port(ds);
int err;
- if (chip->info->ops->set_cpu_port) {
- err = chip->info->ops->set_cpu_port(chip, upstream_port);
- if (err)
- return err;
- }
-
- if (chip->info->ops->set_egress_port) {
- err = chip->info->ops->set_egress_port(chip, upstream_port);
- if (err)
- return err;
- }
-
/* Disable remote management, and set the switch's DSA device number. */
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2,
MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE |
@@ -3741,6 +3757,7 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
return chip->info->tag_protocol;
}
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
struct device *host_dev, int sw_addr,
void **priv)
@@ -3788,10 +3805,10 @@ free:
return NULL;
}
+#endif
static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
/* We don't need any dynamic resource from the kernel (yet),
* so skip the prepare phase.
@@ -3801,8 +3818,7 @@ static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port,
}
static void mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -3829,7 +3845,9 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
}
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
.probe = mv88e6xxx_drv_probe,
+#endif
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
.adjust_link = mv88e6xxx_adjust_link,
@@ -3958,11 +3976,19 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (err)
goto out_g1_irq;
}
+
+ err = mv88e6xxx_g1_atu_prob_irq_setup(chip);
+ if (err)
+ goto out_g2_irq;
+
+ err = mv88e6xxx_g1_vtu_prob_irq_setup(chip);
+ if (err)
+ goto out_g1_atu_prob_irq;
}
err = mv88e6xxx_mdios_register(chip, np);
if (err)
- goto out_g2_irq;
+ goto out_g1_vtu_prob_irq;
err = mv88e6xxx_register_switch(chip);
if (err)
@@ -3972,6 +3998,12 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
out_mdio:
mv88e6xxx_mdios_unregister(chip);
+out_g1_vtu_prob_irq:
+ if (chip->irq > 0)
+ mv88e6xxx_g1_vtu_prob_irq_free(chip);
+out_g1_atu_prob_irq:
+ if (chip->irq > 0)
+ mv88e6xxx_g1_atu_prob_irq_free(chip);
out_g2_irq:
if (chip->info->g2_irqs > 0 && chip->irq > 0)
mv88e6xxx_g2_irq_free(chip);
@@ -3995,6 +4027,8 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_mdios_unregister(chip);
if (chip->irq > 0) {
+ mv88e6xxx_g1_vtu_prob_irq_free(chip);
+ mv88e6xxx_g1_atu_prob_irq_free(chip);
if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
mutex_lock(&chip->reg_lock);
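A minimal sketch, not from the patch, of the flood decision the reworked
mv88e6xxx_setup_egress_floods() now encodes: with the per-port upstream
lookup, every CPU and DSA (inter-switch) port floods frames with unknown
DA, rather than only a single tree-wide upstream port.

static bool example_should_flood(struct dsa_switch *ds, int port)
{
	/* CPU and DSA ports carry traffic for addresses the switch has
	 * not learned, so they must flood unknown unicast/multicast.
	 */
	return dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port);
}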
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 334f6f7544ba..3dba6e90adcf 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -207,6 +207,8 @@ struct mv88e6xxx_chip {
int irq;
int device_irq;
int watchdog_irq;
+ int atu_prob_irq;
+ int vtu_prob_irq;
};
struct mv88e6xxx_bus_ops {
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index b0dc7518b47f..6aee7316fea6 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -29,9 +29,9 @@
#define MV88E6XXX_G1_STS_IRQ_AVB 8
#define MV88E6XXX_G1_STS_IRQ_DEVICE 7
#define MV88E6XXX_G1_STS_IRQ_STATS 6
-#define MV88E6XXX_G1_STS_IRQ_VTU_PROBLEM 5
+#define MV88E6XXX_G1_STS_IRQ_VTU_PROB 5
#define MV88E6XXX_G1_STS_IRQ_VTU_DONE 4
-#define MV88E6XXX_G1_STS_IRQ_ATU_PROBLEM 3
+#define MV88E6XXX_G1_STS_IRQ_ATU_PROB 3
#define MV88E6XXX_G1_STS_IRQ_ATU_DONE 2
#define MV88E6XXX_G1_STS_IRQ_TCAM_DONE 1
#define MV88E6XXX_G1_STS_IRQ_EEPROM_DONE 0
@@ -82,6 +82,10 @@
#define MV88E6XXX_G1_VTU_OP_VTU_GET_NEXT 0x4000
#define MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE 0x5000
#define MV88E6XXX_G1_VTU_OP_STU_GET_NEXT 0x6000
+#define MV88E6XXX_G1_VTU_OP_GET_CLR_VIOLATION 0x7000
+#define MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION BIT(6)
+#define MV88E6XXX_G1_VTU_OP_MISS_VIOLATION BIT(5)
+#define MV88E6XXX_G1_VTU_OP_SPID_MASK 0xf
/* Offset 0x06: VTU VID Register */
#define MV88E6XXX_G1_VTU_VID 0x06
@@ -122,6 +126,10 @@
#define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_ALL_DB 0x5000
#define MV88E6XXX_G1_ATU_OP_FLUSH_MOVE_NON_STATIC_DB 0x6000
#define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000
+#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7)
+#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6)
+#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION	BIT(5)
+#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4)
/* Offset 0x0C: ATU Data Register */
#define MV88E6XXX_G1_ATU_DATA 0x0c
@@ -255,6 +263,8 @@ int mv88e6xxx_g1_atu_loadpurge(struct mv88e6xxx_chip *chip, u16 fid,
int mv88e6xxx_g1_atu_flush(struct mv88e6xxx_chip *chip, u16 fid, bool all);
int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
bool all);
+int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
@@ -269,5 +279,7 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
#endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index efeef4b01442..20d941f4273b 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -9,6 +9,8 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include "chip.h"
#include "global1.h"
@@ -307,3 +309,88 @@ int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
return mv88e6xxx_g1_atu_move(chip, fid, from_port, to_port, all);
}
+
+static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_chip *chip = dev_id;
+ struct mv88e6xxx_atu_entry entry;
+ int err;
+ u16 val;
+
+ mutex_lock(&chip->reg_lock);
+
+ err = mv88e6xxx_g1_atu_op(chip, 0,
+ MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &val);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_atu_data_read(chip, &entry);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_atu_mac_read(chip, &entry);
+ if (err)
+ goto out;
+
+ mutex_unlock(&chip->reg_lock);
+
+ if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
+ dev_err_ratelimited(chip->dev,
+ "ATU age out violation for %pM\n",
+ entry.mac);
+ }
+
+ if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
+ dev_err_ratelimited(chip->dev,
+ "ATU member violation for %pM portvec %x\n",
+ entry.mac, entry.portvec);
+ }
+
+	if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION)
+ dev_err_ratelimited(chip->dev,
+ "ATU miss violation for %pM portvec %x\n",
+ entry.mac, entry.portvec);
+
+ if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION)
+ dev_err_ratelimited(chip->dev,
+ "ATU full violation for %pM portvec %x\n",
+ entry.mac, entry.portvec);
+
+ return IRQ_HANDLED;
+
+out:
+ mutex_unlock(&chip->reg_lock);
+
+ dev_err(chip->dev, "ATU problem: error %d while handling interrupt\n",
+ err);
+ return IRQ_HANDLED;
+}
+
+int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ chip->atu_prob_irq = irq_find_mapping(chip->g1_irq.domain,
+ MV88E6XXX_G1_STS_IRQ_ATU_PROB);
+ if (chip->atu_prob_irq < 0)
+ return chip->atu_prob_irq;
+
+ err = request_threaded_irq(chip->atu_prob_irq, NULL,
+ mv88e6xxx_g1_atu_prob_irq_thread_fn,
+ IRQF_ONESHOT, "mv88e6xxx-g1-atu-prob",
+ chip);
+ if (err)
+ irq_dispose_mapping(chip->atu_prob_irq);
+
+ return err;
+}
+
+void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip)
+{
+ free_irq(chip->atu_prob_irq, chip);
+ irq_dispose_mapping(chip->atu_prob_irq);
+}
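A short sketch of the intended lifecycle for the new helpers, mirroring the
probe/remove ordering in chip.c (hypothetical caller): setup maps the G1
status bit to a Linux virq and claims it with a threaded handler; free must
release the virq before the G1 interrupt domain itself is torn down.

static int example_atu_irq_lifecycle(struct mv88e6xxx_chip *chip)
{
	int err;

	err = mv88e6xxx_g1_atu_prob_irq_setup(chip);
	if (err)
		return err;

	/* ... ATU violations are now logged by the threaded handler ... */

	mv88e6xxx_g1_atu_prob_irq_free(chip);
	return 0;
}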
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 8c8a0ec3d6e9..7997961647de 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -11,6 +11,9 @@
* (at your option) any later version.
*/
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+
#include "chip.h"
#include "global1.h"
@@ -513,3 +516,74 @@ int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
}
+
+static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_chip *chip = dev_id;
+ struct mv88e6xxx_vtu_entry entry;
+ int spid;
+ int err;
+ u16 val;
+
+ mutex_lock(&chip->reg_lock);
+
+ err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_GET_CLR_VIOLATION);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, &val);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_g1_vtu_vid_read(chip, &entry);
+ if (err)
+ goto out;
+
+ mutex_unlock(&chip->reg_lock);
+
+ spid = val & MV88E6XXX_G1_VTU_OP_SPID_MASK;
+
+ if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) {
+ dev_err_ratelimited(chip->dev, "VTU member violation for vid %d, source port %d\n",
+ entry.vid, spid);
+ }
+
+ if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION)
+ dev_err_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n",
+ entry.vid, spid);
+
+ return IRQ_HANDLED;
+
+out:
+ mutex_unlock(&chip->reg_lock);
+
+ dev_err(chip->dev, "VTU problem: error %d while handling interrupt\n",
+ err);
+
+ return IRQ_HANDLED;
+}
+
+int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ chip->vtu_prob_irq = irq_find_mapping(chip->g1_irq.domain,
+ MV88E6XXX_G1_STS_IRQ_VTU_PROB);
+ if (chip->vtu_prob_irq < 0)
+ return chip->vtu_prob_irq;
+
+ err = request_threaded_irq(chip->vtu_prob_irq, NULL,
+ mv88e6xxx_g1_vtu_prob_irq_thread_fn,
+ IRQF_ONESHOT, "mv88e6xxx-g1-vtu-prob",
+ chip);
+ if (err)
+ irq_dispose_mapping(chip->vtu_prob_irq);
+
+ return err;
+}
+
+void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip)
+{
+ free_irq(chip->vtu_prob_irq, chip);
+ irq_dispose_mapping(chip->vtu_prob_irq);
+}
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index a7801f6668a5..6315774d72b3 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -338,6 +338,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX;
break;
case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_XAUI:
cmode = MV88E6XXX_PORT_STS_CMODE_XAUI;
break;
case PHY_INTERFACE_MODE_RXAUI:
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 58483af80bdb..30b1c8512049 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -42,48 +42,7 @@
#define DRV_NAME "dummy"
#define DRV_VERSION "1.0"
-#undef pr_fmt
-#define pr_fmt(fmt) DRV_NAME ": " fmt
-
static int numdummies = 1;
-static int num_vfs;
-
-struct vf_data_storage {
- u8 vf_mac[ETH_ALEN];
- u16 pf_vlan; /* When set, guest VLAN config not allowed. */
- u16 pf_qos;
- __be16 vlan_proto;
- u16 min_tx_rate;
- u16 max_tx_rate;
- u8 spoofchk_enabled;
- bool rss_query_enabled;
- u8 trusted;
- int link_state;
-};
-
-struct dummy_priv {
- struct vf_data_storage *vfinfo;
-};
-
-static int dummy_num_vf(struct device *dev)
-{
- return num_vfs;
-}
-
-static struct bus_type dummy_bus = {
- .name = "dummy",
- .num_vf = dummy_num_vf,
-};
-
-static void release_dummy_parent(struct device *dev)
-{
-}
-
-static struct device dummy_parent = {
- .init_name = "dummy",
- .bus = &dummy_bus,
- .release = release_dummy_parent,
-};
/* fake multicast ability */
static void set_multicast_list(struct net_device *dev)
@@ -133,25 +92,10 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
- struct dummy_priv *priv = netdev_priv(dev);
-
dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
if (!dev->dstats)
return -ENOMEM;
- priv->vfinfo = NULL;
-
- if (!num_vfs)
- return 0;
-
- dev->dev.parent = &dummy_parent;
- priv->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
- GFP_KERNEL);
- if (!priv->vfinfo) {
- free_percpu(dev->dstats);
- return -ENOMEM;
- }
-
return 0;
}
@@ -169,117 +113,6 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
return 0;
}
-static int dummy_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
- return -EINVAL;
-
- memcpy(priv->vfinfo[vf].vf_mac, mac, ETH_ALEN);
-
- return 0;
-}
-
-static int dummy_set_vf_vlan(struct net_device *dev, int vf,
- u16 vlan, u8 qos, __be16 vlan_proto)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if ((vf >= num_vfs) || (vlan > 4095) || (qos > 7))
- return -EINVAL;
-
- priv->vfinfo[vf].pf_vlan = vlan;
- priv->vfinfo[vf].pf_qos = qos;
- priv->vfinfo[vf].vlan_proto = vlan_proto;
-
- return 0;
-}
-
-static int dummy_set_vf_rate(struct net_device *dev, int vf, int min, int max)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if (vf >= num_vfs)
- return -EINVAL;
-
- priv->vfinfo[vf].min_tx_rate = min;
- priv->vfinfo[vf].max_tx_rate = max;
-
- return 0;
-}
-
-static int dummy_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if (vf >= num_vfs)
- return -EINVAL;
-
- priv->vfinfo[vf].spoofchk_enabled = val;
-
- return 0;
-}
-
-static int dummy_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if (vf >= num_vfs)
- return -EINVAL;
-
- priv->vfinfo[vf].rss_query_enabled = val;
-
- return 0;
-}
-
-static int dummy_set_vf_trust(struct net_device *dev, int vf, bool val)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if (vf >= num_vfs)
- return -EINVAL;
-
- priv->vfinfo[vf].trusted = val;
-
- return 0;
-}
-
-static int dummy_get_vf_config(struct net_device *dev,
- int vf, struct ifla_vf_info *ivi)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if (vf >= num_vfs)
- return -EINVAL;
-
- ivi->vf = vf;
- memcpy(&ivi->mac, priv->vfinfo[vf].vf_mac, ETH_ALEN);
- ivi->vlan = priv->vfinfo[vf].pf_vlan;
- ivi->qos = priv->vfinfo[vf].pf_qos;
- ivi->spoofchk = priv->vfinfo[vf].spoofchk_enabled;
- ivi->linkstate = priv->vfinfo[vf].link_state;
- ivi->min_tx_rate = priv->vfinfo[vf].min_tx_rate;
- ivi->max_tx_rate = priv->vfinfo[vf].max_tx_rate;
- ivi->rss_query_en = priv->vfinfo[vf].rss_query_enabled;
- ivi->trusted = priv->vfinfo[vf].trusted;
- ivi->vlan_proto = priv->vfinfo[vf].vlan_proto;
-
- return 0;
-}
-
-static int dummy_set_vf_link_state(struct net_device *dev, int vf, int state)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- if (vf >= num_vfs)
- return -EINVAL;
-
- priv->vfinfo[vf].link_state = state;
-
- return 0;
-}
-
static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
.ndo_uninit = dummy_dev_uninit,
@@ -289,14 +122,6 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = dummy_get_stats64,
.ndo_change_carrier = dummy_change_carrier,
- .ndo_set_vf_mac = dummy_set_vf_mac,
- .ndo_set_vf_vlan = dummy_set_vf_vlan,
- .ndo_set_vf_rate = dummy_set_vf_rate,
- .ndo_set_vf_spoofchk = dummy_set_vf_spoofchk,
- .ndo_set_vf_trust = dummy_set_vf_trust,
- .ndo_get_vf_config = dummy_get_vf_config,
- .ndo_set_vf_link_state = dummy_set_vf_link_state,
- .ndo_set_vf_rss_query_en = dummy_set_vf_rss_query_en,
};
static void dummy_get_drvinfo(struct net_device *dev,
@@ -323,13 +148,6 @@ static const struct ethtool_ops dummy_ethtool_ops = {
.get_ts_info = dummy_get_ts_info,
};
-static void dummy_free_netdev(struct net_device *dev)
-{
- struct dummy_priv *priv = netdev_priv(dev);
-
- kfree(priv->vfinfo);
-}
-
static void dummy_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -338,7 +156,6 @@ static void dummy_setup(struct net_device *dev)
dev->netdev_ops = &dummy_netdev_ops;
dev->ethtool_ops = &dummy_ethtool_ops;
dev->needs_free_netdev = true;
- dev->priv_destructor = dummy_free_netdev;
/* Fill in device structure with ethernet-generic values. */
dev->flags |= IFF_NOARP;
@@ -370,7 +187,6 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[],
static struct rtnl_link_ops dummy_link_ops __read_mostly = {
.kind = DRV_NAME,
- .priv_size = sizeof(struct dummy_priv),
.setup = dummy_setup,
.validate = dummy_validate,
};
@@ -379,16 +195,12 @@ static struct rtnl_link_ops dummy_link_ops __read_mostly = {
module_param(numdummies, int, 0);
MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");
-module_param(num_vfs, int, 0);
-MODULE_PARM_DESC(num_vfs, "Number of dummy VFs per dummy device");
-
static int __init dummy_init_one(void)
{
struct net_device *dev_dummy;
int err;
- dev_dummy = alloc_netdev(sizeof(struct dummy_priv),
- "dummy%d", NET_NAME_ENUM, dummy_setup);
+ dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_ENUM, dummy_setup);
if (!dev_dummy)
return -ENOMEM;
@@ -407,21 +219,6 @@ static int __init dummy_init_module(void)
{
int i, err = 0;
- if (num_vfs) {
- err = bus_register(&dummy_bus);
- if (err < 0) {
- pr_err("registering dummy bus failed\n");
- return err;
- }
-
- err = device_register(&dummy_parent);
- if (err < 0) {
- pr_err("registering dummy parent device failed\n");
- bus_unregister(&dummy_bus);
- return err;
- }
- }
-
rtnl_lock();
err = __rtnl_link_register(&dummy_link_ops);
if (err < 0)
@@ -437,22 +234,12 @@ static int __init dummy_init_module(void)
out:
rtnl_unlock();
- if (err && num_vfs) {
- device_unregister(&dummy_parent);
- bus_unregister(&dummy_bus);
- }
-
return err;
}
static void __exit dummy_cleanup_module(void)
{
rtnl_link_unregister(&dummy_link_ops);
-
- if (num_vfs) {
- device_unregister(&dummy_parent);
- bus_unregister(&dummy_bus);
- }
}
module_init(dummy_init_module);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index f4e13a7014bd..36c8950dbd2d 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -602,7 +602,7 @@ struct vortex_private {
struct sk_buff* rx_skbuff[RX_RING_SIZE];
struct sk_buff* tx_skbuff[TX_RING_SIZE];
unsigned int cur_rx, cur_tx; /* The next free ring entry */
- unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ unsigned int dirty_tx; /* The ring entries to be free()ed. */
struct vortex_extra_stats xstats; /* NIC-specific extra stats */
struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
@@ -618,7 +618,6 @@ struct vortex_private {
/* The remainder are related to chip state, mostly media selection. */
struct timer_list timer; /* Media selection timer. */
- struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */
int options; /* User-settable misc. driver options. */
unsigned int media_override:4, /* Passed-in media type. */
default_media:4, /* Read from the EEPROM/Wn3_Config. */
@@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
static void vortex_timer(struct timer_list *t);
-static void rx_oom_timer(struct timer_list *t);
static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
@@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev)
timer_setup(&vp->timer, vortex_timer, 0);
mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
- timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);
if (vortex_debug > 1)
pr_debug("%s: Initial media type %s.\n",
@@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev)
window_write16(vp, 0x0040, 4, Wn4_NetDiag);
if (vp->full_bus_master_rx) { /* Boomerang bus master. */
- vp->cur_rx = vp->dirty_rx = 0;
+ vp->cur_rx = 0;
/* Initialize the RxEarly register as recommended. */
iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
iowrite32(0x0020, ioaddr + PktStatus);
@@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev)
struct vortex_private *vp = netdev_priv(dev);
int i;
int retval;
+ dma_addr_t dma;
/* Use the now-standard shared IRQ implementation. */
if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
@@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev)
break; /* Bad news! */
skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
- vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+ dma = pci_map_single(VORTEX_PCI(vp), skb->data,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
+ break;
+ vp->rx_ring[i].addr = cpu_to_le32(dma);
}
if (i != RX_RING_SIZE) {
pr_emerg("%s: no memory for rx ring\n", dev->name);
@@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
int len = (skb->len + 3) & ~3;
vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
spin_lock_irq(&vp->window_lock);
window_set(vp, 7);
iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
@@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev)
int entry = vp->cur_rx % RX_RING_SIZE;
void __iomem *ioaddr = vp->ioaddr;
int rx_status;
- int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+ int rx_work_limit = RX_RING_SIZE;
if (vortex_debug > 5)
pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
@@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev)
} else {
/* The packet length: up to 4.5K!. */
int pkt_len = rx_status & 0x1fff;
- struct sk_buff *skb;
+ struct sk_buff *skb, *newskb;
+ dma_addr_t newdma;
dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
if (vortex_debug > 4)
@@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev)
pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_copy++;
} else {
+				/* Pre-allocate the replacement skb. If it or
+				 * its mapping fails, recycle the buffer that's
+				 * already in place.
+ */
+ newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
+ if (!newskb) {
+ dev->stats.rx_dropped++;
+ goto clear_complete;
+ }
+ newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
+ dev->stats.rx_dropped++;
+ consume_skb(newskb);
+ goto clear_complete;
+ }
+
/* Pass up the skbuff already on the Rx ring. */
skb = vp->rx_skbuff[entry];
- vp->rx_skbuff[entry] = NULL;
+ vp->rx_skbuff[entry] = newskb;
+ vp->rx_ring[entry].addr = cpu_to_le32(newdma);
skb_put(skb, pkt_len);
pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_nocopy++;
@@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev)
netif_rx(skb);
dev->stats.rx_packets++;
}
- entry = (++vp->cur_rx) % RX_RING_SIZE;
- }
- /* Refill the Rx ring buffers. */
- for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
- struct sk_buff *skb;
- entry = vp->dirty_rx % RX_RING_SIZE;
- if (vp->rx_skbuff[entry] == NULL) {
- skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
- if (skb == NULL) {
- static unsigned long last_jif;
- if (time_after(jiffies, last_jif + 10 * HZ)) {
- pr_warn("%s: memory shortage\n",
- dev->name);
- last_jif = jiffies;
- }
- if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
- mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
- break; /* Bad news! */
- }
- vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
- vp->rx_skbuff[entry] = skb;
- }
+clear_complete:
vp->rx_ring[entry].status = 0; /* Clear complete bit. */
iowrite16(UpUnstall, ioaddr + EL3_CMD);
+ entry = (++vp->cur_rx) % RX_RING_SIZE;
}
return 0;
}
-/*
- * If we've hit a total OOM refilling the Rx ring we poll once a second
- * for some memory. Otherwise there is no way to restart the rx process.
- */
-static void
-rx_oom_timer(struct timer_list *t)
-{
- struct vortex_private *vp = from_timer(vp, t, rx_oom_timer);
- struct net_device *dev = vp->mii.dev;
-
- spin_lock_irq(&vp->lock);
- if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
- boomerang_rx(dev);
- if (vortex_debug > 1) {
- pr_debug("%s: rx_oom_timer %s\n", dev->name,
- ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
- }
- spin_unlock_irq(&vp->lock);
-}
-
static void
vortex_down(struct net_device *dev, int final_down)
{
@@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down)
netdev_reset_queue(dev);
netif_stop_queue(dev);
- del_timer_sync(&vp->rx_oom_timer);
del_timer_sync(&vp->timer);
/* Turn off statistics ASAP. We update dev->stats below. */
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 9497f18eaba0..2f91ce8dc614 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -123,7 +123,8 @@ enum mac8390_access {
};
extern int mac8390_memtest(struct net_device *dev);
-static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev,
+static int mac8390_initdev(struct net_device *dev,
+ struct nubus_rsrc *ndev,
enum mac8390_type type);
static int mac8390_open(struct net_device *dev);
@@ -169,11 +170,11 @@ static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
static u32 mac8390_msg_enable;
-static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
+static enum mac8390_type __init mac8390_ident(struct nubus_rsrc *fres)
{
- switch (dev->dr_sw) {
+ switch (fres->dr_sw) {
case NUBUS_DRSW_3COM:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_APPLE_SONIC_NB:
case NUBUS_DRHW_APPLE_SONIC_LC:
case NUBUS_DRHW_SONNET:
@@ -184,7 +185,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
break;
case NUBUS_DRSW_APPLE:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_ASANTE_LC:
return MAC8390_NONE;
case NUBUS_DRHW_CABLETRON:
@@ -201,7 +202,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
case NUBUS_DRSW_TECHWORKS:
case NUBUS_DRSW_DAYNA2:
case NUBUS_DRSW_DAYNA_LC:
- if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
+ if (fres->dr_hw == NUBUS_DRHW_CABLETRON)
return MAC8390_CABLETRON;
else
return MAC8390_APPLE;
@@ -212,7 +213,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
break;
case NUBUS_DRSW_KINETICS:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_INTERLAN:
return MAC8390_INTERLAN;
default:
@@ -225,8 +226,8 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
* These correspond to Dayna Sonic cards
* which use the macsonic driver
*/
- if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
- dev->dr_hw == NUBUS_DRHW_INTERLAN)
+ if (fres->dr_hw == NUBUS_DRHW_SMC9194 ||
+ fres->dr_hw == NUBUS_DRHW_INTERLAN)
return MAC8390_NONE;
else
return MAC8390_DAYNA;
@@ -289,7 +290,8 @@ static int __init mac8390_memsize(unsigned long membase)
return i * 0x1000;
}
-static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
+static bool __init mac8390_init(struct net_device *dev,
+ struct nubus_rsrc *ndev,
enum mac8390_type cardtype)
{
struct nubus_dir dir;
@@ -394,7 +396,7 @@ static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
struct net_device * __init mac8390_probe(int unit)
{
struct net_device *dev;
- struct nubus_dev *ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
int err = -ENODEV;
struct ei_device *ei_local;
@@ -414,8 +416,11 @@ struct net_device * __init mac8390_probe(int unit)
if (unit >= 0)
sprintf(dev->name, "eth%d", unit);
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET,
- ndev))) {
+ for_each_func_rsrc(ndev) {
+ if (ndev->category != NUBUS_CAT_NETWORK ||
+ ndev->type != NUBUS_TYPE_ETHERNET)
+ continue;
+
/* Have we seen it already? */
if (slots & (1 << ndev->board->slot))
continue;
@@ -489,7 +494,7 @@ static const struct net_device_ops mac8390_netdev_ops = {
};
static int __init mac8390_initdev(struct net_device *dev,
- struct nubus_dev *ndev,
+ struct nubus_rsrc *ndev,
enum mac8390_type type)
{
static u32 fwrd4_offsets[16] = {
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index c60421339a98..b6cf4b6962f5 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -42,6 +42,7 @@ source "drivers/net/ethernet/cavium/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cortina/Kconfig"
config CX_ECAT
tristate "Beckhoff CX5020 EtherCAT master support"
@@ -170,6 +171,7 @@ source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
+source "drivers/net/ethernet/socionext/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
source "drivers/net/ethernet/tehuti/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 39f6273358ed..3cdf01e96e0b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_NET_VENDOR_CORTINA) += cortina/
obj-$(CONFIG_CX_ECAT) += ec_bhf.o
obj-$(CONFIG_DM9000) += davicom/
obj-$(CONFIG_DNET) += dnet.o
@@ -82,6 +83,7 @@ obj-$(CONFIG_SFC) += sfc/
obj-$(CONFIG_SFC_FALCON) += sfc/falcon/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
+obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index a1a52eb53b14..8f71b79b4949 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -1436,13 +1436,13 @@ static int ace_init(struct net_device *dev)
ace_set_txprd(regs, ap, 0);
writel(0, &regs->RxRetCsm);
- /*
- * Enable DMA engine now.
- * If we do this sooner, Mckinley box pukes.
- * I assume it's because Tigon II DMA engine wants to check
- * *something* even before the CPU is started.
- */
- writel(1, &regs->AssistState); /* enable DMA */
+ /*
+ * Enable DMA engine now.
+ * If we do this sooner, Mckinley box pukes.
+ * I assume it's because Tigon II DMA engine wants to check
+ * *something* even before the CPU is started.
+ */
+ writel(1, &regs->AssistState); /* enable DMA */
/*
* Start the NIC CPU
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index b11e573ad57a..ea149c134e15 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -504,3 +504,14 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
return 0;
}
+
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+
+	cdesc = ena_com_get_next_rx_cdesc(io_cq);
+
+	return !cdesc;
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index bb53c3a4f8e9..2f7657227cfe 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -88,6 +88,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
+
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
struct ena_eth_io_intr_reg *intr_reg)
{
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 97c5a89a9cf7..6975150d144e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -75,6 +75,9 @@ static struct workqueue_struct *ena_wq;
MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
+static void check_for_admin_com_state(struct ena_adapter *adapter);
+static void ena_destroy_device(struct ena_adapter *adapter);
+static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev)
{
@@ -158,6 +161,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
ring->per_napi_packets = 0;
ring->per_napi_bytes = 0;
ring->cpu = 0;
+ ring->first_interrupt = false;
+ ring->no_interrupt_event_cnt = 0;
u64_stats_init(&ring->syncp);
}
@@ -1274,6 +1279,9 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
struct ena_napi *ena_napi = data;
+ ena_napi->tx_ring->first_interrupt = true;
+ ena_napi->rx_ring->first_interrupt = true;
+
napi_schedule_irqoff(&ena_napi->napi);
return IRQ_HANDLED;
@@ -1565,7 +1573,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
static int ena_up_complete(struct ena_adapter *adapter)
{
- int rc, i;
+ int rc;
rc = ena_rss_configure(adapter);
if (rc)
@@ -1584,17 +1592,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
ena_napi_enable_all(adapter);
- /* Enable completion queues interrupt */
- for (i = 0; i < adapter->num_queues; i++)
- ena_unmask_interrupt(&adapter->tx_ring[i],
- &adapter->rx_ring[i]);
-
- /* schedule napi in case we had pending packets
- * from the last time we disable napi
- */
- for (i = 0; i < adapter->num_queues; i++)
- napi_schedule(&adapter->ena_napi[i].napi);
-
return 0;
}
@@ -1731,7 +1728,7 @@ create_err:
static int ena_up(struct ena_adapter *adapter)
{
- int rc;
+ int rc, i;
netdev_dbg(adapter->netdev, "%s\n", __func__);
@@ -1774,6 +1771,17 @@ static int ena_up(struct ena_adapter *adapter)
set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ /* Enable completion queues interrupt */
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_unmask_interrupt(&adapter->tx_ring[i],
+ &adapter->rx_ring[i]);
+
+	/* Schedule napi in case we had pending packets
+	 * from the last time we disabled napi
+ */
+ for (i = 0; i < adapter->num_queues; i++)
+ napi_schedule(&adapter->ena_napi[i].napi);
+
return rc;
err_up:
@@ -1884,6 +1892,17 @@ static int ena_close(struct net_device *netdev)
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
+	/* Check the device status and issue a reset if needed */
+ check_for_admin_com_state(adapter);
+ if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+ netif_err(adapter, ifdown, adapter->netdev,
+ "Destroy failure, restarting device\n");
+ ena_dump_stats_to_dmesg(adapter);
+ /* rtnl lock already obtained in dev_ioctl() layer */
+ ena_destroy_device(adapter);
+ ena_restore_device(adapter);
+ }
+
return 0;
}
@@ -2544,11 +2563,12 @@ static void ena_destroy_device(struct ena_adapter *adapter)
ena_com_set_admin_running_state(ena_dev, false);
- ena_close(netdev);
+ if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ ena_down(adapter);
/* Before releasing the ENA resources, a device reset is required.
* (to prevent the device from accessing them).
- * In case the reset flag is set and the device is up, ena_close
+ * In case the reset flag is set and the device is up, ena_down()
* already perform the reset, so it can be skipped.
*/
if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
@@ -2648,8 +2668,32 @@ static void ena_fw_reset_device(struct work_struct *work)
rtnl_unlock();
}
-static int check_missing_comp_in_queue(struct ena_adapter *adapter,
- struct ena_ring *tx_ring)
+static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
+ struct ena_ring *rx_ring)
+{
+ if (likely(rx_ring->first_interrupt))
+ return 0;
+
+ if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
+ return 0;
+
+ rx_ring->no_interrupt_event_cnt++;
+
+ if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
+ netif_err(adapter, rx_err, adapter->netdev,
+ "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
+ rx_ring->qid);
+ adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+ smp_mb__before_atomic();
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
+ struct ena_ring *tx_ring)
{
struct ena_tx_buffer *tx_buf;
unsigned long last_jiffies;
@@ -2659,8 +2703,27 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
for (i = 0; i < tx_ring->ring_size; i++) {
tx_buf = &tx_ring->tx_buffer_info[i];
last_jiffies = tx_buf->last_jiffies;
- if (unlikely(last_jiffies &&
- time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to))) {
+
+ if (last_jiffies == 0)
+ /* no pending Tx at this location */
+ continue;
+
+ if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
+ 2 * adapter->missing_tx_completion_to))) {
+			/* If no interrupt has been received after the grace
+			 * period, schedule a reset
+ */
+ netif_err(adapter, tx_err, adapter->netdev,
+ "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
+ tx_ring->qid);
+ adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+ smp_mb__before_atomic();
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ return -EIO;
+ }
+
+ if (unlikely(time_is_before_jiffies(last_jiffies +
+ adapter->missing_tx_completion_to))) {
if (!tx_buf->print_once)
netif_notice(adapter, tx_err, adapter->netdev,
"Found a Tx that wasn't completed on time, qid %d, index %d.\n",
@@ -2689,9 +2752,10 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
return rc;
}
-static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+static void check_for_missing_completions(struct ena_adapter *adapter)
{
struct ena_ring *tx_ring;
+ struct ena_ring *rx_ring;
int i, budget, rc;
/* Make sure the driver doesn't turn the device in other process */
@@ -2710,8 +2774,13 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
tx_ring = &adapter->tx_ring[i];
+ rx_ring = &adapter->rx_ring[i];
+
+ rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
+ if (unlikely(rc))
+ return;
- rc = check_missing_comp_in_queue(adapter, tx_ring);
+ rc = check_for_rx_interrupt_queue(adapter, rx_ring);
if (unlikely(rc))
return;
@@ -2870,7 +2939,7 @@ static void ena_timer_service(struct timer_list *t)
check_for_admin_com_state(adapter);
- check_for_missing_tx_completions(adapter);
+ check_for_missing_completions(adapter);
check_for_empty_rx_ring(adapter);
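A condensed sketch, using the patch's own fields, of the doubled deadline
the new Tx check applies: a completion is merely reported as late after
missing_tx_completion_to, but a queue that has never seen an interrupt at
all schedules a device reset after twice that period.

static bool example_tx_needs_reset(struct ena_adapter *adapter,
				   struct ena_ring *tx_ring,
				   unsigned long last_jiffies)
{
	/* Never-interrupted queue: suspect a broken MSI-X vector once a
	 * doubled grace period has elapsed and request a device reset.
	 */
	if (!tx_ring->first_interrupt &&
	    time_is_before_jiffies(last_jiffies +
				   2 * adapter->missing_tx_completion_to))
		return true;

	return false;
}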
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 3bbc003871de..f1972b5ab650 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -44,7 +44,7 @@
#include "ena_eth_com.h"
#define DRV_MODULE_VER_MAJOR 1
-#define DRV_MODULE_VER_MINOR 3
+#define DRV_MODULE_VER_MINOR 5
#define DRV_MODULE_VER_SUBMINOR 0
#define DRV_MODULE_NAME "ena"
@@ -122,6 +122,7 @@
* We wait for 6 sec just to be on the safe side.
*/
#define ENA_DEVICE_KALIVE_TIMEOUT (6 * HZ)
+#define ENA_MAX_NO_INTERRUPT_ITERATIONS 3
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
@@ -236,6 +237,9 @@ struct ena_ring {
/* The maximum header length the device can handle */
u8 tx_max_header_size;
+ bool first_interrupt;
+ u16 no_interrupt_event_cnt;
+
/* cpu for TPH */
int cpu;
/* number of tx/rx_buffer_info's entries */
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 9aec43c5bba8..48ca97fbe7bc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -60,6 +60,8 @@ enum ena_regs_reset_reason_types {
ENA_REGS_RESET_USER_TRIGGER = 12,
ENA_REGS_RESET_GENERIC = 13,
+
+ ENA_REGS_RESET_MISS_INTERRUPT = 14,
};
/* ena_registers offsets */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index a74a8fbad53a..7a3ebfd236f5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2930,9 +2930,8 @@ void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
struct ethhdr *eth = (struct ethhdr *)skb->data;
- unsigned char *buf = skb->data;
unsigned char buffer[128];
- unsigned int i, j;
+ unsigned int i;
netdev_dbg(netdev, "\n************** SKB dump ****************\n");
@@ -2943,22 +2942,13 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
- for (i = 0, j = 0; i < skb->len;) {
- j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
- buf[i++]);
-
- if ((i % 32) == 0) {
- netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
- j = 0;
- } else if ((i % 16) == 0) {
- buffer[j++] = ' ';
- buffer[j++] = ' ';
- } else if ((i % 4) == 0) {
- buffer[j++] = ' ';
- }
+ for (i = 0; i < skb->len; i += 32) {
+ unsigned int len = min(skb->len - i, 32U);
+
+ hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+ buffer, sizeof(buffer), false);
+ netdev_dbg(netdev, " %#06x: %s\n", i, buffer);
}
- if (i % 32)
- netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}
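For reference, a minimal sketch of the hex_dump_to_buffer() call (from
<linux/printk.h>) that the rewritten loop relies on: with rowsize 32 and
groupsize 1 it renders up to 32 bytes as space-separated hex pairs into
the caller's buffer.

static void example_hex_row(const u8 *data, unsigned int len)
{
	char line[128];

	/* One row: at most 32 bytes, 1-byte groups, no ASCII column */
	hex_dump_to_buffer(data, min(len, 32U), 32, 1,
			   line, sizeof(line), false);
	pr_debug("%s\n", line);
}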
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index e4ae696920ef..686f6d8c9e79 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -39,4 +39,5 @@ atlantic-objs := aq_main.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
+ hw_atl/hw_atl_utils_fw2x.o \
hw_atl/hw_atl_llh.o
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 57e796870595..0b49f1aeebd3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -50,7 +50,7 @@
#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U
#define AQ_CFG_PCI_FUNC_PORTS 2U
-#define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ)
+#define AQ_CFG_SERVICE_TIMER_INTERVAL (1 * HZ)
#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ))
#define AQ_CFG_SKB_FRAGS_MAX 32U
@@ -65,7 +65,13 @@
/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
-#define AQ_CFG_FC_MODE 3U
+#define AQ_NIC_FC_OFF 0U
+#define AQ_NIC_FC_TX 1U
+#define AQ_NIC_FC_RX 2U
+#define AQ_NIC_FC_FULL 3U
+#define AQ_NIC_FC_AUTO 4U
+
+#define AQ_CFG_FC_MODE AQ_NIC_FC_FULL
#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */
@@ -80,6 +86,7 @@
#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
__stringify(NIC_MINOR_DRIVER_VERSION)"."\
__stringify(NIC_BUILD_DRIVER_VERSION)"."\
- __stringify(NIC_REVISION_DRIVER_VERSION)
+ __stringify(NIC_REVISION_DRIVER_VERSION) \
+ AQ_CFG_DRV_VERSION_SUFFIX
#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 9eb5e222a234..d52b088ff8f0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -16,8 +16,45 @@
#include <linux/pci.h>
#include "ver.h"
-#include "aq_nic.h"
#include "aq_cfg.h"
#include "aq_utils.h"
+#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
+
+#define AQ_DEVICE_ID_0001 0x0001
+#define AQ_DEVICE_ID_D100 0xD100
+#define AQ_DEVICE_ID_D107 0xD107
+#define AQ_DEVICE_ID_D108 0xD108
+#define AQ_DEVICE_ID_D109 0xD109
+
+#define AQ_DEVICE_ID_AQC100 0x00B1
+#define AQ_DEVICE_ID_AQC107 0x07B1
+#define AQ_DEVICE_ID_AQC108 0x08B1
+#define AQ_DEVICE_ID_AQC109 0x09B1
+#define AQ_DEVICE_ID_AQC111 0x11B1
+#define AQ_DEVICE_ID_AQC112 0x12B1
+
+#define AQ_DEVICE_ID_AQC100S 0x80B1
+#define AQ_DEVICE_ID_AQC107S 0x87B1
+#define AQ_DEVICE_ID_AQC108S 0x88B1
+#define AQ_DEVICE_ID_AQC109S 0x89B1
+#define AQ_DEVICE_ID_AQC111S 0x91B1
+#define AQ_DEVICE_ID_AQC112S 0x92B1
+
+#define AQ_DEVICE_ID_AQC111E 0x51B1
+#define AQ_DEVICE_ID_AQC112E 0x52B1
+
+#define HW_ATL_NIC_NAME "aQuantia AQtion 10Gbit Network Adapter"
+
+#define AQ_HWREV_ANY 0
+#define AQ_HWREV_1 1
+#define AQ_HWREV_2 2
+
+#define AQ_NIC_RATE_10G BIT(0)
+#define AQ_NIC_RATE_5G BIT(1)
+#define AQ_NIC_RATE_5GSR BIT(2)
+#define AQ_NIC_RATE_2GS BIT(3)
+#define AQ_NIC_RATE_1G BIT(4)
+#define AQ_NIC_RATE_100M BIT(5)
+
#endif /* AQ_COMMON_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 70efb7467bf3..f2d8063a2cef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -66,14 +66,14 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
"OutUCast",
"OutMCast",
"OutBCast",
- "InUCastOctects",
- "OutUCastOctects",
- "InMCastOctects",
- "OutMCastOctects",
- "InBCastOctects",
- "OutBCastOctects",
- "InOctects",
- "OutOctects",
+ "InUCastOctets",
+ "OutUCastOctets",
+ "InMCastOctets",
+ "OutMCastOctets",
+ "InBCastOctets",
+ "OutBCastOctets",
+ "InOctets",
+ "OutOctets",
"InPacketsDma",
"OutPacketsDma",
"InOctetsDma",
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 0207927dc8a6..a2d416b24ffc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -7,7 +7,7 @@
* version 2, as published by the Free Software Foundation.
*/
-/* File aq_hw.h: Declaraion of abstract interface for NIC hardware specific
+/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
* functions.
*/
@@ -15,12 +15,15 @@
#define AQ_HW_H
#include "aq_common.h"
+#include "aq_rss.h"
+#include "hw_atl/hw_atl_utils.h"
/* NIC H/W capabilities */
struct aq_hw_caps_s {
u64 hw_features;
u64 link_speed_msk;
unsigned int hw_priv_flags;
+ u32 media_type;
u32 rxds;
u32 txds;
u32 txhwb_alignment;
@@ -28,7 +31,7 @@ struct aq_hw_caps_s {
u32 vecs;
u32 mtu;
u32 mac_regs_count;
- u8 ports;
+ u32 hw_alive_check_addr;
u8 msix_irqs;
u8 tcs;
u8 rxd_alignment;
@@ -39,13 +42,34 @@ struct aq_hw_caps_s {
u8 rx_rings;
bool flow_control;
bool is_64_dma;
- u32 fw_ver_expected;
};
struct aq_hw_link_status_s {
unsigned int mbps;
};
+struct aq_stats_s {
+ u64 uprc;
+ u64 mprc;
+ u64 bprc;
+ u64 erpt;
+ u64 uptc;
+ u64 mptc;
+ u64 bptc;
+ u64 erpr;
+ u64 mbtc;
+ u64 bbtc;
+ u64 mbrc;
+ u64 bbrc;
+ u64 ubrc;
+ u64 ubtc;
+ u64 dpc;
+ u64 dma_pkt_rc;
+ u64 dma_pkt_tc;
+ u64 dma_oct_rc;
+ u64 dma_oct_tc;
+};
+
#define AQ_HW_IRQ_INVALID 0U
#define AQ_HW_IRQ_LEGACY 1U
#define AQ_HW_IRQ_MSI 2U
@@ -64,28 +88,43 @@ struct aq_hw_link_status_s {
#define AQ_HW_FLAG_ERRORS (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG)
+#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
+ AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
+ AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
+
+#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
+ AQ_NIC_LINK_DOWN)
+
+#define AQ_HW_MEDIA_TYPE_TP 1U
+#define AQ_HW_MEDIA_TYPE_FIBRE 2U
+
struct aq_hw_s {
- struct aq_obj_s header;
+ atomic_t flags;
+ u8 rbl_enabled:1;
struct aq_nic_cfg_s *aq_nic_cfg;
- struct aq_pci_func_s *aq_pci_func;
+ const struct aq_fw_ops *aq_fw_ops;
void __iomem *mmio;
- unsigned int not_ff_addr;
struct aq_hw_link_status_s aq_link_status;
+ struct hw_aq_atl_utils_mbox mbox;
+ struct hw_atl_stats_s last_stats;
+ struct aq_stats_s curr_stats;
+ u64 speed;
+ u32 itr_tx;
+ u32 itr_rx;
+ unsigned int chip_features;
+ u32 fw_ver_actual;
+ atomic_t dpc;
+ u32 mbox_addr;
+ u32 rpc_addr;
+ u32 rpc_tid;
+ struct hw_aq_atl_utils_fw_rpc rpc;
};
struct aq_ring_s;
struct aq_ring_param_s;
-struct aq_nic_cfg_s;
struct sk_buff;
struct aq_hw_ops {
- struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func,
- unsigned int port, struct aq_hw_ops *ops);
-
- void (*destroy)(struct aq_hw_s *self);
-
- int (*get_hw_caps)(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps);
int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
unsigned int frags);
@@ -99,20 +138,11 @@ struct aq_hw_ops {
int (*hw_ring_tx_head_update)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
- int (*hw_get_mac_permanent)(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
- u8 *mac);
-
int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
- int (*hw_get_link_status)(struct aq_hw_s *self);
-
- int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
-
int (*hw_reset)(struct aq_hw_s *self);
- int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg,
- u8 *mac_addr);
+ int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
int (*hw_start)(struct aq_hw_s *self);
@@ -160,12 +190,10 @@ struct aq_hw_ops {
struct aq_rss_parameters *rss_params);
int (*hw_get_regs)(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
+ const struct aq_hw_caps_s *aq_hw_caps,
+ u32 *regs_buff);
- int (*hw_update_stats)(struct aq_hw_s *self);
-
- int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
- unsigned int *p_count);
+ struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
@@ -174,4 +202,20 @@ struct aq_hw_ops {
int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
};
+struct aq_fw_ops {
+ int (*init)(struct aq_hw_s *self);
+
+ int (*reset)(struct aq_hw_s *self);
+
+ int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac);
+
+ int (*set_link_speed)(struct aq_hw_s *self, u32 speed);
+
+ int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state);
+
+ int (*update_link_status)(struct aq_hw_s *self);
+
+ int (*update_stats)(struct aq_hw_s *self);
+};
+
#endif /* AQ_HW_H */
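Two things to read out of the aq_hw.h change: datapath callbacks stay in aq_hw_ops while firmware-mediated operations (permanent MAC, link speed, MPI state, statistics) move to the new aq_fw_ops, and the raw counters land in struct aq_stats_s. The field names decode as u/m/b = unicast/multicast/broadcast, p/b = packets/bytes, rc/tc = receive/transmit count, so ubrc is unicast bytes received. A sketch of aggregation under that naming, mirroring how aq_nic.c later sums the classes:

	/* Sketch: total received packets are the sum of the per-class counters. */
	static u64 aq_sketch_rx_packets(const struct aq_stats_s *s)
	{
		return s->uprc + s->mprc + s->bprc;
	}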
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 5f13465995f6..d526c4f19d34 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -13,6 +13,7 @@
#include "aq_hw_utils.h"
#include "aq_hw.h"
+#include "aq_nic.h"
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
u32 shift, u32 val)
@@ -39,8 +40,10 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
{
u32 value = readl(hw->mmio + reg);
- if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr))
- aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG);
+ if ((~0U) == value &&
+ (~0U) == readl(hw->mmio +
+ hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr))
+ aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
return value;
}
@@ -54,11 +57,11 @@ int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;
- if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
- if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) {
+ if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_HW)) {
err = -EIO;
goto err_exit;
}
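The two-read pattern in aq_hw_read_reg() above is deliberate: a surprise-removed PCIe function returns all-ones for every MMIO read, so a single ~0U data value is ambiguous. Re-reading hw_alive_check_addr, a register that can never legitimately hold all-ones on live hardware, distinguishes a genuine 0xFFFFFFFF register value from a dead device before AQ_HW_FLAG_ERR_UNPLUG is raised.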
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
index 03b72ddbffb9..dc88a1221f1d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -35,6 +35,9 @@ do { \
} \
} while (0)
+#define aq_pr_err(...) pr_err(AQ_CFG_DRV_NAME ": " __VA_ARGS__)
+#define aq_pr_trace(...) pr_info(AQ_CFG_DRV_NAME ": " __VA_ARGS__)
+
struct aq_hw_s;
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 5d6c40d86775..ba5fe8c4125d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -13,49 +13,39 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
-#include "hw_atl/hw_atl_a0.h"
-#include "hw_atl/hw_atl_b0.h"
#include <linux/netdevice.h>
#include <linux/module.h>
-static const struct pci_device_id aq_pci_tbl[] = {
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), },
- { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), },
- {}
-};
-
-MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
-
MODULE_LICENSE("GPL v2");
MODULE_VERSION(AQ_CFG_DRV_VERSION);
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
-static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev)
+static const struct net_device_ops aq_ndev_ops;
+
+struct net_device *aq_ndev_alloc(void)
{
- struct aq_hw_ops *ops = NULL;
+ struct net_device *ndev = NULL;
+ struct aq_nic_s *aq_nic = NULL;
+
+ ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
+ if (!ndev)
+ return NULL;
- ops = hw_atl_a0_get_ops_by_id(pdev);
- if (!ops)
- ops = hw_atl_b0_get_ops_by_id(pdev);
+ aq_nic = netdev_priv(ndev);
+ aq_nic->ndev = ndev;
+ ndev->netdev_ops = &aq_ndev_ops;
+ ndev->ethtool_ops = &aq_ethtool_ops;
- return ops;
+ return ndev;
}
static int aq_ndev_open(struct net_device *ndev)
{
- struct aq_nic_s *aq_nic = NULL;
int err = 0;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
- aq_nic = aq_nic_alloc_hot(ndev);
- if (!aq_nic) {
- err = -ENOMEM;
- goto err_exit;
- }
err = aq_nic_init(aq_nic);
if (err < 0)
goto err_exit;
@@ -78,7 +68,6 @@ static int aq_ndev_close(struct net_device *ndev)
if (err < 0)
goto err_exit;
aq_nic_deinit(aq_nic);
- aq_nic_free_hot_resources(aq_nic);
err_exit:
return err;
@@ -150,15 +139,13 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
if (err < 0)
- goto err_exit;
+ return;
if (netdev_mc_count(ndev)) {
err = aq_nic_set_multicast_list(aq_nic, ndev);
if (err < 0)
- goto err_exit;
+ return;
}
-
-err_exit:;
}
static const struct net_device_ops aq_ndev_ops = {
@@ -170,66 +157,3 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features
};
-
-static int aq_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_id)
-{
- struct aq_hw_ops *aq_hw_ops = NULL;
- struct aq_pci_func_s *aq_pci_func = NULL;
- int err = 0;
-
- err = pci_enable_device(pdev);
- if (err < 0)
- goto err_exit;
- aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev);
- aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev,
- &aq_ndev_ops, &aq_ethtool_ops);
- if (!aq_pci_func) {
- err = -ENOMEM;
- goto err_exit;
- }
- err = aq_pci_func_init(aq_pci_func);
- if (err < 0)
- goto err_exit;
-
-err_exit:
- if (err < 0) {
- if (aq_pci_func)
- aq_pci_func_free(aq_pci_func);
- }
- return err;
-}
-
-static void aq_pci_remove(struct pci_dev *pdev)
-{
- struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
-
- aq_pci_func_deinit(aq_pci_func);
- aq_pci_func_free(aq_pci_func);
-}
-
-static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
-{
- struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
-
- return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
-}
-
-static int aq_pci_resume(struct pci_dev *pdev)
-{
- struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
- pm_message_t pm_msg = PMSG_RESTORE;
-
- return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
-}
-
-static struct pci_driver aq_pci_ops = {
- .name = AQ_CFG_DRV_NAME,
- .id_table = aq_pci_tbl,
- .probe = aq_pci_probe,
- .remove = aq_pci_remove,
- .suspend = aq_pci_suspend,
- .resume = aq_pci_resume,
-};
-
-module_pci_driver(aq_pci_ops);
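With this removal, aq_main.c is reduced to net_device plumbing; the PCI device table, the probe/remove and suspend/resume handlers, and the module_pci_driver() registration reappear in aq_pci_func.c later in this diff, built around struct aq_nic_s instead of the deleted aq_pci_func_s.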
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index 9748e7e575e0..ce92152eb43e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -14,4 +14,6 @@
#include "aq_common.h"
+struct net_device *aq_ndev_alloc(void);
+
#endif /* AQ_MAIN_H */
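aq_ndev_alloc(), declared here and defined above in aq_main.c, ties the lifetime of struct aq_nic_s to the net_device: alloc_etherdev_mq() reserves sizeof(struct aq_nic_s) of private space, and each handler recovers it with netdev_priv(). That single allocation is what lets the separate hot/cold allocation paths be deleted from aq_nic.c below.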
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 78dfb2ab78ce..ebbaf63eaf47 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -14,7 +14,6 @@
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
-#include "aq_nic_internal.h"
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
@@ -37,6 +36,8 @@ static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
+static void aq_nic_update_ndev_stats(struct aq_nic_s *self);
+
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -59,19 +60,13 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}
-/* Fills aq_nic_cfg with valid defaults */
-static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
+/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
+void aq_nic_cfg_start(struct aq_nic_s *self)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
- cfg->aq_hw_caps = &self->aq_hw_caps;
-
- cfg->vecs = AQ_CFG_VECS_DEF;
cfg->tcs = AQ_CFG_TCS_DEF;
- cfg->rxds = AQ_CFG_RXDS_DEF;
- cfg->txds = AQ_CFG_TXDS_DEF;
-
cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
cfg->itr = aq_itr;
@@ -92,19 +87,13 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
cfg->vlan_id = 0U;
aq_nic_rss_init(self, cfg->num_rss_queues);
-}
-
-/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
-int aq_nic_cfg_start(struct aq_nic_s *self)
-{
- struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
/*descriptors */
- cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds);
- cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds);
+ cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF);
+ cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF);
/*rss rings */
- cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs);
+ cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
cfg->vecs = min(cfg->vecs, num_online_cpus());
/* cfg->vecs should be power of 2 for RSS */
if (cfg->vecs >= 8U)
@@ -118,23 +107,22 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
- cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
+ cfg->irq_type = aq_pci_func_get_irq_type(self);
if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
- (self->aq_hw_caps.vecs == 1U) ||
+ (cfg->aq_hw_caps->vecs == 1U) ||
(cfg->vecs == 1U)) {
cfg->is_rss = 0U;
cfg->vecs = 1U;
}
- cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk;
- cfg->hw_features = self->aq_hw_caps.hw_features;
- return 0;
+ cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
+ cfg->hw_features = cfg->aq_hw_caps->hw_features;
}
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
- int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
+ int err = self->aq_fw_ops->update_link_status(self->aq_hw);
if (err)
return err;
@@ -148,9 +136,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
self->link_status = self->aq_hw->aq_link_status;
if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
- aq_utils_obj_set(&self->header.flags,
+ aq_utils_obj_set(&self->flags,
AQ_NIC_FLAG_STARTED);
- aq_utils_obj_clear(&self->header.flags,
+ aq_utils_obj_clear(&self->flags,
AQ_NIC_LINK_DOWN);
netif_carrier_on(self->ndev);
netif_tx_wake_all_queues(self->ndev);
@@ -158,7 +146,7 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
netif_carrier_off(self->ndev);
netif_tx_disable(self->ndev);
- aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+ aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
}
return 0;
}
@@ -166,39 +154,27 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
static void aq_nic_service_timer_cb(struct timer_list *t)
{
struct aq_nic_s *self = from_timer(self, t, service_timer);
- struct net_device *ndev = aq_nic_get_ndev(self);
+ int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
int err = 0;
- unsigned int i = 0U;
- struct aq_ring_stats_rx_s stats_rx;
- struct aq_ring_stats_tx_s stats_tx;
- if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
+ if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit;
err = aq_nic_update_link_status(self);
if (err)
goto err_exit;
- if (self->aq_hw_ops.hw_update_stats)
- self->aq_hw_ops.hw_update_stats(self->aq_hw);
+ if (self->aq_fw_ops->update_stats)
+ self->aq_fw_ops->update_stats(self->aq_hw);
- memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
- memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
- for (i = AQ_DIMOF(self->aq_vec); i--;) {
- if (self->aq_vec[i])
- aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
- }
+ aq_nic_update_ndev_stats(self);
- ndev->stats.rx_packets = stats_rx.packets;
- ndev->stats.rx_bytes = stats_rx.bytes;
- ndev->stats.rx_errors = stats_rx.errors;
- ndev->stats.tx_packets = stats_tx.packets;
- ndev->stats.tx_bytes = stats_tx.bytes;
- ndev->stats.tx_errors = stats_tx.errors;
+ /* If no link - use faster timer rate to detect link up asap */
+ if (!netif_carrier_ok(self->ndev))
+ ctimer = max(ctimer / 2, 1);
err_exit:
- mod_timer(&self->service_timer,
- jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
+ mod_timer(&self->service_timer, jiffies + ctimer);
}
static void aq_nic_polling_timer_cb(struct timer_list *t)
@@ -215,59 +191,6 @@ static void aq_nic_polling_timer_cb(struct timer_list *t)
AQ_CFG_POLLING_TIMER_INTERVAL);
}
-static struct net_device *aq_nic_ndev_alloc(void)
-{
- return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
-}
-
-struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *et_ops,
- struct device *dev,
- struct aq_pci_func_s *aq_pci_func,
- unsigned int port,
- const struct aq_hw_ops *aq_hw_ops)
-{
- struct net_device *ndev = NULL;
- struct aq_nic_s *self = NULL;
- int err = 0;
-
- ndev = aq_nic_ndev_alloc();
- if (!ndev) {
- err = -ENOMEM;
- goto err_exit;
- }
-
- self = netdev_priv(ndev);
-
- ndev->netdev_ops = ndev_ops;
- ndev->ethtool_ops = et_ops;
-
- SET_NETDEV_DEV(ndev, dev);
-
- ndev->if_port = port;
- self->ndev = ndev;
-
- self->aq_pci_func = aq_pci_func;
-
- self->aq_hw_ops = *aq_hw_ops;
- self->port = (u8)port;
-
- self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
- &self->aq_hw_ops);
- err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps);
- if (err < 0)
- goto err_exit;
-
- aq_nic_cfg_init_defaults(self);
-
-err_exit:
- if (err < 0) {
- aq_nic_free_hot_resources(self);
- self = NULL;
- }
- return self;
-}
-
int aq_nic_ndev_register(struct aq_nic_s *self)
{
int err = 0;
@@ -276,10 +199,14 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
err = -EINVAL;
goto err_exit;
}
- err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
- self->aq_nic_cfg.aq_hw_caps,
+
+ err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
+ if (err)
+ goto err_exit;
+
+ err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
self->ndev->dev_addr);
- if (err < 0)
+ if (err)
goto err_exit;
#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
@@ -290,83 +217,39 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
}
#endif
+ for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
+ self->aq_vecs++) {
+ self->aq_vec[self->aq_vecs] =
+ aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
+ if (!self->aq_vec[self->aq_vecs]) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ }
+
netif_carrier_off(self->ndev);
netif_tx_disable(self->ndev);
err = register_netdev(self->ndev);
- if (err < 0)
+ if (err)
goto err_exit;
err_exit:
return err;
}
-int aq_nic_ndev_init(struct aq_nic_s *self)
+void aq_nic_ndev_init(struct aq_nic_s *self)
{
- struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
+ const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;
self->ndev->hw_features |= aq_hw_caps->hw_features;
self->ndev->features = aq_hw_caps->hw_features;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
- self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;
+ self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
- return 0;
-}
-
-void aq_nic_ndev_free(struct aq_nic_s *self)
-{
- if (!self->ndev)
- goto err_exit;
-
- if (self->ndev->reg_state == NETREG_REGISTERED)
- unregister_netdev(self->ndev);
-
- if (self->aq_hw)
- self->aq_hw_ops.destroy(self->aq_hw);
-
- free_netdev(self->ndev);
-
-err_exit:;
-}
-
-struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
-{
- struct aq_nic_s *self = NULL;
- int err = 0;
-
- if (!ndev) {
- err = -EINVAL;
- goto err_exit;
- }
- self = netdev_priv(ndev);
-
- if (!self) {
- err = -EINVAL;
- goto err_exit;
- }
- if (netif_running(ndev))
- netif_tx_disable(ndev);
- netif_carrier_off(self->ndev);
-
- for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
- self->aq_vecs++) {
- self->aq_vec[self->aq_vecs] =
- aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg);
- if (!self->aq_vec[self->aq_vecs]) {
- err = -ENOMEM;
- goto err_exit;
- }
- }
-
-err_exit:
- if (err < 0) {
- aq_nic_free_hot_resources(self);
- self = NULL;
- }
- return self;
}
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
@@ -375,11 +258,6 @@ void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
self->aq_ring_tx[idx] = ring;
}
-struct device *aq_nic_get_dev(struct aq_nic_s *self)
-{
- return self->ndev->dev.parent;
-}
-
struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
return self->ndev;
@@ -392,18 +270,20 @@ int aq_nic_init(struct aq_nic_s *self)
unsigned int i = 0U;
self->power_state = AQ_HW_POWER_STATE_D0;
- err = self->aq_hw_ops.hw_reset(self->aq_hw);
+ err = self->aq_hw_ops->hw_reset(self->aq_hw);
if (err < 0)
goto err_exit;
- err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
- aq_nic_get_ndev(self)->dev_addr);
+ err = self->aq_hw_ops->hw_init(self->aq_hw,
+ aq_nic_get_ndev(self)->dev_addr);
if (err < 0)
goto err_exit;
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw);
+ aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
+
+ netif_carrier_off(self->ndev);
err_exit:
return err;
@@ -415,13 +295,13 @@ int aq_nic_start(struct aq_nic_s *self)
int err = 0;
unsigned int i = 0U;
- err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+ err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
self->mc_list.count);
if (err < 0)
goto err_exit;
- err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
+ err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
self->packet_filter);
if (err < 0)
goto err_exit;
@@ -433,7 +313,7 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
- err = self->aq_hw_ops.hw_start(self->aq_hw);
+ err = self->aq_hw_ops->hw_start(self->aq_hw);
if (err < 0)
goto err_exit;
@@ -451,14 +331,14 @@ int aq_nic_start(struct aq_nic_s *self)
} else {
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
- err = aq_pci_func_alloc_irq(self->aq_pci_func, i,
+ err = aq_pci_func_alloc_irq(self, i,
self->ndev->name, aq_vec,
- aq_vec_get_affinity_mask(aq_vec));
+ aq_vec_get_affinity_mask(aq_vec));
if (err < 0)
goto err_exit;
}
- err = self->aq_hw_ops.hw_irq_enable(self->aq_hw,
+ err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
AQ_CFG_IRQ_MASK);
if (err < 0)
goto err_exit;
@@ -643,9 +523,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
frags = aq_nic_map_skb(self, skb, ring);
if (likely(frags)) {
- err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
- ring,
- frags);
+ err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
+ ring, frags);
if (err >= 0) {
++ring->stats.tx.packets;
ring->stats.tx.bytes += skb->len;
@@ -660,14 +539,14 @@ err_exit:
int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
- return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw);
+ return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
int err = 0;
- err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags);
+ err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
if (err < 0)
goto err_exit;
@@ -699,11 +578,11 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
* multicast mask
*/
self->packet_filter |= IFF_ALLMULTI;
- self->aq_hw->aq_nic_cfg->mc_list_count = 0;
- return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
- self->packet_filter);
+ self->aq_nic_cfg.mc_list_count = 0;
+ return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
+ self->packet_filter);
} else {
- return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+ return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
self->mc_list.count);
}
@@ -718,7 +597,7 @@ int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
- return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr);
+ return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}
unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
@@ -733,8 +612,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
regs->version = 1;
- err = self->aq_hw_ops.hw_get_regs(self->aq_hw,
- &self->aq_hw_caps, regs_buff);
+ err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
+ self->aq_nic_cfg.aq_hw_caps,
+ regs_buff);
if (err < 0)
goto err_exit;
@@ -744,22 +624,45 @@ err_exit:
int aq_nic_get_regs_count(struct aq_nic_s *self)
{
- return self->aq_hw_caps.mac_regs_count;
+ return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
unsigned int count = 0U;
- int err = 0;
+ struct aq_vec_s *aq_vec = NULL;
+ struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
- err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count);
- if (err < 0)
+ if (!stats)
goto err_exit;
- data += count;
- count = 0U;
+ data[i] = stats->uprc + stats->mprc + stats->bprc;
+ data[++i] = stats->uprc;
+ data[++i] = stats->mprc;
+ data[++i] = stats->bprc;
+ data[++i] = stats->erpt;
+ data[++i] = stats->uptc + stats->mptc + stats->bptc;
+ data[++i] = stats->uptc;
+ data[++i] = stats->mptc;
+ data[++i] = stats->bptc;
+ data[++i] = stats->ubrc;
+ data[++i] = stats->ubtc;
+ data[++i] = stats->mbrc;
+ data[++i] = stats->mbtc;
+ data[++i] = stats->bbrc;
+ data[++i] = stats->bbtc;
+ data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+ data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+ data[++i] = stats->dma_pkt_rc;
+ data[++i] = stats->dma_pkt_tc;
+ data[++i] = stats->dma_oct_rc;
+ data[++i] = stats->dma_oct_tc;
+ data[++i] = stats->dpc;
+
+ i++;
+
+ data += i;
for (i = 0U, aq_vec = self->aq_vec[0];
aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
@@ -768,45 +671,65 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
}
err_exit:;
- (void)err;
+}
+
+static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
+{
+ struct net_device *ndev = self->ndev;
+ struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
+
+ ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
+ ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
+ ndev->stats.rx_errors = stats->erpr;
+ ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc;
+ ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc;
+ ndev->stats.tx_errors = stats->erpt;
+ ndev->stats.multicast = stats->mprc;
}
void aq_nic_get_link_ksettings(struct aq_nic_s *self,
struct ethtool_link_ksettings *cmd)
{
- cmd->base.port = PORT_TP;
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+ cmd->base.port = PORT_FIBRE;
+ else
+ cmd->base.port = PORT_TP;
/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
cmd->base.duplex = DUPLEX_FULL;
cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;
ethtool_link_ksettings_zero_link_mode(cmd, supported);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
ethtool_link_ksettings_add_link_mode(cmd, supported,
10000baseT_Full);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_5G)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
ethtool_link_ksettings_add_link_mode(cmd, supported,
5000baseT_Full);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_2GS)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
ethtool_link_ksettings_add_link_mode(cmd, supported,
2500baseT_Full);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
ethtool_link_ksettings_add_link_mode(cmd, supported,
1000baseT_Full);
- if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M)
+ if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Full);
- if (self->aq_hw_caps.flow_control)
+ if (self->aq_nic_cfg.aq_hw_caps->flow_control)
ethtool_link_ksettings_add_link_mode(cmd, supported,
Pause);
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ else
+ ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
ethtool_link_ksettings_zero_link_mode(cmd, advertising);
@@ -837,7 +760,10 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+ else
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}
int aq_nic_set_link_ksettings(struct aq_nic_s *self,
@@ -848,7 +774,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
int err = 0;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
- rate = self->aq_hw_caps.link_speed_msk;
+ rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
self->aq_nic_cfg.is_autoneg = true;
} else {
speed = cmd->base.speed;
@@ -879,7 +805,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
goto err_exit;
break;
}
- if (!(self->aq_hw_caps.link_speed_msk & rate)) {
+ if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
err = -1;
goto err_exit;
}
@@ -887,7 +813,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
self->aq_nic_cfg.is_autoneg = false;
}
- err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate);
+ err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
if (err < 0)
goto err_exit;
@@ -906,7 +832,7 @@ u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
u32 fw_version = 0U;
- self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version);
+ self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);
return fw_version;
}
@@ -921,18 +847,18 @@ int aq_nic_stop(struct aq_nic_s *self)
del_timer_sync(&self->service_timer);
- self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
+ self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
if (self->aq_nic_cfg.is_polling)
del_timer_sync(&self->polling_timer);
else
- aq_pci_func_free_irqs(self->aq_pci_func);
+ aq_pci_func_free_irqs(self);
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_stop(aq_vec);
- return self->aq_hw_ops.hw_stop(self->aq_hw);
+ return self->aq_hw_ops->hw_stop(self->aq_hw);
}
void aq_nic_deinit(struct aq_nic_s *self)
@@ -948,23 +874,23 @@ void aq_nic_deinit(struct aq_nic_s *self)
aq_vec_deinit(aq_vec);
if (self->power_state == AQ_HW_POWER_STATE_D0) {
- (void)self->aq_hw_ops.hw_deinit(self->aq_hw);
+ (void)self->aq_hw_ops->hw_deinit(self->aq_hw);
} else {
- (void)self->aq_hw_ops.hw_set_power(self->aq_hw,
+ (void)self->aq_hw_ops->hw_set_power(self->aq_hw,
self->power_state);
}
err_exit:;
}
-void aq_nic_free_hot_resources(struct aq_nic_s *self)
+void aq_nic_free_vectors(struct aq_nic_s *self)
{
unsigned int i = 0U;
if (!self)
goto err_exit;
- for (i = AQ_DIMOF(self->aq_vec); i--;) {
+ for (i = ARRAY_SIZE(self->aq_vec); i--;) {
if (self->aq_vec[i]) {
aq_vec_free(self->aq_vec[i]);
self->aq_vec[i] = NULL;
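The pattern repeated through aq_nic.c is mechanical but meaningful: self->aq_hw_ops is now a const pointer ('->' instead of '.'), and link and statistics operations go through self->aq_fw_ops. A hedged sketch of the resulting call shape (hypothetical helper; assumes both ops tables were bound at probe/register time):

	/* Sketch: request the fastest supported rate, then refresh link state. */
	static int aq_sketch_renegotiate(struct aq_nic_s *self)
	{
		u32 rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		int err;

		err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
		if (err < 0)
			return err;
		return self->aq_fw_ops->update_link_status(self->aq_hw);
	}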
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 4309983acdd6..d16b0f1a95aa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -14,26 +14,15 @@
#include "aq_common.h"
#include "aq_rss.h"
+#include "aq_hw.h"
struct aq_ring_s;
-struct aq_pci_func_s;
struct aq_hw_ops;
-
-#define AQ_NIC_FC_OFF 0U
-#define AQ_NIC_FC_TX 1U
-#define AQ_NIC_FC_RX 2U
-#define AQ_NIC_FC_FULL 3U
-#define AQ_NIC_FC_AUTO 4U
-
-#define AQ_NIC_RATE_10G BIT(0)
-#define AQ_NIC_RATE_5G BIT(1)
-#define AQ_NIC_RATE_5GSR BIT(2)
-#define AQ_NIC_RATE_2GS BIT(3)
-#define AQ_NIC_RATE_1G BIT(4)
-#define AQ_NIC_RATE_100M BIT(5)
+struct aq_fw_s;
+struct aq_vec_s;
struct aq_nic_cfg_s {
- struct aq_hw_caps_s *aq_hw_caps;
+ const struct aq_hw_caps_s *aq_hw_caps;
u64 hw_features;
u32 rxds; /* rx ring size, descriptors # */
u32 txds; /* tx ring size, descriptors # */
@@ -44,7 +33,6 @@ struct aq_nic_cfg_s {
u16 tx_itr;
u32 num_rss_queues;
u32 mtu;
- u32 ucp_0x364;
u32 flow_control;
u32 link_speed_msk;
u32 vlan_id;
@@ -69,20 +57,43 @@ struct aq_nic_cfg_s {
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
-struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *et_ops,
- struct device *dev,
- struct aq_pci_func_s *aq_pci_func,
- unsigned int port,
- const struct aq_hw_ops *aq_hw_ops);
-int aq_nic_ndev_init(struct aq_nic_s *self);
+struct aq_nic_s {
+ atomic_t flags;
+ struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
+ struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
+ struct aq_hw_s *aq_hw;
+ struct net_device *ndev;
+ unsigned int aq_vecs;
+ unsigned int packet_filter;
+ unsigned int power_state;
+ u8 port;
+ const struct aq_hw_ops *aq_hw_ops;
+ const struct aq_fw_ops *aq_fw_ops;
+ struct aq_nic_cfg_s aq_nic_cfg;
+ struct timer_list service_timer;
+ struct timer_list polling_timer;
+ struct aq_hw_link_status_s link_status;
+ struct {
+ u32 count;
+ u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+ } mc_list;
+
+ struct pci_dev *pdev;
+ unsigned int msix_entry_mask;
+};
+
+static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
+{
+ return self->ndev->dev.parent;
+}
+
+void aq_nic_ndev_init(struct aq_nic_s *self);
struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev);
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
struct aq_ring_s *ring);
-struct device *aq_nic_get_dev(struct aq_nic_s *self);
struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
int aq_nic_init(struct aq_nic_s *self);
-int aq_nic_cfg_start(struct aq_nic_s *self);
+void aq_nic_cfg_start(struct aq_nic_s *self);
int aq_nic_ndev_register(struct aq_nic_s *self);
void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
@@ -93,6 +104,7 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
int aq_nic_stop(struct aq_nic_s *self);
void aq_nic_deinit(struct aq_nic_s *self);
void aq_nic_free_hot_resources(struct aq_nic_s *self);
+void aq_nic_free_vectors(struct aq_nic_s *self);
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev);
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
deleted file mode 100644
index e7d2711dc165..000000000000
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- */
-
-/* File aq_nic_internal.h: Definition of private object structure. */
-
-#ifndef AQ_NIC_INTERNAL_H
-#define AQ_NIC_INTERNAL_H
-
-struct aq_nic_s {
- struct aq_obj_s header;
- struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
- struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
- struct aq_hw_s *aq_hw;
- struct net_device *ndev;
- struct aq_pci_func_s *aq_pci_func;
- unsigned int aq_vecs;
- unsigned int packet_filter;
- unsigned int power_state;
- u8 port;
- struct aq_hw_ops aq_hw_ops;
- struct aq_hw_caps_s aq_hw_caps;
- struct aq_nic_cfg_s aq_nic_cfg;
- struct timer_list service_timer;
- struct timer_list polling_timer;
- struct aq_hw_link_status_s link_status;
- struct {
- u32 count;
- u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
- } mc_list;
-};
-
-#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
- AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
- AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
-
-#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
- AQ_NIC_LINK_DOWN)
-
-#endif /* AQ_NIC_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index cadaa646c89f..22889fc158f2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -9,180 +9,135 @@
/* File aq_pci_func.c: Definition of PCI functions. */
-#include "aq_pci_func.h"
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include "aq_main.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
-#include <linux/interrupt.h>
-
-struct aq_pci_func_s {
- struct pci_dev *pdev;
- struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS];
- void __iomem *mmio;
- void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS];
- resource_size_t mmio_pa;
- unsigned int msix_entry_mask;
- unsigned int ports;
- bool is_pci_enabled;
- bool is_regions;
- bool is_pci_using_dac;
- struct aq_hw_caps_s aq_hw_caps;
+#include "aq_pci_func.h"
+#include "hw_atl/hw_atl_a0.h"
+#include "hw_atl/hw_atl_b0.h"
+
+static const struct pci_device_id aq_pci_tbl[] = {
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },
+
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },
+
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },
+
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111E), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112E), },
+
+ {}
};
-struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
- struct pci_dev *pdev,
- const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *eth_ops)
-{
- struct aq_pci_func_s *self = NULL;
- int err = 0;
- unsigned int port = 0U;
-
- if (!aq_hw_ops) {
- err = -EFAULT;
- goto err_exit;
- }
- self = kzalloc(sizeof(*self), GFP_KERNEL);
- if (!self) {
- err = -ENOMEM;
- goto err_exit;
- }
-
- pci_set_drvdata(pdev, self);
- self->pdev = pdev;
+static const struct aq_board_revision_s hw_atl_boards[] = {
+ { AQ_DEVICE_ID_0001, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
+ { AQ_DEVICE_ID_D100, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
+ { AQ_DEVICE_ID_D107, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
+ { AQ_DEVICE_ID_D108, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
+ { AQ_DEVICE_ID_D109, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },
+
+ { AQ_DEVICE_ID_0001, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_D100, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
+ { AQ_DEVICE_ID_D107, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
+ { AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },
+
+ { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+ { AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
+ { AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
+ { AQ_DEVICE_ID_AQC111, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
+ { AQ_DEVICE_ID_AQC112, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },
+
+ { AQ_DEVICE_ID_AQC100S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
+ { AQ_DEVICE_ID_AQC107S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
+ { AQ_DEVICE_ID_AQC108S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
+ { AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
+ { AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
+ { AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
+
+ { AQ_DEVICE_ID_AQC111E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111e, },
+ { AQ_DEVICE_ID_AQC112E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112e, },
+};
- err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps);
- if (err < 0)
- goto err_exit;
+MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
- self->ports = self->aq_hw_caps.ports;
+static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
+ const struct aq_hw_ops **ops,
+ const struct aq_hw_caps_s **caps)
+{
+ int i = 0;
- for (port = 0; port < self->ports; ++port) {
- struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
- &pdev->dev, self,
- port, aq_hw_ops);
+ if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
+ return -EINVAL;
- if (!aq_nic) {
- err = -ENOMEM;
- goto err_exit;
+ for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
+ if (hw_atl_boards[i].devid == pdev->device &&
+ (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
+ hw_atl_boards[i].revision == pdev->revision)) {
+ *ops = hw_atl_boards[i].ops;
+ *caps = hw_atl_boards[i].caps;
+ break;
}
- self->port[port] = aq_nic;
}
-err_exit:
- if (err < 0) {
- if (self)
- aq_pci_func_free(self);
- self = NULL;
- }
+ if (i == ARRAY_SIZE(hw_atl_boards))
+ return -EINVAL;
- (void)err;
- return self;
+ return 0;
}
-int aq_pci_func_init(struct aq_pci_func_s *self)
+int aq_pci_func_init(struct pci_dev *pdev)
{
int err = 0;
- unsigned int bar = 0U;
- unsigned int port = 0U;
- unsigned int numvecs = 0U;
-
- err = pci_enable_device(self->pdev);
- if (err < 0)
- goto err_exit;
- self->is_pci_enabled = true;
-
- err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64));
- self->is_pci_using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+
}
if (err) {
- err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!err)
- err = pci_set_consistent_dma_mask(self->pdev,
+ err = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
- self->is_pci_using_dac = 0;
}
if (err != 0) {
err = -ENOSR;
goto err_exit;
}
- err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio");
+ err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
if (err < 0)
goto err_exit;
- self->is_regions = true;
+ pci_set_master(pdev);
- pci_set_master(self->pdev);
-
- for (bar = 0; bar < 4; ++bar) {
- if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) {
- resource_size_t reg_sz;
-
- self->mmio_pa = pci_resource_start(self->pdev, bar);
- if (self->mmio_pa == 0U) {
- err = -EIO;
- goto err_exit;
- }
-
- reg_sz = pci_resource_len(self->pdev, bar);
- if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
- err = -EIO;
- goto err_exit;
- }
-
- self->mmio = ioremap_nocache(self->mmio_pa, reg_sz);
- if (!self->mmio) {
- err = -EIO;
- goto err_exit;
- }
- break;
- }
- }
-
- numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs);
- numvecs = min(numvecs, num_online_cpus());
-
- /* enable interrupts */
-#if !AQ_CFG_FORCE_LEGACY_INT
- err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX);
-
- if (err < 0) {
- err = pci_alloc_irq_vectors(self->pdev, 1, 1,
- PCI_IRQ_MSI | PCI_IRQ_LEGACY);
- if (err < 0)
- goto err_exit;
- }
-#endif /* AQ_CFG_FORCE_LEGACY_INT */
-
- /* net device init */
- for (port = 0; port < self->ports; ++port) {
- if (!self->port[port])
- continue;
-
- err = aq_nic_cfg_start(self->port[port]);
- if (err < 0)
- goto err_exit;
-
- err = aq_nic_ndev_init(self->port[port]);
- if (err < 0)
- goto err_exit;
-
- err = aq_nic_ndev_register(self->port[port]);
- if (err < 0)
- goto err_exit;
- }
+ return 0;
err_exit:
- if (err < 0)
- aq_pci_func_deinit(self);
return err;
}
-int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
+int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
char *name, void *aq_vec, cpumask_t *affinity_mask)
{
struct pci_dev *pdev = self->pdev;
@@ -203,11 +158,10 @@ int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
irq_set_affinity_hint(pci_irq_vector(pdev, i),
affinity_mask);
}
-
return err;
}
-void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
+void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
struct pci_dev *pdev = self->pdev;
unsigned int i = 0U;
@@ -223,12 +177,7 @@ void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
}
}
-void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self)
-{
- return self->mmio;
-}
-
-unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
+unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
{
if (self->pdev->msix_enabled)
return AQ_HW_IRQ_MSIX;
@@ -237,62 +186,159 @@ unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
return AQ_HW_IRQ_LEGACY;
}
-void aq_pci_func_deinit(struct aq_pci_func_s *self)
+static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
{
- if (!self)
- goto err_exit;
-
- aq_pci_func_free_irqs(self);
pci_free_irq_vectors(self->pdev);
-
- if (self->is_regions)
- pci_release_regions(self->pdev);
-
- if (self->is_pci_enabled)
- pci_disable_device(self->pdev);
-
-err_exit:;
}
-void aq_pci_func_free(struct aq_pci_func_s *self)
+static int aq_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
{
- unsigned int port = 0U;
+ struct aq_nic_s *self = NULL;
+ int err = 0;
+ struct net_device *ndev;
+ resource_size_t mmio_pa;
+ u32 bar;
+ u32 numvecs;
- if (!self)
- goto err_exit;
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
- for (port = 0; port < self->ports; ++port) {
- if (!self->port[port])
- continue;
+ err = aq_pci_func_init(pdev);
+ if (err)
+ goto err_pci_func;
- aq_nic_ndev_free(self->port[port]);
+ ndev = aq_ndev_alloc();
+ if (!ndev) {
+ err = -ENOMEM;
+ goto err_ndev;
}
- if (self->mmio)
- iounmap(self->mmio);
+ self = netdev_priv(ndev);
+ self->pdev = pdev;
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ pci_set_drvdata(pdev, self);
- kfree(self);
+ err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
+ &aq_nic_get_cfg(self)->aq_hw_caps);
+ if (err)
+ goto err_ioremap;
-err_exit:;
-}
+ self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
+ self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
-int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
- pm_message_t *pm_msg)
-{
- int err = 0;
- unsigned int port = 0U;
+ for (bar = 0; bar < 4; ++bar) {
+ if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
+ resource_size_t reg_sz;
- if (!self) {
- err = -EFAULT;
- goto err_exit;
+ mmio_pa = pci_resource_start(pdev, bar);
+ if (mmio_pa == 0U) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ reg_sz = pci_resource_len(pdev, bar);
+ if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
+ if (!self->aq_hw->mmio) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+ break;
+ }
}
- for (port = 0; port < self->ports; ++port) {
- if (!self->port[port])
- continue;
- (void)aq_nic_change_pm_state(self->port[port], pm_msg);
+ if (bar == 4) {
+ err = -EIO;
+ goto err_ioremap;
}
-err_exit:
+ numvecs = min((u8)AQ_CFG_VECS_DEF,
+ aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
+ numvecs = min(numvecs, num_online_cpus());
+ /* enable interrupts */
+#if !AQ_CFG_FORCE_LEGACY_INT
+ err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs,
+ PCI_IRQ_MSIX);
+
+ if (err < 0) {
+ err = pci_alloc_irq_vectors(self->pdev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (err < 0)
+ goto err_hwinit;
+ }
+#endif
+
+ /* net device init */
+ aq_nic_cfg_start(self);
+
+ aq_nic_ndev_init(self);
+
+ err = aq_nic_ndev_register(self);
+ if (err < 0)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ aq_nic_free_vectors(self);
+ aq_pci_free_irq_vectors(self);
+err_hwinit:
+ iounmap(self->aq_hw->mmio);
+err_ioremap:
+ free_netdev(ndev);
+err_pci_func:
+ pci_release_regions(pdev);
+err_ndev:
+ pci_disable_device(pdev);
return err;
}
+
+static void aq_pci_remove(struct pci_dev *pdev)
+{
+ struct aq_nic_s *self = pci_get_drvdata(pdev);
+
+ if (self->ndev) {
+ if (self->ndev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(self->ndev);
+ aq_nic_free_vectors(self);
+ aq_pci_free_irq_vectors(self);
+ iounmap(self->aq_hw->mmio);
+ kfree(self->aq_hw);
+ pci_release_regions(pdev);
+ free_netdev(self->ndev);
+ }
+
+ pci_disable_device(pdev);
+}
+
+static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
+{
+ struct aq_nic_s *self = pci_get_drvdata(pdev);
+
+ return aq_nic_change_pm_state(self, &pm_msg);
+}
+
+static int aq_pci_resume(struct pci_dev *pdev)
+{
+ struct aq_nic_s *self = pci_get_drvdata(pdev);
+ pm_message_t pm_msg = PMSG_RESTORE;
+
+ return aq_nic_change_pm_state(self, &pm_msg);
+}
+
+static struct pci_driver aq_pci_ops = {
+ .name = AQ_CFG_DRV_NAME,
+ .id_table = aq_pci_tbl,
+ .probe = aq_pci_probe,
+ .remove = aq_pci_remove,
+ .suspend = aq_pci_suspend,
+ .resume = aq_pci_resume,
+};
+
+module_pci_driver(aq_pci_ops);
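The probe path now resolves hardware bindings by table: aq_pci_probe_get_hw_by_id() walks hw_atl_boards[] for the first row whose device ID matches and whose revision is either exact or the AQ_HWREV_ANY wildcard. The match predicate, isolated as a sketch:

	/* Sketch of the row-match rule applied over hw_atl_boards[]. */
	static bool aq_sketch_board_matches(const struct aq_board_revision_s *b,
					    const struct pci_dev *pdev)
	{
		return b->devid == pdev->device &&
		       (b->revision == AQ_HWREV_ANY ||
			b->revision == pdev->revision);
	}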
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
index ecb033791203..aeee67bf69fa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
@@ -13,22 +13,20 @@
#define AQ_PCI_FUNC_H
#include "aq_common.h"
+#include "aq_nic.h"
-struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops,
- struct pci_dev *pdev,
- const struct net_device_ops *ndev_ops,
- const struct ethtool_ops *eth_ops);
-int aq_pci_func_init(struct aq_pci_func_s *self);
-int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
+struct aq_board_revision_s {
+ unsigned short devid;
+ unsigned short revision;
+ const struct aq_hw_ops *ops;
+ const struct aq_hw_caps_s *caps;
+};
+
+int aq_pci_func_init(struct pci_dev *pdev);
+int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
char *name, void *aq_vec,
cpumask_t *affinity_mask);
-void aq_pci_func_free_irqs(struct aq_pci_func_s *self);
-int aq_pci_func_start(struct aq_pci_func_s *self);
-void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self);
-unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self);
-void aq_pci_func_deinit(struct aq_pci_func_s *self);
-void aq_pci_func_free(struct aq_pci_func_s *self);
-int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
- pm_message_t *pm_msg);
+void aq_pci_func_free_irqs(struct aq_nic_s *self);
+unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self);
#endif /* AQ_PCI_FUNC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 519ca6534b85..0be6a11370bb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -279,10 +279,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
skb_record_rx_queue(skb, self->idx);
- napi_gro_receive(napi, skb);
-
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
+
+ napi_gro_receive(napi, skb);
}
err_exit:
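The reorder in aq_ring_rx_clean() above is a correctness fix, not a style change: napi_gro_receive() hands the skb to the stack and may free it, so skb->len has to be sampled for the byte counter before the call; reading it afterwards would be a use-after-free.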
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 5844078764bd..965fae0fb6e0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -15,6 +15,7 @@
#include "aq_common.h"
struct page;
+struct aq_nic_cfg_s;
/* TxC SOP DX EOP
* +----------+----------+----------+-----------
@@ -105,7 +106,6 @@ union aq_ring_stats_s {
};
struct aq_ring_s {
- struct aq_obj_s header;
struct aq_ring_buff_s *buff_ring;
u8 *dx_ring; /* descriptors ring, dma shared mem */
struct aq_nic_s *aq_nic;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
index e12bcdfb874a..786ea8187c69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -14,12 +14,6 @@
#include "aq_common.h"
-#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)
-
-struct aq_obj_s {
- atomic_t flags;
-};
-
static inline void aq_utils_obj_set(atomic_t *flags, u32 mask)
{
unsigned long flags_old, flags_new;
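The hunk context cuts aq_utils_obj_set() off after its declarations; the body follows the standard lock-free read-modify-write shape, retrying until no other CPU raced the update. A sketch of that loop, under the assumption the function is unchanged apart from losing struct aq_obj_s:

	do {
		flags_old = atomic_read(flags);
		flags_new = flags_old | mask;
	} while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old);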
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 5fecc9a099ef..f890b8a5a862 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -19,8 +19,7 @@
#include <linux/netdevice.h>
struct aq_vec_s {
- struct aq_obj_s header;
- struct aq_hw_ops *aq_hw_ops;
+ const struct aq_hw_ops *aq_hw_ops;
struct aq_hw_s *aq_hw;
struct aq_nic_s *aq_nic;
unsigned int tx_rings;
@@ -166,7 +165,7 @@ err_exit:
return self;
}
-int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
struct aq_hw_s *aq_hw)
{
struct aq_ring_s *ring = NULL;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 6c68b184236c..8bdf60bb3f63 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -19,6 +19,8 @@
struct aq_hw_s;
struct aq_hw_ops;
+struct aq_nic_s;
+struct aq_nic_cfg_s;
struct aq_ring_stats_rx_s;
struct aq_ring_stats_tx_s;
@@ -26,7 +28,7 @@ irqreturn_t aq_vec_isr(int irq, void *private);
irqreturn_t aq_vec_isr_legacy(int irq, void *private);
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
-int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
struct aq_hw_s *aq_hw);
void aq_vec_deinit(struct aq_vec_s *self);
void aq_vec_free(struct aq_vec_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 07b3c49a16a4..67e2f9fb9402 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -12,67 +12,100 @@
#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
+#include "../aq_nic.h"
#include "hw_atl_a0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_a0_internal.h"
-static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps)
-{
- memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
- return 0;
-}
-
-static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func,
- unsigned int port,
- struct aq_hw_ops *ops)
-{
- struct hw_atl_s *self = NULL;
-
- self = kzalloc(sizeof(*self), GFP_KERNEL);
- if (!self)
- goto err_exit;
-
- self->base.aq_pci_func = aq_pci_func;
+#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
+ .is_64_dma = true, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_A0_RSS_MAX, \
+ .tcs = HW_ATL_A0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_A0_RXD_SIZE, \
+ .rxds = 248U, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_A0_TXD_SIZE, \
+ .txds = 8U * 1024U, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_A0_TX_RINGS, \
+ .rx_rings = HW_ATL_A0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_SG | \
+ NETIF_F_TSO, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL_A0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
+ .hw_alive_check_addr = 0x10U
+
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
+ .link_speed_msk = HW_ATL_A0_RATE_5G |
+ HW_ATL_A0_RATE_2G5 |
+ HW_ATL_A0_RATE_1G |
+ HW_ATL_A0_RATE_100M,
+};
- self->base.not_ff_addr = 0x10U;
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_A0_RATE_10G |
+ HW_ATL_A0_RATE_5G |
+ HW_ATL_A0_RATE_2G5 |
+ HW_ATL_A0_RATE_1G |
+ HW_ATL_A0_RATE_100M,
+};
-err_exit:
- return (struct aq_hw_s *)self;
-}
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_A0_RATE_5G |
+ HW_ATL_A0_RATE_2G5 |
+ HW_ATL_A0_RATE_1G |
+ HW_ATL_A0_RATE_100M,
+};
-static void hw_atl_a0_destroy(struct aq_hw_s *self)
-{
- kfree(self);
-}
+const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
+ DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_A0_RATE_2G5 |
+ HW_ATL_A0_RATE_1G |
+ HW_ATL_A0_RATE_100M,
+};
static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
- glb_glb_reg_res_dis_set(self, 1U);
- pci_pci_reg_res_dis_set(self, 0U);
- rx_rx_reg_res_dis_set(self, 0U);
- tx_tx_reg_res_dis_set(self, 0U);
+ hw_atl_glb_glb_reg_res_dis_set(self, 1U);
+ hw_atl_pci_pci_reg_res_dis_set(self, 0U);
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
HW_ATL_FLUSH();
- glb_soft_res_set(self, 1);
+ hw_atl_glb_soft_res_set(self, 1);
/* poll up to 10 times at 1 ms intervals */
- AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
+ AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
- itr_irq_reg_res_dis_set(self, 0U);
- itr_res_irq_set(self, 1U);
+ hw_atl_itr_irq_reg_res_dis_set(self, 0U);
+ hw_atl_itr_res_irq_set(self, 1U);
/* poll up to 10 times at 1 ms intervals */
- AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
+ AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
- hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
+ self->aq_fw_ops->set_state(self, MPI_RESET);
err = aq_hw_err_from_flags(self);
@@ -88,51 +121,53 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
- tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
- tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
/* TPS VM init */
- tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
/* TPS TC credits init */
- tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
- tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
- tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
- tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
/* Tx buf size */
buff_size = HW_ATL_A0_TXBUF_MAX;
- tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- tpb_tx_buff_hi_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 66U) /
- 100U, tc);
- tpb_tx_buff_lo_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 50U) /
- 100U, tc);
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 50U) /
+ 100U, tc);
/* QoS Rx buf size per TC */
tc = 0;
is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_A0_RXBUF_MAX;
- rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- rpb_rx_buff_hi_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 66U) /
- 100U, tc);
- rpb_rx_buff_lo_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 50U) /
- 100U, tc);
- rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 50U) /
+ 100U, tc);
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
- rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
return aq_hw_err_from_flags(self);
}
@@ -140,20 +175,19 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
- struct aq_nic_cfg_s *cfg = NULL;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
- cfg = self->aq_nic_cfg;
-
for (i = 10, addr = 0U; i--; ++addr) {
u32 key_data = cfg->is_rss ?
__swab32(rss_params->hash_secret_key[i]) : 0U;
- rpf_rss_key_wr_data_set(self, key_data);
- rpf_rss_key_addr_set(self, addr);
- rpf_rss_key_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+ hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+ hw_atl_rpf_rss_key_addr_set(self, addr);
+ hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
@@ -182,11 +216,12 @@ static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
((i * 3U) & 0xFU));
}
- for (i = AQ_DIMOF(bitary); i--;) {
- rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
- rpf_rss_redir_tbl_addr_set(self, i);
- rpf_rss_redir_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+ for (i = ARRAY_SIZE(bitary); i--;) {
+ hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+ hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
+ hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
@@ -201,35 +236,35 @@ static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg)
{
/* TX checksum offloads */
- tpo_ipv4header_crc_offload_en_set(self, 1);
- tpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksum offloads */
- rpo_ipv4header_crc_offload_en_set(self, 1);
- rpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
/* LSO offloads */
- tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+ hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
{
- thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
/* Tx interrupts */
- tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
0x00010000U : 0x00000000U);
- tdm_tx_dca_en_set(self, 0U);
- tdm_tx_dca_mode_set(self, 0U);
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
- tpb_tx_path_scp_ins_en_set(self, 1U);
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
return aq_hw_err_from_flags(self);
}
@@ -240,38 +275,38 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
int i;
/* Rx TC/RSS number config */
- rpb_rpf_rx_traf_class_mode_set(self, 1U);
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
/* Rx flow control */
- rpb_rx_flow_ctl_mode_set(self, 1U);
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
/* RSS Ring selection */
- reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
0xB3333333U : 0x00000000U);
/* Multicast filters */
for (i = HW_ATL_A0_MAC_MAX; i--;) {
- rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
- rpfl2unicast_flr_act_set(self, 1U, i);
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
}
- reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
- reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
/* Vlan filters */
- rpf_vlan_outer_etht_set(self, 0x88A8U);
- rpf_vlan_inner_etht_set(self, 0x8100U);
- rpf_vlan_prom_mode_en_set(self, 1);
+ hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+ hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
/* Rx Interrupts */
- rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
- rpfl2broadcast_flr_act_set(self, 1U);
- rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
- rdm_rx_dca_en_set(self, 0U);
- rdm_rx_dca_mode_set(self, 0U);
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
return aq_hw_err_from_flags(self);
}
@@ -290,10 +325,10 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
- rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
- rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
- rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
err = aq_hw_err_from_flags(self);
@@ -301,9 +336,7 @@ err_exit:
return err;
}
-static int hw_atl_a0_hw_init(struct aq_hw_s *self,
- struct aq_nic_cfg_s *aq_nic_cfg,
- u8 *mac_addr)
+static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
@@ -314,43 +347,44 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
int err = 0;
- self->aq_nic_cfg = aq_nic_cfg;
-
- hw_atl_utils_hw_chip_features_init(self,
- &PHAL_ATLANTIC_A0->chip_features);
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_a0_hw_init_tx_path(self);
hw_atl_a0_hw_init_rx_path(self);
hw_atl_a0_hw_mac_addr_set(self, mac_addr);
- hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_state(self, MPI_INIT);
- reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
- reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
+ hw_atl_reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
+ hw_atl_reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
hw_atl_a0_hw_qos_set(self);
hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+ /* Reset link status and read out initial hardware counters */
+ self->aq_link_status.mbps = 0;
+ self->aq_fw_ops->update_stats(self);
+
err = aq_hw_err_from_flags(self);
if (err < 0)
goto err_exit;
/* Interrupts */
- reg_irq_glb_ctl_set(self,
- aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
- [(aq_nic_cfg->vecs > 1U) ?
- 1 : 0]);
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ? 1 : 0]);
- itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+ hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
/* Interrupts */
- reg_gen_irq_map_set(self,
- ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
- ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
- ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
- ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
+ hw_atl_reg_gen_irq_map_set(self,
+ ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
+ ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
+ ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
+ ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
hw_atl_a0_hw_offload_set(self, aq_nic_cfg);
@@ -361,28 +395,28 @@ err_exit:
static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 1, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 1, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
- tpb_tx_buff_en_set(self, 1);
- rpb_rx_buff_en_set(self, 1);
+ hw_atl_tpb_tx_buff_en_set(self, 1);
+ hw_atl_rpb_rx_buff_en_set(self, 1);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+ hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
return 0;
}
@@ -468,36 +502,37 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- rdm_rx_desc_en_set(self, false, aq_ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
- aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ aq_ring->idx);
- reg_rx_dma_desc_base_addressmswset(self,
- dma_desc_addr_msw, aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addressmswset(self,
+ dma_desc_addr_msw,
+ aq_ring->idx);
- rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
- rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ hw_atl_rdm_rx_desc_data_buff_size_set(self,
+ AQ_CFG_RX_FRAME_MAX / 1024U,
aq_ring->idx);
- rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
/* Rx ring set mode */
/* Mapping interrupt vector */
- itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
- rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -509,25 +544,25 @@ static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ aq_ring->idx);
- reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ aq_ring->idx);
- tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);
/* Set Tx threshold */
- tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
/* Mapping interrupt vector */
- itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
- tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -548,7 +583,7 @@ static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
rxd->hdr_addr = 0U;
}
- reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -557,13 +592,13 @@ static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
int err = 0;
- unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+ unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
- if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
- ring->hw_head = hw_head_;
+ ring->hw_head = hw_head;
err = aq_hw_err_from_flags(self);
err_exit:
@@ -587,15 +622,16 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
if ((1U << 4) &
- reg_rx_dma_desc_status_get(self, ring->idx)) {
- rdm_rx_desc_en_set(self, false, ring->idx);
- rdm_rx_desc_res_set(self, true, ring->idx);
- rdm_rx_desc_res_set(self, false, ring->idx);
- rdm_rx_desc_en_set(self, true, ring->idx);
+ hw_atl_reg_rx_dma_desc_status_get(self, ring->idx)) {
+ hw_atl_rdm_rx_desc_en_set(self, false, ring->idx);
+ hw_atl_rdm_rx_desc_res_set(self, true, ring->idx);
+ hw_atl_rdm_rx_desc_res_set(self, false, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, true, ring->idx);
}
if (ring->hw_head ||
- (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) {
+ (hw_atl_rdm_rx_desc_head_ptr_get(self,
+ ring->idx) < 2U)) {
break;
} else if (!(rxd_wb->status & 0x1U)) {
struct hw_atl_rxd_wb_s *rxd_wb1 =
@@ -678,26 +714,25 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_setlsw_set(self, LODWORD(mask) |
+ hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
(1U << HW_ATL_A0_ERR_INT));
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_clearlsw_set(self, LODWORD(mask));
- itr_irq_status_clearlsw_set(self, LODWORD(mask));
-
- if ((1U << 16) & reg_gen_irq_status_get(self))
+ hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
- atomic_inc(&PHAL_ATLANTIC_A0->dpc);
+ if ((1U << 16) & hw_atl_reg_gen_irq_status_get(self))
+ atomic_inc(&self->dpc);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
- *mask = itr_irq_statuslsw_get(self);
+ *mask = hw_atl_itr_irq_statuslsw_get(self);
return aq_hw_err_from_flags(self);
}
@@ -708,18 +743,20 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
{
unsigned int i = 0U;
- rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
- rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0);
- rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+ hw_atl_rpfl2promiscuous_mode_en_set(self,
+ IS_FILTER_ENABLED(IFF_PROMISC));
+ hw_atl_rpfl2multicast_flr_en_set(self,
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+ hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
self->aq_nic_cfg->is_mc_list_enabled =
IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled &&
- (i <= self->aq_nic_cfg->mc_list_count)) ?
- 1U : 0U, i);
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled &&
+ (i <= self->aq_nic_cfg->mc_list_count)) ?
+ 1U : 0U, i);
return aq_hw_err_from_flags(self);
}
@@ -746,17 +783,19 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
- rpfl2unicast_dest_addresslsw_set(self,
- l, HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self,
+ l,
+ HW_ATL_A0_MAC_MIN + i);
- rpfl2unicast_dest_addressmsw_set(self,
- h, HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self,
+ h,
+ HW_ATL_A0_MAC_MIN + i);
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled),
- HW_ATL_A0_MAC_MIN + i);
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled),
+ HW_ATL_A0_MAC_MIN + i);
}
err = aq_hw_err_from_flags(self);
@@ -808,7 +847,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
}
for (i = HW_ATL_A0_RINGS_MAX; i--;)
- reg_irq_thr_set(self, itr_rx, i);
+ hw_atl_reg_irq_thr_set(self, itr_rx, i);
return aq_hw_err_from_flags(self);
}
@@ -822,38 +861,19 @@ static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed)
-{
- int err = 0;
-
- err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
- if (err < 0)
- goto err_exit;
-
-err_exit:
- return err;
-}
-
-static struct aq_hw_ops hw_atl_ops_ = {
- .create = hw_atl_a0_create,
- .destroy = hw_atl_a0_destroy,
- .get_hw_caps = hw_atl_a0_get_hw_caps,
-
- .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
+const struct aq_hw_ops hw_atl_ops_a0 = {
.hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
- .hw_get_link_status = hw_atl_utils_mpi_get_link_status,
- .hw_set_link_speed = hw_atl_a0_hw_set_speed,
.hw_init = hw_atl_a0_hw_init,
.hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
@@ -883,21 +903,6 @@ static struct aq_hw_ops hw_atl_ops_ = {
.hw_rss_set = hw_atl_a0_hw_rss_set,
.hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
.hw_get_regs = hw_atl_utils_hw_get_regs,
- .hw_update_stats = hw_atl_utils_update_stats,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
-
-struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
-{
- bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
- bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
- (pdev->device == HW_ATL_DEVICE_ID_D100) ||
- (pdev->device == HW_ATL_DEVICE_ID_D107) ||
- (pdev->device == HW_ATL_DEVICE_ID_D108) ||
- (pdev->device == HW_ATL_DEVICE_ID_D109));
-
- bool is_rev_ok = (pdev->revision == 1U);
-
- return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
-}
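
The rewritten hw_atl_a0.c above drops the create/destroy factory in favor of per-board const capability tables built from a shared DEFAULT_A0_BOARD_BASIC_CAPABILITIES macro. The macro expands to a run of designated initializers, so each board table states only what differs (media type, link-speed mask). A compilable sketch of the idiom with made-up fields:

    #include <stdio.h>

    struct caps {
    	unsigned int vecs;
    	unsigned int mtu;
    	unsigned int link_speed_msk;
    };

    /* common fields, pasted into each designated-initializer list */
    #define DEFAULT_BASIC_CAPS \
    	.vecs = 4U,        \
    	.mtu = 9014U

    static const struct caps caps_fibre = {
    	DEFAULT_BASIC_CAPS,
    	.link_speed_msk = 0x0FU,
    };

    static const struct caps caps_tp = {
    	DEFAULT_BASIC_CAPS,
    	.link_speed_msk = 0x1FU,
    };

    int main(void)
    {
    	printf("fibre: vecs=%u msk=0x%X\n",
    	       caps_fibre.vecs, caps_fibre.link_speed_msk);
    	printf("tp:    vecs=%u msk=0x%X\n",
    	       caps_tp.vecs, caps_tp.link_speed_msk);
    	return 0;
    }
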
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
index 6e1d527954c9..25fe954def03 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
@@ -16,19 +16,11 @@
#include "../aq_common.h"
-#ifndef PCI_VENDOR_ID_AQUANTIA
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc100;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc107;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc108;
+extern const struct aq_hw_caps_s hw_atl_a0_caps_aqc109;
-#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
-#define HW_ATL_DEVICE_ID_0001 0x0001
-#define HW_ATL_DEVICE_ID_D100 0xD100
-#define HW_ATL_DEVICE_ID_D107 0xD107
-#define HW_ATL_DEVICE_ID_D108 0xD108
-#define HW_ATL_DEVICE_ID_D109 0xD109
-
-#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
-
-#endif
-
-struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev);
+extern const struct aq_hw_ops hw_atl_ops_a0;
#endif /* HW_ATL_A0_H */
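
With hw_atl_a0_get_ops_by_id() gone, this header now just exports const caps and ops objects; the vendor/device/revision match the removed function performed (revision 1 selects A0, revision 2 selects B0 in the hunks above) can instead live in a lookup table at probe time. A generic sketch of such table-driven binding — the struct shapes are invented, but the device-ID/revision pairing follows the removed code:

    #include <stddef.h>
    #include <stdio.h>

    struct ops {
    	const char *name;
    };

    static const struct ops ops_a0 = { "a0" };
    static const struct ops ops_b0 = { "b0" };

    struct board_id {
    	unsigned short device;
    	unsigned char revision;
    	const struct ops *ops;
    };

    static const struct board_id board_tbl[] = {
    	{ 0x0001, 1, &ops_a0 },
    	{ 0x0001, 2, &ops_b0 },
    };

    static const struct ops *lookup(unsigned short dev, unsigned char rev)
    {
    	size_t i;

    	for (i = 0; i < sizeof(board_tbl) / sizeof(board_tbl[0]); i++)
    		if (board_tbl[i].device == dev &&
    		    board_tbl[i].revision == rev)
    			return board_tbl[i].ops;
    	return NULL;
    }

    int main(void)
    {
    	const struct ops *o = lookup(0x0001, 2);

    	printf("%s\n", o ? o->name : "none");
    	return 0;
    }
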
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 0592a0330cf0..1d8855558d74 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -88,69 +88,4 @@
#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
-/* Hardware tx descriptor */
-struct __packed hw_atl_txd_s {
- u64 buf_addr;
- u32 ctl;
- u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
-};
-
-/* Hardware tx context descriptor */
-struct __packed hw_atl_txc_s {
- u32 rsvd;
- u32 len;
- u32 ctl;
- u32 len2;
-};
-
-/* Hardware rx descriptor */
-struct __packed hw_atl_rxd_s {
- u64 buf_addr;
- u64 hdr_addr;
-};
-
-/* Hardware rx descriptor writeback */
-struct __packed hw_atl_rxd_wb_s {
- u32 type;
- u32 rss_hash;
- u16 status;
- u16 pkt_len;
- u16 next_desc_ptr;
- u16 vlan;
-};
-
-/* HW layer capabilities */
-static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
- .ports = 1U,
- .is_64_dma = true,
- .msix_irqs = 4U,
- .irq_mask = ~0U,
- .vecs = HW_ATL_A0_RSS_MAX,
- .tcs = HW_ATL_A0_TC_MAX,
- .rxd_alignment = 1U,
- .rxd_size = HW_ATL_A0_RXD_SIZE,
- .rxds = 248U,
- .txd_alignment = 1U,
- .txd_size = HW_ATL_A0_TXD_SIZE,
- .txds = 8U * 1024U,
- .txhwb_alignment = 4096U,
- .tx_rings = HW_ATL_A0_TX_RINGS,
- .rx_rings = HW_ATL_A0_RX_RINGS,
- .hw_features = NETIF_F_HW_CSUM |
- NETIF_F_RXCSUM |
- NETIF_F_RXHASH |
- NETIF_F_SG |
- NETIF_F_TSO,
- .hw_priv_flags = IFF_UNICAST_FLT,
- .link_speed_msk = (HW_ATL_A0_RATE_10G |
- HW_ATL_A0_RATE_5G |
- HW_ATL_A0_RATE_2G5 |
- HW_ATL_A0_RATE_1G |
- HW_ATL_A0_RATE_100M),
- .flow_control = true,
- .mtu = HW_ATL_A0_MTU_JUMBO,
- .mac_regs_count = 88,
- .fw_ver_expected = HW_ATL_A0_FW_VER_EXPECTED,
-};
-
#endif /* HW_ATL_A0_INTERNAL_H */
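
Dropping the static hw_atl_a0_hw_caps_ definition from this internal header fixes a classic pitfall: a static object defined in a header is duplicated in every translation unit that includes it. The replacement keeps an extern const declaration in the header and a single definition in the .c file. Sketched in one compilable file (the header/source split is marked in comments; names are illustrative):

    #include <stdio.h>

    struct caps {
    	unsigned int mtu;
    };

    /* the header would carry only this declaration ... */
    extern const struct caps board_caps;

    /* ... and exactly one .c file carries the definition */
    const struct caps board_caps = {
    	.mtu = 9014U,
    };

    int main(void)
    {
    	printf("mtu=%u\n", board_caps.mtu);
    	return 0;
    }
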
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index ec68c20efcbd..819f6bcf9b4e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -12,71 +12,89 @@
#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
+#include "../aq_nic.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
+#include "hw_atl_llh_internal.h"
+
+#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
+ .is_64_dma = true, \
+ .msix_irqs = 4U, \
+ .irq_mask = ~0U, \
+ .vecs = HW_ATL_B0_RSS_MAX, \
+ .tcs = HW_ATL_B0_TC_MAX, \
+ .rxd_alignment = 1U, \
+ .rxd_size = HW_ATL_B0_RXD_SIZE, \
+ .rxds = 4U * 1024U, \
+ .txd_alignment = 1U, \
+ .txd_size = HW_ATL_B0_TXD_SIZE, \
+ .txds = 8U * 1024U, \
+ .txhwb_alignment = 4096U, \
+ .tx_rings = HW_ATL_B0_TX_RINGS, \
+ .rx_rings = HW_ATL_B0_RX_RINGS, \
+ .hw_features = NETIF_F_HW_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_SG | \
+ NETIF_F_TSO | \
+ NETIF_F_LRO, \
+ .hw_priv_flags = IFF_UNICAST_FLT, \
+ .flow_control = true, \
+ .mtu = HW_ATL_B0_MTU_JUMBO, \
+ .mac_regs_count = 88, \
+ .hw_alive_check_addr = 0x10U
+
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
+ .link_speed_msk = HW_ATL_B0_RATE_10G |
+ HW_ATL_B0_RATE_5G |
+ HW_ATL_B0_RATE_2G5 |
+ HW_ATL_B0_RATE_1G |
+ HW_ATL_B0_RATE_100M,
+};
-static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps)
-{
- memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
- return 0;
-}
-
-static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func,
- unsigned int port,
- struct aq_hw_ops *ops)
-{
- struct hw_atl_s *self = NULL;
-
- self = kzalloc(sizeof(*self), GFP_KERNEL);
- if (!self)
- goto err_exit;
-
- self->base.aq_pci_func = aq_pci_func;
-
- self->base.not_ff_addr = 0x10U;
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_B0_RATE_10G |
+ HW_ATL_B0_RATE_5G |
+ HW_ATL_B0_RATE_2G5 |
+ HW_ATL_B0_RATE_1G |
+ HW_ATL_B0_RATE_100M,
+};
-err_exit:
- return (struct aq_hw_s *)self;
-}
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_B0_RATE_5G |
+ HW_ATL_B0_RATE_2G5 |
+ HW_ATL_B0_RATE_1G |
+ HW_ATL_B0_RATE_100M,
+};
-static void hw_atl_b0_destroy(struct aq_hw_s *self)
-{
- kfree(self);
-}
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = HW_ATL_B0_RATE_2G5 |
+ HW_ATL_B0_RATE_1G |
+ HW_ATL_B0_RATE_100M,
+};
static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
- glb_glb_reg_res_dis_set(self, 1U);
- pci_pci_reg_res_dis_set(self, 0U);
- rx_rx_reg_res_dis_set(self, 0U);
- tx_tx_reg_res_dis_set(self, 0U);
-
- HW_ATL_FLUSH();
- glb_soft_res_set(self, 1);
-
- /* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
- if (err < 0)
- goto err_exit;
-
- itr_irq_reg_res_dis_set(self, 0U);
- itr_res_irq_set(self, 1U);
-
- /* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
- if (err < 0)
- goto err_exit;
+ err = hw_atl_utils_soft_reset(self);
+ if (err)
+ return err;
- hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
+ self->aq_fw_ops->set_state(self, MPI_RESET);
err = aq_hw_err_from_flags(self);
-err_exit:
return err;
}
@@ -88,51 +106,53 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
- tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
- tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
/* TPS VM init */
- tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
/* TPS TC credits init */
- tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
- tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
- tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
- tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
- tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
/* Tx buf size */
buff_size = HW_ATL_B0_TXBUF_MAX;
- tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- tpb_tx_buff_hi_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 66U) /
- 100U, tc);
- tpb_tx_buff_lo_threshold_per_tc_set(self,
- (buff_size * (1024 / 32U) * 50U) /
- 100U, tc);
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024 / 32U) * 50U) /
+ 100U, tc);
/* QoS Rx buf size per TC */
tc = 0;
is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_B0_RXBUF_MAX;
- rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
- rpb_rx_buff_hi_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 66U) /
- 100U, tc);
- rpb_rx_buff_lo_threshold_per_tc_set(self,
- (buff_size *
- (1024U / 32U) * 50U) /
- 100U, tc);
- rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 66U) /
+ 100U, tc);
+ hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 50U) /
+ 100U, tc);
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
- rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+ hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
return aq_hw_err_from_flags(self);
}
@@ -140,20 +160,19 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
- struct aq_nic_cfg_s *cfg = NULL;
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
- cfg = self->aq_nic_cfg;
-
for (i = 10, addr = 0U; i--; ++addr) {
u32 key_data = cfg->is_rss ?
__swab32(rss_params->hash_secret_key[i]) : 0U;
- rpf_rss_key_wr_data_set(self, key_data);
- rpf_rss_key_addr_set(self, addr);
- rpf_rss_key_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+ hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+ hw_atl_rpf_rss_key_addr_set(self, addr);
+ hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
@@ -182,11 +201,12 @@ static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
((i * 3U) & 0xFU));
}
- for (i = AQ_DIMOF(bitary); i--;) {
- rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
- rpf_rss_redir_tbl_addr_set(self, i);
- rpf_rss_redir_wr_en_set(self, 1U);
- AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+ for (i = ARRAY_SIZE(bitary); i--;) {
+ hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+ hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
+ hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
+ 1000U, 10U);
if (err < 0)
goto err_exit;
}
@@ -203,15 +223,15 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
unsigned int i;
/* TX checksum offloads */
- tpo_ipv4header_crc_offload_en_set(self, 1);
- tpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksum offloads */
- rpo_ipv4header_crc_offload_en_set(self, 1);
- rpo_tcp_udp_crc_offload_en_set(self, 1);
+ hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
+ hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
/* LSO offloads */
- tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+ hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
/* LRO offloads */
{
@@ -220,43 +240,44 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
- rpo_lro_max_num_of_descriptors_set(self, val, i);
+ hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);
- rpo_lro_time_base_divider_set(self, 0x61AU);
- rpo_lro_inactive_interval_set(self, 0);
- rpo_lro_max_coalescing_interval_set(self, 2);
+ hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
+ hw_atl_rpo_lro_inactive_interval_set(self, 0);
+ hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);
- rpo_lro_qsessions_lim_set(self, 1U);
+ hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
- rpo_lro_total_desc_lim_set(self, 2U);
+ hw_atl_rpo_lro_total_desc_lim_set(self, 2U);
- rpo_lro_patch_optimization_en_set(self, 0U);
+ hw_atl_rpo_lro_patch_optimization_en_set(self, 0U);
- rpo_lro_min_pay_of_first_pkt_set(self, 10U);
+ hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);
- rpo_lro_pkt_lim_set(self, 1U);
+ hw_atl_rpo_lro_pkt_lim_set(self, 1U);
- rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+ hw_atl_rpo_lro_en_set(self,
+ aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
}
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
- thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
- thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+ hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
/* Tx interrupts */
- tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
0x00010000U : 0x00000000U);
- tdm_tx_dca_en_set(self, 0U);
- tdm_tx_dca_mode_set(self, 0U);
+ hw_atl_tdm_tx_dca_en_set(self, 0U);
+ hw_atl_tdm_tx_dca_mode_set(self, 0U);
- tpb_tx_path_scp_ins_en_set(self, 1U);
+ hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
return aq_hw_err_from_flags(self);
}
@@ -267,55 +288,55 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
int i;
/* Rx TC/RSS number config */
- rpb_rpf_rx_traf_class_mode_set(self, 1U);
+ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
/* Rx flow control */
- rpb_rx_flow_ctl_mode_set(self, 1U);
+ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
/* RSS Ring selection */
- reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
0xB3333333U : 0x00000000U);
/* Multicast filters */
for (i = HW_ATL_B0_MAC_MAX; i--;) {
- rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
- rpfl2unicast_flr_act_set(self, 1U, i);
+ hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
}
- reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
- reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+ hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
/* Vlan filters */
- rpf_vlan_outer_etht_set(self, 0x88A8U);
- rpf_vlan_inner_etht_set(self, 0x8100U);
+ hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+ hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
if (cfg->vlan_id) {
- rpf_vlan_flr_act_set(self, 1U, 0U);
- rpf_vlan_id_flr_set(self, 0U, 0U);
- rpf_vlan_flr_en_set(self, 0U, 0U);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
+ hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
+ hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
- rpf_vlan_accept_untagged_packets_set(self, 1U);
- rpf_vlan_untagged_act_set(self, 1U);
+ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+ hw_atl_rpf_vlan_untagged_act_set(self, 1U);
- rpf_vlan_flr_act_set(self, 1U, 1U);
- rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
- rpf_vlan_flr_en_set(self, 1U, 1U);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
+ hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
+ hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
} else {
- rpf_vlan_prom_mode_en_set(self, 1);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
}
/* Rx Interrupts */
- rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00005040U,
IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
- rpfl2broadcast_flr_act_set(self, 1U);
- rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+ hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+ hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
- rdm_rx_dca_en_set(self, 0U);
- rdm_rx_dca_mode_set(self, 0U);
+ hw_atl_rdm_rx_dca_en_set(self, 0U);
+ hw_atl_rdm_rx_dca_mode_set(self, 0U);
return aq_hw_err_from_flags(self);
}
@@ -334,10 +355,10 @@ static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
- rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
- rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
- rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
+ hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
err = aq_hw_err_from_flags(self);
@@ -345,9 +366,7 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_init(struct aq_hw_s *self,
- struct aq_nic_cfg_s *aq_nic_cfg,
- u8 *mac_addr)
+static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
@@ -357,38 +376,52 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
};
int err = 0;
+ u32 val;
- self->aq_nic_cfg = aq_nic_cfg;
-
- hw_atl_utils_hw_chip_features_init(self,
- &PHAL_ATLANTIC_B0->chip_features);
+ struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
hw_atl_b0_hw_init_tx_path(self);
hw_atl_b0_hw_init_rx_path(self);
hw_atl_b0_hw_mac_addr_set(self, mac_addr);
- hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
+ self->aq_fw_ops->set_state(self, MPI_INIT);
hw_atl_b0_hw_qos_set(self);
hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+ /* Forcibly limit MRRS on RDM/TDM to 2K */
+ val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
+ aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
+ (val & ~0x707) | 0x404);
+
+ /* TX DMA total request limit. B0 hardware cannot handle more
+ * than (8K - MRRS) bytes of incoming DMA data.
+ * The value 24 is in 256-byte units.
+ */
+ aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
+
+ /* Reset link status and read out initial hardware counters */
+ self->aq_link_status.mbps = 0;
+ self->aq_fw_ops->update_stats(self);
+
err = aq_hw_err_from_flags(self);
if (err < 0)
goto err_exit;
/* Interrupts */
- reg_irq_glb_ctl_set(self,
- aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ hw_atl_reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
[(aq_nic_cfg->vecs > 1U) ?
1 : 0]);
- itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+ hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
/* Interrupts */
- reg_gen_irq_map_set(self,
- ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
+ hw_atl_reg_gen_irq_map_set(self,
+ ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
@@ -400,28 +433,28 @@ err_exit:
static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 1, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 1, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
- tpb_tx_buff_en_set(self, 1);
- rpb_rx_buff_en_set(self, 1);
+ hw_atl_tpb_tx_buff_en_set(self, 1);
+ hw_atl_rpb_rx_buff_en_set(self, 1);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+ hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
return 0;
}
@@ -507,36 +540,36 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- rdm_rx_desc_en_set(self, false, aq_ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
- aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ aq_ring->idx);
- reg_rx_dma_desc_base_addressmswset(self,
- dma_desc_addr_msw, aq_ring->idx);
+ hw_atl_reg_rx_dma_desc_base_addressmswset(self,
+ dma_desc_addr_msw, aq_ring->idx);
- rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
- rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ hw_atl_rdm_rx_desc_data_buff_size_set(self,
+ AQ_CFG_RX_FRAME_MAX / 1024U,
aq_ring->idx);
- rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
- rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
- rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
/* Rx ring set mode */
/* Mapping interrupt vector */
- itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
- rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
- rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -548,25 +581,25 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
- reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ aq_ring->idx);
- reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
- aq_ring->idx);
+ hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ aq_ring->idx);
- tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);
/* Set Tx threshold */
- tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
/* Mapping interrupt vector */
- itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
- itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+ hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
- tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
- tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -587,7 +620,7 @@ static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
rxd->hdr_addr = 0U;
}
- reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
return aq_hw_err_from_flags(self);
}
@@ -596,9 +629,9 @@ static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
int err = 0;
- unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+ unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
- if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
err = -ENXIO;
goto err_exit;
}
@@ -701,22 +734,22 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_setlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
- itr_irq_msk_clearlsw_set(self, LODWORD(mask));
- itr_irq_status_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
- atomic_inc(&PHAL_ATLANTIC_B0->dpc);
+ atomic_inc(&self->dpc);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
- *mask = itr_irq_statuslsw_get(self);
+ *mask = hw_atl_itr_irq_statuslsw_get(self);
return aq_hw_err_from_flags(self);
}
@@ -727,20 +760,20 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
{
unsigned int i = 0U;
- rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
- rpfl2multicast_flr_en_set(self,
- IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+ hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
+ hw_atl_rpfl2multicast_flr_en_set(self,
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
- rpfl2_accept_all_mc_packets_set(self,
- IS_FILTER_ENABLED(IFF_ALLMULTI));
+ hw_atl_rpfl2_accept_all_mc_packets_set(self,
+ IS_FILTER_ENABLED(IFF_ALLMULTI));
- rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+ hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled &&
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled &&
(i <= self->aq_nic_cfg->mc_list_count)) ?
1U : 0U, i);
@@ -769,16 +802,16 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
- rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
- rpfl2unicast_dest_addresslsw_set(self,
- l, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addresslsw_set(self,
+ l, HW_ATL_B0_MAC_MIN + i);
- rpfl2unicast_dest_addressmsw_set(self,
- h, HW_ATL_B0_MAC_MIN + i);
+ hw_atl_rpfl2unicast_dest_addressmsw_set(self,
+ h, HW_ATL_B0_MAC_MIN + i);
- rpfl2_uc_flr_en_set(self,
- (self->aq_nic_cfg->is_mc_list_enabled),
+ hw_atl_rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled),
HW_ATL_B0_MAC_MIN + i);
}
@@ -797,10 +830,10 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
switch (self->aq_nic_cfg->itr) {
case AQ_CFG_INTERRUPT_MODERATION_ON:
case AQ_CFG_INTERRUPT_MODERATION_AUTO:
- tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
- tdm_tdm_intr_moder_en_set(self, 1U);
- rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
- rdm_rdm_intr_moder_en_set(self, 1U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
/* HW timers are in 2us units */
@@ -860,18 +893,18 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
}
break;
case AQ_CFG_INTERRUPT_MODERATION_OFF:
- tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
- tdm_tdm_intr_moder_en_set(self, 0U);
- rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
- rdm_rdm_intr_moder_en_set(self, 0U);
+ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
+ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
itr_tx = 0U;
itr_rx = 0U;
break;
}
for (i = HW_ATL_B0_RINGS_MAX; i--;) {
- reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
- reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
+ hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+ hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
}
return aq_hw_err_from_flags(self);
@@ -886,38 +919,19 @@ static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- tdm_tx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- rdm_rx_desc_en_set(self, 0U, ring->idx);
+ hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
-static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed)
-{
- int err = 0;
-
- err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
- if (err < 0)
- goto err_exit;
-
-err_exit:
- return err;
-}
-
-static struct aq_hw_ops hw_atl_ops_ = {
- .create = hw_atl_b0_create,
- .destroy = hw_atl_b0_destroy,
- .get_hw_caps = hw_atl_b0_get_hw_caps,
-
- .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
+const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
- .hw_get_link_status = hw_atl_utils_mpi_get_link_status,
- .hw_set_link_speed = hw_atl_b0_hw_set_speed,
.hw_init = hw_atl_b0_hw_init,
.hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
@@ -947,21 +961,6 @@ static struct aq_hw_ops hw_atl_ops_ = {
.hw_rss_set = hw_atl_b0_hw_rss_set,
.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
.hw_get_regs = hw_atl_utils_hw_get_regs,
- .hw_update_stats = hw_atl_utils_update_stats,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
-
-struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev)
-{
- bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
- bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
- (pdev->device == HW_ATL_DEVICE_ID_D100) ||
- (pdev->device == HW_ATL_DEVICE_ID_D107) ||
- (pdev->device == HW_ATL_DEVICE_ID_D108) ||
- (pdev->device == HW_ATL_DEVICE_ID_D109));
-
- bool is_rev_ok = (pdev->revision == 2U);
-
- return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
-}
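
Besides the same constification as A0, the B0 init path above gains a PCI MRRS clamp done as a read-modify-write: (val & ~0x707) | 0x404 clears two 3-bit fields and writes 4 (2K) into each, leaving every other bit of the register alone. A standalone sketch of the operation (the register contents below are made up):

    #include <stdio.h>

    /* clear the bits covered by mask, then OR in the new field values */
    static unsigned int rmw(unsigned int reg, unsigned int mask,
    			unsigned int val)
    {
    	return (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
    	unsigned int reg = 0xFFFFFFFFU;

    	/* mask 0x707 = two 3-bit fields; 0x404 sets each field to 4 */
    	reg = rmw(reg, 0x707U, 0x404U);
    	printf("0x%08X\n", reg); /* prints 0xFFFFFCFC */
    	return 0;
    }
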
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index a1e1bce6c1f3..2cc8dacfdc27 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -16,19 +16,27 @@
#include "../aq_common.h"
-#ifndef PCI_VENDOR_ID_AQUANTIA
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc100;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc107;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc108;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109;
-#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
-#define HW_ATL_DEVICE_ID_0001 0x0001
-#define HW_ATL_DEVICE_ID_D100 0xD100
-#define HW_ATL_DEVICE_ID_D107 0xD107
-#define HW_ATL_DEVICE_ID_D108 0xD108
-#define HW_ATL_DEVICE_ID_D109 0xD109
+#define hw_atl_b0_caps_aqc111 hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc112 hw_atl_b0_caps_aqc109
-#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
+#define hw_atl_b0_caps_aqc100s hw_atl_b0_caps_aqc100
+#define hw_atl_b0_caps_aqc107s hw_atl_b0_caps_aqc107
+#define hw_atl_b0_caps_aqc108s hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc109s hw_atl_b0_caps_aqc109
-#endif
+#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109
-struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev);
+#define hw_atl_b0_caps_aqc111e hw_atl_b0_caps_aqc108
+#define hw_atl_b0_caps_aqc112e hw_atl_b0_caps_aqc109
+
+extern const struct aq_hw_ops hw_atl_ops_b0;
+
+#define hw_atl_ops_b1 hw_atl_ops_b0
#endif /* HW_ATL_B0_H */
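
The aqc111/aqc112 (and the *s/*e variant) defines above are alias macros: boards whose capabilities are identical share one const table instead of duplicating the data. A tiny sketch of the idiom with invented model names:

    #include <stdio.h>

    struct caps {
    	unsigned int mtu;
    };

    static const struct caps caps_model_a = {
    	.mtu = 9014U,
    };

    /* model_b is capability-identical, so it aliases model_a's table */
    #define caps_model_b caps_model_a

    int main(void)
    {
    	printf("model_b mtu=%u\n", caps_model_b.mtu);
    	return 0;
    }
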
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 9aa2c6edfca2..405d1455c222 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -142,70 +142,6 @@
#define HW_ATL_INTR_MODER_MAX 0x1FF
#define HW_ATL_INTR_MODER_MIN 0xFF
-/* Hardware tx descriptor */
-struct __packed hw_atl_txd_s {
- u64 buf_addr;
- u32 ctl;
- u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
-};
-
-/* Hardware tx context descriptor */
-struct __packed hw_atl_txc_s {
- u32 rsvd;
- u32 len;
- u32 ctl;
- u32 len2;
-};
-
-/* Hardware rx descriptor */
-struct __packed hw_atl_rxd_s {
- u64 buf_addr;
- u64 hdr_addr;
-};
-
-/* Hardware rx descriptor writeback */
-struct __packed hw_atl_rxd_wb_s {
- u32 type;
- u32 rss_hash;
- u16 status;
- u16 pkt_len;
- u16 next_desc_ptr;
- u16 vlan;
-};
-
/* HW layer capabilities */
-static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
- .ports = 1U,
- .is_64_dma = true,
- .msix_irqs = 4U,
- .irq_mask = ~0U,
- .vecs = HW_ATL_B0_RSS_MAX,
- .tcs = HW_ATL_B0_TC_MAX,
- .rxd_alignment = 1U,
- .rxd_size = HW_ATL_B0_RXD_SIZE,
- .rxds = 8U * 1024U,
- .txd_alignment = 1U,
- .txd_size = HW_ATL_B0_TXD_SIZE,
- .txds = 8U * 1024U,
- .txhwb_alignment = 4096U,
- .tx_rings = HW_ATL_B0_TX_RINGS,
- .rx_rings = HW_ATL_B0_RX_RINGS,
- .hw_features = NETIF_F_HW_CSUM |
- NETIF_F_RXCSUM |
- NETIF_F_RXHASH |
- NETIF_F_SG |
- NETIF_F_TSO |
- NETIF_F_LRO,
- .hw_priv_flags = IFF_UNICAST_FLT,
- .link_speed_msk = (HW_ATL_B0_RATE_10G |
- HW_ATL_B0_RATE_5G |
- HW_ATL_B0_RATE_2G5 |
- HW_ATL_B0_RATE_1G |
- HW_ATL_B0_RATE_100M),
- .flow_control = true,
- .mtu = HW_ATL_B0_MTU_JUMBO,
- .mac_regs_count = 88,
- .fw_ver_expected = HW_ATL_B0_FW_VER_EXPECTED,
-};
#endif /* HW_ATL_B0_INTERNAL_H */
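(Editor's sketch, not part of this hunk: with the single static hw_atl_b0_hw_caps_ initializer removed from the internal header, the per-AQC caps declared in hw_atl_b0.h can be defined in hw_atl_b0.c. One plausible way to share the common fields is a base macro; DEFAULT_B0_BOARD_BASIC_CAPABILITIES is an assumed name, with the field set taken from the removed initializer.)

/* Assumed shared base for the per-board caps; only link_speed_msk would
 * vary between AQC models in this sketch.
 */
#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
	.is_64_dma = true, \
	.msix_irqs = 4U, \
	.irq_mask = ~0U, \
	.vecs = HW_ATL_B0_RSS_MAX, \
	.tcs = HW_ATL_B0_TC_MAX, \
	.rxd_size = HW_ATL_B0_RXD_SIZE, \
	.txd_size = HW_ATL_B0_TXD_SIZE, \
	.mtu = HW_ATL_B0_MTU_JUMBO

const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
	DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
	.link_speed_msk = HW_ATL_B0_RATE_10G | HW_ATL_B0_RATE_5G |
			  HW_ATL_B0_RATE_2G5 | HW_ATL_B0_RATE_1G |
			  HW_ATL_B0_RATE_100M,
};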
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 3de651afa8c7..10ba035dadb1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -16,111 +16,115 @@
#include "../aq_hw_utils.h"
/* global */
-void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore)
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+ u32 semaphore)
{
- aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem);
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore), glb_cpu_sem);
}
-u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
{
- return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore));
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore));
}
-void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr,
- glb_reg_res_dis_msk,
- glb_reg_res_dis_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_REG_RES_DIS_ADR,
+ HW_ATL_GLB_REG_RES_DIS_MSK,
+ HW_ATL_GLB_REG_RES_DIS_SHIFT,
glb_reg_res_dis);
}
-void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
{
- aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk,
- glb_soft_res_shift, soft_res);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+ HW_ATL_GLB_SOFT_RES_MSK,
+ HW_ATL_GLB_SOFT_RES_SHIFT, soft_res);
}
-u32 glb_soft_res_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr,
- glb_soft_res_msk,
- glb_soft_res_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+ HW_ATL_GLB_SOFT_RES_MSK,
+ HW_ATL_GLB_SOFT_RES_SHIFT);
}
-u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_STAT_COUNTER7_ADR);
}
-u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, glb_mif_id_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR);
}
/* stats */
-u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR);
}
-u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
}
-u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW);
}
-u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW);
}
-u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW);
}
-u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW);
}
-u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW);
}
-u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW);
}
-u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW);
}
/* interrupt */
-void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw)
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_auto_masklsw)
{
- aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IAMRLSW_ADR, irq_auto_masklsw);
}
-void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+ u32 rx)
{
/* register address for bitfield imr_rx{r}_en */
static u32 itr_imr_rxren_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_rx{r}_en */
@@ -149,18 +153,19 @@ void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
irq_map_en_rx);
}
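(Editor's sketch, not part of this hunk: each helper above funnels into aq_hw_write_reg_bit() with a per-index address/mask/shift triple; two queues share one 32-bit ITR register, which is why consecutive array entries repeat the same address with different masks. Below is a sketch of the read-modify-write such a helper would perform; the real implementation lives in aq_hw_utils.c and may differ in detail.)

/* Assumed read-modify-write behind aq_hw_write_reg_bit(): clear the
 * field's mask, shift the new value into place, skip the write when
 * nothing changed.
 */
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
			 u32 shift, u32 val)
{
	if (msk) {
		u32 reg_old, reg_new;

		reg_old = aq_hw_read_reg(aq_hw, addr);
		reg_new = (reg_old & ~msk) | (val << shift);

		if (reg_old != reg_new)
			aq_hw_write_reg(aq_hw, addr, reg_new);
	} else {
		aq_hw_write_reg(aq_hw, addr, val);
	}
}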
-void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+ u32 tx)
{
/* register address for bitfield imr_tx{t}_en */
static u32 itr_imr_txten_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_tx{t}_en */
@@ -189,30 +194,30 @@ void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
irq_map_en_tx);
}
-void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
{
/* register address for bitfield imr_rx{r}[4:0] */
static u32 itr_imr_rxr_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_rx{r}[4:0] */
static u32 itr_imr_rxr_msk[32] = {
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
- 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+ 0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU
};
/* lower bit position of bitfield imr_rx{r}[4:0] */
@@ -229,30 +234,30 @@ void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
irq_map_rx);
}
-void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
{
/* register address for bitfield imr_tx{t}[4:0] */
static u32 itr_imr_txt_adr[32] = {
0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
- 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
- 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
- 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
- 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ 0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
};
/* bitmask for bitfield imr_tx{t}[4:0] */
static u32 itr_imr_txt_msk[32] = {
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
- 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+ 0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U
};
/* lower bit position of bitfield imr_tx{t}[4:0] */
@@ -269,429 +274,463 @@ void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
irq_map_tx);
}
-void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw)
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_msk_clearlsw)
{
- aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMCRLSW_ADR, irq_msk_clearlsw);
}
-void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
{
- aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMSRLSW_ADR, irq_msk_setlsw);
}
-void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr,
- itr_reg_res_dsbl_msk,
- itr_reg_res_dsbl_shift, irq_reg_res_dis);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_REG_RES_DSBL_ADR,
+ HW_ATL_ITR_REG_RES_DSBL_MSK,
+ HW_ATL_ITR_REG_RES_DSBL_SHIFT, irq_reg_res_dis);
}
-void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
- u32 irq_status_clearlsw)
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw)
{
- aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw);
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_ISCRLSW_ADR, irq_status_clearlsw);
}
-u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, itr_isrlsw_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_ITR_ISRLSW_ADR);
}
-u32 itr_res_irq_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
- itr_res_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+ HW_ATL_ITR_RES_SHIFT);
}
-void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
{
- aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
- itr_res_shift, res_irq);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+ HW_ATL_ITR_RES_SHIFT, res_irq);
}
/* rdm */
-void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca),
- rdm_dcadcpuid_msk,
- rdm_dcadcpuid_shift, cpuid);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADCPUID_ADR(dca),
+ HW_ATL_RDM_DCADCPUID_MSK,
+ HW_ATL_RDM_DCADCPUID_SHIFT, cpuid);
}
-void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk,
- rdm_dca_en_shift, rx_dca_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_EN_ADR, HW_ATL_RDM_DCA_EN_MSK,
+ HW_ATL_RDM_DCA_EN_SHIFT, rx_dca_en);
}
-void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk,
- rdm_dca_mode_shift, rx_dca_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_MODE_ADR,
+ HW_ATL_RDM_DCA_MODE_MSK,
+ HW_ATL_RDM_DCA_MODE_SHIFT, rx_dca_mode);
}
-void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_data_buff_size, u32 descriptor)
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descddata_size_adr(descriptor),
- rdm_descddata_size_msk,
- rdm_descddata_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor),
+ HW_ATL_RDM_DESCDDATA_SIZE_MSK,
+ HW_ATL_RDM_DESCDDATA_SIZE_SHIFT,
rx_desc_data_buff_size);
}
-void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca)
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca),
- rdm_dcaddesc_en_msk,
- rdm_dcaddesc_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADDESC_EN_ADR(dca),
+ HW_ATL_RDM_DCADDESC_EN_MSK,
+ HW_ATL_RDM_DCADDESC_EN_SHIFT,
rx_desc_dca_en);
}
-void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor)
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor),
- rdm_descden_msk,
- rdm_descden_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDEN_ADR(descriptor),
+ HW_ATL_RDM_DESCDEN_MSK,
+ HW_ATL_RDM_DESCDEN_SHIFT,
rx_desc_en);
}
-void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_buff_size, u32 descriptor)
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor),
- rdm_descdhdr_size_msk,
- rdm_descdhdr_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor),
+ HW_ATL_RDM_DESCDHDR_SIZE_MSK,
+ HW_ATL_RDM_DESCDHDR_SIZE_SHIFT,
rx_desc_head_buff_size);
}
-void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_splitting, u32 descriptor)
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor),
- rdm_descdhdr_split_msk,
- rdm_descdhdr_split_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor),
+ HW_ATL_RDM_DESCDHDR_SPLIT_MSK,
+ HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT,
rx_desc_head_splitting);
}
-u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
- return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor),
- rdm_descdhd_msk, rdm_descdhd_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(descriptor),
+ HW_ATL_RDM_DESCDHD_MSK,
+ HW_ATL_RDM_DESCDHD_SHIFT);
}
-void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor)
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor),
- rdm_descdlen_msk, rdm_descdlen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDLEN_ADR(descriptor),
+ HW_ATL_RDM_DESCDLEN_MSK, HW_ATL_RDM_DESCDLEN_SHIFT,
rx_desc_len);
}
-void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor)
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor),
- rdm_descdreset_msk, rdm_descdreset_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDRESET_ADR(descriptor),
+ HW_ATL_RDM_DESCDRESET_MSK,
+ HW_ATL_RDM_DESCDRESET_SHIFT,
rx_desc_res);
}
-void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_wr_wb_irq_en)
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en)
{
- aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr,
- rdm_int_desc_wrb_en_msk,
- rdm_int_desc_wrb_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_DESC_WRB_EN_ADR,
+ HW_ATL_RDM_INT_DESC_WRB_EN_MSK,
+ HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT,
rx_desc_wr_wb_irq_en);
}
-void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca)
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca),
- rdm_dcadhdr_en_msk,
- rdm_dcadhdr_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADHDR_EN_ADR(dca),
+ HW_ATL_RDM_DCADHDR_EN_MSK,
+ HW_ATL_RDM_DCADHDR_EN_SHIFT,
rx_head_dca_en);
}
-void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca)
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca),
- rdm_dcadpay_en_msk, rdm_dcadpay_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADPAY_EN_ADR(dca),
+ HW_ATL_RDM_DCADPAY_EN_MSK,
+ HW_ATL_RDM_DCADPAY_EN_SHIFT,
rx_pld_dca_en);
}
-void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en)
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 rdm_intr_moder_en)
{
- aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr,
- rdm_int_rim_en_msk,
- rdm_int_rim_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_RIM_EN_ADR,
+ HW_ATL_RDM_INT_RIM_EN_MSK,
+ HW_ATL_RDM_INT_RIM_EN_SHIFT,
rdm_intr_moder_en);
}
/* reg */
-void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx)
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+ u32 regidx)
{
- aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map);
+ aq_hw_write_reg(aq_hw, HW_ATL_GEN_INTR_MAP_ADR(regidx), gen_intr_map);
}
-u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, gen_intr_stat_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_GEN_INTR_STAT_ADR);
}
-void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
{
- aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl);
+ aq_hw_write_reg(aq_hw, HW_ATL_INTR_GLB_CTL_ADR, intr_glb_ctl);
}
-void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
{
- aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr);
+ aq_hw_write_reg(aq_hw, HW_ATL_INTR_THR_ADR(throttle), intr_thr);
}
-void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrlsw,
- u32 descriptor)
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
rx_dma_desc_base_addrlsw);
}
-void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrmsw,
- u32 descriptor)
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
rx_dma_desc_base_addrmsw);
}
-u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
- return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor));
+ return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor));
}
-void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_tail_ptr, u32 descriptor)
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor),
rx_dma_desc_tail_ptr);
}
-void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk)
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_mcst_flr_msk)
{
- aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_MSK_ADR,
+ rx_flr_mcst_flr_msk);
}
-void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
- u32 filter)
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter)
{
- aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_ADR(filter),
+ rx_flr_mcst_flr);
}
-void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1)
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_rss_control1)
{
- aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_RSS_CONTROL1_ADR,
+ rx_flr_rss_control1);
}
-void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2)
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw,
+ u32 rx_filter_control2)
{
- aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2);
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_CONTROL2_ADR, rx_filter_control2);
}
-void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 rx_intr_moderation_ctl,
- u32 queue)
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
+ u32 queue)
{
- aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue),
+ aq_hw_write_reg(aq_hw, HW_ATL_RX_INTR_MODERATION_CTL_ADR(queue),
rx_intr_moderation_ctl);
}
-void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl)
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_debug_ctl)
{
- aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl);
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DEBUG_CTL_ADR, tx_dma_debug_ctl);
}
-void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrlsw,
- u32 descriptor)
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
tx_dma_desc_base_addrlsw);
}
-void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrmsw,
- u32 descriptor)
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
tx_dma_desc_base_addrmsw);
}
-void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_tail_ptr, u32 descriptor)
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr,
+ u32 descriptor)
{
- aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor),
tx_dma_desc_tail_ptr);
}
-void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 tx_intr_moderation_ctl,
- u32 queue)
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue)
{
- aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue),
+ aq_hw_write_reg(aq_hw, HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue),
tx_intr_moderation_ctl);
}
/* RPB: rx packet buffer */
-void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
{
- aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr,
- rpb_dma_sys_lbk_msk,
- rpb_dma_sys_lbk_shift, dma_sys_lbk);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_SYS_LBK_ADR,
+ HW_ATL_RPB_DMA_SYS_LBK_MSK,
+ HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
}
-void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
- u32 rx_traf_class_mode)
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr,
- rpb_rpf_rx_tc_mode_msk,
- rpb_rpf_rx_tc_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
+ HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
+ HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT,
rx_traf_class_mode);
}
-void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk,
- rpb_rx_buf_en_shift, rx_buff_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
+ HW_ATL_RPB_RX_BUF_EN_MSK,
+ HW_ATL_RPB_RX_BUF_EN_SHIFT, rx_buff_en);
}
-void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_hi_threshold_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer),
- rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBHI_THRESH_ADR(buffer),
+ HW_ATL_RPB_RXBHI_THRESH_MSK,
+ HW_ATL_RPB_RXBHI_THRESH_SHIFT,
rx_buff_hi_threshold_per_tc);
}
-void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_lo_threshold_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
+ u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer),
- rpb_rxblo_thresh_msk,
- rpb_rxblo_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBLO_THRESH_ADR(buffer),
+ HW_ATL_RPB_RXBLO_THRESH_MSK,
+ HW_ATL_RPB_RXBLO_THRESH_SHIFT,
rx_buff_lo_threshold_per_tc);
}
-void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_flow_ctl_mode)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr,
- rpb_rx_fc_mode_msk,
- rpb_rx_fc_mode_shift, rx_flow_ctl_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_FC_MODE_ADR,
+ HW_ATL_RPB_RX_FC_MODE_MSK,
+ HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
}
-void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_pkt_buff_size_per_tc, u32 buffer)
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc, u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxbbuf_size_adr(buffer),
- rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer),
+ HW_ATL_RPB_RXBBUF_SIZE_MSK,
+ HW_ATL_RPB_RXBBUF_SIZE_SHIFT,
rx_pkt_buff_size_per_tc);
}
-void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_xoff_en_per_tc, u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer),
- rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer),
+ HW_ATL_RPB_RXBXOFF_EN_MSK,
+ HW_ATL_RPB_RXBXOFF_EN_SHIFT,
rx_xoff_en_per_tc);
}
/* rpf */
-void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
- u32 l2broadcast_count_threshold)
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr,
- rpfl2bc_thresh_msk,
- rpfl2bc_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_THRESH_ADR,
+ HW_ATL_RPFL2BC_THRESH_MSK,
+ HW_ATL_RPFL2BC_THRESH_SHIFT,
l2broadcast_count_threshold);
}
-void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk,
- rpfl2bc_en_shift, l2broadcast_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_EN_ADR, HW_ATL_RPFL2BC_EN_MSK,
+ HW_ATL_RPFL2BC_EN_SHIFT, l2broadcast_en);
}
-void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act)
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_flr_act)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk,
- rpfl2bc_act_shift, l2broadcast_flr_act);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_ACT_ADR,
+ HW_ATL_RPFL2BC_ACT_MSK,
+ HW_ATL_RPFL2BC_ACT_SHIFT, l2broadcast_flr_act);
}
-void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
- u32 filter)
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+ u32 l2multicast_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter),
- rpfl2mc_enf_msk,
- rpfl2mc_enf_shift, l2multicast_flr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ENF_ADR(filter),
+ HW_ATL_RPFL2MC_ENF_MSK,
+ HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
}
-void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
- u32 l2promiscuous_mode_en)
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr,
- rpfl2promis_mode_msk,
- rpfl2promis_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR,
+ HW_ATL_RPFL2PROMIS_MODE_MSK,
+ HW_ATL_RPFL2PROMIS_MODE_SHIFT,
l2promiscuous_mode_en);
}
-void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
- u32 filter)
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_flr_act,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter),
- rpfl2uc_actf_msk, rpfl2uc_actf_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ACTF_ADR(filter),
+ HW_ATL_RPFL2UC_ACTF_MSK, HW_ATL_RPFL2UC_ACTF_SHIFT,
l2unicast_flr_act);
}
-void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
- u32 filter)
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter),
- rpfl2uc_enf_msk,
- rpfl2uc_enf_shift, l2unicast_flr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ENF_ADR(filter),
+ HW_ATL_RPFL2UC_ENF_MSK,
+ HW_ATL_RPFL2UC_ENF_SHIFT, l2unicast_flr_en);
}
-void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addresslsw,
- u32 filter)
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
+ u32 filter)
{
- aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter),
+ aq_hw_write_reg(aq_hw, HW_ATL_RPFL2UC_DAFLSW_ADR(filter),
l2unicast_dest_addresslsw);
}
-void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addressmsw,
- u32 filter)
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter),
- rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_DAFMSW_ADR(filter),
+ HW_ATL_RPFL2UC_DAFMSW_MSK,
+ HW_ATL_RPFL2UC_DAFMSW_SHIFT,
l2unicast_dest_addressmsw);
}
-void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
- u32 l2_accept_all_mc_packets)
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets)
{
- aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr,
- rpfl2mc_accept_all_msk,
- rpfl2mc_accept_all_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ACCEPT_ALL_ADR,
+ HW_ATL_RPFL2MC_ACCEPT_ALL_MSK,
+ HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT,
l2_accept_all_mc_packets);
}
-void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
- u32 user_priority_tc_map, u32 tc)
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority_tc_map, u32 tc)
{
/* register address for bitfield rx_tc_up{t}[2:0] */
static u32 rpf_rpb_rx_tc_upt_adr[8] = {
- 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U,
- 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U
+ 0x000054C4U, 0x000054C4U, 0x000054C4U, 0x000054C4U,
+ 0x000054C4U, 0x000054C4U, 0x000054C4U, 0x000054C4U
};
/* bitmask for bitfield rx_tc_up{t}[2:0] */
@@ -711,273 +750,290 @@ void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
user_priority_tc_map);
}
-void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr,
- rpf_rss_key_addr_msk,
- rpf_rss_key_addr_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_ADDR_ADR,
+ HW_ATL_RPF_RSS_KEY_ADDR_MSK,
+ HW_ATL_RPF_RSS_KEY_ADDR_SHIFT,
rss_key_addr);
}
-void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
{
- aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr,
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_RSS_KEY_WR_DATA_ADR,
rss_key_wr_data);
}
-u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
- rpf_rss_key_wr_eni_msk,
- rpf_rss_key_wr_eni_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT);
}
-void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
- rpf_rss_key_wr_eni_msk,
- rpf_rss_key_wr_eni_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT,
rss_key_wr_en);
}
-void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr)
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_addr)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr,
- rpf_rss_redir_addr_msk,
- rpf_rss_redir_addr_shift, rss_redir_tbl_addr);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_ADDR_ADR,
+ HW_ATL_RPF_RSS_REDIR_ADDR_MSK,
+ HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT,
+ rss_redir_tbl_addr);
}
-void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
- u32 rss_redir_tbl_wr_data)
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr,
- rpf_rss_redir_wr_data_msk,
- rpf_rss_redir_wr_data_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT,
rss_redir_tbl_wr_data);
}
-u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
- rpf_rss_redir_wr_eni_msk,
- rpf_rss_redir_wr_eni_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT);
}
-void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
{
- aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
- rpf_rss_redir_wr_eni_msk,
- rpf_rss_redir_wr_eni_shift, rss_redir_wr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+ HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT, rss_redir_wr_en);
}
-void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk)
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+ u32 tpo_to_rpf_sys_lbk)
{
- aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr,
- rpf_tpo_rpf_sys_lbk_msk,
- rpf_tpo_rpf_sys_lbk_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR,
+ HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK,
+ HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT,
tpo_to_rpf_sys_lbk);
}
-void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr,
- rpf_vl_inner_tpid_msk,
- rpf_vl_inner_tpid_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR,
+ HW_ATL_RPF_VL_INNER_TPID_MSK,
+ HW_ATL_RPF_VL_INNER_TPID_SHIFT,
vlan_inner_etht);
}
-void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr,
- rpf_vl_outer_tpid_msk,
- rpf_vl_outer_tpid_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_OUTER_TPID_ADR,
+ HW_ATL_RPF_VL_OUTER_TPID_MSK,
+ HW_ATL_RPF_VL_OUTER_TPID_SHIFT,
vlan_outer_etht);
}
-void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en)
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 vlan_prom_mode_en)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr,
- rpf_vl_promis_mode_msk,
- rpf_vl_promis_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
+ HW_ATL_RPF_VL_PROMIS_MODE_MSK,
+ HW_ATL_RPF_VL_PROMIS_MODE_SHIFT,
vlan_prom_mode_en);
}
-void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
- u32 vlan_accept_untagged_packets)
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_acc_untagged_packets)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr,
- rpf_vl_accept_untagged_mode_msk,
- rpf_vl_accept_untagged_mode_shift,
- vlan_accept_untagged_packets);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR,
+ HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK,
+ HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT,
+ vlan_acc_untagged_packets);
}
-void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act)
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+ u32 vlan_untagged_act)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr,
- rpf_vl_untagged_act_msk,
- rpf_vl_untagged_act_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR,
+ HW_ATL_RPF_VL_UNTAGGED_ACT_MSK,
+ HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT,
vlan_untagged_act);
}
-void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter)
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter),
- rpf_vl_en_f_msk,
- rpf_vl_en_f_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter),
+ HW_ATL_RPF_VL_EN_F_MSK,
+ HW_ATL_RPF_VL_EN_F_SHIFT,
vlan_flr_en);
}
-void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter)
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter),
- rpf_vl_act_f_msk,
- rpf_vl_act_f_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACT_F_ADR(filter),
+ HW_ATL_RPF_VL_ACT_F_MSK,
+ HW_ATL_RPF_VL_ACT_F_SHIFT,
vlan_flr_act);
}
-void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter)
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter),
- rpf_vl_id_f_msk,
- rpf_vl_id_f_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(filter),
+ HW_ATL_RPF_VL_ID_F_MSK,
+ HW_ATL_RPF_VL_ID_F_SHIFT,
vlan_id_flr);
}
-void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter)
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter),
- rpf_et_enf_msk,
- rpf_et_enf_shift, etht_flr_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter),
+ HW_ATL_RPF_ET_ENF_MSK,
+ HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en);
}
-void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
- u32 etht_user_priority_en, u32 filter)
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en, u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter),
- rpf_et_upfen_msk, rpf_et_upfen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter),
+ HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT,
etht_user_priority_en);
}
-void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
- u32 filter)
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_rx_queue_en,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter),
- rpf_et_rxqfen_msk, rpf_et_rxqfen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter),
+ HW_ATL_RPF_ET_RXQFEN_MSK,
+ HW_ATL_RPF_ET_RXQFEN_SHIFT,
etht_rx_queue_en);
}
-void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
- u32 filter)
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter),
- rpf_et_upf_msk,
- rpf_et_upf_shift, etht_user_priority);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter),
+ HW_ATL_RPF_ET_UPF_MSK,
+ HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority);
}
-void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
- u32 filter)
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter),
- rpf_et_rxqf_msk,
- rpf_et_rxqf_shift, etht_rx_queue);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter),
+ HW_ATL_RPF_ET_RXQF_MSK,
+ HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue);
}
-void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
- u32 filter)
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter),
- rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter),
+ HW_ATL_RPF_ET_MNG_RXQF_MSK,
+ HW_ATL_RPF_ET_MNG_RXQF_SHIFT,
etht_mgt_queue);
}
-void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter)
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+ u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter),
- rpf_et_actf_msk,
- rpf_et_actf_shift, etht_flr_act);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter),
+ HW_ATL_RPF_ET_ACTF_MSK,
+ HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act);
}
-void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
{
- aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter),
- rpf_et_valf_msk,
- rpf_et_valf_shift, etht_flr);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter),
+ HW_ATL_RPF_ET_VALF_MSK,
+ HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
}
/* RPO: rx packet offload */
-void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en)
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr,
- rpo_ipv4chk_en_msk,
- rpo_ipv4chk_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_IPV4CHK_EN_ADR,
+ HW_ATL_RPO_IPV4CHK_EN_MSK,
+ HW_ATL_RPO_IPV4CHK_EN_SHIFT,
ipv4header_crc_offload_en);
}
-void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_vlan_stripping, u32 descriptor)
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor),
- rpo_descdvl_strip_msk,
- rpo_descdvl_strip_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor),
+ HW_ATL_RPO_DESCDVL_STRIP_MSK,
+ HW_ATL_RPO_DESCDVL_STRIP_SHIFT,
rx_desc_vlan_stripping);
}
-void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en)
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk,
- rpol4chk_en_shift, tcp_udp_crc_offload_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPOL4CHK_EN_ADR,
+ HW_ATL_RPOL4CHK_EN_MSK,
+ HW_ATL_RPOL4CHK_EN_SHIFT, tcp_udp_crc_offload_en);
}
-void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
{
- aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en);
+ aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_EN_ADR, lro_en);
}
-void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
- u32 lro_patch_optimization_en)
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr,
- rpo_lro_ptopt_en_msk,
- rpo_lro_ptopt_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PTOPT_EN_ADR,
+ HW_ATL_RPO_LRO_PTOPT_EN_MSK,
+ HW_ATL_RPO_LRO_PTOPT_EN_SHIFT,
lro_patch_optimization_en);
}
-void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
- u32 lro_qsessions_lim)
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_qsessions_lim)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr,
- rpo_lro_qses_lmt_msk,
- rpo_lro_qses_lmt_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_QSES_LMT_ADR,
+ HW_ATL_RPO_LRO_QSES_LMT_MSK,
+ HW_ATL_RPO_LRO_QSES_LMT_SHIFT,
lro_qsessions_lim);
}
-void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim)
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_total_desc_lim)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr,
- rpo_lro_tot_dsc_lmt_msk,
- rpo_lro_tot_dsc_lmt_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR,
+ HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK,
+ HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT,
lro_total_desc_lim);
}
-void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lro_min_pld_of_first_pkt)
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr,
- rpo_lro_pkt_min_msk,
- rpo_lro_pkt_min_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PKT_MIN_ADR,
+ HW_ATL_RPO_LRO_PKT_MIN_MSK,
+ HW_ATL_RPO_LRO_PKT_MIN_SHIFT,
lro_min_pld_of_first_pkt);
}
-void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
{
- aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim);
+ aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_RSC_MAX_ADR, lro_pkt_lim);
}
-void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
- u32 lro_max_number_of_descriptors,
- u32 lro)
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_number_of_descriptors,
+ u32 lro)
{
/* Register address for bitfield lro{L}_des_max[1:0] */
static u32 rpo_lro_ldes_max_adr[32] = {
@@ -1017,378 +1073,390 @@ void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
lro_max_number_of_descriptors);
}
-void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
- u32 lro_time_base_divider)
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr,
- rpo_lro_tb_div_msk,
- rpo_lro_tb_div_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TB_DIV_ADR,
+ HW_ATL_RPO_LRO_TB_DIV_MSK,
+ HW_ATL_RPO_LRO_TB_DIV_SHIFT,
lro_time_base_divider);
}
-void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_inactive_interval)
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr,
- rpo_lro_ina_ival_msk,
- rpo_lro_ina_ival_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_INA_IVAL_ADR,
+ HW_ATL_RPO_LRO_INA_IVAL_MSK,
+ HW_ATL_RPO_LRO_INA_IVAL_SHIFT,
lro_inactive_interval);
}
-void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_max_coalescing_interval)
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coal_interval)
{
- aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr,
- rpo_lro_max_ival_msk,
- rpo_lro_max_ival_shift,
- lro_max_coalescing_interval);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_MAX_IVAL_ADR,
+ HW_ATL_RPO_LRO_MAX_IVAL_MSK,
+ HW_ATL_RPO_LRO_MAX_IVAL_SHIFT,
+ lro_max_coal_interval);
}
/* rx */
-void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr,
- rx_reg_res_dsbl_msk,
- rx_reg_res_dsbl_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RX_REG_RES_DSBL_ADR,
+ HW_ATL_RX_REG_RES_DSBL_MSK,
+ HW_ATL_RX_REG_RES_DSBL_SHIFT,
rx_reg_res_dis);
}
/* tdm */
-void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca),
- tdm_dcadcpuid_msk,
- tdm_dcadcpuid_shift, cpuid);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADCPUID_ADR(dca),
+ HW_ATL_TDM_DCADCPUID_MSK,
+ HW_ATL_TDM_DCADCPUID_SHIFT, cpuid);
}
-void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
- u32 large_send_offload_en)
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en)
{
- aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en);
+ aq_hw_write_reg(aq_hw, HW_ATL_TDM_LSO_EN_ADR, large_send_offload_en);
}
-void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk,
- tdm_dca_en_shift, tx_dca_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_EN_ADR, HW_ATL_TDM_DCA_EN_MSK,
+ HW_ATL_TDM_DCA_EN_SHIFT, tx_dca_en);
}
-void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk,
- tdm_dca_mode_shift, tx_dca_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_MODE_ADR,
+ HW_ATL_TDM_DCA_MODE_MSK,
+ HW_ATL_TDM_DCA_MODE_SHIFT, tx_dca_mode);
}
-void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca)
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+ u32 dca)
{
- aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca),
- tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADDESC_EN_ADR(dca),
+ HW_ATL_TDM_DCADDESC_EN_MSK,
+ HW_ATL_TDM_DCADDESC_EN_SHIFT,
tx_desc_dca_en);
}
-void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor)
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor),
- tdm_descden_msk,
- tdm_descden_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDEN_ADR(descriptor),
+ HW_ATL_TDM_DESCDEN_MSK,
+ HW_ATL_TDM_DESCDEN_SHIFT,
tx_desc_en);
}
-u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
- return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor),
- tdm_descdhd_msk, tdm_descdhd_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TDM_DESCDHD_ADR(descriptor),
+ HW_ATL_TDM_DESCDHD_MSK,
+ HW_ATL_TDM_DESCDHD_SHIFT);
}
-void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
- u32 descriptor)
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, tdm_descdlen_adr(descriptor),
- tdm_descdlen_msk,
- tdm_descdlen_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDLEN_ADR(descriptor),
+ HW_ATL_TDM_DESCDLEN_MSK,
+ HW_ATL_TDM_DESCDLEN_SHIFT,
tx_desc_len);
}
-void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_irq_en)
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en)
{
- aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr,
- tdm_int_desc_wrb_en_msk,
- tdm_int_desc_wrb_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_DESC_WRB_EN_ADR,
+ HW_ATL_TDM_INT_DESC_WRB_EN_MSK,
+ HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT,
tx_desc_wr_wb_irq_en);
}
-void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_threshold,
- u32 descriptor)
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
+ u32 descriptor)
{
- aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor),
- tdm_descdwrb_thresh_msk,
- tdm_descdwrb_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor),
+ HW_ATL_TDM_DESCDWRB_THRESH_MSK,
+ HW_ATL_TDM_DESCDWRB_THRESH_SHIFT,
tx_desc_wr_wb_threshold);
}
-void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
- u32 tdm_irq_moderation_en)
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en)
{
- aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr,
- tdm_int_mod_en_msk,
- tdm_int_mod_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_MOD_EN_ADR,
+ HW_ATL_TDM_INT_MOD_EN_MSK,
+ HW_ATL_TDM_INT_MOD_EN_SHIFT,
tdm_irq_moderation_en);
}
/* thm */
-void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_first_pkt)
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt)
{
- aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr,
- thm_lso_tcp_flag_first_msk,
- thm_lso_tcp_flag_first_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT,
lso_tcp_flag_of_first_pkt);
}
-void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_last_pkt)
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt)
{
- aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr,
- thm_lso_tcp_flag_last_msk,
- thm_lso_tcp_flag_last_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT,
lso_tcp_flag_of_last_pkt);
}
-void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_middle_pkt)
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt)
{
- aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr,
- thm_lso_tcp_flag_mid_msk,
- thm_lso_tcp_flag_mid_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_MID_ADR,
+ HW_ATL_THM_LSO_TCP_FLAG_MID_MSK,
+ HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT,
lso_tcp_flag_of_middle_pkt);
}
/* TPB: tx packet buffer */
-void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
{
- aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk,
- tpb_tx_buf_en_shift, tx_buff_en);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_BUF_EN_ADR,
+ HW_ATL_TPB_TX_BUF_EN_MSK,
+ HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
-void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_hi_threshold_per_tc,
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer),
- tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBHI_THRESH_ADR(buffer),
+ HW_ATL_TPB_TXBHI_THRESH_MSK,
+ HW_ATL_TPB_TXBHI_THRESH_SHIFT,
tx_buff_hi_threshold_per_tc);
}
-void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_lo_threshold_per_tc,
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer),
- tpb_txblo_thresh_msk, tpb_txblo_thresh_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBLO_THRESH_ADR(buffer),
+ HW_ATL_TPB_TXBLO_THRESH_MSK,
+ HW_ATL_TPB_TXBLO_THRESH_SHIFT,
tx_buff_lo_threshold_per_tc);
}
-void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_sys_lbk_en)
{
- aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr,
- tpb_dma_sys_lbk_msk,
- tpb_dma_sys_lbk_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_SYS_LBK_ADR,
+ HW_ATL_TPB_DMA_SYS_LBK_MSK,
+ HW_ATL_TPB_DMA_SYS_LBK_SHIFT,
tx_dma_sys_lbk_en);
}
-void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_buff_size_per_tc, u32 buffer)
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc, u32 buffer)
{
- aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer),
- tpb_txbbuf_size_msk,
- tpb_txbbuf_size_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer),
+ HW_ATL_TPB_TXBBUF_SIZE_MSK,
+ HW_ATL_TPB_TXBBUF_SIZE_SHIFT,
tx_pkt_buff_size_per_tc);
}
-void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_path_scp_ins_en)
{
- aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr,
- tpb_tx_scp_ins_en_msk,
- tpb_tx_scp_ins_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_SCP_INS_EN_ADR,
+ HW_ATL_TPB_TX_SCP_INS_EN_MSK,
+ HW_ATL_TPB_TX_SCP_INS_EN_SHIFT,
tx_path_scp_ins_en);
}
/* TPO: tx packet offload */
-void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en)
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr,
- tpo_ipv4chk_en_msk,
- tpo_ipv4chk_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_IPV4CHK_EN_ADR,
+ HW_ATL_TPO_IPV4CHK_EN_MSK,
+ HW_ATL_TPO_IPV4CHK_EN_SHIFT,
ipv4header_crc_offload_en);
}
-void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en)
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
{
- aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr,
- tpol4chk_en_msk,
- tpol4chk_en_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPOL4CHK_EN_ADR,
+ HW_ATL_TPOL4CHK_EN_MSK,
+ HW_ATL_TPOL4CHK_EN_SHIFT,
tcp_udp_crc_offload_en);
}
-void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en)
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_sys_lbk_en)
{
- aq_hw_write_reg_bit(aq_hw, tpo_pkt_sys_lbk_adr,
- tpo_pkt_sys_lbk_msk,
- tpo_pkt_sys_lbk_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_PKT_SYS_LBK_ADR,
+ HW_ATL_TPO_PKT_SYS_LBK_MSK,
+ HW_ATL_TPO_PKT_SYS_LBK_SHIFT,
tx_pkt_sys_lbk_en);
}
/* TPS: tx packet scheduler */
-void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_data_arb_mode)
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode)
{
- aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr,
- tps_data_tc_arb_mode_msk,
- tps_data_tc_arb_mode_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TC_ARB_MODE_ADR,
+ HW_ATL_TPS_DATA_TC_ARB_MODE_MSK,
+ HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT,
tx_pkt_shed_data_arb_mode);
}
-void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
- u32 curr_time_res)
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr,
- tps_desc_rate_ta_rst_msk,
- tps_desc_rate_ta_rst_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_TA_RST_ADR,
+ HW_ATL_TPS_DESC_RATE_TA_RST_MSK,
+ HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT,
curr_time_res);
}
-void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_rate_lim)
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr,
- tps_desc_rate_lim_msk,
- tps_desc_rate_lim_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_LIM_ADR,
+ HW_ATL_TPS_DESC_RATE_LIM_MSK,
+ HW_ATL_TPS_DESC_RATE_LIM_SHIFT,
tx_pkt_shed_desc_rate_lim);
}
-void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_arb_mode)
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr,
- tps_desc_tc_arb_mode_msk,
- tps_desc_tc_arb_mode_shift,
- tx_pkt_shed_desc_tc_arb_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TC_ARB_MODE_ADR,
+ HW_ATL_TPS_DESC_TC_ARB_MODE_MSK,
+ HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT,
+ arb_mode);
}
-void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_max_credit,
- u32 tc)
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc),
- tps_desc_tctcredit_max_msk,
- tps_desc_tctcredit_max_shift,
- tx_pkt_shed_desc_tc_max_credit);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK,
+ HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT,
+ max_credit);
}
-void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_weight, u32 tc)
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_weight,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc),
- tps_desc_tctweight_msk,
- tps_desc_tctweight_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc),
+ HW_ATL_TPS_DESC_TCTWEIGHT_MSK,
+ HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT,
tx_pkt_shed_desc_tc_weight);
}
-void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_vm_arb_mode)
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode)
{
- aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr,
- tps_desc_vm_arb_mode_msk,
- tps_desc_vm_arb_mode_shift,
- tx_pkt_shed_desc_vm_arb_mode);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_VM_ARB_MODE_ADR,
+ HW_ATL_TPS_DESC_VM_ARB_MODE_MSK,
+ HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT,
+ arb_mode);
}
-void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_max_credit,
- u32 tc)
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc),
- tps_data_tctcredit_max_msk,
- tps_data_tctcredit_max_shift,
- tx_pkt_shed_tc_data_max_credit);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
+ HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK,
+ HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT,
+ max_credit);
}
-void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_weight, u32 tc)
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight,
+ u32 tc)
{
- aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc),
- tps_data_tctweight_msk,
- tps_data_tctweight_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc),
+ HW_ATL_TPS_DATA_TCTWEIGHT_MSK,
+ HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT,
tx_pkt_shed_tc_data_weight);
}
/* tx */
-void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr,
- tx_reg_res_dsbl_msk,
- tx_reg_res_dsbl_shift, tx_reg_res_dis);
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_TX_REG_RES_DSBL_ADR,
+ HW_ATL_TX_REG_RES_DSBL_MSK,
+ HW_ATL_TX_REG_RES_DSBL_SHIFT, tx_reg_res_dis);
}
/* msm */
-u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr,
- msm_reg_access_busy_msk,
- msm_reg_access_busy_shift);
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_MSM_REG_ACCESS_BUSY_ADR,
+ HW_ATL_MSM_REG_ACCESS_BUSY_MSK,
+ HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT);
}
-void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
- u32 reg_addr_for_indirect_addr)
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr)
{
- aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr,
- msm_reg_addr_msk,
- msm_reg_addr_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_ADDR_ADR,
+ HW_ATL_MSM_REG_ADDR_MSK,
+ HW_ATL_MSM_REG_ADDR_SHIFT,
reg_addr_for_indirect_addr);
}
-void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
{
- aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr,
- msm_reg_rd_strobe_msk,
- msm_reg_rd_strobe_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_RD_STROBE_ADR,
+ HW_ATL_MSM_REG_RD_STROBE_MSK,
+ HW_ATL_MSM_REG_RD_STROBE_SHIFT,
reg_rd_strobe);
}
-u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr);
+ return aq_hw_read_reg(aq_hw, HW_ATL_MSM_REG_RD_DATA_ADR);
}
-void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
{
- aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data);
+ aq_hw_write_reg(aq_hw, HW_ATL_MSM_REG_WR_DATA_ADR, reg_wr_data);
}
-void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
{
- aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr,
- msm_reg_wr_strobe_msk,
- msm_reg_wr_strobe_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_WR_STROBE_ADR,
+ HW_ATL_MSM_REG_WR_STROBE_MSK,
+ HW_ATL_MSM_REG_WR_STROBE_SHIFT,
reg_wr_strobe);
}
/* pci */
-void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
{
- aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr,
- pci_reg_res_dsbl_msk,
- pci_reg_res_dsbl_shift,
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_PCI_REG_RES_DSBL_ADR,
+ HW_ATL_PCI_REG_RES_DSBL_MSK,
+ HW_ATL_PCI_REG_RES_DSBL_SHIFT,
pci_reg_res_dis);
}
-void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp,
- u32 scratch_scp)
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+ u32 glb_cpu_scratch_scp,
+ u32 scratch_scp)
{
- aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp),
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp),
glb_cpu_scratch_scp);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index ed1085b95adb..dfb426f2dc2c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -21,657 +21,681 @@ struct aq_hw_s;
/* global */
/* set global microprocessor semaphore */
-void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
- u32 semaphore);
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+ u32 semaphore);
/* get global microprocessor semaphore */
-u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
/* set global register reset disable */
-void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
/* set soft reset */
-void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
/* get soft reset */
-u32 glb_soft_res_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);
/* stats */
-u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
/* get rx dma good octet counter lsw */
-u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good packet counter lsw */
-u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good octet counter lsw */
-u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good packet counter lsw */
-u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good octet counter msw */
-u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good packet counter msw */
-u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good octet counter msw */
-u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good packet counter msw */
-u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
/* get msm rx errors counter register */
-u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx unicast frames counter register */
-u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx multicast frames counter register */
-u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx broadcast frames counter register */
-u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx broadcast octets counter register 1 */
-u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm rx unicast octets counter register 0 */
-u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
/* get rx dma statistics counter 7 */
-u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
/* get msm tx errors counter register */
-u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx unicast frames counter register */
-u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx multicast frames counter register */
-u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx broadcast frames counter register */
-u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx multicast octets counter register 1 */
-u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm tx broadcast octets counter register 1 */
-u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm tx unicast octets counter register 0 */
-u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
/* get global mif identification */
-u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
/* interrupt */
/* set interrupt auto mask lsw */
-void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw);
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_auto_masklsw);
/* set interrupt mapping enable rx */
-void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx);
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+ u32 rx);
/* set interrupt mapping enable tx */
-void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx);
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+ u32 tx);
/* set interrupt mapping rx */
-void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
/* set interrupt mapping tx */
-void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
/* set interrupt mask clear lsw */
-void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw);
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_msk_clearlsw);
/* set interrupt mask set lsw */
-void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
/* set interrupt register reset disable */
-void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
/* set interrupt status clear lsw */
-void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
- u32 irq_status_clearlsw);
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw);
/* get interrupt status lsw */
-u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
/* get reset interrupt */
-u32 itr_res_irq_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
/* set reset interrupt */
-void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
/* rdm */
/* set cpu id */
-void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
/* set rx dca enable */
-void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
/* set rx dca mode */
-void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
/* set rx descriptor data buffer size */
-void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_data_buff_size,
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size,
u32 descriptor);
/* set rx descriptor dca enable */
-void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
- u32 dca);
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+ u32 dca);
/* set rx descriptor enable */
-void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+ u32 descriptor);
/* set rx descriptor header splitting */
-void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_splitting,
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting,
u32 descriptor);
/* get rx descriptor head pointer */
-u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set rx descriptor length */
-void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+ u32 descriptor);
/* set rx descriptor write-back interrupt enable */
-void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_wr_wb_irq_en);
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en);
/* set rx header dca enable */
-void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
- u32 dca);
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+ u32 dca);
/* set rx payload dca enable */
-void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca);
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+ u32 dca);
/* set rx descriptor header buffer size */
-void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_head_buff_size,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size,
+ u32 descriptor);
/* set rx descriptor reset */
-void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
- u32 descriptor);
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+ u32 descriptor);
/* Set RDM Interrupt Moderation Enable */
-void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en);
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 rdm_intr_moder_en);
/* reg */
/* set general interrupt mapping register */
-void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx);
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+ u32 regidx);
/* get general interrupt status register */
-u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
/* set interrupt global control register */
-void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
/* set interrupt throttle register */
-void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
/* set rx dma descriptor base address lsw */
-void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrlsw,
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
u32 descriptor);
/* set rx dma descriptor base address msw */
-void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_base_addrmsw,
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
u32 descriptor);
/* get rx dma descriptor status register */
-u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set rx dma descriptor tail pointer register */
-void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 rx_dma_desc_tail_ptr,
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr,
u32 descriptor);
/* set rx filter multicast filter mask register */
-void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
- u32 rx_flr_mcst_flr_msk);
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_mcst_flr_msk);
/* set rx filter multicast filter register */
-void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
- u32 filter);
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter);
/* set rx filter rss control register 1 */
-void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
- u32 rx_flr_rss_control1);
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_rss_control1);
/* Set RX Filter Control Register 2 */
-void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
/* Set RX Interrupt Moderation Control Register */
-void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 rx_intr_moderation_ctl,
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
u32 queue);
/* set tx dma debug control */
-void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl);
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_debug_ctl);
/* set tx dma descriptor base address lsw */
-void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrlsw,
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
u32 descriptor);
/* set tx dma descriptor base address msw */
-void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_base_addrmsw,
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
u32 descriptor);
/* set tx dma descriptor tail pointer register */
-void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
- u32 tx_dma_desc_tail_ptr,
- u32 descriptor);
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr,
+ u32 descriptor);
/* Set TX Interrupt Moderation Control Register */
-void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
- u32 tx_intr_moderation_ctl,
- u32 queue);
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue);
/* set global microprocessor scratch pad */
-void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
- u32 glb_cpu_scratch_scp, u32 scratch_scp);
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+ u32 glb_cpu_scratch_scp,
+ u32 scratch_scp);
/* rpb */
/* set dma system loopback */
-void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
/* set rx traffic class mode */
-void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
- u32 rx_traf_class_mode);
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode);
/* set rx buffer enable */
-void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
/* set rx buffer high threshold (per tc) */
-void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_hi_threshold_per_tc,
- u32 buffer);
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer);
/* set rx buffer low threshold (per tc) */
-void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_buff_lo_threshold_per_tc,
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
u32 buffer);
/* set rx flow control mode */
-void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_flow_ctl_mode);
/* set rx packet buffer size (per tc) */
-void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 rx_pkt_buff_size_per_tc,
- u32 buffer);
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc,
+ u32 buffer);
/* set rx xoff enable (per tc) */
-void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
- u32 buffer);
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_xoff_en_per_tc,
+ u32 buffer);
/* rpf */
/* set l2 broadcast count threshold */
-void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
- u32 l2broadcast_count_threshold);
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold);
/* set l2 broadcast enable */
-void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
/* set l2 broadcast filter action */
-void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
- u32 l2broadcast_flr_act);
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_flr_act);
/* set l2 multicast filter enable */
-void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
- u32 filter);
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+ u32 l2multicast_flr_en,
+ u32 filter);
/* set l2 promiscuous mode enable */
-void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
- u32 l2promiscuous_mode_en);
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en);
/* set l2 unicast filter action */
-void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
- u32 filter);
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_flr_act,
+ u32 filter);
/* set l2 unicast filter enable */
-void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
- u32 filter);
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter);
/* set l2 unicast destination address lsw */
-void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addresslsw,
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
u32 filter);
/* set l2 unicast destination address msw */
-void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
- u32 l2unicast_dest_addressmsw,
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
u32 filter);
/* Set L2 Accept all Multicast packets */
-void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
- u32 l2_accept_all_mc_packets);
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets);
/* set user-priority tc mapping */
-void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
- u32 user_priority_tc_map, u32 tc);
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority_tc_map, u32 tc);
/* set rss key address */
-void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
/* set rss key write data */
-void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
/* get rss key write enable */
-u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
/* set rss key write enable */
-void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
/* set rss redirection table address */
-void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
- u32 rss_redir_tbl_addr);
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_addr);
/* set rss redirection table write data */
-void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
- u32 rss_redir_tbl_wr_data);
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data);
/* get rss redirection write enable */
-u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
/* set rss redirection write enable */
-void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
/* set tpo to rpf system loopback */
-void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
- u32 tpo_to_rpf_sys_lbk);
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+ u32 tpo_to_rpf_sys_lbk);
/* set vlan inner ethertype */
-void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
/* set vlan outer ethertype */
-void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
/* set vlan promiscuous mode enable */
-void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en);
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 vlan_prom_mode_en);
/* Set VLAN untagged action */
-void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act);
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+ u32 vlan_untagged_act);
/* Set VLAN accept untagged packets */
-void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
- u32 vlan_accept_untagged_packets);
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_acc_untagged_packets);
/* Set VLAN filter enable */
-void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter);
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+ u32 filter);
/* Set VLAN Filter Action */
-void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
- u32 filter);
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
+ u32 filter);
/* Set VLAN ID Filter */
-void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter);
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+ u32 filter);
/* set ethertype filter enable */
-void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter);
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+ u32 filter);
/* set ethertype user-priority enable */
-void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
- u32 etht_user_priority_en, u32 filter);
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en,
+ u32 filter);
/* set ethertype rx queue enable */
-void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
- u32 filter);
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_rx_queue_en,
+ u32 filter);
/* set ethertype rx queue */
-void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
- u32 filter);
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter);
/* set ethertype user-priority */
-void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
- u32 filter);
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority,
+ u32 filter);
/* set ethertype management queue */
-void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
- u32 filter);
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter);
/* set ethertype filter action */
-void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
- u32 filter);
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+ u32 filter);
/* set ethertype filter */
-void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
/* rpo */
/* set ipv4 header checksum offload enable */
-void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en);
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
/* set rx descriptor vlan stripping */
-void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
- u32 rx_desc_vlan_stripping,
- u32 descriptor);
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping,
+ u32 descriptor);
/* set tcp/udp checksum offload enable */
-void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en);
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
/* Set LRO Patch Optimization Enable. */
-void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
- u32 lro_patch_optimization_en);
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en);
/* Set Large Receive Offload Enable */
-void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
/* Set LRO Q Sessions Limit */
-void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim);
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_qsessions_lim);
/* Set LRO Total Descriptor Limit */
-void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim);
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_total_desc_lim);
/* Set LRO Min Payload of First Packet */
-void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lro_min_pld_of_first_pkt);
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt);
/* Set LRO Packet Limit */
-void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
/* Set LRO Max Number of Descriptors */
-void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
- u32 lro_max_desc_num, u32 lro);
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_desc_num, u32 lro);
/* Set LRO Time Base Divider */
-void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
- u32 lro_time_base_divider);
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider);
/* Set LRO Inactive Interval */
-void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_inactive_interval);
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval);
/* Set LRO Max Coalescing Interval */
-void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
- u32 lro_max_coalescing_interval);
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coal_interval);
/* rx */
/* set rx register reset disable */
-void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
/* tdm */
/* set cpu id */
-void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
/* set large send offload enable */
-void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
- u32 large_send_offload_en);
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en);
/* set tx descriptor enable */
-void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor);
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+ u32 descriptor);
/* set tx dca enable */
-void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
/* set tx dca mode */
-void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
/* set tx descriptor dca enable */
-void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca);
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+ u32 dca);
/* get tx descriptor head pointer */
-u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set tx descriptor length */
-void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
- u32 descriptor);
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor);
/* set tx descriptor write-back interrupt enable */
-void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_irq_en);
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en);
/* set tx descriptor write-back threshold */
-void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
- u32 tx_desc_wr_wb_threshold,
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
u32 descriptor);
/* Set TDM Interrupt Moderation Enable */
-void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
- u32 tdm_irq_moderation_en);
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en);
/* thm */
/* set lso tcp flag of first packet */
-void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_first_pkt);
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt);
/* set lso tcp flag of last packet */
-void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_last_pkt);
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt);
/* set lso tcp flag of middle packet */
-void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
- u32 lso_tcp_flag_of_middle_pkt);
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt);
/* tpb */
/* set tx buffer enable */
-void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
/* set tx buffer high threshold (per tc) */
-void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_hi_threshold_per_tc,
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
u32 buffer);
/* set tx buffer low threshold (per tc) */
-void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_buff_lo_threshold_per_tc,
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
u32 buffer);
/* set tx dma system loopback enable */
-void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_sys_lbk_en);
/* set tx packet buffer size (per tc) */
-void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_buff_size_per_tc, u32 buffer);
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc, u32 buffer);
/* set tx path pad insert enable */
-void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_path_scp_ins_en);
/* tpo */
/* set ipv4 header checksum offload enable */
-void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 ipv4header_crc_offload_en);
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
/* set tcp/udp checksum offload enable */
-void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
- u32 tcp_udp_crc_offload_en);
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
/* set tx pkt system loopback enable */
-void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en);
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_sys_lbk_en);
/* tps */
/* set tx packet scheduler data arbitration mode */
-void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_data_arb_mode);
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode);
/* set tx packet scheduler descriptor rate current time reset */
-void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
- u32 curr_time_res);
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res);
/* set tx packet scheduler descriptor rate limit */
-void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_rate_lim);
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim);
/* set tx packet scheduler descriptor tc arbitration mode */
-void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_arb_mode);
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode);
/* set tx packet scheduler descriptor tc max credit */
-void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_max_credit,
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
u32 tc);
/* set tx packet scheduler descriptor tc weight */
-void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_tc_weight,
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_weight,
u32 tc);
/* set tx packet scheduler descriptor vm arbitration mode */
-void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_desc_vm_arb_mode);
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 arb_mode);
/* set tx packet scheduler tc data max credit */
-void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_max_credit,
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 max_credit,
u32 tc);
/* set tx packet scheduler tc data weight */
-void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_shed_tc_data_weight,
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight,
u32 tc);
/* tx */
/* set tx register reset disable */
-void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
/* msm */
/* get register access status */
-u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw);
/* set register address for indirect address */
-void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
- u32 reg_addr_for_indirect_addr);
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr);
/* set register read strobe */
-void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
/* get register read data */
-u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
/* set register write data */
-void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
/* set register write strobe */
-void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
/* pci */
/* set pci register reset disable */
-void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
#endif /* HW_ATL_LLH_H */
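The msm declarations above describe an indirect register window: software programs the target address, pulses a read (or write) strobe, polls the busy flag, then fetches the data. The following is a hedged usage sketch only, stitched together from the renamed accessors; the helper name hw_atl_msm_read_sketch, the busy-flag polarity and the poll budget are assumptions for illustration, and the driver's real MSM sequencing lives elsewhere in the hw_atl code.

#include <linux/delay.h>
#include <linux/errno.h>

/* Illustrative only: read one MSM register through the indirect window. */
static int hw_atl_msm_read_sketch(struct aq_hw_s *aq_hw, u32 msm_addr,
				  u32 *data)
{
	int budget = 100;

	hw_atl_msm_reg_addr_for_indirect_addr_set(aq_hw, msm_addr);
	hw_atl_msm_reg_rd_strobe_set(aq_hw, 1U);

	/* Assumed polarity: nonzero status means the access is still busy. */
	while (hw_atl_msm_reg_access_status_get(aq_hw)) {
		if (--budget <= 0)
			return -ETIME;
		udelay(10);
	}

	*data = hw_atl_msm_reg_rd_data_get(aq_hw);
	return 0;
}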
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 5527fc0e5942..e0cf70120f1d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -18,91 +18,91 @@
* base address: 0x000003a0
* parameter: semaphore {s} | stride size 0x4 | range [0, 15]
*/
-#define glb_cpu_sem_adr(semaphore) (0x000003a0u + (semaphore) * 0x4)
+#define HW_ATL_GLB_CPU_SEM_ADR(semaphore) (0x000003a0u + (semaphore) * 0x4)
/* register address for bitfield rx dma good octet counter lsw [1f:0] */
-#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW 0x00006808
/* register address for bitfield rx dma good packet counter lsw [1f:0] */
-#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW 0x00006800
/* register address for bitfield tx dma good octet counter lsw [1f:0] */
-#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW 0x00008808
/* register address for bitfield tx dma good packet counter lsw [1f:0] */
-#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW 0x00008800
/* register address for bitfield rx dma good octet counter msw [3f:20] */
-#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW 0x0000680c
/* register address for bitfield rx dma good packet counter msw [3f:20] */
-#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW 0x00006804
/* register address for bitfield tx dma good octet counter msw [3f:20] */
-#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW 0x0000880c
/* register address for bitfield tx dma good packet counter msw [3f:20] */
-#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW 0x00008804
/* preprocessor definitions for msm rx errors counter register */
-#define mac_msm_rx_errs_cnt_adr 0x00000120u
+#define HW_ATL_MAC_MSM_RX_ERRS_CNT_ADR 0x00000120u
/* preprocessor definitions for msm rx unicast frames counter register */
-#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u
+#define HW_ATL_MAC_MSM_RX_UCST_FRM_CNT_ADR 0x000000e0u
/* preprocessor definitions for msm rx multicast frames counter register */
-#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u
+#define HW_ATL_MAC_MSM_RX_MCST_FRM_CNT_ADR 0x000000e8u
/* preprocessor definitions for msm rx broadcast frames counter register */
-#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u
+#define HW_ATL_MAC_MSM_RX_BCST_FRM_CNT_ADR 0x000000f0u
/* preprocessor definitions for msm rx broadcast octets counter register 1 */
-#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER1_ADR 0x000001b0u
/* preprocessor definitions for msm rx broadcast octets counter register 2 */
-#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER2_ADR 0x000001b4u
/* preprocessor definitions for msm rx unicast octets counter register 0 */
-#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u
+#define HW_ATL_MAC_MSM_RX_UCST_OCTETS_COUNTER0_ADR 0x000001b8u
/* preprocessor definitions for rx dma statistics counter 7 */
-#define rx_dma_stat_counter7_adr 0x00006818u
+#define HW_ATL_RX_DMA_STAT_COUNTER7_ADR 0x00006818u
/* preprocessor definitions for msm tx unicast frames counter register */
-#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u
+#define HW_ATL_MAC_MSM_TX_UCST_FRM_CNT_ADR 0x00000108u
/* preprocessor definitions for msm tx multicast frames counter register */
-#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u
+#define HW_ATL_MAC_MSM_TX_MCST_FRM_CNT_ADR 0x00000110u
/* preprocessor definitions for global mif identification */
-#define glb_mif_id_adr 0x0000001cu
+#define HW_ATL_GLB_MIF_ID_ADR 0x0000001cu
/* register address for bitfield iamr_lsw[1f:0] */
-#define itr_iamrlsw_adr 0x00002090
+#define HW_ATL_ITR_IAMRLSW_ADR 0x00002090
/* register address for bitfield rx dma drop packet counter [1f:0] */
-#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818
+#define HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR 0x00006818
/* register address for bitfield imcr_lsw[1f:0] */
-#define itr_imcrlsw_adr 0x00002070
+#define HW_ATL_ITR_IMCRLSW_ADR 0x00002070
/* register address for bitfield imsr_lsw[1f:0] */
-#define itr_imsrlsw_adr 0x00002060
+#define HW_ATL_ITR_IMSRLSW_ADR 0x00002060
/* register address for bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_adr 0x00002300
+#define HW_ATL_ITR_REG_RES_DSBL_ADR 0x00002300
/* bitmask for bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_ITR_REG_RES_DSBL_MSK 0x20000000
/* lower bit position of bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_shift 29
+#define HW_ATL_ITR_REG_RES_DSBL_SHIFT 29
/* register address for bitfield iscr_lsw[1f:0] */
-#define itr_iscrlsw_adr 0x00002050
+#define HW_ATL_ITR_ISCRLSW_ADR 0x00002050
/* register address for bitfield isr_lsw[1f:0] */
-#define itr_isrlsw_adr 0x00002000
+#define HW_ATL_ITR_ISRLSW_ADR 0x00002000
/* register address for bitfield itr_reset */
-#define itr_res_adr 0x00002300
+#define HW_ATL_ITR_RES_ADR 0x00002300
/* bitmask for bitfield itr_reset */
-#define itr_res_msk 0x80000000
+#define HW_ATL_ITR_RES_MSK 0x80000000
/* lower bit position of bitfield itr_reset */
-#define itr_res_shift 31
+#define HW_ATL_ITR_RES_SHIFT 31
/* register address for bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADCPUID_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_msk 0x000000ff
+#define HW_ATL_RDM_DCADCPUID_MSK 0x000000ff
/* lower bit position of bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_shift 0
+#define HW_ATL_RDM_DCADCPUID_SHIFT 0
/* register address for bitfield dca_en */
-#define rdm_dca_en_adr 0x00006180
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
/* rx dca_en bitfield definitions
* preprocessor definitions for the bitfield "dca_en".
@@ -110,17 +110,17 @@
*/
/* register address for bitfield dca_en */
-#define rdm_dca_en_adr 0x00006180
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
/* bitmask for bitfield dca_en */
-#define rdm_dca_en_msk 0x80000000
+#define HW_ATL_RDM_DCA_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca_en */
-#define rdm_dca_en_mskn 0x7fffffff
+#define HW_ATL_RDM_DCA_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca_en */
-#define rdm_dca_en_shift 31
+#define HW_ATL_RDM_DCA_EN_SHIFT 31
/* width of bitfield dca_en */
-#define rdm_dca_en_width 1
+#define HW_ATL_RDM_DCA_EN_WIDTH 1
/* default value of bitfield dca_en */
-#define rdm_dca_en_default 0x1
+#define HW_ATL_RDM_DCA_EN_DEFAULT 0x1
/* rx dca_mode[3:0] bitfield definitions
* preprocessor definitions for the bitfield "dca_mode[3:0]".
@@ -128,17 +128,17 @@
*/
/* register address for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_adr 0x00006180
+#define HW_ATL_RDM_DCA_MODE_ADR 0x00006180
/* bitmask for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_msk 0x0000000f
+#define HW_ATL_RDM_DCA_MODE_MSK 0x0000000f
/* inverted bitmask for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_mskn 0xfffffff0
+#define HW_ATL_RDM_DCA_MODE_MSKN 0xfffffff0
/* lower bit position of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_shift 0
+#define HW_ATL_RDM_DCA_MODE_SHIFT 0
/* width of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_width 4
+#define HW_ATL_RDM_DCA_MODE_WIDTH 4
/* default value of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_default 0x0
+#define HW_ATL_RDM_DCA_MODE_DEFAULT 0x0
/* rx desc{d}_data_size[4:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_data_size[4:0]".
@@ -147,17 +147,18 @@
*/
/* register address for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor) \
+ (0x00005b18 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_msk 0x0000001f
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSK 0x0000001f
/* inverted bitmask for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_mskn 0xffffffe0
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSKN 0xffffffe0
/* lower bit position of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_shift 0
+#define HW_ATL_RDM_DESCDDATA_SIZE_SHIFT 0
/* width of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_width 5
+#define HW_ATL_RDM_DESCDDATA_SIZE_WIDTH 5
/* default value of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_default 0x0
+#define HW_ATL_RDM_DESCDDATA_SIZE_DEFAULT 0x0
/* rx dca{d}_desc_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_desc_en".
@@ -166,17 +167,17 @@
*/
/* register address for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADDESC_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_msk 0x80000000
+#define HW_ATL_RDM_DCADDESC_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_mskn 0x7fffffff
+#define HW_ATL_RDM_DCADDESC_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_shift 31
+#define HW_ATL_RDM_DCADDESC_EN_SHIFT 31
/* width of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_width 1
+#define HW_ATL_RDM_DCADDESC_EN_WIDTH 1
/* default value of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_default 0x0
+#define HW_ATL_RDM_DCADDESC_EN_DEFAULT 0x0
/* rx desc{d}_en bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_en".
@@ -185,17 +186,17 @@
*/
/* register address for bitfield desc{d}_en */
-#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_en */
-#define rdm_descden_msk 0x80000000
+#define HW_ATL_RDM_DESCDEN_MSK 0x80000000
/* inverted bitmask for bitfield desc{d}_en */
-#define rdm_descden_mskn 0x7fffffff
+#define HW_ATL_RDM_DESCDEN_MSKN 0x7fffffff
/* lower bit position of bitfield desc{d}_en */
-#define rdm_descden_shift 31
+#define HW_ATL_RDM_DESCDEN_SHIFT 31
/* width of bitfield desc{d}_en */
-#define rdm_descden_width 1
+#define HW_ATL_RDM_DESCDEN_WIDTH 1
/* default value of bitfield desc{d}_en */
-#define rdm_descden_default 0x0
+#define HW_ATL_RDM_DESCDEN_DEFAULT 0x0
/* rx desc{d}_hdr_size[4:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]".
@@ -204,17 +205,18 @@
*/
/* register address for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor) \
+ (0x00005b18 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_msk 0x00001f00
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSK 0x00001f00
/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_mskn 0xffffe0ff
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSKN 0xffffe0ff
/* lower bit position of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_shift 8
+#define HW_ATL_RDM_DESCDHDR_SIZE_SHIFT 8
/* width of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_width 5
+#define HW_ATL_RDM_DESCDHDR_SIZE_WIDTH 5
/* default value of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_default 0x0
+#define HW_ATL_RDM_DESCDHDR_SIZE_DEFAULT 0x0
/* rx desc{d}_hdr_split bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hdr_split".
@@ -223,17 +225,18 @@
*/
/* register address for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor) \
+ (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_msk 0x10000000
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSK 0x10000000
/* inverted bitmask for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_mskn 0xefffffff
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSKN 0xefffffff
/* lower bit position of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_shift 28
+#define HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT 28
/* width of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_width 1
+#define HW_ATL_RDM_DESCDHDR_SPLIT_WIDTH 1
/* default value of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_default 0x0
+#define HW_ATL_RDM_DESCDHDR_SPLIT_DEFAULT 0x0
/* rx desc{d}_hd[c:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
@@ -242,15 +245,15 @@
*/
/* register address for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHD_ADR(descriptor) (0x00005b0c + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_msk 0x00001fff
+#define HW_ATL_RDM_DESCDHD_MSK 0x00001fff
/* inverted bitmask for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_mskn 0xffffe000
+#define HW_ATL_RDM_DESCDHD_MSKN 0xffffe000
/* lower bit position of bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_shift 0
+#define HW_ATL_RDM_DESCDHD_SHIFT 0
/* width of bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_width 13
+#define HW_ATL_RDM_DESCDHD_WIDTH 13
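Note that desc{d}_hd carries no _DEFAULT macro; it is a hardware-maintained head pointer (status rather than configuration), so only read access makes sense — an inference from the layout. A sketch of reading it for one ring, assuming the matching aq_hw_read_reg_bit() helper:

/* Sketch: fetch the current RX descriptor head pointer for `ring`;
 * the ADR macro folds the 0x20 per-descriptor stride into the address.
 */
static u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 ring)
{
	return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(ring),
				  HW_ATL_RDM_DESCDHD_MSK,
				  HW_ATL_RDM_DESCDHD_SHIFT);
}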
/* rx desc{d}_len[9:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_len[9:0]".
@@ -259,17 +262,17 @@
*/
/* register address for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDLEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_msk 0x00001ff8
+#define HW_ATL_RDM_DESCDLEN_MSK 0x00001ff8
/* inverted bitmask for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_mskn 0xffffe007
+#define HW_ATL_RDM_DESCDLEN_MSKN 0xffffe007
/* lower bit position of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_shift 3
+#define HW_ATL_RDM_DESCDLEN_SHIFT 3
/* width of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_width 10
+#define HW_ATL_RDM_DESCDLEN_WIDTH 10
/* default value of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_default 0x0
+#define HW_ATL_RDM_DESCDLEN_DEFAULT 0x0
/* rx desc{d}_reset bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_reset".
@@ -278,17 +281,17 @@
*/
/* register address for bitfield desc{d}_reset */
-#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDRESET_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_reset */
-#define rdm_descdreset_msk 0x02000000
+#define HW_ATL_RDM_DESCDRESET_MSK 0x02000000
/* inverted bitmask for bitfield desc{d}_reset */
-#define rdm_descdreset_mskn 0xfdffffff
+#define HW_ATL_RDM_DESCDRESET_MSKN 0xfdffffff
/* lower bit position of bitfield desc{d}_reset */
-#define rdm_descdreset_shift 25
+#define HW_ATL_RDM_DESCDRESET_SHIFT 25
/* width of bitfield desc{d}_reset */
-#define rdm_descdreset_width 1
+#define HW_ATL_RDM_DESCDRESET_WIDTH 1
/* default value of bitfield desc{d}_reset */
-#define rdm_descdreset_default 0x0
+#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
/* rx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
@@ -296,17 +299,17 @@
*/
/* register address for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_adr 0x00005a30
+#define HW_ATL_RDM_INT_DESC_WRB_EN_ADR 0x00005a30
/* bitmask for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_msk 0x00000004
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSK 0x00000004
/* inverted bitmask for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_mskn 0xfffffffb
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSKN 0xfffffffb
/* lower bit position of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_shift 2
+#define HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT 2
/* width of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_width 1
+#define HW_ATL_RDM_INT_DESC_WRB_EN_WIDTH 1
/* default value of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_default 0x0
+#define HW_ATL_RDM_INT_DESC_WRB_EN_DEFAULT 0x0
/* rx dca{d}_hdr_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_hdr_en".
@@ -315,17 +318,17 @@
*/
/* register address for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADHDR_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_msk 0x40000000
+#define HW_ATL_RDM_DCADHDR_EN_MSK 0x40000000
/* inverted bitmask for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_mskn 0xbfffffff
+#define HW_ATL_RDM_DCADHDR_EN_MSKN 0xbfffffff
/* lower bit position of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_shift 30
+#define HW_ATL_RDM_DCADHDR_EN_SHIFT 30
/* width of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_width 1
+#define HW_ATL_RDM_DCADHDR_EN_WIDTH 1
/* default value of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_default 0x0
+#define HW_ATL_RDM_DCADHDR_EN_DEFAULT 0x0
/* rx dca{d}_pay_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_pay_en".
@@ -334,17 +337,17 @@
*/
/* register address for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADPAY_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_msk 0x20000000
+#define HW_ATL_RDM_DCADPAY_EN_MSK 0x20000000
/* inverted bitmask for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_mskn 0xdfffffff
+#define HW_ATL_RDM_DCADPAY_EN_MSKN 0xdfffffff
/* lower bit position of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_shift 29
+#define HW_ATL_RDM_DCADPAY_EN_SHIFT 29
/* width of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_width 1
+#define HW_ATL_RDM_DCADPAY_EN_WIDTH 1
/* default value of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_default 0x0
+#define HW_ATL_RDM_DCADPAY_EN_DEFAULT 0x0
/* RX rdm_int_rim_en Bitfield Definitions
* Preprocessor definitions for the bitfield "rdm_int_rim_en".
@@ -352,51 +355,51 @@
*/
/* Register address for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_adr 0x00005A30
+#define HW_ATL_RDM_INT_RIM_EN_ADR 0x00005A30
/* Bitmask for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_msk 0x00000008
+#define HW_ATL_RDM_INT_RIM_EN_MSK 0x00000008
/* Inverted bitmask for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_mskn 0xFFFFFFF7
+#define HW_ATL_RDM_INT_RIM_EN_MSKN 0xFFFFFFF7
/* Lower bit position of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_shift 3
+#define HW_ATL_RDM_INT_RIM_EN_SHIFT 3
/* Width of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_width 1
+#define HW_ATL_RDM_INT_RIM_EN_WIDTH 1
/* Default value of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_default 0x0
+#define HW_ATL_RDM_INT_RIM_EN_DEFAULT 0x0
/* general interrupt mapping register definitions
* preprocessor definitions for general interrupt mapping register
* base address: 0x00002180
* parameter: regidx {f} | stride size 0x4 | range [0, 3]
*/
-#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4)
+#define HW_ATL_GEN_INTR_MAP_ADR(regidx) (0x00002180u + (regidx) * 0x4)
/* general interrupt status register definitions
* preprocessor definitions for general interrupt status register
 * address: 0x000021A4
*/
-#define gen_intr_stat_adr 0x000021A4U
+#define HW_ATL_GEN_INTR_STAT_ADR 0x000021A4U
/* interrupt global control register definitions
* preprocessor definitions for interrupt global control register
* address: 0x00002300
*/
-#define intr_glb_ctl_adr 0x00002300u
+#define HW_ATL_INTR_GLB_CTL_ADR 0x00002300u
/* interrupt throttle register definitions
* preprocessor definitions for interrupt throttle register
* base address: 0x00002800
* parameter: throttle {t} | stride size 0x4 | range [0, 31]
*/
-#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4)
+#define HW_ATL_INTR_THR_ADR(throttle) (0x00002800u + (throttle) * 0x4)
/* rx dma descriptor base address lsw definitions
* preprocessor definitions for rx dma descriptor base address lsw
* base address: 0x00005b00
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_base_addrlsw_adr(descriptor) \
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
(0x00005b00u + (descriptor) * 0x20)
/* rx dma descriptor base address msw definitions
@@ -404,7 +407,7 @@
* base address: 0x00005b04
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_base_addrmsw_adr(descriptor) \
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
(0x00005b04u + (descriptor) * 0x20)
/* rx dma descriptor status register definitions
@@ -412,46 +415,48 @@
* base address: 0x00005b14
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20)
+#define HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor) \
+ (0x00005b14u + (descriptor) * 0x20)
/* rx dma descriptor tail pointer register definitions
* preprocessor definitions for rx dma descriptor tail pointer register
* base address: 0x00005b10
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
-#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20)
+#define HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+ (0x00005b10u + (descriptor) * 0x20)
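Whole-register definitions such as the tail pointer come with only an address macro and are written in full. A sketch of publishing refilled RX descriptors, assuming the driver's plain 32-bit aq_hw_write_reg() helper:

/* Sketch: hand newly filled RX descriptors to hardware by writing the
 * software tail index of ring `ring`.
 */
static void hw_atl_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
					    u32 sw_tail, u32 ring)
{
	aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(ring), sw_tail);
}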
/* rx interrupt moderation control register definitions
* Preprocessor definitions for RX Interrupt Moderation Control Register
* Base Address: 0x00005A40
* Parameter: RIM {R} | stride size 0x4 | range [0, 31]
*/
-#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4)
+#define HW_ATL_RX_INTR_MODERATION_CTL_ADR(rim) (0x00005A40u + (rim) * 0x4)
/* rx filter multicast filter mask register definitions
* preprocessor definitions for rx filter multicast filter mask register
* address: 0x00005270
*/
-#define rx_flr_mcst_flr_msk_adr 0x00005270u
+#define HW_ATL_RX_FLR_MCST_FLR_MSK_ADR 0x00005270u
/* rx filter multicast filter register definitions
* preprocessor definitions for rx filter multicast filter register
* base address: 0x00005250
* parameter: filter {f} | stride size 0x4 | range [0, 7]
*/
-#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4)
+#define HW_ATL_RX_FLR_MCST_FLR_ADR(filter) (0x00005250u + (filter) * 0x4)
/* RX Filter RSS Control Register 1 Definitions
* Preprocessor definitions for RX Filter RSS Control Register 1
* Address: 0x000054C0
*/
-#define rx_flr_rss_control1_adr 0x000054C0u
+#define HW_ATL_RX_FLR_RSS_CONTROL1_ADR 0x000054C0u
/* RX Filter Control Register 2 Definitions
* Preprocessor definitions for RX Filter Control Register 2
* Address: 0x00005104
*/
-#define rx_flr_control2_adr 0x00005104u
+#define HW_ATL_RX_FLR_CONTROL2_ADR 0x00005104u
/* tx tx dma debug control [1f:0] bitfield definitions
* preprocessor definitions for the bitfield "tx dma debug control [1f:0]".
@@ -459,24 +464,24 @@
*/
/* register address for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_adr 0x00008920
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_ADR 0x00008920
/* bitmask for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_msk 0xffffffff
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSK 0xffffffff
/* inverted bitmask for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_mskn 0x00000000
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSKN 0x00000000
/* lower bit position of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_shift 0
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_SHIFT 0
/* width of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_width 32
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_WIDTH 32
/* default value of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_default 0x0
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_DEFAULT 0x0
/* tx dma descriptor base address lsw definitions
* preprocessor definitions for tx dma descriptor base address lsw
* base address: 0x00007c00
* parameter: descriptor {d} | stride size 0x40 | range [0, 31]
*/
-#define tx_dma_desc_base_addrlsw_adr(descriptor) \
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
(0x00007c00u + (descriptor) * 0x40)
/* tx dma descriptor tail pointer register definitions
@@ -484,7 +489,8 @@
* base address: 0x00007c10
* parameter: descriptor {d} | stride size 0x40 | range [0, 31]
*/
-#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40)
+#define HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+ (0x00007c10u + (descriptor) * 0x40)
/* rx dma_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "dma_sys_loopback".
@@ -492,17 +498,17 @@
*/
/* register address for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_adr 0x00005000
+#define HW_ATL_RPB_DMA_SYS_LBK_ADR 0x00005000
/* bitmask for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_msk 0x00000040
+#define HW_ATL_RPB_DMA_SYS_LBK_MSK 0x00000040
/* inverted bitmask for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_mskn 0xffffffbf
+#define HW_ATL_RPB_DMA_SYS_LBK_MSKN 0xffffffbf
/* lower bit position of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_shift 6
+#define HW_ATL_RPB_DMA_SYS_LBK_SHIFT 6
/* width of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_width 1
+#define HW_ATL_RPB_DMA_SYS_LBK_WIDTH 1
/* default value of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_default 0x0
+#define HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0
/* rx rx_tc_mode bitfield definitions
* preprocessor definitions for the bitfield "rx_tc_mode".
@@ -510,17 +516,17 @@
*/
/* register address for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_adr 0x00005700
+#define HW_ATL_RPB_RPF_RX_TC_MODE_ADR 0x00005700
/* bitmask for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_msk 0x00000100
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSK 0x00000100
/* inverted bitmask for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSKN 0xfffffeff
/* lower bit position of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_shift 8
+#define HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT 8
/* width of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_width 1
+#define HW_ATL_RPB_RPF_RX_TC_MODE_WIDTH 1
/* default value of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_default 0x0
+#define HW_ATL_RPB_RPF_RX_TC_MODE_DEFAULT 0x0
/* rx rx_buf_en bitfield definitions
* preprocessor definitions for the bitfield "rx_buf_en".
@@ -528,17 +534,17 @@
*/
/* register address for bitfield rx_buf_en */
-#define rpb_rx_buf_en_adr 0x00005700
+#define HW_ATL_RPB_RX_BUF_EN_ADR 0x00005700
/* bitmask for bitfield rx_buf_en */
-#define rpb_rx_buf_en_msk 0x00000001
+#define HW_ATL_RPB_RX_BUF_EN_MSK 0x00000001
/* inverted bitmask for bitfield rx_buf_en */
-#define rpb_rx_buf_en_mskn 0xfffffffe
+#define HW_ATL_RPB_RX_BUF_EN_MSKN 0xfffffffe
/* lower bit position of bitfield rx_buf_en */
-#define rpb_rx_buf_en_shift 0
+#define HW_ATL_RPB_RX_BUF_EN_SHIFT 0
/* width of bitfield rx_buf_en */
-#define rpb_rx_buf_en_width 1
+#define HW_ATL_RPB_RX_BUF_EN_WIDTH 1
/* default value of bitfield rx_buf_en */
-#define rpb_rx_buf_en_default 0x0
+#define HW_ATL_RPB_RX_BUF_EN_DEFAULT 0x0
/* rx rx{b}_hi_thresh[d:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]".
@@ -547,17 +553,17 @@
*/
/* register address for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBHI_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_msk 0x3fff0000
+#define HW_ATL_RPB_RXBHI_THRESH_MSK 0x3fff0000
/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_mskn 0xc000ffff
+#define HW_ATL_RPB_RXBHI_THRESH_MSKN 0xc000ffff
/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_shift 16
+#define HW_ATL_RPB_RXBHI_THRESH_SHIFT 16
/* width of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_width 14
+#define HW_ATL_RPB_RXBHI_THRESH_WIDTH 14
/* default value of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_default 0x0
+#define HW_ATL_RPB_RXBHI_THRESH_DEFAULT 0x0
/* rx rx{b}_lo_thresh[d:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]".
@@ -566,17 +572,17 @@
*/
/* register address for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBLO_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_msk 0x00003fff
+#define HW_ATL_RPB_RXBLO_THRESH_MSK 0x00003fff
/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_mskn 0xffffc000
+#define HW_ATL_RPB_RXBLO_THRESH_MSKN 0xffffc000
/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_shift 0
+#define HW_ATL_RPB_RXBLO_THRESH_SHIFT 0
/* width of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_width 14
+#define HW_ATL_RPB_RXBLO_THRESH_WIDTH 14
/* default value of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_default 0x0
+#define HW_ATL_RPB_RXBLO_THRESH_DEFAULT 0x0
/* rx rx_fc_mode[1:0] bitfield definitions
* preprocessor definitions for the bitfield "rx_fc_mode[1:0]".
@@ -584,17 +590,17 @@
*/
/* register address for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_adr 0x00005700
+#define HW_ATL_RPB_RX_FC_MODE_ADR 0x00005700
/* bitmask for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_msk 0x00000030
+#define HW_ATL_RPB_RX_FC_MODE_MSK 0x00000030
/* inverted bitmask for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_mskn 0xffffffcf
+#define HW_ATL_RPB_RX_FC_MODE_MSKN 0xffffffcf
/* lower bit position of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_shift 4
+#define HW_ATL_RPB_RX_FC_MODE_SHIFT 4
/* width of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_width 2
+#define HW_ATL_RPB_RX_FC_MODE_WIDTH 2
/* default value of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_default 0x0
+#define HW_ATL_RPB_RX_FC_MODE_DEFAULT 0x0
/* rx rx{b}_buf_size[8:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]".
@@ -603,17 +609,17 @@
*/
/* register address for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer) (0x00005710 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_msk 0x000001ff
+#define HW_ATL_RPB_RXBBUF_SIZE_MSK 0x000001ff
/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_mskn 0xfffffe00
+#define HW_ATL_RPB_RXBBUF_SIZE_MSKN 0xfffffe00
/* lower bit position of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_shift 0
+#define HW_ATL_RPB_RXBBUF_SIZE_SHIFT 0
/* width of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_width 9
+#define HW_ATL_RPB_RXBBUF_SIZE_WIDTH 9
/* default value of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_default 0x0
+#define HW_ATL_RPB_RXBBUF_SIZE_DEFAULT 0x0
/* rx rx{b}_xoff_en bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_xoff_en".
@@ -622,17 +628,17 @@
*/
/* register address for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBXOFF_EN_ADR(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_msk 0x80000000
+#define HW_ATL_RPB_RXBXOFF_EN_MSK 0x80000000
/* inverted bitmask for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_mskn 0x7fffffff
+#define HW_ATL_RPB_RXBXOFF_EN_MSKN 0x7fffffff
/* lower bit position of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_shift 31
+#define HW_ATL_RPB_RXBXOFF_EN_SHIFT 31
/* width of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_width 1
+#define HW_ATL_RPB_RXBXOFF_EN_WIDTH 1
/* default value of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_default 0x0
+#define HW_ATL_RPB_RXBXOFF_EN_DEFAULT 0x0
/* rx l2_bc_thresh[f:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_thresh[f:0]".
@@ -640,17 +646,17 @@
*/
/* register address for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_adr 0x00005100
+#define HW_ATL_RPFL2BC_THRESH_ADR 0x00005100
/* bitmask for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_msk 0xffff0000
+#define HW_ATL_RPFL2BC_THRESH_MSK 0xffff0000
/* inverted bitmask for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_mskn 0x0000ffff
+#define HW_ATL_RPFL2BC_THRESH_MSKN 0x0000ffff
/* lower bit position of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_shift 16
+#define HW_ATL_RPFL2BC_THRESH_SHIFT 16
/* width of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_width 16
+#define HW_ATL_RPFL2BC_THRESH_WIDTH 16
/* default value of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_default 0x0
+#define HW_ATL_RPFL2BC_THRESH_DEFAULT 0x0
/* rx l2_bc_en bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_en".
@@ -658,17 +664,17 @@
*/
/* register address for bitfield l2_bc_en */
-#define rpfl2bc_en_adr 0x00005100
+#define HW_ATL_RPFL2BC_EN_ADR 0x00005100
/* bitmask for bitfield l2_bc_en */
-#define rpfl2bc_en_msk 0x00000001
+#define HW_ATL_RPFL2BC_EN_MSK 0x00000001
/* inverted bitmask for bitfield l2_bc_en */
-#define rpfl2bc_en_mskn 0xfffffffe
+#define HW_ATL_RPFL2BC_EN_MSKN 0xfffffffe
/* lower bit position of bitfield l2_bc_en */
-#define rpfl2bc_en_shift 0
+#define HW_ATL_RPFL2BC_EN_SHIFT 0
/* width of bitfield l2_bc_en */
-#define rpfl2bc_en_width 1
+#define HW_ATL_RPFL2BC_EN_WIDTH 1
/* default value of bitfield l2_bc_en */
-#define rpfl2bc_en_default 0x0
+#define HW_ATL_RPFL2BC_EN_DEFAULT 0x0
/* rx l2_bc_act[2:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_act[2:0]".
@@ -676,17 +682,17 @@
*/
/* register address for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_adr 0x00005100
+#define HW_ATL_RPFL2BC_ACT_ADR 0x00005100
/* bitmask for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_msk 0x00007000
+#define HW_ATL_RPFL2BC_ACT_MSK 0x00007000
/* inverted bitmask for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_mskn 0xffff8fff
+#define HW_ATL_RPFL2BC_ACT_MSKN 0xffff8fff
/* lower bit position of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_shift 12
+#define HW_ATL_RPFL2BC_ACT_SHIFT 12
/* width of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_width 3
+#define HW_ATL_RPFL2BC_ACT_WIDTH 3
/* default value of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_default 0x0
+#define HW_ATL_RPFL2BC_ACT_DEFAULT 0x0
/* rx l2_mc_en{f} bitfield definitions
* preprocessor definitions for the bitfield "l2_mc_en{f}".
@@ -695,17 +701,17 @@
*/
/* register address for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4)
+#define HW_ATL_RPFL2MC_ENF_ADR(filter) (0x00005250 + (filter) * 0x4)
/* bitmask for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_msk 0x80000000
+#define HW_ATL_RPFL2MC_ENF_MSK 0x80000000
/* inverted bitmask for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_mskn 0x7fffffff
+#define HW_ATL_RPFL2MC_ENF_MSKN 0x7fffffff
/* lower bit position of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_shift 31
+#define HW_ATL_RPFL2MC_ENF_SHIFT 31
/* width of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_width 1
+#define HW_ATL_RPFL2MC_ENF_WIDTH 1
/* default value of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_default 0x0
+#define HW_ATL_RPFL2MC_ENF_DEFAULT 0x0
/* rx l2_promis_mode bitfield definitions
* preprocessor definitions for the bitfield "l2_promis_mode".
@@ -713,17 +719,17 @@
*/
/* register address for bitfield l2_promis_mode */
-#define rpfl2promis_mode_adr 0x00005100
+#define HW_ATL_RPFL2PROMIS_MODE_ADR 0x00005100
/* bitmask for bitfield l2_promis_mode */
-#define rpfl2promis_mode_msk 0x00000008
+#define HW_ATL_RPFL2PROMIS_MODE_MSK 0x00000008
/* inverted bitmask for bitfield l2_promis_mode */
-#define rpfl2promis_mode_mskn 0xfffffff7
+#define HW_ATL_RPFL2PROMIS_MODE_MSKN 0xfffffff7
/* lower bit position of bitfield l2_promis_mode */
-#define rpfl2promis_mode_shift 3
+#define HW_ATL_RPFL2PROMIS_MODE_SHIFT 3
/* width of bitfield l2_promis_mode */
-#define rpfl2promis_mode_width 1
+#define HW_ATL_RPFL2PROMIS_MODE_WIDTH 1
/* default value of bitfield l2_promis_mode */
-#define rpfl2promis_mode_default 0x0
+#define HW_ATL_RPFL2PROMIS_MODE_DEFAULT 0x0
/* rx l2_uc_act{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]".
@@ -732,17 +738,17 @@
*/
/* register address for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_ACTF_ADR(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_msk 0x00070000
+#define HW_ATL_RPFL2UC_ACTF_MSK 0x00070000
/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_mskn 0xfff8ffff
+#define HW_ATL_RPFL2UC_ACTF_MSKN 0xfff8ffff
/* lower bit position of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_shift 16
+#define HW_ATL_RPFL2UC_ACTF_SHIFT 16
/* width of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_width 3
+#define HW_ATL_RPFL2UC_ACTF_WIDTH 3
/* default value of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_default 0x0
+#define HW_ATL_RPFL2UC_ACTF_DEFAULT 0x0
/* rx l2_uc_en{f} bitfield definitions
* preprocessor definitions for the bitfield "l2_uc_en{f}".
@@ -751,26 +757,26 @@
*/
/* register address for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_ENF_ADR(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_msk 0x80000000
+#define HW_ATL_RPFL2UC_ENF_MSK 0x80000000
/* inverted bitmask for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_mskn 0x7fffffff
+#define HW_ATL_RPFL2UC_ENF_MSKN 0x7fffffff
/* lower bit position of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_shift 31
+#define HW_ATL_RPFL2UC_ENF_SHIFT 31
/* width of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_width 1
+#define HW_ATL_RPFL2UC_ENF_WIDTH 1
/* default value of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_default 0x0
+#define HW_ATL_RPFL2UC_ENF_DEFAULT 0x0
/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */
-#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_DAFLSW_ADR(filter) (0x00005110 + (filter) * 0x8)
/* register address for bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_DAFMSW_ADR(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_msk 0x0000ffff
+#define HW_ATL_RPFL2UC_DAFMSW_MSK 0x0000ffff
/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_shift 0
+#define HW_ATL_RPFL2UC_DAFMSW_SHIFT 0
/* rx l2_mc_accept_all bitfield definitions
* Preprocessor definitions for the bitfield "l2_mc_accept_all".
@@ -778,22 +784,22 @@
*/
/* Register address for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_adr 0x00005270
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_ADR 0x00005270
/* Bitmask for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_msk 0x00004000
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSK 0x00004000
/* Inverted bitmask for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_mskn 0xFFFFBFFF
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSKN 0xFFFFBFFF
/* Lower bit position of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_shift 14
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT 14
/* Width of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_width 1
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_WIDTH 1
/* Default value of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_default 0x0
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_DEFAULT 0x0
/* width of bitfield rx_tc_up{t}[2:0] */
-#define rpf_rpb_rx_tc_upt_width 3
+#define HW_ATL_RPF_RPB_RX_TC_UPT_WIDTH 3
/* default value of bitfield rx_tc_up{t}[2:0] */
-#define rpf_rpb_rx_tc_upt_default 0x0
+#define HW_ATL_RPF_RPB_RX_TC_UPT_DEFAULT 0x0
/* rx rss_key_addr[4:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_key_addr[4:0]".
@@ -801,17 +807,17 @@
*/
/* register address for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_adr 0x000054d0
+#define HW_ATL_RPF_RSS_KEY_ADDR_ADR 0x000054d0
/* bitmask for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_msk 0x0000001f
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSK 0x0000001f
/* inverted bitmask for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_mskn 0xffffffe0
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSKN 0xffffffe0
/* lower bit position of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_shift 0
+#define HW_ATL_RPF_RSS_KEY_ADDR_SHIFT 0
/* width of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_width 5
+#define HW_ATL_RPF_RSS_KEY_ADDR_WIDTH 5
/* default value of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_default 0x0
+#define HW_ATL_RPF_RSS_KEY_ADDR_DEFAULT 0x0
/* rx rss_key_wr_data[1f:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]".
@@ -819,17 +825,17 @@
*/
/* register address for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_adr 0x000054d4
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_ADR 0x000054d4
/* bitmask for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_msk 0xffffffff
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSK 0xffffffff
/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_mskn 0x00000000
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSKN 0x00000000
/* lower bit position of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_shift 0
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_SHIFT 0
/* width of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_width 32
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_WIDTH 32
/* default value of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_default 0x0
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_DEFAULT 0x0
/* rx rss_key_wr_en_i bitfield definitions
* preprocessor definitions for the bitfield "rss_key_wr_en_i".
@@ -837,17 +843,17 @@
*/
/* register address for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_adr 0x000054d0
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_ADR 0x000054d0
/* bitmask for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_msk 0x00000020
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSK 0x00000020
/* inverted bitmask for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_mskn 0xffffffdf
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSKN 0xffffffdf
/* lower bit position of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_shift 5
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT 5
/* width of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_width 1
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_WIDTH 1
/* default value of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_default 0x0
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_DEFAULT 0x0
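The RSS key block is an indirect interface: a 32-bit data register, a 5-bit address register, and a write-enable bit. A plausible per-word programming sequence inferred from that layout (ordering follows the data/addr/wr_en split; any completion polling the real driver performs is omitted here):

/* Sketch: write one 32-bit word of the RSS hash key into key-RAM slot
 * `addr` (0..31, bounded by HW_ATL_RPF_RSS_KEY_ADDR_MSK).
 */
static void hw_atl_rpf_rss_key_word_set(struct aq_hw_s *aq_hw, u32 addr,
					u32 key_word)
{
	aq_hw_write_reg(aq_hw, HW_ATL_RPF_RSS_KEY_WR_DATA_ADR, key_word);
	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_ADDR_ADR,
			    HW_ATL_RPF_RSS_KEY_ADDR_MSK,
			    HW_ATL_RPF_RSS_KEY_ADDR_SHIFT, addr);
	/* latch the selected word into the key RAM */
	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
			    HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
			    HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT, 1U);
}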
/* rx rss_redir_addr[3:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_addr[3:0]".
@@ -855,17 +861,17 @@
*/
/* register address for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_adr 0x000054e0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_ADR 0x000054e0
/* bitmask for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_msk 0x0000000f
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSK 0x0000000f
/* inverted bitmask for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_mskn 0xfffffff0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSKN 0xfffffff0
/* lower bit position of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_shift 0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT 0
/* width of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_width 4
+#define HW_ATL_RPF_RSS_REDIR_ADDR_WIDTH 4
/* default value of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_DEFAULT 0x0
/* rx rss_redir_wr_data[f:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]".
@@ -873,17 +879,17 @@
*/
/* register address for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_adr 0x000054e4
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR 0x000054e4
/* bitmask for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_msk 0x0000ffff
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK 0x0000ffff
/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_mskn 0xffff0000
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSKN 0xffff0000
/* lower bit position of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_shift 0
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT 0
/* width of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_width 16
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_WIDTH 16
/* default value of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_DEFAULT 0x0
/* rx rss_redir_wr_en_i bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_wr_en_i".
@@ -891,17 +897,17 @@
*/
/* register address for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_adr 0x000054e0
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR 0x000054e0
/* bitmask for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_msk 0x00000010
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK 0x00000010
/* inverted bitmask for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_mskn 0xffffffef
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSKN 0xffffffef
/* lower bit position of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_shift 4
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT 4
/* width of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_width 1
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_WIDTH 1
/* default value of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_DEFAULT 0x0
/* rx tpo_rpf_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "tpo_rpf_sys_loopback".
@@ -909,17 +915,17 @@
*/
/* register address for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_adr 0x00005000
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR 0x00005000
/* bitmask for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_msk 0x00000100
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK 0x00000100
/* inverted bitmask for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSKN 0xfffffeff
/* lower bit position of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_shift 8
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT 8
/* width of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_width 1
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_WIDTH 1
/* default value of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_default 0x0
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_DEFAULT 0x0
/* rx vl_inner_tpid[f:0] bitfield definitions
* preprocessor definitions for the bitfield "vl_inner_tpid[f:0]".
@@ -927,17 +933,17 @@
*/
/* register address for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_adr 0x00005284
+#define HW_ATL_RPF_VL_INNER_TPID_ADR 0x00005284
/* bitmask for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_msk 0x0000ffff
+#define HW_ATL_RPF_VL_INNER_TPID_MSK 0x0000ffff
/* inverted bitmask for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_mskn 0xffff0000
+#define HW_ATL_RPF_VL_INNER_TPID_MSKN 0xffff0000
/* lower bit position of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_shift 0
+#define HW_ATL_RPF_VL_INNER_TPID_SHIFT 0
/* width of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_width 16
+#define HW_ATL_RPF_VL_INNER_TPID_WIDTH 16
/* default value of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_default 0x8100
+#define HW_ATL_RPF_VL_INNER_TPID_DEFAULT 0x8100
/* rx vl_outer_tpid[f:0] bitfield definitions
* preprocessor definitions for the bitfield "vl_outer_tpid[f:0]".
@@ -945,17 +951,17 @@
*/
/* register address for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_adr 0x00005284
+#define HW_ATL_RPF_VL_OUTER_TPID_ADR 0x00005284
/* bitmask for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_msk 0xffff0000
+#define HW_ATL_RPF_VL_OUTER_TPID_MSK 0xffff0000
/* inverted bitmask for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_mskn 0x0000ffff
+#define HW_ATL_RPF_VL_OUTER_TPID_MSKN 0x0000ffff
/* lower bit position of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_shift 16
+#define HW_ATL_RPF_VL_OUTER_TPID_SHIFT 16
/* width of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_width 16
+#define HW_ATL_RPF_VL_OUTER_TPID_WIDTH 16
/* default value of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_default 0x88a8
+#define HW_ATL_RPF_VL_OUTER_TPID_DEFAULT 0x88a8
/* rx vl_promis_mode bitfield definitions
* preprocessor definitions for the bitfield "vl_promis_mode".
@@ -963,17 +969,17 @@
*/
/* register address for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_adr 0x00005280
+#define HW_ATL_RPF_VL_PROMIS_MODE_ADR 0x00005280
/* bitmask for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_msk 0x00000002
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSK 0x00000002
/* inverted bitmask for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_mskn 0xfffffffd
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSKN 0xfffffffd
/* lower bit position of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_shift 1
+#define HW_ATL_RPF_VL_PROMIS_MODE_SHIFT 1
/* width of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_width 1
+#define HW_ATL_RPF_VL_PROMIS_MODE_WIDTH 1
/* default value of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_default 0x0
+#define HW_ATL_RPF_VL_PROMIS_MODE_DEFAULT 0x0
/* RX vl_accept_untagged_mode Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_accept_untagged_mode".
@@ -981,17 +987,17 @@
*/
/* Register address for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_adr 0x00005280
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR 0x00005280
/* Bitmask for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_msk 0x00000004
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK 0x00000004
/* Inverted bitmask for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSKN 0xFFFFFFFB
/* Lower bit position of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_shift 2
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT 2
/* Width of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_width 1
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_WIDTH 1
/* Default value of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_default 0x0
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_DEFAULT 0x0
/* RX vl_untagged_act[2:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_untagged_act[2:0]".
@@ -999,17 +1005,17 @@
*/
/* Register address for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_adr 0x00005280
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_ADR 0x00005280
/* Bitmask for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_msk 0x00000038
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSK 0x00000038
/* Inverted bitmask for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_mskn 0xFFFFFFC7
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSKN 0xFFFFFFC7
/* Lower bit position of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_shift 3
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT 3
/* Width of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_width 3
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_WIDTH 3
/* Default value of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_default 0x0
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_DEFAULT 0x0
/* RX vl_en{F} Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_en{F}".
@@ -1018,17 +1024,17 @@
*/
/* Register address for bitfield vl_en{F} */
-#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_en{F} */
-#define rpf_vl_en_f_msk 0x80000000
+#define HW_ATL_RPF_VL_EN_F_MSK 0x80000000
/* Inverted bitmask for bitfield vl_en{F} */
-#define rpf_vl_en_f_mskn 0x7FFFFFFF
+#define HW_ATL_RPF_VL_EN_F_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield vl_en{F} */
-#define rpf_vl_en_f_shift 31
+#define HW_ATL_RPF_VL_EN_F_SHIFT 31
/* Width of bitfield vl_en{F} */
-#define rpf_vl_en_f_width 1
+#define HW_ATL_RPF_VL_EN_F_WIDTH 1
/* Default value of bitfield vl_en{F} */
-#define rpf_vl_en_f_default 0x0
+#define HW_ATL_RPF_VL_EN_F_DEFAULT 0x0
/* RX vl_act{F}[2:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_act{F}[2:0]".
@@ -1037,17 +1043,17 @@
*/
/* Register address for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_ACT_F_ADR(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_msk 0x00070000
+#define HW_ATL_RPF_VL_ACT_F_MSK 0x00070000
/* Inverted bitmask for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_mskn 0xFFF8FFFF
+#define HW_ATL_RPF_VL_ACT_F_MSKN 0xFFF8FFFF
/* Lower bit position of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_shift 16
+#define HW_ATL_RPF_VL_ACT_F_SHIFT 16
/* Width of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_width 3
+#define HW_ATL_RPF_VL_ACT_F_WIDTH 3
/* Default value of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_default 0x0
+#define HW_ATL_RPF_VL_ACT_F_DEFAULT 0x0
/* RX vl_id{F}[B:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_id{F}[B:0]".
@@ -1056,17 +1062,17 @@
*/
/* Register address for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_ID_F_ADR(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_msk 0x00000FFF
+#define HW_ATL_RPF_VL_ID_F_MSK 0x00000FFF
/* Inverted bitmask for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_mskn 0xFFFFF000
+#define HW_ATL_RPF_VL_ID_F_MSKN 0xFFFFF000
/* Lower bit position of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_shift 0
+#define HW_ATL_RPF_VL_ID_F_SHIFT 0
/* Width of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_width 12
+#define HW_ATL_RPF_VL_ID_F_WIDTH 12
/* Default value of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_default 0x0
+#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
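vl_en{F}, vl_act{F} and vl_id{F} all live in the same per-filter register (0x5290 + F * 0x4), so one composed write can program a filter instead of three read-modify-writes. A sketch under that assumption (the in-tree driver may instead use separate per-field setters):

/* Sketch: enable VLAN filter `filter` to match VLAN id `vid` with
 * action `act`, composing all three bitfields into one value.
 */
static void hw_atl_rpf_vlan_flr_set(struct aq_hw_s *aq_hw, u32 filter,
				    u32 vid, u32 act)
{
	u32 val = HW_ATL_RPF_VL_EN_F_MSK |
		  ((act << HW_ATL_RPF_VL_ACT_F_SHIFT) &
		   HW_ATL_RPF_VL_ACT_F_MSK) |
		  ((vid << HW_ATL_RPF_VL_ID_F_SHIFT) &
		   HW_ATL_RPF_VL_ID_F_MSK);

	aq_hw_write_reg(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter), val);
}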
/* RX et_en{F} Bitfield Definitions
* Preprocessor definitions for the bitfield "et_en{F}".
@@ -1075,17 +1081,17 @@
*/
/* Register address for bitfield et_en{F} */
-#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4)
/* Bitmask for bitfield et_en{F} */
-#define rpf_et_en_f_msk 0x80000000
+#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000
/* Inverted bitmask for bitfield et_en{F} */
-#define rpf_et_en_f_mskn 0x7FFFFFFF
+#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield et_en{F} */
-#define rpf_et_en_f_shift 31
+#define HW_ATL_RPF_ET_EN_F_SHIFT 31
/* Width of bitfield et_en{F} */
-#define rpf_et_en_f_width 1
+#define HW_ATL_RPF_ET_EN_F_WIDTH 1
/* Default value of bitfield et_en{F} */
-#define rpf_et_en_f_default 0x0
+#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0
/* rx et_en{f} bitfield definitions
* preprocessor definitions for the bitfield "et_en{f}".
@@ -1094,17 +1100,17 @@
*/
/* register address for bitfield et_en{f} */
-#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_ENF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_en{f} */
-#define rpf_et_enf_msk 0x80000000
+#define HW_ATL_RPF_ET_ENF_MSK 0x80000000
/* inverted bitmask for bitfield et_en{f} */
-#define rpf_et_enf_mskn 0x7fffffff
+#define HW_ATL_RPF_ET_ENF_MSKN 0x7fffffff
/* lower bit position of bitfield et_en{f} */
-#define rpf_et_enf_shift 31
+#define HW_ATL_RPF_ET_ENF_SHIFT 31
/* width of bitfield et_en{f} */
-#define rpf_et_enf_width 1
+#define HW_ATL_RPF_ET_ENF_WIDTH 1
/* default value of bitfield et_en{f} */
-#define rpf_et_enf_default 0x0
+#define HW_ATL_RPF_ET_ENF_DEFAULT 0x0
/* rx et_up{f}_en bitfield definitions
* preprocessor definitions for the bitfield "et_up{f}_en".
@@ -1113,17 +1119,17 @@
*/
/* register address for bitfield et_up{f}_en */
-#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_UPFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_up{f}_en */
-#define rpf_et_upfen_msk 0x40000000
+#define HW_ATL_RPF_ET_UPFEN_MSK 0x40000000
/* inverted bitmask for bitfield et_up{f}_en */
-#define rpf_et_upfen_mskn 0xbfffffff
+#define HW_ATL_RPF_ET_UPFEN_MSKN 0xbfffffff
/* lower bit position of bitfield et_up{f}_en */
-#define rpf_et_upfen_shift 30
+#define HW_ATL_RPF_ET_UPFEN_SHIFT 30
/* width of bitfield et_up{f}_en */
-#define rpf_et_upfen_width 1
+#define HW_ATL_RPF_ET_UPFEN_WIDTH 1
/* default value of bitfield et_up{f}_en */
-#define rpf_et_upfen_default 0x0
+#define HW_ATL_RPF_ET_UPFEN_DEFAULT 0x0
/* rx et_rxq{f}_en bitfield definitions
* preprocessor definitions for the bitfield "et_rxq{f}_en".
@@ -1132,17 +1138,17 @@
*/
/* register address for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_RXQFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_msk 0x20000000
+#define HW_ATL_RPF_ET_RXQFEN_MSK 0x20000000
/* inverted bitmask for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_mskn 0xdfffffff
+#define HW_ATL_RPF_ET_RXQFEN_MSKN 0xdfffffff
/* lower bit position of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_shift 29
+#define HW_ATL_RPF_ET_RXQFEN_SHIFT 29
/* width of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_width 1
+#define HW_ATL_RPF_ET_RXQFEN_WIDTH 1
/* default value of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_default 0x0
+#define HW_ATL_RPF_ET_RXQFEN_DEFAULT 0x0
/* rx et_up{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "et_up{f}[2:0]".
@@ -1151,17 +1157,17 @@
*/
/* register address for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_UPF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_msk 0x1c000000
+#define HW_ATL_RPF_ET_UPF_MSK 0x1c000000
/* inverted bitmask for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_mskn 0xe3ffffff
+#define HW_ATL_RPF_ET_UPF_MSKN 0xe3ffffff
/* lower bit position of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_shift 26
+#define HW_ATL_RPF_ET_UPF_SHIFT 26
/* width of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_width 3
+#define HW_ATL_RPF_ET_UPF_WIDTH 3
/* default value of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_default 0x0
+#define HW_ATL_RPF_ET_UPF_DEFAULT 0x0
/* rx et_rxq{f}[4:0] bitfield definitions
* preprocessor definitions for the bitfield "et_rxq{f}[4:0]".
@@ -1170,17 +1176,17 @@
*/
/* register address for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_msk 0x01f00000
+#define HW_ATL_RPF_ET_RXQF_MSK 0x01f00000
/* inverted bitmask for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_mskn 0xfe0fffff
+#define HW_ATL_RPF_ET_RXQF_MSKN 0xfe0fffff
/* lower bit position of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_shift 20
+#define HW_ATL_RPF_ET_RXQF_SHIFT 20
/* width of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_width 5
+#define HW_ATL_RPF_ET_RXQF_WIDTH 5
/* default value of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_default 0x0
+#define HW_ATL_RPF_ET_RXQF_DEFAULT 0x0
/* rx et_mng_rxq{f} bitfield definitions
* preprocessor definitions for the bitfield "et_mng_rxq{f}".
@@ -1189,17 +1195,17 @@
*/
/* register address for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_MNG_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_msk 0x00080000
+#define HW_ATL_RPF_ET_MNG_RXQF_MSK 0x00080000
/* inverted bitmask for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_mskn 0xfff7ffff
+#define HW_ATL_RPF_ET_MNG_RXQF_MSKN 0xfff7ffff
/* lower bit position of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_shift 19
+#define HW_ATL_RPF_ET_MNG_RXQF_SHIFT 19
/* width of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_width 1
+#define HW_ATL_RPF_ET_MNG_RXQF_WIDTH 1
/* default value of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_default 0x0
+#define HW_ATL_RPF_ET_MNG_RXQF_DEFAULT 0x0
/* rx et_act{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "et_act{f}[2:0]".
@@ -1208,17 +1214,17 @@
*/
/* register address for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_ACTF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_msk 0x00070000
+#define HW_ATL_RPF_ET_ACTF_MSK 0x00070000
/* inverted bitmask for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_mskn 0xfff8ffff
+#define HW_ATL_RPF_ET_ACTF_MSKN 0xfff8ffff
/* lower bit position of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_shift 16
+#define HW_ATL_RPF_ET_ACTF_SHIFT 16
/* width of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_width 3
+#define HW_ATL_RPF_ET_ACTF_WIDTH 3
/* default value of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_default 0x0
+#define HW_ATL_RPF_ET_ACTF_DEFAULT 0x0
/* rx et_val{f}[f:0] bitfield definitions
* preprocessor definitions for the bitfield "et_val{f}[f:0]".
@@ -1227,17 +1233,17 @@
*/
/* register address for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_VALF_ADR(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_msk 0x0000ffff
+#define HW_ATL_RPF_ET_VALF_MSK 0x0000ffff
/* inverted bitmask for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_mskn 0xffff0000
+#define HW_ATL_RPF_ET_VALF_MSKN 0xffff0000
/* lower bit position of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_shift 0
+#define HW_ATL_RPF_ET_VALF_SHIFT 0
/* width of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_width 16
+#define HW_ATL_RPF_ET_VALF_WIDTH 16
/* default value of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_default 0x0
+#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
/* rx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
@@ -1245,17 +1251,17 @@
*/
/* register address for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_adr 0x00005580
+#define HW_ATL_RPO_IPV4CHK_EN_ADR 0x00005580
/* bitmask for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_msk 0x00000002
+#define HW_ATL_RPO_IPV4CHK_EN_MSK 0x00000002
/* inverted bitmask for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_mskn 0xfffffffd
+#define HW_ATL_RPO_IPV4CHK_EN_MSKN 0xfffffffd
/* lower bit position of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_shift 1
+#define HW_ATL_RPO_IPV4CHK_EN_SHIFT 1
/* width of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_width 1
+#define HW_ATL_RPO_IPV4CHK_EN_WIDTH 1
/* default value of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_default 0x0
+#define HW_ATL_RPO_IPV4CHK_EN_DEFAULT 0x0
/* rx desc{d}_vl_strip bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_vl_strip".
@@ -1264,17 +1270,18 @@
*/
/* register address for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor) \
+ (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_msk 0x20000000
+#define HW_ATL_RPO_DESCDVL_STRIP_MSK 0x20000000
/* inverted bitmask for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_mskn 0xdfffffff
+#define HW_ATL_RPO_DESCDVL_STRIP_MSKN 0xdfffffff
/* lower bit position of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_shift 29
+#define HW_ATL_RPO_DESCDVL_STRIP_SHIFT 29
/* width of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_width 1
+#define HW_ATL_RPO_DESCDVL_STRIP_WIDTH 1
/* default value of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_default 0x0
+#define HW_ATL_RPO_DESCDVL_STRIP_DEFAULT 0x0
/* rx l4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "l4_chk_en".
@@ -1282,17 +1289,17 @@
*/
/* register address for bitfield l4_chk_en */
-#define rpol4chk_en_adr 0x00005580
+#define HW_ATL_RPOL4CHK_EN_ADR 0x00005580
/* bitmask for bitfield l4_chk_en */
-#define rpol4chk_en_msk 0x00000001
+#define HW_ATL_RPOL4CHK_EN_MSK 0x00000001
/* inverted bitmask for bitfield l4_chk_en */
-#define rpol4chk_en_mskn 0xfffffffe
+#define HW_ATL_RPOL4CHK_EN_MSKN 0xfffffffe
/* lower bit position of bitfield l4_chk_en */
-#define rpol4chk_en_shift 0
+#define HW_ATL_RPOL4CHK_EN_SHIFT 0
/* width of bitfield l4_chk_en */
-#define rpol4chk_en_width 1
+#define HW_ATL_RPOL4CHK_EN_WIDTH 1
/* default value of bitfield l4_chk_en */
-#define rpol4chk_en_default 0x0
+#define HW_ATL_RPOL4CHK_EN_DEFAULT 0x0
/* rx reg_res_dsbl bitfield definitions
* preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -1300,17 +1307,17 @@
*/
/* register address for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_adr 0x00005000
+#define HW_ATL_RX_REG_RES_DSBL_ADR 0x00005000
/* bitmask for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_RX_REG_RES_DSBL_MSK 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_RX_REG_RES_DSBL_MSKN 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_shift 29
+#define HW_ATL_RX_REG_RES_DSBL_SHIFT 29
/* width of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_width 1
+#define HW_ATL_RX_REG_RES_DSBL_WIDTH 1
/* default value of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_default 0x1
+#define HW_ATL_RX_REG_RES_DSBL_DEFAULT 0x1
/* tx dca{d}_cpuid[7:0] bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]".
@@ -1319,17 +1326,17 @@
*/
/* register address for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCADCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_msk 0x000000ff
+#define HW_ATL_TDM_DCADCPUID_MSK 0x000000ff
/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_mskn 0xffffff00
+#define HW_ATL_TDM_DCADCPUID_MSKN 0xffffff00
/* lower bit position of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_shift 0
+#define HW_ATL_TDM_DCADCPUID_SHIFT 0
/* width of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_width 8
+#define HW_ATL_TDM_DCADCPUID_WIDTH 8
/* default value of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_default 0x0
+#define HW_ATL_TDM_DCADCPUID_DEFAULT 0x0
/* tx lso_en[1f:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_en[1f:0]".
@@ -1337,17 +1344,17 @@
*/
/* register address for bitfield lso_en[1f:0] */
-#define tdm_lso_en_adr 0x00007810
+#define HW_ATL_TDM_LSO_EN_ADR 0x00007810
/* bitmask for bitfield lso_en[1f:0] */
-#define tdm_lso_en_msk 0xffffffff
+#define HW_ATL_TDM_LSO_EN_MSK 0xffffffff
/* inverted bitmask for bitfield lso_en[1f:0] */
-#define tdm_lso_en_mskn 0x00000000
+#define HW_ATL_TDM_LSO_EN_MSKN 0x00000000
/* lower bit position of bitfield lso_en[1f:0] */
-#define tdm_lso_en_shift 0
+#define HW_ATL_TDM_LSO_EN_SHIFT 0
/* width of bitfield lso_en[1f:0] */
-#define tdm_lso_en_width 32
+#define HW_ATL_TDM_LSO_EN_WIDTH 32
/* default value of bitfield lso_en[1f:0] */
-#define tdm_lso_en_default 0x0
+#define HW_ATL_TDM_LSO_EN_DEFAULT 0x0
/* tx dca_en bitfield definitions
* preprocessor definitions for the bitfield "dca_en".
@@ -1355,17 +1362,17 @@
*/
/* register address for bitfield dca_en */
-#define tdm_dca_en_adr 0x00008480
+#define HW_ATL_TDM_DCA_EN_ADR 0x00008480
/* bitmask for bitfield dca_en */
-#define tdm_dca_en_msk 0x80000000
+#define HW_ATL_TDM_DCA_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca_en */
-#define tdm_dca_en_mskn 0x7fffffff
+#define HW_ATL_TDM_DCA_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca_en */
-#define tdm_dca_en_shift 31
+#define HW_ATL_TDM_DCA_EN_SHIFT 31
/* width of bitfield dca_en */
-#define tdm_dca_en_width 1
+#define HW_ATL_TDM_DCA_EN_WIDTH 1
/* default value of bitfield dca_en */
-#define tdm_dca_en_default 0x1
+#define HW_ATL_TDM_DCA_EN_DEFAULT 0x1
/* tx dca_mode[3:0] bitfield definitions
* preprocessor definitions for the bitfield "dca_mode[3:0]".
@@ -1373,17 +1380,17 @@
*/
/* register address for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_adr 0x00008480
+#define HW_ATL_TDM_DCA_MODE_ADR 0x00008480
/* bitmask for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_msk 0x0000000f
+#define HW_ATL_TDM_DCA_MODE_MSK 0x0000000f
/* inverted bitmask for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_mskn 0xfffffff0
+#define HW_ATL_TDM_DCA_MODE_MSKN 0xfffffff0
/* lower bit position of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_shift 0
+#define HW_ATL_TDM_DCA_MODE_SHIFT 0
/* width of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_width 4
+#define HW_ATL_TDM_DCA_MODE_WIDTH 4
/* default value of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_default 0x0
+#define HW_ATL_TDM_DCA_MODE_DEFAULT 0x0
/* tx dca{d}_desc_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_desc_en".
@@ -1392,17 +1399,17 @@
*/
/* register address for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCADDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_msk 0x80000000
+#define HW_ATL_TDM_DCADDESC_EN_MSK 0x80000000
/* inverted bitmask for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_mskn 0x7fffffff
+#define HW_ATL_TDM_DCADDESC_EN_MSKN 0x7fffffff
/* lower bit position of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_shift 31
+#define HW_ATL_TDM_DCADDESC_EN_SHIFT 31
/* width of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_width 1
+#define HW_ATL_TDM_DCADDESC_EN_WIDTH 1
/* default value of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_default 0x0
+#define HW_ATL_TDM_DCADDESC_EN_DEFAULT 0x0
/* tx desc{d}_en bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_en".
@@ -1411,17 +1418,17 @@
*/
/* register address for bitfield desc{d}_en */
-#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_en */
-#define tdm_descden_msk 0x80000000
+#define HW_ATL_TDM_DESCDEN_MSK 0x80000000
/* inverted bitmask for bitfield desc{d}_en */
-#define tdm_descden_mskn 0x7fffffff
+#define HW_ATL_TDM_DESCDEN_MSKN 0x7fffffff
/* lower bit position of bitfield desc{d}_en */
-#define tdm_descden_shift 31
+#define HW_ATL_TDM_DESCDEN_SHIFT 31
/* width of bitfield desc{d}_en */
-#define tdm_descden_width 1
+#define HW_ATL_TDM_DESCDEN_WIDTH 1
/* default value of bitfield desc{d}_en */
-#define tdm_descden_default 0x0
+#define HW_ATL_TDM_DESCDEN_DEFAULT 0x0
/* tx desc{d}_hd[c:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
@@ -1430,15 +1437,15 @@
*/
/* register address for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDHD_ADR(descriptor) (0x00007c0c + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_msk 0x00001fff
+#define HW_ATL_TDM_DESCDHD_MSK 0x00001fff
/* inverted bitmask for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_mskn 0xffffe000
+#define HW_ATL_TDM_DESCDHD_MSKN 0xffffe000
/* lower bit position of bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_shift 0
+#define HW_ATL_TDM_DESCDHD_SHIFT 0
/* width of bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_width 13
+#define HW_ATL_TDM_DESCDHD_WIDTH 13
/* tx desc{d}_len[9:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_len[9:0]".
@@ -1447,17 +1454,17 @@
*/
/* register address for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDLEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_msk 0x00001ff8
+#define HW_ATL_TDM_DESCDLEN_MSK 0x00001ff8
/* inverted bitmask for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_mskn 0xffffe007
+#define HW_ATL_TDM_DESCDLEN_MSKN 0xffffe007
/* lower bit position of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_shift 3
+#define HW_ATL_TDM_DESCDLEN_SHIFT 3
/* width of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_width 10
+#define HW_ATL_TDM_DESCDLEN_WIDTH 10
/* default value of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_default 0x0
+#define HW_ATL_TDM_DESCDLEN_DEFAULT 0x0
/* tx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
@@ -1465,17 +1472,17 @@
*/
/* register address for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_adr 0x00007b40
+#define HW_ATL_TDM_INT_DESC_WRB_EN_ADR 0x00007b40
/* bitmask for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_msk 0x00000002
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSK 0x00000002
/* inverted bitmask for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_mskn 0xfffffffd
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSKN 0xfffffffd
/* lower bit position of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_shift 1
+#define HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT 1
/* width of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_width 1
+#define HW_ATL_TDM_INT_DESC_WRB_EN_WIDTH 1
/* default value of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_default 0x0
+#define HW_ATL_TDM_INT_DESC_WRB_EN_DEFAULT 0x0
/* tx desc{d}_wrb_thresh[6:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]".
@@ -1484,17 +1491,18 @@
*/
/* register address for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor) \
+ (0x00007c18 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_msk 0x00007f00
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSK 0x00007f00
/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_mskn 0xffff80ff
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSKN 0xffff80ff
/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_shift 8
+#define HW_ATL_TDM_DESCDWRB_THRESH_SHIFT 8
/* width of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_width 7
+#define HW_ATL_TDM_DESCDWRB_THRESH_WIDTH 7
/* default value of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_default 0x0
+#define HW_ATL_TDM_DESCDWRB_THRESH_DEFAULT 0x0
/* tx lso_tcp_flag_first[b:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]".
@@ -1502,17 +1510,17 @@
*/
/* register address for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_adr 0x00007820
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR 0x00007820
/* bitmask for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_msk 0x00000fff
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK 0x00000fff
/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_mskn 0xfffff000
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSKN 0xfffff000
/* lower bit position of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_shift 0
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT 0
/* width of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_WIDTH 12
/* default value of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_DEFAULT 0x0
/* tx lso_tcp_flag_last[b:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]".
@@ -1520,17 +1528,17 @@
*/
/* register address for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_adr 0x00007824
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR 0x00007824
/* bitmask for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_msk 0x00000fff
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK 0x00000fff
/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_mskn 0xfffff000
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSKN 0xfffff000
/* lower bit position of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_shift 0
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT 0
/* width of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_WIDTH 12
/* default value of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_DEFAULT 0x0
/* tx lso_tcp_flag_mid[b:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_tcp_flag_mid[b:0]".
@@ -1538,17 +1546,17 @@
*/
/* Register address for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_adr 0x00005598
+#define HW_ATL_RPO_LRO_RSC_MAX_ADR 0x00005598
/* Bitmask for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_msk 0xFFFFFFFF
+#define HW_ATL_RPO_LRO_RSC_MAX_MSK 0xFFFFFFFF
/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_mskn 0x00000000
+#define HW_ATL_RPO_LRO_RSC_MAX_MSKN 0x00000000
/* Lower bit position of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_shift 0
+#define HW_ATL_RPO_LRO_RSC_MAX_SHIFT 0
/* Width of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_width 32
+#define HW_ATL_RPO_LRO_RSC_MAX_WIDTH 32
/* Default value of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_default 0x0
+#define HW_ATL_RPO_LRO_RSC_MAX_DEFAULT 0x0
/* RX lro_en[1F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_en[1F:0]".
@@ -1556,17 +1564,17 @@
*/
/* Register address for bitfield lro_en[1F:0] */
-#define rpo_lro_en_adr 0x00005590
+#define HW_ATL_RPO_LRO_EN_ADR 0x00005590
/* Bitmask for bitfield lro_en[1F:0] */
-#define rpo_lro_en_msk 0xFFFFFFFF
+#define HW_ATL_RPO_LRO_EN_MSK 0xFFFFFFFF
/* Inverted bitmask for bitfield lro_en[1F:0] */
-#define rpo_lro_en_mskn 0x00000000
+#define HW_ATL_RPO_LRO_EN_MSKN 0x00000000
/* Lower bit position of bitfield lro_en[1F:0] */
-#define rpo_lro_en_shift 0
+#define HW_ATL_RPO_LRO_EN_SHIFT 0
/* Width of bitfield lro_en[1F:0] */
-#define rpo_lro_en_width 32
+#define HW_ATL_RPO_LRO_EN_WIDTH 32
/* Default value of bitfield lro_en[1F:0] */
-#define rpo_lro_en_default 0x0
+#define HW_ATL_RPO_LRO_EN_DEFAULT 0x0
/* RX lro_ptopt_en Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_ptopt_en".
@@ -1574,17 +1582,17 @@
*/
/* Register address for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_adr 0x00005594
+#define HW_ATL_RPO_LRO_PTOPT_EN_ADR 0x00005594
/* Bitmask for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_msk 0x00008000
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSK 0x00008000
/* Inverted bitmask for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSKN 0xFFFF7FFF
/* Lower bit position of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_shift 15
+#define HW_ATL_RPO_LRO_PTOPT_EN_SHIFT 15
/* Width of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_width 1
+#define HW_ATL_RPO_LRO_PTOPT_EN_WIDTH 1
/* Default value of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_defalt 0x1
+#define HW_ATL_RPO_LRO_PTOPT_EN_DEFALT 0x1
/* RX lro_q_ses_lmt Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_q_ses_lmt".
@@ -1592,17 +1600,17 @@
*/
/* Register address for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_adr 0x00005594
+#define HW_ATL_RPO_LRO_QSES_LMT_ADR 0x00005594
/* Bitmask for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_msk 0x00003000
+#define HW_ATL_RPO_LRO_QSES_LMT_MSK 0x00003000
/* Inverted bitmask for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF
+#define HW_ATL_RPO_LRO_QSES_LMT_MSKN 0xFFFFCFFF
/* Lower bit position of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_shift 12
+#define HW_ATL_RPO_LRO_QSES_LMT_SHIFT 12
/* Width of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_width 2
+#define HW_ATL_RPO_LRO_QSES_LMT_WIDTH 2
/* Default value of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_default 0x1
+#define HW_ATL_RPO_LRO_QSES_LMT_DEFAULT 0x1
/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]".
@@ -1610,17 +1618,17 @@
*/
/* Register address for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_adr 0x00005594
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR 0x00005594
/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_msk 0x00000060
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK 0x00000060
/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSKN 0xFFFFFF9F
/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_shift 5
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT 5
/* Width of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_width 2
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_WIDTH 2
/* Default value of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_defalt 0x1
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_DEFALT 0x1
/* RX lro_pkt_min[4:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_pkt_min[4:0]".
@@ -1628,22 +1636,22 @@
*/
/* Register address for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_adr 0x00005594
+#define HW_ATL_RPO_LRO_PKT_MIN_ADR 0x00005594
/* Bitmask for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_msk 0x0000001F
+#define HW_ATL_RPO_LRO_PKT_MIN_MSK 0x0000001F
/* Inverted bitmask for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_mskn 0xFFFFFFE0
+#define HW_ATL_RPO_LRO_PKT_MIN_MSKN 0xFFFFFFE0
/* Lower bit position of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_shift 0
+#define HW_ATL_RPO_LRO_PKT_MIN_SHIFT 0
/* Width of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_width 5
+#define HW_ATL_RPO_LRO_PKT_MIN_WIDTH 5
/* Default value of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_default 0x8
+#define HW_ATL_RPO_LRO_PKT_MIN_DEFAULT 0x8
/* Width of bitfield lro{L}_des_max[1:0] */
-#define rpo_lro_ldes_max_width 2
+#define HW_ATL_RPO_LRO_LDES_MAX_WIDTH 2
/* Default value of bitfield lro{L}_des_max[1:0] */
-#define rpo_lro_ldes_max_default 0x0
+#define HW_ATL_RPO_LRO_LDES_MAX_DEFAULT 0x0
/* RX lro_tb_div[11:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_tb_div[11:0]".
@@ -1651,17 +1659,17 @@
*/
/* Register address for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_adr 0x00005620
+#define HW_ATL_RPO_LRO_TB_DIV_ADR 0x00005620
/* Bitmask for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_msk 0xFFF00000
+#define HW_ATL_RPO_LRO_TB_DIV_MSK 0xFFF00000
/* Inverted bitmask for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_mskn 0x000FFFFF
+#define HW_ATL_RPO_LRO_TB_DIV_MSKN 0x000FFFFF
/* Lower bit position of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_shift 20
+#define HW_ATL_RPO_LRO_TB_DIV_SHIFT 20
/* Width of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_width 12
+#define HW_ATL_RPO_LRO_TB_DIV_WIDTH 12
/* Default value of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_default 0xC35
+#define HW_ATL_RPO_LRO_TB_DIV_DEFAULT 0xC35
/* RX lro_ina_ival[9:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_ina_ival[9:0]".
@@ -1669,17 +1677,17 @@
*/
/* Register address for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_adr 0x00005620
+#define HW_ATL_RPO_LRO_INA_IVAL_ADR 0x00005620
/* Bitmask for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_msk 0x000FFC00
+#define HW_ATL_RPO_LRO_INA_IVAL_MSK 0x000FFC00
/* Inverted bitmask for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_mskn 0xFFF003FF
+#define HW_ATL_RPO_LRO_INA_IVAL_MSKN 0xFFF003FF
/* Lower bit position of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_shift 10
+#define HW_ATL_RPO_LRO_INA_IVAL_SHIFT 10
/* Width of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_width 10
+#define HW_ATL_RPO_LRO_INA_IVAL_WIDTH 10
/* Default value of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_default 0xA
+#define HW_ATL_RPO_LRO_INA_IVAL_DEFAULT 0xA
/* RX lro_max_ival[9:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lro_max_ival[9:0]".
@@ -1687,17 +1695,17 @@
*/
/* Register address for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_adr 0x00005620
+#define HW_ATL_RPO_LRO_MAX_IVAL_ADR 0x00005620
/* Bitmask for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_msk 0x000003FF
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSK 0x000003FF
/* Inverted bitmask for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_mskn 0xFFFFFC00
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSKN 0xFFFFFC00
/* Lower bit position of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_shift 0
+#define HW_ATL_RPO_LRO_MAX_IVAL_SHIFT 0
/* Width of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_width 10
+#define HW_ATL_RPO_LRO_MAX_IVAL_WIDTH 10
/* Default value of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_default 0x19
+#define HW_ATL_RPO_LRO_MAX_IVAL_DEFAULT 0x19
/* TX dca{D}_cpuid[7:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]".
@@ -1706,17 +1714,17 @@
*/
/* Register address for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCA_DCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
/* Bitmask for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_msk 0x000000FF
+#define HW_ATL_TDM_DCA_DCPUID_MSK 0x000000FF
/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_mskn 0xFFFFFF00
+#define HW_ATL_TDM_DCA_DCPUID_MSKN 0xFFFFFF00
/* Lower bit position of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_shift 0
+#define HW_ATL_TDM_DCA_DCPUID_SHIFT 0
/* Width of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_width 8
+#define HW_ATL_TDM_DCA_DCPUID_WIDTH 8
/* Default value of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_default 0x0
+#define HW_ATL_TDM_DCA_DCPUID_DEFAULT 0x0
/* TX dca{D}_desc_en Bitfield Definitions
* Preprocessor definitions for the bitfield "dca{D}_desc_en".
@@ -1725,17 +1733,17 @@
*/
/* Register address for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCA_DDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
/* Bitmask for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_msk 0x80000000
+#define HW_ATL_TDM_DCA_DDESC_EN_MSK 0x80000000
/* Inverted bitmask for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF
+#define HW_ATL_TDM_DCA_DDESC_EN_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_shift 31
+#define HW_ATL_TDM_DCA_DDESC_EN_SHIFT 31
/* Width of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_width 1
+#define HW_ATL_TDM_DCA_DDESC_EN_WIDTH 1
/* Default value of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_default 0x0
+#define HW_ATL_TDM_DCA_DDESC_EN_DEFAULT 0x0
/* TX desc{D}_en Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_en".
@@ -1744,17 +1752,17 @@
*/
/* Register address for bitfield desc{D}_en */
-#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_en */
-#define tdm_desc_den_msk 0x80000000
+#define HW_ATL_TDM_DESC_DEN_MSK 0x80000000
/* Inverted bitmask for bitfield desc{D}_en */
-#define tdm_desc_den_mskn 0x7FFFFFFF
+#define HW_ATL_TDM_DESC_DEN_MSKN 0x7FFFFFFF
/* Lower bit position of bitfield desc{D}_en */
-#define tdm_desc_den_shift 31
+#define HW_ATL_TDM_DESC_DEN_SHIFT 31
/* Width of bitfield desc{D}_en */
-#define tdm_desc_den_width 1
+#define HW_ATL_TDM_DESC_DEN_WIDTH 1
/* Default value of bitfield desc{D}_en */
-#define tdm_desc_den_default 0x0
+#define HW_ATL_TDM_DESC_DEN_DEFAULT 0x0
/* TX desc{D}_hd[C:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_hd[C:0]".
@@ -1763,15 +1771,15 @@
*/
/* Register address for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DHD_ADR(descriptor) (0x00007C0C + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_msk 0x00001FFF
+#define HW_ATL_TDM_DESC_DHD_MSK 0x00001FFF
/* Inverted bitmask for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_mskn 0xFFFFE000
+#define HW_ATL_TDM_DESC_DHD_MSKN 0xFFFFE000
/* Lower bit position of bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_shift 0
+#define HW_ATL_TDM_DESC_DHD_SHIFT 0
/* Width of bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_width 13
+#define HW_ATL_TDM_DESC_DHD_WIDTH 13
/* TX desc{D}_len[9:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_len[9:0]".
@@ -1780,17 +1788,17 @@
*/
/* Register address for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DLEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_msk 0x00001FF8
+#define HW_ATL_TDM_DESC_DLEN_MSK 0x00001FF8
/* Inverted bitmask for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_mskn 0xFFFFE007
+#define HW_ATL_TDM_DESC_DLEN_MSKN 0xFFFFE007
/* Lower bit position of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_shift 3
+#define HW_ATL_TDM_DESC_DLEN_SHIFT 3
/* Width of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_width 10
+#define HW_ATL_TDM_DESC_DLEN_WIDTH 10
/* Default value of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_default 0x0
+#define HW_ATL_TDM_DESC_DLEN_DEFAULT 0x0
/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]".
@@ -1799,18 +1807,18 @@
*/
/* Register address for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_adr(descriptor) \
+#define HW_ATL_TDM_DESC_DWRB_THRESH_ADR(descriptor) \
(0x00007C18 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_msk 0x00007F00
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSK 0x00007F00
/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSKN 0xFFFF80FF
/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_shift 8
+#define HW_ATL_TDM_DESC_DWRB_THRESH_SHIFT 8
/* Width of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_width 7
+#define HW_ATL_TDM_DESC_DWRB_THRESH_WIDTH 7
/* Default value of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_default 0x0
+#define HW_ATL_TDM_DESC_DWRB_THRESH_DEFAULT 0x0
/* TX tdm_int_mod_en Bitfield Definitions
* Preprocessor definitions for the bitfield "tdm_int_mod_en".
@@ -1818,34 +1826,34 @@
*/
/* Register address for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_adr 0x00007B40
+#define HW_ATL_TDM_INT_MOD_EN_ADR 0x00007B40
/* Bitmask for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_msk 0x00000010
+#define HW_ATL_TDM_INT_MOD_EN_MSK 0x00000010
/* Inverted bitmask for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_mskn 0xFFFFFFEF
+#define HW_ATL_TDM_INT_MOD_EN_MSKN 0xFFFFFFEF
/* Lower bit position of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_shift 4
+#define HW_ATL_TDM_INT_MOD_EN_SHIFT 4
/* Width of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_width 1
+#define HW_ATL_TDM_INT_MOD_EN_WIDTH 1
/* Default value of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_default 0x0
+#define HW_ATL_TDM_INT_MOD_EN_DEFAULT 0x0
/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]".
* PORT="pif_thm_lso_tcp_flag_mid_i[11:0]"
*/
/* register address for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_adr 0x00007820
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_ADR 0x00007820
/* bitmask for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_msk 0x0fff0000
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSK 0x0fff0000
/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_mskn 0xf000ffff
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSKN 0xf000ffff
/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_shift 16
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT 16
/* width of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_WIDTH 12
/* default value of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0
/* tx tx_buf_en bitfield definitions
* preprocessor definitions for the bitfield "tx_buf_en".
@@ -1853,17 +1861,17 @@
*/
/* register address for bitfield tx_buf_en */
-#define tpb_tx_buf_en_adr 0x00007900
+#define HW_ATL_TPB_TX_BUF_EN_ADR 0x00007900
/* bitmask for bitfield tx_buf_en */
-#define tpb_tx_buf_en_msk 0x00000001
+#define HW_ATL_TPB_TX_BUF_EN_MSK 0x00000001
/* inverted bitmask for bitfield tx_buf_en */
-#define tpb_tx_buf_en_mskn 0xfffffffe
+#define HW_ATL_TPB_TX_BUF_EN_MSKN 0xfffffffe
/* lower bit position of bitfield tx_buf_en */
-#define tpb_tx_buf_en_shift 0
+#define HW_ATL_TPB_TX_BUF_EN_SHIFT 0
/* width of bitfield tx_buf_en */
-#define tpb_tx_buf_en_width 1
+#define HW_ATL_TPB_TX_BUF_EN_WIDTH 1
/* default value of bitfield tx_buf_en */
-#define tpb_tx_buf_en_default 0x0
+#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
/* tx tx{b}_hi_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
@@ -1872,17 +1880,17 @@
*/
/* register address for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBHI_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_msk 0x1fff0000
+#define HW_ATL_TPB_TXBHI_THRESH_MSK 0x1fff0000
/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_mskn 0xe000ffff
+#define HW_ATL_TPB_TXBHI_THRESH_MSKN 0xe000ffff
/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_shift 16
+#define HW_ATL_TPB_TXBHI_THRESH_SHIFT 16
/* width of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_width 13
+#define HW_ATL_TPB_TXBHI_THRESH_WIDTH 13
/* default value of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_default 0x0
+#define HW_ATL_TPB_TXBHI_THRESH_DEFAULT 0x0
/* tx tx{b}_lo_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]".
@@ -1891,17 +1899,17 @@
*/
/* register address for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBLO_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_msk 0x00001fff
+#define HW_ATL_TPB_TXBLO_THRESH_MSK 0x00001fff
/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_mskn 0xffffe000
+#define HW_ATL_TPB_TXBLO_THRESH_MSKN 0xffffe000
/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_shift 0
+#define HW_ATL_TPB_TXBLO_THRESH_SHIFT 0
/* width of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_width 13
+#define HW_ATL_TPB_TXBLO_THRESH_WIDTH 13
/* default value of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_default 0x0
+#define HW_ATL_TPB_TXBLO_THRESH_DEFAULT 0x0
/* tx dma_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "dma_sys_loopback".
@@ -1909,17 +1917,17 @@
*/
/* register address for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_adr 0x00007000
+#define HW_ATL_TPB_DMA_SYS_LBK_ADR 0x00007000
/* bitmask for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_msk 0x00000040
+#define HW_ATL_TPB_DMA_SYS_LBK_MSK 0x00000040
/* inverted bitmask for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_mskn 0xffffffbf
+#define HW_ATL_TPB_DMA_SYS_LBK_MSKN 0xffffffbf
/* lower bit position of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_shift 6
+#define HW_ATL_TPB_DMA_SYS_LBK_SHIFT 6
/* width of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_width 1
+#define HW_ATL_TPB_DMA_SYS_LBK_WIDTH 1
/* default value of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_default 0x0
+#define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0
/* tx tx{b}_buf_size[7:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
@@ -1928,17 +1936,17 @@
*/
/* register address for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer) (0x00007910 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_msk 0x000000ff
+#define HW_ATL_TPB_TXBBUF_SIZE_MSK 0x000000ff
/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_mskn 0xffffff00
+#define HW_ATL_TPB_TXBBUF_SIZE_MSKN 0xffffff00
/* lower bit position of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_shift 0
+#define HW_ATL_TPB_TXBBUF_SIZE_SHIFT 0
/* width of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_width 8
+#define HW_ATL_TPB_TXBBUF_SIZE_WIDTH 8
/* default value of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_default 0x0
+#define HW_ATL_TPB_TXBBUF_SIZE_DEFAULT 0x0
/* tx tx_scp_ins_en bitfield definitions
* preprocessor definitions for the bitfield "tx_scp_ins_en".
@@ -1946,17 +1954,17 @@
*/
/* register address for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_adr 0x00007900
+#define HW_ATL_TPB_TX_SCP_INS_EN_ADR 0x00007900
/* bitmask for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_msk 0x00000004
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSK 0x00000004
/* inverted bitmask for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_mskn 0xfffffffb
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSKN 0xfffffffb
/* lower bit position of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_shift 2
+#define HW_ATL_TPB_TX_SCP_INS_EN_SHIFT 2
/* width of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_width 1
+#define HW_ATL_TPB_TX_SCP_INS_EN_WIDTH 1
/* default value of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_default 0x0
+#define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0
/* tx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
@@ -1964,17 +1972,17 @@
*/
/* register address for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_adr 0x00007800
+#define HW_ATL_TPO_IPV4CHK_EN_ADR 0x00007800
/* bitmask for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_msk 0x00000002
+#define HW_ATL_TPO_IPV4CHK_EN_MSK 0x00000002
/* inverted bitmask for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_mskn 0xfffffffd
+#define HW_ATL_TPO_IPV4CHK_EN_MSKN 0xfffffffd
/* lower bit position of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_shift 1
+#define HW_ATL_TPO_IPV4CHK_EN_SHIFT 1
/* width of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_width 1
+#define HW_ATL_TPO_IPV4CHK_EN_WIDTH 1
/* default value of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_default 0x0
+#define HW_ATL_TPO_IPV4CHK_EN_DEFAULT 0x0
/* tx l4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "l4_chk_en".
@@ -1982,17 +1990,17 @@
*/
/* register address for bitfield l4_chk_en */
-#define tpol4chk_en_adr 0x00007800
+#define HW_ATL_TPOL4CHK_EN_ADR 0x00007800
/* bitmask for bitfield l4_chk_en */
-#define tpol4chk_en_msk 0x00000001
+#define HW_ATL_TPOL4CHK_EN_MSK 0x00000001
/* inverted bitmask for bitfield l4_chk_en */
-#define tpol4chk_en_mskn 0xfffffffe
+#define HW_ATL_TPOL4CHK_EN_MSKN 0xfffffffe
/* lower bit position of bitfield l4_chk_en */
-#define tpol4chk_en_shift 0
+#define HW_ATL_TPOL4CHK_EN_SHIFT 0
/* width of bitfield l4_chk_en */
-#define tpol4chk_en_width 1
+#define HW_ATL_TPOL4CHK_EN_WIDTH 1
/* default value of bitfield l4_chk_en */
-#define tpol4chk_en_default 0x0
+#define HW_ATL_TPOL4CHK_EN_DEFAULT 0x0
/* tx pkt_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "pkt_sys_loopback".
@@ -2000,17 +2008,17 @@
*/
/* register address for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_adr 0x00007000
+#define HW_ATL_TPO_PKT_SYS_LBK_ADR 0x00007000
/* bitmask for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_msk 0x00000080
+#define HW_ATL_TPO_PKT_SYS_LBK_MSK 0x00000080
/* inverted bitmask for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_mskn 0xffffff7f
+#define HW_ATL_TPO_PKT_SYS_LBK_MSKN 0xffffff7f
/* lower bit position of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_shift 7
+#define HW_ATL_TPO_PKT_SYS_LBK_SHIFT 7
/* width of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_width 1
+#define HW_ATL_TPO_PKT_SYS_LBK_WIDTH 1
/* default value of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_default 0x0
+#define HW_ATL_TPO_PKT_SYS_LBK_DEFAULT 0x0
/* tx data_tc_arb_mode bitfield definitions
* preprocessor definitions for the bitfield "data_tc_arb_mode".
@@ -2018,17 +2026,17 @@
*/
/* register address for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_adr 0x00007100
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_ADR 0x00007100
/* bitmask for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_msk 0x00000001
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSK 0x00000001
/* inverted bitmask for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_mskn 0xfffffffe
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffe
/* lower bit position of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_shift 0
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT 0
/* width of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_width 1
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_WIDTH 1
/* default value of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_default 0x0
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
/* tx desc_rate_ta_rst bitfield definitions
* preprocessor definitions for the bitfield "desc_rate_ta_rst".
@@ -2036,17 +2044,17 @@
*/
/* register address for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_adr 0x00007310
+#define HW_ATL_TPS_DESC_RATE_TA_RST_ADR 0x00007310
/* bitmask for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_msk 0x80000000
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSK 0x80000000
/* inverted bitmask for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_mskn 0x7fffffff
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSKN 0x7fffffff
/* lower bit position of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_shift 31
+#define HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT 31
/* width of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_width 1
+#define HW_ATL_TPS_DESC_RATE_TA_RST_WIDTH 1
/* default value of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_default 0x0
+#define HW_ATL_TPS_DESC_RATE_TA_RST_DEFAULT 0x0
/* tx desc_rate_limit[a:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_rate_limit[a:0]".
@@ -2054,17 +2062,17 @@
*/
/* register address for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_adr 0x00007310
+#define HW_ATL_TPS_DESC_RATE_LIM_ADR 0x00007310
/* bitmask for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_msk 0x000007ff
+#define HW_ATL_TPS_DESC_RATE_LIM_MSK 0x000007ff
/* inverted bitmask for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_mskn 0xfffff800
+#define HW_ATL_TPS_DESC_RATE_LIM_MSKN 0xfffff800
/* lower bit position of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_shift 0
+#define HW_ATL_TPS_DESC_RATE_LIM_SHIFT 0
/* width of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_width 11
+#define HW_ATL_TPS_DESC_RATE_LIM_WIDTH 11
/* default value of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_default 0x0
+#define HW_ATL_TPS_DESC_RATE_LIM_DEFAULT 0x0
/* tx desc_tc_arb_mode[1:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]".
@@ -2072,17 +2080,17 @@
*/
/* register address for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_adr 0x00007200
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_ADR 0x00007200
/* bitmask for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_msk 0x00000003
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSK 0x00000003
/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_mskn 0xfffffffc
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSKN 0xfffffffc
/* lower bit position of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_shift 0
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT 0
/* width of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_width 2
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_WIDTH 2
/* default value of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_default 0x0
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_DEFAULT 0x0
/* tx desc_tc{t}_credit_max[b:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]".
@@ -2091,17 +2099,17 @@
*/
/* register address for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4)
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc) (0x00007210 + (tc) * 0x4)
/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_msk 0x0fff0000
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK 0x0fff0000
/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_mskn 0xf000ffff
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSKN 0xf000ffff
/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_shift 16
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT 16
/* width of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_width 12
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_WIDTH 12
/* default value of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_default 0x0
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_DEFAULT 0x0
/* tx desc_tc{t}_weight[8:0] bitfield definitions
* preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]".
@@ -2110,17 +2118,17 @@
*/
/* register address for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4)
+#define HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc) (0x00007210 + (tc) * 0x4)
/* bitmask for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_msk 0x000001ff
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSK 0x000001ff
/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_mskn 0xfffffe00
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSKN 0xfffffe00
/* lower bit position of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_shift 0
+#define HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT 0
/* width of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_width 9
+#define HW_ATL_TPS_DESC_TCTWEIGHT_WIDTH 9
/* default value of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_default 0x0
+#define HW_ATL_TPS_DESC_TCTWEIGHT_DEFAULT 0x0
/* tx desc_vm_arb_mode bitfield definitions
* preprocessor definitions for the bitfield "desc_vm_arb_mode".
@@ -2128,17 +2136,17 @@
*/
/* register address for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_adr 0x00007300
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_ADR 0x00007300
/* bitmask for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_msk 0x00000001
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSK 0x00000001
/* inverted bitmask for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_mskn 0xfffffffe
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSKN 0xfffffffe
/* lower bit position of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_shift 0
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT 0
/* width of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_width 1
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_WIDTH 1
/* default value of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_default 0x0
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_DEFAULT 0x0
/* tx data_tc{t}_credit_max[b:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
@@ -2147,17 +2155,17 @@
*/
/* register address for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4)
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_msk 0x0fff0000
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000
/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_mskn 0xf000ffff
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff
/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_shift 16
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
/* width of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_width 12
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_WIDTH 12
/* default value of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_default 0x0
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
/* tx data_tc{t}_weight[8:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
@@ -2166,17 +2174,17 @@
*/
/* register address for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4)
+#define HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_msk 0x000001ff
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSK 0x000001ff
/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_mskn 0xfffffe00
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00
/* lower bit position of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_shift 0
+#define HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT 0
/* width of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_width 9
+#define HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH 9
/* default value of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_default 0x0
+#define HW_ATL_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
/* tx reg_res_dsbl bitfield definitions
* preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -2184,17 +2192,17 @@
*/
/* register address for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_adr 0x00007000
+#define HW_ATL_TX_REG_RES_DSBL_ADR 0x00007000
/* bitmask for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_TX_REG_RES_DSBL_MSK 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_TX_REG_RES_DSBL_MSKN 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_shift 29
+#define HW_ATL_TX_REG_RES_DSBL_SHIFT 29
/* width of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_width 1
+#define HW_ATL_TX_REG_RES_DSBL_WIDTH 1
/* default value of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_default 0x1
+#define HW_ATL_TX_REG_RES_DSBL_DEFAULT 0x1
/* mac_phy register access busy bitfield definitions
* preprocessor definitions for the bitfield "register access busy".
@@ -2202,15 +2210,15 @@
*/
/* register address for bitfield register access busy */
-#define msm_reg_access_busy_adr 0x00004400
+#define HW_ATL_MSM_REG_ACCESS_BUSY_ADR 0x00004400
/* bitmask for bitfield register access busy */
-#define msm_reg_access_busy_msk 0x00001000
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSK 0x00001000
/* inverted bitmask for bitfield register access busy */
-#define msm_reg_access_busy_mskn 0xffffefff
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSKN 0xffffefff
/* lower bit position of bitfield register access busy */
-#define msm_reg_access_busy_shift 12
+#define HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT 12
/* width of bitfield register access busy */
-#define msm_reg_access_busy_width 1
+#define HW_ATL_MSM_REG_ACCESS_BUSY_WIDTH 1
/* mac_phy msm register address[7:0] bitfield definitions
* preprocessor definitions for the bitfield "msm register address[7:0]".
@@ -2218,17 +2226,17 @@
*/
/* register address for bitfield msm register address[7:0] */
-#define msm_reg_addr_adr 0x00004400
+#define HW_ATL_MSM_REG_ADDR_ADR 0x00004400
/* bitmask for bitfield msm register address[7:0] */
-#define msm_reg_addr_msk 0x000000ff
+#define HW_ATL_MSM_REG_ADDR_MSK 0x000000ff
/* inverted bitmask for bitfield msm register address[7:0] */
-#define msm_reg_addr_mskn 0xffffff00
+#define HW_ATL_MSM_REG_ADDR_MSKN 0xffffff00
/* lower bit position of bitfield msm register address[7:0] */
-#define msm_reg_addr_shift 0
+#define HW_ATL_MSM_REG_ADDR_SHIFT 0
/* width of bitfield msm register address[7:0] */
-#define msm_reg_addr_width 8
+#define HW_ATL_MSM_REG_ADDR_WIDTH 8
/* default value of bitfield msm register address[7:0] */
-#define msm_reg_addr_default 0x0
+#define HW_ATL_MSM_REG_ADDR_DEFAULT 0x0
/* mac_phy register read strobe bitfield definitions
* preprocessor definitions for the bitfield "register read strobe".
@@ -2236,17 +2244,17 @@
*/
/* register address for bitfield register read strobe */
-#define msm_reg_rd_strobe_adr 0x00004400
+#define HW_ATL_MSM_REG_RD_STROBE_ADR 0x00004400
/* bitmask for bitfield register read strobe */
-#define msm_reg_rd_strobe_msk 0x00000200
+#define HW_ATL_MSM_REG_RD_STROBE_MSK 0x00000200
/* inverted bitmask for bitfield register read strobe */
-#define msm_reg_rd_strobe_mskn 0xfffffdff
+#define HW_ATL_MSM_REG_RD_STROBE_MSKN 0xfffffdff
/* lower bit position of bitfield register read strobe */
-#define msm_reg_rd_strobe_shift 9
+#define HW_ATL_MSM_REG_RD_STROBE_SHIFT 9
/* width of bitfield register read strobe */
-#define msm_reg_rd_strobe_width 1
+#define HW_ATL_MSM_REG_RD_STROBE_WIDTH 1
/* default value of bitfield register read strobe */
-#define msm_reg_rd_strobe_default 0x0
+#define HW_ATL_MSM_REG_RD_STROBE_DEFAULT 0x0
/* mac_phy msm register read data[31:0] bitfield definitions
* preprocessor definitions for the bitfield "msm register read data[31:0]".
@@ -2254,15 +2262,15 @@
*/
/* register address for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_adr 0x00004408
+#define HW_ATL_MSM_REG_RD_DATA_ADR 0x00004408
/* bitmask for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_msk 0xffffffff
+#define HW_ATL_MSM_REG_RD_DATA_MSK 0xffffffff
/* inverted bitmask for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_mskn 0x00000000
+#define HW_ATL_MSM_REG_RD_DATA_MSKN 0x00000000
/* lower bit position of bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_shift 0
+#define HW_ATL_MSM_REG_RD_DATA_SHIFT 0
/* width of bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_width 32
+#define HW_ATL_MSM_REG_RD_DATA_WIDTH 32
/* mac_phy msm register write data[31:0] bitfield definitions
* preprocessor definitions for the bitfield "msm register write data[31:0]".
@@ -2270,17 +2278,17 @@
*/
/* register address for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_adr 0x00004404
+#define HW_ATL_MSM_REG_WR_DATA_ADR 0x00004404
/* bitmask for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_msk 0xffffffff
+#define HW_ATL_MSM_REG_WR_DATA_MSK 0xffffffff
/* inverted bitmask for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_mskn 0x00000000
+#define HW_ATL_MSM_REG_WR_DATA_MSKN 0x00000000
/* lower bit position of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_shift 0
+#define HW_ATL_MSM_REG_WR_DATA_SHIFT 0
/* width of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_width 32
+#define HW_ATL_MSM_REG_WR_DATA_WIDTH 32
/* default value of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_default 0x0
+#define HW_ATL_MSM_REG_WR_DATA_DEFAULT 0x0
/* mac_phy register write strobe bitfield definitions
* preprocessor definitions for the bitfield "register write strobe".
@@ -2288,17 +2296,17 @@
*/
/* register address for bitfield register write strobe */
-#define msm_reg_wr_strobe_adr 0x00004400
+#define HW_ATL_MSM_REG_WR_STROBE_ADR 0x00004400
/* bitmask for bitfield register write strobe */
-#define msm_reg_wr_strobe_msk 0x00000100
+#define HW_ATL_MSM_REG_WR_STROBE_MSK 0x00000100
/* inverted bitmask for bitfield register write strobe */
-#define msm_reg_wr_strobe_mskn 0xfffffeff
+#define HW_ATL_MSM_REG_WR_STROBE_MSKN 0xfffffeff
/* lower bit position of bitfield register write strobe */
-#define msm_reg_wr_strobe_shift 8
+#define HW_ATL_MSM_REG_WR_STROBE_SHIFT 8
/* width of bitfield register write strobe */
-#define msm_reg_wr_strobe_width 1
+#define HW_ATL_MSM_REG_WR_STROBE_WIDTH 1
/* default value of bitfield register write strobe */
-#define msm_reg_wr_strobe_default 0x0
+#define HW_ATL_MSM_REG_WR_STROBE_DEFAULT 0x0
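The MSM (mac_phy) block is not memory-mapped directly; the busy/address/strobe/data fields above form an indirect access window at 0x4400-0x4408: latch the target address together with a strobe, poll the busy flag, then move data through the read/write data registers. A minimal sketch of a read using the renamed macros, assuming the aq_hw_read_reg()/aq_hw_write_reg() helpers that appear later in this patch (the function name and poll budget are hypothetical):

/* Hypothetical sketch of an indirect MSM register read; not the
 * driver's actual accessor.
 */
static int hw_atl_msm_reg_read_sketch(struct aq_hw_s *self, u8 addr, u32 *val)
{
	int budget = 100;

	/* One write to 0x4400 latches the target address (bits 7:0)
	 * and pulses the read strobe (bit 9).
	 */
	aq_hw_write_reg(self, HW_ATL_MSM_REG_ADDR_ADR,
			((u32)addr << HW_ATL_MSM_REG_ADDR_SHIFT) |
			HW_ATL_MSM_REG_RD_STROBE_MSK);

	/* Spin until the busy flag (bit 12) clears. */
	while (aq_hw_read_reg(self, HW_ATL_MSM_REG_ACCESS_BUSY_ADR) &
	       HW_ATL_MSM_REG_ACCESS_BUSY_MSK) {
		if (--budget == 0)
			return -ETIME;
	}

	*val = aq_hw_read_reg(self, HW_ATL_MSM_REG_RD_DATA_ADR);

	return 0;
}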
/* mif soft reset bitfield definitions
* preprocessor definitions for the bitfield "soft reset".
@@ -2306,17 +2314,17 @@
*/
/* register address for bitfield soft reset */
-#define glb_soft_res_adr 0x00000000
+#define HW_ATL_GLB_SOFT_RES_ADR 0x00000000
/* bitmask for bitfield soft reset */
-#define glb_soft_res_msk 0x00008000
+#define HW_ATL_GLB_SOFT_RES_MSK 0x00008000
/* inverted bitmask for bitfield soft reset */
-#define glb_soft_res_mskn 0xffff7fff
+#define HW_ATL_GLB_SOFT_RES_MSKN 0xffff7fff
/* lower bit position of bitfield soft reset */
-#define glb_soft_res_shift 15
+#define HW_ATL_GLB_SOFT_RES_SHIFT 15
/* width of bitfield soft reset */
-#define glb_soft_res_width 1
+#define HW_ATL_GLB_SOFT_RES_WIDTH 1
/* default value of bitfield soft reset */
-#define glb_soft_res_default 0x0
+#define HW_ATL_GLB_SOFT_RES_DEFAULT 0x0
/* mif register reset disable bitfield definitions
* preprocessor definitions for the bitfield "register reset disable".
@@ -2324,32 +2332,35 @@
*/
/* register address for bitfield register reset disable */
-#define glb_reg_res_dis_adr 0x00000000
+#define HW_ATL_GLB_REG_RES_DIS_ADR 0x00000000
/* bitmask for bitfield register reset disable */
-#define glb_reg_res_dis_msk 0x00004000
+#define HW_ATL_GLB_REG_RES_DIS_MSK 0x00004000
/* inverted bitmask for bitfield register reset disable */
-#define glb_reg_res_dis_mskn 0xffffbfff
+#define HW_ATL_GLB_REG_RES_DIS_MSKN 0xffffbfff
/* lower bit position of bitfield register reset disable */
-#define glb_reg_res_dis_shift 14
+#define HW_ATL_GLB_REG_RES_DIS_SHIFT 14
/* width of bitfield register reset disable */
-#define glb_reg_res_dis_width 1
+#define HW_ATL_GLB_REG_RES_DIS_WIDTH 1
/* default value of bitfield register reset disable */
-#define glb_reg_res_dis_default 0x1
+#define HW_ATL_GLB_REG_RES_DIS_DEFAULT 0x1
/* tx dma debug control definitions */
-#define tx_dma_debug_ctl_adr 0x00008920u
+#define HW_ATL_TX_DMA_DEBUG_CTL_ADR 0x00008920u
/* tx dma descriptor base address msw definitions */
-#define tx_dma_desc_base_addrmsw_adr(descriptor) \
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
(0x00007c04u + (descriptor) * 0x40)
+/* tx dma total request limit */
+#define HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR 0x00007b20u
+
/* tx interrupt moderation control register definitions
* Preprocessor definitions for TX Interrupt Moderation Control Register
* Base Address: 0x00008980
* Parameter: queue {Q} | stride size 0x4 | range [0, 31]
*/
-#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4)
+#define HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue) (0x00008980u + (queue) * 0x4)
/* pcie reg_res_dsbl bitfield definitions
* preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -2357,19 +2368,23 @@
*/
/* register address for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_adr 0x00001000
+#define HW_ATL_PCI_REG_RES_DSBL_ADR 0x00001000
/* bitmask for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_PCI_REG_RES_DSBL_MSK 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_PCI_REG_RES_DSBL_MSKN 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_shift 29
+#define HW_ATL_PCI_REG_RES_DSBL_SHIFT 29
/* width of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_width 1
+#define HW_ATL_PCI_REG_RES_DSBL_WIDTH 1
/* default value of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_default 0x1
+#define HW_ATL_PCI_REG_RES_DSBL_DEFAULT 0x1
+
+/* PCI core control register */
+#define HW_ATL_PCI_REG_CONTROL6_ADR 0x1014u
/* global microprocessor scratch pad definitions */
-#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
+#define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \
+ (0x00000300u + (scratch_scp) * 0x4)
#endif /* HW_ATL_LLH_INTERNAL_H */
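Every group renamed above keeps the same macro shape (address, mask, inverted mask, shift, width, plus an optional default), and the driver's accessors consume them through aq_hw_write_reg_bit(), which this patch itself calls in hw_atl_utils.c below. A sketch of a typical setter, using the tx_buf_en field as an arbitrary example (the function name is hypothetical):

/* Sketch: program one bitfield through the address/mask/shift macros. */
static void hw_atl_tpb_tx_buf_en_set_sketch(struct aq_hw_s *self, u32 en)
{
	aq_hw_write_reg_bit(self, HW_ATL_TPB_TX_BUF_EN_ADR,
			    HW_ATL_TPB_TX_BUF_EN_MSK,
			    HW_ATL_TPB_TX_BUF_EN_SHIFT, en);
}

The inverted *_MSKN values let open-coded callers clear a field with a single AND before OR-ing in the shifted value, and the *_DEFAULT entries document the hardware reset values.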
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 1fe016fc4bc7..967f0fd07fcf 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -11,41 +11,244 @@
* abstraction layer.
*/
-#include "../aq_hw.h"
+#include "../aq_nic.h"
#include "../aq_hw_utils.h"
-#include "../aq_pci_func.h"
-#include "../aq_ring.h"
-#include "../aq_vec.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
+#include "hw_atl_llh_internal.h"
#include <linux/random.h>
#define HW_ATL_UCP_0X370_REG 0x0370U
#define HW_ATL_FW_SM_RAM 0x2U
+#define HW_ATL_MPI_FW_VERSION 0x18
#define HW_ATL_MPI_CONTROL_ADR 0x0368U
#define HW_ATL_MPI_STATE_ADR 0x036CU
#define HW_ATL_MPI_STATE_MSK 0x00FFU
#define HW_ATL_MPI_STATE_SHIFT 0U
-#define HW_ATL_MPI_SPEED_MSK 0xFFFFU
+#define HW_ATL_MPI_SPEED_MSK 0xFFFF0000U
#define HW_ATL_MPI_SPEED_SHIFT 16U
-static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
- u32 *p, u32 cnt)
+#define HW_ATL_MPI_DAISY_CHAIN_STATUS 0x704
+#define HW_ATL_MPI_BOOT_EXIT_CODE 0x388
+
+#define HW_ATL_MAC_PHY_CONTROL 0x4000
+#define HW_ATL_MAC_PHY_MPI_RESET_BIT 0x1D
+
+#define HW_ATL_FW_VER_1X 0x01050006U
+#define HW_ATL_FW_VER_2X 0x02000000U
+#define HW_ATL_FW_VER_3X 0x03000000U
+
+#define FORCE_FLASHLESS 0
+
+static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
+
+int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
+{
+ int err = 0;
+
+ err = hw_atl_utils_soft_reset(self);
+ if (err)
+ return err;
+
+ hw_atl_utils_hw_chip_features_init(self,
+ &self->chip_features);
+
+ hw_atl_utils_get_fw_version(self, &self->fw_ver_actual);
+
+ if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_fw_1x_ops;
+ } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_fw_2x_ops;
+ } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X,
+ self->fw_ver_actual) == 0) {
+ *fw_ops = &aq_fw_2x_ops;
+ } else {
+ aq_pr_err("Bad FW version detected: %x\n",
+ self->fw_ver_actual);
+ return -EOPNOTSUPP;
+ }
+ self->aq_fw_ops = *fw_ops;
+ err = self->aq_fw_ops->init(self);
+ return err;
+}
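
The init path above picks an aq_fw_ops table by matching the firmware version read from the chip. hw_atl_utils_ver_match() itself is not shown in this hunk, so the matching policy in the sketch below (compare the major-version byte) is an assumption; this is a userspace stand-in for the dispatch shape, not the driver code.

#include <stdint.h>
#include <stdio.h>

struct fw_ops { const char *name; };

static const struct fw_ops fw_1x_ops = { "1.x" };
static const struct fw_ops fw_2x_ops = { "2.x/3.x" };

/* Assumed policy: versions match when the major byte (bits 31:24) agrees. */
static int ver_match(uint32_t expected, uint32_t actual)
{
	return ((expected ^ actual) & 0xFF000000u) ? -1 : 0;
}

static const struct fw_ops *pick_fw_ops(uint32_t fw_ver)
{
	if (!ver_match(0x01050006u, fw_ver))
		return &fw_1x_ops;
	if (!ver_match(0x02000000u, fw_ver) || !ver_match(0x03000000u, fw_ver))
		return &fw_2x_ops;	/* 3.x reuses the 2.x ops in this patch */
	return NULL;			/* unsupported: caller reports bad FW */
}

int main(void)
{
	const struct fw_ops *ops = pick_fw_ops(0x03010055u);

	printf("selected ops: %s\n", ops ? ops->name : "none");
	return 0;
}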
+
+static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
+{
+ int k = 0;
+ u32 gsr;
+
+ aq_hw_write_reg(self, 0x404, 0x40e1);
+ AQ_HW_SLEEP(50);
+
+ /* Cleanup SPI */
+ aq_hw_write_reg(self, 0x534, 0xA0);
+ aq_hw_write_reg(self, 0x100, 0x9F);
+ aq_hw_write_reg(self, 0x100, 0x809F);
+
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
+
+ /* Kickstart MAC */
+ aq_hw_write_reg(self, 0x404, 0x80e0);
+ aq_hw_write_reg(self, 0x32a8, 0x0);
+ aq_hw_write_reg(self, 0x520, 0x1);
+ AQ_HW_SLEEP(10);
+ aq_hw_write_reg(self, 0x404, 0x180e0);
+
+ for (k = 0; k < 1000; k++) {
+ u32 flb_status = aq_hw_read_reg(self,
+ HW_ATL_MPI_DAISY_CHAIN_STATUS);
+
+ flb_status = flb_status & 0x10;
+ if (flb_status)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ aq_pr_err("MAC kickstart failed\n");
+ return -EIO;
+ }
+
+ /* FW reset */
+ aq_hw_write_reg(self, 0x404, 0x80e0);
+ AQ_HW_SLEEP(50);
+ aq_hw_write_reg(self, 0x3a0, 0x1);
+
+ /* Kickstart PHY - skipped */
+
+ /* Global software reset */
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
+ aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
+ BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
+ HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
+
+ for (k = 0; k < 1000; k++) {
+ u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
+
+ if (fw_state)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ aq_pr_err("FW kickstart failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
+{
+ u32 gsr, rbl_status;
+ int k;
+
+ aq_hw_write_reg(self, 0x404, 0x40e1);
+ aq_hw_write_reg(self, 0x3a0, 0x1);
+ aq_hw_write_reg(self, 0x32a8, 0x0);
+
+ /* Alter RBL status */
+ aq_hw_write_reg(self, 0x388, 0xDEAD);
+
+ /* Global software reset */
+ hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+ hw_atl_tx_tx_reg_res_dis_set(self, 0U);
+ aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
+ BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
+ HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
+ gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
+ aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR,
+ (gsr & 0xFFFFBFFF) | 0x8000);
+
+ if (FORCE_FLASHLESS)
+ aq_hw_write_reg(self, 0x534, 0x0);
+
+ aq_hw_write_reg(self, 0x404, 0x40e0);
+
+ /* Wait for RBL boot */
+ for (k = 0; k < 1000; k++) {
+ rbl_status = aq_hw_read_reg(self, 0x388) & 0xFFFF;
+ if (rbl_status && rbl_status != 0xDEAD)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (!rbl_status || rbl_status == 0xDEAD) {
+ aq_pr_err("RBL Restart failed");
+ return -EIO;
+ }
+
+ /* Restore NVR */
+ if (FORCE_FLASHLESS)
+ aq_hw_write_reg(self, 0x534, 0xA0);
+
+ if (rbl_status == 0xF1A7) {
+ aq_pr_err("No FW detected. Dynamic FW load not implemented\n");
+ return -ENOTSUPP;
+ }
+
+ for (k = 0; k < 1000; k++) {
+ u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
+
+ if (fw_state)
+ break;
+ AQ_HW_SLEEP(10);
+ }
+ if (k == 1000) {
+ aq_pr_err("FW kickstart failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hw_atl_utils_soft_reset(struct aq_hw_s *self)
+{
+ int k;
+ u32 boot_exit_code = 0;
+
+ for (k = 0; k < 1000; ++k) {
+ u32 flb_status = aq_hw_read_reg(self,
+ HW_ATL_MPI_DAISY_CHAIN_STATUS);
+ boot_exit_code = aq_hw_read_reg(self,
+ HW_ATL_MPI_BOOT_EXIT_CODE);
+ if (flb_status != 0x06000000 || boot_exit_code != 0)
+ break;
+ }
+
+ if (k == 1000) {
+ aq_pr_err("Neither RBL nor FLB firmware started\n");
+ return -EOPNOTSUPP;
+ }
+
+ self->rbl_enabled = (boot_exit_code != 0);
+
+ if (self->rbl_enabled)
+ return hw_atl_utils_soft_reset_rbl(self);
+ else
+ return hw_atl_utils_soft_reset_flb(self);
+}
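
Both reset flavours lean on the same bounded-poll idiom: read a status register up to N times with a sleep between attempts, and fail with -EIO if the condition never becomes true. A minimal sketch of that idiom, with a stubbed register backend standing in for aq_hw_read_reg() and AQ_HW_SLEEP():

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Stubbed register backend: pretends firmware posts a version on read 3. */
static uint32_t reg_read(uint32_t addr)
{
	static int calls;

	(void)addr;
	return ++calls >= 3 ? 0x03010055u : 0u;
}

static void sleep_ms(unsigned int ms) { (void)ms; /* no-op in the sketch */ }

static int poll_reg_nonzero(uint32_t addr, int tries, unsigned int delay_ms,
			    uint32_t *out)
{
	int k;

	for (k = 0; k < tries; k++) {
		*out = reg_read(addr);
		if (*out)
			return 0;	/* condition met */
		sleep_ms(delay_ms);
	}
	return -EIO;			/* e.g. "FW kickstart failed" */
}

int main(void)
{
	uint32_t fw_ver;
	int err = poll_reg_nonzero(0x18, 1000, 10, &fw_ver);

	printf("err=%d fw_ver=0x%x\n", err, fw_ver);
	return 0;
}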
+
+int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+ u32 *p, u32 cnt)
{
int err = 0;
- AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self,
- HW_ATL_FW_SM_RAM) == 1U,
- 1U, 10000U);
+ AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self,
+ HW_ATL_FW_SM_RAM) == 1U,
+ 1U, 10000U);
if (err < 0) {
bool is_locked;
- reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
- is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
if (!is_locked) {
err = -ETIME;
goto err_exit;
@@ -66,7 +269,7 @@ static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
*(p++) = aq_hw_read_reg(self, 0x0000020CU);
}
- reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
err_exit:
return err;
@@ -78,7 +281,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
int err = 0;
bool is_locked;
- is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
if (!is_locked) {
err = -ETIME;
goto err_exit;
@@ -97,7 +300,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
}
}
- reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
err_exit:
return err;
@@ -119,7 +322,7 @@ err_exit:
}
static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps)
+ const struct aq_hw_caps_s *aq_hw_caps)
{
int err = 0;
@@ -133,20 +336,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
}
- reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
+ hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
/* check 10 times by 1ms */
- AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr =
+ AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
- err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
- aq_hw_read_reg(self, 0x18U));
-
- if (err < 0)
- pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n",
- AQ_CFG_DRV_NAME,
- aq_hw_caps->fw_ver_expected,
- aq_hw_read_reg(self, 0x18U));
return err;
}
@@ -174,14 +369,14 @@ static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
err = -1;
goto err_exit;
}
- err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr,
- (u32 *)(void *)&PHAL_ATLANTIC->rpc,
+ err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
+ (u32 *)(void *)&self->rpc,
(rpc_size + sizeof(u32) -
sizeof(u8)) / sizeof(u32));
if (err < 0)
goto err_exit;
- sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid);
+ sw.tid = 0xFFFFU & (++self->rpc_tid);
sw.len = (u16)rpc_size;
aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);
@@ -199,7 +394,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
do {
sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
- PHAL_ATLANTIC->rpc_tid = sw.tid;
+ self->rpc_tid = sw.tid;
AQ_HW_WAIT_FOR(sw.tid ==
(fw.val =
@@ -221,9 +416,9 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
if (fw.len) {
err =
hw_atl_utils_fw_downld_dwords(self,
- PHAL_ATLANTIC->rpc_addr,
+ self->rpc_addr,
(u32 *)(void *)
- &PHAL_ATLANTIC->rpc,
+ &self->rpc,
(fw.len + sizeof(u32) -
sizeof(u8)) /
sizeof(u32));
@@ -231,19 +426,18 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
goto err_exit;
}
- *rpc = &PHAL_ATLANTIC->rpc;
+ *rpc = &self->rpc;
}
err_exit:
return err;
}
-static int hw_atl_utils_mpi_create(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps)
+static int hw_atl_utils_mpi_create(struct aq_hw_s *self)
{
int err = 0;
- err = hw_atl_utils_init_ucp(self, aq_hw_caps);
+ err = hw_atl_utils_init_ucp(self, self->aq_nic_cfg->aq_hw_caps);
if (err < 0)
goto err_exit;
@@ -259,7 +453,7 @@ int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
struct hw_aq_atl_utils_mbox_header *pmbox)
{
return hw_atl_utils_fw_downld_dwords(self,
- PHAL_ATLANTIC->mbox_addr,
+ self->mbox_addr,
(u32 *)(void *)pmbox,
sizeof(*pmbox) / sizeof(u32));
}
@@ -270,7 +464,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
int err = 0;
err = hw_atl_utils_fw_downld_dwords(self,
- PHAL_ATLANTIC->mbox_addr,
+ self->mbox_addr,
(u32 *)(void *)pmbox,
sizeof(*pmbox) / sizeof(u32));
if (err < 0)
@@ -281,27 +475,27 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
self->aq_nic_cfg->mtu : 1514U;
pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
- pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc);
+ pmbox->stats.dpc = atomic_read(&self->dpc);
} else {
- pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self);
+ pmbox->stats.dpc = hw_atl_reg_rx_dma_stat_counter7get(self);
}
err_exit:;
}
-int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
- enum hal_atl_utils_fw_state_e state)
+int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
{
- u32 ucp_0x368 = 0;
+ u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
- ucp_0x368 = (speed << HW_ATL_MPI_SPEED_SHIFT) | state;
- aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ucp_0x368);
+ val = (val & HW_ATL_MPI_STATE_MSK) | (speed << HW_ATL_MPI_SPEED_SHIFT);
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
return 0;
}
void hw_atl_utils_mpi_set(struct aq_hw_s *self,
- enum hal_atl_utils_fw_state_e state, u32 speed)
+ enum hal_atl_utils_fw_state_e state,
+ u32 speed)
{
int err = 0;
u32 transaction_id = 0;
@@ -320,11 +514,22 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
goto err_exit;
}
- err = hw_atl_utils_mpi_set_speed(self, speed, state);
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR,
+ (speed << HW_ATL_MPI_SPEED_SHIFT) | state);
err_exit:;
}
+static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
+{
+ u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+
+ val = state | (val & HW_ATL_MPI_SPEED_MSK);
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
+ return 0;
+}
+
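
With HW_ATL_MPI_SPEED_MSK now covering the upper half-word, set_speed and set_state become read-modify-write peers over the shared control register at 0x368: each preserves the other's field. A compact sketch of that split, with a plain variable standing in for the register:

#include <stdint.h>
#include <stdio.h>

#define MPI_STATE_MSK	0x00FFu		/* low byte: firmware state */
#define MPI_SPEED_MSK	0xFFFF0000u	/* high half: requested link speed */
#define MPI_SPEED_SHIFT	16u

static uint32_t mpi_control = 0x00000002u;	/* stand-in for reg 0x368 */

static void mpi_set_speed(uint32_t speed)
{
	/* keep the state bits, replace only the speed field */
	mpi_control = (mpi_control & MPI_STATE_MSK) |
		      (speed << MPI_SPEED_SHIFT);
}

static void mpi_set_state(uint32_t state)
{
	/* keep the speed bits, replace only the state field */
	mpi_control = state | (mpi_control & MPI_SPEED_MSK);
}

int main(void)
{
	mpi_set_speed(0x800);	/* e.g. a 10G rate bit */
	mpi_set_state(0x4);	/* state change leaves the speed intact */
	printf("control=0x%08x\n", mpi_control);
	return 0;
}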
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
{
u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
@@ -365,7 +570,6 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
}
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
u8 *mac)
{
int err = 0;
@@ -373,15 +577,6 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
u32 l = 0U;
u32 mac_addr[2];
- self->mmio = aq_pci_func_get_mmio(self->aq_pci_func);
-
- hw_atl_utils_hw_chip_features_init(self,
- &PHAL_ATLANTIC_A0->chip_features);
-
- err = hw_atl_utils_mpi_create(self, aq_hw_caps);
- if (err < 0)
- goto err_exit;
-
if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
unsigned int rnd = 0;
unsigned int ucp_0x370 = 0;
@@ -396,7 +591,7 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
aq_hw_read_reg(self, 0x00000374U) +
(40U * 4U),
mac_addr,
- AQ_DIMOF(mac_addr));
+ ARRAY_SIZE(mac_addr));
if (err < 0) {
mac_addr[0] = 0U;
mac_addr[1] = 0U;
@@ -427,7 +622,6 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
mac[0] = (u8)(0xFFU & h);
}
-err_exit:
return err;
}
@@ -465,7 +659,7 @@ unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
{
u32 chip_features = 0U;
- u32 val = reg_glb_mif_id_get(self);
+ u32 val = hw_atl_reg_glb_mif_id_get(self);
u32 mif_rev = val & 0xFFU;
if ((3U & mif_rev) == 1U) {
@@ -500,76 +694,46 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
- struct hw_atl_s *hw_self = PHAL_ATLANTIC;
struct hw_aq_atl_utils_mbox mbox;
- if (!self->aq_link_status.mbps)
- return 0;
-
hw_atl_utils_mpi_read_stats(self, &mbox);
-#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
- mbox.stats._N_ - hw_self->last_stats._N_)
-
- AQ_SDELTA(uprc);
- AQ_SDELTA(mprc);
- AQ_SDELTA(bprc);
- AQ_SDELTA(erpt);
-
- AQ_SDELTA(uptc);
- AQ_SDELTA(mptc);
- AQ_SDELTA(bptc);
- AQ_SDELTA(erpr);
-
- AQ_SDELTA(ubrc);
- AQ_SDELTA(ubtc);
- AQ_SDELTA(mbrc);
- AQ_SDELTA(mbtc);
- AQ_SDELTA(bbrc);
- AQ_SDELTA(bbtc);
- AQ_SDELTA(dpc);
-
+#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
+ mbox.stats._N_ - self->last_stats._N_)
+
+ if (self->aq_link_status.mbps) {
+ AQ_SDELTA(uprc);
+ AQ_SDELTA(mprc);
+ AQ_SDELTA(bprc);
+ AQ_SDELTA(erpt);
+
+ AQ_SDELTA(uptc);
+ AQ_SDELTA(mptc);
+ AQ_SDELTA(bptc);
+ AQ_SDELTA(erpr);
+
+ AQ_SDELTA(ubrc);
+ AQ_SDELTA(ubtc);
+ AQ_SDELTA(mbrc);
+ AQ_SDELTA(mbtc);
+ AQ_SDELTA(bbrc);
+ AQ_SDELTA(bbtc);
+ AQ_SDELTA(dpc);
+ }
#undef AQ_SDELTA
+ self->curr_stats.dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counterlsw_get(self);
+ self->curr_stats.dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counterlsw_get(self);
+ self->curr_stats.dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counterlsw_get(self);
+ self->curr_stats.dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counterlsw_get(self);
- memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
+ memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));
return 0;
}
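
AQ_SDELTA accumulates the difference between the current mailbox snapshot and the previous one, so the driver's totals keep growing across firmware counter resets. A sketch of the same delta pattern; the wraparound behaviour shown relies on unsigned arithmetic and is an illustration, not a claim about the mailbox counter widths:

#include <stdint.h>
#include <stdio.h>

/* curr accumulates 64-bit totals from a free-running 32-bit counter;
 * unsigned subtraction keeps the delta correct even across a wrap.
 */
struct stat { uint64_t curr; uint32_t last; };

static void stat_update(struct stat *s, uint32_t now)
{
	s->curr += now - s->last;
	s->last = now;
}

int main(void)
{
	struct stat s = { 0, 0xFFFFFFF0u };

	stat_update(&s, 0x00000010u);	/* counter wrapped */
	printf("delta accumulated: %llu\n",
	       (unsigned long long)s.curr);	/* prints 32 */
	return 0;
}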
-int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
- u64 *data, unsigned int *p_count)
+struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
{
- struct hw_atl_s *hw_self = PHAL_ATLANTIC;
- struct hw_atl_stats_s *stats = &hw_self->curr_stats;
- int i = 0;
-
- data[i] = stats->uprc + stats->mprc + stats->bprc;
- data[++i] = stats->uprc;
- data[++i] = stats->mprc;
- data[++i] = stats->bprc;
- data[++i] = stats->erpt;
- data[++i] = stats->uptc + stats->mptc + stats->bptc;
- data[++i] = stats->uptc;
- data[++i] = stats->mptc;
- data[++i] = stats->bptc;
- data[++i] = stats->ubrc;
- data[++i] = stats->ubtc;
- data[++i] = stats->mbrc;
- data[++i] = stats->mbtc;
- data[++i] = stats->bbrc;
- data[++i] = stats->bbtc;
- data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
- data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
- data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self);
- data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self);
- data[++i] = stats_rx_dma_good_octet_counterlsw_get(self);
- data[++i] = stats_tx_dma_good_octet_counterlsw_get(self);
- data[++i] = stats->dpc;
-
- if (p_count)
- *p_count = ++i;
-
- return 0;
+ return &self->curr_stats;
}
static const u32 hw_atl_utils_hw_mac_regs[] = {
@@ -598,14 +762,14 @@ static const u32 hw_atl_utils_hw_mac_regs[] = {
};
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
+ const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff)
{
unsigned int i = 0U;
for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
regs_buff[i] = aq_hw_read_reg(self,
- hw_atl_utils_hw_mac_regs[i]);
+ hw_atl_utils_hw_mac_regs[i]);
return 0;
}
@@ -614,3 +778,13 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
*fw_version = aq_hw_read_reg(self, 0x18U);
return 0;
}
+
+const struct aq_fw_ops aq_fw_1x_ops = {
+ .init = hw_atl_utils_mpi_create,
+ .reset = NULL,
+ .get_mac_permanent = hw_atl_utils_get_mac_permanent,
+ .set_link_speed = hw_atl_utils_mpi_set_speed,
+ .set_state = hw_atl_utils_mpi_set_state,
+ .update_link_status = hw_atl_utils_mpi_get_link_status,
+ .update_stats = hw_atl_utils_update_stats,
+};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index c99cc690e425..2c690947910a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -14,10 +14,39 @@
#ifndef HW_ATL_UTILS_H
#define HW_ATL_UTILS_H
-#include "../aq_common.h"
-
#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); }
+/* Hardware tx descriptor */
+struct __packed hw_atl_txd_s {
+ u64 buf_addr;
+ u32 ctl;
+ u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
+};
+
+/* Hardware tx context descriptor */
+struct __packed hw_atl_txc_s {
+ u32 rsvd;
+ u32 len;
+ u32 ctl;
+ u32 len2;
+};
+
+/* Hardware rx descriptor */
+struct __packed hw_atl_rxd_s {
+ u64 buf_addr;
+ u64 hdr_addr;
+};
+
+/* Hardware rx descriptor writeback */
+struct __packed hw_atl_rxd_wb_s {
+ u32 type;
+ u32 rss_hash;
+ u16 status;
+ u16 pkt_len;
+ u16 next_desc_ptr;
+ u16 vlan;
+};
+
struct __packed hw_atl_stats_s {
u32 uprc;
u32 mprc;
@@ -126,26 +155,6 @@ struct __packed hw_aq_atl_utils_mbox {
struct hw_atl_stats_s stats;
};
-struct __packed hw_atl_s {
- struct aq_hw_s base;
- struct hw_atl_stats_s last_stats;
- struct hw_atl_stats_s curr_stats;
- u64 speed;
- unsigned int chip_features;
- u32 fw_ver_actual;
- atomic_t dpc;
- u32 mbox_addr;
- u32 rpc_addr;
- u32 rpc_tid;
- struct hw_aq_atl_utils_fw_rpc rpc;
-};
-
-#define SELF ((struct hw_atl_s *)self)
-
-#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self)))
-#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self)))
-#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self)))
-
#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
@@ -154,7 +163,7 @@ struct __packed hw_atl_s {
#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
- PHAL_ATLANTIC->chip_features)
+ self->chip_features)
enum hal_atl_utils_fw_state_e {
MPI_DEINIT = 0,
@@ -171,6 +180,73 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
+enum hw_atl_fw2x_rate {
+ FW2X_RATE_100M = 0x20,
+ FW2X_RATE_1G = 0x100,
+ FW2X_RATE_2G5 = 0x200,
+ FW2X_RATE_5G = 0x400,
+ FW2X_RATE_10G = 0x800,
+};
+
+enum hw_atl_fw2x_caps_lo {
+ CAPS_LO_10BASET_HD = 0x00,
+ CAPS_LO_10BASET_FD,
+ CAPS_LO_100BASETX_HD,
+ CAPS_LO_100BASET4_HD,
+ CAPS_LO_100BASET2_HD,
+ CAPS_LO_100BASETX_FD,
+ CAPS_LO_100BASET2_FD,
+ CAPS_LO_1000BASET_HD,
+ CAPS_LO_1000BASET_FD,
+ CAPS_LO_2P5GBASET_FD,
+ CAPS_LO_5GBASET_FD,
+ CAPS_LO_10GBASET_FD,
+};
+
+enum hw_atl_fw2x_caps_hi {
+ CAPS_HI_RESERVED1 = 0x00,
+ CAPS_HI_10BASET_EEE,
+ CAPS_HI_RESERVED2,
+ CAPS_HI_PAUSE,
+ CAPS_HI_ASYMMETRIC_PAUSE,
+ CAPS_HI_100BASETX_EEE,
+ CAPS_HI_RESERVED3,
+ CAPS_HI_RESERVED4,
+ CAPS_HI_1000BASET_FD_EEE,
+ CAPS_HI_2P5GBASET_FD_EEE,
+ CAPS_HI_5GBASET_FD_EEE,
+ CAPS_HI_10GBASET_FD_EEE,
+ CAPS_HI_RESERVED5,
+ CAPS_HI_RESERVED6,
+ CAPS_HI_RESERVED7,
+ CAPS_HI_RESERVED8,
+ CAPS_HI_RESERVED9,
+ CAPS_HI_CABLE_DIAG,
+ CAPS_HI_TEMPERATURE,
+ CAPS_HI_DOWNSHIFT,
+ CAPS_HI_PTP_AVB_EN,
+ CAPS_HI_MEDIA_DETECT,
+ CAPS_HI_LINK_DROP,
+ CAPS_HI_SLEEP_PROXY,
+ CAPS_HI_WOL,
+ CAPS_HI_MAC_STOP,
+ CAPS_HI_EXT_LOOPBACK,
+ CAPS_HI_INT_LOOPBACK,
+ CAPS_HI_EFUSE_AGENT,
+ CAPS_HI_WOL_TIMER,
+ CAPS_HI_STATISTICS,
+ CAPS_HI_TRANSACTION_ID,
+};
+
+struct aq_hw_s;
+struct aq_fw_ops;
+struct aq_hw_caps_s;
+struct aq_hw_link_status_s;
+
+int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
+
+int hw_atl_utils_soft_reset(struct aq_hw_s *self);
+
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
@@ -183,19 +259,15 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state,
u32 speed);
-int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
- enum hal_atl_utils_fw_state_e state);
-
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
u8 *mac);
unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps,
+ const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff);
int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
@@ -207,8 +279,11 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
int hw_atl_utils_update_stats(struct aq_hw_s *self);
-int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
- u64 *data,
- unsigned int *p_count);
+struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
+int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+ u32 *p, u32 cnt);
+
+extern const struct aq_fw_ops aq_fw_1x_ops;
+extern const struct aq_fw_ops aq_fw_2x_ops;
#endif /* HW_ATL_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
new file mode 100644
index 000000000000..8cfce95c82fc
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -0,0 +1,184 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for
+ * Atlantic hardware abstraction layer.
+ */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_pci_func.h"
+#include "../aq_ring.h"
+#include "../aq_vec.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+
+#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364
+#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
+
+#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368
+#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C
+
+#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
+#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+
+static int aq_fw2x_init(struct aq_hw_s *self)
+{
+ int err = 0;
+
+ /* check 10 times by 1ms */
+ AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
+ aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)),
+ 1000U, 10U);
+ return err;
+}
+
+static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
+{
+ enum hw_atl_fw2x_rate rate = 0;
+
+ if (speed & AQ_NIC_RATE_10G)
+ rate |= FW2X_RATE_10G;
+
+ if (speed & AQ_NIC_RATE_5G)
+ rate |= FW2X_RATE_5G;
+
+ if (speed & AQ_NIC_RATE_5GSR)
+ rate |= FW2X_RATE_5G;
+
+ if (speed & AQ_NIC_RATE_2GS)
+ rate |= FW2X_RATE_2G5;
+
+ if (speed & AQ_NIC_RATE_1G)
+ rate |= FW2X_RATE_1G;
+
+ if (speed & AQ_NIC_RATE_100M)
+ rate |= FW2X_RATE_100M;
+
+ return rate;
+}
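
link_speed_mask_2fw2x_ratemask() is a bit-for-bit translation between the generic AQ_NIC_RATE_* mask and the FW2X rate bits. The sketch below does the same with a lookup table; the generic RATE_* values are hypothetical, since AQ_NIC_RATE_* is defined outside this patch:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical generic rate bits (the real AQ_NIC_RATE_* values are not
 * shown in this patch).
 */
#define RATE_100M	(1u << 0)
#define RATE_1G		(1u << 1)
#define RATE_2G5	(1u << 2)
#define RATE_5G		(1u << 3)
#define RATE_10G	(1u << 4)

/* FW2X rate bits from the patch */
#define FW2X_RATE_100M	0x20u
#define FW2X_RATE_1G	0x100u
#define FW2X_RATE_2G5	0x200u
#define FW2X_RATE_5G	0x400u
#define FW2X_RATE_10G	0x800u

static uint32_t to_fw2x_ratemask(uint32_t speed)
{
	static const struct { uint32_t in, out; } map[] = {
		{ RATE_100M, FW2X_RATE_100M }, { RATE_1G, FW2X_RATE_1G },
		{ RATE_2G5, FW2X_RATE_2G5 },   { RATE_5G, FW2X_RATE_5G },
		{ RATE_10G, FW2X_RATE_10G },
	};
	uint32_t rate = 0;
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (speed & map[i].in)
			rate |= map[i].out;
	return rate;
}

int main(void)
{
	printf("0x%x\n", to_fw2x_ratemask(RATE_10G | RATE_1G)); /* 0x900 */
	return 0;
}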
+
+static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
+{
+ u32 val = link_speed_mask_2fw2x_ratemask(speed);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL_ADDR, val);
+
+ return 0;
+}
+
+static int aq_fw2x_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state)
+{
+ /* No explicit state in 2x fw */
+ return 0;
+}
+
+static int aq_fw2x_update_link_status(struct aq_hw_s *self)
+{
+ u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
+ u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
+ FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G);
+ struct aq_hw_link_status_s *link_status = &self->aq_link_status;
+
+ if (speed) {
+ if (speed & FW2X_RATE_10G)
+ link_status->mbps = 10000;
+ else if (speed & FW2X_RATE_5G)
+ link_status->mbps = 5000;
+ else if (speed & FW2X_RATE_2G5)
+ link_status->mbps = 2500;
+ else if (speed & FW2X_RATE_1G)
+ link_status->mbps = 1000;
+ else if (speed & FW2X_RATE_100M)
+ link_status->mbps = 100;
+ else
+ link_status->mbps = 10000;
+ } else {
+ link_status->mbps = 0;
+ }
+
+ return 0;
+}
+
+static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
+{
+ int err = 0;
+ u32 h = 0U;
+ u32 l = 0U;
+ u32 mac_addr[2] = { 0 };
+ u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR);
+
+ if (efuse_addr != 0) {
+ err = hw_atl_utils_fw_downld_dwords(self,
+ efuse_addr + (40U * 4U),
+ mac_addr,
+ ARRAY_SIZE(mac_addr));
+ if (err)
+ return err;
+ mac_addr[0] = __swab32(mac_addr[0]);
+ mac_addr[1] = __swab32(mac_addr[1]);
+ }
+
+ ether_addr_copy(mac, (u8 *)mac_addr);
+
+ if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
+ unsigned int rnd = 0;
+
+ get_random_bytes(&rnd, sizeof(unsigned int));
+
+ l = 0xE3000000U
+ | (0xFFFFU & rnd)
+ | (0x00 << 16);
+ h = 0x8001300EU;
+
+ mac[5] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[4] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[3] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[2] = (u8)(0xFFU & l);
+ mac[1] = (u8)(0xFFU & h);
+ h >>= 8;
+ mac[0] = (u8)(0xFFU & h);
+ }
+ return err;
+}
+
+static int aq_fw2x_update_stats(struct aq_hw_s *self)
+{
+ int err = 0;
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS);
+
+ /* Toggle statistics bit for FW to update */
+ mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS);
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ /* Wait for FW to report back */
+ AQ_HW_WAIT_FOR(orig_stats_val !=
+ (aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
+ BIT(CAPS_HI_STATISTICS)),
+ 1U, 10000U);
+ if (err)
+ return err;
+
+ return hw_atl_utils_update_stats(self);
+}
+
+const struct aq_fw_ops aq_fw_2x_ops = {
+ .init = aq_fw2x_init,
+ .reset = NULL,
+ .get_mac_permanent = aq_fw2x_get_mac_permanent,
+ .set_link_speed = aq_fw2x_set_link_speed,
+ .set_state = aq_fw2x_set_state,
+ .update_link_status = aq_fw2x_update_link_status,
+ .update_stats = aq_fw2x_update_stats,
+};
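
aq_fw2x_update_stats() uses a toggle-and-mirror handshake: flip the CAPS_HI_STATISTICS bit in the control word, then wait for the firmware to reflect the new value in the state word before reading the mailbox. A self-contained sketch of that handshake, with a stub "firmware" that mirrors the bit after a few polls:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define STATS_BIT (1u << 1)

static uint32_t ctrl2, state2;

/* Stub firmware: mirrors the request bit back on the third poll. */
static uint32_t read_state2(void)
{
	static int polls;

	if (++polls >= 3)
		state2 = (state2 & ~STATS_BIT) | (ctrl2 & STATS_BIT);
	return state2;
}

static int request_stats_update(void)
{
	uint32_t orig = ctrl2 & STATS_BIT;
	int k;

	ctrl2 ^= STATS_BIT;		/* toggle the request bit */
	for (k = 0; k < 10000; k++)	/* wait for FW to mirror it */
		if ((read_state2() & STATS_BIT) != orig)
			return 0;
	return -ETIME;
}

int main(void)
{
	printf("err=%d\n", request_stats_update());
	return 0;
}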
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 0de858d215c2..5265b937677b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -10,9 +10,11 @@
#ifndef VER_H
#define VER_H
-#define NIC_MAJOR_DRIVER_VERSION 1
-#define NIC_MINOR_DRIVER_VERSION 5
-#define NIC_BUILD_DRIVER_VERSION 345
+#define NIC_MAJOR_DRIVER_VERSION 2
+#define NIC_MINOR_DRIVER_VERSION 0
+#define NIC_BUILD_DRIVER_VERSION 2
#define NIC_REVISION_DRIVER_VERSION 0
+#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
+
#endif /* VER_H */
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index 3c63b16d485f..d9efbc8d783b 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -159,6 +159,8 @@ struct arc_emac_priv {
unsigned int link;
unsigned int duplex;
unsigned int speed;
+
+ unsigned int rx_missed_errors;
};
/**
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 3241af1ce718..bd277b0dc615 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -26,6 +26,8 @@
#include "emac.h"
+static void arc_emac_restart(struct net_device *ndev);
+
/**
* arc_emac_tx_avail - Return the number of available slots in the tx ring.
* @priv: Pointer to ARC EMAC private data structure.
@@ -210,39 +212,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
continue;
}
- pktlen = info & LEN_MASK;
- stats->rx_packets++;
- stats->rx_bytes += pktlen;
- skb = rx_buff->skb;
- skb_put(skb, pktlen);
- skb->dev = ndev;
- skb->protocol = eth_type_trans(skb, ndev);
-
- dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
- dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
-
- /* Prepare the BD for next cycle */
- rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
- EMAC_BUFFER_SIZE);
- if (unlikely(!rx_buff->skb)) {
+ /* Prepare the BD for the next cycle. Call netif_receive_skb()
+ * only if a new skb was allocated and mapped, to avoid holes
+ * in the RX fifo.
+ */
+ skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
+ if (unlikely(!skb)) {
+ if (net_ratelimit())
+ netdev_err(ndev, "cannot allocate skb\n");
+ /* Return ownership to EMAC */
+ rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
stats->rx_errors++;
- /* Because receive_skb is below, increment rx_dropped */
stats->rx_dropped++;
continue;
}
- /* receive_skb only if new skb was allocated to avoid holes */
- netif_receive_skb(skb);
-
- addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+ addr = dma_map_single(&ndev->dev, (void *)skb->data,
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, addr)) {
if (net_ratelimit())
- netdev_err(ndev, "cannot dma map\n");
- dev_kfree_skb(rx_buff->skb);
+ netdev_err(ndev, "cannot map dma buffer\n");
+ dev_kfree_skb(skb);
+ /* Return ownership to EMAC */
+ rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
stats->rx_errors++;
+ stats->rx_dropped++;
continue;
}
+
+ /* unmap previously mapped skb */
+ dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+ dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+ pktlen = info & LEN_MASK;
+ stats->rx_packets++;
+ stats->rx_bytes += pktlen;
+ skb_put(rx_buff->skb, pktlen);
+ rx_buff->skb->dev = ndev;
+ rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
+
+ netif_receive_skb(rx_buff->skb);
+
+ rx_buff->skb = skb;
dma_unmap_addr_set(rx_buff, addr, addr);
dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
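
The reordered RX loop allocates and maps the replacement buffer before the received one is handed up, so a failed allocation leaves the old buffer in place (with ownership returned to the EMAC) instead of a hole in the ring. A minimal sketch of that deliver-after-refill discipline, with malloc()/free() standing in for skb allocation and netif_receive_skb():

#include <stdio.h>
#include <stdlib.h>

struct buf { char data[64]; };

struct rx_slot { struct buf *b; };

/* Deliver only after a replacement is secured: the slot is never
 * left without a buffer.
 */
static int rx_one(struct rx_slot *slot, unsigned long *dropped)
{
	struct buf *fresh = malloc(sizeof(*fresh));

	if (!fresh) {
		/* keep the old buffer, give the slot back to the MAC */
		(*dropped)++;
		return -1;
	}
	printf("delivering packet from %p\n", (void *)slot->b);
	free(slot->b);			/* stands in for netif_receive_skb() */
	slot->b = fresh;		/* slot immediately re-armed */
	return 0;
}

int main(void)
{
	unsigned long dropped = 0;
	struct rx_slot slot = { malloc(sizeof(struct buf)) };

	rx_one(&slot, &dropped);
	printf("dropped=%lu\n", dropped);
	free(slot.b);
	return 0;
}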
@@ -259,6 +270,53 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
}
/**
+ * arc_emac_rx_miss_handle - handle R_MISS register
+ * @ndev: Pointer to the net_device structure.
+ */
+static void arc_emac_rx_miss_handle(struct net_device *ndev)
+{
+ struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned int miss;
+
+ miss = arc_reg_get(priv, R_MISS);
+ if (miss) {
+ stats->rx_errors += miss;
+ stats->rx_missed_errors += miss;
+ priv->rx_missed_errors += miss;
+ }
+}
+
+/**
+ * arc_emac_rx_stall_check - check RX stall
+ * @ndev: Pointer to the net_device structure.
+ * @budget: How many BDs requested to process on 1 call.
+ * @work_done: How many BDs processed
+ *
+ * Under certain conditions EMAC stop reception of incoming packets and
+ * continuously increment R_MISS register instead of saving data into
+ * provided buffer. This function detect that condition and restart
+ * EMAC.
+ */
+static void arc_emac_rx_stall_check(struct net_device *ndev,
+ int budget, unsigned int work_done)
+{
+ struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct arc_emac_bd *rxbd;
+
+ if (work_done)
+ priv->rx_missed_errors = 0;
+
+ if (priv->rx_missed_errors && budget) {
+ rxbd = &priv->rxbd[priv->last_rx_bd];
+ if (le32_to_cpu(rxbd->info) & FOR_EMAC) {
+ arc_emac_restart(ndev);
+ priv->rx_missed_errors = 0;
+ }
+ }
+}
+
+/**
* arc_emac_poll - NAPI poll handler.
* @napi: Pointer to napi_struct structure.
* @budget: How many BDs to process on 1 call.
@@ -272,6 +330,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
unsigned int work_done;
arc_emac_tx_clean(ndev);
+ arc_emac_rx_miss_handle(ndev);
work_done = arc_emac_rx(ndev, budget);
if (work_done < budget) {
@@ -279,6 +338,8 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
}
+ arc_emac_rx_stall_check(ndev, budget, work_done);
+
return work_done;
}
@@ -320,6 +381,8 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
if (status & MSER_MASK) {
stats->rx_missed_errors += 0x100;
stats->rx_errors += 0x100;
+ priv->rx_missed_errors += 0x100;
+ napi_schedule(&priv->napi);
}
if (status & RXCR_MASK) {
@@ -732,6 +795,63 @@ static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
+/**
+ * arc_emac_restart - Restart EMAC
+ * @ndev: Pointer to net_device structure.
+ *
+ * This function does a hardware reset of the EMAC in order to restore
+ * network packet reception.
+ */
+static void arc_emac_restart(struct net_device *ndev)
+{
+ struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ int i;
+
+ if (net_ratelimit())
+ netdev_warn(ndev, "restarting stalled EMAC\n");
+
+ netif_stop_queue(ndev);
+
+ /* Disable interrupts */
+ arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
+
+ /* Disable EMAC */
+ arc_reg_clr(priv, R_CTRL, EN_MASK);
+
+ /* Return the sk_buff to system */
+ arc_free_tx_queue(ndev);
+
+ /* Clean Tx BD's */
+ priv->txbd_curr = 0;
+ priv->txbd_dirty = 0;
+ memset(priv->txbd, 0, TX_RING_SZ);
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ struct arc_emac_bd *rxbd = &priv->rxbd[i];
+ unsigned int info = le32_to_cpu(rxbd->info);
+
+ if (!(info & FOR_EMAC)) {
+ stats->rx_errors++;
+ stats->rx_dropped++;
+ }
+ /* Return ownership to EMAC */
+ rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
+ }
+ priv->last_rx_bd = 0;
+
+ /* Make sure info is visible to EMAC before enable */
+ wmb();
+
+ /* Enable interrupts */
+ arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
+
+ /* Enable EMAC */
+ arc_reg_or(priv, R_CTRL, EN_MASK);
+
+ netif_start_queue(ndev);
+}
+
static const struct net_device_ops arc_emac_netdev_ops = {
.ndo_open = arc_emac_open,
.ndo_stop = arc_emac_stop,
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index c6163874e4e7..16f9bee992fe 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -199,9 +199,11 @@ static int emac_rockchip_probe(struct platform_device *pdev)
/* RMII interface needs always a rate of 50MHz */
err = clk_set_rate(priv->refclk, 50000000);
- if (err)
+ if (err) {
dev_err(dev,
"failed to change reference clock rate (%d)\n", err);
+ goto out_regulator_disable;
+ }
if (priv->soc_data->need_div_macclk) {
priv->macclk = devm_clk_get(dev, "macclk");
@@ -230,12 +232,14 @@ static int emac_rockchip_probe(struct platform_device *pdev)
err = arc_emac_probe(ndev, interface);
if (err) {
dev_err(dev, "failed to probe arc emac (%d)\n", err);
- goto out_regulator_disable;
+ goto out_clk_disable_macclk;
}
return 0;
+
out_clk_disable_macclk:
- clk_disable_unprepare(priv->macclk);
+ if (priv->soc_data->need_div_macclk)
+ clk_disable_unprepare(priv->macclk);
out_regulator_disable:
if (priv->regulator)
regulator_disable(priv->regulator);
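
The emac_rockchip fix turns a logged-and-ignored clk_set_rate() failure into a jump onto the existing unwind ladder. The general shape, sketched below with placeholder resources: each failure branches to the label that releases everything acquired so far, in reverse order.

#include <errno.h>
#include <stdio.h>

static int acquire_regulator(void) { return 0; }
static void release_regulator(void) { puts("regulator off"); }
static int set_refclk(void) { return -EINVAL; }	/* simulate the failure */
static int enable_macclk(void) { return 0; }
static void disable_macclk(void) { puts("macclk off"); }
static int register_mac(void) { return 0; }

static int probe(void)
{
	int err;

	err = acquire_regulator();
	if (err)
		return err;

	err = set_refclk();
	if (err)
		goto out_regulator_disable;	/* the fix: stop here, undo */

	err = enable_macclk();
	if (err)
		goto out_regulator_disable;

	err = register_mac();
	if (err)
		goto out_clk_disable_macclk;

	return 0;

out_clk_disable_macclk:
	disable_macclk();
out_regulator_disable:
	release_regulator();
	return err;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}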
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index d9346e2ac720..14a59e51db67 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1716,7 +1716,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct bcm63xx_enet_platform_data *pd;
struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
struct mii_bus *bus;
- const char *clk_name;
int i, ret;
if (!bcm_enet_shared_base[0])
@@ -1751,20 +1750,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
dev->irq = priv->irq = res_irq->start;
priv->irq_rx = res_irq_rx->start;
priv->irq_tx = res_irq_tx->start;
- priv->mac_id = pdev->id;
- /* get rx & tx dma channel id for this mac */
- if (priv->mac_id == 0) {
- priv->rx_chan = 0;
- priv->tx_chan = 1;
- clk_name = "enet0";
- } else {
- priv->rx_chan = 2;
- priv->tx_chan = 3;
- clk_name = "enet1";
- }
-
- priv->mac_clk = devm_clk_get(&pdev->dev, clk_name);
+ priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
if (IS_ERR(priv->mac_clk)) {
ret = PTR_ERR(priv->mac_clk);
goto out;
@@ -1795,9 +1782,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->dma_chan_width = pd->dma_chan_width;
priv->dma_has_sram = pd->dma_has_sram;
priv->dma_desc_shift = pd->dma_desc_shift;
+ priv->rx_chan = pd->rx_chan;
+ priv->tx_chan = pd->tx_chan;
}
- if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
+ if (priv->has_phy && !priv->use_external_mii) {
/* using internal PHY, enable clock */
priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
if (IS_ERR(priv->phy_clk)) {
@@ -1828,7 +1817,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
bus->priv = priv;
bus->read = bcm_enet_mdio_read_phylib;
bus->write = bcm_enet_mdio_write_phylib;
- sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
+ sprintf(bus->id, "%s-%d", pdev->name, pdev->id);
/* only probe bus where we think the PHY is, because
* the mdio read operation return 0 instead of 0xffff
@@ -2139,27 +2128,25 @@ static int bcm_enetsw_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) {
dev_err(kdev, "cannot allocate rx ring %u\n", size);
ret = -ENOMEM;
goto out_freeirq_tx;
}
- memset(p, 0, size);
priv->rx_desc_alloc_size = size;
priv->rx_desc_cpu = p;
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) {
dev_err(kdev, "cannot allocate tx ring\n");
ret = -ENOMEM;
goto out_free_rx_ring;
}
- memset(p, 0, size);
priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 5a66728d4776..1d3c917eb830 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -193,9 +193,6 @@ struct bcm_enet_mib_counters {
struct bcm_enet_priv {
- /* mac id (from platform device id) */
- int mac_id;
-
/* base remapped address of device */
void __iomem *base;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 087f01b4dc3a..f15a8fc6dfc9 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1216,18 +1216,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
goto out;
}
- /* The Ethernet switch we are interfaced with needs packets to be at
- * least 64 bytes (including FCS) otherwise they will be discarded when
- * they enter the switch port logic. When Broadcom tags are enabled, we
- * need to make sure that packets are at least 68 bytes
- * (including FCS and tag) because the length verification is done after
- * the Broadcom tag is stripped off the ingress packet.
- */
- if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
- ret = NETDEV_TX_OK;
- goto out;
- }
-
/* Insert TSB and checksum infos */
if (priv->tsb_en) {
skb = bcm_sysport_insert_tsb(skb, dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 1d96cd594ade..8eef9fb6b1fe 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -128,8 +128,6 @@ bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
dma_desc->ctl1 = cpu_to_le32(ctl1);
}
-#define ENET_BRCM_TAG_LEN 4
-
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
struct bgmac_dma_ring *ring,
struct sk_buff *skb)
@@ -142,18 +140,6 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
u32 flags;
int i;
- /* The Ethernet switch we are interfaced with needs packets to be at
- * least 64 bytes (including FCS) otherwise they will be discarded when
- * they enter the switch port logic. When Broadcom tags are enabled, we
- * need to make sure that packets are at least 68 bytes
- * (including FCS and tag) because the length verification is done after
- * the Broadcom tag is stripped off the ingress packet.
- */
- if (netdev_uses_dsa(net_dev)) {
- if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN))
- goto err_stats;
- }
-
if (skb->len > BGMAC_DESC_CTL1_LEN) {
netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
goto err_drop;
@@ -240,7 +226,6 @@ err_dma_head:
err_drop:
dev_kfree_skb(skb);
-err_stats:
net_dev->stats.tx_dropped++;
net_dev->stats.tx_errors++;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 7919f6112ecf..5e34b34f7740 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5818,8 +5818,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
struct l2_fhdr *rx_hdr;
int ret = -ENODEV;
struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
- struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
- struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnx2_tx_ring_info *txr;
+ struct bnx2_rx_ring_info *rxr;
tx_napi = bnapi;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4c739d5355d2..d7c98e807ca8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -738,8 +738,9 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
break;
default:
- WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
- be16_to_cpu(skb->protocol));
+ netdev_WARN_ONCE(bp->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ be16_to_cpu(skb->protocol));
}
}
#endif
@@ -2482,8 +2483,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
*/
if (bp->dev->features & NETIF_F_LRO)
fp->mode = TPA_MODE_LRO;
- else if (bp->dev->features & NETIF_F_GRO &&
- bnx2x_mtu_allows_gro(bp->dev->mtu))
+ else if (bp->dev->features & NETIF_F_GRO_HW)
fp->mode = TPA_MODE_GRO;
else
fp->mode = TPA_MODE_DISABLED;
@@ -3030,7 +3030,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
del_timer_sync(&bp->timer);
- if (IS_PF(bp)) {
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
/* Set ALWAYS_ALIVE bit in shmem */
bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
bnx2x_drv_pulse(bp);
@@ -3116,7 +3116,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->cnic_loaded = false;
/* Clear driver version indication in shmem */
- if (IS_PF(bp))
+ if (IS_PF(bp) && !BP_NOMCP(bp))
bnx2x_update_mng_version(bp);
/* Check if there are pending parity attentions. If there are - set
@@ -4874,6 +4874,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
*/
dev->mtu = new_mtu;
+ if (!bnx2x_mtu_allows_gro(new_mtu))
+ dev->features &= ~NETIF_F_GRO_HW;
+
if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
@@ -4903,10 +4906,13 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
}
/* TPA requires Rx CSUM offloading */
- if (!(features & NETIF_F_RXCSUM)) {
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
+ features &= ~NETIF_F_GRO_HW;
+ if (features & NETIF_F_GRO_HW)
features &= ~NETIF_F_LRO;
- features &= ~NETIF_F_GRO;
- }
return features;
}
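
bnx2x_fix_features() now resolves a small dependency chain: LRO needs RX checksumming, GRO_HW needs software GRO plus an MTU that permits it, and GRO_HW displaces LRO when both are set. The same chain on plain bit flags; mtu_allows_gro() is a stand-in for bnx2x_mtu_allows_gro():

#include <stdint.h>
#include <stdio.h>

#define F_RXCSUM	(1u << 0)
#define F_LRO		(1u << 1)
#define F_GRO		(1u << 2)
#define F_GRO_HW	(1u << 3)

static int mtu_allows_gro(int mtu) { return mtu <= 1500; }

static uint32_t fix_features(uint32_t features, int mtu)
{
	if (!(features & F_RXCSUM))
		features &= ~F_LRO;		/* LRO requires RX CSUM */

	if (!(features & F_GRO) || !mtu_allows_gro(mtu))
		features &= ~F_GRO_HW;		/* GRO_HW needs GRO + MTU */

	if (features & F_GRO_HW)
		features &= ~F_LRO;		/* GRO_HW wins over LRO */

	return features;
}

int main(void)
{
	uint32_t f = fix_features(F_RXCSUM | F_LRO | F_GRO | F_GRO_HW, 1500);

	printf("0x%x\n", f);	/* LRO cleared, GRO_HW retained */
	return 0;
}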
@@ -4933,13 +4939,8 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
}
}
- /* if GRO is changed while LRO is enabled, don't force a reload */
- if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
- changes &= ~NETIF_F_GRO;
-
- /* if GRO is changed while HW TPA is off, don't force a reload */
- if ((changes & NETIF_F_GRO) && bp->disable_tpa)
- changes &= ~NETIF_F_GRO;
+ /* Don't care about GRO changes */
+ changes &= ~NETIF_F_GRO;
if (changes)
bnx2x_reload = true;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 91e2a7560b48..7b08323e3f3d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)
do {
bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+
+ /* If we read all 0xFFs, means we are in PCI error state and
+ * should bail out to avoid crashes on adapter's FW reads.
+ */
+ if (bp->common.shmem_base == 0xFFFFFFFF) {
+ bp->flags |= NO_MCP_FLAG;
+ return -ENODEV;
+ }
+
if (bp->common.shmem_base) {
val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
if (val & SHR_MEM_VALIDITY_MB)
@@ -12400,8 +12409,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
/* Set TPA flags */
if (bp->disable_tpa) {
- bp->dev->hw_features &= ~NETIF_F_LRO;
- bp->dev->features &= ~NETIF_F_LRO;
+ bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+ bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
}
if (CHIP_IS_E1(bp))
@@ -13273,7 +13282,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
- NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
+ NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!chip_is_e1x) {
dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
@@ -13309,6 +13318,8 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_HIGHDMA;
+ if (dev->features & NETIF_F_LRO)
+ dev->features &= ~NETIF_F_GRO_HW;
/* Add Loopback capability to the device */
dev->hw_features |= NETIF_F_LOOPBACK;
@@ -14320,7 +14331,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
BNX2X_ERR("IO slot reset --> driver unload\n");
/* MCP should have been reset; Need to wait for validity */
- bnx2x_init_shmem(bp);
+ if (bnx2x_init_shmem(bp)) {
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
u32 v;
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 59c8ec9c1cad..7c560d545c03 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 61ca4eb7c6fa..1500243b9886 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -107,6 +107,7 @@ enum board_idx {
BCM57416_NPAR,
BCM57452,
BCM57454,
+ BCM5745x_NPAR,
BCM58802,
BCM58804,
BCM58808,
@@ -147,6 +148,7 @@ static const struct {
[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+ [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -156,6 +158,8 @@ static const struct {
};
static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
@@ -209,6 +213,7 @@ MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
static const u16 bnxt_vf_req_snif[] = {
HWRM_FUNC_CFG,
+ HWRM_FUNC_VF_CFG,
HWRM_PORT_PHY_QCFG,
HWRM_CFA_L2_FILTER_ALLOC,
};
@@ -1510,7 +1515,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
(struct rx_tpa_start_cmp_ext *)rxcmp1);
*event |= BNXT_RX_EVENT;
- goto next_rx_no_prod;
+ goto next_rx_no_prod_no_len;
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
@@ -1526,7 +1531,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
rc = 1;
}
*event |= BNXT_RX_EVENT;
- goto next_rx_no_prod;
+ goto next_rx_no_prod_no_len;
}
cons = rxcmp->rx_cmp_opaque;
@@ -1644,7 +1649,10 @@ next_rx:
rxr->rx_prod = NEXT_RX(prod);
rxr->rx_next_cons = NEXT_RX(cons);
-next_rx_no_prod:
+ cpr->rx_packets += 1;
+ cpr->rx_bytes += len;
+
+next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
@@ -1706,12 +1714,16 @@ static int bnxt_async_event_process(struct bnxt *bp,
if (BNXT_VF(bp))
goto async_event_process_exit;
- if (data1 & 0x20000) {
+
+ /* print unsupported speed warning in forced speed mode only */
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
+ (data1 & 0x20000)) {
u16 fw_speed = link_info->force_link_speed;
u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
- netdev_warn(bp->dev, "Link speed %d no longer supported\n",
- speed);
+ if (speed != SPEED_UNKNOWN)
+ netdev_warn(bp->dev, "Link speed %d no longer supported\n",
+ speed);
}
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
/* fall thru */
@@ -1798,6 +1810,7 @@ static irqreturn_t bnxt_msix(int irq, void *dev_instance)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
u32 cons = RING_CMP(cpr->cp_raw_cons);
+ cpr->event_ctr++;
prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
napi_schedule(&bnapi->napi);
return IRQ_HANDLED;
@@ -2021,6 +2034,15 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
}
}
+ if (bp->flags & BNXT_FLAG_DIM) {
+ struct net_dim_sample dim_sample;
+
+ net_dim_sample(cpr->event_ctr,
+ cpr->rx_packets,
+ cpr->rx_bytes,
+ &dim_sample);
+ net_dim(&cpr->dim, dim_sample);
+ }
mmiowb();
return work_done;
}
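
When BNXT_FLAG_DIM is set, each NAPI poll snapshots (events, packets, bytes) and hands the sample to the dynamic interrupt-moderation code. The sketch below only illustrates the sampling side: per-poll snapshots whose deltas would drive a moderation decision. dim_feed() is a hypothetical stand-in, not the net_dim API.

#include <stdint.h>
#include <stdio.h>

struct dim_sample { uint32_t events, packets, bytes; };

/* Compare consecutive samples; a real implementation would pick a new
 * coalescing profile from the computed rates.
 */
static void dim_feed(const struct dim_sample *s)
{
	static struct dim_sample last;

	printf("delta pkts=%u bytes=%u\n",
	       s->packets - last.packets, s->bytes - last.bytes);
	last = *s;
}

int main(void)
{
	struct dim_sample s1 = { 1, 10, 15000 };
	struct dim_sample s2 = { 2, 40, 60000 };

	dim_feed(&s1);	/* one sample per poll */
	dim_feed(&s2);
	return 0;
}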
@@ -2243,6 +2265,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
if (rxr->xdp_prog)
bpf_prog_put(rxr->xdp_prog);
+ if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
+ xdp_rxq_info_unreg(&rxr->xdp_rxq);
+
kfree(rxr->rx_tpa);
rxr->rx_tpa = NULL;
@@ -2276,6 +2301,10 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
ring = &rxr->rx_ring_struct;
+ rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
+ if (rc < 0)
+ return rc;
+
rc = bnxt_alloc_ring(bp, ring);
if (rc)
return rc;
@@ -2606,6 +2635,8 @@ static void bnxt_init_cp_rings(struct bnxt *bp)
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
+ cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
+ cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
}
}
@@ -2751,7 +2782,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
return;
if (bp->dev->features & NETIF_F_LRO)
bp->flags |= BNXT_FLAG_LRO;
- if (bp->dev->features & NETIF_F_GRO)
+ else if (bp->dev->features & NETIF_F_GRO_HW)
bp->flags |= BNXT_FLAG_GRO;
}
@@ -2830,6 +2861,9 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->cp_ring_mask = bp->cp_bit - 1;
}
+/* Changing allocation mode of RX rings.
+ * TODO: Update when extending xdp_rxq_info to support allocation modes.
+ */
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
if (page_mode) {
@@ -2839,10 +2873,10 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
- bp->dev->hw_features &= ~NETIF_F_LRO;
- bp->dev->features &= ~NETIF_F_LRO;
bp->rx_dir = DMA_BIDIRECTIONAL;
bp->rx_skb_func = bnxt_rx_page_skb;
+ /* Disable LRO or GRO_HW */
+ netdev_update_features(bp->dev);
} else {
bp->dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
@@ -4469,6 +4503,42 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+static int bnxt_hwrm_get_rings(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ struct hwrm_func_qcfg_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return -EIO;
+ }
+
+ hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ u16 cp, stats;
+
+ hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
+ hw_resc->resv_hw_ring_grps =
+ le32_to_cpu(resp->alloc_hw_ring_grps);
+ hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ cp = le16_to_cpu(resp->alloc_cmpl_rings);
+ stats = le16_to_cpu(resp->alloc_stat_ctx);
+ cp = min_t(u16, cp, stats);
+ hw_resc->resv_cp_rings = cp;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return 0;
+}
+
/* Caller must hold bp->hwrm_cmd_lock */
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
@@ -4488,55 +4558,283 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
return rc;
}
-static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
+static int
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings, int vnics)
{
struct hwrm_func_cfg_input req = {0};
+ u32 enables = 0;
int rc;
- if (bp->hwrm_spec_code < 0x10601)
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req.num_tx_rings = cpu_to_le16(tx_rings);
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_stat_ctxs = req.num_cmpl_rings;
+ req.num_vnics = cpu_to_le16(vnics);
+ }
+ if (!enables)
return 0;
- if (BNXT_VF(bp))
+ req.enables = cpu_to_le32(enables);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ bp->hw_resc.resv_tx_rings = tx_rings;
+
+ rc = bnxt_hwrm_get_rings(bp);
+ return rc;
+}
+
+static int
+bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings, int vnics)
+{
+ struct hwrm_func_vf_cfg_input req = {0};
+ u32 enables = 0;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
+ bp->hw_resc.resv_tx_rings = tx_rings;
return 0;
+ }
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- req.fid = cpu_to_le16(0xffff);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
- req.num_tx_rings = cpu_to_le16(*tx_rings);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+ req.num_tx_rings = cpu_to_le16(tx_rings);
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_stat_ctxs = req.num_cmpl_rings;
+ req.num_vnics = cpu_to_le16(vnics);
+
+ req.enables = cpu_to_le32(enables);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
+ return -ENOMEM;
+
+ rc = bnxt_hwrm_get_rings(bp);
+ return rc;
+}
+
+static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
+ int cp, int vnic)
+{
+ if (BNXT_PF(bp))
+ return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic);
+ else
+ return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
+}
+
+static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared);
+
+static int __bnxt_reserve_rings(struct bnxt *bp)
+{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int tx = bp->tx_nr_rings;
+ int rx = bp->rx_nr_rings;
+ int cp = bp->cp_nr_rings;
+ int grp, rx_rings, rc;
+ bool sh = false;
+ int vnic = 1;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ sh = true;
+ if (bp->flags & BNXT_FLAG_RFS)
+ vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx <<= 1;
+
+ grp = bp->rx_nr_rings;
+ if (tx == hw_resc->resv_tx_rings &&
+ (!(bp->flags & BNXT_FLAG_NEW_RM) ||
+ (rx == hw_resc->resv_rx_rings &&
+ grp == hw_resc->resv_hw_ring_grps &&
+ cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
+ return 0;
+
+ rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
+ if (rc)
return rc;
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
- mutex_unlock(&bp->hwrm_cmd_lock);
- if (!rc)
- bp->tx_reserved_rings = *tx_rings;
+ tx = hw_resc->resv_tx_rings;
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ rx = hw_resc->resv_rx_rings;
+ cp = hw_resc->resv_cp_rings;
+ grp = hw_resc->resv_hw_ring_grps;
+ vnic = hw_resc->resv_vnics;
+ }
+
+ rx_rings = rx;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ if (rx >= 2) {
+ rx_rings = rx >> 1;
+ } else {
+ if (netif_running(bp->dev))
+ return -ENOMEM;
+
+ bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features &= ~NETIF_F_LRO;
+ bp->dev->features &= ~NETIF_F_LRO;
+ bnxt_set_ring_params(bp);
+ }
+ }
+ rx_rings = min_t(int, rx_rings, grp);
+ rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx = rx_rings << 1;
+ cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
+ bp->tx_nr_rings = tx;
+ bp->rx_nr_rings = rx_rings;
+ bp->cp_nr_rings = cp;
+
+ if (!tx || !rx || !cp || !grp || !vnic)
+ return -ENOMEM;
+
return rc;
}
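+
+/* Worked example (hypothetical values): with rx_nr_rings = 4, RFS and
+ * aggregation rings enabled, and shared rings off, __bnxt_reserve_rings()
+ * asks firmware for tx = tx_nr_rings, rx = 8 (doubled for the agg rings),
+ * grp = 4, cp = cp_nr_rings, and vnic = 5 (one VNIC per RX ring for RFS
+ * plus the default VNIC).  Firmware may grant less; the granted values are
+ * read back through bnxt_hwrm_get_rings() and trimmed to a consistent set
+ * before being committed to bp->*_nr_rings.
+ */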
-static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
+static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
- struct hwrm_func_cfg_input req = {0};
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int rx = bp->rx_nr_rings;
+ int vnic = 1;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return false;
+
+ if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+ return true;
+
+ if (bp->flags & BNXT_FLAG_RFS)
+ vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx <<= 1;
+ if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+ (hw_resc->resv_rx_rings != rx ||
+ hw_resc->resv_cp_rings != bp->cp_nr_rings ||
+ hw_resc->resv_vnics != vnic))
+ return true;
+ return false;
+}
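+
+/* This is a cheap pre-check that mirrors the comparisons in
+ * __bnxt_reserve_rings(), letting callers skip the HWRM round trip when
+ * the existing reservations already match the requested ring counts.
+ */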
+
+static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings)
+{
+ struct hwrm_func_vf_cfg_input req = {0};
+ u32 flags, enables;
int rc;
- if (bp->hwrm_spec_code < 0x10801)
+ if (!(bp->flags & BNXT_FLAG_NEW_RM))
return 0;
- if (BNXT_VF(bp))
- return 0;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+ enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
+
+ req.flags = cpu_to_le32(flags);
+ req.enables = cpu_to_le32(enables);
+ req.num_tx_rings = cpu_to_le16(tx_rings);
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_stat_ctxs = cpu_to_le16(cp_rings);
+ req.num_vnics = cpu_to_le16(1);
+ if (bp->flags & BNXT_FLAG_RFS)
+ req.num_vnics = cpu_to_le16(rx_rings + 1);
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
+ return 0;
+}
+
+static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings)
+{
+ struct hwrm_func_cfg_input req = {0};
+ u32 flags, enables;
+ int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(0xffff);
- req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+ flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
+ enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS;
req.num_tx_rings = cpu_to_le16(tx_rings);
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+ enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_VNICS;
+ req.num_rx_rings = cpu_to_le16(rx_rings);
+ req.num_cmpl_rings = cpu_to_le16(cp_rings);
+ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req.num_stat_ctxs = cpu_to_le16(cp_rings);
+ req.num_vnics = cpu_to_le16(1);
+ if (bp->flags & BNXT_FLAG_RFS)
+ req.num_vnics = cpu_to_le16(rx_rings + 1);
+ }
+ req.flags = cpu_to_le32(flags);
+ req.enables = cpu_to_le32(enables);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
return 0;
}
+static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings)
+{
+ if (bp->hwrm_spec_code < 0x10801)
+ return 0;
+
+ if (BNXT_PF(bp))
+ return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
+ ring_grps, cp_rings);
+
+ return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
+ cp_rings);
+}
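+
+/* The *_ASSETS_TEST flags appear to ask the firmware to validate that the
+ * requested rings, stat contexts and VNICs are available without actually
+ * committing them, so bnxt_check_rings() can probe a proposed channel
+ * configuration before it is applied.
+ */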
+
static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
@@ -4579,6 +4877,36 @@ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
req->flags = cpu_to_le16(flags);
}
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
+{
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_coal coal;
+ unsigned int grp_idx;
+
+ /* Tick values are in microseconds.
+ * 1 coal_buf x bufs_per_record = 1 completion record.
+ */
+ memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
+
+ coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
+ coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
+
+ if (!bnapi->rx_ring)
+ return -ENODEV;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+
+ bnxt_hwrm_set_coal_params(&coal, &req_rx);
+
+ grp_idx = bnapi->index;
+ req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
+
+ return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
+ HWRM_CMD_TIMEOUT);
+}
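+
+/* Unlike bnxt_hwrm_set_coal() below, which reprograms every ring, this
+ * helper updates a single RX completion ring; it is the hook that
+ * bnxt_dim_work() uses to apply a new net_dim profile to just the ring
+ * whose traffic pattern changed.
+ */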
+
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
int i, rc = 0;
@@ -4732,11 +5060,60 @@ func_qcfg_exit:
return rc;
}
-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+{
+ struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_resource_qcaps_input req = {0};
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ rc = -EIO;
+ goto hwrm_func_resc_qcaps_exit;
+ }
+
+ hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
+ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
+ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
+ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
+ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
+ hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
+ hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
+ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
+ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+ hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
+ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ pf->vf_resv_strategy =
+ le16_to_cpu(resp->vf_reservation_strategy);
+ if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
+ pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
+ }
+hwrm_func_resc_qcaps_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
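+
+/* FUNC_RESOURCE_QCAPS reports a min/max window per resource type rather
+ * than a single ceiling; the PF additionally learns the VF reservation
+ * strategy, with out-of-range values conservatively treated as maximal.
+ */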
+
+static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {0};
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ u32 flags;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
req.fid = cpu_to_le16(0xffff);
@@ -4746,16 +5123,27 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_func_qcaps_exit;
- if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
+ flags = le32_to_cpu(resp->flags);
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV1_CAP;
- if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;
bp->tx_push_thresh = 0;
- if (resp->flags &
- cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+ if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
+ if (!hw_resc->max_hw_ring_grps)
+ hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
+ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -4763,16 +5151,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->port_id = le16_to_cpu(resp->port_id);
bp->dev->dev_port = pf->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
- pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
- pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
- pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
- pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
- pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
- if (!pf->max_hw_ring_grps)
- pf->max_hw_ring_grps = pf->max_tx_rings;
- pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
- pf->max_vnics = le16_to_cpu(resp->max_vnics);
- pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
pf->max_vfs = le16_to_cpu(resp->max_vfs);
pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
@@ -4781,26 +5159,13 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
- if (resp->flags &
- cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
+ if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
vf->fw_fid = le16_to_cpu(resp->fid);
-
- vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
- vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
- vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
- vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
- vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
- if (!vf->max_hw_ring_grps)
- vf->max_hw_ring_grps = vf->max_tx_rings;
- vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
- vf->max_vnics = le16_to_cpu(resp->max_vnics);
- vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
-
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
}
@@ -4810,6 +5175,21 @@ hwrm_func_qcaps_exit:
return rc;
}
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+ int rc;
+
+ rc = __bnxt_hwrm_func_qcaps(bp);
+ if (rc)
+ return rc;
+ if (bp->hwrm_spec_code >= 0x10803) {
+ rc = bnxt_hwrm_func_resc_qcaps(bp);
+ if (!rc)
+ bp->flags |= BNXT_FLAG_NEW_RM;
+ }
+ return 0;
+}
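+
+/* BNXT_FLAG_NEW_RM is set only when the firmware is new enough (HWRM spec
+ * 1.8.3, i.e. 0x10803) to support FUNC_RESOURCE_QCAPS and the query
+ * succeeds; all of the min/max/reserved resource handling above keys off
+ * this flag, while older firmware keeps the legacy TX-ring-only flow.
+ */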
+
static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
struct hwrm_func_reset_input req = {0};
@@ -4879,23 +5259,24 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
- bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
- resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
- if (resp->hwrm_intf_maj < 1) {
+ bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
+ resp->hwrm_intf_min_8b << 8 |
+ resp->hwrm_intf_upd_8b;
+ if (resp->hwrm_intf_maj_8b < 1) {
netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
- resp->hwrm_intf_maj, resp->hwrm_intf_min,
- resp->hwrm_intf_upd);
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b);
netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
}
snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
- resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
- resp->hwrm_fw_rsvd);
+ resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
+ resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
- if (resp->hwrm_intf_maj >= 1)
+ if (resp->hwrm_intf_maj_8b >= 1)
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
bp->chip_num = le16_to_cpu(resp->chip_num);
@@ -5031,6 +5412,28 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
return rc;
}
+static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
+{
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
+ req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64;
+ if (size == 128)
+ req.cache_linesize =
+ FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128;
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ return rc;
+}
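+
+/* Programs the device's cache line size hint (64 or 128 bytes) on the PF;
+ * VFs and pre-1.8.3 firmware silently skip this step.
+ */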
+
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -5166,15 +5569,6 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rc);
goto err_out;
}
- if (bp->tx_reserved_rings != bp->tx_nr_rings) {
- int tx = bp->tx_nr_rings;
-
- if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
- tx < bp->tx_nr_rings) {
- rc = -ENOMEM;
- goto err_out;
- }
- }
}
rc = bnxt_hwrm_ring_alloc(bp);
@@ -5394,79 +5788,45 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_rsscos_ctxs;
-#endif
- return bp->pf.max_rsscos_ctxs;
+ return bp->hw_resc.max_rsscos_ctxs;
}
static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_vnics;
-#endif
- return bp->pf.max_vnics;
+ return bp->hw_resc.max_vnics;
}
#endif
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_stat_ctxs;
-#endif
- return bp->pf.max_stat_ctxs;
+ return bp->hw_resc.max_stat_ctxs;
}
void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- bp->vf.max_stat_ctxs = max;
- else
-#endif
- bp->pf.max_stat_ctxs = max;
+ bp->hw_resc.max_stat_ctxs = max;
}
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return bp->vf.max_cp_rings;
-#endif
- return bp->pf.max_cp_rings;
+ return bp->hw_resc.max_cp_rings;
}
void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- bp->vf.max_cp_rings = max;
- else
-#endif
- bp->pf.max_cp_rings = max;
+ bp->hw_resc.max_cp_rings = max;
}
static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- return min_t(unsigned int, bp->vf.max_irqs,
- bp->vf.max_cp_rings);
-#endif
- return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
}
void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
{
-#if defined(CONFIG_BNXT_SRIOV)
- if (BNXT_VF(bp))
- bp->vf.max_irqs = max_irqs;
- else
-#endif
- bp->pf.max_irqs = max_irqs;
+ bp->hw_resc.max_irqs = max_irqs;
}
static int bnxt_init_msix(struct bnxt *bp)
@@ -5567,6 +5927,36 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
+static int bnxt_reserve_rings(struct bnxt *bp)
+{
+ int orig_cp = bp->hw_resc.resv_cp_rings;
+ int tcs = netdev_get_num_tc(bp->dev);
+ int rc;
+
+ if (!bnxt_need_reserve_rings(bp))
+ return 0;
+
+ rc = __bnxt_reserve_rings(bp);
+ if (rc) {
+ netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
+ return rc;
+ }
+ if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (rc)
+ return rc;
+ }
+ if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+ netdev_err(bp->dev, "tx ring reservation failure\n");
+ netdev_reset_tc(bp->dev);
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ return -ENOMEM;
+ }
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+ return 0;
+}
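+
+/* Called from __bnxt_open_nic() before interrupt setup: if the firmware
+ * grants more completion rings than were previously reserved, the MSI-X
+ * table is torn down and re-initialized so each new ring gets a vector.
+ */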
+
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
@@ -5692,8 +6082,14 @@ static void bnxt_disable_napi(struct bnxt *bp)
if (!bp->bnapi)
return;
- for (i = 0; i < bp->cp_nr_rings; i++)
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+ if (bp->bnapi[i]->rx_ring)
+ cancel_work_sync(&cpr->dim.work);
+
napi_disable(&bp->bnapi[i]->napi);
+ }
}
static void bnxt_enable_napi(struct bnxt *bp)
@@ -5701,7 +6097,13 @@ static void bnxt_enable_napi(struct bnxt *bp)
int i;
for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
bp->bnapi[i]->in_reset = false;
+
+ if (bp->bnapi[i]->rx_ring) {
+ INIT_WORK(&cpr->dim.work, bnxt_dim_work);
+ cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
napi_enable(&bp->bnapi[i]->napi);
}
}
@@ -6311,6 +6713,10 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_preset_reg_win(bp);
netif_carrier_off(bp->dev);
if (irq_re_init) {
+ rc = bnxt_reserve_rings(bp);
+ if (rc)
+ return rc;
+
rc = bnxt_setup_int_mode(bp);
if (rc) {
netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
@@ -6446,23 +6852,13 @@ static bool bnxt_drv_busy(struct bnxt *bp)
test_bit(BNXT_STATE_READ_STATS, &bp->state));
}
-int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
+ bool link_re_init)
{
- int rc = 0;
-
-#ifdef CONFIG_BNXT_SRIOV
- if (bp->sriov_cfg) {
- rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
- !bp->sriov_cfg,
- BNXT_SRIOV_CFG_WAIT_TMO);
- if (rc)
- netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
- }
-
/* Close the VF-reps before closing PF */
if (BNXT_PF(bp))
bnxt_vf_reps_close(bp);
-#endif
+
/* Change device state to avoid TX queue wake up's */
bnxt_tx_disable(bp);
@@ -6485,6 +6881,22 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_del_napi(bp);
}
bnxt_free_mem(bp, irq_re_init);
+}
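+
+/* The close path is split so that bnxt_restore_pf_fw_resources() can call
+ * __bnxt_close_nic() directly, without the SRIOV-config wait performed by
+ * the exported bnxt_close_nic() below.
+ */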
+
+int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+ if (bp->sriov_cfg) {
+ rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+ !bp->sriov_cfg,
+ BNXT_SRIOV_CFG_WAIT_TMO);
+ if (rc)
+ netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+ }
+#endif
+ __bnxt_close_nic(bp, irq_re_init, link_re_init);
return rc;
}
@@ -6764,13 +7176,26 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
max_rss_ctxs = max_vnics;
if (vnics > max_vnics || vnics > max_rss_ctxs) {
- netdev_warn(bp->dev,
- "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
- min(max_rss_ctxs - 1, max_vnics - 1));
+ if (bp->rx_nr_rings > 1)
+ netdev_warn(bp->dev,
+ "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
+ min(max_rss_ctxs - 1, max_vnics - 1));
return false;
}
- return true;
+ if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ return true;
+
+ if (vnics == bp->hw_resc.resv_vnics)
+ return true;
+
+ bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics);
+ if (vnics <= bp->hw_resc.resv_vnics)
+ return true;
+
+ netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
+ bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1);
+ return false;
#else
return false;
#endif
@@ -6784,6 +7209,15 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
+ if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+ features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+
+ if (!(features & NETIF_F_GRO))
+ features &= ~NETIF_F_GRO_HW;
+
+ if (features & NETIF_F_GRO_HW)
+ features &= ~NETIF_F_LRO;
+
/* Both CTAG and STAG VLAN acceleration on the RX side have to be
* turned on or off together.
*/
@@ -6817,9 +7251,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
bool update_tpa = false;
flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
- if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
+ if (features & NETIF_F_GRO_HW)
flags |= BNXT_FLAG_GRO;
- if (features & NETIF_F_LRO)
+ else if (features & NETIF_F_LRO)
flags |= BNXT_FLAG_LRO;
if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
@@ -7096,7 +7530,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
{
int max_rx, max_tx, tx_sets = 1;
int tx_rings_needed;
- int rc;
+ int rx_rings = rx;
+ int cp, rc;
if (tcs)
tx_sets = tcs;
@@ -7112,7 +7547,10 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (max_tx < tx_rings_needed)
return -ENOMEM;
- return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx_rings <<= 1;
+ cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
+ return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp);
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -7346,7 +7784,8 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct bnxt *bp = cb_priv;
- if (!bnxt_tc_flower_enabled(bp) || !tc_can_offload(bp->dev))
+ if (!bnxt_tc_flower_enabled(bp) ||
+ !tc_cls_can_offload_and_chain0(bp->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -7717,12 +8156,8 @@ int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- /* In SRIOV each PF-pool (PF + child VFs) serves as a
- * switching domain, the PF's perm mac-addr can be used
- * as the unique parent-id
- */
- attr->u.ppid.id_len = ETH_ALEN;
- ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr);
+ attr->u.ppid.id_len = sizeof(bp->switch_id);
+ memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
break;
default:
return -EOPNOTSUPP;
@@ -7800,8 +8235,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
- if (bp->xdp_prog)
- bpf_prog_put(bp->xdp_prog);
bnxt_cleanup_pci(bp);
free_netdev(dev);
}
@@ -7869,24 +8302,14 @@ static int bnxt_get_max_irq(struct pci_dev *pdev)
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
int *max_cp)
{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int max_ring_grps = 0;
-#ifdef CONFIG_BNXT_SRIOV
- if (!BNXT_PF(bp)) {
- *max_tx = bp->vf.max_tx_rings;
- *max_rx = bp->vf.max_rx_rings;
- *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
- *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
- max_ring_grps = bp->vf.max_hw_ring_grps;
- } else
-#endif
- {
- *max_tx = bp->pf.max_tx_rings;
- *max_rx = bp->pf.max_rx_rings;
- *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
- *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
- max_ring_grps = bp->pf.max_hw_ring_grps;
- }
+ *max_tx = hw_resc->max_tx_rings;
+ *max_rx = hw_resc->max_rx_rings;
+ *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
+ *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
+ max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
*max_cp -= 1;
*max_rx -= 2;
@@ -7922,8 +8345,8 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
if (rc)
return rc;
bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
- bp->dev->hw_features &= ~NETIF_F_LRO;
- bp->dev->features &= ~NETIF_F_LRO;
+ bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+ bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bnxt_set_ring_params(bp);
}
@@ -7951,6 +8374,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
return rc;
}
+/* In the initial default shared ring setting, each shared ring must have an
+ * RX/TX ring pair.
+ */
+static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
+{
+ bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
+ bp->rx_nr_rings = bp->cp_nr_rings;
+ bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+}
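+
+/* Example: if dflt_rings yields 8 TX rings per TC but only 6 RX rings,
+ * the trim settles on 6 completion rings and 6 RX/TX rings each, so every
+ * shared completion ring serves exactly one RX/TX pair.
+ */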
+
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
@@ -7966,14 +8400,26 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
return rc;
bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+ if (sh)
+ bnxt_trim_dflt_sh_rings(bp);
+ else
+ bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
- rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
+ rc = __bnxt_reserve_rings(bp);
if (rc)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ if (sh)
+ bnxt_trim_dflt_sh_rings(bp);
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
- bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ /* Rings may have been trimmed, re-reserve the trimmed rings. */
+ if (bnxt_need_reserve_rings(bp)) {
+ rc = __bnxt_reserve_rings(bp);
+ if (rc)
+ netdev_warn(bp->dev, "2nd rings reservation failed.\n");
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ }
bp->num_stat_ctxs = bp->cp_nr_rings;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bp->rx_nr_rings++;
@@ -7982,11 +8428,23 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
return rc;
}
-void bnxt_restore_pf_fw_resources(struct bnxt *bp)
+int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
+ int rc;
+
ASSERT_RTNL();
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+ return 0;
+
bnxt_hwrm_func_qcaps(bp);
- bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
+ __bnxt_close_nic(bp, true, false);
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (rc)
+ dev_close(bp->dev);
+ else
+ rc = bnxt_open_nic(bp, true, false);
+ return rc;
}
static int bnxt_init_mac_addr(struct bnxt *bp)
@@ -8000,7 +8458,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
if (is_valid_ether_addr(vf->mac_addr)) {
- /* overwrite netdev dev_adr with admin VF MAC */
+ /* overwrite netdev dev_addr with admin VF MAC */
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
} else {
eth_hw_addr_random(bp->dev);
@@ -8106,7 +8564,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+ if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ dev->hw_features |= NETIF_F_GRO_HW;
dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+ if (dev->features & NETIF_F_GRO_HW)
+ dev->features &= ~NETIF_F_LRO;
dev->priv_flags |= IFF_UNICAST_FLT;
#ifdef CONFIG_BNXT_SRIOV
@@ -8208,6 +8670,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else
device_set_wakeup_capable(&pdev->dev, false);
+ bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
+
if (BNXT_PF(bp)) {
if (!bnxt_pf_wq) {
bnxt_pf_wq =
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5359a1f0045f..1989c470172c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,10 +12,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.8.0"
+#define DRV_MODULE_VERSION "1.9.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 8
+#define DRV_VER_MIN 9
#define DRV_VER_UPD 0
#include <linux/interrupt.h>
@@ -23,6 +23,8 @@
#include <net/devlink.h>
#include <net/dst_metadata.h>
#include <net/switchdev.h>
+#include <net/xdp.h>
+#include <linux/net_dim.h>
struct tx_bd {
__le32 tx_bd_len_flags_type;
@@ -607,6 +609,17 @@ struct bnxt_tx_ring_info {
struct bnxt_ring_struct tx_ring_struct;
};
+struct bnxt_coal {
+ u16 coal_ticks;
+ u16 coal_ticks_irq;
+ u16 coal_bufs;
+ u16 coal_bufs_irq;
+ /* RING_IDLE enabled when coal ticks < idle_thresh */
+ u16 idle_thresh;
+ u8 bufs_per_record;
+ u8 budget;
+};
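+
+/* bnxt_coal is moved earlier in the file so that bnxt_cp_ring_info below
+ * can embed a per-ring rx_ring_coal instance for adaptive (DIM) interrupt
+ * moderation.
+ */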
+
struct bnxt_tpa_info {
void *data;
u8 *data_ptr;
@@ -664,12 +677,20 @@ struct bnxt_rx_ring_info {
struct bnxt_ring_struct rx_ring_struct;
struct bnxt_ring_struct rx_agg_ring_struct;
+ struct xdp_rxq_info xdp_rxq;
};
struct bnxt_cp_ring_info {
u32 cp_raw_cons;
void __iomem *cp_doorbell;
+ struct bnxt_coal rx_ring_coal;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 event_ctr;
+
+ struct net_dim dim;
+
struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
@@ -755,19 +776,38 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
};
-#if defined(CONFIG_BNXT_SRIOV)
-struct bnxt_vf_info {
- u16 fw_fid;
- u8 mac_addr[ETH_ALEN];
+struct bnxt_hw_resc {
+ u16 min_rsscos_ctxs;
u16 max_rsscos_ctxs;
+ u16 min_cp_rings;
u16 max_cp_rings;
+ u16 resv_cp_rings;
+ u16 min_tx_rings;
u16 max_tx_rings;
+ u16 resv_tx_rings;
+ u16 min_rx_rings;
u16 max_rx_rings;
+ u16 resv_rx_rings;
+ u16 min_hw_ring_grps;
u16 max_hw_ring_grps;
+ u16 resv_hw_ring_grps;
+ u16 min_l2_ctxs;
u16 max_l2_ctxs;
- u16 max_irqs;
+ u16 min_vnics;
u16 max_vnics;
+ u16 resv_vnics;
+ u16 min_stat_ctxs;
u16 max_stat_ctxs;
+ u16 max_irqs;
+};
+
+#if defined(CONFIG_BNXT_SRIOV)
+struct bnxt_vf_info {
+ u16 fw_fid;
+ u8 mac_addr[ETH_ALEN]; /* PF assigned MAC Address */
+ u8 vf_mac_addr[ETH_ALEN]; /* VF assigned MAC address, only
+ * stored by PF.
+ */
u16 vlan;
u32 flags;
#define BNXT_VF_QOS 0x1
@@ -788,15 +828,6 @@ struct bnxt_pf_info {
u16 fw_fid;
u16 port_id;
u8 mac_addr[ETH_ALEN];
- u16 max_rsscos_ctxs;
- u16 max_cp_rings;
- u16 max_tx_rings; /* HW assigned max tx rings for this PF */
- u16 max_rx_rings; /* HW assigned max rx rings for this PF */
- u16 max_hw_ring_grps;
- u16 max_irqs;
- u16 max_l2_ctxs;
- u16 max_vnics;
- u16 max_stat_ctxs;
u32 first_vf_id;
u16 active_vfs;
u16 max_vfs;
@@ -808,6 +839,9 @@ struct bnxt_pf_info {
u32 max_rx_wm_flows;
unsigned long *vf_event_bmap;
u16 hwrm_cmd_req_pages;
+ u8 vf_resv_strategy;
+#define BNXT_VF_RESV_STRATEGY_MAXIMAL 0
+#define BNXT_VF_RESV_STRATEGY_MINIMAL 1
void *hwrm_cmd_req_addr[4];
dma_addr_t hwrm_cmd_req_dma_addr[4];
struct bnxt_vf_info *vf;
@@ -944,17 +978,6 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
-struct bnxt_coal {
- u16 coal_ticks;
- u16 coal_ticks_irq;
- u16 coal_bufs;
- u16 coal_bufs_irq;
- /* RING_IDLE enabled when coal ticks < idle_thresh */
- u16 idle_thresh;
- u8 bufs_per_record;
- u8 budget;
-};
-
struct bnxt_tc_flow_stats {
u64 packets;
u64 bytes;
@@ -1126,6 +1149,8 @@ struct bnxt {
#define BNXT_FLAG_DOUBLE_DB 0x400000
#define BNXT_FLAG_FW_DCBX_AGENT 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
+ #define BNXT_FLAG_DIM 0x2000000
+ #define BNXT_FLAG_NEW_RM 0x8000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -1185,7 +1210,6 @@ struct bnxt {
int tx_nr_rings;
int tx_nr_rings_per_tc;
int tx_nr_rings_xdp;
- int tx_reserved_rings;
int tx_wake_thresh;
int tx_push_thresh;
@@ -1297,6 +1321,7 @@ struct bnxt {
#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
#define BNXT_FLOW_STATS_SP_EVENT 15
+ struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
int nr_vfs;
@@ -1346,6 +1371,7 @@ struct bnxt {
enum devlink_eswitch_mode eswitch_mode;
struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
+ u8 switch_id[8];
struct bnxt_tc_info *tc_info;
};
@@ -1421,6 +1447,9 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
-void bnxt_restore_pf_fw_resources(struct bnxt *bp);
+int bnxt_restore_pf_fw_resources(struct bnxt *bp);
int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr);
+void bnxt_dim_work(struct work_struct *work);
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
+
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index fed37cd9ae1d..3c746f2d9ed8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -278,12 +278,11 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
n = IEEE_8021QAZ_MAX_TCS;
data_len = sizeof(*data) + sizeof(*fw_app) * n;
- data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
- GFP_KERNEL);
+ data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping,
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
- memset(data, 0, data_len);
bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
get.dest_data_addr = cpu_to_le64(mapping);
get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
new file mode 100644
index 000000000000..408dd190331e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -0,0 +1,32 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/net_dim.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+
+void bnxt_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim,
+ work);
+ struct bnxt_cp_ring_info *cpr = container_of(dim,
+ struct bnxt_cp_ring_info,
+ dim);
+ struct bnxt_napi *bnapi = container_of(cpr,
+ struct bnxt_napi,
+ cp_ring);
+ struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+ dim->profile_ix);
+
+ cpr->rx_ring_coal.coal_ticks = cur_profile.usec;
+ cpr->rx_ring_coal.coal_bufs = cur_profile.pkts;
+
+ bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi);
+ dim->state = NET_DIM_START_MEASURE;
+}
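+
+/* Flow sketch (assuming the usual net_dim usage): the NAPI poll path feeds
+ * the new per-ring rx_packets/rx_bytes/event_ctr counters to net_dim(),
+ * which schedules this work item whenever it selects a new moderation
+ * profile.  The profile's usec/pkts pair lands in the ring's rx_ring_coal
+ * and is pushed to firmware via bnxt_hwrm_set_ring_coal(), after which the
+ * DIM state machine is re-armed for the next measurement window.
+ */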
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b13ce5ebde8d..1801582076be 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -49,6 +49,8 @@ static int bnxt_get_coalesce(struct net_device *dev,
memset(coal, 0, sizeof(*coal));
+ coal->use_adaptive_rx_coalesce = !!(bp->flags & BNXT_FLAG_DIM);
+
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
coal->rx_coalesce_usecs = hw_coal->coal_ticks;
@@ -77,6 +79,15 @@ static int bnxt_set_coalesce(struct net_device *dev,
int rc = 0;
u16 mult;
+ if (coal->use_adaptive_rx_coalesce) {
+ bp->flags |= BNXT_FLAG_DIM;
+ } else {
+ if (bp->flags & BNXT_FLAG_DIM) {
+ bp->flags &= ~(BNXT_FLAG_DIM);
+ goto reset_coalesce;
+ }
+ }
+
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
hw_coal->coal_ticks = coal->rx_coalesce_usecs;
@@ -104,6 +115,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
update_stats = true;
}
+reset_coalesce:
if (netif_running(dev)) {
if (update_stats) {
rc = bnxt_close_nic(bp, true, false);
@@ -1376,6 +1388,9 @@ static int bnxt_firmware_reset(struct net_device *dev,
req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
break;
+ case BNXT_FW_RESET_AP:
+ req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
+ break;
default:
return -EINVAL;
}
@@ -2522,6 +2537,14 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
if (!rc)
netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
+ } else if (*flags == ETH_RESET_AP) {
+ /* This feature is not supported in older firmware versions */
+ if (bp->hwrm_spec_code < 0x10803)
+ return -EOPNOTSUPP;
+
+ rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
+ if (!rc)
+ netdev_info(dev, "Reset Application Processor request successful.\n");
} else {
rc = -EINVAL;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index ff601b42fcc8..836ef682f24c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -34,6 +34,7 @@ struct bnxt_led_cfg {
#define BNXT_LED_DFLT_ENABLES(x) \
cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
+#define BNXT_FW_RESET_AP 0xfffe
#define BNXT_FW_RESET_CHIP 0xffff
extern const struct ethtool_ops bnxt_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index c99f4d0880e4..82d17f8cc0db 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,2437 +1,2700 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
+ *
+ * DO NOT MODIFY!!! This file is automatically generated.
*/
-#ifndef BNXT_HSI_H
-#define BNXT_HSI_H
+#ifndef _BNXT_HSI_H_
+#define _BNXT_HSI_H_
+
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+#define CMD_DISCR_TLV_ENCAP 0x8000UL
+#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
+
+
+#define TLV_TYPE_HWRM_REQUEST 0x1UL
+#define TLV_TYPE_HWRM_RESPONSE 0x2UL
+#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
+#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
+#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL
+#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
+#define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL
+#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
+#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
+
+
+/* tlv (size:64b/8B) */
+struct tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 flags;
+ #define TLV_FLAGS_MORE 0x1UL
+ #define TLV_FLAGS_MORE_LAST 0x0UL
+ #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define TLV_FLAGS_REQUIRED 0x2UL
+ #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+};
+
+/* input (size:128b/16B) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
-/* HSI and HWRM Specification 1.8.3 */
-#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 8
-#define HWRM_VERSION_UPDATE 3
+/* output (size:64b/8B) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
-#define HWRM_VERSION_RSVD 1 /* non-zero means beta version */
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+ __le16 req_type;
+ __le16 signature;
+ #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+ #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
+ __le16 unused_0;
+ __le16 size;
+ __le64 req_addr;
+};
-#define HWRM_VERSION_STR "1.8.3.1"
-/*
- * Following is the signature for HWRM message field that indicates not
- * applicable (All F's). Need to cast it the size of the field if needed.
- */
-#define HWRM_NA_SIGNATURE ((__le32)(-1))
-#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */
-#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
-#define HW_HASH_KEY_SIZE 40
-#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
-
-/* Statistics Ejection Buffer Completion Record (16 bytes) */
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_BUF_UNRGTR 0xeUL
+ #define HWRM_FUNC_VF_CFG 0xfUL
+ #define HWRM_RESERVED1 0x10UL
+ #define HWRM_FUNC_RESET 0x11UL
+ #define HWRM_FUNC_GETFID 0x12UL
+ #define HWRM_FUNC_VF_ALLOC 0x13UL
+ #define HWRM_FUNC_VF_FREE 0x14UL
+ #define HWRM_FUNC_QCAPS 0x15UL
+ #define HWRM_FUNC_QCFG 0x16UL
+ #define HWRM_FUNC_CFG 0x17UL
+ #define HWRM_FUNC_QSTATS 0x18UL
+ #define HWRM_FUNC_CLR_STATS 0x19UL
+ #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
+ #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
+ #define HWRM_FUNC_DRV_RGTR 0x1dUL
+ #define HWRM_FUNC_DRV_QVER 0x1eUL
+ #define HWRM_FUNC_BUF_RGTR 0x1fUL
+ #define HWRM_PORT_PHY_CFG 0x20UL
+ #define HWRM_PORT_MAC_CFG 0x21UL
+ #define HWRM_PORT_TS_QUERY 0x22UL
+ #define HWRM_PORT_QSTATS 0x23UL
+ #define HWRM_PORT_LPBK_QSTATS 0x24UL
+ #define HWRM_PORT_CLR_STATS 0x25UL
+ #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
+ #define HWRM_PORT_PHY_QCFG 0x27UL
+ #define HWRM_PORT_MAC_QCFG 0x28UL
+ #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
+ #define HWRM_PORT_PHY_QCAPS 0x2aUL
+ #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
+ #define HWRM_PORT_PHY_I2C_READ 0x2cUL
+ #define HWRM_PORT_LED_CFG 0x2dUL
+ #define HWRM_PORT_LED_QCFG 0x2eUL
+ #define HWRM_PORT_LED_QCAPS 0x2fUL
+ #define HWRM_QUEUE_QPORTCFG 0x30UL
+ #define HWRM_QUEUE_QCFG 0x31UL
+ #define HWRM_QUEUE_CFG 0x32UL
+ #define HWRM_FUNC_VLAN_CFG 0x33UL
+ #define HWRM_FUNC_VLAN_QCFG 0x34UL
+ #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
+ #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
+ #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
+ #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
+ #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
+ #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
+ #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
+ #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
+ #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
+ #define HWRM_VNIC_ALLOC 0x40UL
+ #define HWRM_VNIC_FREE 0x41UL
+ #define HWRM_VNIC_CFG 0x42UL
+ #define HWRM_VNIC_QCFG 0x43UL
+ #define HWRM_VNIC_TPA_CFG 0x44UL
+ #define HWRM_VNIC_TPA_QCFG 0x45UL
+ #define HWRM_VNIC_RSS_CFG 0x46UL
+ #define HWRM_VNIC_RSS_QCFG 0x47UL
+ #define HWRM_VNIC_PLCMODES_CFG 0x48UL
+ #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
+ #define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_RING_ALLOC 0x50UL
+ #define HWRM_RING_FREE 0x51UL
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
+ #define HWRM_RING_RESET 0x5eUL
+ #define HWRM_RING_GRP_ALLOC 0x60UL
+ #define HWRM_RING_GRP_FREE 0x61UL
+ #define HWRM_RESERVED5 0x64UL
+ #define HWRM_RESERVED6 0x65UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
+ #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
+ #define HWRM_CFA_L2_FILTER_FREE 0x91UL
+ #define HWRM_CFA_L2_FILTER_CFG 0x92UL
+ #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
+ #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
+ #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
+ #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
+ #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
+ #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
+ #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
+ #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
+ #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
+ #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
+ #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_STAT_CTX_ALLOC 0xb0UL
+ #define HWRM_STAT_CTX_FREE 0xb1UL
+ #define HWRM_STAT_CTX_QUERY 0xb2UL
+ #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
+ #define HWRM_FW_RESET 0xc0UL
+ #define HWRM_FW_QSTATUS 0xc1UL
+ #define HWRM_FW_SET_TIME 0xc8UL
+ #define HWRM_FW_GET_TIME 0xc9UL
+ #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
+ #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
+ #define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_EXEC_FWD_RESP 0xd0UL
+ #define HWRM_REJECT_FWD_RESP 0xd1UL
+ #define HWRM_FWD_RESP 0xd2UL
+ #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
+ #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
+ #define HWRM_WOL_FILTER_ALLOC 0xf0UL
+ #define HWRM_WOL_FILTER_FREE 0xf1UL
+ #define HWRM_WOL_FILTER_QCFG 0xf2UL
+ #define HWRM_WOL_REASON_QCFG 0xf3UL
+ #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
+ #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
+ #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
+ #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
+ #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
+ #define HWRM_CFA_VFR_ALLOC 0xfdUL
+ #define HWRM_CFA_VFR_FREE 0xfeUL
+ #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
+ #define HWRM_CFA_VF_PAIR_FREE 0x101UL
+ #define HWRM_CFA_VF_PAIR_INFO 0x102UL
+ #define HWRM_CFA_FLOW_ALLOC 0x103UL
+ #define HWRM_CFA_FLOW_FREE 0x104UL
+ #define HWRM_CFA_FLOW_FLUSH 0x105UL
+ #define HWRM_CFA_FLOW_STATS 0x106UL
+ #define HWRM_CFA_FLOW_INFO 0x107UL
+ #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
+ #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
+ #define HWRM_CFA_PAIR_ALLOC 0x10dUL
+ #define HWRM_CFA_PAIR_FREE 0x10eUL
+ #define HWRM_CFA_PAIR_INFO 0x10fUL
+ #define HWRM_FW_IPC_MSG 0x110UL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
+ #define HWRM_ENGINE_CKV_HELLO 0x12dUL
+ #define HWRM_ENGINE_CKV_STATUS 0x12eUL
+ #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
+ #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
+ #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
+ #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
+ #define HWRM_ENGINE_CKV_FLUSH 0x133UL
+ #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
+ #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
+ #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
+ #define HWRM_ENGINE_QG_QUERY 0x13dUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
+ #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
+ #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
+ #define HWRM_ENGINE_QG_METER_BIND 0x143UL
+ #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
+ #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
+ #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
+ #define HWRM_ENGINE_SG_QUERY 0x147UL
+ #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
+ #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
+ #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
+ #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
+ #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
+ #define HWRM_ENGINE_STATS_CONFIG 0x155UL
+ #define HWRM_ENGINE_STATS_CLEAR 0x156UL
+ #define HWRM_ENGINE_STATS_QUERY 0x157UL
+ #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
+ #define HWRM_ENGINE_RQ_FREE 0x15fUL
+ #define HWRM_ENGINE_CQ_ALLOC 0x160UL
+ #define HWRM_ENGINE_CQ_FREE 0x161UL
+ #define HWRM_ENGINE_NQ_ALLOC 0x162UL
+ #define HWRM_ENGINE_NQ_FREE 0x163UL
+ #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
+ #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
+ #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
+ #define HWRM_SELFTEST_QLIST 0x200UL
+ #define HWRM_SELFTEST_EXEC 0x201UL
+ #define HWRM_SELFTEST_IRQ 0x202UL
+ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
+ #define HWRM_DBG_READ_DIRECT 0xff10UL
+ #define HWRM_DBG_READ_INDIRECT 0xff11UL
+ #define HWRM_DBG_WRITE_DIRECT 0xff12UL
+ #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
+ #define HWRM_DBG_DUMP 0xff14UL
+ #define HWRM_DBG_ERASE_NVM 0xff15UL
+ #define HWRM_DBG_CFG 0xff16UL
+ #define HWRM_DBG_COREDUMP_LIST 0xff17UL
+ #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
+ #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
+ #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
+ #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
+ #define HWRM_NVM_FLUSH 0xfff0UL
+ #define HWRM_NVM_GET_VARIABLE 0xfff1UL
+ #define HWRM_NVM_SET_VARIABLE 0xfff2UL
+ #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
+ #define HWRM_NVM_MODIFY 0xfff4UL
+ #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
+ #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
+ #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
+ #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
+ #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
+ #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
+ #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
+ #define HWRM_NVM_RAW_DUMP 0xfffcUL
+ #define HWRM_NVM_READ 0xfffdUL
+ #define HWRM_NVM_WRITE 0xfffeUL
+ #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
+ #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
+ __le16 unused_0[3];
+};
+
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ __le16 unused_0[3];
+};
+
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 cmd_err;
+ u8 valid;
+};
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN 128
+#define HWRM_MAX_RESP_LEN 280
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 9
+#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_RSVD 0
+#define HWRM_VERSION_STR "1.9.0.0"
+
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 unused_0[5];
+};
+
+/* hwrm_ver_get_output (size:1408b/176B) */
+struct hwrm_ver_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hwrm_intf_maj_8b;
+ u8 hwrm_intf_min_8b;
+ u8 hwrm_intf_upd_8b;
+ u8 hwrm_intf_rsvd_8b;
+ u8 hwrm_fw_maj_8b;
+ u8 hwrm_fw_min_8b;
+ u8 hwrm_fw_bld_8b;
+ u8 hwrm_fw_rsvd_8b;
+ u8 mgmt_fw_maj_8b;
+ u8 mgmt_fw_min_8b;
+ u8 mgmt_fw_bld_8b;
+ u8 mgmt_fw_rsvd_8b;
+ u8 netctrl_fw_maj_8b;
+ u8 netctrl_fw_min_8b;
+ u8 netctrl_fw_bld_8b;
+ u8 netctrl_fw_rsvd_8b;
+ __le32 dev_caps_cfg;
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ u8 roce_fw_maj_8b;
+ u8 roce_fw_min_8b;
+ u8 roce_fw_bld_8b;
+ u8 roce_fw_rsvd_8b;
+ char hwrm_fw_name[16];
+ char mgmt_fw_name[16];
+ char netctrl_fw_name[16];
+ u8 reserved2[16];
+ char roce_fw_name[16];
+ __le16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u8 chip_bond_id;
+ u8 chip_platform_type;
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
+ __le16 max_req_win_len;
+ __le16 max_resp_len;
+ __le16 def_req_timeout;
+ u8 flags;
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ u8 unused_0[2];
+ u8 always_1;
+ __le16 hwrm_intf_major;
+ __le16 hwrm_intf_minor;
+ __le16 hwrm_intf_build;
+ __le16 hwrm_intf_patch;
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 max_ext_req_len;
+ u8 unused_1[5];
+ u8 valid;
+};
+
+/* eject_cmpl (size:128b/16B) */
struct eject_cmpl {
- __le16 type;
- #define EJECT_CMPL_TYPE_MASK 0x3fUL
- #define EJECT_CMPL_TYPE_SFT 0
- #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
- __le16 len;
- __le32 opaque;
- __le32 v;
- #define EJECT_CMPL_V 0x1UL
- __le32 unused_2;
-};
-
-/* HWRM Completion Record (16 bytes) */
+ __le16 type;
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ __le16 len;
+ __le32 opaque;
+ __le32 v;
+ #define EJECT_CMPL_V 0x1UL
+ __le32 unused_2;
+};
+
+/* hwrm_cmpl (size:128b/16B) */
struct hwrm_cmpl {
- __le16 type;
- #define CMPL_TYPE_MASK 0x3fUL
- #define CMPL_TYPE_SFT 0
- #define CMPL_TYPE_HWRM_DONE 0x20UL
- __le16 sequence_id;
- __le32 unused_1;
- __le32 v;
- #define CMPL_V 0x1UL
- __le32 unused_3;
-};
-
-/* HWRM Forwarded Request (16 bytes) */
+ __le16 type;
+ #define CMPL_TYPE_MASK 0x3fUL
+ #define CMPL_TYPE_SFT 0
+ #define CMPL_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
+ __le16 sequence_id;
+ __le32 unused_1;
+ __le32 v;
+ #define CMPL_V 0x1UL
+ __le32 unused_3;
+};
+
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
struct hwrm_fwd_req_cmpl {
- __le16 req_len_type;
- #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
- #define FWD_REQ_CMPL_TYPE_SFT 0
- #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
- #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
- #define FWD_REQ_CMPL_REQ_LEN_SFT 6
- __le16 source_id;
- __le32 unused_0;
- __le32 req_buf_addr_v[2];
- #define FWD_REQ_CMPL_V 0x1UL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
-};
-
-/* HWRM Forwarded Response (16 bytes) */
+ __le16 req_len_type;
+ #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_REQ_CMPL_TYPE_SFT 0
+ #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
+ #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
+ #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define FWD_REQ_CMPL_REQ_LEN_SFT 6
+ __le16 source_id;
+ __le32 unused0;
+ __le32 req_buf_addr_v[2];
+ #define FWD_REQ_CMPL_V 0x1UL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
+
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
struct hwrm_fwd_resp_cmpl {
- __le16 type;
- #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
- #define FWD_RESP_CMPL_TYPE_SFT 0
- #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
- __le16 source_id;
- __le16 resp_len;
- __le16 unused_1;
- __le32 resp_buf_addr_v[2];
- #define FWD_RESP_CMPL_V 0x1UL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
-};
-
-/* HWRM Asynchronous Event Completion Record (16 bytes) */
+ __le16 type;
+ #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_RESP_CMPL_TYPE_SFT 0
+ #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
+ #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+ __le16 source_id;
+ __le16 resp_len;
+ __le16 unused_1;
+ __le32 resp_buf_addr_v[2];
+ #define FWD_RESP_CMPL_V 0x1UL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* hwrm_async_event_cmpl (size:128b/16B) */
struct hwrm_async_event_cmpl {
- __le16 type;
- #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_V 0x1UL
- #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
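[Editor's note: async events reuse the completion layout but add an event_id and two data words, with the producer's opaque tag packed into bits 7:1 of opaque_v. A handler typically switches on event_id; a minimal sketch in which both sketch_* handlers are hypothetical placeholders.]

static void sketch_async_event(const struct hwrm_async_event_cmpl *cmpl)
{
	switch (le16_to_cpu(cmpl->event_id)) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		sketch_link_status(cmpl->event_data1);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
		sketch_hwrm_error(cmpl->event_data2);
		break;
	default:
		break;
	}
}
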
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
struct hwrm_async_event_cmpl_link_status_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
-};
-
-/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
-struct hwrm_async_event_cmpl_link_mtu_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
-struct hwrm_async_event_cmpl_link_speed_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
-struct hwrm_async_event_cmpl_dcb_config_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16)
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24)
- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
-};
-
-/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+};
+
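[Editor's note: in this event, event_data1 carries the new link state in bit 0 (set means up) and the port ID in bits 19:4, so both decode from one word with the masks above. A short sketch, assuming <linux/printk.h> for the logging.]

static void sketch_link_status(__le32 data1_le)
{
	u32 data1 = le32_to_cpu(data1_le);
	bool up = data1 & ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE;
	u32 port_id = (data1 & ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK) >>
		      ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT;

	pr_info("port %u link %s\n", port_id, up ? "up" : "down");
}
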
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
struct hwrm_async_event_cmpl_port_conn_not_allowed {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
-struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_link_speed_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
-};
-
-/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
-struct hwrm_async_event_cmpl_func_drvr_unload {
- __le16 type;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
-struct hwrm_async_event_cmpl_func_drvr_load {
- __le16 type;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record to indicate completion of FLR related processing (16 bytes) */
-struct hwrm_async_event_cmpl_func_flr_proc_cmplt {
- __le16 type;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V 0x1UL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+};
+
+/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */
struct hwrm_async_event_cmpl_pf_drvr_unload {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
-struct hwrm_async_event_cmpl_pf_drvr_load {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
-struct hwrm_async_event_cmpl_vf_flr {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
-struct hwrm_async_event_cmpl_vf_mac_addr_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
-struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
-};
-
-/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
+};
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
-};
-
-/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
-struct hwrm_async_event_cmpl_hwrm_error {
- __le16 type;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
-};
-
-/* hwrm_ver_get */
-/* Input (24 bytes) */
-struct hwrm_ver_get_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 hwrm_intf_maj;
- u8 hwrm_intf_min;
- u8 hwrm_intf_upd;
- u8 unused_0[5];
-};
-
-/* Output (128 bytes) */
-struct hwrm_ver_get_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hwrm_intf_maj;
- u8 hwrm_intf_min;
- u8 hwrm_intf_upd;
- u8 hwrm_intf_rsvd;
- u8 hwrm_fw_maj;
- u8 hwrm_fw_min;
- u8 hwrm_fw_bld;
- u8 hwrm_fw_rsvd;
- u8 mgmt_fw_maj;
- u8 mgmt_fw_min;
- u8 mgmt_fw_bld;
- u8 mgmt_fw_rsvd;
- u8 netctrl_fw_maj;
- u8 netctrl_fw_min;
- u8 netctrl_fw_bld;
- u8 netctrl_fw_rsvd;
- __le32 dev_caps_cfg;
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
- u8 roce_fw_maj;
- u8 roce_fw_min;
- u8 roce_fw_bld;
- u8 roce_fw_rsvd;
- char hwrm_fw_name[16];
- char mgmt_fw_name[16];
- char netctrl_fw_name[16];
- __le32 reserved2[4];
- char roce_fw_name[16];
- __le16 chip_num;
- u8 chip_rev;
- u8 chip_metal;
- u8 chip_bond_id;
- u8 chip_platform_type;
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
- __le16 max_req_win_len;
- __le16 max_resp_len;
- __le16 def_req_timeout;
- u8 init_pending;
- #define VER_GET_RESP_INIT_PENDING_DEV_NOT_RDY 0x1UL
- u8 unused_0;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_func_reset */
-/* Input (24 bytes) */
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+};
+
+/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
- __le16 vf_id;
- u8 func_reset_level;
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ u8 func_reset_level;
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
+ u8 unused_0;
+};
+
+/* hwrm_func_reset_output (size:128b/16B) */
struct hwrm_func_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_getfid */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_getfid_input (size:192b/24B) */
struct hwrm_func_getfid_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
- __le16 pci_id;
- __le16 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_func_getfid_output (size:128b/16B) */
struct hwrm_func_getfid_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_func_vf_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
struct hwrm_func_vf_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
struct hwrm_func_vf_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 first_vf_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_func_vf_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
struct hwrm_func_vf_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
struct hwrm_func_vf_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_cfg */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg_input (size:448b/56B) */
struct hwrm_func_vf_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
- #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
- #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
- __le16 mtu;
- __le16 guest_vlan;
- __le16 async_event_cr;
- u8 dflt_mac_addr[6];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ __le16 mtu;
+ __le16 guest_vlan;
+ __le16 async_event_cr;
+ u8 dflt_mac_addr[6];
+ __le32 flags;
+ #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
+ #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
+ #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
+ #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
+ #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 unused_0[4];
+};
+
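[Editor's note: the grown request lets a VF ask for explicit resource counts, and the new *_ASSETS_TEST flags appear to request an availability check without reserving anything; either way, a numeric field is only honoured when its bit is set in enables. A sketch of filling the new ring fields — header/sequence setup and the actual send are omitted.]

static void sketch_vf_cfg_rings(struct hwrm_func_vf_cfg_input *req,
				u16 tx_rings, u16 rx_rings, bool test_only)
{
	req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
				   FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS);
	req->num_tx_rings = cpu_to_le16(tx_rings);
	req->num_rx_rings = cpu_to_le16(rx_rings);
	if (test_only)
		req->flags = cpu_to_le32(FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
					 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST);
}
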
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
struct hwrm_func_vf_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_qcaps_input (size:192b/24B) */
struct hwrm_func_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};
-/* Output (80 bytes) */
+/* hwrm_func_qcaps_output (size:640b/80B) */
struct hwrm_func_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le32 flags;
- #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
- u8 mac_address[6];
- __le16 max_rsscos_ctx;
- __le16 max_cmpl_rings;
- __le16 max_tx_rings;
- __le16 max_rx_rings;
- __le16 max_l2_ctxs;
- __le16 max_vnics;
- __le16 first_vf_id;
- __le16 max_vfs;
- __le16 max_stat_ctx;
- __le32 max_encap_records;
- __le32 max_decap_records;
- __le32 max_tx_em_flows;
- __le32 max_tx_wm_flows;
- __le32 max_rx_em_flows;
- __le32 max_rx_wm_flows;
- __le32 max_mcast_filters;
- __le32 max_flow_id;
- __le32 max_hw_ring_grps;
- __le16 max_sp_tx_rings;
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_func_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ u8 mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
+ u8 unused_0;
+ u8 valid;
+};
+
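[Editor's note: capabilities come back as individual flag bits in a single word, so callers simply mask the cached value. For example, a wake-on-LAN path might gate on the magic-packet bit — a trivial sketch.]

static bool sketch_wol_magicpkt_capable(const struct hwrm_func_qcaps_output *resp)
{
	return le32_to_cpu(resp->flags) &
	       FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED;
}
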
+/* hwrm_func_qcfg_input (size:192b/24B) */
struct hwrm_func_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};
-/* Output (72 bytes) */
+/* hwrm_func_qcfg_output (size:640b/80B) */
struct hwrm_func_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le16 vlan;
- __le16 flags;
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
- #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
- #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
- #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
- #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
- u8 mac_address[6];
- __le16 pci_id;
- __le16 alloc_rsscos_ctx;
- __le16 alloc_cmpl_rings;
- __le16 alloc_tx_rings;
- __le16 alloc_rx_rings;
- __le16 alloc_l2_ctx;
- __le16 alloc_vnics;
- __le16 mtu;
- __le16 mru;
- __le16 stat_ctx_id;
- u8 port_partition_type;
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
- u8 port_pf_cnt;
- #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
- __le16 dflt_vnic_id;
- __le16 max_mtu_configured;
- __le32 min_bw;
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le16 vlan;
+ __le16 flags;
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ u8 mac_address[6];
+ __le16 pci_id;
+ __le16 alloc_rsscos_ctx;
+ __le16 alloc_cmpl_rings;
+ __le16 alloc_tx_rings;
+ __le16 alloc_rx_rings;
+ __le16 alloc_l2_ctx;
+ __le16 alloc_vnics;
+ __le16 mtu;
+ __le16 mru;
+ __le16 stat_ctx_id;
+ u8 port_partition_type;
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
+ u8 port_pf_cnt;
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
+ __le16 dflt_vnic_id;
+ __le16 max_mtu_configured;
+ __le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 evb_mode;
- #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
- u8 unused_0;
- __le16 alloc_vfs;
- __le32 alloc_mcast_filters;
- __le32 alloc_hw_ring_grps;
- __le16 alloc_sp_tx_rings;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_func_vlan_cfg */
-/* Input (48 bytes) */
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 evb_mode;
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
+ #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
+ u8 cache_linesize;
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128
+ __le16 alloc_vfs;
+ __le32 alloc_mcast_filters;
+ __le32 alloc_hw_ring_grps;
+ __le16 alloc_sp_tx_rings;
+ __le16 alloc_stat_ctx;
+ u8 unused_2[7];
+ u8 valid;
+};
+
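[Editor's note: min_bw and max_bw use a three-part encoding — a 28-bit value, one SCALE bit choosing bits versus bytes, and a 3-bit unit in the top bits. A decode sketch covering only the decimal multipliers; the kilo/mega/giga factors of 1000 are an assumption, and PERCENT1_100, BASE and INVALID are left to the caller.]

static u64 sketch_decode_bw_kilo(u32 bw)
{
	u64 val = bw & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK;

	switch (bw & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK) {
	case FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO:
		return val;
	case FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA:
		return val * 1000;
	case FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA:
		return val * 1000000;
	default:
		return 0;	/* BASE/percent/invalid not handled here */
	}
}
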
+/* hwrm_func_vlan_cfg_input (size:384b/48B) */
struct hwrm_func_vlan_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- __le32 enables;
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
- __le16 stag_vid;
- u8 stag_pcp;
- u8 unused_2;
- __be16 stag_tpid;
- __le16 ctag_vid;
- u8 ctag_pcp;
- u8 unused_3;
- __be16 ctag_tpid;
- __le32 rsvd1;
- __le32 rsvd2;
- __le32 unused_4;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 enables;
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
+ __le16 stag_vid;
+ u8 stag_pcp;
+ u8 unused_1;
+ __be16 stag_tpid;
+ __le16 ctag_vid;
+ u8 ctag_pcp;
+ u8 unused_2;
+ __be16 ctag_tpid;
+ __le32 rsvd1;
+ __le32 rsvd2;
+ u8 unused_3[4];
+};
+
+/* hwrm_func_vlan_cfg_output (size:128b/16B) */
struct hwrm_func_vlan_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_cfg */
-/* Input (88 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_cfg_input (size:704b/88B) */
struct hwrm_func_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0;
- u8 unused_1;
- __le32 flags;
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
- #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
- #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
- #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
- #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
- #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
- __le32 enables;
- #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
- #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
- #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
- #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
- #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
- #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
- #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
- #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
- #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
- #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
- #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
- __le16 mtu;
- __le16 mru;
- __le16 num_rsscos_ctxs;
- __le16 num_cmpl_rings;
- __le16 num_tx_rings;
- __le16 num_rx_rings;
- __le16 num_l2_ctxs;
- __le16 num_vnics;
- __le16 num_stat_ctxs;
- __le16 num_hw_ring_grps;
- u8 dflt_mac_addr[6];
- __le16 dflt_vlan;
- __be32 dflt_ip_addr[4];
- __le32 min_bw;
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- __le16 async_event_cr;
- u8 vlan_antispoof_mode;
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
+ #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
+ #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
+ #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
+ #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
+ #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
+ #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
+ #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
+ #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
+ #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ __le16 mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
- u8 allowed_vlan_pris;
- u8 evb_mode;
- #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
- #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
- u8 unused_2;
- __le16 num_mcast_filters;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+ u8 allowed_vlan_pris;
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
+ #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
+ u8 cache_linesize;
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
+ #define FUNC_CFG_REQ_CACHE_LINESIZE_LAST FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128
+ __le16 num_mcast_filters;
+};
+
+/* hwrm_func_cfg_output (size:128b/16B) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
};
-/* Output (16 bytes) */
-struct hwrm_func_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_qstats */
-/* Input (24 bytes) */
+/* hwrm_func_qstats_input (size:192b/24B) */
struct hwrm_func_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};
-/* Output (176 bytes) */
+/* hwrm_func_qstats_output (size:1408b/176B) */
struct hwrm_func_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_clr_stats */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
struct hwrm_func_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_func_clr_stats_output (size:128b/16B) */
struct hwrm_func_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_resc_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
struct hwrm_func_vf_resc_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
struct hwrm_func_vf_resc_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_vf_vnic_ids_query */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
struct hwrm_func_vf_vnic_ids_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- u8 unused_0;
- u8 unused_1;
- __le32 max_vnic_id_cnt;
- __le64 vnic_id_tbl_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[2];
+ __le32 max_vnic_id_cnt;
+ __le64 vnic_id_tbl_addr;
+};
+
+/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
struct hwrm_func_vf_vnic_ids_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id_cnt;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_func_drv_rgtr */
-/* Input (80 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id_cnt;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_drv_rgtr_input (size:832b/104B) */
struct hwrm_func_drv_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
- __le32 enables;
- #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
- __le16 os_type;
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
- u8 ver_maj;
- u8 ver_min;
- u8 ver_upd;
- u8 unused_0;
- __le16 unused_1;
- __le32 timestamp;
- __le32 unused_2;
- __le32 vf_req_fwd[8];
- __le32 async_event_fwd[8];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0[3];
+ __le32 timestamp;
+ u8 unused_1[4];
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+};
+
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
struct hwrm_func_drv_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_drv_unrgtr */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
struct hwrm_func_drv_unrgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
#define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
- __le32 unused_0;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
struct hwrm_func_drv_unrgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_buf_rgtr */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
struct hwrm_func_buf_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
- #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
- __le16 vf_id;
- __le16 req_buf_num_pages;
- __le16 req_buf_page_size;
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
- __le16 req_buf_len;
- __le16 resp_buf_len;
- u8 unused_0;
- u8 unused_1;
- __le64 req_buf_page_addr0;
- __le64 req_buf_page_addr1;
- __le64 req_buf_page_addr2;
- __le64 req_buf_page_addr3;
- __le64 req_buf_page_addr4;
- __le64 req_buf_page_addr5;
- __le64 req_buf_page_addr6;
- __le64 req_buf_page_addr7;
- __le64 req_buf_page_addr8;
- __le64 req_buf_page_addr9;
- __le64 error_buf_addr;
- __le64 resp_buf_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0[2];
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
+
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
struct hwrm_func_buf_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_func_drv_qver */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_qver_input (size:192b/24B) */
struct hwrm_func_drv_qver_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 reserved;
- __le16 fid;
- __le16 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 reserved;
+ __le16 fid;
+ u8 unused_0[2];
};
-/* Output (16 bytes) */
+/* hwrm_func_drv_qver_output (size:128b/16B) */
struct hwrm_func_drv_qver_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 os_type;
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
- u8 ver_maj;
- u8 ver_min;
- u8 ver_upd;
- u8 unused_0;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_port_phy_cfg */
-/* Input (56 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_resource_qcaps_output (size:384b/48B) */
+struct hwrm_func_resource_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_vfs;
+ __le16 max_msix;
+ __le16 vf_reservation_strategy;
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+struct hwrm_func_vf_resource_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 max_msix;
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ u8 unused_0[4];
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
+struct hwrm_func_vf_resource_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reserved_rsscos_ctx;
+ __le16 reserved_cmpl_rings;
+ __le16 reserved_tx_rings;
+ __le16 reserved_rx_rings;
+ __le16 reserved_l2_ctxs;
+ __le16 reserved_vnics;
+ __le16 reserved_stat_ctx;
+ __le16 reserved_hw_ring_grps;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg_input (size:448b/56B) */
struct hwrm_port_phy_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
- __le32 enables;
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
- __le16 port_id;
- __le16 force_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
- u8 auto_mode;
- #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
- u8 auto_duplex;
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
- u8 auto_pause;
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- u8 unused_0;
- __le16 auto_link_speed;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
- __le16 auto_link_speed_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
- u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
- u8 force_pause;
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
- u8 unused_1;
- __le32 preemphasis;
- __le16 eee_link_speed_mask;
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
- u8 unused_2;
- u8 unused_3;
- __le32 tx_lpi_timer;
- __le32 unused_4;
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ u8 unused_0;
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_REMOTE
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le16 eee_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ u8 unused_2[2];
+ __le32 tx_lpi_timer;
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
+ __le32 unused_3;
+};
+
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
struct hwrm_port_phy_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_phy_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
struct hwrm_port_phy_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (96 bytes) */
+/* hwrm_port_phy_qcfg_output (size:768b/96B) */
struct hwrm_port_phy_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 link;
- #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
- u8 unused_0;
- __le16 link_speed;
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
- u8 duplex_cfg;
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
- u8 pause;
- #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
- __le16 support_speeds;
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
- __le16 force_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
- u8 auto_mode;
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
- u8 auto_pause;
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- __le16 auto_link_speed;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
- __le16 auto_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
- u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
- u8 force_pause;
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
- u8 module_status;
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
- __le32 preemphasis;
- u8 phy_maj;
- u8 phy_min;
- u8 phy_bld;
- u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
- u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
- u8 xcvr_pkg_type;
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
- u8 eee_config_phy_addr;
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
- u8 parallel_detect;
- #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
- #define PORT_PHY_QCFG_RESP_RESERVED_MASK 0xfeUL
- #define PORT_PHY_QCFG_RESP_RESERVED_SFT 1
- __le16 link_partner_adv_speeds;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
- u8 link_partner_adv_auto_mode;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 unused_0;
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
+ u8 duplex_cfg;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_REMOTE
+ u8 force_pause;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+ u8 module_status;
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX
+ u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE
+ u8 xcvr_pkg_type;
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
+ u8 eee_config_phy_addr;
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
+ u8 parallel_detect;
+ #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
+ __le16 link_partner_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
+ u8 link_partner_adv_auto_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
- u8 link_partner_adv_pause;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
- __le16 adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le16 link_partner_adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le32 xcvr_identifier_type_tx_lpi_timer;
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
+ u8 link_partner_adv_pause;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ __le16 adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 link_partner_adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
- __le16 fec_cfg;
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
- u8 duplex_state;
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
- u8 unused_1;
- char phy_vendor_name[16];
- char phy_vendor_partnumber[16];
- __le32 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_port_mac_cfg */
-/* Input (40 bytes) */
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
+ __le16 fec_cfg;
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ u8 duplex_state;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
+ u8 option_flags;
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ u8 unused_2[7];
+ u8 valid;
+};
+
+/* hwrm_port_mac_cfg_input (size:320b/40B) */
struct hwrm_port_mac_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
- __le32 enables;
- #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
- #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
- #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
- #define PORT_MAC_CFG_REQ_ENABLES_RESERVED1 0x8UL
- #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
- #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
- #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
- #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
- #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
- __le16 port_id;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
- u8 vlan_pri2cos_map_pri;
- u8 reserved1;
- u8 tunnel_pri2cos_map_pri;
- u8 dscp2pri_map_pri;
- __le16 rx_ts_capture_ptp_msg_type;
- __le16 tx_ts_capture_ptp_msg_type;
- u8 cos_field_cfg;
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
- u8 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+ __le32 enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+ __le16 port_id;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
+ u8 vlan_pri2cos_map_pri;
+ u8 reserved1;
+ u8 tunnel_pri2cos_map_pri;
+ u8 dscp2pri_map_pri;
+ __le16 rx_ts_capture_ptp_msg_type;
+ __le16 tx_ts_capture_ptp_msg_type;
+ u8 cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ u8 unused_0[3];
+};
+
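A convention worth noting in the request structures: the firmware acts on a field only when its corresponding bit in enables is set, so fields without their bit set are left as-is. A hedged standalone sketch of preparing a port_mac_cfg-style request that sets only IPG and local loopback; the struct and constants below are simplified host-order stand-ins mirroring the definitions above, not the wire-format struct itself:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENABLES_IPG  0x1u /* PORT_MAC_CFG_REQ_ENABLES_IPG */
#define ENABLES_LPBK 0x2u /* PORT_MAC_CFG_REQ_ENABLES_LPBK */
#define LPBK_LOCAL   0x1u /* PORT_MAC_CFG_REQ_LPBK_LOCAL */

struct mac_cfg_req {	/* host-order stand-in; the real struct uses __le types */
	uint32_t flags;
	uint32_t enables;
	uint16_t port_id;
	uint8_t  ipg;
	uint8_t  lpbk;
};

int main(void)
{
	struct mac_cfg_req req;

	memset(&req, 0, sizeof(req));
	req.enables = ENABLES_IPG | ENABLES_LPBK; /* only these fields count */
	req.port_id = 0;
	req.ipg = 12;		/* illustrative inter-packet-gap value */
	req.lpbk = LPBK_LOCAL;	/* request MAC-local loopback */

	printf("enables=0x%x ipg=%u lpbk=%u\n", (unsigned int)req.enables,
	       (unsigned int)req.ipg, (unsigned int)req.lpbk);
	return 0;
}
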
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
struct hwrm_port_mac_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- __le16 mtu;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_port_mac_ptp_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ __le16 mtu;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
struct hwrm_port_mac_ptp_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (80 bytes) */
+/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
struct hwrm_port_mac_ptp_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
- u8 unused_0;
- __le16 unused_1;
- __le32 rx_ts_reg_off_lower;
- __le32 rx_ts_reg_off_upper;
- __le32 rx_ts_reg_off_seq_id;
- __le32 rx_ts_reg_off_src_id_0;
- __le32 rx_ts_reg_off_src_id_1;
- __le32 rx_ts_reg_off_src_id_2;
- __le32 rx_ts_reg_off_domain_id;
- __le32 rx_ts_reg_off_fifo;
- __le32 rx_ts_reg_off_fifo_adv;
- __le32 rx_ts_reg_off_granularity;
- __le32 tx_ts_reg_off_lower;
- __le32 tx_ts_reg_off_upper;
- __le32 tx_ts_reg_off_seq_id;
- __le32 tx_ts_reg_off_fifo;
- __le32 tx_ts_reg_off_granularity;
- __le32 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_port_qstats */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
+ u8 unused_0[3];
+ __le32 rx_ts_reg_off_lower;
+ __le32 rx_ts_reg_off_upper;
+ __le32 rx_ts_reg_off_seq_id;
+ __le32 rx_ts_reg_off_src_id_0;
+ __le32 rx_ts_reg_off_src_id_1;
+ __le32 rx_ts_reg_off_src_id_2;
+ __le32 rx_ts_reg_off_domain_id;
+ __le32 rx_ts_reg_off_fifo;
+ __le32 rx_ts_reg_off_fifo_adv;
+ __le32 rx_ts_reg_off_granularity;
+ __le32 tx_ts_reg_off_lower;
+ __le32 tx_ts_reg_off_upper;
+ __le32 tx_ts_reg_off_seq_id;
+ __le32 tx_ts_reg_off_fifo;
+ __le32 tx_ts_reg_off_granularity;
+ u8 unused_1[7];
+ u8 valid;
+};
+
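The PTP query response carries register offsets rather than timestamp values; the DIRECT_ACCESS flag evidently indicates the host should read those registers itself, with each timestamp split into lower/upper 32-bit halves. A small sketch of the recombination, assuming the two 32-bit reads have already been performed (the MMIO mechanics are device-specific and omitted):

#include <stdint.h>
#include <stdio.h>

/* Recombine the lower/upper timestamp halves read back from the offsets
 * the firmware reported (e.g. rx_ts_reg_off_lower / rx_ts_reg_off_upper).
 */
static uint64_t ts_combine(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* Sample values standing in for the two MMIO reads. */
	uint32_t lo = 0x89abcdefu, hi = 0x01234567u;

	printf("timestamp=0x%016llx\n",
	       (unsigned long long)ts_combine(lo, hi));
	return 0;
}
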
+/* hwrm_port_qstats_input (size:320b/40B) */
struct hwrm_port_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2[3];
- u8 unused_3;
- __le64 tx_stat_host_addr;
- __le64 rx_stat_host_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_output (size:128b/16B) */
struct hwrm_port_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_port_lpbk_qstats */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
struct hwrm_port_lpbk_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (96 bytes) */
+/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
struct hwrm_port_lpbk_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 lpbk_ucast_frames;
- __le64 lpbk_mcast_frames;
- __le64 lpbk_bcast_frames;
- __le64 lpbk_ucast_bytes;
- __le64 lpbk_mcast_bytes;
- __le64 lpbk_bcast_bytes;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
- __le64 rx_stat_discard;
- __le64 rx_stat_error;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_clr_stats */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 lpbk_ucast_frames;
+ __le64 lpbk_mcast_frames;
+ __le64 lpbk_bcast_frames;
+ __le64 lpbk_ucast_bytes;
+ __le64 lpbk_mcast_bytes;
+ __le64 lpbk_bcast_bytes;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_error;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
struct hwrm_port_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_port_clr_stats_output (size:128b/16B) */
struct hwrm_port_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_lpbk_clr_stats */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
struct hwrm_port_lpbk_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
struct hwrm_port_lpbk_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_phy_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
struct hwrm_port_phy_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (24 bytes) */
+/* hwrm_port_phy_qcaps_output (size:192b/24B) */
struct hwrm_port_phy_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
- u8 port_cnt;
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
- __le16 supported_speeds_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
- __le16 supported_speeds_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
- __le16 supported_speeds_eee_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
- __le32 tx_lpi_timer_low;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
- #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
- __le32 valid_tx_lpi_timer_high;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
- #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
-};
-
-/* hwrm_port_phy_i2c_read */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
+ u8 port_cnt;
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_4
+ __le16 supported_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+ __le16 supported_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+ __le16 supported_speeds_eee_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+ __le32 tx_lpi_timer_low;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+ __le32 valid_tx_lpi_timer_high;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+ #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
+};
+
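The three supported_speeds_* words above are independent bitmasks for forced, autonegotiated, and EEE operation, so capability tests reduce to single AND operations. A standalone sketch mirroring a few of the force-mode bits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local mirrors of a few PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_*
 * bits from above.
 */
#define FORCE_1GB  0x8u
#define FORCE_10GB 0x40u
#define FORCE_25GB 0x100u

static bool can_force(uint16_t mask, uint16_t speed_bit)
{
	return (mask & speed_bit) != 0;
}

int main(void)
{
	uint16_t mask = FORCE_1GB | FORCE_10GB; /* sample response value */

	printf("1G:%d 10G:%d 25G:%d\n",
	       can_force(mask, FORCE_1GB),
	       can_force(mask, FORCE_10GB),
	       can_force(mask, FORCE_25GB));
	return 0;
}
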
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
struct hwrm_port_phy_i2c_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
- __le16 port_id;
- u8 i2c_slave_addr;
- u8 unused_0;
- __le16 page_number;
- __le16 page_offset;
- u8 data_length;
- u8 unused_1[7];
-};
-
-/* Output (80 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 unused_0;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
struct hwrm_port_phy_i2c_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 data[16];
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_led_cfg */
-/* Input (64 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 data[16];
+ u8 unused_0[7];
+ u8 valid;
+};
+
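The i2c read request addresses a device (presumably a pluggable-module EEPROM, given the transceiver types earlier) by slave address, page number, and page offset, and the response returns up to 64 bytes in data[16], bounded by the requested data_length. A sketch of extracting the byte payload from the word array; it assumes a little-endian host, matching the little-endian wire format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Stand-in for resp->data[16]: up to 64 bytes of module EEPROM.
	 * 0x64636261 stored little-endian is the bytes 'a' 'b' 'c' 'd'.
	 */
	uint32_t data[16] = { 0x64636261u };
	uint8_t buf[64];
	unsigned int data_length = 4; /* as requested in the input struct */

	memcpy(buf, data, data_length);
	printf("%c%c%c%c\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
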
+/* hwrm_port_led_cfg_input (size:512b/64B) */
struct hwrm_port_led_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
- __le16 port_id;
- u8 num_leds;
- u8 rsvd;
- u8 led0_id;
- u8 led0_state;
- #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
- u8 led0_color;
- #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
- u8 unused_0;
- __le16 led0_blink_on;
- __le16 led0_blink_off;
- u8 led0_group_id;
- u8 rsvd0;
- u8 led1_id;
- u8 led1_state;
- #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
- u8 led1_color;
- #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
- u8 unused_1;
- __le16 led1_blink_on;
- __le16 led1_blink_off;
- u8 led1_group_id;
- u8 rsvd1;
- u8 led2_id;
- u8 led2_state;
- #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
- u8 led2_color;
- #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
- u8 unused_2;
- __le16 led2_blink_on;
- __le16 led2_blink_off;
- u8 led2_group_id;
- u8 rsvd2;
- u8 led3_id;
- u8 led3_state;
- #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
- u8 led3_color;
- #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
- u8 unused_3;
- __le16 led3_blink_on;
- __le16 led3_blink_off;
- u8 led3_group_id;
- u8 rsvd3;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+ __le16 port_id;
+ u8 num_leds;
+ u8 rsvd;
+ u8 led0_id;
+ u8 led0_state;
+ #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 rsvd0;
+ u8 led1_id;
+ u8 led1_state;
+ #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 rsvd1;
+ u8 led2_id;
+ u8 led2_state;
+ #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 rsvd2;
+ u8 led3_id;
+ u8 led3_state;
+ #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 rsvd3;
+};
+
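Each LED in the request above carries its own six enables bits, and the groups are contiguous: LED1's bits are LED0's shifted left by 6, LED2's by 12, LED3's by 18 (0x40 = 0x1 << 6, 0x1000 = 0x1 << 12, and so on). A standalone sketch exploiting that regularity; illustrative only, since code using this header would normally just name the LEDn defines directly:

#include <stdint.h>
#include <stdio.h>

/* LED0's enables bits; each later LED's group is the same bits shifted
 * left by 6 per LED (compare the LED1/LED2/LED3 defines above).
 */
#define EN_LED0_STATE     0x2u
#define EN_LED0_BLINK_ON  0x8u
#define EN_LED0_BLINK_OFF 0x10u

static uint32_t led_enable_bits(unsigned int led, uint32_t led0_bits)
{
	return led0_bits << (led * 6);
}

int main(void)
{
	/* Ask firmware to honour state plus blink timings for LED2 only. */
	uint32_t enables = led_enable_bits(2, EN_LED0_STATE |
					      EN_LED0_BLINK_ON |
					      EN_LED0_BLINK_OFF);

	/* 0x2000 | 0x8000 | 0x10000, matching the LED2_* defines above */
	printf("enables=0x%x\n", (unsigned int)enables);
	return 0;
}
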
+/* hwrm_port_led_cfg_output (size:128b/16B) */
struct hwrm_port_led_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_port_led_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
+ u8 led0_state;
+ #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
+ u8 led1_state;
+ #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
+ u8 led2_state;
+ #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
+ u8 led3_state;
+ #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 unused_4[6];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
struct hwrm_port_led_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (48 bytes) */
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
struct hwrm_port_led_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_leds;
- u8 unused_0[3];
- u8 led0_id;
- u8 led0_type;
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
- u8 led0_group_id;
- u8 unused_1;
- __le16 led0_state_caps;
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led0_color_caps;
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led1_id;
- u8 led1_type;
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
- u8 led1_group_id;
- u8 unused_2;
- __le16 led1_state_caps;
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led1_color_caps;
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led2_id;
- u8 led2_type;
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
- u8 led2_group_id;
- u8 unused_3;
- __le16 led2_state_caps;
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led2_color_caps;
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led3_id;
- u8 led3_type;
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
- u8 led3_group_id;
- u8 unused_4;
- __le16 led3_state_caps;
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led3_color_caps;
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 unused_5;
- u8 unused_6;
- u8 unused_7;
- u8 valid;
-};
-
-/* hwrm_queue_qportcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 unused[3];
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
+ u8 led0_group_id;
+ u8 unused_0;
+ __le16 led0_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led0_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
+ u8 led1_group_id;
+ u8 unused_1;
+ __le16 led1_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led1_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
+ u8 led2_group_id;
+ u8 unused_2;
+ __le16 led2_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led2_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
+ u8 led3_group_id;
+ u8 unused_3;
+ __le16 led3_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led3_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 unused_4[3];
+ u8 valid;
+};
+
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
struct hwrm_queue_qportcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
- __le16 port_id;
- __le16 unused_0;
-};
-
-/* Output (32 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+ __le16 port_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_queue_qportcfg_output (size:256b/32B) */
struct hwrm_queue_qportcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 max_configurable_queues;
- u8 max_configurable_lossless_queues;
- u8 queue_cfg_allowed;
- u8 queue_cfg_info;
- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 queue_pfcenable_cfg_allowed;
- u8 queue_pri2cos_cfg_allowed;
- u8 queue_cos2bw_cfg_allowed;
- u8 queue_id0;
- u8 queue_id0_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 max_configurable_queues;
+ u8 max_configurable_lossless_queues;
+ u8 queue_cfg_allowed;
+ u8 queue_cfg_info;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 queue_pfcenable_cfg_allowed;
+ u8 queue_pri2cos_cfg_allowed;
+ u8 queue_cos2bw_cfg_allowed;
+ u8 queue_id0;
+ u8 queue_id0_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id1;
- u8 queue_id1_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id1;
+ u8 queue_id1_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id2;
- u8 queue_id2_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id2;
+ u8 queue_id2_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id3;
- u8 queue_id3_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id3;
+ u8 queue_id3_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id4;
- u8 queue_id4_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id4;
+ u8 queue_id4_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id5;
- u8 queue_id5_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id5;
+ u8 queue_id5_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id6;
- u8 queue_id6_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id6;
+ u8 queue_id6_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 queue_id7;
- u8 queue_id7_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id7;
+ u8 queue_id7_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 valid;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ u8 valid;
};
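
The eight (queue_idN, queue_idN_service_profile) fields above are consecutive u8 pairs, so a driver may walk them with a byte pointer instead of naming all sixteen members. A standalone sketch of that walk over a stand-in buffer, with local mirrors of the profile values:

#include <stdint.h>
#include <stdio.h>

#define PROFILE_LOSSY    0x00u /* ..._SERVICE_PROFILE_LOSSY */
#define PROFILE_LOSSLESS 0x01u /* ..._SERVICE_PROFILE_LOSSLESS */
#define PROFILE_UNKNOWN  0xffu /* ..._SERVICE_PROFILE_UNKNOWN */

int main(void)
{
	/* Stand-in for &resp->queue_id0: alternating id/profile bytes. */
	uint8_t pairs[] = { 0, PROFILE_LOSSY, 1, PROFILE_LOSSLESS,
			    2, PROFILE_UNKNOWN };
	unsigned int nq = 3; /* e.g. resp->max_configurable_queues */
	const uint8_t *p = pairs;
	unsigned int i;

	for (i = 0; i < nq; i++) {
		unsigned int id = *p++;
		unsigned int profile = *p++;

		printf("queue %u: id=%u profile=0x%02x\n", i, id, profile);
	}
	return 0;
}
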
-/* hwrm_queue_cfg */
-/* Input (40 bytes) */
+/* hwrm_queue_cfg_input (size:320b/40B) */
struct hwrm_queue_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
- __le32 enables;
- #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
- #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
- __le32 queue_id;
- __le32 dflt_len;
- u8 service_profile;
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+ __le32 enables;
+ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+ __le32 queue_id;
+ __le32 dflt_len;
+ u8 service_profile;
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
struct hwrm_queue_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
struct hwrm_queue_pfcenable_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
struct hwrm_queue_pfcenable_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_cfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
struct hwrm_queue_pfcenable_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
@@ -2440,1729 +2703,1664 @@ struct hwrm_queue_pfcenable_cfg_input {
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
- __le16 port_id;
- __le16 unused_0;
+ __le16 port_id;
+ u8 unused_0[2];
};
-/* Output (16 bytes) */
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
struct hwrm_queue_pfcenable_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
struct hwrm_queue_pri2cos_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
- u8 port_id;
- u8 unused_0[3];
-};
-
-/* Output (24 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
+ u8 port_id;
+ u8 unused_0[3];
+};
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
struct hwrm_queue_pri2cos_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 queue_cfg_info;
- #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 queue_cfg_info;
+ #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
struct hwrm_queue_pri2cos_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR (0x2UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
- __le32 enables;
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
- u8 port_id;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
+ __le32 enables;
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
+ u8 port_id;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 unused_0[7];
+};
+
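/*
 * [Illustrative aside, not part of the diff.] Each priority has its own
 * enable bit, so one pri->CoS entry can be updated without disturbing the
 * rest.  hwrm_pri2cos_cfg_set_pri0() is a hypothetical helper showing
 * pri0 only; the other seven priorities follow the same pattern.
 */
static inline void hwrm_pri2cos_cfg_set_pri0(struct hwrm_queue_pri2cos_cfg_input *req,
					     u8 port_id, u8 cos_queue_id)
{
	req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR);
	req->enables = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID);
	req->port_id = port_id;
	req->pri0_cos_queue_id = cos_queue_id;
}
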
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
struct hwrm_queue_pri2cos_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_cos2bw_qcfg */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
struct hwrm_queue_cos2bw_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
-/* Output (112 bytes) */
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
struct hwrm_queue_cos2bw_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 queue_id0;
- u8 unused_0;
- __le16 unused_1;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 queue_id0;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- u8 queue_id1;
- __le32 queue_id1_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id1_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id1_pri_lvl;
- u8 queue_id1_bw_weight;
- u8 queue_id2;
- __le32 queue_id2_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id2_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id2_pri_lvl;
- u8 queue_id2_bw_weight;
- u8 queue_id3;
- __le32 queue_id3_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id3_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id3_pri_lvl;
- u8 queue_id3_bw_weight;
- u8 queue_id4;
- __le32 queue_id4_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id4_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id4_pri_lvl;
- u8 queue_id4_bw_weight;
- u8 queue_id5;
- __le32 queue_id5_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id5_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id5_pri_lvl;
- u8 queue_id5_bw_weight;
- u8 queue_id6;
- __le32 queue_id6_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id6_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id6_pri_lvl;
- u8 queue_id6_bw_weight;
- u8 queue_id7;
- __le32 queue_id7_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id7_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id7_pri_lvl;
- u8 queue_id7_bw_weight;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_queue_cos2bw_cfg */
-/* Input (128 bytes) */
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_2[4];
+ u8 valid;
+};
+
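/*
 * [Illustrative aside, not part of the diff.] Decoding one of the
 * *_min_bw/*_max_bw words above: bits 0-27 hold the bandwidth value,
 * bit 28 (SCALE) selects bits vs bytes, and bits 29-31 give the unit.
 * Hypothetical helpers using the queue_id0 defines; the other queues
 * differ only in the define prefix.
 */
static inline u32 hwrm_cos2bw_min_bw_value(__le32 min_bw)
{
	u32 v = le32_to_cpu(min_bw);

	return (v & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK) >>
	       QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT;
}

static inline bool hwrm_cos2bw_min_bw_scale_is_bytes(__le32 min_bw)
{
	return !!(le32_to_cpu(min_bw) &
		  QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE);
}
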
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
struct hwrm_queue_cos2bw_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
- __le16 port_id;
- u8 queue_id0;
- u8 unused_0;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+ __le16 port_id;
+ u8 queue_id0;
+ u8 unused_0;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- u8 queue_id1;
- __le32 queue_id1_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id1_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id1_pri_lvl;
- u8 queue_id1_bw_weight;
- u8 queue_id2;
- __le32 queue_id2_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id2_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id2_pri_lvl;
- u8 queue_id2_bw_weight;
- u8 queue_id3;
- __le32 queue_id3_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id3_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id3_pri_lvl;
- u8 queue_id3_bw_weight;
- u8 queue_id4;
- __le32 queue_id4_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id4_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id4_pri_lvl;
- u8 queue_id4_bw_weight;
- u8 queue_id5;
- __le32 queue_id5_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id5_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id5_pri_lvl;
- u8 queue_id5_bw_weight;
- u8 queue_id6;
- __le32 queue_id6_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id6_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id6_pri_lvl;
- u8 queue_id6_bw_weight;
- u8 queue_id7;
- __le32 queue_id7_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id7_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id7_pri_lvl;
- u8 queue_id7_bw_weight;
- u8 unused_1[5];
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_1[5];
};
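
The per-queue min/max bandwidth words above pack three fields into a single __le32: bits 27:0 carry the value, bit 28 selects a bits/bytes scale, and bits 31:29 select the unit. Below is a minimal userspace sketch of that encoding; the helper name and stand-in macros are illustrative only, and a driver would additionally wrap the result in cpu_to_le32() before storing it in the request.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins copied from the QUEUE_ID0_MIN_BW defines above. */
#define BW_VALUE_MASK   0xfffffffUL      /* bits 27:0: numeric value  */
#define BW_SCALE_BYTES  (0x1UL << 28)    /* bit 28: bytes (0 = bits)  */
#define BW_UNIT_MEGA    (0x0UL << 29)    /* bits 31:29: unit selector */

/* Compose one bandwidth word from its three fields. */
static uint32_t encode_bw(uint32_t value, uint32_t scale, uint32_t unit)
{
	return (value & BW_VALUE_MASK) | scale | unit;
}

int main(void)
{
	/* e.g. a minimum bandwidth of 100 megabytes */
	printf("min_bw = 0x%08x\n", encode_bw(100, BW_SCALE_BYTES, BW_UNIT_MEGA));
	return 0;
}
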
-/* Output (16 bytes) */
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
struct hwrm_queue_cos2bw_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_dscp_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
struct hwrm_queue_dscp_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 port_id;
- u8 unused_0[7];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 port_id;
+ u8 unused_0[7];
};
-/* Output (16 bytes) */
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
struct hwrm_queue_dscp_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_dscp_bits;
- u8 unused_0;
- __le16 max_entries;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_qcfg */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_dscp_bits;
+ u8 unused_0;
+ __le16 max_entries;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
struct hwrm_queue_dscp2pri_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- u8 port_id;
- u8 unused_0;
- __le16 dest_data_buffer_size;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ u8 port_id;
+ u8 unused_0;
+ __le16 dest_data_buffer_size;
+ u8 unused_1[4];
+};
+
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
struct hwrm_queue_dscp2pri_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 entry_cnt;
- u8 default_pri;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 entry_cnt;
+ u8 default_pri;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
struct hwrm_queue_dscp2pri_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le32 flags;
- #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
- __le32 enables;
- #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
- u8 port_id;
- u8 default_pri;
- __le16 entry_cnt;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le32 flags;
+ #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
+ __le32 enables;
+ #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
+ u8 port_id;
+ u8 default_pri;
+ __le16 entry_cnt;
+ u8 unused_0[4];
+};
+
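
Note the split between flags and enables in the request above: a flag toggles behavior directly, while an enables bit tells firmware that the corresponding optional field (here default_pri) is meaningful. A hypothetical, trimmed sketch of that convention:

#include <stdint.h>

#define ENABLES_DEFAULT_PRI 0x1UL  /* QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI */

/* Trimmed stand-in for hwrm_queue_dscp2pri_cfg_input above. */
struct dscp2pri_cfg_sketch {
	uint32_t enables;
	uint8_t  default_pri;
};

/* Firmware only consumes default_pri when its enable bit is set. */
static void set_default_pri(struct dscp2pri_cfg_sketch *req, uint8_t pri)
{
	req->enables |= ENABLES_DEFAULT_PRI;
	req->default_pri = pri;
}
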
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
struct hwrm_queue_dscp2pri_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
struct hwrm_vnic_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_vnic_alloc_output (size:128b/16B) */
struct hwrm_vnic_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_vnic_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
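
All of these messages share the same shape: every input begins with the 16-byte header (req_type through resp_addr) and every output ends in a valid byte, which firmware writes last so the driver can poll it to detect that the DMA'd response is complete. A rough, hypothetical illustration of that check (real driver code adds memory barriers and a timeout):

#include <stdint.h>

/* Trimmed stand-in for hwrm_vnic_alloc_output above. */
struct vnic_alloc_resp_sketch {
	uint16_t error_code, req_type, seq_id, resp_len;
	uint32_t vnic_id;
	uint8_t  unused_0[3];
	uint8_t  valid;          /* written last by firmware */
};

static int resp_ready(const volatile struct vnic_alloc_resp_sketch *resp)
{
	return resp->valid != 0;
}
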
+/* hwrm_vnic_free_input (size:192b/24B) */
struct hwrm_vnic_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_vnic_free_output (size:128b/16B) */
struct hwrm_vnic_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_cfg_input (size:320b/40B) */
struct hwrm_vnic_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
- #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
- #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
- #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
- __le32 enables;
- #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
- #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
- #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
- #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
- #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
- __le16 vnic_id;
- __le16 dflt_ring_grp;
- __le16 rss_rule;
- __le16 cos_rule;
- __le16 lb_rule;
- __le16 mru;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
+ __le32 enables;
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ __le16 vnic_id;
+ __le16 dflt_ring_grp;
+ __le16 rss_rule;
+ __le16 cos_rule;
+ __le16 lb_rule;
+ __le16 mru;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_cfg_output (size:128b/16B) */
struct hwrm_vnic_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_qcaps */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
struct hwrm_vnic_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ u8 unused_0[4];
};
-/* Output (24 bytes) */
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
struct hwrm_vnic_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- u8 unused_0;
- u8 unused_1;
- __le32 flags;
- #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
- #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
- #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRROING_CAPABLE_VNIC_CAP 0x40UL
- __le32 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 valid;
-};
-
-/* hwrm_vnic_tpa_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ u8 unused_0[2];
+ __le32 flags;
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */
struct hwrm_vnic_tpa_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
- #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
- #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
- __le32 enables;
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
- __le16 vnic_id;
- __le16 max_agg_segs;
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
- __le16 max_aggs;
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
- u8 unused_0;
- u8 unused_1;
- __le32 max_agg_timer;
- __le32 min_agg_len;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le32 enables;
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ __le16 vnic_id;
+ __le16 max_agg_segs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
+ u8 unused_0[2];
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+};
+
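
The max_agg_segs and max_aggs enumerations above encode a power of two: 0x0 means 1, 0x1 means 2, 0x2 means 4, and so on. A small sketch of that mapping (the helper is hypothetical):

#include <stdio.h>

/* Return the log2 code for a segment count, e.g. 8 -> 0x3,
 * matching VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 above. */
static unsigned int agg_code(unsigned int count)
{
	unsigned int code = 0;

	while ((1u << code) < count)
		code++;
	return code;
}

int main(void)
{
	printf("max_agg_segs for 8 segments: 0x%x\n", agg_code(8));
	return 0;
}
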
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
struct hwrm_vnic_tpa_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cfg */
-/* Input (48 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vnic_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le16 max_agg_segs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
struct hwrm_vnic_rss_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 hash_type;
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
- __le32 unused_0;
- __le64 ring_grp_tbl_addr;
- __le64 hash_key_tbl_addr;
- __le16 rss_ctx_idx;
- __le16 unused_1[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 hash_type;
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ u8 unused_0[4];
+ __le64 ring_grp_tbl_addr;
+ __le64 hash_key_tbl_addr;
+ __le16 rss_ctx_idx;
+ u8 unused_1[6];
+};
+
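
Since hash_type above is a bitmask, several hash modes can be requested at once. A minimal sketch combining the common IPv4/IPv6 and TCP 4-tuple modes (values copied from the defines above; a driver would store the result with cpu_to_le32()):

#include <stdint.h>

#define HASH_IPV4     0x1UL   /* VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4     */
#define HASH_TCP_IPV4 0x2UL   /* VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 */
#define HASH_IPV6     0x8UL   /* VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6     */
#define HASH_TCP_IPV6 0x10UL  /* VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 */

static uint32_t rss_default_hash_type(void)
{
	/* hash on IPv4/IPv6 headers plus TCP 4-tuples */
	return HASH_IPV4 | HASH_TCP_IPV4 | HASH_IPV6 | HASH_TCP_IPV6;
}
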
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
struct hwrm_vnic_rss_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_plcmodes_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
struct hwrm_vnic_plcmodes_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
- __le32 enables;
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
- __le32 vnic_id;
- __le16 jumbo_thresh;
- __le16 hds_offset;
- __le16 hds_threshold;
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ __le32 enables;
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ __le32 vnic_id;
+ __le16 jumbo_thresh;
+ __le16 hds_offset;
+ __le16 hds_threshold;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
struct hwrm_vnic_plcmodes_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_alloc */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rss_cos_lb_ctx_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
struct hwrm_vnic_rss_cos_lb_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 rss_cos_lb_ctx_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_alloc */
-/* Input (80 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_alloc_input (size:640b/80B) */
struct hwrm_ring_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define RING_ALLOC_REQ_ENABLES_RESERVED1 0x1UL
- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
- #define RING_ALLOC_REQ_ENABLES_RESERVED3 0x4UL
- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL
- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
- u8 ring_type;
- #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
- #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
- #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- u8 unused_0;
- __le16 unused_1;
- __le64 page_tbl_addr;
- __le32 fbo;
- u8 page_size;
- u8 page_tbl_depth;
- u8 unused_2;
- u8 unused_3;
- __le32 length;
- __le16 logical_id;
- __le16 cmpl_ring_id;
- __le16 queue_id;
- u8 unused_4;
- u8 unused_5;
- __le32 reserved1;
- __le16 ring_arb_cfg;
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP (0x1UL << 0)
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ (0x2UL << 0)
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
- u8 unused_6;
- u8 unused_7;
- __le32 reserved3;
- __le32 stat_ctx_id;
- __le32 reserved4;
- __le32 max_bw;
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
- #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ u8 ring_type;
+ #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
+ #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL
+ u8 unused_0[3];
+ __le64 page_tbl_addr;
+ __le32 fbo;
+ u8 page_size;
+ u8 page_tbl_depth;
+ u8 unused_1[2];
+ __le32 length;
+ __le16 logical_id;
+ __le16 cmpl_ring_id;
+ __le16 queue_id;
+ u8 unused_2[2];
+ __le32 reserved1;
+ __le16 ring_arb_cfg;
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ __le16 unused_3;
+ __le32 reserved3;
+ __le32 stat_ctx_id;
+ __le32 reserved4;
+ __le32 max_bw;
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 int_mode;
- #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
- #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
- #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
- #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
- u8 unused_8[3];
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 int_mode;
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
+ #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
+ u8 unused_4[3];
};
-/* Output (16 bytes) */
+/* hwrm_ring_alloc_output (size:128b/16B) */
struct hwrm_ring_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 ring_id;
- __le16 logical_ring_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_ring_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ring_id;
+ __le16 logical_ring_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_ring_free_input (size:192b/24B) */
struct hwrm_ring_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
- #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
- #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- u8 unused_0;
- __le16 ring_id;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
+ #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_ROCE_CMPL
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
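/*
 * Illustrative sketch, not part of this patch: the *_LAST defines added
 * throughout this version of the interface name the highest legal
 * enumerator for a field, so a range check can be written once and stays
 * correct if later spec revisions append values. Hypothetical validator
 * for the ring_type field above:
 */
static inline bool ring_type_is_valid_model(uint8_t ring_type)
{
	/* RING_FREE_REQ_RING_TYPE_LAST == RING_FREE_REQ_RING_TYPE_ROCE_CMPL */
	return ring_type <= 0x3;
}
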
+/* hwrm_ring_free_output (size:128b/16B) */
struct hwrm_ring_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_qaggint_params */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
struct hwrm_ring_cmpl_ring_qaggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ u8 unused_0[6];
};
-/* Output (32 bytes) */
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
struct hwrm_ring_cmpl_ring_qaggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flags;
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_cfg_aggint_params */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 flags;
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ u8 unused_0[6];
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_reset */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_reset_input (size:192b/24B) */
struct hwrm_ring_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- u8 unused_0;
- __le16 ring_id;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_ring_reset_output (size:128b/16B) */
struct hwrm_ring_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_ring_grp_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
struct hwrm_ring_grp_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 cr;
- __le16 rr;
- __le16 ar;
- __le16 sc;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 cr;
+ __le16 rr;
+ __le16 ar;
+ __le16 sc;
+};
+
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
struct hwrm_ring_grp_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 ring_group_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_ring_grp_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 ring_group_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_free_input (size:192b/24B) */
struct hwrm_ring_grp_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 ring_group_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 ring_group_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_ring_grp_free_output (size:128b/16B) */
struct hwrm_ring_grp_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_alloc */
-/* Input (96 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
struct hwrm_cfa_l2_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
- __le32 enables;
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- u8 l2_addr[6];
- u8 unused_0;
- u8 unused_1;
- u8 l2_addr_mask[6];
- __le16 l2_ovlan;
- __le16 l2_ovlan_mask;
- __le16 l2_ivlan;
- __le16 l2_ivlan_mask;
- u8 unused_2;
- u8 unused_3;
- u8 t_l2_addr[6];
- u8 unused_4;
- u8 unused_5;
- u8 t_l2_addr_mask[6];
- __le16 t_l2_ovlan;
- __le16 t_l2_ovlan_mask;
- __le16 t_l2_ivlan;
- __le16 t_l2_ivlan_mask;
- u8 src_type;
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
- u8 unused_6;
- __le32 src_id;
- u8 tunnel_type;
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 unused_7;
- __le16 dst_id;
- __le16 mirror_vnic_id;
- u8 pri_hint;
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
- u8 unused_8;
- __le32 unused_9;
- __le64 l2_filter_id_hint;
-};
-
-/* Output (24 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ __le32 enables;
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ u8 l2_addr[6];
+ u8 unused_0[2];
+ u8 l2_addr_mask[6];
+ __le16 l2_ovlan;
+ __le16 l2_ovlan_mask;
+ __le16 l2_ivlan;
+ __le16 l2_ivlan_mask;
+ u8 unused_1[2];
+ u8 t_l2_addr[6];
+ u8 unused_2[2];
+ u8 t_l2_addr_mask[6];
+ __le16 t_l2_ovlan;
+ __le16 t_l2_ovlan_mask;
+ __le16 t_l2_ivlan;
+ __le16 t_l2_ivlan_mask;
+ u8 src_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
+ u8 unused_3;
+ __le32 src_id;
+ u8 tunnel_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_4;
+ __le16 dst_id;
+ __le16 mirror_vnic_id;
+ u8 pri_hint;
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
+ u8 unused_5;
+ __le32 unused_6;
+ __le64 l2_filter_id_hint;
+};
+
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_l2_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 l2_filter_id;
- __le32 flow_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 l2_filter_id;
+ __le32 flow_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
struct hwrm_cfa_l2_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 l2_filter_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 l2_filter_id;
};
-/* Output (16 bytes) */
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
struct hwrm_cfa_l2_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_cfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
struct hwrm_cfa_l2_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- __le32 enables;
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- __le64 l2_filter_id;
- __le32 dst_id;
- __le32 new_mirror_vnic_id;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ __le32 enables;
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ __le64 l2_filter_id;
+ __le32 dst_id;
+ __le32 new_mirror_vnic_id;
+};
+
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
struct hwrm_cfa_l2_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_l2_set_rx_mask */
-/* Input (56 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
struct hwrm_cfa_l2_set_rx_mask_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- __le32 mask;
- #define CFA_L2_SET_RX_MASK_REQ_MASK_RESERVED 0x1UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
- __le64 mc_tbl_addr;
- __le32 num_mc_entries;
- __le32 unused_0;
- __le64 vlan_tag_tbl_addr;
- __le32 num_vlan_tags;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 mask;
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
+ __le64 mc_tbl_addr;
+ __le32 num_mc_entries;
+ u8 unused_0[4];
+ __le64 vlan_tag_tbl_addr;
+ __le32 num_vlan_tags;
+ u8 unused_1[4];
+};
+
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
struct hwrm_cfa_l2_set_rx_mask_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
struct hwrm_cfa_l2_set_rx_mask_cmd_err {
- u8 code;
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
+ u8 code;
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
- u8 unused_0[7];
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
+ u8 unused_0[7];
};
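/*
 * Illustrative sketch, not part of this patch: commands that define a
 * *_cmd_err struct return an extra 8-byte block whose 'code' field
 * refines the generic error_code in the response header. Hypothetical
 * decoder for the CFA_L2_SET_RX_MASK_CMD_ERR_CODE_* values above:
 */
static inline const char *rx_mask_cmd_err_str_model(uint8_t code)
{
	switch (code) {
	case 0x0: return "unknown";                /* ..._CODE_UNKNOWN */
	case 0x1: return "ntuple filter conflict"; /* ..._CODE_NTUPLE_FILTER_CONFLICT_ERR */
	default:  return "unrecognized command-specific code";
	}
}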
-/* hwrm_cfa_tunnel_filter_alloc */
-/* Input (88 bytes) */
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
struct hwrm_cfa_tunnel_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- __le32 enables;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
- __le64 l2_filter_id;
- u8 l2_addr[6];
- __le16 l2_ivlan;
- __le32 l3_addr[4];
- __le32 t_l3_addr[4];
- u8 l3_addr_type;
- u8 t_l3_addr_type;
- u8 tunnel_type;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ __le32 enables;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+ __le64 l2_filter_id;
+ u8 l2_addr[6];
+ __le16 l2_ivlan;
+ __le32 l3_addr[4];
+ __le32 t_l3_addr[4];
+ u8 l3_addr_type;
+ u8 t_l3_addr_type;
+ u8 tunnel_type;
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
@@ -4174,158 +4372,204 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 unused_0;
- __le32 vni;
- __le32 dst_vnic_id;
- __le32 mirror_vnic_id;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 tunnel_flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
+ __le32 vni;
+ __le32 dst_vnic_id;
+ __le32 mirror_vnic_id;
};
-/* Output (24 bytes) */
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_tunnel_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tunnel_filter_id;
- __le32 flow_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_tunnel_filter_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tunnel_filter_id;
+ __le32 flow_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
struct hwrm_cfa_tunnel_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 tunnel_filter_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 tunnel_filter_id;
};
-/* Output (16 bytes) */
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
struct hwrm_cfa_tunnel_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_encap_record_alloc */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
+struct hwrm_vxlan_ipv4_hdr {
+ u8 ver_hlen;
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
+ u8 tos;
+ __be16 ip_id;
+ __be16 flags_frag_offset;
+ u8 ttl;
+ u8 protocol;
+ __be32 src_ip_addr;
+ __be32 dest_ip_addr;
+};
+
+/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
+struct hwrm_vxlan_ipv6_hdr {
+ __be32 ver_tc_flow_label;
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
+ __be16 payload_len;
+ u8 next_hdr;
+ u8 ttl;
+ __be32 src_ip_addr[4];
+ __be32 dest_ip_addr[4];
+};
+
+/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */
+struct hwrm_cfa_encap_data_vxlan {
+ u8 src_mac_addr[6];
+ __le16 unused_0;
+ u8 dst_mac_addr[6];
+ u8 num_vlan_tags;
+ u8 unused_1;
+ __be16 ovlan_tpid;
+ __be16 ovlan_tci;
+ __be16 ivlan_tpid;
+ __be16 ivlan_tci;
+ __le32 l3[10];
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 vni;
+};
+
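/*
 * Illustrative sketch, not part of this patch: note the byte-order split
 * above -- HWRM control fields are little-endian (__le16/__le32), but
 * hwrm_cfa_encap_data_vxlan and the hwrm_vxlan_ipv4_hdr/ipv6_hdr structs
 * describe on-the-wire packet headers, so their fields are big-endian
 * (__be16/__be32). A hypothetical userspace fill helper must byte-swap:
 */
#include <arpa/inet.h>	/* htons() */

static inline void fill_vxlan_udp_ports_model(uint16_t *src_port_be,
					      uint16_t *dst_port_be)
{
	*src_port_be = htons(0);    /* 0: let hw/stack choose the source port */
	*dst_port_be = htons(4789); /* IANA-assigned VXLAN UDP port */
}
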
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
struct hwrm_cfa_encap_record_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
- u8 unused_0;
- __le16 unused_1;
- __le32 encap_data[20];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ u8 encap_type;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE
+ u8 unused_0[3];
+ __le32 encap_data[20];
+};
+
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
struct hwrm_cfa_encap_record_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 encap_record_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_encap_record_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 encap_record_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
struct hwrm_cfa_encap_record_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_record_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_record_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
struct hwrm_cfa_encap_record_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
struct hwrm_cfa_ntuple_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
- __le32 enables;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
- __le64 l2_filter_id;
- u8 src_macaddr[6];
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- u8 ip_protocol;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- __le16 dst_id;
- __le16 mirror_vnic_id;
- u8 tunnel_type;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
+ __le64 l2_filter_id;
+ u8 src_macaddr[6];
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+ __le16 dst_id;
+ __le16 mirror_vnic_id;
+ u8 tunnel_type;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
@@ -4337,2221 +4581,1723 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 pri_hint;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
- __be32 src_ipaddr[4];
- __be32 src_ipaddr_mask[4];
- __be32 dst_ipaddr[4];
- __be32 dst_ipaddr_mask[4];
- __be16 src_port;
- __be16 src_port_mask;
- __be16 dst_port;
- __be16 dst_port_mask;
- __le64 ntuple_filter_id_hint;
-};
-
-/* Output (24 bytes) */
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 pri_hint;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
+ __be32 src_ipaddr[4];
+ __be32 src_ipaddr_mask[4];
+ __be32 dst_ipaddr[4];
+ __be32 dst_ipaddr_mask[4];
+ __be16 src_port;
+ __be16 src_port_mask;
+ __be16 dst_port;
+ __be16 dst_port_mask;
+ __le64 ntuple_filter_id_hint;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_ntuple_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 ntuple_filter_id;
- __le32 flow_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ntuple_filter_id;
+ __le32 flow_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
- u8 code;
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ u8 code;
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
- u8 unused_0[7];
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
+ u8 unused_0[7];
};
-/* hwrm_cfa_ntuple_filter_free */
-/* Input (24 bytes) */
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
struct hwrm_cfa_ntuple_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 ntuple_filter_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 ntuple_filter_id;
};
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
struct hwrm_cfa_ntuple_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_cfg */
-/* Input (48 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
struct hwrm_cfa_ntuple_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
- __le32 unused_0;
- __le64 ntuple_filter_id;
- __le32 new_dst_id;
- __le32 new_mirror_vnic_id;
- __le16 new_meter_instance_id;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
+ u8 unused_0[4];
+ __le64 ntuple_filter_id;
+ __le32 new_dst_id;
+ __le32 new_mirror_vnic_id;
+ __le16 new_meter_instance_id;
#define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
- __le16 unused_1[3];
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
+ u8 unused_1[6];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
struct hwrm_cfa_ntuple_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_alloc */
-/* Input (104 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
struct hwrm_cfa_decap_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
- __le32 enables;
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- __be32 tunnel_id;
- u8 tunnel_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- u8 unused_0;
- __le16 unused_1;
- u8 src_macaddr[6];
- u8 unused_2;
- u8 unused_3;
- u8 dst_macaddr[6];
- __be16 ovlan_vid;
- __be16 ivlan_vid;
- __be16 t_ovlan_vid;
- __be16 t_ivlan_vid;
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- u8 ip_protocol;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- u8 unused_4;
- u8 unused_5;
- u8 unused_6[3];
- u8 unused_7;
- __be32 src_ipaddr[4];
- __be32 dst_ipaddr[4];
- __be16 src_port;
- __be16 dst_port;
- __le16 dst_id;
- __le16 l2_ctxt_ref_id;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
+ __le32 enables;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ __be32 tunnel_id;
+ u8 tunnel_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_0;
+ __le16 unused_1;
+ u8 src_macaddr[6];
+ u8 unused_2[2];
+ u8 dst_macaddr[6];
+ __be16 ovlan_vid;
+ __be16 ivlan_vid;
+ __be16 t_ovlan_vid;
+ __be16 t_ivlan_vid;
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+ __le16 unused_3;
+ __le32 unused_4;
+ __be32 src_ipaddr[4];
+ __be32 dst_ipaddr[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __le16 dst_id;
+ __le16 l2_ctxt_ref_id;
+};
+
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
struct hwrm_cfa_decap_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 decap_filter_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 decap_filter_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
struct hwrm_cfa_decap_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 decap_filter_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 decap_filter_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
struct hwrm_cfa_decap_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_flow_alloc */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
struct hwrm_cfa_flow_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flags;
- #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
- __le16 src_fid;
- __le32 tunnel_handle;
- __le16 action_flags;
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
- __le16 dst_fid;
- __be16 l2_rewrite_vlan_tpid;
- __be16 l2_rewrite_vlan_tci;
- __le16 act_meter_id;
- __le16 ref_flow_handle;
- __be16 ethertype;
- __be16 outer_vlan_tci;
- __be16 dmac[3];
- __be16 inner_vlan_tci;
- __be16 smac[3];
- u8 ip_dst_mask_len;
- u8 ip_src_mask_len;
- __be32 ip_dst[4];
- __be32 ip_src[4];
- __be16 l4_src_port;
- __be16 l4_src_port_mask;
- __be16 l4_dst_port;
- __be16 l4_dst_port_mask;
- __be32 nat_ip_address[4];
- __be16 l2_rewrite_dmac[3];
- __be16 nat_port;
- __be16 l2_rewrite_smac[3];
- u8 ip_proto;
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flags;
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ __le16 src_fid;
+ __le32 tunnel_handle;
+ __le16 action_flags;
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ __le16 dst_fid;
+ __be16 l2_rewrite_vlan_tpid;
+ __be16 l2_rewrite_vlan_tci;
+ __le16 act_meter_id;
+ __le16 ref_flow_handle;
+ __be16 ethertype;
+ __be16 outer_vlan_tci;
+ __be16 dmac[3];
+ __be16 inner_vlan_tci;
+ __be16 smac[3];
+ u8 ip_dst_mask_len;
+ u8 ip_src_mask_len;
+ __be32 ip_dst[4];
+ __be32 ip_src[4];
+ __be16 l4_src_port;
+ __be16 l4_src_port_mask;
+ __be16 l4_dst_port;
+ __be16 l4_dst_port_mask;
+ __be32 nat_ip_address[4];
+ __be16 l2_rewrite_dmac[3];
+ __be16 nat_port;
+ __be16 l2_rewrite_smac[3];
+ u8 ip_proto;
+ u8 unused_0;
+};
+
+/* hwrm_cfa_flow_alloc_output (size:128b/16B) */
struct hwrm_cfa_flow_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flow_handle;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_cfa_flow_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flow_handle;
+ u8 unused_0[5];
+ u8 valid;
+};
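
The flags word above packs two multi-bit fields (NUM_VLAN, FLOWTYPE) next to
plain action bits, so a request is composed by OR-ing the pre-shifted values.
A minimal sketch under that reading; example_fill_flow_flags() is a
hypothetical helper, and the rest of the request (header, addresses, ports)
is elided:

	/* Compose flags/action_flags for a single-VLAN IPv4 flow that is
	 * forwarded with source NAT.  Only the masks defined above are used.
	 */
	static void example_fill_flow_flags(struct hwrm_cfa_flow_alloc_input *req)
	{
		u16 flags = CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE |
			    CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4;
		u16 actions = CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD |
			      CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC |
			      CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS;

		req->flags = cpu_to_le16(flags);
		req->action_flags = cpu_to_le16(actions);
	}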
+
+/* hwrm_cfa_flow_free_input (size:192b/24B) */
struct hwrm_cfa_flow_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flow_handle;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ u8 unused_0[6];
};
-/* Output (32 bytes) */
+/* hwrm_cfa_flow_free_output (size:256b/32B) */
struct hwrm_cfa_flow_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet;
- __le64 byte;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_flow_stats */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet;
+ __le64 byte;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_stats_input (size:320b/40B) */
struct hwrm_cfa_flow_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 num_flows;
- __le16 flow_handle_0;
- __le16 flow_handle_1;
- __le16 flow_handle_2;
- __le16 flow_handle_3;
- __le16 flow_handle_4;
- __le16 flow_handle_5;
- __le16 flow_handle_6;
- __le16 flow_handle_7;
- __le16 flow_handle_8;
- __le16 flow_handle_9;
- __le16 unused_0;
-};
-
-/* Output (176 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 num_flows;
+ __le16 flow_handle_0;
+ __le16 flow_handle_1;
+ __le16 flow_handle_2;
+ __le16 flow_handle_3;
+ __le16 flow_handle_4;
+ __le16 flow_handle_5;
+ __le16 flow_handle_6;
+ __le16 flow_handle_7;
+ __le16 flow_handle_8;
+ __le16 flow_handle_9;
+ u8 unused_0[2];
+};
+
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
struct hwrm_cfa_flow_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet_0;
- __le64 packet_1;
- __le64 packet_2;
- __le64 packet_3;
- __le64 packet_4;
- __le64 packet_5;
- __le64 packet_6;
- __le64 packet_7;
- __le64 packet_8;
- __le64 packet_9;
- __le64 byte_0;
- __le64 byte_1;
- __le64 byte_2;
- __le64 byte_3;
- __le64 byte_4;
- __le64 byte_5;
- __le64 byte_6;
- __le64 byte_7;
- __le64 byte_8;
- __le64 byte_9;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_alloc */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet_0;
+ __le64 packet_1;
+ __le64 packet_2;
+ __le64 packet_3;
+ __le64 packet_4;
+ __le64 packet_5;
+ __le64 packet_6;
+ __le64 packet_7;
+ __le64 packet_8;
+ __le64 packet_9;
+ __le64 byte_0;
+ __le64 byte_1;
+ __le64 byte_2;
+ __le64 byte_3;
+ __le64 byte_4;
+ __le64 byte_5;
+ __le64 byte_6;
+ __le64 byte_7;
+ __le64 byte_8;
+ __le64 byte_9;
+ u8 unused_0[7];
+ u8 valid;
+};
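
The stats response returns ten parallel counters as scalar packet_N/byte_N
fields rather than arrays.  A read-back sketch; treating &packet_0 and
&byte_0 as array bases assumes the consecutive __le64 members are laid out
contiguously, which the fixed-size wire format requires:

	static void example_read_flow_stats(const struct hwrm_cfa_flow_stats_output *resp,
					    int num_flows, u64 *pkts, u64 *bytes)
	{
		const __le64 *p = &resp->packet_0;
		const __le64 *b = &resp->byte_0;
		int i;

		for (i = 0; i < num_flows; i++) {
			pkts[i] = le64_to_cpu(p[i]);	/* counters are little endian */
			bytes[i] = le64_to_cpu(b[i]);
		}
	}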
+
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
struct hwrm_cfa_vfr_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 reserved;
- __le32 unused_0;
- char vfr_name[32];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
+ char vfr_name[32];
+};
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
struct hwrm_cfa_vfr_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rx_cfa_code;
- __le16 tx_cfa_action;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rx_cfa_code;
+ __le16 tx_cfa_action;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_vfr_free_input (size:384b/48B) */
struct hwrm_cfa_vfr_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- char vfr_name[32];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ char vfr_name[32];
};
-/* Output (16 bytes) */
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
struct hwrm_cfa_vfr_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_query */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4
+ u8 unused_0[7];
+};
+
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- __be16 tunnel_dst_port_val;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_alloc */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ __be16 tunnel_dst_port_val;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- u8 unused_0;
- __be16 tunnel_dst_port_val;
- __be32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4
+ u8 unused_0;
+ __be16 tunnel_dst_port_val;
+ u8 unused_1[4];
+};
+
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- u8 unused_0;
- __le16 tunnel_dst_port_id;
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4
+ u8 unused_0;
+ __le16 tunnel_dst_port_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
struct hwrm_tunnel_dst_port_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_alloc */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
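
ctx_hw_stats is the host-memory image behind a statistics context: the
firmware DMA-writes these counters into the block whose address is passed as
stats_dma_addr in hwrm_stat_ctx_alloc_input below, refreshing it every
update_period_ms.  A minimal allocation sketch, assuming a valid struct
device; the alloc request itself is elided:

	static struct ctx_hw_stats *example_alloc_stats(struct device *dev,
							dma_addr_t *mapping)
	{
		/* Firmware writes this block; hand *mapping to
		 * hwrm_stat_ctx_alloc_input.stats_dma_addr.
		 */
		return dma_alloc_coherent(dev, sizeof(struct ctx_hw_stats),
					  mapping, GFP_KERNEL);
	}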
+
+/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
struct hwrm_stat_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 stats_dma_addr;
- __le32 update_period_ms;
- u8 stat_ctx_flags;
- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
- u8 unused_0[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ u8 stat_ctx_flags;
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
struct hwrm_stat_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_free */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
struct hwrm_stat_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
struct hwrm_stat_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_query */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
struct hwrm_stat_ctx_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
};
-/* Output (176 bytes) */
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
struct hwrm_stat_ctx_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_err_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_err_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_stat_ctx_clr_stats */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_err_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_err_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
struct hwrm_stat_ctx_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- __le32 unused_0;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
};
-/* Output (16 bytes) */
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
struct hwrm_stat_ctx_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fw_reset */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* hwrm_fw_reset_input (size:192b/24B) */
struct hwrm_fw_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- u8 host_idx;
- u8 unused_0[5];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST
+ u8 host_idx;
+ u8 unused_0[5];
+};
+
+/* hwrm_fw_reset_output (size:128b/16B) */
struct hwrm_fw_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_fw_qstatus */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST
+ u8 unused_0[6];
+ u8 valid;
+};
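
selfrst_status tells the host when the targeted processor actually resets.
A decoding sketch; the human-readable glosses are readings of the enum
names, not wording taken from the HWRM spec:

	static const char *example_selfrst_str(u8 status)
	{
		switch (status) {
		case FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE:
			return "no self-reset";
		case FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP:
			return "self-reset as soon as possible";
		case FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST:
			return "self-reset on the next PCIe reset";
		default:
			return "unknown";
		}
	}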
+
+/* hwrm_fw_qstatus_input (size:192b/24B) */
struct hwrm_fw_qstatus_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_qstatus_output (size:128b/16B) */
struct hwrm_fw_qstatus_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_fw_set_time */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_set_time_input (size:256b/32B) */
struct hwrm_fw_set_time_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 year;
- #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
- u8 month;
- u8 day;
- u8 hour;
- u8 minute;
- u8 second;
- u8 unused_0;
- __le16 millisecond;
- __le16 zone;
- #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
- #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
- __le32 unused_1;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 year;
+ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+ #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
+ #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
+ u8 unused_1[4];
+};
+
+/* hwrm_fw_set_time_output (size:128b/16B) */
struct hwrm_fw_set_time_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fw_set_structured_data */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_struct_hdr (size:128b/16B) */
+struct hwrm_struct_hdr {
+ __le16 struct_id;
+ #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
+ #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_RSS_V2
+ __le16 len;
+ u8 version;
+ u8 count;
+ __le16 subtype;
+ __le16 next_offset;
+ #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+ u8 unused_0[6];
+};
+
+/* hwrm_struct_data_dcbx_app (size:64b/8B) */
+struct hwrm_struct_data_dcbx_app {
+ __be16 protocol_id;
+ u8 protocol_selector;
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
+ u8 priority;
+ u8 valid;
+ u8 unused_0[3];
+};
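
Structured-data buffers are chains of hwrm_struct_hdr records, each followed
by its payload.  A sketch filling one DCBX app entry; the field semantics
(len covering only the payload, count of entries, next_offset 0 ending the
chain) are inferred from the names above and should be treated as
assumptions.  buf is assumed zeroed, so version/subtype stay 0:

	static void example_fill_dcbx_app(void *buf, u16 tcp_port, u8 prio)
	{
		struct hwrm_struct_hdr *hdr = buf;
		struct hwrm_struct_data_dcbx_app *app = buf + sizeof(*hdr);

		hdr->struct_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
		hdr->len = cpu_to_le16(sizeof(*app));
		hdr->count = 1;
		hdr->next_offset = cpu_to_le16(STRUCT_HDR_NEXT_OFFSET_LAST);

		app->protocol_id = cpu_to_be16(tcp_port);	/* big endian on the wire */
		app->protocol_selector = STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT;
		app->priority = prio;
		app->valid = 1;
	}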
+
+/* hwrm_fw_set_structured_data_input (size:256b/32B) */
struct hwrm_fw_set_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- u8 hdr_cnt;
- u8 unused_0[5];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ u8 hdr_cnt;
+ u8 unused_0[5];
+};
+
+/* hwrm_fw_set_structured_data_output (size:128b/16B) */
struct hwrm_fw_set_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
struct hwrm_fw_set_structured_data_cmd_err {
- u8 code;
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- u8 unused_0[7];
+ u8 code;
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
};
-/* hwrm_fw_get_structured_data */
-/* Input (32 bytes) */
+/* hwrm_fw_get_structured_data_input (size:256b/32B) */
struct hwrm_fw_get_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 structure_id;
- __le16 subtype;
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 structure_id;
+ __le16 subtype;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
- u8 count;
- u8 unused_0;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL
+ u8 count;
+ u8 unused_0;
};
-/* Output (16 bytes) */
+/* hwrm_fw_get_structured_data_output (size:128b/16B) */
struct hwrm_fw_get_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hdr_cnt;
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hdr_cnt;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
struct hwrm_fw_get_structured_data_cmd_err {
- u8 code;
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- u8 unused_0[7];
+ u8 code;
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
};
-/* hwrm_exec_fwd_resp */
-/* Input (128 bytes) */
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
struct hwrm_exec_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
struct hwrm_exec_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_reject_fwd_resp */
-/* Input (128 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
struct hwrm_reject_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
struct hwrm_reject_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fwd_resp */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_resp_input (size:1024b/128B) */
struct hwrm_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_resp_target_id;
- __le16 encap_resp_cmpl_ring;
- __le16 encap_resp_len;
- u8 unused_0;
- u8 unused_1;
- __le64 encap_resp_addr;
- __le32 encap_resp[24];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[24];
+};
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
struct hwrm_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_fwd_async_event_cmpl */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
struct hwrm_fwd_async_event_cmpl_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_async_event_target_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2[3];
- u8 unused_3;
- __le32 encap_async_event_cmpl[4];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_async_event_target_id;
+ u8 unused_0[6];
+ __le32 encap_async_event_cmpl[4];
+};
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
struct hwrm_fwd_async_event_cmpl_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_temp_monitor_query */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_temp_monitor_query_input (size:128b/16B) */
struct hwrm_temp_monitor_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_temp_monitor_query_output (size:128b/16B) */
struct hwrm_temp_monitor_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 temp;
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_wol_filter_alloc */
-/* Input (64 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 unused_0[6];
+ u8 valid;
+};
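
hwrm_temp_monitor_query carries nothing beyond the common request header, so
it is the smallest example of the send/poll pattern shared by every message
in this file: fill the header, send, and read fields out of the response
buffer once its valid byte is set.  A sketch assuming the bnxt driver's
usual helpers (bnxt_hwrm_cmd_hdr_init(), _hwrm_send_message(),
bp->hwrm_cmd_resp_addr, HWRM_CMD_TIMEOUT), which live in bnxt.c/bnxt.h
rather than in this header:

	static int example_query_temp(struct bnxt *bp, u8 *temp)
	{
		struct hwrm_temp_monitor_query_input req = {0};
		struct hwrm_temp_monitor_query_output *resp = bp->hwrm_cmd_resp_addr;
		int rc;

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
		mutex_lock(&bp->hwrm_cmd_lock);
		rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (!rc)
			*temp = resp->temp;	/* single-byte reading */
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}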
+
+/* hwrm_wol_filter_alloc_input (size:512b/64B) */
struct hwrm_wol_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
- __le16 port_id;
- u8 wol_type;
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
- u8 unused_0;
- __le32 unused_1;
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_buf_size;
- __le16 pattern_mask_size;
- __le32 unused_2;
- __le64 pattern_buf_addr;
- __le64 pattern_mask_addr;
-};
-
-/* Output (16 bytes) */
+ __le16 port_id;
+ u8 wol_type;
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
+ u8 unused_0[5];
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_buf_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[4];
+ __le64 pattern_buf_addr;
+ __le64 pattern_mask_addr;
+};
+
+/* hwrm_wol_filter_alloc_output (size:128b/16B) */
struct hwrm_wol_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 unused_0;
- __le16 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_wol_filter_free */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_free_input (size:256b/32B) */
struct hwrm_wol_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
#define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
- __le32 enables;
- #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
- __le16 port_id;
- u8 wol_filter_id;
- u8 unused_0[5];
+ __le32 enables;
+ #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+ __le16 port_id;
+ u8 wol_filter_id;
+ u8 unused_0[5];
};
-/* Output (16 bytes) */
+/* hwrm_wol_filter_free_output (size:128b/16B) */
struct hwrm_wol_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_wol_filter_qcfg */
-/* Input (56 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
struct hwrm_wol_filter_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 handle;
- __le32 unused_0;
- __le64 pattern_buf_addr;
- __le16 pattern_buf_size;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3[3];
- u8 unused_4;
- __le64 pattern_mask_addr;
- __le16 pattern_mask_size;
- __le16 unused_5[3];
-};
-
-/* Output (32 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 handle;
+ u8 unused_0[4];
+ __le64 pattern_buf_addr;
+ __le16 pattern_buf_size;
+ u8 unused_1[6];
+ __le64 pattern_mask_addr;
+ __le16 pattern_mask_size;
+ u8 unused_2[6];
+};
+
+/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
struct hwrm_wol_filter_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 next_handle;
- u8 wol_filter_id;
- u8 wol_type;
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
- __le32 unused_0;
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_size;
- __le16 pattern_mask_size;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_wol_reason_qcfg */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 next_handle;
+ u8 wol_filter_id;
+ u8 wol_type;
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
+ __le32 unused_0;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[3];
+ u8 valid;
+};
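
The qcfg call is a cursor-style iterator: the host passes a handle in and
gets next_handle back, walking the configured WoL filters one at a time.  A
sketch reusing the helper names assumed above; stopping on a next_handle of
0 or 0xffff reflects how the driver consumes this interface and is an
assumption here:

	static u16 example_next_wol_filter(struct bnxt *bp, u16 handle)
	{
		struct hwrm_wol_filter_qcfg_input req = {0};
		struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
		u16 next = 0;
		int rc;

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
		req.port_id = cpu_to_le16(bp->pf.port_id);
		req.handle = cpu_to_le16(handle);
		mutex_lock(&bp->hwrm_cmd_lock);
		rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (!rc)
			next = le16_to_cpu(resp->next_handle);
		mutex_unlock(&bp->hwrm_cmd_lock);
		return next;	/* caller stops on 0 or 0xffff */
	}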
+
+/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
struct hwrm_wol_reason_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2[3];
- u8 unused_3;
- __le64 wol_pkt_buf_addr;
- __le16 wol_pkt_buf_size;
- __le16 unused_4[3];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+ __le64 wol_pkt_buf_addr;
+ __le16 wol_pkt_buf_size;
+ u8 unused_1[6];
+};
+
+/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
struct hwrm_wol_reason_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 wol_reason;
- #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
- u8 wol_pkt_len;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_dbg_read_direct */
-/* Input (32 bytes) */
-struct hwrm_dbg_read_direct_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 read_addr;
- __le32 read_len32;
-};
-
-/* Output (16 bytes) */
-struct hwrm_dbg_read_direct_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_read */
-/* Input (40 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 wol_reason;
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
+ u8 wol_pkt_len;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_nvm_read_input (size:320b/40B) */
struct hwrm_nvm_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le16 dir_idx;
- u8 unused_0;
- u8 unused_1;
- __le32 offset;
- __le32 len;
- __le32 unused_2;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0[2];
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_read_output (size:128b/16B) */
struct hwrm_nvm_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_entries */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
struct hwrm_nvm_get_dir_entries_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
};
-/* Output (16 bytes) */
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
struct hwrm_nvm_get_dir_entries_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_info */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
struct hwrm_nvm_get_dir_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (24 bytes) */
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
struct hwrm_nvm_get_dir_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 entries;
- __le32 entry_length;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_write */
-/* Input (48 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 entries;
+ __le32 entry_length;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_write_input (size:384b/48B) */
struct hwrm_nvm_write_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 dir_data_length;
- __le16 option;
- __le16 flags;
- #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
- __le32 dir_item_length;
- __le32 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 dir_data_length;
+ __le16 option;
+ __le16 flags;
+ #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+ __le32 dir_item_length;
+ __le32 unused_0;
+};
+
+/* hwrm_nvm_write_output (size:128b/16B) */
struct hwrm_nvm_write_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le16 dir_idx;
- u8 unused_0;
- u8 valid;
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 valid;
};
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
struct hwrm_nvm_write_cmd_err {
- u8 code;
- #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE
+ u8 unused_0[7];
};
-/* hwrm_nvm_modify */
-/* Input (40 bytes) */
+/* hwrm_nvm_modify_input (size:320b/40B) */
struct hwrm_nvm_modify_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_idx;
- u8 unused_0;
- u8 unused_1;
- __le32 offset;
- __le32 len;
- __le32 unused_2;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_idx;
+ u8 unused_0[2];
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_modify_output (size:128b/16B) */
struct hwrm_nvm_modify_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_find_dir_entry */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
struct hwrm_nvm_find_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
- __le16 dir_idx;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- u8 opt_ordinal;
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
- u8 unused_1[3];
-};
-
-/* Output (32 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
+ __le16 dir_idx;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 opt_ordinal;
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
+ u8 unused_0[3];
+};
+
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
struct hwrm_nvm_find_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le32 dir_data_length;
- __le32 fw_ver;
- __le16 dir_ordinal;
- __le16 dir_idx;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_erase_dir_entry */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le32 dir_data_length;
+ __le32 fw_ver;
+ __le16 dir_ordinal;
+ __le16 dir_idx;
+ u8 unused_0[7];
+ u8 valid;
+};
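
opt_ordinal turns the directory lookup into a search: EQ matches an exact
ordinal while GE/GT scan forward from it.  A request-fill sketch that finds
the first entry of a given type; example_fill_find_dir() is hypothetical and
the send path is elided:

	static void example_fill_find_dir(struct hwrm_nvm_find_dir_entry_input *req,
					  u16 dir_type)
	{
		req->enables = 0;			/* search by type, not dir_idx */
		req->dir_type = cpu_to_le16(dir_type);
		req->dir_ordinal = cpu_to_le16(0);
		req->dir_ext = cpu_to_le16(0);
		req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE;
	}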
+
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
struct hwrm_nvm_erase_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_idx;
- __le16 unused_0[3];
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_idx;
+ u8 unused_0[6];
};
-/* Output (16 bytes) */
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
struct hwrm_nvm_erase_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_get_dev_info */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
struct hwrm_nvm_get_dev_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (32 bytes) */
+/* hwrm_nvm_get_dev_info_output (size:256b/32B) */
struct hwrm_nvm_get_dev_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 manufacturer_id;
- __le16 device_id;
- __le32 sector_size;
- __le32 nvram_size;
- __le32 reserved_size;
- __le32 available_size;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* hwrm_nvm_mod_dir_entry */
-/* Input (32 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 manufacturer_id;
+ __le16 device_id;
+ __le32 sector_size;
+ __le32 nvram_size;
+ __le32 reserved_size;
+ __le32 available_size;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
struct hwrm_nvm_mod_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
- __le16 dir_idx;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 checksum;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
+ __le16 dir_idx;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 checksum;
+};
+
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
struct hwrm_nvm_mod_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_verify_update */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
struct hwrm_nvm_verify_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
struct hwrm_nvm_verify_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_nvm_install_update */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_input (size:192b/24B) */
struct hwrm_nvm_install_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 install_type;
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
- __le16 flags;
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
- __le16 unused_0;
-};
-
-/* Output (24 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 install_type;
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
+ __le16 flags;
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_install_update_output (size:192b/24B) */
struct hwrm_nvm_install_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 installed_items;
- u8 result;
- #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
- u8 problem_item;
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
- u8 reset_required;
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 installed_items;
+ u8 result;
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS
+ u8 problem_item;
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
+ u8 reset_required;
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
struct hwrm_nvm_install_update_cmd_err {
- u8 code;
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE
+ u8 unused_0[7];
};
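On completion, the result/problem_item/reset_required fields above are enough to report what happened; a hedged sketch of decoding them (assuming the response lands in the driver's usual bp->hwrm_cmd_resp_addr buffer):

	struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;

	if (resp->result != NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS)
		netdev_err(bp->dev, "install failed, problem item 0x%x\n",
			   resp->problem_item);
	else if (resp->reset_required != NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE)
		netdev_info(bp->dev, "install done, reset required (0x%x)\n",
			    resp->reset_required);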
-/* hwrm_nvm_get_variable */
-/* Input (40 bytes) */
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
struct hwrm_nvm_get_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
struct hwrm_nvm_get_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
struct hwrm_nvm_get_variable_cmd_err {
- u8 code;
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
+ u8 unused_0[7];
};
-/* hwrm_nvm_set_variable */
-/* Input (40 bytes) */
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
struct hwrm_nvm_set_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
+ u8 unused_0;
+};
+
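The flags byte above packs a plain bit and a shifted field: FORCE_FLUSH is bit 0, and the encrypt mode occupies bits 3:1 (mask 0xe, shift 1). Because the mode values are pre-shifted in the defines, they can be OR'd in directly — a small sketch:

	u8 flags, mode;

	flags = NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH |
		NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1;
	/* recover the mode field: (flags & 0xe) >> 1 == 0x1, i.e. HMAC_SHA1 */
	mode = (flags & NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK) >>
	       NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT;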
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
struct hwrm_nvm_set_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* Command specific Error Codes (8 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
struct hwrm_nvm_set_variable_cmd_err {
- u8 code;
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- u8 unused_0[7];
+ u8 code;
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
+ u8 unused_0[7];
};
-/* hwrm_selftest_qlist */
-/* Input (16 bytes) */
+/* hwrm_selftest_qlist_input (size:128b/16B) */
struct hwrm_selftest_qlist_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (280 bytes) */
+/* hwrm_selftest_qlist_output (size:2240b/280B) */
struct hwrm_selftest_qlist_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_tests;
- u8 available_tests;
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 offline_tests;
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
- __le16 test_timeout;
- u8 unused_1;
- u8 unused_2;
- char test0_name[32];
- char test1_name[32];
- char test2_name[32];
- char test3_name[32];
- char test4_name[32];
- char test5_name[32];
- char test6_name[32];
- char test7_name[32];
- __le32 unused_3;
- u8 unused_4;
- u8 unused_5;
- u8 unused_6;
- u8 valid;
-};
-
-/* hwrm_selftest_exec */
-/* Input (24 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_tests;
+ u8 available_tests;
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 offline_tests;
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0;
+ __le16 test_timeout;
+ u8 unused_1[2];
+ char test0_name[32];
+ char test1_name[32];
+ char test2_name[32];
+ char test3_name[32];
+ char test4_name[32];
+ char test5_name[32];
+ char test6_name[32];
+ char test7_name[32];
+ u8 unused_2[7];
+ u8 valid;
+};
+
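Test i of the response maps to bit (1 << i) in available_tests/offline_tests, and the eight name strings are laid out as consecutive 32-byte slots starting at test0_name, so a caller can walk them with pointer arithmetic (an illustrative sketch, not driver code):

	char *name = resp->test0_name;
	int i;

	for (i = 0; i < resp->num_tests && i < 8; i++, name += 32) {
		if (!(resp->available_tests & (1 << i)))
			continue;
		netdev_info(bp->dev, "test %d: %.32s%s\n", i, name,
			    (resp->offline_tests & (1 << i)) ? " (offline)" : "");
	}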
+/* hwrm_selftest_exec_input (size:192b/24B) */
struct hwrm_selftest_exec_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 flags;
- #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0[7];
-};
-
-/* Output (16 bytes) */
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
+ u8 pcie_lane_num;
+ u8 unused_0[6];
+};
+
+/* hwrm_selftest_exec_output (size:128b/16B) */
struct hwrm_selftest_exec_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 requested_tests;
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 test_success;
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 unused_4;
- u8 valid;
-};
-
-/* hwrm_selftest_irq */
-/* Input (16 bytes) */
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 requested_tests;
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 test_success;
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_selftest_irq_input (size:128b/16B) */
struct hwrm_selftest_irq_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
-/* Output (16 bytes) */
+/* hwrm_selftest_irq_output (size:128b/16B) */
struct hwrm_selftest_irq_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
-/* hwrm_selftest_retrieve_serdes_data */
-/* Input (32 bytes) */
-struct hwrm_selftest_retrieve_serdes_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 resp_data_addr;
- __le32 resp_data_offset;
- __le16 data_len;
- u8 flags;
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0xfUL
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
-};
-
-/* Output (16 bytes) */
-struct hwrm_selftest_retrieve_serdes_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 total_data_len;
- __le16 copied_data_len;
- u8 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 valid;
-};
-
-/* Hardware Resource Manager Specification */
-/* Input (16 bytes) */
-struct input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* Output (8 bytes) */
-struct output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
-};
-
-/* Short Command Structure (16 bytes) */
-struct hwrm_short_input {
- __le16 req_type;
- __le16 signature;
- #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
- __le16 unused_0;
- __le16 size;
- __le64 req_addr;
-};
-
-/* Command numbering (8 bytes) */
-struct cmd_nums {
- __le16 req_type;
- #define HWRM_VER_GET (0x0UL)
- #define HWRM_FUNC_BUF_UNRGTR (0xeUL)
- #define HWRM_FUNC_VF_CFG (0xfUL)
- #define RESERVED1 (0x10UL)
- #define HWRM_FUNC_RESET (0x11UL)
- #define HWRM_FUNC_GETFID (0x12UL)
- #define HWRM_FUNC_VF_ALLOC (0x13UL)
- #define HWRM_FUNC_VF_FREE (0x14UL)
- #define HWRM_FUNC_QCAPS (0x15UL)
- #define HWRM_FUNC_QCFG (0x16UL)
- #define HWRM_FUNC_CFG (0x17UL)
- #define HWRM_FUNC_QSTATS (0x18UL)
- #define HWRM_FUNC_CLR_STATS (0x19UL)
- #define HWRM_FUNC_DRV_UNRGTR (0x1aUL)
- #define HWRM_FUNC_VF_RESC_FREE (0x1bUL)
- #define HWRM_FUNC_VF_VNIC_IDS_QUERY (0x1cUL)
- #define HWRM_FUNC_DRV_RGTR (0x1dUL)
- #define HWRM_FUNC_DRV_QVER (0x1eUL)
- #define HWRM_FUNC_BUF_RGTR (0x1fUL)
- #define HWRM_PORT_PHY_CFG (0x20UL)
- #define HWRM_PORT_MAC_CFG (0x21UL)
- #define HWRM_PORT_TS_QUERY (0x22UL)
- #define HWRM_PORT_QSTATS (0x23UL)
- #define HWRM_PORT_LPBK_QSTATS (0x24UL)
- #define HWRM_PORT_CLR_STATS (0x25UL)
- #define HWRM_PORT_LPBK_CLR_STATS (0x26UL)
- #define HWRM_PORT_PHY_QCFG (0x27UL)
- #define HWRM_PORT_MAC_QCFG (0x28UL)
- #define HWRM_PORT_MAC_PTP_QCFG (0x29UL)
- #define HWRM_PORT_PHY_QCAPS (0x2aUL)
- #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL)
- #define HWRM_PORT_PHY_I2C_READ (0x2cUL)
- #define HWRM_PORT_LED_CFG (0x2dUL)
- #define HWRM_PORT_LED_QCFG (0x2eUL)
- #define HWRM_PORT_LED_QCAPS (0x2fUL)
- #define HWRM_QUEUE_QPORTCFG (0x30UL)
- #define HWRM_QUEUE_QCFG (0x31UL)
- #define HWRM_QUEUE_CFG (0x32UL)
- #define HWRM_FUNC_VLAN_CFG (0x33UL)
- #define HWRM_FUNC_VLAN_QCFG (0x34UL)
- #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
- #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
- #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
- #define HWRM_QUEUE_PRI2COS_CFG (0x38UL)
- #define HWRM_QUEUE_COS2BW_QCFG (0x39UL)
- #define HWRM_QUEUE_COS2BW_CFG (0x3aUL)
- #define HWRM_QUEUE_DSCP_QCAPS (0x3bUL)
- #define HWRM_QUEUE_DSCP2PRI_QCFG (0x3cUL)
- #define HWRM_QUEUE_DSCP2PRI_CFG (0x3dUL)
- #define HWRM_VNIC_ALLOC (0x40UL)
- #define HWRM_VNIC_FREE (0x41UL)
- #define HWRM_VNIC_CFG (0x42UL)
- #define HWRM_VNIC_QCFG (0x43UL)
- #define HWRM_VNIC_TPA_CFG (0x44UL)
- #define HWRM_VNIC_TPA_QCFG (0x45UL)
- #define HWRM_VNIC_RSS_CFG (0x46UL)
- #define HWRM_VNIC_RSS_QCFG (0x47UL)
- #define HWRM_VNIC_PLCMODES_CFG (0x48UL)
- #define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
- #define HWRM_VNIC_QCAPS (0x4aUL)
- #define HWRM_RING_ALLOC (0x50UL)
- #define HWRM_RING_FREE (0x51UL)
- #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
- #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (0x53UL)
- #define HWRM_RING_RESET (0x5eUL)
- #define HWRM_RING_GRP_ALLOC (0x60UL)
- #define HWRM_RING_GRP_FREE (0x61UL)
- #define RESERVED5 (0x64UL)
- #define RESERVED6 (0x65UL)
- #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
- #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
- #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
- #define HWRM_CFA_L2_FILTER_FREE (0x91UL)
- #define HWRM_CFA_L2_FILTER_CFG (0x92UL)
- #define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
- #define HWRM_CFA_VLAN_ANTISPOOF_CFG (0x94UL)
- #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
- #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
- #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
- #define HWRM_CFA_ENCAP_RECORD_FREE (0x98UL)
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL)
- #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL)
- #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL)
- #define HWRM_CFA_EM_FLOW_ALLOC (0x9cUL)
- #define HWRM_CFA_EM_FLOW_FREE (0x9dUL)
- #define HWRM_CFA_EM_FLOW_CFG (0x9eUL)
- #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL)
- #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL)
- #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL)
- #define HWRM_STAT_CTX_ALLOC (0xb0UL)
- #define HWRM_STAT_CTX_FREE (0xb1UL)
- #define HWRM_STAT_CTX_QUERY (0xb2UL)
- #define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
- #define HWRM_FW_RESET (0xc0UL)
- #define HWRM_FW_QSTATUS (0xc1UL)
- #define HWRM_FW_SET_TIME (0xc8UL)
- #define HWRM_FW_GET_TIME (0xc9UL)
- #define HWRM_FW_SET_STRUCTURED_DATA (0xcaUL)
- #define HWRM_FW_GET_STRUCTURED_DATA (0xcbUL)
- #define HWRM_FW_IPC_MAILBOX (0xccUL)
- #define HWRM_EXEC_FWD_RESP (0xd0UL)
- #define HWRM_REJECT_FWD_RESP (0xd1UL)
- #define HWRM_FWD_RESP (0xd2UL)
- #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
- #define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
- #define HWRM_WOL_FILTER_ALLOC (0xf0UL)
- #define HWRM_WOL_FILTER_FREE (0xf1UL)
- #define HWRM_WOL_FILTER_QCFG (0xf2UL)
- #define HWRM_WOL_REASON_QCFG (0xf3UL)
- #define HWRM_CFA_METER_PROFILE_ALLOC (0xf5UL)
- #define HWRM_CFA_METER_PROFILE_FREE (0xf6UL)
- #define HWRM_CFA_METER_PROFILE_CFG (0xf7UL)
- #define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL)
- #define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL)
- #define HWRM_CFA_VFR_ALLOC (0xfdUL)
- #define HWRM_CFA_VFR_FREE (0xfeUL)
- #define HWRM_CFA_VF_PAIR_ALLOC (0x100UL)
- #define HWRM_CFA_VF_PAIR_FREE (0x101UL)
- #define HWRM_CFA_VF_PAIR_INFO (0x102UL)
- #define HWRM_CFA_FLOW_ALLOC (0x103UL)
- #define HWRM_CFA_FLOW_FREE (0x104UL)
- #define HWRM_CFA_FLOW_FLUSH (0x105UL)
- #define HWRM_CFA_FLOW_STATS (0x106UL)
- #define HWRM_CFA_FLOW_INFO (0x107UL)
- #define HWRM_CFA_DECAP_FILTER_ALLOC (0x108UL)
- #define HWRM_CFA_DECAP_FILTER_FREE (0x109UL)
- #define HWRM_CFA_VLAN_ANTISPOOF_QCFG (0x10aUL)
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC (0x10bUL)
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE (0x10cUL)
- #define HWRM_CFA_PAIR_ALLOC (0x10dUL)
- #define HWRM_CFA_PAIR_FREE (0x10eUL)
- #define HWRM_CFA_PAIR_INFO (0x10fUL)
- #define HWRM_FW_IPC_MSG (0x110UL)
- #define HWRM_SELFTEST_QLIST (0x200UL)
- #define HWRM_SELFTEST_EXEC (0x201UL)
- #define HWRM_SELFTEST_IRQ (0x202UL)
- #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA (0x203UL)
- #define HWRM_DBG_READ_DIRECT (0xff10UL)
- #define HWRM_DBG_READ_INDIRECT (0xff11UL)
- #define HWRM_DBG_WRITE_DIRECT (0xff12UL)
- #define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
- #define HWRM_DBG_DUMP (0xff14UL)
- #define HWRM_DBG_ERASE_NVM (0xff15UL)
- #define HWRM_DBG_CFG (0xff16UL)
- #define HWRM_DBG_COREDUMP_LIST (0xff17UL)
- #define HWRM_DBG_COREDUMP_INITIATE (0xff18UL)
- #define HWRM_DBG_COREDUMP_RETRIEVE (0xff19UL)
- #define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL)
- #define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
- #define HWRM_NVM_FLUSH (0xfff0UL)
- #define HWRM_NVM_GET_VARIABLE (0xfff1UL)
- #define HWRM_NVM_SET_VARIABLE (0xfff2UL)
- #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
- #define HWRM_NVM_MODIFY (0xfff4UL)
- #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
- #define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
- #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL)
- #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL)
- #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL)
- #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL)
- #define HWRM_NVM_GET_DIR_INFO (0xfffbUL)
- #define HWRM_NVM_RAW_DUMP (0xfffcUL)
- #define HWRM_NVM_READ (0xfffdUL)
- #define HWRM_NVM_WRITE (0xfffeUL)
- #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL)
- __le16 unused_0[3];
-};
-
-/* Return Codes (8 bytes) */
-struct ret_codes {
- __le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS (0x0UL)
- #define HWRM_ERR_CODE_FAIL (0x1UL)
- #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL)
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL)
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL)
- #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL)
- #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL)
- #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL)
- #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL)
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL)
- __le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
-struct hwrm_err_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 opaque_0;
- __le16 opaque_1;
- u8 cmd_err;
- u8 valid;
-};
-
-/* Port Tx Statistics Formats (408 bytes) */
-struct tx_port_stats {
- __le64 tx_64b_frames;
- __le64 tx_65b_127b_frames;
- __le64 tx_128b_255b_frames;
- __le64 tx_256b_511b_frames;
- __le64 tx_512b_1023b_frames;
- __le64 tx_1024b_1518_frames;
- __le64 tx_good_vlan_frames;
- __le64 tx_1519b_2047_frames;
- __le64 tx_2048b_4095b_frames;
- __le64 tx_4096b_9216b_frames;
- __le64 tx_9217b_16383b_frames;
- __le64 tx_good_frames;
- __le64 tx_total_frames;
- __le64 tx_ucast_frames;
- __le64 tx_mcast_frames;
- __le64 tx_bcast_frames;
- __le64 tx_pause_frames;
- __le64 tx_pfc_frames;
- __le64 tx_jabber_frames;
- __le64 tx_fcs_err_frames;
- __le64 tx_control_frames;
- __le64 tx_oversz_frames;
- __le64 tx_single_dfrl_frames;
- __le64 tx_multi_dfrl_frames;
- __le64 tx_single_coll_frames;
- __le64 tx_multi_coll_frames;
- __le64 tx_late_coll_frames;
- __le64 tx_excessive_coll_frames;
- __le64 tx_frag_frames;
- __le64 tx_err;
- __le64 tx_tagged_frames;
- __le64 tx_dbl_tagged_frames;
- __le64 tx_runt_frames;
- __le64 tx_fifo_underruns;
- __le64 tx_pfc_ena_frames_pri0;
- __le64 tx_pfc_ena_frames_pri1;
- __le64 tx_pfc_ena_frames_pri2;
- __le64 tx_pfc_ena_frames_pri3;
- __le64 tx_pfc_ena_frames_pri4;
- __le64 tx_pfc_ena_frames_pri5;
- __le64 tx_pfc_ena_frames_pri6;
- __le64 tx_pfc_ena_frames_pri7;
- __le64 tx_eee_lpi_events;
- __le64 tx_eee_lpi_duration;
- __le64 tx_llfc_logical_msgs;
- __le64 tx_hcfc_msgs;
- __le64 tx_total_collisions;
- __le64 tx_bytes;
- __le64 tx_xthol_frames;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
-};
-
-/* Port Rx Statistics Formats (528 bytes) */
-struct rx_port_stats {
- __le64 rx_64b_frames;
- __le64 rx_65b_127b_frames;
- __le64 rx_128b_255b_frames;
- __le64 rx_256b_511b_frames;
- __le64 rx_512b_1023b_frames;
- __le64 rx_1024b_1518_frames;
- __le64 rx_good_vlan_frames;
- __le64 rx_1519b_2047b_frames;
- __le64 rx_2048b_4095b_frames;
- __le64 rx_4096b_9216b_frames;
- __le64 rx_9217b_16383b_frames;
- __le64 rx_total_frames;
- __le64 rx_ucast_frames;
- __le64 rx_mcast_frames;
- __le64 rx_bcast_frames;
- __le64 rx_fcs_err_frames;
- __le64 rx_ctrl_frames;
- __le64 rx_pause_frames;
- __le64 rx_pfc_frames;
- __le64 rx_unsupported_opcode_frames;
- __le64 rx_unsupported_da_pausepfc_frames;
- __le64 rx_wrong_sa_frames;
- __le64 rx_align_err_frames;
- __le64 rx_oor_len_frames;
- __le64 rx_code_err_frames;
- __le64 rx_false_carrier_frames;
- __le64 rx_ovrsz_frames;
- __le64 rx_jbr_frames;
- __le64 rx_mtu_err_frames;
- __le64 rx_match_crc_frames;
- __le64 rx_promiscuous_frames;
- __le64 rx_tagged_frames;
- __le64 rx_double_tagged_frames;
- __le64 rx_trunc_frames;
- __le64 rx_good_frames;
- __le64 rx_pfc_xon2xoff_frames_pri0;
- __le64 rx_pfc_xon2xoff_frames_pri1;
- __le64 rx_pfc_xon2xoff_frames_pri2;
- __le64 rx_pfc_xon2xoff_frames_pri3;
- __le64 rx_pfc_xon2xoff_frames_pri4;
- __le64 rx_pfc_xon2xoff_frames_pri5;
- __le64 rx_pfc_xon2xoff_frames_pri6;
- __le64 rx_pfc_xon2xoff_frames_pri7;
- __le64 rx_pfc_ena_frames_pri0;
- __le64 rx_pfc_ena_frames_pri1;
- __le64 rx_pfc_ena_frames_pri2;
- __le64 rx_pfc_ena_frames_pri3;
- __le64 rx_pfc_ena_frames_pri4;
- __le64 rx_pfc_ena_frames_pri5;
- __le64 rx_pfc_ena_frames_pri6;
- __le64 rx_pfc_ena_frames_pri7;
- __le64 rx_sch_crc_err_frames;
- __le64 rx_undrsz_frames;
- __le64 rx_frag_frames;
- __le64 rx_eee_lpi_events;
- __le64 rx_eee_lpi_duration;
- __le64 rx_llfc_physical_msgs;
- __le64 rx_llfc_logical_msgs;
- __le64 rx_llfc_msgs_with_crc_err;
- __le64 rx_hcfc_msgs;
- __le64 rx_hcfc_msgs_with_crc_err;
- __le64 rx_bytes;
- __le64 rx_runt_bytes;
- __le64 rx_runt_frames;
- __le64 rx_stat_discard;
- __le64 rx_stat_err;
-};
-
-/* VXLAN IPv4 encapsulation structure (16 bytes) */
-struct hwrm_vxlan_ipv4_hdr {
- u8 ver_hlen;
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
- u8 tos;
- __be16 ip_id;
- __be16 flags_frag_offset;
- u8 ttl;
- u8 protocol;
- __be32 src_ip_addr;
- __be32 dest_ip_addr;
-};
-
-/* VXLAN IPv6 encapsulation structure (32 bytes) */
-struct hwrm_vxlan_ipv6_hdr {
- __be32 ver_tc_flow_label;
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
- __be16 payload_len;
- u8 next_hdr;
- u8 ttl;
- __be32 src_ip_addr[4];
- __be32 dest_ip_addr[4];
-};
-
-/* VXLAN encapsulation structure (72 bytes) */
-struct hwrm_cfa_encap_data_vxlan {
- u8 src_mac_addr[6];
- __le16 unused_0;
- u8 dst_mac_addr[6];
- u8 num_vlan_tags;
- u8 unused_1;
- __be16 ovlan_tpid;
- __be16 ovlan_tci;
- __be16 ivlan_tpid;
- __be16 ivlan_tci;
- __le32 l3[10];
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
- __be16 src_port;
- __be16 dst_port;
- __be32 vni;
-};
-
-/* Periodic Statistics Context DMA to host (160 bytes) */
-struct ctx_hw_stats {
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 tpa_pkts;
- __le64 tpa_bytes;
- __le64 tpa_events;
- __le64 tpa_aborts;
-};
-
-/* Structure data header (16 bytes) */
-struct hwrm_struct_hdr {
- __le16 struct_id;
- #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
- #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
- #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
- #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
- #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- __le16 len;
- u8 version;
- u8 count;
- __le16 subtype;
- __le16 next_offset;
- #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
- __le16 unused_0[3];
-};
-
-/* DCBX Application configuration structure (1057) (8 bytes) */
-struct hwrm_struct_data_dcbx_app {
- __be16 protocol_id;
- u8 protocol_selector;
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
- u8 priority;
- u8 valid;
- u8 unused_0[3];
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
};
-#endif
+#endif /* _BNXT_HSI_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 5ee18660bc33..d87faad901fe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
return -EINVAL;
}
- if (vf_id >= bp->pf.max_vfs) {
+ if (vf_id >= bp->pf.active_vfs) {
netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
return -EINVAL;
}
@@ -135,7 +135,10 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
ivi->vf = vf_id;
vf = &bp->pf.vf[vf_id];
- memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+ if (is_valid_ether_addr(vf->mac_addr))
+ memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+ else
+ memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
ivi->max_tx_rate = vf->max_tx_rate;
ivi->min_tx_rate = vf->min_tx_rate;
ivi->vlan = vf->vlan;
@@ -416,29 +419,126 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-/* only call by PF to reserve resources for VF */
+/* Only called by PF to reserve resources for VFs, returns actual number of
+ * VFs configured, or < 0 on error.
+ */
+static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
+{
+ struct hwrm_func_vf_resource_cfg_input req = {0};
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
+ u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
+ struct bnxt_pf_info *pf = &bp->pf;
+ int i, rc = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
+
+ vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
+ vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
+ else
+ vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
+ vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
+ vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
+ vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
+ vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+
+ req.min_rsscos_ctx = cpu_to_le16(1);
+ req.max_rsscos_ctx = cpu_to_le16(1);
+ if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
+ req.min_cmpl_rings = cpu_to_le16(1);
+ req.min_tx_rings = cpu_to_le16(1);
+ req.min_rx_rings = cpu_to_le16(1);
+ req.min_l2_ctxs = cpu_to_le16(1);
+ req.min_vnics = cpu_to_le16(1);
+ req.min_stat_ctx = cpu_to_le16(1);
+ req.min_hw_ring_grps = cpu_to_le16(1);
+ } else {
+ vf_cp_rings /= num_vfs;
+ vf_tx_rings /= num_vfs;
+ vf_rx_rings /= num_vfs;
+ vf_vnics /= num_vfs;
+ vf_stat_ctx /= num_vfs;
+ vf_ring_grps /= num_vfs;
+
+ req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req.min_tx_rings = cpu_to_le16(vf_tx_rings);
+ req.min_rx_rings = cpu_to_le16(vf_rx_rings);
+ req.min_l2_ctxs = cpu_to_le16(4);
+ req.min_vnics = cpu_to_le16(vf_vnics);
+ req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
+ req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+ }
+ req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req.max_tx_rings = cpu_to_le16(vf_tx_rings);
+ req.max_rx_rings = cpu_to_le16(vf_rx_rings);
+ req.max_l2_ctxs = cpu_to_le16(4);
+ req.max_vnics = cpu_to_le16(vf_vnics);
+ req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
+ req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < num_vfs; i++) {
+ req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc) {
+ rc = -ENOMEM;
+ break;
+ }
+ pf->active_vfs = i + 1;
+ pf->vf[i].fw_fid = pf->first_vf_id + i;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ if (pf->active_vfs) {
+ u16 n = 1;
+
+ if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL)
+ n = pf->active_vfs;
+
+ hw_resc->max_tx_rings -= vf_tx_rings * n;
+ hw_resc->max_rx_rings -= vf_rx_rings * n;
+ hw_resc->max_hw_ring_grps -= vf_ring_grps * n;
+ hw_resc->max_cp_rings -= vf_cp_rings * n;
+ hw_resc->max_rsscos_ctxs -= pf->active_vfs;
+ hw_resc->max_stat_ctxs -= vf_stat_ctx * n;
+ hw_resc->max_vnics -= vf_vnics * n;
+
+ rc = pf->active_vfs;
+ }
+ return rc;
+}
+
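To make the shared-pool arithmetic concrete (illustrative numbers only): with hw_resc->max_tx_rings = 128, bp->tx_nr_rings = 8 and num_vfs = 4, vf_tx_rings = (128 - 8) / 4 = 30, so each VF is offered min = max = 30 TX rings and 30 * 4 = 120 rings are later deducted from the PF pool. Under BNXT_VF_RESV_STRATEGY_MINIMAL the per-VF division is skipped: each VF gets min = 1 and max = 120, and since n stays 1 in the bookkeeping above, only one VF's worth of maximums (120) is deducted.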
+/* Only called by PF to reserve resources for VFs, returns actual number of
+ * VFs configured, or < 0 on error.
+ */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
u32 rc = 0, mtu, i;
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
- u16 vf_ring_grps;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ u16 vf_ring_grps, max_stat_ctxs;
struct hwrm_func_cfg_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
int total_vf_tx_rings = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ max_stat_ctxs = hw_resc->max_stat_ctxs;
+
/* Remaining rings are distributed equally amongst VFs for now */

- vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs;
- vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
+ vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
+ vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
+ vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
num_vfs;
else
- vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
- vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
- vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
- vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs;
+ vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
+ num_vfs;
+ vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
+ vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
+ vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
@@ -485,22 +585,34 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
total_vf_tx_rings += vf_tx_rsvd;
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (!rc) {
- pf->max_tx_rings -= total_vf_tx_rings;
- pf->max_rx_rings -= vf_rx_rings * num_vfs;
- pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
- pf->max_cp_rings -= vf_cp_rings * num_vfs;
- pf->max_rsscos_ctxs -= num_vfs;
- pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
- pf->max_vnics -= vf_vnics * num_vfs;
+ if (rc)
+ rc = -ENOMEM;
+ if (pf->active_vfs) {
+ hw_resc->max_tx_rings -= total_vf_tx_rings;
+ hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
+ hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
+ hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
+ hw_resc->max_rsscos_ctxs -= num_vfs;
+ hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
+ hw_resc->max_vnics -= vf_vnics * num_vfs;
+ rc = pf->active_vfs;
}
return rc;
}
+static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
+{
+ if (bp->flags & BNXT_FLAG_NEW_RM)
+ return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
+ else
+ return bnxt_hwrm_func_cfg(bp, num_vfs);
+}
+
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
int rc = 0, vfs_supported;
int min_rx_rings, min_tx_rings, min_rss_ctxs;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int tx_ok = 0, rx_ok = 0, rss_ok = 0;
int avail_cp, avail_stat;
@@ -510,8 +622,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
*/
vfs_supported = *num_vfs;
- avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
- avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
+ avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
+ avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
avail_cp = min_t(int, avail_cp, avail_stat);
while (vfs_supported) {
@@ -520,23 +632,24 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
min_rss_ctxs = vfs_supported;
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
+ if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
min_rx_rings)
rx_ok = 1;
} else {
- if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
+ if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
min_rx_rings)
rx_ok = 1;
}
- if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
+ if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
avail_cp < min_rx_rings)
rx_ok = 0;
- if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
+ if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
avail_cp >= min_tx_rings)
tx_ok = 1;
- if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
+ if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
+ min_rss_ctxs)
rss_ok = 1;
if (tx_ok && rx_ok && rss_ok)
@@ -561,9 +674,16 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
goto err_out1;
/* Reserve resources for VFs */
- rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
- if (rc)
- goto err_out2;
+ rc = bnxt_func_cfg(bp, *num_vfs);
+ if (rc != *num_vfs) {
+ if (rc <= 0) {
+ netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
+ *num_vfs = 0;
+ goto err_out2;
+ }
+ netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
+ *num_vfs = rc;
+ }
/* Register buffers for VFs */
rc = bnxt_hwrm_func_buf_rgtr(bp);
@@ -766,17 +886,51 @@ exec_fwd_resp_exit:
return rc;
}
+static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
+ struct hwrm_func_vf_cfg_input *req =
+ (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
+
+ /* Only allow the VF to set a valid MAC address if the PF-assigned MAC
+ * address is zero.
+ */
+ if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
+ if (is_valid_ether_addr(req->dflt_mac_addr) &&
+ !is_valid_ether_addr(vf->mac_addr)) {
+ ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+ }
+ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+ }
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+}
+
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
struct hwrm_cfa_l2_filter_alloc_input *req =
(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+ bool mac_ok = false;
- if (!is_valid_ether_addr(vf->mac_addr) ||
- ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+ /* VF MAC address must first match PF MAC address, if it is valid.
+ * Otherwise, it must match the VF MAC address if firmware spec >=
+ * 1.2.2
+ */
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+ mac_ok = true;
+ } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
+ if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
+ mac_ok = true;
+ } else if (bp->hwrm_spec_code < 0x10202) {
+ mac_ok = true;
+ } else {
+ mac_ok = true;
+ }
+ if (mac_ok)
return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
- else
- return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
@@ -838,6 +992,9 @@ static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
u32 req_type = le16_to_cpu(encap_req->req_type);
switch (req_type) {
+ case HWRM_FUNC_VF_CFG:
+ rc = bnxt_vf_store_mac(bp, vf);
+ break;
case HWRM_CFA_L2_FILTER_ALLOC:
rc = bnxt_vf_validate_set_mac(bp, vf);
break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 3d201d7324bd..fbe6e208e17b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -43,7 +43,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
}
/* Is dev a VF-rep? */
- if (dev != pf_bp->dev)
+ if (bnxt_dev_is_vf_rep(dev))
return bnxt_vf_rep_get_fid(dev);
bp = netdev_priv(dev);
@@ -54,12 +54,10 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
struct bnxt_tc_actions *actions,
const struct tc_action *tc_act)
{
- int ifindex = tcf_mirred_ifindex(tc_act);
- struct net_device *dev;
+ struct net_device *dev = tcf_mirred_dev(tc_act);
- dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
if (!dev) {
- netdev_info(bp->dev, "no dev for ifindex=%d", ifindex);
+ netdev_info(bp->dev, "no dev in mirred action");
return -EINVAL;
}
@@ -148,9 +146,6 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
}
}
- if (rc)
- return rc;
-
if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
/* dst_fid is PF's fid */
@@ -164,7 +159,7 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
}
}
- return rc;
+ return 0;
}
#define GET_KEY(flow_cmd, key_type) \
@@ -421,7 +416,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
}
/* If all IP and L4 fields are wildcarded then this is an L2 flow */
- if (is_wildcard(&l3_mask, sizeof(l3_mask)) &&
+ if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
} else {
@@ -1417,11 +1412,7 @@ bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
void *flow_node;
int rc, i;
- rc = rhashtable_walk_start(iter);
- if (rc && rc != -EAGAIN) {
- i = 0;
- goto done;
- }
+ rhashtable_walk_start(iter);
rc = 0;
for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
@@ -1483,9 +1474,6 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
{
int rc = 0;
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 69186d188c43..26290403f38f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -124,7 +124,8 @@ static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type,
struct bnxt *bp = vf_rep->bp;
int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid;
- if (!bnxt_tc_flower_enabled(vf_rep->bp) || !tc_can_offload(bp->dev))
+ if (!bnxt_tc_flower_enabled(vf_rep->bp) ||
+ !tc_cls_can_offload_and_chain0(bp->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -241,6 +242,11 @@ static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name
};
+bool bnxt_dev_is_vf_rep(struct net_device *dev)
+{
+ return dev->netdev_ops == &bnxt_vf_rep_netdev_ops;
+}
+
/* Called when the parent PF interface is closed:
* As the mode transition from SWITCHDEV to LEGACY
* happens under the rtnl_lock() this routine is safe
@@ -376,6 +382,26 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
ether_addr_copy(dev->dev_addr, dev->perm_addr);
}
+static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
+{
+ struct pci_dev *pdev = bp->pdev;
+ int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+ u32 dw;
+
+ if (!pos) {
+ netdev_info(bp->dev, "Unable do read adapter's DSN");
+ return -EOPNOTSUPP;
+ }
+
+ /* DSN (two dw) is at an offset of 4 from the cap pos */
+ pos += 4;
+ pci_read_config_dword(pdev, pos, &dw);
+ put_unaligned_le32(dw, &dsn[0]);
+ pci_read_config_dword(pdev, pos + 4, &dw);
+ put_unaligned_le32(dw, &dsn[4]);
+ return 0;
+}
+
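put_unaligned_le32() stores each config dword least-significant byte first, so if the two reads return 0x44332211 and 0x88776655 (illustrative values only), dsn[] ends up { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 } — the 64-bit DSN 0x8877665544332211 in little-endian byte order.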
static int bnxt_vf_reps_create(struct bnxt *bp)
{
u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
@@ -440,6 +466,11 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
}
}
+ /* Read the adapter's DSN to use as the eswitch switch_id */
+ rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
+ if (rc)
+ goto err;
+
/* publish cfa_code_map only after all VF-reps have been initialized */
bp->cfa_code_map = cfa_code_map;
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index fb06bbe70e42..38b9a75ad724 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -28,6 +28,7 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
return bp->pf.vf[vf_rep->vf_idx].fw_fid;
}
+bool bnxt_dev_is_vf_rep(struct net_device *dev);
int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode);
@@ -54,5 +55,10 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
{
return 0;
}
+
+static inline bool bnxt_dev_is_vf_rep(struct net_device *dev)
+{
+ return false;
+}
#endif /* CONFIG_BNXT_SRIOV */
#endif /* BNXT_VFR_H */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 261e5847557a..1389ab5e05df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -96,6 +96,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
xdp.data = *data_ptr;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = *data_ptr + *len;
+ xdp.rxq = &rxr->xdp_rxq;
orig_data = xdp.data;
mapping = rx_buf->mapping - bp->rx_dma_offset;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 24b4f4ceceef..b1e35a9accf1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2527,9 +2527,10 @@ static void bcmgenet_irq_task(struct work_struct *work)
spin_unlock_irq(&priv->lock);
/* Link UP/DOWN event */
- if (status & UMAC_IRQ_LINK_EVENT)
- phy_mac_interrupt(priv->dev->phydev,
- !!(status & UMAC_IRQ_LINK_UP));
+ if (status & UMAC_IRQ_LINK_EVENT) {
+ priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP);
+ phy_mac_interrupt(priv->dev->phydev);
+ }
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index de51c2177d03..a77ee2f8fb8d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,11 +4,13 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2014 Broadcom Corporation.
+ * Copyright (C) 2005-2016 Broadcom Corporation.
+ * Copyright (C) 2016-2017 Broadcom Limited.
*
* Firmware is:
* Derived from proprietary unpublished source code,
- * Copyright (C) 2000-2003 Broadcom Corporation.
+ * Copyright (C) 2000-2016 Broadcom Corporation.
+ * Copyright (C) 2016-2017 Broadcom Ltd.
*
* Permission is hereby granted for the distribution of this firmware
* data in hexadecimal or equivalent format, provided this copyright
@@ -3225,7 +3227,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
return 0;
}
-#define NVRAM_CMD_TIMEOUT 5000
+#define NVRAM_CMD_TIMEOUT 10000
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
@@ -10052,6 +10054,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
tw32(GRC_MODE, tp->grc_mode | val);
+ /* On one of the AMD platforms, MRRS is restricted to 4000 because of
+ * a south bridge limitation. As a workaround, the driver sets MRRS to
+ * 2048 instead of the default 4096.
+ */
+ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+ tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
+ val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
+ tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
+ }
+
/* Setup the timer prescalar register. Clock is always 66Mhz. */
val = tr32(GRC_MISC_CFG);
val &= ~0xff;
@@ -14225,7 +14237,10 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
- if (tg3_asic_rev(tp) == ASIC_REV_57766)
+ if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
+ tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
reset_phy = true;
err = tg3_restart_hw(tp, reset_phy);
@@ -14774,7 +14789,7 @@ static void tg3_get_5717_nvram_info(struct tg3 *tp)
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
- u32 nvcfg1, nvmpinstrp;
+ u32 nvcfg1, nvmpinstrp, nv_status;
nvcfg1 = tr32(NVRAM_CFG1);
nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
@@ -14786,6 +14801,23 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
}
switch (nvmpinstrp) {
+ case FLASH_5762_MX25L_100:
+ case FLASH_5762_MX25L_200:
+ case FLASH_5762_MX25L_400:
+ case FLASH_5762_MX25L_800:
+ case FLASH_5762_MX25L_160_320:
+ tp->nvram_pagesize = 4096;
+ tp->nvram_jedecnum = JEDEC_MACRONIX;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+ tg3_flag_set(tp, FLASH);
+ nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
+ tp->nvram_size =
+ (1 << (nv_status >> AUTOSENSE_DEVID &
+ AUTOSENSE_DEVID_MASK)
+ << AUTOSENSE_SIZE_IN_MB);
+ return;
+
case FLASH_5762_EEPROM_HD:
nvmpinstrp = FLASH_5720_EEPROM_HD;
break;
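For the new MX25L cases, the autosense size works out as follows: (nv_status >> AUTOSENSE_DEVID) & AUTOSENSE_DEVID_MASK extracts a 3-bit device-ID field from bits 18:16, and nvram_size = 1 << devid << AUTOSENSE_SIZE_IN_MB bytes, i.e. 2^devid * 128 KiB — a field value of 3 gives 1 MiB, and the maximum of 7 gives 16 MiB.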
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index c2d02d02d1e6..47f51cc0566d 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -5,7 +5,8 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2014 Broadcom Corporation.
+ * Copyright (C) 2007-2016 Broadcom Corporation.
+ * Copyright (C) 2016-2017 Broadcom Limited.
*/
#ifndef _T3_H
@@ -96,6 +97,7 @@
#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106
#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109
#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a
+#define TG3PCI_SUBDEVICE_ID_DELL_5762 0x07f0
#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ
#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c
#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a
@@ -281,6 +283,9 @@
#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
/* 0xa8 --> 0xb8 unused */
+#define TG3PCI_DEV_STATUS_CTRL 0x000000b4
+#define MAX_READ_REQ_SIZE_2048 0x00004000
+#define MAX_READ_REQ_MASK 0x00007000
#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
#define DUAL_MAC_CTRL_CH_MASK 0x00000003
#define DUAL_MAC_CTRL_ID 0x00000004
@@ -1858,7 +1863,7 @@
#define NVRAM_STAT 0x00007004
#define NVRAM_WRDATA 0x00007008
#define NVRAM_ADDR 0x0000700c
-#define NVRAM_ADDR_MSK 0x00ffffff
+#define NVRAM_ADDR_MSK 0x07ffffff
#define NVRAM_RDDATA 0x00007010
#define NVRAM_CFG1 0x00007014
#define NVRAM_CFG1_FLASHIF_ENAB 0x00000001
@@ -1940,6 +1945,11 @@
#define FLASH_5720_EEPROM_LD 0x00000003
#define FLASH_5762_EEPROM_HD 0x02000001
#define FLASH_5762_EEPROM_LD 0x02000003
+#define FLASH_5762_MX25L_100 0x00800000
+#define FLASH_5762_MX25L_200 0x00800002
+#define FLASH_5762_MX25L_400 0x00800001
+#define FLASH_5762_MX25L_800 0x00800003
+#define FLASH_5762_MX25L_160_320 0x03800002
#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
@@ -2004,7 +2014,11 @@
/* 0x702c unused */
#define NVRAM_ADDR_LOCKOUT 0x00007030
-/* 0x7034 --> 0x7500 unused */
+#define NVRAM_AUTOSENSE_STATUS 0x00007038
+#define AUTOSENSE_DEVID 0x00000010
+#define AUTOSENSE_DEVID_MASK 0x00000007
+#define AUTOSENSE_SIZE_IN_MB 17
+/* 0x703c --> 0x7500 unused */
#define OTP_MODE 0x00007500
#define OTP_MODE_OTP_THRU_GRC 0x00000001
@@ -3373,6 +3387,7 @@ struct tg3 {
#define JEDEC_ST 0x20
#define JEDEC_SAIFUN 0x4f
#define JEDEC_SST 0xbf
+#define JEDEC_MACRONIX 0xc2
#define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB
#define ATMEL_AT24C02_PAGE_SIZE (8)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index c93f3a2dc6c1..86659823b259 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -13,6 +13,7 @@
#include <linux/phy.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
+#include <linux/interrupt.h>
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
#define MACB_EXT_DESC
@@ -164,14 +165,38 @@
#define GEM_DCFG5 0x0290 /* Design Config 5 */
#define GEM_DCFG6 0x0294 /* Design Config 6 */
#define GEM_DCFG7 0x0298 /* Design Config 7 */
+#define GEM_DCFG8 0x029C /* Design Config 8 */
#define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */
#define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */
+/* Screener Type 2 match registers */
+#define GEM_SCRT2 0x540
+
+/* EtherType registers */
+#define GEM_ETHT 0x06E0
+
+/* Type 2 compare registers */
+#define GEM_T2CMPW0 0x0700
+#define GEM_T2CMPW1 0x0704
+#define T2CMP_OFST(t2idx) (t2idx * 2)
+
+/* type 2 compare registers
+ * each location requires 3 compare regs
+ */
+#define GEM_IP4SRC_CMP(idx) (idx * 3)
+#define GEM_IP4DST_CMP(idx) (idx * 3 + 1)
+#define GEM_PORT_CMP(idx) (idx * 3 + 2)
+
+/* Which screening type 2 EtherType register will be used (0 - 7) */
+#define SCRT2_ETHT 0
+
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
#define GEM_TBQPH(hw_q) (0x04C8)
#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
+#define GEM_RBQS(hw_q) (0x04A0 + ((hw_q) << 2))
+#define GEM_RBQPH(hw_q) (0x04D4)
#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
@@ -455,6 +480,16 @@
#define GEM_DAW64_OFFSET 23
#define GEM_DAW64_SIZE 1
+/* Bitfields in DCFG8. */
+#define GEM_T1SCR_OFFSET 24
+#define GEM_T1SCR_SIZE 8
+#define GEM_T2SCR_OFFSET 16
+#define GEM_T2SCR_SIZE 8
+#define GEM_SCR2ETH_OFFSET 8
+#define GEM_SCR2ETH_SIZE 8
+#define GEM_SCR2CMP_OFFSET 0
+#define GEM_SCR2CMP_SIZE 8
+
/* Bitfields in TISUBN */
#define GEM_SUBNSINCR_OFFSET 0
#define GEM_SUBNSINCR_SIZE 16
@@ -483,6 +518,66 @@
#define GEM_RXTSMODE_OFFSET 4 /* RX Descriptor Timestamp Insertion mode */
#define GEM_RXTSMODE_SIZE 2
+/* Bitfields in SCRT2 */
+#define GEM_QUEUE_OFFSET 0 /* Queue Number */
+#define GEM_QUEUE_SIZE 4
+#define GEM_VLANPR_OFFSET 4 /* VLAN Priority */
+#define GEM_VLANPR_SIZE 3
+#define GEM_VLANEN_OFFSET 8 /* VLAN Enable */
+#define GEM_VLANEN_SIZE 1
+#define GEM_ETHT2IDX_OFFSET 9 /* Index to screener type 2 EtherType register */
+#define GEM_ETHT2IDX_SIZE 3
+#define GEM_ETHTEN_OFFSET 12 /* EtherType Enable */
+#define GEM_ETHTEN_SIZE 1
+#define GEM_CMPA_OFFSET 13 /* Compare A - Index to screener type 2 Compare register */
+#define GEM_CMPA_SIZE 5
+#define GEM_CMPAEN_OFFSET 18 /* Compare A Enable */
+#define GEM_CMPAEN_SIZE 1
+#define GEM_CMPB_OFFSET 19 /* Compare B - Index to screener type 2 Compare register */
+#define GEM_CMPB_SIZE 5
+#define GEM_CMPBEN_OFFSET 24 /* Compare B Enable */
+#define GEM_CMPBEN_SIZE 1
+#define GEM_CMPC_OFFSET 25 /* Compare C - Index to screener type 2 Compare register */
+#define GEM_CMPC_SIZE 5
+#define GEM_CMPCEN_OFFSET 30 /* Compare C Enable */
+#define GEM_CMPCEN_SIZE 1
+
+/* Bitfields in ETHT */
+#define GEM_ETHTCMP_OFFSET 0 /* EtherType compare value */
+#define GEM_ETHTCMP_SIZE 16
+
+/* Bitfields in T2CMPW0 */
+#define GEM_T2CMP_OFFSET 16 /* 0xFFFF0000 compare value */
+#define GEM_T2CMP_SIZE 16
+#define GEM_T2MASK_OFFSET 0 /* 0x0000FFFF compare value or mask */
+#define GEM_T2MASK_SIZE 16
+
+/* Bitfields in T2CMPW1 */
+#define GEM_T2DISMSK_OFFSET 9 /* disable mask */
+#define GEM_T2DISMSK_SIZE 1
+#define GEM_T2CMPOFST_OFFSET 7 /* compare offset */
+#define GEM_T2CMPOFST_SIZE 2
+#define GEM_T2OFST_OFFSET 0 /* offset value */
+#define GEM_T2OFST_SIZE 7
+
+/* Offset for screener type 2 compare values (T2CMPOFST).
+ * Note the offset is applied after the specified point,
+ * e.g. GEM_T2COMPOFST_ETYPE anchors just past the EtherType field, so
+ * an offset of 12 bytes from there lands on the source IP address of
+ * an IPv4 header
+ */
+#define GEM_T2COMPOFST_SOF 0
+#define GEM_T2COMPOFST_ETYPE 1
+#define GEM_T2COMPOFST_IPHDR 2
+#define GEM_T2COMPOFST_TCPUDP 3
+
+/* offset from EtherType to IP address */
+#define ETYPE_SRCIP_OFFSET 12
+#define ETYPE_DSTIP_OFFSET 16
+
+/* offset from IP header to port */
+#define IPHDR_SRCPORT_OFFSET 0
+#define IPHDR_DSTPORT_OFFSET 2
+
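Taken together, the macros above fix where each 4-tuple's compare registers live. A minimal sketch of the resulting address arithmetic (the helper name is hypothetical, not part of the patch):

/* Hypothetical helper: absolute offset of compare word 0 for compare
 * register cmp_idx; gem_writel_n() performs the same math.
 */
static inline unsigned int t2cmpw0_offset(unsigned int cmp_idx)
{
        return GEM_T2CMPW0 + T2CMP_OFST(cmp_idx) * 4;
}

/* e.g. the 4-tuple at screener index 1 uses compare registers
 * GEM_IP4SRC_CMP(1) = 3, GEM_IP4DST_CMP(1) = 4 and GEM_PORT_CMP(1) = 5,
 * so its source-IP compare pair sits at 0x700 + 6 * 4 = 0x718 (W0) and
 * 0x704 + 6 * 4 = 0x71C (W1).
 */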
/* Transmit DMA buffer descriptor Word 1 */
#define GEM_DMA_TXVALID_OFFSET 23 /* timestamp has been captured in the Buffer Descriptor */
#define GEM_DMA_TXVALID_SIZE 1
@@ -583,6 +678,8 @@
#define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value))
#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
+#define gem_readl_n(port, reg, idx) (port)->macb_reg_readl((port), GEM_##reg + idx * 4)
+#define gem_writel_n(port, reg, idx, value) (port)->macb_reg_writel((port), GEM_##reg + idx * 4, (value))
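These indexed accessors step through banks of consecutive 32-bit registers. A hedged usage sketch, mirroring how macb_init() later programs the shared EtherType screener slot:

u32 v = GEM_BFINS(ETHTCMP, ETH_P_IP, 0);  /* compare value: IPv4 ethertype */
gem_writel_n(bp, ETHT, SCRT2_ETHT, v);    /* writes GEM_ETHT + SCRT2_ETHT * 4 */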
#define PTP_TS_BUFFER_SIZE 128 /* must be power of 2 */
@@ -920,13 +1017,42 @@ static const struct gem_statistic gem_statistics[] = {
#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
+#define QUEUE_STAT_TITLE(title) { \
+ .stat_string = title, \
+}
+
+/* per-queue statistics; each field must be of type unsigned long so the
+ * set can be walked as an array
+ */
+struct queue_stats {
+ union {
+ unsigned long first;
+ unsigned long rx_packets;
+ };
+ unsigned long rx_bytes;
+ unsigned long rx_dropped;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+ unsigned long tx_dropped;
+};
+
+static const struct gem_statistic queue_statistics[] = {
+ QUEUE_STAT_TITLE("rx_packets"),
+ QUEUE_STAT_TITLE("rx_bytes"),
+ QUEUE_STAT_TITLE("rx_dropped"),
+ QUEUE_STAT_TITLE("tx_packets"),
+ QUEUE_STAT_TITLE("tx_bytes"),
+ QUEUE_STAT_TITLE("tx_dropped"),
+};
+
+#define QUEUE_STATS_LEN ARRAY_SIZE(queue_statistics)
+
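The anonymous union makes `first` alias `rx_packets`, so the six counters can be walked as a flat array. A minimal sketch, assuming (as the comment above requires) that every field is unsigned long with no padding:

unsigned long *stat = &queue->stats.first;
unsigned int i;

for (i = 0; i < QUEUE_STATS_LEN; i++)
        total[i] += stat[i];    /* 'total' is a hypothetical accumulator */

gem_update_stats() below relies on exactly this pointer walk to copy the per-queue counters into ethtool_stats.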
struct macb;
+struct macb_queue;
struct macb_or_gem_ops {
int (*mog_alloc_rx_buffers)(struct macb *bp);
void (*mog_free_rx_buffers)(struct macb *bp);
void (*mog_init_rings)(struct macb *bp);
- int (*mog_rx)(struct macb *bp, int budget);
+ int (*mog_rx)(struct macb_queue *queue, int budget);
};
/* MACB-PTP interface: adapt to platform needs. */
@@ -968,6 +1094,9 @@ struct macb_queue {
unsigned int IMR;
unsigned int TBQP;
unsigned int TBQPH;
+ unsigned int RBQS;
+ unsigned int RBQP;
+ unsigned int RBQPH;
unsigned int tx_head, tx_tail;
struct macb_dma_desc *tx_ring;
@@ -975,6 +1104,16 @@ struct macb_queue {
dma_addr_t tx_ring_dma;
struct work_struct tx_error_task;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t rx_buffers_dma;
+ unsigned int rx_tail;
+ unsigned int rx_prepared_head;
+ struct macb_dma_desc *rx_ring;
+ struct sk_buff **rx_skbuff;
+ void *rx_buffers;
+ struct napi_struct napi;
+ struct queue_stats stats;
+
#ifdef CONFIG_MACB_USE_HWSTAMP
struct work_struct tx_ts_task;
unsigned int tx_ts_head, tx_ts_tail;
@@ -982,6 +1121,16 @@ struct macb_queue {
#endif
};
+struct ethtool_rx_fs_item {
+ struct ethtool_rx_flow_spec fs;
+ struct list_head list;
+};
+
+struct ethtool_rx_fs_list {
+ struct list_head list;
+ unsigned int count;
+};
+
struct macb {
void __iomem *regs;
bool native_io;
@@ -990,11 +1139,6 @@ struct macb {
u32 (*macb_reg_readl)(struct macb *bp, int offset);
void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
- unsigned int rx_tail;
- unsigned int rx_prepared_head;
- struct macb_dma_desc *rx_ring;
- struct sk_buff **rx_skbuff;
- void *rx_buffers;
size_t rx_buffer_size;
unsigned int rx_ring_size;
@@ -1011,15 +1155,11 @@ struct macb {
struct clk *tx_clk;
struct clk *rx_clk;
struct net_device *dev;
- struct napi_struct napi;
union {
struct macb_stats macb;
struct gem_stats gem;
} hw_stats;
- dma_addr_t rx_ring_dma;
- dma_addr_t rx_buffers_dma;
-
struct macb_or_gem_ops macbgem_ops;
struct mii_bus *mii_bus;
@@ -1032,7 +1172,6 @@ struct macb {
unsigned int dma_burst_length;
phy_interface_t phy_interface;
- struct gpio_desc *reset_gpio;
/* AT91RM9200 transmit */
struct sk_buff *skb; /* holds skb until xmit interrupt completes */
@@ -1040,7 +1179,7 @@ struct macb {
int skb_length; /* saved skb length for pci_unmap_single */
unsigned int max_tx_length;
- u64 ethtool_stats[GEM_STATS_LEN];
+ u64 ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES];
unsigned int rx_frm_len_mask;
unsigned int jumbo_max_len;
@@ -1057,6 +1196,13 @@ struct macb {
struct ptp_clock_info ptp_clock_info;
struct tsu_incr tsu_incr;
struct hwtstamp_config tstamp_config;
+
+ /* RX queue filter rule set */
+ struct ethtool_rx_fs_list rx_fs_list;
+ spinlock_t rx_fs_lock;
+ unsigned int max_tuples;
+
+ struct tasklet_struct hresp_err_tasklet;
};
#ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 72a67f74b97b..e84afcf1ecb5 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -194,17 +194,17 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
return index & (bp->rx_ring_size - 1);
}
-static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
- index = macb_rx_ring_wrap(bp, index);
- index = macb_adj_dma_desc_idx(bp, index);
- return &bp->rx_ring[index];
+ index = macb_rx_ring_wrap(queue->bp, index);
+ index = macb_adj_dma_desc_idx(queue->bp, index);
+ return &queue->rx_ring[index];
}
-static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
- return bp->rx_buffers + bp->rx_buffer_size *
- macb_rx_ring_wrap(bp, index);
+ return queue->rx_buffers + queue->bp->rx_buffer_size *
+ macb_rx_ring_wrap(queue->bp, index);
}
/* I/O accessors */
@@ -759,7 +759,9 @@ static void macb_tx_error_task(struct work_struct *work)
macb_tx_ring_wrap(bp, tail),
skb->data);
bp->dev->stats.tx_packets++;
+ queue->stats.tx_packets++;
bp->dev->stats.tx_bytes += skb->len;
+ queue->stats.tx_bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
@@ -859,7 +861,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
macb_tx_ring_wrap(bp, tail),
skb->data);
bp->dev->stats.tx_packets++;
+ queue->stats.tx_packets++;
bp->dev->stats.tx_bytes += skb->len;
+ queue->stats.tx_bytes += skb->len;
}
/* Now we can safely release resources */
@@ -881,24 +885,25 @@ static void macb_tx_interrupt(struct macb_queue *queue)
netif_wake_subqueue(bp->dev, queue_index);
}
-static void gem_rx_refill(struct macb *bp)
+static void gem_rx_refill(struct macb_queue *queue)
{
unsigned int entry;
struct sk_buff *skb;
dma_addr_t paddr;
+ struct macb *bp = queue->bp;
struct macb_dma_desc *desc;
- while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
- bp->rx_ring_size) > 0) {
- entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);
+ while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
+ bp->rx_ring_size) > 0) {
+ entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
/* Make hw descriptor updates visible to CPU */
rmb();
- bp->rx_prepared_head++;
- desc = macb_rx_desc(bp, entry);
+ queue->rx_prepared_head++;
+ desc = macb_rx_desc(queue, entry);
- if (!bp->rx_skbuff[entry]) {
+ if (!queue->rx_skbuff[entry]) {
/* allocate sk_buff for this free entry in ring */
skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
if (unlikely(!skb)) {
@@ -916,7 +921,7 @@ static void gem_rx_refill(struct macb *bp)
break;
}
- bp->rx_skbuff[entry] = skb;
+ queue->rx_skbuff[entry] = skb;
if (entry == bp->rx_ring_size - 1)
paddr |= MACB_BIT(RX_WRAP);
@@ -934,18 +939,18 @@ static void gem_rx_refill(struct macb *bp)
/* Make descriptor updates visible to hardware */
wmb();
- netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
- bp->rx_prepared_head, bp->rx_tail);
+ netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
+ queue, queue->rx_prepared_head, queue->rx_tail);
}
/* Mark DMA descriptors from begin up to and not including end as unused */
-static void discard_partial_frame(struct macb *bp, unsigned int begin,
+static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
unsigned int end)
{
unsigned int frag;
for (frag = begin; frag != end; frag++) {
- struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+ struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
desc->addr &= ~MACB_BIT(RX_USED);
}
@@ -959,8 +964,9 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
*/
}
-static int gem_rx(struct macb *bp, int budget)
+static int gem_rx(struct macb_queue *queue, int budget)
{
+ struct macb *bp = queue->bp;
unsigned int len;
unsigned int entry;
struct sk_buff *skb;
@@ -972,8 +978,8 @@ static int gem_rx(struct macb *bp, int budget)
dma_addr_t addr;
bool rxused;
- entry = macb_rx_ring_wrap(bp, bp->rx_tail);
- desc = macb_rx_desc(bp, entry);
+ entry = macb_rx_ring_wrap(bp, queue->rx_tail);
+ desc = macb_rx_desc(queue, entry);
/* Make hw descriptor updates visible to CPU */
rmb();
@@ -985,24 +991,26 @@ static int gem_rx(struct macb *bp, int budget)
if (!rxused)
break;
- bp->rx_tail++;
+ queue->rx_tail++;
count++;
if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
netdev_err(bp->dev,
"not whole frame pointed by descriptor\n");
bp->dev->stats.rx_dropped++;
+ queue->stats.rx_dropped++;
break;
}
- skb = bp->rx_skbuff[entry];
+ skb = queue->rx_skbuff[entry];
if (unlikely(!skb)) {
netdev_err(bp->dev,
"inconsistent Rx descriptor chain\n");
bp->dev->stats.rx_dropped++;
+ queue->stats.rx_dropped++;
break;
}
/* now everything is ready for receiving packet */
- bp->rx_skbuff[entry] = NULL;
+ queue->rx_skbuff[entry] = NULL;
len = ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
@@ -1019,7 +1027,9 @@ static int gem_rx(struct macb *bp, int budget)
skb->ip_summed = CHECKSUM_UNNECESSARY;
bp->dev->stats.rx_packets++;
+ queue->stats.rx_packets++;
bp->dev->stats.rx_bytes += skb->len;
+ queue->stats.rx_bytes += skb->len;
gem_ptp_do_rxstamp(bp, skb, desc);
@@ -1035,12 +1045,12 @@ static int gem_rx(struct macb *bp, int budget)
netif_receive_skb(skb);
}
- gem_rx_refill(bp);
+ gem_rx_refill(queue);
return count;
}
-static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
+static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
unsigned int last_frag)
{
unsigned int len;
@@ -1048,8 +1058,9 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
unsigned int offset;
struct sk_buff *skb;
struct macb_dma_desc *desc;
+ struct macb *bp = queue->bp;
- desc = macb_rx_desc(bp, last_frag);
+ desc = macb_rx_desc(queue, last_frag);
len = desc->ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
@@ -1068,7 +1079,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
if (!skb) {
bp->dev->stats.rx_dropped++;
for (frag = first_frag; ; frag++) {
- desc = macb_rx_desc(bp, frag);
+ desc = macb_rx_desc(queue, frag);
desc->addr &= ~MACB_BIT(RX_USED);
if (frag == last_frag)
break;
@@ -1096,10 +1107,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
frag_len = len - offset;
}
skb_copy_to_linear_data_offset(skb, offset,
- macb_rx_buffer(bp, frag),
+ macb_rx_buffer(queue, frag),
frag_len);
offset += bp->rx_buffer_size;
- desc = macb_rx_desc(bp, frag);
+ desc = macb_rx_desc(queue, frag);
desc->addr &= ~MACB_BIT(RX_USED);
if (frag == last_frag)
@@ -1121,32 +1132,34 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
return 0;
}
-static inline void macb_init_rx_ring(struct macb *bp)
+static inline void macb_init_rx_ring(struct macb_queue *queue)
{
+ struct macb *bp = queue->bp;
dma_addr_t addr;
struct macb_dma_desc *desc = NULL;
int i;
- addr = bp->rx_buffers_dma;
+ addr = queue->rx_buffers_dma;
for (i = 0; i < bp->rx_ring_size; i++) {
- desc = macb_rx_desc(bp, i);
+ desc = macb_rx_desc(queue, i);
macb_set_addr(bp, desc, addr);
desc->ctrl = 0;
addr += bp->rx_buffer_size;
}
desc->addr |= MACB_BIT(RX_WRAP);
- bp->rx_tail = 0;
+ queue->rx_tail = 0;
}
-static int macb_rx(struct macb *bp, int budget)
+static int macb_rx(struct macb_queue *queue, int budget)
{
+ struct macb *bp = queue->bp;
bool reset_rx_queue = false;
int received = 0;
unsigned int tail;
int first_frag = -1;
- for (tail = bp->rx_tail; budget > 0; tail++) {
- struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
+ for (tail = queue->rx_tail; budget > 0; tail++) {
+ struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
u32 ctrl;
/* Make hw descriptor updates visible to CPU */
@@ -1159,7 +1172,7 @@ static int macb_rx(struct macb *bp, int budget)
if (ctrl & MACB_BIT(RX_SOF)) {
if (first_frag != -1)
- discard_partial_frame(bp, first_frag, tail);
+ discard_partial_frame(queue, first_frag, tail);
first_frag = tail;
}
@@ -1171,7 +1184,7 @@ static int macb_rx(struct macb *bp, int budget)
continue;
}
- dropped = macb_rx_frame(bp, first_frag, tail);
+ dropped = macb_rx_frame(queue, first_frag, tail);
first_frag = -1;
if (unlikely(dropped < 0)) {
reset_rx_queue = true;
@@ -1195,8 +1208,8 @@ static int macb_rx(struct macb *bp, int budget)
ctrl = macb_readl(bp, NCR);
macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
- macb_init_rx_ring(bp);
- macb_writel(bp, RBQP, bp->rx_ring_dma);
+ macb_init_rx_ring(queue);
+ queue_writel(queue, RBQP, queue->rx_ring_dma);
macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
@@ -1205,16 +1218,17 @@ static int macb_rx(struct macb *bp, int budget)
}
if (first_frag != -1)
- bp->rx_tail = first_frag;
+ queue->rx_tail = first_frag;
else
- bp->rx_tail = tail;
+ queue->rx_tail = tail;
return received;
}
static int macb_poll(struct napi_struct *napi, int budget)
{
- struct macb *bp = container_of(napi, struct macb, napi);
+ struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
+ struct macb *bp = queue->bp;
int work_done;
u32 status;
@@ -1224,7 +1238,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
(unsigned long)status, budget);
- work_done = bp->macbgem_ops.mog_rx(bp, budget);
+ work_done = bp->macbgem_ops.mog_rx(queue, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
@@ -1232,10 +1246,10 @@ static int macb_poll(struct napi_struct *napi, int budget)
status = macb_readl(bp, RSR);
if (status) {
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_BIT(RCOMP));
+ queue_writel(queue, ISR, MACB_BIT(RCOMP));
napi_reschedule(napi);
} else {
- macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ queue_writel(queue, IER, MACB_RX_INT_FLAGS);
}
}
@@ -1244,6 +1258,57 @@ static int macb_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static void macb_hresp_error_task(unsigned long data)
+{
+ struct macb *bp = (struct macb *)data;
+ struct net_device *dev = bp->dev;
+ struct macb_queue *queue = bp->queues;
+ unsigned int q;
+ u32 ctrl;
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+ ctrl = macb_readl(bp, NCR);
+ ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
+ macb_writel(bp, NCR, ctrl);
+
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+
+ bp->macbgem_ops.mog_init_rings(bp);
+
+ /* Initialize TX and RX buffers */
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, RBQPH,
+ upper_32_bits(queue->rx_ring_dma));
+#endif
+ queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, TBQPH,
+ upper_32_bits(queue->tx_ring_dma));
+#endif
+
+ /* Enable interrupts */
+ queue_writel(queue, IER,
+ MACB_RX_INT_FLAGS |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+
+ ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
+ macb_writel(bp, NCR, ctrl);
+
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+}
+
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
struct macb_queue *queue = dev_id;
@@ -1282,9 +1347,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
- if (napi_schedule_prep(&bp->napi)) {
+ if (napi_schedule_prep(&queue->napi)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
- __napi_schedule(&bp->napi);
+ __napi_schedule(&queue->napi);
}
}
@@ -1333,10 +1398,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_BIT(HRESP)) {
- /* TODO: Reset the hardware, and maybe move the
- * netdev_err to a lower-priority context as well
- * (work queue?)
- */
+ tasklet_schedule(&bp->hresp_err_tasklet);
netdev_err(dev, "DMA bus error: HRESP not OK\n");
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1708,38 +1770,44 @@ static void gem_free_rx_buffers(struct macb *bp)
{
struct sk_buff *skb;
struct macb_dma_desc *desc;
+ struct macb_queue *queue;
dma_addr_t addr;
+ unsigned int q;
int i;
- if (!bp->rx_skbuff)
- return;
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ if (!queue->rx_skbuff)
+ continue;
- for (i = 0; i < bp->rx_ring_size; i++) {
- skb = bp->rx_skbuff[i];
+ for (i = 0; i < bp->rx_ring_size; i++) {
+ skb = queue->rx_skbuff[i];
- if (!skb)
- continue;
+ if (!skb)
+ continue;
- desc = macb_rx_desc(bp, i);
- addr = macb_get_addr(bp, desc);
+ desc = macb_rx_desc(queue, i);
+ addr = macb_get_addr(bp, desc);
- dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- skb = NULL;
- }
+ dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
- kfree(bp->rx_skbuff);
- bp->rx_skbuff = NULL;
+ kfree(queue->rx_skbuff);
+ queue->rx_skbuff = NULL;
+ }
}
static void macb_free_rx_buffers(struct macb *bp)
{
- if (bp->rx_buffers) {
+ struct macb_queue *queue = &bp->queues[0];
+
+ if (queue->rx_buffers) {
dma_free_coherent(&bp->pdev->dev,
bp->rx_ring_size * bp->rx_buffer_size,
- bp->rx_buffers, bp->rx_buffers_dma);
- bp->rx_buffers = NULL;
+ queue->rx_buffers, queue->rx_buffers_dma);
+ queue->rx_buffers = NULL;
}
}
@@ -1748,11 +1816,12 @@ static void macb_free_consistent(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
+ queue = &bp->queues[0];
bp->macbgem_ops.mog_free_rx_buffers(bp);
- if (bp->rx_ring) {
+ if (queue->rx_ring) {
dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
- bp->rx_ring, bp->rx_ring_dma);
- bp->rx_ring = NULL;
+ queue->rx_ring, queue->rx_ring_dma);
+ queue->rx_ring = NULL;
}
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -1768,32 +1837,37 @@ static void macb_free_consistent(struct macb *bp)
static int gem_alloc_rx_buffers(struct macb *bp)
{
+ struct macb_queue *queue;
+ unsigned int q;
int size;
- size = bp->rx_ring_size * sizeof(struct sk_buff *);
- bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
- if (!bp->rx_skbuff)
- return -ENOMEM;
- else
- netdev_dbg(bp->dev,
- "Allocated %d RX struct sk_buff entries at %p\n",
- bp->rx_ring_size, bp->rx_skbuff);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ size = bp->rx_ring_size * sizeof(struct sk_buff *);
+ queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
+ if (!queue->rx_skbuff)
+ return -ENOMEM;
+ else
+ netdev_dbg(bp->dev,
+ "Allocated %d RX struct sk_buff entries at %p\n",
+ bp->rx_ring_size, queue->rx_skbuff);
+ }
return 0;
}
static int macb_alloc_rx_buffers(struct macb *bp)
{
+ struct macb_queue *queue = &bp->queues[0];
int size;
size = bp->rx_ring_size * bp->rx_buffer_size;
- bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
- &bp->rx_buffers_dma, GFP_KERNEL);
- if (!bp->rx_buffers)
+ queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+ &queue->rx_buffers_dma, GFP_KERNEL);
+ if (!queue->rx_buffers)
return -ENOMEM;
netdev_dbg(bp->dev,
"Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+ size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
return 0;
}
@@ -1819,17 +1893,16 @@ static int macb_alloc_consistent(struct macb *bp)
queue->tx_skb = kmalloc(size, GFP_KERNEL);
if (!queue->tx_skb)
goto out_err;
- }
-
- size = RX_RING_BYTES(bp);
- bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &bp->rx_ring_dma, GFP_KERNEL);
- if (!bp->rx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
+ size = RX_RING_BYTES(bp);
+ queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+ &queue->rx_ring_dma, GFP_KERNEL);
+ if (!queue->rx_ring)
+ goto out_err;
+ netdev_dbg(bp->dev,
+ "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
+ }
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
@@ -1856,12 +1929,13 @@ static void gem_init_rings(struct macb *bp)
desc->ctrl |= MACB_BIT(TX_WRAP);
queue->tx_head = 0;
queue->tx_tail = 0;
- }
- bp->rx_tail = 0;
- bp->rx_prepared_head = 0;
+ queue->rx_tail = 0;
+ queue->rx_prepared_head = 0;
+
+ gem_rx_refill(queue);
+ }
- gem_rx_refill(bp);
}
static void macb_init_rings(struct macb *bp)
@@ -1869,7 +1943,7 @@ static void macb_init_rings(struct macb *bp)
int i;
struct macb_dma_desc *desc = NULL;
- macb_init_rx_ring(bp);
+ macb_init_rx_ring(&bp->queues[0]);
for (i = 0; i < bp->tx_ring_size; i++) {
desc = macb_tx_desc(&bp->queues[0], i);
@@ -1978,11 +2052,20 @@ static u32 macb_dbw(struct macb *bp)
*/
static void macb_configure_dma(struct macb *bp)
{
+ struct macb_queue *queue;
+ u32 buffer_size;
+ unsigned int q;
u32 dmacfg;
+ buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
if (macb_is_gem(bp)) {
dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
- dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ if (q)
+ queue_writel(queue, RBQS, buffer_size);
+ else
+ dmacfg |= GEM_BF(RXBS, buffer_size);
+ }
if (bp->dma_burst_length)
dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
@@ -2051,12 +2134,12 @@ static void macb_init_hw(struct macb *bp)
macb_configure_dma(bp);
/* Initialize TX and RX buffers */
- macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
#endif
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
@@ -2197,6 +2280,8 @@ static int macb_open(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
+ struct macb_queue *queue;
+ unsigned int q;
int err;
netdev_dbg(bp->dev, "open\n");
@@ -2218,11 +2303,12 @@ static int macb_open(struct net_device *dev)
return err;
}
- napi_enable(&bp->napi);
-
bp->macbgem_ops.mog_init_rings(bp);
macb_init_hw(bp);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ napi_enable(&queue->napi);
+
/* schedule a link state check */
phy_start(dev->phydev);
@@ -2237,10 +2323,14 @@ static int macb_open(struct net_device *dev)
static int macb_close(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue;
unsigned long flags;
+ unsigned int q;
netif_tx_stop_all_queues(dev);
- napi_disable(&bp->napi);
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ napi_disable(&queue->napi);
if (dev->phydev)
phy_stop(dev->phydev);
@@ -2270,7 +2360,10 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
static void gem_update_stats(struct macb *bp)
{
- unsigned int i;
+ struct macb_queue *queue;
+ unsigned int i, q, idx;
+ unsigned long *stat;
+
u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
@@ -2287,6 +2380,11 @@ static void gem_update_stats(struct macb *bp)
*(++p) += val;
}
}
+
+ idx = GEM_STATS_LEN;
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
+ bp->ethtool_stats[idx++] = *stat;
}
static struct net_device_stats *gem_get_stats(struct macb *bp)
@@ -2334,14 +2432,17 @@ static void gem_get_ethtool_stats(struct net_device *dev,
bp = netdev_priv(dev);
gem_update_stats(bp);
- memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
+ memcpy(data, &bp->ethtool_stats, sizeof(u64)
+ * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
}
static int gem_get_sset_count(struct net_device *dev, int sset)
{
+ struct macb *bp = netdev_priv(dev);
+
switch (sset) {
case ETH_SS_STATS:
- return GEM_STATS_LEN;
+ return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -2349,13 +2450,25 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
+ char stat_string[ETH_GSTRING_LEN];
+ struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue;
unsigned int i;
+ unsigned int q;
switch (sset) {
case ETH_SS_STATS:
for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
memcpy(p, gem_statistics[i].stat_string,
ETH_GSTRING_LEN);
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
+ snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
+ q, queue_statistics[i].stat_string);
+ memcpy(p, stat_string, ETH_GSTRING_LEN);
+ }
+ }
break;
}
}
@@ -2603,6 +2716,307 @@ static int macb_get_ts_info(struct net_device *netdev,
return ethtool_op_get_ts_info(netdev, info);
}
+static void gem_enable_flow_filters(struct macb *bp, bool enable)
+{
+ struct ethtool_rx_fs_item *item;
+ u32 t2_scr;
+ int num_t2_scr;
+
+ num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ struct ethtool_rx_flow_spec *fs = &item->fs;
+ struct ethtool_tcpip4_spec *tp4sp_m;
+
+ if (fs->location >= num_t2_scr)
+ continue;
+
+ t2_scr = gem_readl_n(bp, SCRT2, fs->location);
+
+ /* enable/disable screener regs for the flow entry */
+ t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
+
+ /* only enable fields with no masking */
+ tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+
+ if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
+ t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
+ else
+ t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
+
+ if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
+ t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
+ else
+ t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
+
+ if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
+ t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
+ else
+ t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
+
+ gem_writel_n(bp, SCRT2, fs->location, t2_scr);
+ }
+}
+
+static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
+ uint16_t index = fs->location;
+ u32 w0, w1, t2_scr;
+ bool cmp_a = false;
+ bool cmp_b = false;
+ bool cmp_c = false;
+
+ tp4sp_v = &(fs->h_u.tcp_ip4_spec);
+ tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+
+ /* program this field only when an exact-match mask is set */
+ if (tp4sp_m->ip4src == 0xFFFFFFFF) {
+ /* 1st compare reg - IP source address */
+ w0 = 0;
+ w1 = 0;
+ w0 = tp4sp_v->ip4src;
+ w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+ w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
+ w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
+ gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
+ gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
+ cmp_a = true;
+ }
+
+ /* program this field only when an exact-match mask is set */
+ if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
+ /* 2nd compare reg - IP destination address */
+ w0 = 0;
+ w1 = 0;
+ w0 = tp4sp_v->ip4dst;
+ w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+ w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
+ w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
+ gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
+ gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
+ cmp_b = true;
+ }
+
+ /* program the ports only when at least one has an exact-match mask */
+ if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
+ /* 3rd compare reg - source port, destination port */
+ w0 = 0;
+ w1 = 0;
+ w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
+ if (tp4sp_m->psrc == tp4sp_m->pdst) {
+ w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
+ w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
+ w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+ w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
+ } else {
+ /* only one port definition */
+ w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
+ w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
+ if (tp4sp_m->psrc == 0xFFFF) { /* src port */
+ w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
+ w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
+ } else { /* dst port */
+ w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
+ w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
+ }
+ }
+ gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
+ gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
+ cmp_c = true;
+ }
+
+ t2_scr = 0;
+ t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
+ t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
+ if (cmp_a)
+ t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
+ if (cmp_b)
+ t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
+ if (cmp_c)
+ t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
+ gem_writel_n(bp, SCRT2, index, t2_scr);
+}
+
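For illustration, a sketch of a flow spec that exercises the destination-IP and destination-port paths above (address, port and queue are made up; exact-match masks are required, as the checks show):

struct ethtool_rx_flow_spec fs = {
        .flow_type = TCP_V4_FLOW,
        .location = 0,          /* screener index 0 */
        .ring_cookie = 1,       /* steer matches to queue 1 */
};

fs.h_u.tcp_ip4_spec.ip4dst = htonl(0xc0a80101); /* 192.168.1.1 */
fs.m_u.tcp_ip4_spec.ip4dst = 0xFFFFFFFF;
fs.h_u.tcp_ip4_spec.pdst = htons(80);
fs.m_u.tcp_ip4_spec.pdst = 0xFFFF;

gem_prog_cmp_regs(bp, &fs);     /* programs compare regs 1 (dst IP) and
                                 * 2 (dst port), then SCRT2[0] with
                                 * CMPB = 1, CMPC = 2 and QUEUE = 1; the
                                 * CMP*EN bits are set later by
                                 * gem_enable_flow_filters()
                                 */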
+static int gem_add_flow_filter(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ struct ethtool_rx_fs_item *item, *newfs;
+ unsigned long flags;
+ int ret = -EINVAL;
+ bool added = false;
+
+ newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
+ if (!newfs)
+ return -ENOMEM;
+ memcpy(&newfs->fs, fs, sizeof(newfs->fs));
+
+ netdev_dbg(netdev,
+ "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
+ fs->flow_type, (int)fs->ring_cookie, fs->location,
+ htonl(fs->h_u.tcp_ip4_spec.ip4src),
+ htonl(fs->h_u.tcp_ip4_spec.ip4dst),
+ htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
+
+ spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
+ /* find correct place to add in list */
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (item->fs.location > newfs->fs.location) {
+ list_add_tail(&newfs->list, &item->list);
+ added = true;
+ break;
+ } else if (item->fs.location == fs->location) {
+ netdev_err(netdev, "Rule not added: location %d not free!\n",
+ fs->location);
+ ret = -EBUSY;
+ goto err;
+ }
+ }
+ if (!added)
+ list_add_tail(&newfs->list, &bp->rx_fs_list.list);
+
+ gem_prog_cmp_regs(bp, fs);
+ bp->rx_fs_list.count++;
+ /* enable filtering if NTUPLE on */
+ if (netdev->features & NETIF_F_NTUPLE)
+ gem_enable_flow_filters(bp, 1);
+
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ kfree(newfs);
+ return ret;
+}
+
+static int gem_del_flow_filter(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_fs_item *item;
+ struct ethtool_rx_flow_spec *fs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (item->fs.location == cmd->fs.location) {
+ /* disable screener regs for the flow entry */
+ fs = &(item->fs);
+ netdev_dbg(netdev,
+ "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
+ fs->flow_type, (int)fs->ring_cookie, fs->location,
+ htonl(fs->h_u.tcp_ip4_spec.ip4src),
+ htonl(fs->h_u.tcp_ip4_spec.ip4dst),
+ htons(fs->h_u.tcp_ip4_spec.psrc),
+ htons(fs->h_u.tcp_ip4_spec.pdst));
+
+ gem_writel_n(bp, SCRT2, fs->location, 0);
+
+ list_del(&item->list);
+ bp->rx_fs_list.count--;
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ kfree(item);
+ return 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+ return -EINVAL;
+}
+
+static int gem_get_flow_entry(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_fs_item *item;
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (item->fs.location == cmd->fs.location) {
+ memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int gem_get_all_flow_entries(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct macb *bp = netdev_priv(netdev);
+ struct ethtool_rx_fs_item *item;
+ uint32_t cnt = 0;
+
+ list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = item->fs.location;
+ cnt++;
+ }
+ cmd->data = bp->max_tuples;
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct macb *bp = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = bp->num_queues;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = bp->rx_fs_list.count;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = gem_get_flow_entry(netdev, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
+ break;
+ default:
+ netdev_err(netdev,
+ "Command parameter %d is not supported\n", cmd->cmd);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct macb *bp = netdev_priv(netdev);
+ int ret;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ if ((cmd->fs.location >= bp->max_tuples) ||
+     (cmd->fs.ring_cookie >= bp->num_queues)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = gem_add_flow_filter(netdev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = gem_del_flow_filter(netdev, cmd);
+ break;
+ default:
+ netdev_err(netdev,
+ "Command parameter %d is not supported\n", cmd->cmd);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
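Once NETIF_F_NTUPLE is turned on, these handlers are reached through the standard ethtool ntuple interface, e.g. `ethtool -K eth0 ntuple on` followed by `ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 action 1 loc 0` (interface name, address and queue are illustrative).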
static const struct ethtool_ops macb_ethtool_ops = {
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
@@ -2628,6 +3042,8 @@ static const struct ethtool_ops gem_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
.set_ringparam = macb_set_ringparam,
+ .get_rxnfc = gem_get_rxnfc,
+ .set_rxnfc = gem_set_rxnfc,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2685,6 +3101,12 @@ static int macb_set_features(struct net_device *netdev,
gem_writel(bp, NCFGR, netcfg);
}
+ /* RX Flow Filters */
+ if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
+ bool turn_on = features & NETIF_F_NTUPLE;
+
+ gem_enable_flow_filters(bp, turn_on);
+ }
return 0;
}
@@ -2850,7 +3272,7 @@ static int macb_init(struct platform_device *pdev)
struct macb *bp = netdev_priv(dev);
struct macb_queue *queue;
int err;
- u32 val;
+ u32 val, reg;
bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
@@ -2865,15 +3287,20 @@ static int macb_init(struct platform_device *pdev)
queue = &bp->queues[q];
queue->bp = bp;
+ netif_napi_add(dev, &queue->napi, macb_poll, 64);
if (hw_q) {
queue->ISR = GEM_ISR(hw_q - 1);
queue->IER = GEM_IER(hw_q - 1);
queue->IDR = GEM_IDR(hw_q - 1);
queue->IMR = GEM_IMR(hw_q - 1);
queue->TBQP = GEM_TBQP(hw_q - 1);
+ queue->RBQP = GEM_RBQP(hw_q - 1);
+ queue->RBQS = GEM_RBQS(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
queue->TBQPH = GEM_TBQPH(hw_q - 1);
+ queue->RBQPH = GEM_RBQPH(hw_q - 1);
+ }
#endif
} else {
/* queue0 uses legacy registers */
@@ -2882,9 +3309,12 @@ static int macb_init(struct platform_device *pdev)
queue->IDR = MACB_IDR;
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
+ queue->RBQP = MACB_RBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
queue->TBQPH = MACB_TBQPH;
+ queue->RBQPH = MACB_RBQPH;
+ }
#endif
}
@@ -2908,7 +3338,6 @@ static int macb_init(struct platform_device *pdev)
}
dev->netdev_ops = &macb_netdev_ops;
- netif_napi_add(dev, &bp->napi, macb_poll, 64);
/* setup appropriated routines according to adapter type */
if (macb_is_gem(bp)) {
@@ -2941,6 +3370,30 @@ static int macb_init(struct platform_device *pdev)
dev->hw_features &= ~NETIF_F_SG;
dev->features = dev->hw_features;
+ /* Check RX Flow Filters support.
+ * Max Rx flows set by availability of screeners & compare regs:
+ * each 4-tuple definition requires 1 T2 screener reg + 3 compare regs
+ */
+ reg = gem_readl(bp, DCFG8);
+ bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
+ GEM_BFEXT(T2SCR, reg));
+ if (bp->max_tuples > 0) {
+ /* also needs one ethtype match to check IPv4 */
+ if (GEM_BFEXT(SCR2ETH, reg) > 0) {
+ /* program this reg now */
+ reg = 0;
+ reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
+ gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
+ /* Filtering is supported in hw but is not enabled by default */
+ dev->hw_features |= NETIF_F_NTUPLE;
+ /* init Rx flow definitions */
+ INIT_LIST_HEAD(&bp->rx_fs_list.list);
+ bp->rx_fs_list.count = 0;
+ spin_lock_init(&bp->rx_fs_lock);
+ } else {
+ bp->max_tuples = 0;
+ }
+ }
+
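As a worked example of the capability math above: a design advertising SCR2CMP = 32 compare registers and T2SCR = 16 type 2 screeners yields max_tuples = min(32 / 3, 16) = min(10, 16) = 10, i.e. up to ten 4-tuple rules.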
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
val = 0;
if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
@@ -2977,34 +3430,35 @@ static int macb_init(struct platform_device *pdev)
static int at91ether_start(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_queue *q = &lp->queues[0];
struct macb_dma_desc *desc;
dma_addr_t addr;
u32 ctl;
int i;
- lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+ q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
(AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp)),
- &lp->rx_ring_dma, GFP_KERNEL);
- if (!lp->rx_ring)
+ &q->rx_ring_dma, GFP_KERNEL);
+ if (!q->rx_ring)
return -ENOMEM;
- lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+ q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
AT91ETHER_MAX_RBUFF_SZ,
- &lp->rx_buffers_dma, GFP_KERNEL);
- if (!lp->rx_buffers) {
+ &q->rx_buffers_dma, GFP_KERNEL);
+ if (!q->rx_buffers) {
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp),
- lp->rx_ring, lp->rx_ring_dma);
- lp->rx_ring = NULL;
+ q->rx_ring, q->rx_ring_dma);
+ q->rx_ring = NULL;
return -ENOMEM;
}
- addr = lp->rx_buffers_dma;
+ addr = q->rx_buffers_dma;
for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
- desc = macb_rx_desc(lp, i);
+ desc = macb_rx_desc(q, i);
macb_set_addr(lp, desc, addr);
desc->ctrl = 0;
addr += AT91ETHER_MAX_RBUFF_SZ;
@@ -3014,10 +3468,10 @@ static int at91ether_start(struct net_device *dev)
desc->addr |= MACB_BIT(RX_WRAP);
/* Reset buffer index */
- lp->rx_tail = 0;
+ q->rx_tail = 0;
/* Program address of descriptor list in Rx Buffer Queue register */
- macb_writel(lp, RBQP, lp->rx_ring_dma);
+ macb_writel(lp, RBQP, q->rx_ring_dma);
/* Enable Receive and Transmit */
ctl = macb_readl(lp, NCR);
@@ -3064,6 +3518,7 @@ static int at91ether_open(struct net_device *dev)
static int at91ether_close(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_queue *q = &lp->queues[0];
u32 ctl;
/* Disable Receiver and Transmitter */
@@ -3084,13 +3539,13 @@ static int at91ether_close(struct net_device *dev)
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
macb_dma_desc_get_size(lp),
- lp->rx_ring, lp->rx_ring_dma);
- lp->rx_ring = NULL;
+ q->rx_ring, q->rx_ring_dma);
+ q->rx_ring = NULL;
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
- lp->rx_buffers, lp->rx_buffers_dma);
- lp->rx_buffers = NULL;
+ q->rx_buffers, q->rx_buffers_dma);
+ q->rx_buffers = NULL;
return 0;
}
@@ -3134,14 +3589,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void at91ether_rx(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_queue *q = &lp->queues[0];
struct macb_dma_desc *desc;
unsigned char *p_recv;
struct sk_buff *skb;
unsigned int pktlen;
- desc = macb_rx_desc(lp, lp->rx_tail);
+ desc = macb_rx_desc(q, q->rx_tail);
while (desc->addr & MACB_BIT(RX_USED)) {
- p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
+ p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
skb = netdev_alloc_skb(dev, pktlen + 2);
if (skb) {
@@ -3163,12 +3619,12 @@ static void at91ether_rx(struct net_device *dev)
desc->addr &= ~MACB_BIT(RX_USED);
/* wrap after last buffer */
- if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
- lp->rx_tail = 0;
+ if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
+ q->rx_tail = 0;
else
- lp->rx_tail++;
+ q->rx_tail++;
- desc = macb_rx_desc(lp, lp->rx_tail);
+ desc = macb_rx_desc(q, q->rx_tail);
}
}
@@ -3394,7 +3850,6 @@ static int macb_probe(struct platform_device *pdev)
= macb_config->clk_init;
int (*init)(struct platform_device *) = macb_config->init;
struct device_node *np = pdev->dev.of_node;
- struct device_node *phy_node;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata;
@@ -3500,18 +3955,6 @@ static int macb_probe(struct platform_device *pdev)
else
macb_get_hwaddr(bp);
- /* Power up the PHY if there is a GPIO reset */
- phy_node = of_get_next_available_child(np, NULL);
- if (phy_node) {
- int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
-
- if (gpio_is_valid(gpio)) {
- bp->reset_gpio = gpio_to_desc(gpio);
- gpiod_direction_output(bp->reset_gpio, 1);
- }
- }
- of_node_put(phy_node);
-
err = of_get_phy_mode(np);
if (err < 0) {
pdata = dev_get_platdata(&pdev->dev);
@@ -3542,6 +3985,9 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_unregister_mdio;
}
+ tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
+ (unsigned long)bp);
+
phy_attached_info(phydev);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
@@ -3558,10 +4004,6 @@ err_out_unregister_mdio:
of_phy_deregister_fixed_link(np);
mdiobus_free(bp->mii_bus);
- /* Shutdown the PHY if there is a GPIO reset */
- if (bp->reset_gpio)
- gpiod_set_value(bp->reset_gpio, 0);
-
err_out_free_netdev:
free_netdev(dev);
@@ -3592,10 +4034,6 @@ static int macb_remove(struct platform_device *pdev)
dev->phydev = NULL;
mdiobus_free(bp->mii_bus);
- /* Shutdown the PHY if there is a GPIO reset */
- if (bp->reset_gpio)
- gpiod_set_value(bp->reset_gpio, 0);
-
unregister_netdev(dev);
clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 63be75eb34d2..043e3c11c42b 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -27,6 +27,7 @@ config THUNDER_NIC_PF
config THUNDER_NIC_VF
tristate "Thunder Virtual function driver"
+ imply CAVIUM_PTP
depends on 64BIT
---help---
This driver supports Thunder's NIC virtual function
@@ -50,6 +51,18 @@ config THUNDER_NIC_RGX
This driver supports configuring XCV block of RGX interface
present on CN81XX chip.
+config CAVIUM_PTP
+ tristate "Cavium PTP coprocessor as PTP clock"
+ depends on 64BIT
+ imply PTP_1588_CLOCK
+ default y
+ ---help---
+ This driver adds support for the Precision Time Protocol Clocks and
+ Timestamping coprocessor (PTP) found on Cavium processors.
+ PTP provides a timestamping mechanism suitable for use in the IEEE 1588
+ Precision Time Protocol and for other purposes. Timestamps can be used in
+ BGX, TNS, GTI, and NIC blocks.
+
config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
index 872da9f7c31a..946bba84e81d 100644
--- a/drivers/net/ethernet/cavium/Makefile
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the Cavium ethernet device drivers.
#
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += common/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += octeon/
diff --git a/drivers/net/ethernet/cavium/common/Makefile b/drivers/net/ethernet/cavium/common/Makefile
new file mode 100644
index 000000000000..dd8561b8060b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CAVIUM_PTP) += cavium_ptp.o
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
new file mode 100644
index 000000000000..c87c9c684a33
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.c - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/timecounter.h>
+#include <linux/pci.h>
+
+#include "cavium_ptp.h"
+
+#define DRV_NAME "Cavium PTP Driver"
+
+#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
+#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
+
+#define PCI_PTP_BAR_NO 0
+#define PCI_RST_BAR_NO 0
+
+#define PTP_CLOCK_CFG 0xF00ULL
+#define PTP_CLOCK_CFG_PTP_EN BIT(0)
+#define PTP_CLOCK_LO 0xF08ULL
+#define PTP_CLOCK_HI 0xF10ULL
+#define PTP_CLOCK_COMP 0xF18ULL
+
+#define RST_BOOT 0x1600ULL
+#define CLOCK_BASE_RATE 50000000ULL
+
+static u64 ptp_cavium_clock_get(void)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ u64 ret = CLOCK_BASE_RATE * 16;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_CAVIUM_RST, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
+ if (!base)
+ goto error_put_pdev;
+
+ ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT) >> 33) & 0x3f);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
+
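RST_BOOT bits 38:33 hold the coprocessor-clock PLL multiplier, so a multiplier of 16, for example, gives 16 * 50 MHz = 800 MHz; the same value serves as the fallback when the RST device cannot be found.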
+struct cavium_ptp *cavium_ptp_get(void)
+{
+ struct cavium_ptp *ptp;
+ struct pci_dev *pdev;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_CAVIUM_PTP, NULL);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ ptp = pci_get_drvdata(pdev);
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ptp))
+ pci_dev_put(pdev);
+
+ return ptp;
+}
+EXPORT_SYMBOL(cavium_ptp_get);
+
+void cavium_ptp_put(struct cavium_ptp *ptp)
+{
+ pci_dev_put(ptp->pdev);
+}
+EXPORT_SYMBOL(cavium_ptp_put);
+
+/**
+ * cavium_ptp_adjfine() - Adjust ptp frequency
+ * @ptp_info: PTP clock info
+ * @scaled_ppm: how much to adjust by, in parts per million, but with a
+ * 16 bit binary fractional field
+ */
+static int cavium_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 comp;
+ u64 adj;
+ bool neg_adj = false;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* The hardware adds the clock compensation value to the PTP clock
+ * on every coprocessor clock cycle, so by convention it represents
+ * the number of nanoseconds between cycles. In this convention the
+ * compensation value is a 64-bit fixed-point number whose upper
+ * 32 bits are whole nanoseconds and whose lower 32 bits are
+ * fractions of a nanosecond.
+ * scaled_ppm is the requested correction in parts per million,
+ * carrying a 16-bit binary fractional field.
+ * The new compensation value is computed with 64-bit fixed-point
+ * arithmetic from the formula
+ * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+ * where tbase is the base compensation value calculated initially
+ * in cavium_ptp_probe() as tbase = 1/Hz, and the result is written
+ * to the PTP_CLOCK_COMP register.
+ */
+ comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+ adj = comp * scaled_ppm;
+ adj >>= 16;
+ adj = div_u64(adj, 1000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ writeq(comp, clock->reg_base + PTP_CLOCK_COMP);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ return 0;
+}
+
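A worked example of the fixed-point math, assuming an 800 MHz coprocessor clock: tbase = (10^9 << 32) / (8 * 10^8) = 1.25 * 2^32 = 5368709120, i.e. 1.25 ns per cycle in 32.32 format. A +1 ppm correction arrives as scaled_ppm = 1 << 16 = 65536, so adj = ((5368709120 * 65536) >> 16) / 10^6 = 5368, and 5368709120 + 5368 is written to PTP_CLOCK_COMP.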
+/**
+ * cavium_ptp_adjtime() - Adjust ptp time
+ * @ptp_info: PTP clock info
+ * @delta: how much to adjust by, in nanoseconds
+ */
+static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ timecounter_adjtime(&clock->time_counter, delta);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ /* Sync, for network driver to get latest value */
+ smp_mb();
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_gettime() - Get hardware clock time with adjustment
+ * @ptp_info: PTP clock info
+ * @ts: timespec
+ */
+static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 nsec;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ nsec = timecounter_read(&clock->time_counter);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_settime() - Set hardware clock time. Reset adjustment
+ * @ptp_info: PTP clock info
+ * @ts: timespec
+ */
+static int cavium_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ timecounter_init(&clock->time_counter, &clock->cycle_counter, nsec);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_enable() - Request to enable or disable an ancillary feature.
+ * @ptp_info: PTP clock info
+ * @rq: request
+ * @on: enable (1) or disable (0) the feature
+ */
+static int cavium_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static u64 cavium_ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct cavium_ptp *clock =
+ container_of(cc, struct cavium_ptp, cycle_counter);
+
+ return readq(clock->reg_base + PTP_CLOCK_HI);
+}
+
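Because PTP_CLOCK_HI already counts nanoseconds, the cyclecounter is configured below with mult = 1 and shift = 0, so the timecounter arithmetic reduces to an identity over the 64-bit reading.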
+static int cavium_ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct cavium_ptp *clock;
+ struct cyclecounter *cc;
+ u64 clock_cfg;
+ u64 clock_comp;
+ int err;
+
+ clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ clock->pdev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto error_free;
+
+ err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ if (err)
+ goto error_free;
+
+ clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+ spin_lock_init(&clock->spin_lock);
+
+ cc = &clock->cycle_counter;
+ cc->read = cavium_ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+
+ timecounter_init(&clock->time_counter, &clock->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+
+ clock->clock_rate = ptp_cavium_clock_get();
+
+ clock->ptp_info = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "ThunderX PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = cavium_ptp_adjfine,
+ .adjtime = cavium_ptp_adjtime,
+ .gettime64 = cavium_ptp_gettime,
+ .settime64 = cavium_ptp_settime,
+ .enable = cavium_ptp_enable,
+ };
+
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+
+ clock_comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+ writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP);
+
+ clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev);
+ if (!clock->ptp_clock) {
+ err = -ENODEV;
+ goto error_stop;
+ }
+ if (IS_ERR(clock->ptp_clock)) {
+ err = PTR_ERR(clock->ptp_clock);
+ goto error_stop;
+ }
+
+ pci_set_drvdata(pdev, clock);
+ return 0;
+
+error_stop:
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+ pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO);
+
+error_free:
+ devm_kfree(dev, clock);
+
+error:
+ /* For `cavium_ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+ * the probe failed. In the latter case we pretend that the
+ * initialization was successful and keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ return 0;
+}
+
+static void cavium_ptp_remove(struct pci_dev *pdev)
+{
+ struct cavium_ptp *clock = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+ if (IS_ERR_OR_NULL(clock))
+ return;
+
+ ptp_clock_unregister(clock->ptp_clock);
+
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+}
+
+static const struct pci_device_id cavium_ptp_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP) },
+ { 0, }
+};
+
+static struct pci_driver cavium_ptp_driver = {
+ .name = DRV_NAME,
+ .id_table = cavium_ptp_id_table,
+ .probe = cavium_ptp_probe,
+ .remove = cavium_ptp_remove,
+};
+
+static int __init cavium_ptp_init_module(void)
+{
+ return pci_register_driver(&cavium_ptp_driver);
+}
+
+static void __exit cavium_ptp_cleanup_module(void)
+{
+ pci_unregister_driver(&cavium_ptp_driver);
+}
+
+module_init(cavium_ptp_init_module);
+module_exit(cavium_ptp_cleanup_module);
+
+MODULE_DESCRIPTION(DRV_NAME);
+MODULE_AUTHOR("Cavium Networks <support@cavium.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, cavium_ptp_id_table);
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h
new file mode 100644
index 000000000000..be2bafc7beeb
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.h - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#ifndef CAVIUM_PTP_H
+#define CAVIUM_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+
+struct cavium_ptp {
+ struct pci_dev *pdev;
+
+ /* Serialize access to cycle_counter, time_counter and hw_registers */
+ spinlock_t spin_lock;
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+ void __iomem *reg_base;
+
+ u32 clock_rate;
+
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+};
+
+#if IS_ENABLED(CONFIG_CAVIUM_PTP)
+
+struct cavium_ptp *cavium_ptp_get(void);
+void cavium_ptp_put(struct cavium_ptp *ptp);
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+ unsigned long flags;
+ u64 ret;
+
+ spin_lock_irqsave(&ptp->spin_lock, flags);
+ ret = timecounter_cyc2time(&ptp->time_counter, tstamp);
+ spin_unlock_irqrestore(&ptp->spin_lock, flags);
+
+ return ret;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+ return ptp_clock_index(clock->ptp_clock);
+}
+
+#else
+
+static inline struct cavium_ptp *cavium_ptp_get(void)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void cavium_ptp_put(struct cavium_ptp *ptp) {}
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+ return 0;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+ return -1;
+}
+
+#endif
+
+#endif
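A hedged sketch of how a consumer is expected to use this split (simplified; the error handling shown is an assumption, not code from this patch):

struct cavium_ptp *ptp = cavium_ptp_get();

if (IS_ERR(ptp)) {
        if (PTR_ERR(ptp) == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* device exists but has not probed yet */
        ptp = NULL;                     /* no PTP hardware: run without timestamping */
}

When CONFIG_CAVIUM_PTP is disabled, the inline stubs above make the same call sites compile down to the "no hardware" path.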
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 2c615ab09e64..f38abf626412 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -702,12 +702,10 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
size = octdevsize + priv_size + configsize +
(sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
- buf = vmalloc(size);
+ buf = vzalloc(size);
if (!buf)
return NULL;
- memset(buf, 0, size);
-
oct = (struct octeon_device *)buf;
oct->priv = (void *)(buf + octdevsize);
oct->chip = (void *)(buf + octdevsize + priv_size);
@@ -840,10 +838,9 @@ octeon_allocate_ioq_vector(struct octeon_device *oct)
size = sizeof(struct octeon_ioq_vector) * num_ioqs;
- oct->ioq_vector = vmalloc(size);
+ oct->ioq_vector = vzalloc(size);
if (!oct->ioq_vector)
return 1;
- memset(oct->ioq_vector, 0, size);
for (i = 0; i < num_ioqs; i++) {
ioq_vector = &oct->ioq_vector[i];
ioq_vector->oct_dev = oct;
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 4a02e618e318..4cacce5d2b16 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -263,6 +263,8 @@ struct nicvf_drv_stats {
struct u64_stats_sync syncp;
};
+struct cavium_ptp;
+
struct nicvf {
struct nicvf *pnicvf;
struct net_device *netdev;
@@ -312,6 +314,33 @@ struct nicvf {
struct tasklet_struct qs_err_task;
struct work_struct reset_task;
+ /* PTP timestamp */
+ struct cavium_ptp *ptp_clock;
+ /* Inbound timestamping is on */
+ bool hw_rx_tstamp;
+ /* When a packet that requires timestamping is sent, hardware inserts
+ * two entries into the completion queue. The first is the regular
+ * CQE_TYPE_SEND entry that signals that the packet was sent.
+ * The second is CQE_TYPE_SEND_PTP, which contains the actual timestamp
+ * for that packet.
+ * `ptp_skb` is initialized in the handler for the CQE_TYPE_SEND
+ * entry and is consumed and zeroed in the handler for the
+ * CQE_TYPE_SEND_PTP entry.
+ * So `ptp_skb` holds the pointer to the packet between the
+ * CQE_TYPE_SEND and CQE_TYPE_SEND_PTP handler calls.
+ */
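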
+ struct sk_buff *ptp_skb;
+ /* `tx_ptp_skbs` is set while the hardware is sending a packet that
+ * requires timestamping. Cavium hardware cannot process more than one
+ * such packet at a time, so this is set each time the driver submits
+ * a packet that requires timestamping to the send queue and is cleared
+ * each time it receives the completion-queue entry saying that the
+ * packet was sent.
+ * So `tx_ptp_skbs` prevents the driver from submitting more than one
+ * packet that requires timestamping to the hardware at a time.
+ */
+ atomic_t tx_ptp_skbs;
+
/* Interrupt coalescing settings */
u32 cq_coalesce_usecs;
u32 msg_enable;
@@ -371,6 +400,7 @@ struct nicvf {
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
#define NIC_MBOX_MSG_PFC 0x18 /* Pause frame control */
+#define NIC_MBOX_MSG_PTP_CFG 0x19 /* HW packet timestamp */
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
@@ -521,6 +551,11 @@ struct pfc {
u8 fc_tx;
};
+struct set_ptp {
+ u8 msg;
+ bool enable;
+};
+
/* 128 bit shared memory between PF and each VF */
union nic_mbx {
struct { u8 msg; } msg;
@@ -540,6 +575,7 @@ union nic_mbx {
struct set_loopback lbk;
struct reset_stat_cfg reset_stat;
struct pfc pfc;
+ struct set_ptp ptp;
};
#define NIC_NODE_ID_MASK 0x03
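
One constraint worth making explicit: the mailbox is 128 bits of memory shared between the PF and each VF, so every message struct in the union, including the new set_ptp, must fit in 16 bytes. A hypothetical compile-time check (not in the patch) would look like:

/* Hypothetical sanity check: every nic_mbx message must fit within
 * the 128-bit PF/VF shared memory window.
 */
#include <linux/build_bug.h>

static inline void example_mbx_size_check(void)
{
	BUILD_BUG_ON(sizeof(union nic_mbx) > 16);
}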
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 8f1dd55b3e08..7ff66a8194e2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -18,7 +18,7 @@
#include "q_struct.h"
#include "thunder_bgx.h"
-#define DRV_NAME "thunder-nic"
+#define DRV_NAME "nicpf"
#define DRV_VERSION "1.0"
struct hw_info {
@@ -426,13 +426,22 @@ static void nic_init_hw(struct nicpf *nic)
/* Enable backpressure */
nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
- /* TNS and TNS bypass modes are present only on 88xx */
+ /* TNS and TNS bypass modes are present only on 88xx
+ * Also offset of this CSR has changed in 81xx and 83xx.
+ */
if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
/* Disable TNS mode on both interfaces */
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
- (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+ (NIC_TNS_BYPASS_MODE << 7) |
+ BGX0_BLOCK | (1ULL << 16));
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
- (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ (NIC_TNS_BYPASS_MODE << 7) |
+ BGX1_BLOCK | (1ULL << 16));
+ } else {
+ /* Configure timestamp generation timeout to 10us */
+ for (i = 0; i < nic->hw->bgx_cnt; i++)
+ nic_reg_write(nic, NIC_PF_INTFX_SEND_CFG | (i << 3),
+ (1ULL << 16));
}
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
@@ -880,6 +889,44 @@ static void nic_pause_frame(struct nicpf *nic, int vf, struct pfc *cfg)
}
}
+/* Enable or disable HW timestamping by BGX for pkts received on an LMAC */
+static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
+{
+ struct pkind_cfg *pkind;
+ u8 lmac, bgx_idx;
+ u64 pkind_val, pkind_idx;
+
+ if (vf >= nic->num_vf_en)
+ return;
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ pkind_idx = lmac + bgx_idx * MAX_LMAC_PER_BGX;
+ pkind_val = nic_reg_read(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3));
+ pkind = (struct pkind_cfg *)&pkind_val;
+
+ if (ptp->enable && !pkind->hdr_sl) {
+ /* Skip length to exclude the 8-byte timestamp while parsing the pkt.
+ * If not configured, this results in L2 errors.
+ */
+ pkind->hdr_sl = 4;
+ /* Adjust max packet length allowed */
+ pkind->maxlen += (pkind->hdr_sl * 2);
+ bgx_config_timestamping(nic->node, bgx_idx, lmac, true);
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+ (ETYPE_ALG_ENDPARSE << 16) | ETH_P_1588);
+ } else if (!ptp->enable && pkind->hdr_sl) {
+ pkind->maxlen -= (pkind->hdr_sl * 2);
+ pkind->hdr_sl = 0;
+ bgx_config_timestamping(nic->node, bgx_idx, lmac, false);
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+ (ETYPE_ALG_SKIP << 16) | ETH_P_8021Q);
+ }
+
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
+}
+
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -1022,6 +1069,9 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_PFC:
nic_pause_frame(nic, vf, &mbx.pfc);
goto unlock;
+ case NIC_MBOX_MSG_PTP_CFG:
+ nic_config_timestamp(nic, vf, &mbx.ptp);
+ break;
default:
dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index 80d46337cf29..a16c48a1ebb2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -99,6 +99,7 @@
#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
+#define NIC_PF_INTFX_SEND_CFG (0x4000)
#define NIC_PF_MCAM_0_191_ENA (0x100000)
#define NIC_PF_MCAM_0_191_M_0_5_DATA (0x110000)
#define NIC_PF_MCAM_CTRL (0x120000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index b9ece9cbf98b..5603f5ab1fee 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -9,14 +9,16 @@
/* ETHTOOL Support for VNIC_VF Device*/
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "q_struct.h"
#include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
-#define DRV_NAME "thunder-nicvf"
+#define DRV_NAME "nicvf"
#define DRV_VERSION "1.0"
struct nicvf_stat {
@@ -824,6 +826,31 @@ static int nicvf_set_pauseparam(struct net_device *dev,
return 0;
}
+static int nicvf_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return ethtool_op_get_ts_info(netdev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = cavium_ptp_clock_index(nic->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
static const struct ethtool_ops nicvf_ethtool_ops = {
.get_link = nicvf_get_link,
.get_drvinfo = nicvf_get_drvinfo,
@@ -847,7 +874,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.set_channels = nicvf_set_channels,
.get_pauseparam = nicvf_get_pauseparam,
.set_pauseparam = nicvf_set_pauseparam,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = nicvf_get_ts_info,
.get_link_ksettings = nicvf_get_link_ksettings,
};
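
From userspace, the capabilities reported by nicvf_get_ts_info() are visible through the standard ethtool ioctl. A minimal sketch; "eth0" is a placeholder interface name:

/* Userspace sketch (not part of the patch): query the PHC index and
 * timestamping capabilities exposed by get_ts_info.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&info;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("PHC index: %d\n", info.phc_index);
	return 0;
}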
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a063c36c4c58..b68cde9f17d2 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -20,13 +20,15 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
+#include <linux/net_tstamp.h>
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
-#define DRV_NAME "thunder-nicvf"
+#define DRV_NAME "nicvf"
#define DRV_VERSION "1.0"
/* Supported devices */
@@ -65,6 +67,11 @@ module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
"PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
+struct nicvf_xdp_tx {
+ u64 dma_addr;
+ u8 qidx;
+};
+
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
if (nic->sqs_mode)
@@ -500,14 +507,29 @@ static int nicvf_init_resources(struct nicvf *nic)
return 0;
}
+static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr)
+{
+ /* Check if it's a recycled page; if not, unmap the DMA mapping.
+ * A recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) == 1) {
+ dma_addr &= PAGE_MASK;
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+ RCV_FRAG_LEN + XDP_HEADROOM,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+}
+
static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
- struct sk_buff **skb)
+ struct rcv_queue *rq, struct sk_buff **skb)
{
struct xdp_buff xdp;
struct page *page;
+ struct nicvf_xdp_tx *xdp_tx = NULL;
u32 action;
- u16 len, offset = 0;
+ u16 len, err, offset = 0;
u64 dma_addr, cpu_addr;
void *orig_data;
@@ -521,10 +543,11 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
cpu_addr = (u64)phys_to_virt(cpu_addr);
page = virt_to_page((void *)cpu_addr);
- xdp.data_hard_start = page_address(page);
+ xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM;
xdp.data = (void *)cpu_addr;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
rcu_read_lock();
@@ -540,18 +563,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
switch (action) {
case XDP_PASS:
- /* Check if it's a recycled page, if not
- * unmap the DMA mapping.
- *
- * Recycled page holds an extra reference.
- */
- if (page_ref_count(page) == 1) {
- dma_addr &= PAGE_MASK;
- dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
- RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- }
+ nicvf_unmap_page(nic, page, dma_addr);
/* Build SKB and pass on packet to network stack */
*skb = build_skb(xdp.data,
@@ -564,6 +576,20 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
case XDP_TX:
nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
return true;
+ case XDP_REDIRECT:
+ /* Save DMA address for use while transmitting */
+ xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
+ xdp_tx->dma_addr = dma_addr;
+ xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
+
+ err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog);
+ if (!err)
+ return true;
+
+ /* Free the page on error */
+ nicvf_unmap_page(nic, page, dma_addr);
+ put_page(page);
+ break;
default:
bpf_warn_invalid_xdp_action(action);
/* fall through */
@@ -571,24 +597,51 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
trace_xdp_exception(nic->netdev, prog, action);
/* fall through */
case XDP_DROP:
- /* Check if it's a recycled page, if not
- * unmap the DMA mapping.
- *
- * Recycled page holds an extra reference.
- */
- if (page_ref_count(page) == 1) {
- dma_addr &= PAGE_MASK;
- dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
- RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- }
+ nicvf_unmap_page(nic, page, dma_addr);
put_page(page);
return true;
}
return false;
}
+static void nicvf_snd_ptp_handler(struct net_device *netdev,
+ struct cqe_send_t *cqe_tx)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct skb_shared_hwtstamps ts;
+ u64 ns;
+
+ nic = nic->pnicvf;
+
+ /* Sync for 'ptp_skb' */
+ smp_rmb();
+
+ /* New timestamp request can be queued now */
+ atomic_set(&nic->tx_ptp_skbs, 0);
+
+ /* Check for an skb with a pending timestamp request */
+ if (!nic->ptp_skb)
+ return;
+
+ /* Check if timestamp generation timed out (the timeout is set to 10us) */
+ if (cqe_tx->send_status == CQ_TX_ERROP_TSTMP_TIMEOUT ||
+ cqe_tx->send_status == CQ_TX_ERROP_TSTMP_CONFLICT)
+ goto no_tstamp;
+
+ /* Get the timestamp */
+ memset(&ts, 0, sizeof(ts));
+ ns = cavium_ptp_tstamp2time(nic->ptp_clock, cqe_tx->ptp_timestamp);
+ ts.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(nic->ptp_skb, &ts);
+
+no_tstamp:
+ /* Free the original skb */
+ dev_kfree_skb_any(nic->ptp_skb);
+ nic->ptp_skb = NULL;
+ /* Sync 'ptp_skb' */
+ smp_wmb();
+}
+
static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct cqe_send_t *cqe_tx,
int budget, int *subdesc_cnt,
@@ -645,7 +698,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
prefetch(skb);
(*tx_pkts)++;
*tx_bytes += skb->len;
- napi_consume_skb(skb, budget);
+ /* If timestamp is requested for this skb, don't free it */
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+ !nic->pnicvf->ptp_skb)
+ nic->pnicvf->ptp_skb = skb;
+ else
+ napi_consume_skb(skb, budget);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
/* In case of SW TSO on 88xx, only last segment will have
@@ -684,9 +742,25 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
skb_set_hash(skb, hash, hash_type);
}
+static inline void nicvf_set_rxtstamp(struct nicvf *nic, struct sk_buff *skb)
+{
+ u64 ns;
+
+ if (!nic->ptp_clock || !nic->hw_rx_tstamp)
+ return;
+
+ /* The first 8 bytes are the timestamp */
+ ns = cavium_ptp_tstamp2time(nic->ptp_clock,
+ be64_to_cpu(*(__be64 *)skb->data));
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+
+ __skb_pull(skb, 8);
+}
+
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct napi_struct *napi,
- struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
+ struct cqe_rx_t *cqe_rx,
+ struct snd_queue *sq, struct rcv_queue *rq)
{
struct sk_buff *skb = NULL;
struct nicvf *nic = netdev_priv(netdev);
@@ -712,7 +786,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
/* For XDP, ignore pkts spanning multiple pages */
if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
/* Packet consumed by XDP */
- if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb))
+ if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
return;
} else {
skb = nicvf_get_rcv_skb(snic, cqe_rx,
@@ -734,6 +808,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
return;
}
+ nicvf_set_rxtstamp(nic, skb);
nicvf_set_rxhash(netdev, cqe_rx, skb);
skb_record_rx_queue(skb, rq_idx);
@@ -769,6 +844,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
struct cqe_rx_t *cq_desc;
struct netdev_queue *txq;
struct snd_queue *sq = &qs->sq[cq_idx];
+ struct rcv_queue *rq = &qs->rq[cq_idx];
unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;
spin_lock_bh(&cq->lock);
@@ -799,7 +875,7 @@ loop:
switch (cq_desc->cqe_type) {
case CQE_TYPE_RX:
- nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq);
+ nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
work_done++;
break;
case CQE_TYPE_SEND:
@@ -808,10 +884,12 @@ loop:
&tx_pkts, &tx_bytes);
tx_done++;
break;
+ case CQE_TYPE_SEND_PTP:
+ nicvf_snd_ptp_handler(netdev, (void *)cq_desc);
+ break;
case CQE_TYPE_INVALID:
case CQE_TYPE_RX_SPLIT:
case CQE_TYPE_RX_TCP:
- case CQE_TYPE_SEND_PTP:
/* Ignore for now */
break;
}
@@ -1307,12 +1385,28 @@ int nicvf_stop(struct net_device *netdev)
nicvf_free_cq_poll(nic);
+ /* Free any pending SKB saved to receive timestamp */
+ if (nic->ptp_skb) {
+ dev_kfree_skb_any(nic->ptp_skb);
+ nic->ptp_skb = NULL;
+ }
+
/* Clear multiqset info */
nic->pnicvf = nic;
return 0;
}
+static int nicvf_config_hw_rx_tstamp(struct nicvf *nic, bool enable)
+{
+ union nic_mbx mbx = {};
+
+ mbx.ptp.msg = NIC_MBOX_MSG_PTP_CFG;
+ mbx.ptp.enable = enable;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
union nic_mbx mbx = {};
@@ -1382,6 +1476,12 @@ int nicvf_open(struct net_device *netdev)
if (nic->sqs_mode)
nicvf_get_primary_vf_struct(nic);
+ /* Configure PTP timestamp */
+ if (nic->ptp_clock)
+ nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+ atomic_set(&nic->tx_ptp_skbs, 0);
+ nic->ptp_skb = NULL;
+
/* Configure receive side scaling and MTU */
if (!nic->sqs_mode) {
nicvf_rss_init(nic);
@@ -1764,6 +1864,117 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
}
}
+static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf *snic = nic;
+ struct nicvf_xdp_tx *xdp_tx;
+ struct snd_queue *sq;
+ struct page *page;
+ int err, qidx;
+
+ if (!netif_running(netdev) || !nic->xdp_prog)
+ return -EINVAL;
+
+ page = virt_to_page(xdp->data);
+ xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
+ qidx = xdp_tx->qidx;
+
+ if (xdp_tx->qidx >= nic->xdp_tx_queues)
+ return -EINVAL;
+
+ /* Get secondary Qset's info */
+ if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) {
+ qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS;
+ snic = (struct nicvf *)nic->snicvf[qidx - 1];
+ if (!snic)
+ return -EINVAL;
+ qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS;
+ }
+
+ sq = &snic->qs->sq[qidx];
+ err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data,
+ xdp_tx->dma_addr,
+ xdp->data_end - xdp->data);
+ if (err)
+ return -ENOMEM;
+
+ nicvf_xdp_sq_doorbell(snic, sq, qidx);
+ return 0;
+}
+
+static void nicvf_xdp_flush(struct net_device *dev)
+{
+ return;
+}
+
+static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return -ENODEV;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ nic->hw_rx_tstamp = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ nic->hw_rx_tstamp = true;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (netif_running(netdev))
+ nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return nicvf_config_hwtstamp(netdev, req);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops nicvf_netdev_ops = {
.ndo_open = nicvf_open,
.ndo_stop = nicvf_stop,
@@ -1775,6 +1986,9 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
.ndo_bpf = nicvf_xdp,
+ .ndo_xdp_xmit = nicvf_xdp_xmit,
+ .ndo_xdp_flush = nicvf_xdp_flush,
+ .ndo_do_ioctl = nicvf_ioctl,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1784,6 +1998,16 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct nicvf *nic;
int err, qcount;
u16 sdevid;
+ struct cavium_ptp *ptp_clock;
+
+ ptp_clock = cavium_ptp_get();
+ if (IS_ERR(ptp_clock)) {
+ if (PTR_ERR(ptp_clock) == -ENODEV)
+ /* In a virtualized environment, proceed without PTP */
+ ptp_clock = NULL;
+ else
+ return PTR_ERR(ptp_clock);
+ }
err = pci_enable_device(pdev);
if (err) {
@@ -1833,6 +2057,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->pdev = pdev;
nic->pnicvf = nic;
nic->max_queues = qcount;
+ /* If the number of CPUs is too low, there won't be any queues left
+ * for XDP_TX, hence double it.
+ */
+ if (!nic->t88)
+ nic->max_queues *= 2;
+ nic->ptp_clock = ptp_clock;
/* MAP VF's configuration registers */
nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
@@ -1946,6 +2176,7 @@ static void nicvf_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
if (nic->drv_stats)
free_percpu(nic->drv_stats);
+ cavium_ptp_put(nic->ptp_clock);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
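
The new SIOCSHWTSTAMP path above is driven from userspace with the generic hwtstamp_config ioctl. A minimal sketch; "eth0" is a placeholder interface name:

/* Userspace sketch (not part of the patch): enable the RX/TX hardware
 * timestamps handled by nicvf_config_hwtstamp().
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int example_enable_hw_timestamps(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	/* The driver may upgrade rx_filter to HWTSTAMP_FILTER_ALL */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}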
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index a3d12dbde95b..3eae9ff9b53a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
/* Reserve space for header modifications by BPF program */
if (rbdr->is_xdp)
- buf_len += XDP_PACKET_HEADROOM;
+ buf_len += XDP_HEADROOM;
/* Check if it's recycled */
if (pgcache)
@@ -224,8 +224,9 @@ ret:
nic->rb_page = NULL;
return -ENOMEM;
}
+
if (pgcache)
- pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
+ pgcache->dma_addr = *rbuf + XDP_HEADROOM;
nic->rb_page_offset += buf_len;
}
@@ -759,6 +760,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
if (!rq->enable) {
nicvf_reclaim_rcv_queue(nic, qs, qidx);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
return;
}
@@ -771,6 +773,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
/* all writes of RBDR data to be loaded into L2 Cache as well*/
rq->caching = 1;
+ /* The driver has no proper error path for a failed XDP RX-queue info registration */
+ WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0);
+
/* Send a mailbox msg to PF to config RQ */
mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
mbx.rq.qs_num = qs->vnic_id;
@@ -977,6 +982,9 @@ void nicvf_qset_config(struct nicvf *nic, bool enable)
qs_cfg->be = 1;
#endif
qs_cfg->vnic = qs->vnic_id;
+ /* Enable Tx timestamping capability */
+ if (nic->ptp_clock)
+ qs_cfg->send_tstmp_ena = 1;
}
nicvf_send_msg_to_pf(nic, &mbx);
}
@@ -1236,7 +1244,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
int qentry;
if (subdesc_cnt > sq->xdp_free_cnt)
- return 0;
+ return -1;
qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
@@ -1247,7 +1255,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
sq->xdp_desc_cnt += subdesc_cnt;
- return 1;
+ return 0;
}
/* Calculate no of SQ subdescriptors needed to transmit all
@@ -1384,6 +1392,29 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
+
+ /* Check if timestamp is requested */
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_tx_timestamp(skb);
+ return;
+ }
+
+ /* Tx timestamping is not supported along with TSO, so ignore the request */
+ if (skb_shinfo(skb)->gso_size)
+ return;
+
+ /* HW supports only a single outstanding packet to timestamp */
+ if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
+ return;
+
+ /* Mark the SKB for later reference */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ /* Finally, enable timestamp generation.
+ * Since 'post_cqe' is also set, two CQEs will be posted
+ * for this packet, i.e. CQE_TYPE_SEND and CQE_TYPE_SEND_PTP.
+ */
+ hdr->tstmp = 1;
}
/* SQ GATHER subdescriptor
@@ -1625,7 +1656,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
if (page_ref_count(page) != 1)
return;
- len += XDP_PACKET_HEADROOM;
+ len += XDP_HEADROOM;
/* Receive buffers in XDP mode are mapped from page start */
dma_addr &= PAGE_MASK;
}
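
The single-slot TX timestamp gate added in nicvf_sq_add_hdr_subdesc() relies on atomic_add_unless() claiming the slot only when it is free; the CQE_TYPE_SEND_PTP handler releases it with atomic_set(). A sketch of the pattern in isolation (helper names are hypothetical):

/* Sketch: claim/release of the single outstanding-timestamp slot. */
#include <linux/atomic.h>

static bool example_claim_tstamp_slot(atomic_t *slot)
{
	/* Adds 1 only if the current value is not already 1;
	 * returns false when the slot is busy.
	 */
	return atomic_add_unless(slot, 1, 1);
}

static void example_release_tstamp_slot(atomic_t *slot)
{
	atomic_set(slot, 0);
}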
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 67d1a3230773..7d1e4e2aaad0 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -11,6 +11,8 @@
#include <linux/netdevice.h>
#include <linux/iommu.h>
+#include <linux/bpf.h>
+#include <net/xdp.h>
#include "q_struct.h"
#define MAX_QUEUE_SET 128
@@ -92,6 +94,9 @@
#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define RCV_BUF_HEADROOM 128 /* To store dma address for XDP redirect */
+#define XDP_HEADROOM (XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM)
+
#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
MAX_CQE_PER_PKT_XMIT)
@@ -251,6 +256,7 @@ struct rcv_queue {
u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
u8 caching;
struct rx_tx_queue_stats stats;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
struct cmp_queue {
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 5e5c4d7796b8..91d34ea40e2c 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -21,7 +21,7 @@
#include "nic.h"
#include "thunder_bgx.h"
-#define DRV_NAME "thunder-BGX"
+#define DRV_NAME "thunder_bgx"
#define DRV_VERSION "1.0"
struct lmac {
@@ -245,6 +245,35 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
+/* Enables or disables timestamp insertion by BGX for Rx packets */
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable)
+{
+ struct bgx *bgx = get_bgx(node, bgx_idx);
+ struct lmac *lmac;
+ u64 csr_offset, cfg;
+
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmacid];
+
+ if (lmac->lmac_type == BGX_MODE_SGMII ||
+ lmac->lmac_type == BGX_MODE_QSGMII ||
+ lmac->lmac_type == BGX_MODE_RGMII)
+ csr_offset = BGX_GMP_GMI_RXX_FRM_CTL;
+ else
+ csr_offset = BGX_SMUX_RX_FRM_CTL;
+
+ cfg = bgx_reg_read(bgx, lmacid, csr_offset);
+
+ if (enable)
+ cfg |= BGX_PKT_RX_PTP_EN;
+ else
+ cfg &= ~BGX_PKT_RX_PTP_EN;
+ bgx_reg_write(bgx, lmacid, csr_offset, cfg);
+}
+EXPORT_SYMBOL(bgx_config_timestamping);
+
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 23acdc5ab896..5a7567d31138 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -122,6 +122,8 @@
#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
#define BGX_SMUX_RX_INT 0x20000
+#define BGX_SMUX_RX_FRM_CTL 0x20020
+#define BGX_PKT_RX_PTP_EN BIT_ULL(12)
#define BGX_SMUX_RX_JABBER 0x20030
#define BGX_SMUX_RX_CTL 0x20048
#define SMU_RX_CTL_STATUS (3ull << 0)
@@ -172,6 +174,7 @@
#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
#define GMI_PORT_CFG_RX_IDLE BIT_ULL(12)
#define GMI_PORT_CFG_TX_IDLE BIT_ULL(13)
+#define BGX_GMP_GMI_RXX_FRM_CTL 0x38028
#define BGX_GMP_GMI_RXX_JABBER 0x38038
#define BGX_GMP_GMI_TXX_THRESH 0x38210
#define BGX_GMP_GMI_TXX_APPEND 0x38218
@@ -223,6 +226,7 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
void bgx_lmac_internal_loopback(int node, int bgx_idx,
int lmac_idx, bool enable);
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable);
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause);
void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
index 578c7f8f11bf..2d5e8dab1f70 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -20,7 +20,7 @@
#include "nic.h"
#include "thunder_bgx.h"
-#define DRV_NAME "thunder-xcv"
+#define DRV_NAME "thunder_xcv"
#define DRV_VERSION "1.0"
/* Register offsets */
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 5713e83be08c..e2cdfa75673f 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -69,6 +69,7 @@ config CHELSIO_T4
depends on PCI && (IPV6 || IPV6=n)
select FW_LOADER
select MDIO
+ select ZLIB_DEFLATE
---help---
This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet
adapter and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 6a015362c340..185fe8df7628 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3304,6 +3304,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->ethtool_ops = &cxgb_ethtool_ops;
netdev->min_mtu = 81;
netdev->max_mtu = ETH_MAX_MTU;
+ netdev->dev_port = pi->port_id;
}
pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 8c9c6b0d2e5d..53b6a02c778e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \
- cudbg_common.o cudbg_lib.o
+ cudbg_common.o cudbg_lib.o cudbg_zlib.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
index f78ba1743b5a..8edc49827af0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
@@ -19,7 +19,8 @@
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
-int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+int cudbg_get_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pdbg_buff, u32 size,
struct cudbg_buffer *pin_buff)
{
u32 offset;
@@ -28,17 +29,30 @@ int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
if (offset + size > pdbg_buff->size)
return CUDBG_STATUS_NO_MEM;
+ if (pdbg_init->compress_type != CUDBG_COMPRESSION_NONE) {
+ if (size > pdbg_init->compress_buff_size)
+ return CUDBG_STATUS_NO_MEM;
+
+ pin_buff->data = (char *)pdbg_init->compress_buff;
+ pin_buff->offset = 0;
+ pin_buff->size = size;
+ return 0;
+ }
+
pin_buff->data = (char *)pdbg_buff->data + offset;
pin_buff->offset = offset;
pin_buff->size = size;
- pdbg_buff->size -= size;
return 0;
}
-void cudbg_put_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *pdbg_buff)
+void cudbg_put_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff)
{
- pdbg_buff->size += pin_buff->size;
+ /* Clear compression buffer for re-use */
+ if (pdbg_init->compress_type != CUDBG_COMPRESSION_NONE)
+ memset(pdbg_init->compress_buff, 0,
+ pdbg_init->compress_buff_size);
+
pin_buff->data = NULL;
pin_buff->offset = 0;
pin_buff->size = 0;
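
With the revised signatures above, every cudbg collector threads pdbg_init through the get/put pair so the compression scratch buffer can be substituted transparently. A minimal caller sketch, following the pattern used throughout cudbg_lib.c (the function name is hypothetical):

/* Sketch of the revised buffer contract. */
static int example_collect(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff, u32 size)
{
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;
	/* ... fill temp_buff.data with entity data ... */
	cudbg_put_buff(pdbg_init, &temp_buff);
	return 0;
}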
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 605689957496..b57acb8dc35b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -18,17 +18,15 @@
#ifndef __CUDBG_ENTITY_H__
#define __CUDBG_ENTITY_H__
-#define EDC0_FLAG 3
-#define EDC1_FLAG 4
+#define EDC0_FLAG 0
+#define EDC1_FLAG 1
+#define MC_FLAG 2
+#define MC0_FLAG 3
+#define MC1_FLAG 4
+#define HMA_FLAG 5
#define CUDBG_ENTITY_SIGNATURE 0xCCEDB001
-struct card_mem {
- u16 size_edc0;
- u16 size_edc1;
- u16 mem_flag;
-};
-
struct cudbg_mbox_log {
struct mbox_cmd entry;
u32 hi[MBOX_LEN / 8];
@@ -87,6 +85,48 @@ struct cudbg_tp_la {
u8 data[0];
};
+static const char * const cudbg_region[] = {
+ "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+ "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+ "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+ "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+ "RQUDP region:", "PBL region:", "TXPBL region:",
+ "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+ "On-chip queues:"
+};
+
+/* Memory region info relative to current memory (i.e. wrt 0). */
+struct cudbg_region_info {
+ bool exist; /* Does the region exist in the current memory? */
+ u32 start; /* Start wrt 0 */
+ u32 end; /* End wrt 0 */
+};
+
+struct cudbg_mem_desc {
+ u32 base;
+ u32 limit;
+ u32 idx;
+};
+
+struct cudbg_meminfo {
+ struct cudbg_mem_desc avail[4];
+ struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3];
+ u32 avail_c;
+ u32 mem_c;
+ u32 up_ram_lo;
+ u32 up_ram_hi;
+ u32 up_extmem2_lo;
+ u32 up_extmem2_hi;
+ u32 rx_pages_data[3];
+ u32 tx_pages_data[4];
+ u32 p_structs;
+ u32 reserved[12];
+ u32 port_used[4];
+ u32 port_alloc[4];
+ u32 loopback_used[NCHAN];
+ u32 loopback_alloc[NCHAN];
+};
+
struct cudbg_cim_pif_la {
int size;
u8 data[0];
@@ -145,6 +185,7 @@ struct cudbg_tid_info_region_rev1 {
u32 reserved[16];
};
+#define CUDBG_LOWMEM_MAX_CTXT_QIDS 256
#define CUDBG_MAX_FL_QIDS 1024
struct cudbg_ch_cntxt {
@@ -334,6 +375,25 @@ static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
{0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
};
+#define CUDBG_NUM_PCIE_CONFIG_REGS 0x61
+
+static const u32 t5_pcie_config_array[][2] = {
+ {0x0, 0x34},
+ {0x3c, 0x40},
+ {0x50, 0x64},
+ {0x70, 0x80},
+ {0x94, 0xa0},
+ {0xb0, 0xb8},
+ {0xd0, 0xd4},
+ {0x100, 0x128},
+ {0x140, 0x148},
+ {0x150, 0x164},
+ {0x170, 0x178},
+ {0x180, 0x194},
+ {0x1a0, 0x1b8},
+ {0x1c0, 0x208},
+};
+
static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
{0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
{0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
@@ -345,37 +405,55 @@ static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
{0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
};
-static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM] = {
- {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
- {0x7b50, 0x7b54, 0x2080, 0x1d}, /* up_cim_2080_to_20fc */
- {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
- {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
- {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
- {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
- {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
- {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
- {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
- {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
- {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
- {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
- {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
-
-};
-
-static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM] = {
- {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
- {0x7b50, 0x7b54, 0x2080, 0x19}, /* up_cim_2080_to_20ec */
- {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
- {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
- {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
- {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
- {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
- {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
- {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
- {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
- {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
- {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
- {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
+static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+ {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+ {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */
+ {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+ {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+ {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+ {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+ {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+ {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+ {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+ {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+ {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+ {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+ {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+ {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+ {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+ {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+ {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+ {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+ {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
+ {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
+ {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+ {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
+};
+
+static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+ {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+ {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */
+ {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+ {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+ {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+ {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+ {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+ {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+ {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+ {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+ {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+ {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+ {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+ {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+ {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+ {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+ {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+ {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+ {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
+ {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
+ {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
+ {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+ {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
};
static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
index e10ff1ee62c5..8568a51f6414 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
@@ -21,6 +21,7 @@
/* Error codes */
#define CUDBG_STATUS_NO_MEM -19
#define CUDBG_STATUS_ENTITY_NOT_FOUND -24
+#define CUDBG_STATUS_NOT_IMPLEMENTED -28
#define CUDBG_SYSTEM_ERROR -29
#define CUDBG_STATUS_CCLK_NOT_DEFINED -32
@@ -47,6 +48,8 @@ enum cudbg_dbg_entity_type {
CUDBG_CIM_OBQ_NCSI = 17,
CUDBG_EDC0 = 18,
CUDBG_EDC1 = 19,
+ CUDBG_MC0 = 20,
+ CUDBG_MC1 = 21,
CUDBG_RSS = 22,
CUDBG_RSS_VF_CONF = 25,
CUDBG_PATH_MTU = 27,
@@ -56,6 +59,7 @@ enum cudbg_dbg_entity_type {
CUDBG_SGE_INDIRECT = 37,
CUDBG_ULPRX_LA = 41,
CUDBG_TP_LA = 43,
+ CUDBG_MEMINFO = 44,
CUDBG_CIM_PIF_LA = 45,
CUDBG_CLK = 46,
CUDBG_CIM_OBQ_RXQ0 = 47,
@@ -63,6 +67,7 @@ enum cudbg_dbg_entity_type {
CUDBG_PCIE_INDIRECT = 50,
CUDBG_PM_INDIRECT = 51,
CUDBG_TID_INFO = 54,
+ CUDBG_PCIE_CONFIG = 55,
CUDBG_DUMP_CONTEXT = 56,
CUDBG_MPS_TCAM = 57,
CUDBG_VPD_DATA = 58,
@@ -74,6 +79,7 @@ enum cudbg_dbg_entity_type {
CUDBG_PBT_TABLE = 65,
CUDBG_MBOX_LOG = 66,
CUDBG_HMA_INDIRECT = 67,
+ CUDBG_HMA = 68,
CUDBG_MAX_ENTITY = 70,
};
@@ -81,6 +87,10 @@ struct cudbg_init {
struct adapter *adap; /* Pointer to adapter structure */
void *outbuf; /* Output buffer */
u32 outbuf_size; /* Output buffer size */
+ u8 compress_type; /* Type of compression to use */
+ void *compress_buff; /* Compression buffer */
+ u32 compress_buff_size; /* Compression buffer size */
+ void *workspace; /* Workspace for zlib */
};
static inline unsigned int cudbg_mbytes_to_bytes(unsigned int size)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index d699bf88d18f..557fd8bfd54e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -15,18 +15,65 @@
*
*/
+#include <linux/sort.h>
+
#include "t4_regs.h"
#include "cxgb4.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
-#include "cudbg_lib.h"
#include "cudbg_entity.h"
+#include "cudbg_lib.h"
+#include "cudbg_zlib.h"
-static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *dbg_buff)
+static int cudbg_do_compression(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *dbg_buff)
{
- cudbg_update_buff(pin_buff, dbg_buff);
- cudbg_put_buff(pin_buff, dbg_buff);
+ struct cudbg_buffer temp_in_buff = { 0 };
+ int bytes_left, bytes_read, bytes;
+ u32 offset = dbg_buff->offset;
+ int rc;
+
+ temp_in_buff.offset = pin_buff->offset;
+ temp_in_buff.data = pin_buff->data;
+ temp_in_buff.size = pin_buff->size;
+
+ bytes_left = pin_buff->size;
+ bytes_read = 0;
+ while (bytes_left > 0) {
+ /* Do compression in smaller chunks */
+ bytes = min_t(unsigned long, bytes_left,
+ (unsigned long)CUDBG_CHUNK_SIZE);
+ temp_in_buff.data = (char *)pin_buff->data + bytes_read;
+ temp_in_buff.size = bytes;
+ rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
+ if (rc)
+ return rc;
+ bytes_left -= bytes;
+ bytes_read += bytes;
+ }
+
+ pin_buff->size = dbg_buff->offset - offset;
+ return 0;
+}
+
+static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *dbg_buff)
+{
+ int rc = 0;
+
+ if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
+ cudbg_update_buff(pin_buff, dbg_buff);
+ } else {
+ rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
+ if (rc)
+ goto out;
+ }
+
+out:
+ cudbg_put_buff(pdbg_init, pin_buff);
+ return rc;
}
static int is_fw_attached(struct cudbg_init *pdbg_init)
@@ -84,6 +131,277 @@ static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
return 0;
}
+static int cudbg_mem_desc_cmp(const void *a, const void *b)
+{
+ return ((const struct cudbg_mem_desc *)a)->base -
+ ((const struct cudbg_mem_desc *)b)->base;
+}
+
+int cudbg_fill_meminfo(struct adapter *padap,
+ struct cudbg_meminfo *meminfo_buff)
+{
+ struct cudbg_mem_desc *md;
+ u32 lo, hi, used, alloc;
+ int n, i;
+
+ memset(meminfo_buff->avail, 0,
+ ARRAY_SIZE(meminfo_buff->avail) *
+ sizeof(struct cudbg_mem_desc));
+ memset(meminfo_buff->mem, 0,
+ (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
+ md = meminfo_buff->mem;
+
+ for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
+ meminfo_buff->mem[i].limit = 0;
+ meminfo_buff->mem[i].idx = i;
+ }
+
+ /* Find and sort the populated memory ranges */
+ i = 0;
+ lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
+ if (lo & EDRAM0_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 0;
+ i++;
+ }
+
+ if (lo & EDRAM1_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 1;
+ i++;
+ }
+
+ if (is_t5(padap->params.chip)) {
+ if (lo & EXT_MEM0_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 3;
+ i++;
+ }
+
+ if (lo & EXT_MEM1_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 4;
+ i++;
+ }
+ } else {
+ if (lo & EXT_MEM_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 2;
+ i++;
+ }
+
+ if (lo & HMA_MUX_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 5;
+ i++;
+ }
+ }
+
+ if (!i) /* no memory available */
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+
+ meminfo_buff->avail_c = i;
+ sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
+ cudbg_mem_desc_cmp, NULL);
+ (md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);
+
+ /* the next few have explicit upper bounds */
+ md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
+ PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
+ md++;
+
+ md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
+ PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
+ md++;
+
+ if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
+ hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
+ md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
+ } else {
+ hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
+ md->base = t4_read_reg(padap,
+ LE_DB_HASH_TBL_BASE_ADDR_A);
+ }
+ md->limit = 0;
+ } else {
+ md->base = 0;
+ md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
+ }
+ md++;
+
+#define ulp_region(reg) do { \
+ md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
+ (md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
+} while (0)
+
+ ulp_region(RX_ISCSI);
+ ulp_region(RX_TDDP);
+ ulp_region(TX_TPT);
+ ulp_region(RX_STAG);
+ ulp_region(RX_RQ);
+ ulp_region(RX_RQUDP);
+ ulp_region(RX_PBL);
+ ulp_region(TX_PBL);
+#undef ulp_region
+ md->base = 0;
+ md->idx = ARRAY_SIZE(cudbg_region);
+ if (!is_t4(padap->params.chip)) {
+ u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
+ u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
+ u32 size = 0;
+
+ if (is_t5(padap->params.chip)) {
+ if (sge_ctrl & VFIFO_ENABLE_F)
+ size = DBVFIFO_SIZE_G(fifo_size);
+ } else {
+ size = T6_DBVFIFO_SIZE_G(fifo_size);
+ }
+
+ if (size) {
+ md->base = BASEADDR_G(t4_read_reg(padap,
+ SGE_DBVFIFO_BADDR_A));
+ md->limit = md->base + (size << 2) - 1;
+ }
+ }
+
+ md++;
+
+ md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
+ md->limit = 0;
+ md++;
+ md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
+ md->limit = 0;
+ md++;
+
+ md->base = padap->vres.ocq.start;
+ if (padap->vres.ocq.size)
+ md->limit = md->base + padap->vres.ocq.size - 1;
+ else
+ md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
+ md++;
+
+ /* add any address-space holes, there can be up to 3 */
+ for (n = 0; n < i - 1; n++)
+ if (meminfo_buff->avail[n].limit <
+ meminfo_buff->avail[n + 1].base)
+ (md++)->base = meminfo_buff->avail[n].limit;
+
+ if (meminfo_buff->avail[n].limit)
+ (md++)->base = meminfo_buff->avail[n].limit;
+
+ n = md - meminfo_buff->mem;
+ meminfo_buff->mem_c = n;
+
+ sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
+ cudbg_mem_desc_cmp, NULL);
+
+ lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
+ hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
+ meminfo_buff->up_ram_lo = lo;
+ meminfo_buff->up_ram_hi = hi;
+
+ lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
+ hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
+ meminfo_buff->up_extmem2_lo = lo;
+ meminfo_buff->up_extmem2_hi = hi;
+
+ lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
+ meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
+ meminfo_buff->rx_pages_data[1] =
+ t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
+ meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;
+
+ lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
+ hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
+ meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
+ meminfo_buff->tx_pages_data[1] =
+ hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
+ meminfo_buff->tx_pages_data[2] =
+ hi >= (1 << 20) ? 'M' : 'K';
+ meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);
+
+ meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
+
+ for (i = 0; i < 4; i++) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(padap,
+ MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
+ if (is_t5(padap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ meminfo_buff->port_used[i] = used;
+ meminfo_buff->port_alloc[i] = alloc;
+ }
+
+ for (i = 0; i < padap->params.arch.nchan; i++) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(padap,
+ MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
+ if (is_t5(padap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ meminfo_buff->loopback_used[i] = used;
+ meminfo_buff->loopback_alloc[i] = alloc;
+ }
+
+ return 0;
+}
+
int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
@@ -98,12 +416,11 @@ int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
buf_size = T5_REGMAP_SIZE;
- rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
if (rc)
return rc;
t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
@@ -122,7 +439,7 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
}
dparams = &padap->params.devlog;
- rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
if (rc)
return rc;
@@ -137,12 +454,11 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
spin_unlock(&padap->win0_lock);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
@@ -163,14 +479,14 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
}
size += sizeof(cfg);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -180,11 +496,10 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
NULL);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
@@ -196,7 +511,7 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
int size, rc;
size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -204,8 +519,7 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
(u32 *)temp_buff.data,
(u32 *)((char *)temp_buff.data +
5 * CIM_MALA_SIZE));
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
@@ -217,7 +531,7 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
struct cudbg_cim_qcfg *cim_qcfg_data;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
&temp_buff);
if (rc)
return rc;
@@ -228,7 +542,7 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -237,14 +551,13 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
cim_qcfg_data->obq_wr);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
cim_qcfg_data->thres);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
@@ -258,7 +571,7 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
/* collect CIM IBQ */
qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
if (rc)
return rc;
@@ -272,11 +585,10 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
else
rc = no_of_read_words;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
@@ -343,7 +655,7 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
/* collect CIM OBQ */
qsize = cudbg_cim_obq_size(padap, qid);
- rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
if (rc)
return rc;
@@ -357,11 +669,10 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
else
rc = no_of_read_words;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
@@ -420,23 +731,211 @@ int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}
+static int cudbg_meminfo_get_mem_index(struct adapter *padap,
+ struct cudbg_meminfo *mem_info,
+ u8 mem_type, u8 *idx)
+{
+ u8 i, flag;
+
+ switch (mem_type) {
+ case MEM_EDC0:
+ flag = EDC0_FLAG;
+ break;
+ case MEM_EDC1:
+ flag = EDC1_FLAG;
+ break;
+ case MEM_MC0:
+ /* Some T5 cards have both MC0 and MC1. */
+ flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
+ break;
+ case MEM_MC1:
+ flag = MC1_FLAG;
+ break;
+ case MEM_HMA:
+ flag = HMA_FLAG;
+ break;
+ default:
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+ }
+
+ for (i = 0; i < mem_info->avail_c; i++) {
+ if (mem_info->avail[i].idx == flag) {
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+}
+
+/* Fetch the @region_name's start and end from @meminfo. */
+static int cudbg_get_mem_region(struct adapter *padap,
+ struct cudbg_meminfo *meminfo,
+ u8 mem_type, const char *region_name,
+ struct cudbg_mem_desc *mem_desc)
+{
+ u8 mc, found = 0;
+ u32 i, idx = 0;
+ int rc;
+
+ rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) {
+ if (!strcmp(cudbg_region[i], region_name)) {
+ found = 1;
+ idx = i;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ found = 0;
+ for (i = 0; i < meminfo->mem_c; i++) {
+ if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
+ continue; /* Skip holes */
+
+ if (!(meminfo->mem[i].limit))
+ meminfo->mem[i].limit =
+ i < meminfo->mem_c - 1 ?
+ meminfo->mem[i + 1].base - 1 : ~0;
+
+ if (meminfo->mem[i].idx == idx) {
+ /* Check if the region exists in @mem_type memory */
+ if (meminfo->mem[i].base < meminfo->avail[mc].base &&
+ meminfo->mem[i].limit < meminfo->avail[mc].base)
+ return -EINVAL;
+
+ if (meminfo->mem[i].base > meminfo->avail[mc].limit)
+ return -EINVAL;
+
+ memcpy(mem_desc, &meminfo->mem[i],
+ sizeof(struct cudbg_mem_desc));
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Fetch and update the start and end of the requested memory region so that
+ * they are relative to 0 within the corresponding EDC/MC/HMA.
+ */
+static int cudbg_get_mem_relative(struct adapter *padap,
+ struct cudbg_meminfo *meminfo,
+ u8 mem_type, u32 *out_base, u32 *out_end)
+{
+ u8 mc_idx;
+ int rc;
+
+ rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
+ if (rc)
+ return rc;
+
+ if (*out_base < meminfo->avail[mc_idx].base)
+ *out_base = 0;
+ else
+ *out_base -= meminfo->avail[mc_idx].base;
+
+ if (*out_end > meminfo->avail[mc_idx].limit)
+ *out_end = meminfo->avail[mc_idx].limit;
+ else
+ *out_end -= meminfo->avail[mc_idx].base;
+
+ return 0;
+}
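+
+/* Illustrative example with made-up numbers: if avail[mc_idx] spans
+ * [0x100000, 0x1fffff] and the requested region is [0x180000, 0x1bffff],
+ * the rebased range w.r.t. 0 becomes [0x80000, 0xbffff].
+ */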
+
+/* Get the TX and RX payload regions */
+static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
+ const char *region_name,
+ struct cudbg_region_info *payload)
+{
+ struct cudbg_mem_desc mem_desc = { 0 };
+ struct cudbg_meminfo meminfo;
+ int rc;
+
+ rc = cudbg_fill_meminfo(padap, &meminfo);
+ if (rc)
+ return rc;
+
+ rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
+ &mem_desc);
+ if (rc) {
+ payload->exist = false;
+ return 0;
+ }
+
+ payload->exist = true;
+ payload->start = mem_desc.base;
+ payload->end = mem_desc.limit;
+
+ return cudbg_get_mem_relative(padap, &meminfo, mem_type,
+ &payload->start, &payload->end);
+}
+
+#define CUDBG_YIELD_ITERATION 256
+
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff, u8 mem_type,
unsigned long tot_len,
struct cudbg_error *cudbg_err)
{
+ static const char * const region_name[] = { "Tx payload:",
+ "Rx payload:" };
unsigned long bytes, bytes_left, bytes_read = 0;
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_region_info payload[2];
+ u32 yield_count = 0;
int rc = 0;
+ u8 i;
+
+	/* Get the TX/RX payload region ranges, if they exist */
+ memset(payload, 0, sizeof(payload));
+ for (i = 0; i < ARRAY_SIZE(region_name); i++) {
+ rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
+ &payload[i]);
+ if (rc)
+ return rc;
+
+ if (payload[i].exist) {
+			/* Align start and end to the chunk size so that each
+			 * CUDBG_CHUNK_SIZE read below falls either fully
+			 * inside or fully outside the payload region.
+			 */
+ payload[i].start = roundup(payload[i].start,
+ CUDBG_CHUNK_SIZE);
+ payload[i].end = rounddown(payload[i].end,
+ CUDBG_CHUNK_SIZE);
+ }
+ }
bytes_left = tot_len;
while (bytes_left > 0) {
+		/* MC is huge and is read through PIO access, so this
+		 * loop can hold the CPU for a long time. The OS may think
+		 * the process has hung and generate CPU stall traces.
+		 * So yield the CPU regularly.
+		 */
+ yield_count++;
+ if (!(yield_count % CUDBG_YIELD_ITERATION))
+ schedule();
+
bytes = min_t(unsigned long, bytes_left,
(unsigned long)CUDBG_CHUNK_SIZE);
- rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
if (rc)
return rc;
+
+ for (i = 0; i < ARRAY_SIZE(payload); i++)
+ if (payload[i].exist &&
+ bytes_read >= payload[i].start &&
+ bytes_read + bytes <= payload[i].end)
+ /* TX and RX Payload regions can't overlap */
+ goto skip_read;
+
spin_lock(&padap->win0_lock);
rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
bytes_read, bytes,
@@ -445,37 +944,23 @@ static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
spin_unlock(&padap->win0_lock);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
+
+skip_read:
bytes_left -= bytes;
bytes_read += bytes;
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
+ dbg_buff);
+ if (rc) {
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return rc;
+ }
}
return rc;
}
-static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
- struct card_mem *mem_info)
-{
- struct adapter *padap = pdbg_init->adap;
- u32 value;
-
- value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
- value = EDRAM0_SIZE_G(value);
- mem_info->size_edc0 = (u16)value;
-
- value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
- value = EDRAM1_SIZE_G(value);
- mem_info->size_edc1 = (u16)value;
-
- value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
- if (value & EDRAM0_ENABLE_F)
- mem_info->mem_flag |= (1 << EDC0_FLAG);
- if (value & EDRAM1_ENABLE_F)
- mem_info->mem_flag |= (1 << EDC1_FLAG);
-}
-
static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err)
{
@@ -495,37 +980,25 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err,
u8 mem_type)
{
- struct card_mem mem_info = {0};
- unsigned long flag, size;
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_meminfo mem_info;
+ unsigned long size;
+ u8 mc_idx;
int rc;
+ memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
+ rc = cudbg_fill_meminfo(padap, &mem_info);
+ if (rc)
+ return rc;
+
cudbg_t4_fwcache(pdbg_init, cudbg_err);
- cudbg_collect_mem_info(pdbg_init, &mem_info);
- switch (mem_type) {
- case MEM_EDC0:
- flag = (1 << EDC0_FLAG);
- size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
- break;
- case MEM_EDC1:
- flag = (1 << EDC1_FLAG);
- size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
- break;
- default:
- rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
- goto err;
- }
+ rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
+ if (rc)
+ return rc;
- if (mem_info.mem_flag & flag) {
- rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
- size, cudbg_err);
- if (rc)
- goto err;
- } else {
- rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
- goto err;
- }
-err:
- return rc;
+ size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+ return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
+ cudbg_err);
}
int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
@@ -544,26 +1017,51 @@ int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
MEM_EDC1);
}
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+ MEM_MC0);
+}
+
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+ MEM_MC1);
+}
+
+int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+ MEM_HMA);
+}
+
int cudbg_collect_rss(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
- int rc;
+ int rc, nentries;
- rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
+ nentries = t4_chip_rss_size(padap);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
+ &temp_buff);
if (rc)
return rc;
rc = t4_read_rss(padap, (u16 *)temp_buff.data);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
@@ -576,7 +1074,7 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
int vf, rc, vf_count;
vf_count = padap->params.arch.vfcount;
- rc = cudbg_get_buff(dbg_buff,
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
vf_count * sizeof(struct cudbg_rss_vf_conf),
&temp_buff);
if (rc)
@@ -586,8 +1084,7 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
for (vf = 0; vf < vf_count; vf++)
t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
&vfconf[vf].rss_vf_vfh, true);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
@@ -598,13 +1095,13 @@ int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
struct cudbg_buffer temp_buff = { 0 };
int rc;
- rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
+ &temp_buff);
if (rc)
return rc;
t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
@@ -616,7 +1113,7 @@ int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
struct cudbg_pm_stats *pm_stats_buff;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
&temp_buff);
if (rc)
return rc;
@@ -624,8 +1121,7 @@ int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
@@ -640,7 +1136,7 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
if (!padap->params.vpd.cclk)
return CUDBG_STATUS_CCLK_NOT_DEFINED;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
&temp_buff);
hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
@@ -649,8 +1145,7 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
for (i = 0; i < NTX_SCHED; ++i)
t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
&hw_sched_buff->ipg[i], true);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
@@ -674,7 +1169,7 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
n = n / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -763,8 +1258,7 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
tp_pio->ireg_local_offset, true);
ch_tp_pio++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
@@ -776,7 +1270,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
struct ireg_buf *ch_sge_dbg;
int i, rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(*ch_sge_dbg) * 2,
+ &temp_buff);
if (rc)
return rc;
@@ -797,8 +1292,7 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
sge_pio->ireg_local_offset);
ch_sge_dbg++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
@@ -810,7 +1304,7 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
struct cudbg_ulprx_la *ulprx_la_buff;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la),
&temp_buff);
if (rc)
return rc;
@@ -818,8 +1312,7 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
ulprx_la_buff->size = ULPRX_LA_SIZE;
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
@@ -832,15 +1325,39 @@ int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
int size, rc;
size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
+}
+
+int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_meminfo *meminfo_buff;
+ int rc;
+
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_meminfo),
+ &temp_buff);
+ if (rc)
+ return rc;
+
+ meminfo_buff = (struct cudbg_meminfo *)temp_buff.data;
+ rc = cudbg_fill_meminfo(padap, meminfo_buff);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return rc;
+ }
+
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
@@ -854,7 +1371,7 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
size = sizeof(struct cudbg_cim_pif_la) +
2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -863,8 +1380,7 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
(u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
NULL, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
@@ -880,7 +1396,7 @@ int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
if (!padap->params.vpd.cclk)
return CUDBG_STATUS_CCLK_NOT_DEFINED;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info),
&temp_buff);
if (rc)
return rc;
@@ -912,8 +1428,7 @@ int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
clk_info_buff->finwait2_timer =
tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
@@ -928,7 +1443,7 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -969,8 +1484,7 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
pcie_pio->ireg_local_offset);
ch_pcie++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
@@ -985,7 +1499,7 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1026,8 +1540,7 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
pm_pio->ireg_local_offset);
ch_pm++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
@@ -1041,7 +1554,8 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
u32 para[2], val[2];
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_tid_info_region_rev1),
&temp_buff);
if (rc)
return rc;
@@ -1053,6 +1567,12 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
sizeof(struct cudbg_ver_hdr);
+ /* If firmware is not attached/alive, use backdoor register
+	 * access to collect the dump.
+ */
+ if (!is_fw_attached(pdbg_init))
+ goto fill_tid;
+
#define FW_PARAM_PFVF_A(param) \
(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
@@ -1064,7 +1584,7 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
tid->uotid_base = val[0];
@@ -1083,13 +1603,16 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
para, val);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
tid->hpftid_base = val[0];
tid->nhpftids = val[1] - val[0] + 1;
}
+#undef FW_PARAM_PFVF_A
+
+fill_tid:
tid->ntids = padap->tids.ntids;
tid->nstids = padap->tids.nstids;
tid->stid_base = padap->tids.stid_base;
@@ -1109,28 +1632,137 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);
-#undef FW_PARAM_PFVF_A
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
+}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ u32 size, *value, j;
+ int i, rc, n;
+
+ size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
+ n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
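+	/* Each t5_pcie_config_array entry is a { start, end } pair of PCI
+	 * config-space offsets; e.g. a hypothetical pair { 0x0, 0xc } covers
+	 * the four u32 registers at offsets 0x0, 0x4, 0x8 and 0xc.
+	 */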
+ value = (u32 *)temp_buff.data;
+ for (i = 0; i < n; i++) {
+ for (j = t5_pcie_config_array[i][0];
+ j <= t5_pcie_config_array[i][1]; j += 4) {
+ t4_hw_pci_read_cfg4(padap, j, value);
+ value++;
+ }
+ }
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
-int cudbg_dump_context_size(struct adapter *padap)
+static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
+{
+ int index, bit, bit_pos = 0;
+
+ switch (type) {
+ case CTXT_EGRESS:
+ bit_pos = 176;
+ break;
+ case CTXT_INGRESS:
+ bit_pos = 141;
+ break;
+ case CTXT_FLM:
+ bit_pos = 89;
+ break;
+ }
+ index = bit_pos / 32;
+ bit = bit_pos % 32;
+ return buf[index] & (1U << bit);
+}
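+
+/* Worked example of the mapping above: for CTXT_INGRESS, bit_pos is 141,
+ * so index = 141 / 32 = 4 and bit = 141 % 32 = 13, i.e. the check reads
+ * buf[4] & (1U << 13).
+ */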
+
+static int cudbg_get_ctxt_region_info(struct adapter *padap,
+ struct cudbg_region_info *ctx_info,
+ u8 *mem_type)
{
- u32 value, size;
+ struct cudbg_mem_desc mem_desc;
+ struct cudbg_meminfo meminfo;
+ u32 i, j, value, found;
u8 flq;
+ int rc;
+
+ rc = cudbg_fill_meminfo(padap, &meminfo);
+ if (rc)
+ return rc;
+
+ /* Get EGRESS and INGRESS context region size */
+ for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
+ found = 0;
+ memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc));
+ for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) {
+ rc = cudbg_get_mem_region(padap, &meminfo, j,
+ cudbg_region[i],
+ &mem_desc);
+ if (!rc) {
+ found = 1;
+ rc = cudbg_get_mem_relative(padap, &meminfo, j,
+ &mem_desc.base,
+ &mem_desc.limit);
+ if (rc) {
+ ctx_info[i].exist = false;
+ break;
+ }
+ ctx_info[i].exist = true;
+ ctx_info[i].start = mem_desc.base;
+ ctx_info[i].end = mem_desc.limit;
+ mem_type[i] = j;
+ break;
+ }
+ }
+ if (!found)
+ ctx_info[i].exist = false;
+ }
+ /* Get FLM and CNM max qid. */
value = t4_read_reg(padap, SGE_FLM_CFG_A);
/* Get number of data freelist queues */
flq = HDRSTARTFLQ_G(value);
- size = CUDBG_MAX_FL_QIDS >> flq;
+ ctx_info[CTXT_FLM].exist = true;
+ ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;
- /* Add extra space for congestion manager contexts.
- * The number of CONM contexts are same as number of freelist
+	/* The number of CONM contexts is the same as the number of freelist
* queues.
*/
- size += size;
+ ctx_info[CTXT_CNM].exist = true;
+ ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;
+
+ return 0;
+}
+
+int cudbg_dump_context_size(struct adapter *padap)
+{
+ struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
+ u8 mem_type[CTXT_INGRESS + 1] = { 0 };
+ u32 i, size = 0;
+ int rc;
+
+ /* Get max valid qid for each type of queue */
+ rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < CTXT_CNM; i++) {
+ if (!region_info[i].exist) {
+ if (i == CTXT_EGRESS || i == CTXT_INGRESS)
+ size += CUDBG_LOWMEM_MAX_CTXT_QIDS *
+ SGE_CTXT_SIZE;
+ continue;
+ }
+
+ size += (region_info[i].end - region_info[i].start + 1) /
+ SGE_CTXT_SIZE;
+ }
return size * sizeof(struct cudbg_ch_cntxt);
}
@@ -1153,44 +1785,144 @@ static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
}
+static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid,
+ u8 ctxt_type,
+ struct cudbg_ch_cntxt **out_buff)
+{
+ struct cudbg_ch_cntxt *buff = *out_buff;
+ int rc;
+ u32 j;
+
+ for (j = 0; j < max_qid; j++) {
+ cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
+ rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
+ if (!rc)
+ continue;
+
+ buff->cntxt_type = ctxt_type;
+ buff->cntxt_id = j;
+ buff++;
+ if (ctxt_type == CTXT_FLM) {
+ cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
+ buff->cntxt_type = CTXT_CNM;
+ buff->cntxt_id = j;
+ buff++;
+ }
+ }
+
+ *out_buff = buff;
+}
+
int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
{
+ struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
struct adapter *padap = pdbg_init->adap;
+ u32 j, size, max_ctx_size, max_ctx_qid;
+ u8 mem_type[CTXT_INGRESS + 1] = { 0 };
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_ch_cntxt *buff;
- u32 size, i = 0;
+ u64 *dst_off, *src_off;
+ u8 *ctx_buf;
+ u8 i, k;
int rc;
+ /* Get max valid qid for each type of queue */
+ rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
+ if (rc)
+ return rc;
+
rc = cudbg_dump_context_size(padap);
if (rc <= 0)
return CUDBG_STATUS_ENTITY_NOT_FOUND;
size = rc;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
+	/* Get a buffer with enough space to read the biggest context
+ * region in memory.
+ */
+ max_ctx_size = max(region_info[CTXT_EGRESS].end -
+ region_info[CTXT_EGRESS].start + 1,
+ region_info[CTXT_INGRESS].end -
+ region_info[CTXT_INGRESS].start + 1);
+
+ ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
+ if (!ctx_buf) {
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return -ENOMEM;
+ }
+
buff = (struct cudbg_ch_cntxt *)temp_buff.data;
- while (size > 0) {
- buff->cntxt_type = CTXT_FLM;
- buff->cntxt_id = i;
- cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data);
- buff++;
- size -= sizeof(struct cudbg_ch_cntxt);
- buff->cntxt_type = CTXT_CNM;
- buff->cntxt_id = i;
- cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data);
- buff++;
- size -= sizeof(struct cudbg_ch_cntxt);
+ /* Collect EGRESS and INGRESS context data.
+	 * In case of failure, fall back to collecting via FW or
+ * backdoor access.
+ */
+ for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
+ if (!region_info[i].exist) {
+ max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
+ cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
+ &buff);
+ continue;
+ }
- i++;
+ max_ctx_size = region_info[i].end - region_info[i].start + 1;
+ max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
+
+ /* If firmware is not attached/alive, use backdoor register
+		 * access to collect the dump.
+ */
+ if (is_fw_attached(pdbg_init)) {
+ t4_sge_ctxt_flush(padap, padap->mbox, i);
+
+ rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
+ region_info[i].start, max_ctx_size,
+ (__be32 *)ctx_buf, 1);
+ }
+
+ if (rc || !is_fw_attached(pdbg_init)) {
+ max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
+ cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
+ &buff);
+ continue;
+ }
+
+ for (j = 0; j < max_ctx_qid; j++) {
+ src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
+ dst_off = (u64 *)buff->data;
+
+			/* The data is stored in 64-bit CPU byte order.
+			 * Convert it to big-endian before parsing.
+ */
+ for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
+ dst_off[k] = cpu_to_be64(src_off[k]);
+
+ rc = cudbg_sge_ctxt_check_valid(buff->data, i);
+ if (!rc)
+ continue;
+
+ buff->cntxt_type = i;
+ buff->cntxt_id = j;
+ buff++;
+ }
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ kvfree(ctx_buf);
+
+ /* Collect FREELIST and CONGESTION MANAGER contexts */
+ max_ctx_size = region_info[CTXT_FLM].end -
+ region_info[CTXT_FLM].start + 1;
+ max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
+	/* Since FLM and CONM are 1-to-1 mapped, the function below
+	 * fetches both FLM and CONM contexts.
+ */
+ cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);
+
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
@@ -1228,9 +1960,10 @@ static void cudbg_mps_rpl_backdoor(struct adapter *padap,
mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
}
-static int cudbg_collect_tcam_index(struct adapter *padap,
+static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init,
struct cudbg_mps_tcam *tcam, u32 idx)
{
+ struct adapter *padap = pdbg_init->adap;
u64 tcamy, tcamx, val;
u32 ctl, data2;
int rc = 0;
@@ -1315,12 +2048,22 @@ static int cudbg_collect_tcam_index(struct adapter *padap,
htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
FW_LDST_CMD_IDX_V(idx));
- rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd),
- &ldst_cmd);
- if (rc)
+ /* If firmware is not attached/alive, use backdoor register
+	 * access to collect the dump.
+ */
+ if (is_fw_attached(pdbg_init))
+ rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
+ sizeof(ldst_cmd), &ldst_cmd);
+
+ if (rc || !is_fw_attached(pdbg_init)) {
cudbg_mps_rpl_backdoor(padap, &mps_rplc);
- else
+			/* Ignore the error since we collected the data
+			 * directly by reading registers.
+ */
+ rc = 0;
+ } else {
mps_rplc = ldst_cmd.u.mps.rplc;
+ }
tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
@@ -1351,16 +2094,16 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
n = padap->params.arch.mps_tcam_size;
size = sizeof(struct cudbg_mps_tcam) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
tcam = (struct cudbg_mps_tcam *)temp_buff.data;
for (i = 0; i < n; i++) {
- rc = cudbg_collect_tcam_index(padap, tcam, i);
+ rc = cudbg_collect_tcam_index(pdbg_init, tcam, i);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
total_size += sizeof(struct cudbg_mps_tcam);
@@ -1370,11 +2113,10 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
if (!total_size) {
rc = CUDBG_SYSTEM_ERROR;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
@@ -1425,7 +2167,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
if (rc)
return rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
&temp_buff);
if (rc)
return rc;
@@ -1441,8 +2183,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
@@ -1593,7 +2334,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
size += sizeof(struct cudbg_tcam);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1605,7 +2346,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
rc = cudbg_read_tid(pdbg_init, i, tid_data);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -1616,8 +2357,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
bytes += sizeof(struct cudbg_tid_data);
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
@@ -1630,13 +2370,12 @@ int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
int rc;
size = sizeof(u16) * NMTUS * NCCTRL_WIN;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
t4_read_cong_tbl(padap, (void *)temp_buff.data);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
@@ -1654,7 +2393,7 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1690,8 +2429,7 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
}
ma_indr++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
@@ -1704,7 +2442,7 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
u32 i, j;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la),
&temp_buff);
if (rc)
return rc;
@@ -1725,8 +2463,7 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
t4_read_reg(padap,
ULP_TX_LA_RDDATA_0_A + 0x10 * i);
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
@@ -1735,13 +2472,23 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
+ u32 local_offset, local_range;
struct ireg_buf *up_cim;
+ u32 size, j, iter;
+ u32 instance = 0;
int i, rc, n;
- u32 size;
- n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ if (is_t5(padap->params.chip))
+ n = sizeof(t5_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
+ else if (is_t6(padap->params.chip))
+ n = sizeof(t6_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
+ else
+ return CUDBG_STATUS_NOT_IMPLEMENTED;
+
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1757,6 +2504,7 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
t5_up_cim_reg_array[i][2];
up_cim_reg->ireg_offset_range =
t5_up_cim_reg_array[i][3];
+ instance = t5_up_cim_reg_array[i][4];
} else if (is_t6(padap->params.chip)) {
up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
@@ -1764,18 +2512,39 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
t6_up_cim_reg_array[i][2];
up_cim_reg->ireg_offset_range =
t6_up_cim_reg_array[i][3];
+ instance = t6_up_cim_reg_array[i][4];
}
- rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
- up_cim_reg->ireg_offset_range, buff);
- if (rc) {
- cudbg_put_buff(&temp_buff, dbg_buff);
- return rc;
+ switch (instance) {
+ case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
+ iter = up_cim_reg->ireg_offset_range;
+ local_offset = 0x120;
+ local_range = 1;
+ break;
+ case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
+ iter = up_cim_reg->ireg_offset_range;
+ local_offset = 0x10;
+ local_range = 1;
+ break;
+ default:
+ iter = 1;
+ local_offset = 0;
+ local_range = up_cim_reg->ireg_offset_range;
+ break;
+ }
+
+ for (j = 0; j < iter; j++, buff++) {
+ rc = t4_cim_read(padap,
+ up_cim_reg->ireg_local_offset +
+ (j * local_offset), local_range, buff);
+ if (rc) {
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return rc;
+ }
}
up_cim++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
@@ -1788,7 +2557,8 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
int i, rc;
u32 addr;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_pbt_tables),
&temp_buff);
if (rc)
return rc;
@@ -1801,7 +2571,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_dynamic[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -1814,7 +2584,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_static[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -1826,7 +2596,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->lrf_table[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -1838,12 +2608,11 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_data[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
@@ -1864,7 +2633,7 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
log = padap->mbox_log;
mbox_cmds = padap->mbox_log->size;
size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1887,8 +2656,7 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
}
mboxlog++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
@@ -1906,7 +2674,7 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1924,6 +2692,5 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
hma_fli->ireg_local_offset);
hma_indr++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
index caeee8e33e86..eebefe7cd18e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
@@ -75,6 +75,12 @@ int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_rss(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
@@ -102,6 +108,9 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
@@ -123,6 +132,9 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
@@ -156,6 +168,9 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i);
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
@@ -163,7 +178,8 @@ void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
u32 cudbg_cim_obq_size(struct adapter *padap, int qid);
int cudbg_dump_context_size(struct adapter *padap);
-struct cudbg_tcam;
+int cudbg_fill_meminfo(struct adapter *padap,
+ struct cudbg_meminfo *meminfo_buff);
void cudbg_fill_le_tcam_info(struct adapter *padap,
struct cudbg_tcam *tcam_region);
#endif /* __CUDBG_LIB_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
index 24b33f28e548..8150ea85d6a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
@@ -26,6 +26,7 @@ enum cudbg_dump_type {
enum cudbg_compression_type {
CUDBG_COMPRESSION_NONE = 1,
+ CUDBG_COMPRESSION_ZLIB,
};
struct cudbg_hdr {
@@ -78,10 +79,11 @@ struct cudbg_error {
#define CDUMP_MAX_COMP_BUF_SIZE ((64 * 1024) - 1)
#define CUDBG_CHUNK_SIZE ((CDUMP_MAX_COMP_BUF_SIZE / 1024) * 1024)
-int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+int cudbg_get_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pdbg_buff, u32 size,
struct cudbg_buffer *pin_buff);
-void cudbg_put_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *pdbg_buff);
+void cudbg_put_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff);
void cudbg_update_buff(struct cudbg_buffer *pin_buff,
struct cudbg_buffer *pout_buff);
#endif /* __CUDBG_LIB_COMMON_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c
new file mode 100644
index 000000000000..25cc06d75cff
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2018 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/zlib.h>
+
+#include "cxgb4.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_zlib.h"
+
+static int cudbg_get_compress_hdr(struct cudbg_buffer *pdbg_buff,
+ struct cudbg_buffer *pin_buff)
+{
+ if (pdbg_buff->offset + sizeof(struct cudbg_compress_hdr) >
+ pdbg_buff->size)
+ return CUDBG_STATUS_NO_MEM;
+
+ pin_buff->data = (char *)pdbg_buff->data + pdbg_buff->offset;
+ pin_buff->offset = 0;
+ pin_buff->size = sizeof(struct cudbg_compress_hdr);
+ pdbg_buff->offset += sizeof(struct cudbg_compress_hdr);
+ return 0;
+}
+
+int cudbg_compress_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pout_buff)
+{
+ struct cudbg_buffer temp_buff = { 0 };
+ struct z_stream_s compress_stream;
+ struct cudbg_compress_hdr *c_hdr;
+ int rc;
+
+ /* Write compression header to output buffer before compression */
+ rc = cudbg_get_compress_hdr(pout_buff, &temp_buff);
+ if (rc)
+ return rc;
+
+ c_hdr = (struct cudbg_compress_hdr *)temp_buff.data;
+ c_hdr->compress_id = CUDBG_ZLIB_COMPRESS_ID;
+
+ memset(&compress_stream, 0, sizeof(struct z_stream_s));
+ compress_stream.workspace = pdbg_init->workspace;
+ rc = zlib_deflateInit2(&compress_stream, Z_DEFAULT_COMPRESSION,
+ Z_DEFLATED, CUDBG_ZLIB_WIN_BITS,
+ CUDBG_ZLIB_MEM_LVL, Z_DEFAULT_STRATEGY);
+ if (rc != Z_OK)
+ return CUDBG_SYSTEM_ERROR;
+
+ compress_stream.next_in = pin_buff->data;
+ compress_stream.avail_in = pin_buff->size;
+ compress_stream.next_out = pout_buff->data + pout_buff->offset;
+ compress_stream.avail_out = pout_buff->size - pout_buff->offset;
+
+ rc = zlib_deflate(&compress_stream, Z_FINISH);
+ if (rc != Z_STREAM_END)
+ return CUDBG_SYSTEM_ERROR;
+
+ rc = zlib_deflateEnd(&compress_stream);
+ if (rc != Z_OK)
+ return CUDBG_SYSTEM_ERROR;
+
+ c_hdr->compress_size = compress_stream.total_out;
+ c_hdr->decompress_size = pin_buff->size;
+ pout_buff->offset += compress_stream.total_out;
+
+ return 0;
+}
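+
+/* Decoder-side sketch (illustrative only, not part of this driver): each
+ * compressed chunk in the dump is a struct cudbg_compress_hdr followed by
+ * compress_size bytes of deflate data, so a matching kernel-zlib decoder
+ * could inflate one chunk roughly as follows:
+ *
+ *	struct cudbg_compress_hdr *hdr = (struct cudbg_compress_hdr *)chunk;
+ *	struct z_stream_s s = { 0 };
+ *
+ *	s.workspace = vmalloc(zlib_inflate_workspacesize());
+ *	zlib_inflateInit2(&s, CUDBG_ZLIB_WIN_BITS);
+ *	s.next_in = chunk + sizeof(*hdr);
+ *	s.avail_in = hdr->compress_size;
+ *	s.next_out = out;
+ *	s.avail_out = hdr->decompress_size;
+ *	zlib_inflate(&s, Z_FINISH);
+ *	zlib_inflateEnd(&s);
+ *	vfree(s.workspace);
+ */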
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h
new file mode 100644
index 000000000000..60d23805dfc3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2018 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef __CUDBG_ZLIB_H__
+#define __CUDBG_ZLIB_H__
+
+#include <linux/zlib.h>
+
+#define CUDBG_ZLIB_COMPRESS_ID 17
+#define CUDBG_ZLIB_WIN_BITS 12
+#define CUDBG_ZLIB_MEM_LVL 4
+
+struct cudbg_compress_hdr {
+ u32 compress_id;
+ u64 decompress_size;
+ u64 compress_size;
+ u64 rsvd[32];
+};
+
+static inline int cudbg_get_workspace_size(void)
+{
+ return zlib_deflate_workspacesize(CUDBG_ZLIB_WIN_BITS,
+ CUDBG_ZLIB_MEM_LVL);
+}
+
+int cudbg_compress_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pout_buff);
+#endif /* __CUDBG_ZLIB_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6f9fa6e3c42a..9040e13ce4b7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -58,6 +58,13 @@
extern struct list_head adapter_list;
extern struct mutex uld_mutex;
+/* Suspend an Ethernet Tx queue if it has fewer available descriptors than
+ * this. This is the same as calc_tx_descs() for a TSO packet with
+ * nr_frags == MAX_SKB_FRAGS.
+ */
+#define ETHTXQ_STOP_THRES \
+ (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
+
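+/* Worked example, assuming MAX_SKB_FRAGS == 17 (4K pages): the SGL takes
+ * (3 * 17) / 2 + (17 & 1) = 26 flits, so DIV_ROUND_UP(26, 8) = 4 descriptors
+ * (8 flits per 64-byte descriptor), plus 1 more, i.e. a threshold of 5.
+ */
+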
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
@@ -77,7 +84,8 @@ enum {
MEM_EDC1,
MEM_MC,
MEM_MC0 = MEM_MC,
- MEM_MC1
+ MEM_MC1,
+ MEM_HMA,
};
enum {
@@ -311,6 +319,7 @@ struct vpd_params {
};
struct pci_params {
+ unsigned int vpd_cap_addr;
unsigned char speed;
unsigned char width;
};
@@ -344,7 +353,6 @@ struct adapter_params {
unsigned int sf_size; /* serial flash size in bytes */
unsigned int sf_nsec; /* # of flash sectors */
- unsigned int sf_fw_start; /* start of FW image in flash */
unsigned int fw_vers; /* firmware version */
unsigned int bs_vers; /* bootstrap version */
@@ -564,6 +572,7 @@ enum { /* adapter flags */
enum {
ULP_CRYPTO_LOOKASIDE = 1 << 0,
+ ULP_CRYPTO_IPSEC_INLINE = 1 << 1,
};
struct rx_sw_desc;
@@ -819,12 +828,17 @@ struct vf_info {
unsigned char vf_mac_addr[ETH_ALEN];
unsigned int tx_rate;
bool pf_set_mac;
+ u16 vlan;
};
struct mbox_list {
struct list_head list;
};
+struct mps_encap_entry {
+ atomic_t refcnt;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -839,6 +853,10 @@ struct adapter {
enum chip_type chip;
int msg_enable;
+ __be16 vxlan_port;
+ u8 vxlan_port_cnt;
+ __be16 geneve_port;
+ u8 geneve_port_cnt;
struct adapter_params params;
struct cxgb4_virt_res vres;
@@ -868,7 +886,10 @@ struct adapter {
unsigned int clipt_start;
unsigned int clipt_end;
struct clip_tbl *clipt;
+ unsigned int rawf_start;
+ unsigned int rawf_cnt;
struct smt_data *smt;
+ struct mps_encap_entry *mps_encap;
struct cxgb4_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX];
unsigned int num_uld;
@@ -968,6 +989,11 @@ enum {
SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */
};
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct sk_buff *skb;
+ struct ulptx_sgl *sgl;
+};
+
/* Support for "sched_queue" command to allow one or more NIC TX Queues
* to be bound to a TX Scheduling Class.
*/
@@ -1305,6 +1331,7 @@ void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
extern int dbfifo_int_thresh;
#define for_each_port(adapter, iter) \
@@ -1423,6 +1450,21 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
q->size = size;
}
+/**
+ * t4_is_inserted_mod_type - check if a Firmware Module Type is plugged in
+ * @fw_mod_type: the Firmware Module Type
+ *
+ * Return whether the Firmware Module Type represents a real Transceiver
+ * Module/Cable Module Type which has been inserted.
+ */
+static inline bool t4_is_inserted_mod_type(unsigned int fw_mod_type)
+{
+ return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
+ fw_mod_type != FW_PORT_MOD_TYPE_NOTSUPPORTED &&
+ fw_mod_type != FW_PORT_MOD_TYPE_UNKNOWN &&
+ fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
+}
+
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, const u32 *vals,
unsigned int nregs, unsigned int start_idx);
@@ -1512,6 +1554,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
int port, int pf, int vf, u8 mac[]);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
+unsigned int t4_chip_rss_size(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
@@ -1621,6 +1664,12 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox,
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok);
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
@@ -1653,7 +1702,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
int t4_update_port_info(struct port_info *pi);
int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
@@ -1696,8 +1745,23 @@ void t4_uld_mem_free(struct adapter *adap);
int t4_uld_mem_alloc(struct adapter *adap);
void t4_uld_clean_up(struct adapter *adap);
void t4_register_netevent_notifier(void);
+int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
+ unsigned int devid, unsigned int offset,
+ unsigned int len, u8 *buf);
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap);
void free_txq(struct adapter *adap, struct sge_txq *q);
+void cxgb4_reclaim_completed_tx(struct adapter *adap,
+ struct sge_txq *q, bool unmap);
+int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr);
+void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
+ void *pos);
+void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr);
+void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
+int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
+ u16 vlan);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 29cc625e9833..30485f9a598f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -18,11 +18,14 @@
#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
-#include "cudbg_entity.h"
+#include "cudbg_zlib.h"
static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
{ CUDBG_EDC0, cudbg_collect_edc0_meminfo },
{ CUDBG_EDC1, cudbg_collect_edc1_meminfo },
+ { CUDBG_MC0, cudbg_collect_mc0_meminfo },
+ { CUDBG_MC1, cudbg_collect_mc1_meminfo },
+ { CUDBG_HMA, cudbg_collect_hma_meminfo },
};
static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
@@ -53,6 +56,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
{ CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
{ CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
{ CUDBG_TP_LA, cudbg_collect_tp_la },
+ { CUDBG_MEMINFO, cudbg_collect_meminfo },
{ CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
{ CUDBG_CLK, cudbg_collect_clk_info },
{ CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
@@ -60,6 +64,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
{ CUDBG_TID_INFO, cudbg_collect_tid },
+ { CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config },
{ CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
{ CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
{ CUDBG_VPD_DATA, cudbg_collect_vpd_data },
@@ -158,8 +163,24 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
}
len = cudbg_mbytes_to_bytes(len);
break;
+ case CUDBG_MC0:
+ value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (value & EXT_MEM0_ENABLE_F) {
+ value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+ len = EXT_MEM0_SIZE_G(value);
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
+ case CUDBG_MC1:
+ value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (value & EXT_MEM1_ENABLE_F) {
+ value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ len = EXT_MEM1_SIZE_G(value);
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
case CUDBG_RSS:
- len = RSS_NENTRIES * sizeof(u16);
+ len = t4_chip_rss_size(adap) * sizeof(u16);
break;
case CUDBG_RSS_VF_CONF:
len = adap->params.arch.vfcount *
@@ -201,6 +222,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
case CUDBG_TP_LA:
len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
break;
+ case CUDBG_MEMINFO:
+ len = sizeof(struct cudbg_meminfo);
+ break;
case CUDBG_CIM_PIF_LA:
len = sizeof(struct cudbg_cim_pif_la);
len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
@@ -219,6 +243,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
case CUDBG_TID_INFO:
len = sizeof(struct cudbg_tid_info_region_rev1);
break;
+ case CUDBG_PCIE_CONFIG:
+ len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
+ break;
case CUDBG_DUMP_CONTEXT:
len = cudbg_dump_context_size(adap);
break;
@@ -248,7 +275,13 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
len = sizeof(struct cudbg_ulptx_la);
break;
case CUDBG_UP_CIM_INDIRECT:
- n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ n = 0;
+ if (is_t5(adap->params.chip))
+ n = sizeof(t5_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
+ else if (is_t6(adap->params.chip))
+ n = sizeof(t6_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
len = sizeof(struct ireg_buf) * n;
break;
case CUDBG_PBT_TABLE:
@@ -264,6 +297,17 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
len = sizeof(struct ireg_buf) * n;
}
break;
+ case CUDBG_HMA:
+ value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (value & HMA_MUX_F) {
+			/* In T6 there's no MC1, so HMA shares MC1's
+ * address space.
+ */
+ value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ len = EXT_MEM1_SIZE_G(value);
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
default:
break;
}
@@ -275,6 +319,7 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
{
u32 i, entity;
u32 len = 0;
+ u32 wsize;
if (flag & CXGB4_ETH_DUMP_HW) {
for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
@@ -290,6 +335,11 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
}
}
+ /* If compression is enabled, a smaller destination buffer is enough */
+ wsize = cudbg_get_workspace_size();
+ if (wsize && len > CUDBG_DUMP_BUFF_SIZE)
+ len = CUDBG_DUMP_BUFF_SIZE;
+
return len;
}
@@ -298,22 +348,14 @@ static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
const struct cxgb4_collect_entity *e_arr,
u32 arr_size, void *buf, u32 *tot_size)
{
- struct adapter *adap = pdbg_init->adap;
struct cudbg_error cudbg_err = { 0 };
struct cudbg_entity_hdr *entity_hdr;
- u32 entity_size, i;
- u32 total_size = 0;
+ u32 i, total_size = 0;
int ret;
for (i = 0; i < arr_size; i++) {
const struct cxgb4_collect_entity *e = &e_arr[i];
- /* Skip entities that won't fit in output buffer */
- entity_size = cxgb4_get_entity_length(adap, e->entity);
- if (entity_size >
- pdbg_init->outbuf_size - *tot_size - total_size)
- continue;
-
entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
entity_hdr->entity_type = e->entity;
entity_hdr->start_offset = dbg_buff->offset;
@@ -339,16 +381,40 @@ static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
*tot_size += total_size;
}
+static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init)
+{
+ u32 workspace_size;
+
+ workspace_size = cudbg_get_workspace_size();
+ pdbg_init->compress_buff = vzalloc(CUDBG_COMPRESS_BUFF_SIZE +
+ workspace_size);
+ if (!pdbg_init->compress_buff)
+ return -ENOMEM;
+
+ pdbg_init->compress_buff_size = CUDBG_COMPRESS_BUFF_SIZE;
+ pdbg_init->workspace = (u8 *)pdbg_init->compress_buff +
+ CUDBG_COMPRESS_BUFF_SIZE - workspace_size;
+ return 0;
+}
+
+static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init)
+{
+ if (pdbg_init->compress_buff)
+ vfree(pdbg_init->compress_buff);
+}
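
cudbg_alloc_compress_buff() above serves the compression destination and the zlib workspace from one vzalloc() call, so a single vfree() releases both. A userspace sketch of the same carve-out pattern (calloc standing in for vzalloc; layout simplified to workspace-after-buffer):

#include <stdlib.h>

struct compress_state {
	void *compress_buff;		/* base of the single allocation */
	size_t compress_buff_size;
	void *workspace;		/* carved out of the same block */
};

static int alloc_compress_buff(struct compress_state *s,
			       size_t buf_size, size_t workspace_size)
{
	s->compress_buff = calloc(1, buf_size + workspace_size);
	if (!s->compress_buff)
		return -1;
	s->compress_buff_size = buf_size;
	s->workspace = (char *)s->compress_buff + buf_size;
	return 0;
}

static void free_compress_buff(struct compress_state *s)
{
	free(s->compress_buff);		/* frees the workspace too */
	s->compress_buff = NULL;
}
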
+
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
u32 flag)
{
- struct cudbg_init cudbg_init = { 0 };
struct cudbg_buffer dbg_buff = { 0 };
u32 size, min_size, total_size = 0;
+ struct cudbg_init cudbg_init;
struct cudbg_hdr *cudbg_hdr;
+ int rc;
size = *buf_size;
+ memset(&cudbg_init, 0, sizeof(struct cudbg_init));
cudbg_init.adap = adap;
cudbg_init.outbuf = buf;
cudbg_init.outbuf_size = size;
@@ -365,7 +431,6 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
cudbg_hdr->chip_ver = adap->params.chip;
cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;
- cudbg_hdr->compress_type = CUDBG_COMPRESSION_NONE;
min_size = sizeof(struct cudbg_hdr) +
sizeof(struct cudbg_entity_hdr) *
@@ -373,6 +438,24 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
if (size < min_size)
return -ENOMEM;
+ rc = cudbg_get_workspace_size();
+ if (rc) {
+		/* Zlib is available, so use zlib deflate */
+ cudbg_init.compress_type = CUDBG_COMPRESSION_ZLIB;
+ rc = cudbg_alloc_compress_buff(&cudbg_init);
+ if (rc) {
+ /* Ignore error and continue without compression. */
+ dev_warn(adap->pdev_dev,
+ "Fail allocating compression buffer ret: %d. Continuing without compression.\n",
+ rc);
+ cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
+ rc = 0;
+ }
+ } else {
+ cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
+ }
+
+ cudbg_hdr->compress_type = cudbg_init.compress_type;
dbg_buff.offset += min_size;
total_size = dbg_buff.offset;
@@ -390,8 +473,12 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
buf,
&total_size);
+ cudbg_free_compress_buff(&cudbg_init);
cudbg_hdr->data_len = total_size;
- *buf_size = total_size;
+ if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE)
+ *buf_size = size;
+ else
+ *buf_size = total_size;
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
index c099b5aa2214..ce1ac9a1c878 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
@@ -20,8 +20,12 @@
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
#include "cudbg_lib.h"
+#define CUDBG_DUMP_BUFF_SIZE (32 * 1024 * 1024) /* 32 MB */
+#define CUDBG_COMPRESS_BUFF_SIZE (4 * 1024 * 1024) /* 4 MB */
+
typedef int (*cudbg_collect_callback_t)(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 917663b35603..2822bbff73e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -45,6 +45,10 @@
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
+#include "cudbg_lib.h"
/* generic seq_file support for showing a table of size rows x width. */
static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
@@ -1739,7 +1743,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
*/
if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
/* Inner header VNI */
- vniy = ((data2 & DATAVIDH2_F) << 23) |
+ vniy = (data2 & DATAVIDH2_F) |
(DATAVIDH1_G(data2) << 16) | VIDL_G(val);
dip_hit = data2 & DATADIPHIT_F;
} else {
@@ -1749,6 +1753,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
port_num = DATAPORTNUM_G(data2);
/* Read tcamx. Change the control param */
+ vnix = 0;
ctl |= CTLXYBITSEL_V(1);
t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
@@ -1757,7 +1762,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A);
if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
/* Inner header VNI mask */
- vnix = ((data2 & DATAVIDH2_F) << 23) |
+ vnix = (data2 & DATAVIDH2_F) |
(DATAVIDH1_G(data2) << 16) | VIDL_G(val);
}
} else {
@@ -1830,7 +1835,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
addr[1], addr[2], addr[3],
addr[4], addr[5],
(unsigned long long)mask,
- vniy, vnix, dip_hit ? 'Y' : 'N',
+ vniy, (vnix | vniy),
+ dip_hit ? 'Y' : 'N',
port_num,
(cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
PORTMAP_G(cls_hi),
@@ -2017,11 +2023,12 @@ static int rss_show(struct seq_file *seq, void *v, int idx)
static int rss_open(struct inode *inode, struct file *file)
{
- int ret;
- struct seq_tab *p;
struct adapter *adap = inode->i_private;
+ int ret, nentries;
+ struct seq_tab *p;
- p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show);
+ nentries = t4_chip_rss_size(adap);
+ p = seq_open_tab(file, nentries / 8, 8 * sizeof(u16), 0, rss_show);
if (!p)
return -ENOMEM;
@@ -2664,10 +2671,14 @@ static const struct file_operations mem_debugfs_fops = {
static int tid_info_show(struct seq_file *seq, void *v)
{
+ unsigned int tid_start = 0;
struct adapter *adap = seq->private;
const struct tid_info *t = &adap->tids;
enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+ if (chip > CHELSIO_T5)
+ tid_start = t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
+
if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
unsigned int sb;
seq_printf(seq, "Connections in use: %u\n",
@@ -2679,8 +2690,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
if (sb) {
- seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
- adap->tids.hash_base,
+ seq_printf(seq, "TID range: %u..%u/%u..%u", tid_start,
+ sb - 1, adap->tids.hash_base,
t->ntids - 1);
seq_printf(seq, ", in use: %u/%u\n",
atomic_read(&t->tids_in_use),
@@ -2705,7 +2716,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
seq_printf(seq, "Connections in use: %u\n",
atomic_read(&t->conns_in_use));
- seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+ seq_printf(seq, "TID range: %u..%u", tid_start,
+ tid_start + t->ntids - 1);
seq_printf(seq, ", in use: %u\n",
atomic_read(&t->tids_in_use));
}
@@ -2794,18 +2806,6 @@ static const struct file_operations blocked_fl_fops = {
.llseek = generic_file_llseek,
};
-struct mem_desc {
- unsigned int base;
- unsigned int limit;
- unsigned int idx;
-};
-
-static int mem_desc_cmp(const void *a, const void *b)
-{
- return ((const struct mem_desc *)a)->base -
- ((const struct mem_desc *)b)->base;
-}
-
static void mem_region_show(struct seq_file *seq, const char *name,
unsigned int from, unsigned int to)
{
@@ -2819,250 +2819,60 @@ static void mem_region_show(struct seq_file *seq, const char *name,
static int meminfo_show(struct seq_file *seq, void *v)
{
static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
- "MC0:", "MC1:"};
- static const char * const region[] = {
- "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
- "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
- "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
- "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
- "RQUDP region:", "PBL region:", "TXPBL region:",
- "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
- "On-chip queues:"
- };
-
- int i, n;
- u32 lo, hi, used, alloc;
- struct mem_desc avail[4];
- struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */
- struct mem_desc *md = mem;
+ "MC0:", "MC1:", "HMA:"};
struct adapter *adap = seq->private;
+ struct cudbg_meminfo meminfo;
+ int i, rc;
- for (i = 0; i < ARRAY_SIZE(mem); i++) {
- mem[i].limit = 0;
- mem[i].idx = i;
- }
-
- /* Find and sort the populated memory ranges */
- i = 0;
- lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
- if (lo & EDRAM0_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
- avail[i].base = EDRAM0_BASE_G(hi) << 20;
- avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
- avail[i].idx = 0;
- i++;
- }
- if (lo & EDRAM1_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
- avail[i].base = EDRAM1_BASE_G(hi) << 20;
- avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
- avail[i].idx = 1;
- i++;
- }
-
- if (is_t5(adap->params.chip)) {
- if (lo & EXT_MEM0_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
- avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
- avail[i].limit =
- avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
- avail[i].idx = 3;
- i++;
- }
- if (lo & EXT_MEM1_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
- avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
- avail[i].limit =
- avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
- avail[i].idx = 4;
- i++;
- }
- } else {
- if (lo & EXT_MEM_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
- avail[i].base = EXT_MEM_BASE_G(hi) << 20;
- avail[i].limit =
- avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
- avail[i].idx = 2;
- i++;
- }
- }
- if (!i) /* no memory available */
- return 0;
- sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
-
- (md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
- (md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
- (md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
-
- /* the next few have explicit upper bounds */
- md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
- md->limit = md->base - 1 +
- t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
- PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
- md++;
-
- md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
- md->limit = md->base - 1 +
- t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
- PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
- md++;
-
- if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
- if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
- hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
- md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
- } else {
- hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
- md->base = t4_read_reg(adap,
- LE_DB_HASH_TBL_BASE_ADDR_A);
- }
- md->limit = 0;
- } else {
- md->base = 0;
- md->idx = ARRAY_SIZE(region); /* hide it */
- }
- md++;
-
-#define ulp_region(reg) do { \
- md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
- (md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
-} while (0)
-
- ulp_region(RX_ISCSI);
- ulp_region(RX_TDDP);
- ulp_region(TX_TPT);
- ulp_region(RX_STAG);
- ulp_region(RX_RQ);
- ulp_region(RX_RQUDP);
- ulp_region(RX_PBL);
- ulp_region(TX_PBL);
-#undef ulp_region
- md->base = 0;
- md->idx = ARRAY_SIZE(region);
- if (!is_t4(adap->params.chip)) {
- u32 size = 0;
- u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
- u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
-
- if (is_t5(adap->params.chip)) {
- if (sge_ctrl & VFIFO_ENABLE_F)
- size = DBVFIFO_SIZE_G(fifo_size);
- } else {
- size = T6_DBVFIFO_SIZE_G(fifo_size);
- }
-
- if (size) {
- md->base = BASEADDR_G(t4_read_reg(adap,
- SGE_DBVFIFO_BADDR_A));
- md->limit = md->base + (size << 2) - 1;
- }
- }
-
- md++;
-
- md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
- md->limit = 0;
- md++;
- md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
- md->limit = 0;
- md++;
-
- md->base = adap->vres.ocq.start;
- if (adap->vres.ocq.size)
- md->limit = md->base + adap->vres.ocq.size - 1;
- else
- md->idx = ARRAY_SIZE(region); /* hide it */
- md++;
-
- /* add any address-space holes, there can be up to 3 */
- for (n = 0; n < i - 1; n++)
- if (avail[n].limit < avail[n + 1].base)
- (md++)->base = avail[n].limit;
- if (avail[n].limit)
- (md++)->base = avail[n].limit;
-
- n = md - mem;
- sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+ memset(&meminfo, 0, sizeof(struct cudbg_meminfo));
+ rc = cudbg_fill_meminfo(adap, &meminfo);
+ if (rc)
+ return -ENXIO;
- for (lo = 0; lo < i; lo++)
- mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
- avail[lo].limit - 1);
+ for (i = 0; i < meminfo.avail_c; i++)
+ mem_region_show(seq, memory[meminfo.avail[i].idx],
+ meminfo.avail[i].base,
+ meminfo.avail[i].limit - 1);
seq_putc(seq, '\n');
- for (i = 0; i < n; i++) {
- if (mem[i].idx >= ARRAY_SIZE(region))
+ for (i = 0; i < meminfo.mem_c; i++) {
+ if (meminfo.mem[i].idx >= ARRAY_SIZE(cudbg_region))
continue; /* skip holes */
- if (!mem[i].limit)
- mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
- mem_region_show(seq, region[mem[i].idx], mem[i].base,
- mem[i].limit);
+ if (!meminfo.mem[i].limit)
+ meminfo.mem[i].limit =
+ i < meminfo.mem_c - 1 ?
+ meminfo.mem[i + 1].base - 1 : ~0;
+ mem_region_show(seq, cudbg_region[meminfo.mem[i].idx],
+ meminfo.mem[i].base, meminfo.mem[i].limit);
}
seq_putc(seq, '\n');
- lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
- hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
- mem_region_show(seq, "uP RAM:", lo, hi);
-
- lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
- hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
- mem_region_show(seq, "uP Extmem2:", lo, hi);
+ mem_region_show(seq, "uP RAM:", meminfo.up_ram_lo, meminfo.up_ram_hi);
+ mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo,
+ meminfo.up_extmem2_hi);
- lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
- PMRXMAXPAGE_G(lo),
- t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
- (lo & PMRXNUMCHN_F) ? 2 : 1);
+ meminfo.rx_pages_data[0], meminfo.rx_pages_data[1],
+ meminfo.rx_pages_data[2]);
- lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
- hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
- PMTXMAXPAGE_G(lo),
- hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
- hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
- seq_printf(seq, "%u p-structs\n\n",
- t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
-
- for (i = 0; i < 4; i++) {
- if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
- lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
- else
- lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
- if (is_t5(adap->params.chip)) {
- used = T5_USED_G(lo);
- alloc = T5_ALLOC_G(lo);
- } else {
- used = USED_G(lo);
- alloc = ALLOC_G(lo);
- }
+ meminfo.tx_pages_data[0], meminfo.tx_pages_data[1],
+ meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]);
+
+ seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs);
+
+ for (i = 0; i < 4; i++)
/* For T6 these are MAC buffer groups */
seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
- i, used, alloc);
- }
- for (i = 0; i < adap->params.arch.nchan; i++) {
- if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
- lo = t4_read_reg(adap,
- MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
- else
- lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
- if (is_t5(adap->params.chip)) {
- used = T5_USED_G(lo);
- alloc = T5_ALLOC_G(lo);
- } else {
- used = USED_G(lo);
- alloc = ALLOC_G(lo);
- }
+ i, meminfo.port_used[i], meminfo.port_alloc[i]);
+
+ for (i = 0; i < adap->params.arch.nchan; i++)
/* For T6 these are MAC buffer groups */
seq_printf(seq,
"Loopback %d using %u pages out of %u allocated\n",
- i, used, alloc);
- }
+ i, meminfo.loopback_used[i],
+ meminfo.loopback_alloc[i]);
+
return 0;
}
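
After the refactor, meminfo_show() only needs to close the open-ended regions returned by cudbg_fill_meminfo(): a zero limit means "runs to just before the next region's base", or to the top of the address space for the last entry. A compact sketch of that fix-up:

#include <stdint.h>

struct mem_desc {
	unsigned int base;
	unsigned int limit;	/* 0 = open-ended */
};

/* Close open-ended regions the way the loop above does. */
static void close_regions(struct mem_desc *mem, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!mem[i].limit)
			mem[i].limit = (i < n - 1) ?
				       mem[i + 1].base - 1 : ~0u;
}
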
@@ -3096,6 +2906,8 @@ static int chcr_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.error));
seq_printf(seq, "Fallback: %10u \n",
atomic_read(&adap->chcr_stats.fallback));
+ seq_printf(seq, "IPSec PDU: %10u\n",
+ atomic_read(&adap->chcr_stats.ipsec_cnt));
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index eb338212f5af..7852d98bad75 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -517,7 +517,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
else
return PORT_OTHER;
} else if (port_type == FW_PORT_TYPE_KR4_100G ||
- port_type == FW_PORT_TYPE_KR_SFP28) {
+ port_type == FW_PORT_TYPE_KR_SFP28 ||
+ port_type == FW_PORT_TYPE_KR_XLAUI) {
return PORT_NONE;
}
@@ -645,6 +646,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
break;
+ case FW_PORT_TYPE_KR_XLAUI:
+ SET_LMM(Backplane);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+ FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
+ break;
+
case FW_PORT_TYPE_CR2_QSFP:
SET_LMM(FIBRE);
SET_LMM(50000baseSR2_Full);
@@ -1396,6 +1404,101 @@ static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
return 0;
}
+static int cxgb4_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct port_info *pi = netdev_priv(dev);
+ u8 sff8472_comp, sff_diag_type, sff_rev;
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ if (!t4_is_inserted_mod_type(pi->mod_type))
+ return -EINVAL;
+
+ switch (pi->port_type) {
+ case FW_PORT_TYPE_SFP:
+ case FW_PORT_TYPE_QSA:
+ case FW_PORT_TYPE_SFP28:
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR,
+ SFF_8472_COMP_LEN, &sff8472_comp);
+ if (ret)
+ return ret;
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR,
+ SFP_DIAG_TYPE_LEN, &sff_diag_type);
+ if (ret)
+ return ret;
+
+ if (!sff8472_comp || (sff_diag_type & 4)) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+
+ case FW_PORT_TYPE_QSFP:
+ case FW_PORT_TYPE_QSFP_10G:
+ case FW_PORT_TYPE_CR_QSFP:
+ case FW_PORT_TYPE_CR2_QSFP:
+ case FW_PORT_TYPE_CR4_QSFP:
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, SFF_REV_ADDR,
+ SFF_REV_LEN, &sff_rev);
+ /* For QSFP type ports, revision value >= 3
+ * means the SFP is 8636 compliant.
+ */
+ if (ret)
+ return ret;
+ if (sff_rev >= 0x3) {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
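
The branch on sff8472_comp and the diag-type byte above follows SFF-8472: a module that doesn't declare 8472 compliance, or that requires an address change to reach its diagnostics (bit 2 of the diag-monitoring byte), only exposes the 256-byte A0h page. A standalone sketch of that decision (lengths per the ethtool constants):

#include <stdint.h>

#define SFF_8079_LEN	256	/* A0h page only */
#define SFF_8472_LEN	512	/* A0h plus the A2h diagnostics page */

/* How much module EEPROM is readable, mirroring the check above.
 * Bit 2 of the diag-type byte is "address change required", which
 * a simple two-page read cannot honour.
 */
static uint32_t module_eeprom_len(uint8_t sff8472_comp, uint8_t diag_type)
{
	if (!sff8472_comp || (diag_type & 0x4))
		return SFF_8079_LEN;
	return SFF_8472_LEN;
}
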
+
+static int cxgb4_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eprom, u8 *data)
+{
+ int ret = 0, offset = eprom->offset, len = eprom->len;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ memset(data, 0, eprom->len);
+ if (offset + len <= I2C_PAGE_SIZE)
+ return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, offset, len, data);
+
+	/* offset + len spans the 0xa0 and 0xa2 pages */
+ if (offset <= I2C_PAGE_SIZE) {
+ /* read 0xa0 page */
+ len = I2C_PAGE_SIZE - offset;
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, offset, len, data);
+ if (ret)
+ return ret;
+ offset = I2C_PAGE_SIZE;
+ /* Remaining bytes to be read from second page =
+ * Total length - bytes read from first page
+ */
+ len = eprom->len - len;
+ }
+ /* Read additional optical diagnostics from page 0xa2 if supported */
+ return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2,
+ offset, len, &data[eprom->len - len]);
+}
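
cxgb4_get_module_eeprom() above splits a single offset/length request across the A0h page and the diagnostics page. A sketch of just that split arithmetic, with a dummy reader standing in for t4_i2c_rd() (page argument and fill value are illustrative only):

#include <stdint.h>
#include <string.h>

#define I2C_PAGE_SIZE	256

/* Stand-in for a real I2C transfer: fill 'len' bytes. */
static int page_read(uint8_t page, uint32_t offset, uint32_t len,
		     uint8_t *buf)
{
	memset(buf, page, len);
	return 0;
}

static int eeprom_read(uint32_t offset, uint32_t total, uint8_t *data)
{
	uint32_t len = total;
	int ret;

	if (offset + len <= I2C_PAGE_SIZE)
		return page_read(0xa0, offset, len, data);

	if (offset <= I2C_PAGE_SIZE) {
		/* Bytes still living on the first page. */
		len = I2C_PAGE_SIZE - offset;
		ret = page_read(0xa0, offset, len, data);
		if (ret)
			return ret;
		offset = I2C_PAGE_SIZE;
		len = total - len;	/* remainder from page 0xa2 */
	}
	return page_read(0xa2, offset, len, &data[total - len]);
}
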
+
static const struct ethtool_ops cxgb_ethtool_ops = {
.get_link_ksettings = get_link_ksettings,
.set_link_ksettings = set_link_ksettings,
@@ -1430,6 +1533,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.set_dump = set_dump,
.get_dump_flag = get_dump_flag,
.get_dump_data = get_dump_data,
+ .get_module_info = cxgb4_get_module_info,
+ .get_module_eeprom = cxgb4_get_module_eeprom,
};
void cxgb4_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 5980f308a253..3177b0c9bd2d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -439,19 +439,32 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
if (ftid >= t->nftids)
ftid = -1;
} else {
- ftid = bitmap_find_free_region(t->ftid_bmap, t->nftids, 2);
- if (ftid < 0)
- goto out_unlock;
+ if (is_t6(adap->params.chip)) {
+ ftid = bitmap_find_free_region(t->ftid_bmap,
+ t->nftids, 1);
+ if (ftid < 0)
+ goto out_unlock;
+
+ /* this is only a lookup, keep the found region
+ * unallocated
+ */
+ bitmap_release_region(t->ftid_bmap, ftid, 1);
+ } else {
+ ftid = bitmap_find_free_region(t->ftid_bmap,
+ t->nftids, 2);
+ if (ftid < 0)
+ goto out_unlock;
- /* this is only a lookup, keep the found region unallocated */
- bitmap_release_region(t->ftid_bmap, ftid, 2);
+ bitmap_release_region(t->ftid_bmap, ftid, 2);
+ }
}
out_unlock:
spin_unlock_bh(&t->ftid_lock);
return ftid;
}
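
bitmap_find_free_region() takes an allocation order, not a slot count, which is why the T6 branch above passes 1 (2^1 = two naturally aligned slots for a CLIP-backed IPv6 filter) while older chips pass 2 (four slots). A tiny illustration of the order-to-slots relationship:

/* IPv6 filter footprint in LE-TCAM slots: order 1 (two slots) on
 * T6 with CLIP enabled, order 2 (four slots) before T6.
 * bitmap_find_free_region(bmap, n, order) reserves 2^order bits.
 */
static int ipv6_filter_order(int is_t6)
{
	return is_t6 ? 1 : 2;
}

/* 1 << ipv6_filter_order(1) == 2;  1 << ipv6_filter_order(0) == 4 */
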
-static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
+static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
+ unsigned int chip_ver)
{
spin_lock_bh(&t->ftid_lock);
@@ -460,22 +473,31 @@ static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
return -EBUSY;
}
- if (family == PF_INET)
+ if (family == PF_INET) {
__set_bit(fidx, t->ftid_bmap);
- else
- bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+ } else {
+ if (chip_ver < CHELSIO_T6)
+ bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+ else
+ bitmap_allocate_region(t->ftid_bmap, fidx, 1);
+ }
spin_unlock_bh(&t->ftid_lock);
return 0;
}
-static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family)
+static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
+ unsigned int chip_ver)
{
spin_lock_bh(&t->ftid_lock);
- if (family == PF_INET)
+ if (family == PF_INET) {
__clear_bit(fidx, t->ftid_bmap);
- else
- bitmap_release_region(t->ftid_bmap, fidx, 2);
+ } else {
+ if (chip_ver < CHELSIO_T6)
+ bitmap_release_region(t->ftid_bmap, fidx, 2);
+ else
+ bitmap_release_region(t->ftid_bmap, fidx, 1);
+ }
spin_unlock_bh(&t->ftid_lock);
}
@@ -694,7 +716,7 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
if (f->smt)
cxgb4_smt_release(f->smt);
- if (f->fs.hash && f->fs.type)
+ if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
/* The zeroing of the filter rule below clears the filter valid,
@@ -1189,6 +1211,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
unsigned int max_fidx, fidx;
struct filter_entry *f;
u32 iconf;
@@ -1225,12 +1248,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
* insertion.
*/
if (fs->type == 0) { /* IPv4 */
- /* If our IPv4 filter isn't being written to a
- * multiple of four filter index and there's an IPv6
- * filter at the multiple of 4 base slot, then we
- * prevent insertion.
+			/* For T6, if our IPv4 filter isn't being written to a
+			 * multiple-of-two filter index and there's an IPv6
+			 * filter at the multiple-of-2 base slot, then we need
+			 * to delete that IPv6 filter ...
+			 * For adapters below T6, an IPv6 filter occupies four
+			 * entries, so the check is against the multiple-of-4 slot.
*/
- fidx = filter_id & ~0x3;
+ if (chip_ver < CHELSIO_T6)
+ fidx = filter_id & ~0x3;
+ else
+ fidx = filter_id & ~0x1;
+
if (fidx != filter_id &&
adapter->tids.ftid_tab[fidx].fs.type) {
f = &adapter->tids.ftid_tab[fidx];
@@ -1242,23 +1271,42 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
}
}
} else { /* IPv6 */
- /* Ensure that the IPv6 filter is aligned on a
- * multiple of 4 boundary.
- */
- if (filter_id & 0x3) {
- dev_err(adapter->pdev_dev,
- "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
- return -EINVAL;
- }
+ if (chip_ver < CHELSIO_T6) {
+ /* Ensure that the IPv6 filter is aligned on a
+ * multiple of 4 boundary.
+ */
+ if (filter_id & 0x3) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
+ return -EINVAL;
+ }
- /* Check all except the base overlapping IPv4 filter slots. */
- for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
+ /* Check all except the base overlapping IPv4 filter
+ * slots.
+ */
+ for (fidx = filter_id + 1; fidx < filter_id + 4;
+ fidx++) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
+ fidx);
+ return -EBUSY;
+ }
+ }
+ } else {
+ /* For T6, CLIP being enabled, IPv6 filter would occupy
+ * 2 entries.
+ */
+ if (filter_id & 0x1)
+ return -EINVAL;
+ /* Check overlapping IPv4 filter slot */
+ fidx = filter_id + 1;
f = &adapter->tids.ftid_tab[fidx];
if (f->valid) {
- dev_err(adapter->pdev_dev,
- "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
- fidx);
- return -EINVAL;
+ pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
+ __func__, fidx);
+ return -EBUSY;
}
}
}
@@ -1272,16 +1320,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
fidx = filter_id + adapter->tids.ftid_base;
ret = cxgb4_set_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
if (ret)
return ret;
	/* Check to make sure the filter requested is writable ... */
ret = writable_filter(f);
if (ret) {
/* Clear the bits we have set above */
cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
return ret;
}
@@ -1291,6 +1341,17 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
if (f->valid)
clear_filter(adapter, f);
+ if (is_t6(adapter->params.chip) && fs->type &&
+ ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
+ IPV6_ADDR_ANY) {
+ ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
+ if (ret) {
+ cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6,
+ chip_ver);
+ return ret;
+ }
+ }
+
/* Convert the filter specification into our internal format.
* We copy the PF/VF specification into the Outer VLAN field
* here so the rest of the code -- including the interface to
@@ -1316,7 +1377,8 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
ret = set_filter_wr(adapter, filter_id);
if (ret) {
cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
clear_filter(adapter, f);
}
@@ -1394,6 +1456,7 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
struct filter_entry *f;
unsigned int max_fidx;
int ret;
@@ -1419,7 +1482,8 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
if (f->valid) {
f->ctx = ctx;
cxgb4_clear_ftid(&adapter->tids, filter_id,
- f->fs.type ? PF_INET6 : PF_INET);
+ f->fs.type ? PF_INET6 : PF_INET,
+ chip_ver);
return del_filter_wr(adapter, filter_id);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6f900ffe25cc..1ca2a39ed0f8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -65,6 +65,7 @@
#include <net/addrconf.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
+#include <net/udp_tunnel.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
@@ -101,7 +102,9 @@ const char cxgb4_driver_version[] = DRV_VERSION;
*/
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
static const struct pci_device_id cxgb4_pci_tbl[] = {
-#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+#define CXGB4_UNIFIED_PF 0x4
+
+#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF
/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
* called for both.
@@ -109,7 +112,7 @@ const char cxgb4_driver_version[] = DRV_VERSION;
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0
#define CH_PCI_ID_TABLE_ENTRY(devid) \
- {PCI_VDEVICE(CHELSIO, (devid)), 4}
+ {PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
{ 0, } \
@@ -1673,7 +1676,7 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
- return t4_sge_ctxt_flush(adap, adap->mbox);
+ return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
@@ -2604,7 +2607,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
}
#ifdef CONFIG_PCI_IOV
-static int dummy_open(struct net_device *dev)
+static int cxgb4_mgmt_open(struct net_device *dev)
{
/* Turn carrier off since we don't have to transmit anything on this
* interface.
@@ -2614,39 +2617,44 @@ static int dummy_open(struct net_device *dev)
}
/* Fill MAC address that will be assigned by the FW */
-static void fill_vf_station_mac_addr(struct adapter *adap)
+static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
{
- unsigned int i;
u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
+ unsigned int i, vf, nvfs;
+ u16 a, b;
int err;
u8 *na;
- u16 a, b;
+ adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
+ PCI_CAP_ID_VPD);
err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
- if (!err) {
- na = adap->params.vpd.na;
- for (i = 0; i < ETH_ALEN; i++)
- hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
- hex2val(na[2 * i + 1]));
- a = (hw_addr[0] << 8) | hw_addr[1];
- b = (hw_addr[1] << 8) | hw_addr[2];
- a ^= b;
- a |= 0x0200; /* locally assigned Ethernet MAC address */
- a &= ~0x0100; /* not a multicast Ethernet MAC address */
- macaddr[0] = a >> 8;
- macaddr[1] = a & 0xff;
-
- for (i = 2; i < 5; i++)
- macaddr[i] = hw_addr[i + 1];
-
- for (i = 0; i < adap->num_vfs; i++) {
- macaddr[5] = adap->pf * 16 + i;
- ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr);
- }
+ if (err)
+ return;
+
+ na = adap->params.vpd.na;
+ for (i = 0; i < ETH_ALEN; i++)
+ hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+ hex2val(na[2 * i + 1]));
+
+ a = (hw_addr[0] << 8) | hw_addr[1];
+ b = (hw_addr[1] << 8) | hw_addr[2];
+ a ^= b;
+ a |= 0x0200; /* locally assigned Ethernet MAC address */
+ a &= ~0x0100; /* not a multicast Ethernet MAC address */
+ macaddr[0] = a >> 8;
+ macaddr[1] = a & 0xff;
+
+ for (i = 2; i < 5; i++)
+ macaddr[i] = hw_addr[i + 1];
+
+ for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
+ vf < nvfs; vf++) {
+ macaddr[5] = adap->pf * 16 + vf;
+ ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
}
}
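
The MAC synthesis above is deterministic: fold two 16-bit words of the adapter's VPD network address together, force the locally-administered bit, clear the multicast bit, and encode PF and VF in the final octet. A standalone sketch of the derivation (hex2val reimplemented for illustration):

#include <stdint.h>

static uint8_t hex2val(char c)
{
	return (c >= '0' && c <= '9') ? c - '0' : (c | 0x20) - 'a' + 10;
}

/* Derive a locally administered unicast MAC for VF 'vf' of PF 'pf'
 * from a 12-character VPD network-address string, following
 * cxgb4_mgmt_fill_vf_station_mac_addr() above.
 */
static void vf_mac(const char *na, unsigned int pf, unsigned int vf,
		   uint8_t mac[6])
{
	uint8_t hw[6];
	uint16_t a, b;
	int i;

	for (i = 0; i < 6; i++)
		hw[i] = hex2val(na[2 * i]) * 16 + hex2val(na[2 * i + 1]);

	a = (hw[0] << 8) | hw[1];
	b = (hw[1] << 8) | hw[2];
	a ^= b;
	a |= 0x0200;	/* locally assigned */
	a &= ~0x0100;	/* not multicast */

	mac[0] = a >> 8;
	mac[1] = a & 0xff;
	for (i = 2; i < 5; i++)
		mac[i] = hw[i + 1];
	mac[5] = pf * 16 + vf;
}
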
-static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -2668,8 +2676,8 @@ static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
return ret;
}
-static int cxgb_get_vf_config(struct net_device *dev,
- int vf, struct ifla_vf_info *ivi)
+static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
+ int vf, struct ifla_vf_info *ivi)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -2683,8 +2691,8 @@ static int cxgb_get_vf_config(struct net_device *dev,
return 0;
}
-static int cxgb_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct port_info *pi = netdev_priv(dev);
unsigned int phy_port_id;
@@ -2695,8 +2703,8 @@ static int cxgb_get_phys_port_id(struct net_device *dev,
return 0;
}
-static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
- int max_tx_rate)
+static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -2775,7 +2783,30 @@ static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
return 0;
}
-#endif
+static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
+ u16 vlan, u8 qos, __be16 vlan_proto)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ int ret;
+
+ if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
+ return -EINVAL;
+
+ if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
+ return -EPROTONOSUPPORT;
+
+ ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
+ if (!ret) {
+ adap->vfinfo[vf].vlan = vlan;
+ return 0;
+ }
+
+ dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
+ ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
+ return ret;
+}
+#endif /* CONFIG_PCI_IOV */
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
@@ -2897,9 +2928,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
static int cxgb_setup_tc_flower(struct net_device *dev,
struct tc_cls_flower_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return cxgb4_tc_flower_replace(dev, cls_flower);
@@ -2915,9 +2943,6 @@ static int cxgb_setup_tc_flower(struct net_device *dev,
static int cxgb_setup_tc_cls_u32(struct net_device *dev,
struct tc_cls_u32_offload *cls_u32)
{
- if (cls_u32->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_u32->command) {
case TC_CLSU32_NEW_KNODE:
case TC_CLSU32_REPLACE_KNODE:
@@ -2943,7 +2968,7 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return -EINVAL;
}
- if (!tc_can_offload(dev))
+ if (!tc_cls_can_offload_and_chain0(dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -2987,6 +3012,174 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
+static void cxgb_del_udp_tunnel(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+ int ret = 0, i;
+
+ if (chip_ver < CHELSIO_T6)
+ return;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (!adapter->vxlan_port_cnt ||
+ adapter->vxlan_port != ti->port)
+ return; /* Invalid VxLAN destination port */
+
+ adapter->vxlan_port_cnt--;
+ if (adapter->vxlan_port_cnt)
+ return;
+
+ adapter->vxlan_port = 0;
+ t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!adapter->geneve_port_cnt ||
+ adapter->geneve_port != ti->port)
+ return; /* Invalid GENEVE destination port */
+
+ adapter->geneve_port_cnt--;
+ if (adapter->geneve_port_cnt)
+ return;
+
+ adapter->geneve_port = 0;
+		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
+		break;
+ default:
+ return;
+ }
+
+ /* Matchall mac entries can be deleted only after all tunnel ports
+ * are brought down or removed.
+ */
+ if (!adapter->rawf_cnt)
+ return;
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ ret = t4_free_raw_mac_filt(adapter, pi->viid,
+ match_all_mac, match_all_mac,
+ adapter->rawf_start +
+ pi->port_id,
+ 1, pi->port_id, true);
+ if (ret < 0) {
+ netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
+ i);
+ return;
+ }
+ atomic_dec(&adapter->mps_encap[adapter->rawf_start +
+ pi->port_id].refcnt);
+ }
+}
+
+static void cxgb_add_udp_tunnel(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+ int i, ret;
+
+ if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
+ return;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+		/* The VxLAN add-port callback can be invoked with the
+		 * same port for both IPv4 and IPv6.  Don't disable the
+		 * offload when the port was added for both protocols
+		 * and only one of them is later removed.
+		 */
+ if (adapter->vxlan_port_cnt &&
+ adapter->vxlan_port == ti->port) {
+ adapter->vxlan_port_cnt++;
+ return;
+ }
+
+ /* We will support only one VxLAN port */
+ if (adapter->vxlan_port_cnt) {
+ netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
+ be16_to_cpu(adapter->vxlan_port),
+ be16_to_cpu(ti->port));
+ return;
+ }
+
+ adapter->vxlan_port = ti->port;
+ adapter->vxlan_port_cnt = 1;
+
+ t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
+ VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (adapter->geneve_port_cnt &&
+ adapter->geneve_port == ti->port) {
+ adapter->geneve_port_cnt++;
+ return;
+ }
+
+ /* We will support only one GENEVE port */
+ if (adapter->geneve_port_cnt) {
+ netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
+ be16_to_cpu(adapter->geneve_port),
+ be16_to_cpu(ti->port));
+ return;
+ }
+
+ adapter->geneve_port = ti->port;
+ adapter->geneve_port_cnt = 1;
+
+ t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
+			     GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
+		break;
+ default:
+ return;
+ }
+
+ /* Create a 'match all' mac filter entry for inner mac,
+ * if raw mac interface is supported. Once the linux kernel provides
+ * driver entry points for adding/deleting the inner mac addresses,
+	 * we will remove this 'match all' entry and fall back to adding
+ * exact match filters.
+ */
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+
+ ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
+ match_all_mac,
+ match_all_mac,
+ adapter->rawf_start +
+ pi->port_id,
+ 1, pi->port_id, true);
+ if (ret < 0) {
+ netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
+ be16_to_cpu(ti->port));
+ cxgb_del_udp_tunnel(netdev, ti);
+ return;
+ }
+ atomic_inc(&adapter->mps_encap[ret].refcnt);
+ }
+}
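
Both tunnel callbacks above implement a single-slot, reference-counted port: the same UDP port registered for IPv4 and IPv6 bumps the count, and the hardware register is only touched on the first add and the last delete. A minimal sketch of that pattern (types simplified, byte order ignored):

#include <stdbool.h>
#include <stdint.h>

struct tunnel_slot {
	uint16_t port;
	unsigned int cnt;
};

/* Returns true when the hardware should be programmed. */
static bool tunnel_add(struct tunnel_slot *s, uint16_t port)
{
	if (s->cnt && s->port == port) {
		s->cnt++;	/* same port, second protocol */
		return false;
	}
	if (s->cnt)
		return false;	/* only one offloaded port allowed */
	s->port = port;
	s->cnt = 1;
	return true;
}

/* Returns true when the hardware should be cleared. */
static bool tunnel_del(struct tunnel_slot *s, uint16_t port)
{
	if (!s->cnt || s->port != port)
		return false;	/* unknown port */
	if (--s->cnt)
		return false;	/* still referenced */
	s->port = 0;
	return true;
}
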
+
+static netdev_features_t cxgb_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
+ return features;
+
+ /* Check if hw supports offload for this packet */
+ if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
+ return features;
+
+ /* Offload is not supported for this encapsulated packet */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
static netdev_features_t cxgb_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -3018,20 +3211,25 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#endif /* CONFIG_CHELSIO_T4_FCOE */
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
+ .ndo_udp_tunnel_add = cxgb_add_udp_tunnel,
+ .ndo_udp_tunnel_del = cxgb_del_udp_tunnel,
+ .ndo_features_check = cxgb_features_check,
.ndo_fix_features = cxgb_fix_features,
};
#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
- .ndo_open = dummy_open,
- .ndo_set_vf_mac = cxgb_set_vf_mac,
- .ndo_get_vf_config = cxgb_get_vf_config,
- .ndo_set_vf_rate = cxgb_set_vf_rate,
- .ndo_get_phys_port_id = cxgb_get_phys_port_id,
+ .ndo_open = cxgb4_mgmt_open,
+ .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
+ .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
+ .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
+ .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
+ .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
};
#endif
-static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
struct adapter *adapter = netdev2adap(dev);
@@ -3043,7 +3241,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
- .get_drvinfo = get_drvinfo,
+ .get_drvinfo = cxgb4_mgmt_get_drvinfo,
};
void t4_fatal_err(struct adapter *adap)
@@ -4096,7 +4294,7 @@ static int adap_init0(struct adapter *adap)
} else {
adap->vres.ncrypto_fc = val[0];
}
- adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
+ adap->params.crypto = ntohs(caps_cmd.cryptocaps);
adap->num_uld += 1;
}
#undef FW_PARAM_PFVF
@@ -4759,7 +4957,7 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
}
#ifdef CONFIG_PCI_IOV
-static void dummy_setup(struct net_device *dev)
+static void cxgb4_mgmt_setup(struct net_device *dev)
{
dev->type = ARPHRD_NONE;
dev->mtu = 0;
@@ -4775,38 +4973,6 @@ static void dummy_setup(struct net_device *dev)
dev->needs_free_netdev = true;
}
-static int config_mgmt_dev(struct pci_dev *pdev)
-{
- struct adapter *adap = pci_get_drvdata(pdev);
- struct net_device *netdev;
- struct port_info *pi;
- char name[IFNAMSIZ];
- int err;
-
- snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
- netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
- dummy_setup);
- if (!netdev)
- return -ENOMEM;
-
- pi = netdev_priv(netdev);
- pi->adapter = adap;
- pi->tx_chan = adap->pf % adap->params.nports;
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- adap->port[0] = netdev;
- pi->port_id = 0;
-
- err = register_netdev(adap->port[0]);
- if (err) {
- pr_info("Unable to register VF mgmt netdev %s\n", name);
- free_netdev(adap->port[0]);
- adap->port[0] = NULL;
- return err;
- }
- return 0;
-}
-
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
struct adapter *adap = pci_get_drvdata(pdev);
@@ -4818,7 +4984,7 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
/* Check if cxgb4 is the MASTER and fw is initialized */
if (!(pcie_fw & PCIE_FW_INIT_F) ||
!(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
- PCIE_FW_MASTER_G(pcie_fw) != 4) {
+ PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) {
dev_warn(&pdev->dev,
"cxgb4 driver needs to be MASTER to support SRIOV\n");
return -EOPNOTSUPP;
@@ -4830,46 +4996,132 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
if (current_vfs && pci_vfs_assigned(pdev)) {
dev_err(&pdev->dev,
"Cannot modify SR-IOV while VFs are assigned\n");
- num_vfs = current_vfs;
- return num_vfs;
+ return current_vfs;
}
-
- /* Disable SRIOV when zero is passed.
- * One needs to disable SRIOV before modifying it, else
- * stack throws the below warning:
- * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
+ /* Note that the upper-level code ensures that we're never called with
+ * a non-zero "num_vfs" when we already have VFs instantiated. But
+ * it never hurts to code defensively.
*/
+ if (num_vfs != 0 && current_vfs != 0)
+ return -EBUSY;
+
+ /* Nothing to do for no change. */
+ if (num_vfs == current_vfs)
+ return num_vfs;
+
+ /* Disable SRIOV when zero is passed. */
if (!num_vfs) {
pci_disable_sriov(pdev);
- if (adap->port[0]) {
- unregister_netdev(adap->port[0]);
- adap->port[0] = NULL;
- }
+ /* free VF Management Interface */
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+
/* free VF resources */
+ adap->num_vfs = 0;
kfree(adap->vfinfo);
adap->vfinfo = NULL;
- adap->num_vfs = 0;
- return num_vfs;
+ return 0;
}
- if (num_vfs != current_vfs) {
- err = pci_enable_sriov(pdev, num_vfs);
+ if (!current_vfs) {
+ struct fw_pfvf_cmd port_cmd, port_rpl;
+ struct net_device *netdev;
+ unsigned int pmask, port;
+ struct pci_dev *pbridge;
+ struct port_info *pi;
+ char name[IFNAMSIZ];
+ u32 devcap2;
+ u16 flags;
+ int pos;
+
+ /* If we want to instantiate Virtual Functions, then our
+		 * parent bridge's PCI-E needs to support Alternative
+		 * Routing-ID Interpretation (ARI) because our VFs will
+		 * show up at function offset 8 and above.
+ */
+ pbridge = pdev->bus->self;
+ pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
+ pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
+ pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
+
+ if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
+ !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
+ /* Our parent bridge does not support ARI so issue a
+ * warning and skip instantiating the VFs. They
+ * won't be reachable.
+ */
+ dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
+ pbridge->bus->number, PCI_SLOT(pbridge->devfn),
+ PCI_FUNC(pbridge->devfn));
+ return -ENOTSUPP;
+ }
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_PFVF_CMD_PFN_V(adap->pf) |
+ FW_PFVF_CMD_VFN_V(0));
+ port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
+ err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
+ &port_rpl);
if (err)
return err;
+ pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
+ port = ffs(pmask) - 1;
+ /* Allocate VF Management Interface. */
+ snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
+ adap->pf);
+ netdev = alloc_netdev(sizeof(struct port_info),
+ name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
+ if (!netdev)
+ return -ENOMEM;
- adap->num_vfs = num_vfs;
- err = config_mgmt_dev(pdev);
- if (err)
+ pi = netdev_priv(netdev);
+ pi->adapter = adap;
+ pi->lport = port;
+ pi->tx_chan = port;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adap->port[0] = netdev;
+ pi->port_id = 0;
+
+ err = register_netdev(adap->port[0]);
+ if (err) {
+ pr_info("Unable to register VF mgmt netdev %s\n", name);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
return err;
+ }
+ /* Allocate and set up VF Information. */
+ adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
+ sizeof(struct vf_info), GFP_KERNEL);
+ if (!adap->vfinfo) {
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+ return -ENOMEM;
+ }
+ cxgb4_mgmt_fill_vf_station_mac_addr(adap);
+ }
+ /* Instantiate the requested number of VFs. */
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ pr_info("Unable to instantiate %d VFs\n", num_vfs);
+ if (!current_vfs) {
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+ kfree(adap->vfinfo);
+ adap->vfinfo = NULL;
+ }
+ return err;
}
- adap->vfinfo = kcalloc(adap->num_vfs,
- sizeof(struct vf_info), GFP_KERNEL);
- if (adap->vfinfo)
- fill_vf_station_mac_addr(adap);
+ adap->num_vfs = num_vfs;
return num_vfs;
}
-#endif
+#endif /* CONFIG_PCI_IOV */
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -4882,9 +5134,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 whoami, pl_rev;
enum chip_type chip;
static int adap_idx = 1;
-#ifdef CONFIG_PCI_IOV
- u32 v, port_vec;
-#endif
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -4908,6 +5157,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ err = -ENOMEM;
+ goto out_unmap_bar0;
+ }
+
+ adapter->regs = regs;
err = t4_wait_dev_ready(regs);
if (err < 0)
goto out_unmap_bar0;
@@ -4918,13 +5174,29 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
chip = get_chip_type(pdev, pl_rev);
func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
+
+ adapter->pdev = pdev;
+ adapter->pdev_dev = &pdev->dev;
+ adapter->name = pci_name(pdev);
+ adapter->mbox = func;
+ adapter->pf = func;
+ adapter->msg_enable = DFLT_MSG_ENABLE;
+ adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+ (sizeof(struct mbox_cmd) *
+ T4_OS_LOG_MBOX_CMDS),
+ GFP_KERNEL);
+ if (!adapter->mbox_log) {
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ spin_lock_init(&adapter->mbox_lock);
+ INIT_LIST_HEAD(&adapter->mlist.list);
+ pci_set_drvdata(pdev, adapter);
+
if (func != ent->driver_data) {
-#ifndef CONFIG_PCI_IOV
- iounmap(regs);
-#endif
pci_disable_device(pdev);
pci_save_state(pdev); /* to restore SR-IOV later */
- goto sriov;
+ return 0;
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
@@ -4933,53 +5205,30 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
"coherent allocations\n");
- goto out_unmap_bar0;
+ goto out_free_adapter;
}
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto out_unmap_bar0;
+ goto out_free_adapter;
}
}
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
-
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
- if (!adapter) {
- err = -ENOMEM;
- goto out_unmap_bar0;
- }
adap_idx++;
-
adapter->workq = create_singlethread_workqueue("cxgb4");
if (!adapter->workq) {
err = -ENOMEM;
goto out_free_adapter;
}
- adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
- (sizeof(struct mbox_cmd) *
- T4_OS_LOG_MBOX_CMDS),
- GFP_KERNEL);
- if (!adapter->mbox_log) {
- err = -ENOMEM;
- goto out_free_adapter;
- }
adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
/* PCI device has been enabled */
adapter->flags |= DEV_ENABLED;
-
- adapter->regs = regs;
- adapter->pdev = pdev;
- adapter->pdev_dev = &pdev->dev;
- adapter->name = pci_name(pdev);
- adapter->mbox = func;
- adapter->pf = func;
- adapter->msg_enable = DFLT_MSG_ENABLE;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
@@ -5002,9 +5251,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
spin_lock_init(&adapter->win0_lock);
- spin_lock_init(&adapter->mbox_lock);
-
- INIT_LIST_HEAD(&adapter->mlist.list);
INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
INIT_WORK(&adapter->db_full_task, process_db_full);
@@ -5080,6 +5326,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXCSUM | NETIF_F_RXHASH |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_TC;
+
+ if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
@@ -5273,58 +5523,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
setup_fw_sge_queues(adapter);
return 0;
-sriov:
-#ifdef CONFIG_PCI_IOV
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
- if (!adapter) {
- err = -ENOMEM;
- goto free_pci_region;
- }
-
- adapter->pdev = pdev;
- adapter->pdev_dev = &pdev->dev;
- adapter->name = pci_name(pdev);
- adapter->mbox = func;
- adapter->pf = func;
- adapter->regs = regs;
- adapter->adap_idx = adap_idx;
- adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
- (sizeof(struct mbox_cmd) *
- T4_OS_LOG_MBOX_CMDS),
- GFP_KERNEL);
- if (!adapter->mbox_log) {
- err = -ENOMEM;
- goto free_adapter;
- }
- spin_lock_init(&adapter->mbox_lock);
- INIT_LIST_HEAD(&adapter->mlist.list);
-
- v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
- err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
- &v, &port_vec);
- if (err < 0) {
- dev_err(adapter->pdev_dev, "Could not fetch port params\n");
- goto free_mbox_log;
- }
-
- adapter->params.nports = hweight32(port_vec);
- pci_set_drvdata(pdev, adapter);
- return 0;
-
-free_mbox_log:
- kfree(adapter->mbox_log);
- free_adapter:
- kfree(adapter);
- free_pci_region:
- iounmap(regs);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
- return err;
-#else
- return 0;
-#endif
-
out_free_dev:
free_some_resources(adapter);
if (adapter->flags & USING_MSIX)
@@ -5416,14 +5614,7 @@ static void remove_one(struct pci_dev *pdev)
}
#ifdef CONFIG_PCI_IOV
else {
- if (adapter->port[0])
- unregister_netdev(adapter->port[0]);
- iounmap(adapter->regs);
- kfree(adapter->vfinfo);
- kfree(adapter->mbox_log);
- kfree(adapter);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
+ cxgb4_iov_configure(adapter->pdev, 0);
}
#endif
}
@@ -5467,18 +5658,6 @@ static void shutdown_one(struct pci_dev *pdev)
if (adapter->flags & FW_OK)
t4_fw_bye(adapter, adapter->mbox);
}
-#ifdef CONFIG_PCI_IOV
- else {
- if (adapter->port[0])
- unregister_netdev(adapter->port[0]);
- iounmap(adapter->regs);
- kfree(adapter->vfinfo);
- kfree(adapter->mbox_log);
- kfree(adapter);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
- }
-#endif
}
static struct pci_driver cxgb4_driver = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index d4a548a6a55c..36563364bae7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -43,7 +43,7 @@
#define STATS_CHECK_PERIOD (HZ / 2)
-struct ch_tc_pedit_fields pedits[] = {
+static struct ch_tc_pedit_fields pedits[] = {
PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
@@ -111,6 +111,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
ethtype_mask = 0;
}
+ if (ethtype_key == ETH_P_IPV6)
+ fs->type = 1;
+
fs->val.ethtype = ethtype_key;
fs->mask.ethtype = ethtype_mask;
fs->val.proto = key->ip_proto;
@@ -205,8 +208,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
VLAN_PRIO_SHIFT);
vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
VLAN_PRIO_SHIFT);
- fs->val.ivlan = cpu_to_be16(vlan_tci);
- fs->mask.ivlan = cpu_to_be16(vlan_tci_mask);
+ fs->val.ivlan = vlan_tci;
+ fs->mask.ivlan = vlan_tci_mask;
/* Chelsio adapters use ivlan_vld bit to match vlan packets
* as 802.1Q. Also, when vlan tag is present in packets,
@@ -405,9 +408,7 @@ static void cxgb4_process_flow_actions(struct net_device *in,
} else if (is_tcf_gact_shot(a)) {
fs->action = FILTER_DROP;
} else if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
- struct net_device *out = __dev_get_by_index(dev_net(in),
- ifindex);
+ struct net_device *out = tcf_mirred_dev(a);
struct port_info *pi = netdev_priv(out);
fs->action = FILTER_SWITCH;
@@ -582,14 +583,14 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
/* Do nothing */
} else if (is_tcf_mirred_egress_redirect(a)) {
struct adapter *adap = netdev2adap(dev);
- struct net_device *n_dev;
- unsigned int i, ifindex;
+ struct net_device *n_dev, *target_dev;
+ unsigned int i;
bool found = false;
- ifindex = tcf_mirred_ifindex(a);
+ target_dev = tcf_mirred_dev(a);
for_each_port(adap, i) {
n_dev = adap->port[i];
- if (ifindex == n_dev->ifindex) {
+ if (target_dev == n_dev) {
found = true;
break;
}
@@ -765,9 +766,7 @@ static void ch_flower_stats_handler(struct work_struct *work)
rhashtable_walk_enter(&adap->flower_tbl, &iter);
do {
- flower_entry = ERR_PTR(rhashtable_walk_start(&iter));
- if (IS_ERR(flower_entry))
- goto walk_stop;
+ rhashtable_walk_start(&iter);
while ((flower_entry = rhashtable_walk_next(&iter)) &&
!IS_ERR(flower_entry)) {
@@ -786,8 +785,9 @@ static void ch_flower_stats_handler(struct work_struct *work)
spin_unlock(&flower_entry->lock);
}
}
-walk_stop:
+
rhashtable_walk_stop(&iter);
+
} while (flower_entry == ERR_PTR(-EAGAIN));
rhashtable_walk_exit(&iter);
mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index cd0cd13a964d..ab174bcfbfb0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -114,14 +114,14 @@ static int fill_action_fields(struct adapter *adap,
/* Re-direct to specified port in hardware. */
if (is_tcf_mirred_egress_redirect(a)) {
- struct net_device *n_dev;
- unsigned int i, index;
+ struct net_device *n_dev, *target_dev;
bool found = false;
+ unsigned int i;
- index = tcf_mirred_ifindex(a);
+ target_dev = tcf_mirred_dev(a);
for_each_port(adap, i) {
n_dev = adap->port[i];
- if (index == n_dev->ifindex) {
+ if (target_dev == n_dev) {
fs->action = FILTER_SWITCH;
fs->eport = i;
found = true;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 71a315bc1409..6b5fea4532f3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -637,6 +637,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->nchan = adap->params.nports;
lld->nports = adap->params.nports;
lld->wr_cred = adap->params.ofldq_wr_cred;
+ lld->crypto = adap->params.crypto;
lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 08e709ab6dd4..1d37672902da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -297,6 +297,7 @@ struct chcr_stats_debug {
atomic_t complete;
atomic_t error;
atomic_t fallback;
+ atomic_t ipsec_cnt;
};
#define OCQ_WIN_OFFSET(pdev, vres) \
@@ -322,6 +323,7 @@ struct cxgb4_lld_info {
unsigned char wr_cred; /* WR 16-byte credits */
unsigned char adapter_type; /* type of adapter */
unsigned char fw_api_ver; /* FW API version */
+ unsigned char crypto; /* crypto support */
unsigned int fw_vers; /* FW version */
unsigned int iscsi_iolen; /* iSCSI max I/O length */
unsigned int cclk_ps; /* Core clock period in psec */
@@ -370,6 +372,7 @@ struct cxgb4_uld_info {
struct t4_lro_mgr *lro_mgr,
struct napi_struct *napi);
void (*lro_flush)(struct t4_lro_mgr *);
+ int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
};
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 922f2f937789..6e310a0da7c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -41,6 +41,7 @@
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
+#include <net/xfrm.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
@@ -53,6 +54,7 @@
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
+#include "cxgb4_uld.h"
/*
* Rx buffer size. We use largish buffers if possible but settle for single
@@ -110,14 +112,6 @@
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
/*
- * Suspend an Ethernet Tx queue with fewer available descriptors than this.
- * This is the same as calc_tx_descs() for a TSO packet with
- * nr_frags == MAX_SKB_FRAGS.
- */
-#define ETHTXQ_STOP_THRES \
- (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
-
-/*
* Suspension threshold for non-Ethernet Tx queues. We require enough room
* for a full sized WR.
*/
@@ -134,11 +128,6 @@
*/
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
-struct tx_sw_desc { /* SW state per Tx descriptor */
- struct sk_buff *skb;
- struct ulptx_sgl *sgl;
-};
-
struct rx_sw_desc { /* SW state per Rx descriptor */
struct page *page;
dma_addr_t dma_addr;
@@ -248,8 +237,8 @@ static inline bool fl_starving(const struct adapter *adapter,
return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
-static int map_skb(struct device *dev, const struct sk_buff *skb,
- dma_addr_t *addr)
+int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr)
{
const skb_frag_t *fp, *end;
const struct skb_shared_info *si;
@@ -277,6 +266,7 @@ unwind:
out_err:
return -ENOMEM;
}
+EXPORT_SYMBOL(cxgb4_map_skb);
#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
@@ -411,7 +401,7 @@ static inline int reclaimable(const struct sge_txq *q)
}
/**
- * reclaim_completed_tx - reclaims completed Tx descriptors
+ * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
* @adap: the adapter
* @q: the Tx queue to reclaim completed descriptors from
* @unmap: whether the buffers should be unmapped for DMA
@@ -420,7 +410,7 @@ static inline int reclaimable(const struct sge_txq *q)
* and frees the associated buffers if possible. Called with the Tx
* queue locked.
*/
-static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
bool unmap)
{
int avail = reclaimable(q);
@@ -437,6 +427,7 @@ static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
q->in_use -= avail;
}
}
+EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
static inline int get_buf_size(struct adapter *adapter,
const struct rx_sw_desc *d)
@@ -770,12 +761,19 @@ static inline unsigned int flits_to_desc(unsigned int n)
* Returns whether an Ethernet packet is small enough to fit as
* immediate data. Return value corresponds to headroom required.
*/
-static inline int is_eth_imm(const struct sk_buff *skb)
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
{
- int hdrlen = skb_shinfo(skb)->gso_size ?
- sizeof(struct cpl_tx_pkt_lso_core) : 0;
+ int hdrlen = 0;
- hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
+ chip_ver > CHELSIO_T5) {
+ hdrlen = sizeof(struct cpl_tx_tnl_lso);
+ hdrlen += sizeof(struct cpl_tx_pkt_core);
+ } else {
+ hdrlen = skb_shinfo(skb)->gso_size ?
+ sizeof(struct cpl_tx_pkt_lso_core) : 0;
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ }
if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
return hdrlen;
return 0;
@@ -788,10 +786,11 @@ static inline int is_eth_imm(const struct sk_buff *skb)
* Returns the number of flits needed for a Tx WR for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
+ unsigned int chip_ver)
{
unsigned int flits;
- int hdrlen = is_eth_imm(skb);
+ int hdrlen = is_eth_imm(skb, chip_ver);
/* If the skb is small enough, we can pump it out as a work request
* with only immediate data. In that case we just have to have the
@@ -810,13 +809,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* with an embedded TX Packet Write CPL message.
*/
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
- if (skb_shinfo(skb)->gso_size)
- flits += (sizeof(struct fw_eth_tx_pkt_wr) +
- sizeof(struct cpl_tx_pkt_lso_core) +
- sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
- else
+ if (skb_shinfo(skb)->gso_size) {
+ if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_tnl_lso);
+ else
+ hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core);
+
+ hdrlen += sizeof(struct cpl_tx_pkt_core);
+ flits += (hdrlen / sizeof(__be64));
+ } else {
flits += (sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ }
return flits;
}
@@ -827,13 +833,14 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* Returns the number of Tx descriptors needed for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
+ unsigned int chip_ver)
{
- return flits_to_desc(calc_tx_flits(skb));
+ return flits_to_desc(calc_tx_flits(skb, chip_ver));
}
/**
- * write_sgl - populate a scatter/gather list for a packet
+ * cxgb4_write_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @q: the Tx queue we are writing into
* @sgl: starting location for writing the SGL
@@ -849,9 +856,9 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
* right after the end of the SGL but does not account for any potential
* wrap around, i.e., @end > @sgl.
*/
-static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
- struct ulptx_sgl *sgl, u64 *end, unsigned int start,
- const dma_addr_t *addr)
+void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr)
{
unsigned int i, len;
struct ulptx_sge_pair *to;
@@ -903,6 +910,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
*end = 0;
}
+EXPORT_SYMBOL(cxgb4_write_sgl);
/* This function copies 64 byte coalesced work request to
* memory mapped BAR2 space. For coalesced WR SGE fetches
@@ -921,14 +929,14 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
}
/**
- * ring_tx_db - check and potentially ring a Tx queue's doorbell
+ * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
* @adap: the adapter
* @q: the Tx queue
* @n: number of new descriptors to give to HW
*
* Ring the doorbell for a Tx queue.
*/
-static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
+inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
/* Make sure that all writes to the TX Descriptors are committed
* before we tell the hardware about them.
@@ -995,9 +1003,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
wmb();
}
}
+EXPORT_SYMBOL(cxgb4_ring_tx_db);
/**
- * inline_tx_skb - inline a packet's data into Tx descriptors
+ * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
* @skb: the packet
* @q: the Tx queue where the packet will be inlined
* @pos: starting position in the Tx queue where to inline the packet
@@ -1007,8 +1016,8 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
* Most of the complexity of this operation is dealing with wrap arounds
* in the middle of the packet we want to inline.
*/
-static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
- void *pos)
+void cxgb4_inline_tx_skb(const struct sk_buff *skb,
+ const struct sge_txq *q, void *pos)
{
u64 *p;
int left = (void *)q->stat - pos;
@@ -1030,6 +1039,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
if ((uintptr_t)p & 8)
*p = 0;
}
+EXPORT_SYMBOL(cxgb4_inline_tx_skb);
static void *inline_tx_skb_header(const struct sk_buff *skb,
const struct sge_txq *q, void *pos,
@@ -1154,6 +1164,105 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
+/* Returns the tunnel type if the hardware supports offloading it.
+ * Called only on T6 and later chips (chip_ver > CHELSIO_T5).
+ */
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
+{
+ u8 l4_hdr = 0;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+ struct port_info *pi = netdev_priv(skb->dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB))
+ return tnl_type;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return tnl_type;
+ }
+
+ switch (l4_hdr) {
+ case IPPROTO_UDP:
+ if (adapter->vxlan_port == udp_hdr(skb)->dest)
+ tnl_type = TX_TNL_TYPE_VXLAN;
+ else if (adapter->geneve_port == udp_hdr(skb)->dest)
+ tnl_type = TX_TNL_TYPE_GENEVE;
+ break;
+ default:
+ return tnl_type;
+ }
+
+ return tnl_type;
+}
+
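cxgb_encap_offload_supported() matches the packet's outer UDP destination port against adapter->vxlan_port and adapter->geneve_port, which are assumed to be programmed elsewhere in the driver. A hypothetical sketch of such programming, using the MPS_RX_VXLAN_TYPE fields this patch adds to t4_regs.h:

/* Editor's sketch; hypothetical helper, not part of this patch */
static void demo_program_vxlan_port(struct adapter *adap, __be16 port)
{
	adap->vxlan_port = port;
	t4_write_reg(adap, MPS_RX_VXLAN_TYPE_A,
		     VXLAN_V(be16_to_cpu(port)) | VXLAN_EN_F);
}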
+static inline void t6_fill_tnl_lso(struct sk_buff *skb,
+ struct cpl_tx_tnl_lso *tnl_lso,
+ enum cpl_tx_tnl_lso_type tnl_type)
+{
+ u32 val;
+ int in_eth_xtra_len;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ const struct skb_shared_info *ssi = skb_shinfo(skb);
+ bool v6 = (ip_hdr(skb)->version == 6);
+
+ val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
+ CPL_TX_TNL_LSO_FIRST_F |
+ CPL_TX_TNL_LSO_LAST_F |
+ (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
+ CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
+ CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
+ (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
+ CPL_TX_TNL_LSO_IPLENSETOUT_F |
+ (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
+ tnl_lso->op_to_IpIdSplitOut = htonl(val);
+
+ tnl_lso->IpIdOffsetOut = 0;
+
+ /* Get the tunnel header length */
+ val = skb_inner_mac_header(skb) - skb_mac_header(skb);
+ in_eth_xtra_len = skb_inner_network_header(skb) -
+ skb_inner_mac_header(skb) - ETH_HLEN;
+
+ switch (tnl_type) {
+ case TX_TNL_TYPE_VXLAN:
+ case TX_TNL_TYPE_GENEVE:
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen =
+ htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
+ CPL_TX_TNL_LSO_UDPLENSETOUT_F);
+ break;
+ default:
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
+ break;
+ }
+
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
+ htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
+ CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
+
+ tnl_lso->r1 = 0;
+
+ val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
+ CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
+ CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
+ CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
+ tnl_lso->Flow_to_TcpHdrLen = htonl(val);
+
+ tnl_lso->IpIdOffset = htons(0);
+
+ tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
+ tnl_lso->TCPSeqOffset = htonl(0);
+ tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
+}
+
/**
* t4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
@@ -1177,6 +1286,9 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
bool immediate = false;
int len, max_pkt_len;
bool ptp_enabled = is_ptp_enabled(skb, dev);
+ unsigned int chip_ver;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+
#ifdef CONFIG_CHELSIO_T4_FCOE
int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1199,6 +1311,12 @@ out_free: dev_kfree_skb_any(skb);
pi = netdev_priv(dev);
adap = pi->adapter;
+ ssi = skb_shinfo(skb);
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ if (xfrm_offload(skb) && !ssi->gso_size)
+ return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
qidx = skb_get_queue_mapping(skb);
if (ptp_enabled) {
spin_lock(&adap->ptp_lock);
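The xfrm_offload() dispatch above assumes a crypto ULD has populated the new tx_handler hook added to struct cxgb4_uld_info. A hypothetical registration sketch (all names below are editor's assumptions, not part of this patch):

static const struct cxgb4_uld_info demo_crypto_uld = {
	.tx_handler	= demo_ipsec_xmit,	/* hypothetical handler */
	/* remaining mandatory ULD callbacks elided for brevity */
};

/* from the hypothetical ULD's module init: */
cxgb4_register_uld(CXGB4_ULD_CRYPTO, &demo_crypto_uld);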
@@ -1215,7 +1333,7 @@ out_free: dev_kfree_skb_any(skb);
}
skb_tx_timestamp(skb);
- reclaim_completed_tx(adap, &q->q, true);
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
@@ -1227,7 +1345,8 @@ out_free: dev_kfree_skb_any(skb);
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
- flits = calc_tx_flits(skb);
+ chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+ flits = calc_tx_flits(skb, chip_ver);
ndesc = flits_to_desc(flits);
credits = txq_avail(&q->q) - ndesc;
@@ -1241,11 +1360,14 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
- if (is_eth_imm(skb))
+ if (is_eth_imm(skb, chip_ver))
immediate = true;
+ if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ tnl_type = cxgb_encap_offload_supported(skb);
+
if (!immediate &&
- unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
@@ -1264,39 +1386,63 @@ out_free: dev_kfree_skb_any(skb);
end = (u64 *)wr + flits;
len = immediate ? skb->len : 0;
- ssi = skb_shinfo(skb);
if (ssi->gso_size) {
struct cpl_tx_pkt_lso *lso = (void *)wr;
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
+
+ if (tnl_type)
+ len += sizeof(*tnl_lso);
+ else
+ len += sizeof(*lso);
- len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(len));
- lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
- LSO_IPV6_V(v6) |
- LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
- LSO_IPHDR_LEN_V(l3hdr_len / 4) |
- LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
- lso->c.ipid_ofst = htons(0);
- lso->c.mss = htons(ssi->gso_size);
- lso->c.seqno_offset = htonl(0);
- if (is_t4(adap->params.chip))
- lso->c.len = htonl(skb->len);
- else
- lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
- cpl = (void *)(lso + 1);
+ if (tnl_type) {
+ struct iphdr *iph = ip_hdr(skb);
- if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
- cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
- else
- cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
+ cpl = (void *)(tnl_lso + 1);
+ /* The driver is expected to compute a partial checksum that
+ * does not include the IP Total Length.
+ */
+ if (iph->version == 4) {
+ iph->check = 0;
+ iph->tot_len = 0;
+ iph->check = (u16)(~ip_fast_csum((u8 *)iph,
+ iph->ihl));
+ }
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ cntrl = hwcsum(adap->params.chip, skb);
+ } else {
+ lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->c.ipid_ofst = htons(0);
+ lso->c.mss = htons(ssi->gso_size);
+ lso->c.seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->c.len = htonl(skb->len);
+ else
+ lso->c.len =
+ htonl(LSO_T5_XFER_SIZE_V(skb->len));
+ cpl = (void *)(lso + 1);
- cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
- TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN_V(l3hdr_len);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip)
+ <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
+ }
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
@@ -1341,13 +1487,13 @@ out_free: dev_kfree_skb_any(skb);
cpl->ctrl1 = cpu_to_be64(cntrl);
if (immediate) {
- inline_tx_skb(skb, &q->q, cpl + 1);
+ cxgb4_inline_tx_skb(skb, &q->q, cpl + 1);
dev_consume_skb_any(skb);
} else {
int last_desc;
- write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
- addr);
+ cxgb4_write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1),
+ end, 0, addr);
skb_orphan(skb);
last_desc = q->q.pidx + ndesc - 1;
@@ -1359,7 +1505,7 @@ out_free: dev_kfree_skb_any(skb);
txq_advance(&q->q, ndesc);
- ring_tx_db(adap, &q->q, ndesc);
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
@@ -1369,9 +1515,9 @@ out_free: dev_kfree_skb_any(skb);
* reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
* @q: the SGE control Tx queue
*
- * This is a variant of reclaim_completed_tx() that is used for Tx queues
- * that send only immediate data (presently just the control queues) and
- * thus do not have any sk_buffs to release.
+ * This is a variant of cxgb4_reclaim_completed_tx() that is used
+ * for Tx queues that send only immediate data (presently just
+ * the control queues) and thus do not have any sk_buffs to release.
*/
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
@@ -1446,13 +1592,13 @@ static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
}
wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
- inline_tx_skb(skb, &q->q, wr);
+ cxgb4_inline_tx_skb(skb, &q->q, wr);
txq_advance(&q->q, ndesc);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
ctrlq_check_stop(q, wr);
- ring_tx_db(q->adap, &q->q, ndesc);
+ cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
spin_unlock(&q->sendq.lock);
kfree_skb(skb);
@@ -1487,7 +1633,7 @@ static void restart_ctrlq(unsigned long data)
txq_advance(&q->q, ndesc);
spin_unlock(&q->sendq.lock);
- inline_tx_skb(skb, &q->q, wr);
+ cxgb4_inline_tx_skb(skb, &q->q, wr);
kfree_skb(skb);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
@@ -1500,14 +1646,15 @@ static void restart_ctrlq(unsigned long data)
}
}
if (written > 16) {
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
written = 0;
}
spin_lock(&q->sendq.lock);
}
q->full = 0;
-ringdb: if (written)
- ring_tx_db(q->adap, &q->q, written);
+ringdb:
+ if (written)
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
spin_unlock(&q->sendq.lock);
}
@@ -1650,7 +1797,7 @@ static void service_ofldq(struct sge_uld_txq *q)
*/
spin_unlock(&q->sendq.lock);
- reclaim_completed_tx(q->adap, &q->q, false);
+ cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
flits = skb->priority; /* previously saved */
ndesc = flits_to_desc(flits);
@@ -1661,9 +1808,9 @@ static void service_ofldq(struct sge_uld_txq *q)
pos = (u64 *)&q->q.desc[q->q.pidx];
if (is_ofld_imm(skb))
- inline_tx_skb(skb, &q->q, pos);
- else if (map_skb(q->adap->pdev_dev, skb,
- (dma_addr_t *)skb->head)) {
+ cxgb4_inline_tx_skb(skb, &q->q, pos);
+ else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
+ (dma_addr_t *)skb->head)) {
txq_stop_maperr(q);
spin_lock(&q->sendq.lock);
break;
@@ -1694,9 +1841,9 @@ static void service_ofldq(struct sge_uld_txq *q)
pos = (void *)txq->desc;
}
- write_sgl(skb, &q->q, (void *)pos,
- end, hdr_len,
- (dma_addr_t *)skb->head);
+ cxgb4_write_sgl(skb, &q->q, (void *)pos,
+ end, hdr_len,
+ (dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
skb->dev = q->adap->port[0];
skb->destructor = deferred_unmap_destructor;
@@ -1710,7 +1857,7 @@ static void service_ofldq(struct sge_uld_txq *q)
txq_advance(&q->q, ndesc);
written += ndesc;
if (unlikely(written > 32)) {
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
written = 0;
}
@@ -1725,7 +1872,7 @@ static void service_ofldq(struct sge_uld_txq *q)
kfree_skb(skb);
}
if (likely(written))
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
/* Indicate that no thread is processing the Pending Send Queue
* currently.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index f63210f15579..047609ef0515 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -195,9 +195,11 @@ static void t4_report_fw_error(struct adapter *adap)
u32 pcie_fw;
pcie_fw = t4_read_reg(adap, PCIE_FW_A);
- if (pcie_fw & PCIE_FW_ERR_F)
+ if (pcie_fw & PCIE_FW_ERR_F) {
dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
reason[PCIE_FW_EVAL_G(pcie_fw)]);
+ adap->flags &= ~FW_OK;
+ }
}
/*
@@ -317,9 +319,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
* wait [for a while] till we're at the front [or bail out with an
* EBUSY] ...
*/
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_add_tail(&entry.list, &adap->mlist.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
delay_idx = 0;
ms = delay[0];
@@ -332,9 +334,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
*/
pcie_fw = t4_read_reg(adap, PCIE_FW_A);
if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
t4_record_mbox(adap, cmd, size, access, ret);
return ret;
@@ -365,9 +367,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
if (v != MBOX_OWNER_DRV) {
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
t4_record_mbox(adap, cmd, size, access, ret);
return ret;
@@ -418,9 +420,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
execute = i + ms;
t4_record_mbox(adap, cmd_rpl,
MBOX_LEN, access, execute);
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
return -FW_CMD_RETVAL_G((int)res);
}
}
@@ -430,9 +432,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
t4_fatal_err(adap);
return ret;
}
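The switch to the _bh lock variants above suggests mbox_lock may now also be taken from bottom-half context; process-context holders must then keep BHs disabled while holding the lock to avoid self-deadlock. A minimal sketch of the pattern (rationale assumed by the editor):

spin_lock_bh(&adap->mbox_lock);		/* softirqs off on this CPU */
list_add_tail(&entry.list, &adap->mlist.list);
spin_unlock_bh(&adap->mbox_lock);	/* softirqs re-enabled */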
@@ -524,11 +526,14 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
* MEM_EDC1 = 1
* MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
* MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
+ * MEM_HMA = 4
*/
edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
- if (mtype != MEM_MC1)
+ if (mtype == MEM_HMA) {
+ memoffset = 2 * (edc_size * 1024 * 1024);
+ } else if (mtype != MEM_MC1) {
memoffset = (mtype * (edc_size * 1024 * 1024));
- else {
+ } else {
mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
MA_EXT_MEMORY0_BAR_A));
memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
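A consolidated view of the offsets computed above (editor's sketch; edc_size and mc_size are in MB as decoded from the MA BAR registers):

/*
 *   MEM_EDC0 (0) -> 0
 *   MEM_EDC1 (1) -> 1 * edc_size
 *   MEM_MC0  (2) -> 2 * edc_size
 *   MEM_MC1  (3) -> 2 * edc_size + mc_size
 *   MEM_HMA  (4) -> 2 * edc_size   (HMA is assumed to occupy MC0's slot)
 */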
@@ -2844,8 +2849,6 @@ enum {
SF_RD_DATA_FAST = 0xb, /* read flash */
SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */
-
- FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};
/**
@@ -3558,8 +3561,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
const __be32 *p = (const __be32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
- unsigned int fw_img_start = adap->params.sf_fw_start;
- unsigned int fw_start_sec = fw_img_start / sf_sec_size;
+ unsigned int fw_start_sec = FLASH_FW_START_SEC;
+ unsigned int fw_size = FLASH_FW_MAX_SIZE;
+ unsigned int fw_start = FLASH_FW_START;
if (!size) {
dev_err(adap->pdev_dev, "FW image has no data\n");
@@ -3575,9 +3579,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
"FW image size differs from size in FW header\n");
return -EINVAL;
}
- if (size > FW_MAX_SIZE) {
+ if (size > fw_size) {
dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
- FW_MAX_SIZE);
+ fw_size);
return -EFBIG;
}
if (!t4_fw_matches_chip(adap, hdr))
@@ -3604,11 +3608,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
- ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
+ ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
if (ret)
goto out;
- addr = fw_img_start;
+ addr = fw_start;
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
fw_data += SF_PAGE_SIZE;
@@ -3618,7 +3622,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
}
ret = t4_write_flash(adap,
- fw_img_start + offsetof(struct fw_hdr, fw_ver),
+ fw_start + offsetof(struct fw_hdr, fw_ver),
sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
if (ret)
@@ -4924,6 +4928,14 @@ void t4_intr_disable(struct adapter *adapter)
t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
}
+unsigned int t4_chip_rss_size(struct adapter *adap)
+{
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ return RSS_NENTRIES;
+ else
+ return T6_RSS_NENTRIES;
+}
+
/**
* t4_config_rss_range - configure a portion of the RSS mapping table
* @adapter: the adapter
@@ -5062,10 +5074,11 @@ static int rd_rss_row(struct adapter *adap, int row, u32 *val)
*/
int t4_read_rss(struct adapter *adapter, u16 *map)
{
+ int i, ret, nentries;
u32 val;
- int i, ret;
- for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+ nentries = t4_chip_rss_size(adapter);
+ for (i = 0; i < nentries / 2; ++i) {
ret = rd_rss_row(adapter, i, &val);
if (ret)
return ret;
@@ -5077,7 +5090,7 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
static unsigned int t4_use_ldst(struct adapter *adap)
{
- return (adap->flags & FW_OK) || !adap->use_bd;
+ return (adap->flags & FW_OK) && !adap->use_bd;
}
/**
@@ -6072,6 +6085,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
"CR2_QSFP",
"SFP28",
"KR_SFP28",
+ "KR_XLAUI"
};
if (port_type < ARRAY_SIZE(port_type_description))
@@ -6527,18 +6541,21 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
* t4_sge_ctxt_flush - flush the SGE context cache
* @adap: the adapter
* @mbox: mailbox to use for the FW command
+ * @ctxt_type: Egress or Ingress
*
* Issues a FW command through the given mailbox to flush the
* SGE context cache.
*/
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
{
int ret;
u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
+ FW_LDST_ADDRSPC_SGE_EGRC :
+ FW_LDST_ADDRSPC_SGE_INGC);
c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F |
ldst_addrspace);
@@ -7452,6 +7469,112 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
+ * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index of the entry in mps tcam
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Removes the mac entry at the specified index using raw mac interface.
+ *
+ * Returns a negative error number on failure.
+ */
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_CMD_EXEC_V(0) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ val = FW_CMD_LEN16_V(1) |
+ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
+ FW_CMD_LEN16_V(val));
+
+ p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
+ FW_VI_MAC_ID_BASED_FREE);
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+ DATAPORTNUM_V(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+ DATAPORTNUM_V(DATAPORTNUM_M));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+}
+
+/**
+ * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index at which to add this entry
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Adds the mac entry at the specified index using the raw mac interface.
+ *
+ * Returns a negative error number or the allocated index for this mac.
+ */
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ int ret = 0;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ val = FW_CMD_LEN16_V(1) |
+ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(val);
+
+ /* Request that the entry be added at the given raw index */
+ p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+ DATAPORTNUM_V(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+ DATAPORTNUM_V(DATAPORTNUM_M));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+ ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret == 0) {
+ ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
+ if (ret != idx)
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
+/**
* t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -8492,22 +8615,6 @@ found:
return 0;
}
-static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
-{
- u16 val;
- u32 pcie_cap;
-
- pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
- if (pcie_cap) {
- pci_read_config_word(adapter->pdev,
- pcie_cap + PCI_EXP_DEVCTL2, &val);
- val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
- val |= range;
- pci_write_config_word(adapter->pdev,
- pcie_cap + PCI_EXP_DEVCTL2, val);
- }
-}
-
/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
@@ -8593,8 +8700,9 @@ int t4_prep_adapter(struct adapter *adapter)
adapter->params.portvec = 1;
adapter->params.vpd.cclk = 50000;
- /* Set pci completion timeout value to 4 seconds. */
- set_pcie_completion_timeout(adapter, 0xd);
+ /* Set PCIe completion timeout to 4 seconds. */
+ pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
return 0;
}
@@ -9737,3 +9845,91 @@ int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
NULL, 1);
}
+
+/**
+ * t4_i2c_rd - read I2C data from adapter
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: Port number if per-port device; <0 if not
+ * @devid: per-port device ID or absolute device ID
+ * @offset: byte offset into device I2C space
+ * @len: byte length of I2C space data
+ * @buf: buffer in which to return I2C data
+ *
+ * Reads the I2C data from the indicated device and location.
+ */
+int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
+ unsigned int devid, unsigned int offset,
+ unsigned int len, u8 *buf)
+{
+ struct fw_ldst_cmd ldst_cmd, ldst_rpl;
+ unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
+ int ret = 0;
+
+ if (len > I2C_PAGE_SIZE)
+ return -EINVAL;
+
+ /* Don't allow reads that span multiple pages */
+ if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
+ return -EINVAL;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_cmd.op_to_addrspace =
+ cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
+ ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
+ ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
+ ldst_cmd.u.i2c.did = devid;
+
+ while (len > 0) {
+ unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
+
+ ldst_cmd.u.i2c.boffset = offset;
+ ldst_cmd.u.i2c.blen = i2c_len;
+
+ ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
+ &ldst_rpl);
+ if (ret)
+ break;
+
+ memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
+ offset += i2c_len;
+ buf += i2c_len;
+ len -= i2c_len;
+ }
+
+ return ret;
+}
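A hedged usage sketch for t4_i2c_rd() (editor's example): reading the one-byte SFP diagnostics-type field at I2C address A0h, using the constants this patch adds to t4_hw.h; pi->port_id selects the per-port bus.

u8 diag_type;
int ret;

ret = t4_i2c_rd(adap, adap->mbox, pi->port_id, I2C_DEV_ADDR_A0,
		SFP_DIAG_TYPE_ADDR, SFP_DIAG_TYPE_LEN, &diag_type);
if (ret)
	dev_err(adap->pdev_dev, "SFP diag read failed: %d\n", ret);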
+
+/**
+ * t4_set_vlan_acl - Set a VLAN id for the specified VF
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @vf: one of the VFs instantiated by the specified PF
+ * @vlan: the VLAN id to be set
+ */
+int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
+ u16 vlan)
+{
+ struct fw_acl_vlan_cmd vlan_cmd;
+ unsigned int enable;
+
+ enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
+ memset(&vlan_cmd, 0, sizeof(vlan_cmd));
+ vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_F |
+ FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
+ FW_ACL_VLAN_CMD_VFN_V(vf));
+ vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
+ /* Drop all packets that do not match the VLAN id */
+ vlan_cmd.dropnovlan_fm = FW_ACL_VLAN_CMD_FM_F;
+ if (enable != 0) {
+ vlan_cmd.nvlan = 1;
+ vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
+ }
+
+ return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
+}
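A plausible caller for t4_set_vlan_acl(), sketched by the editor (the callback name and the 1-based VF indexing are assumptions, not part of this patch):

static int demo_set_vf_vlan(struct net_device *dev, int vf,
			    u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (vlan > 4095 || qos > 7)
		return -EINVAL;
	if (vlan_proto != htons(ETH_P_8021Q) || qos)
		return -EPROTONOSUPPORT;

	/* assumption: FW VF numbering is 1-based relative to the PF */
	return t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
}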
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index a964ed184356..361d5032c288 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -38,21 +38,22 @@
#include <linux/types.h>
enum {
- NCHAN = 4, /* # of HW channels */
- MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
- EEPROMSIZE = 17408, /* Serial EEPROM physical size */
- EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
- EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
- RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
- TCB_SIZE = 128, /* TCB size */
- NMTUS = 16, /* size of MTU table */
- NCCTRL_WIN = 32, /* # of congestion control windows */
- NTX_SCHED = 8, /* # of HW Tx scheduling queues */
- PM_NSTATS = 5, /* # of PM stats */
- T6_PM_NSTATS = 7, /* # of PM stats in T6 */
- MBOX_LEN = 64, /* mailbox size in bytes */
- TRACE_LEN = 112, /* length of trace data and mask */
- FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
+ NCHAN = 4, /* # of HW channels */
+ MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
+ EEPROMSIZE = 17408, /* Serial EEPROM physical size */
+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
+ RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
+ T6_RSS_NENTRIES = 4096, /* # of entries in RSS mapping table */
+ TCB_SIZE = 128, /* TCB size */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+ NTX_SCHED = 8, /* # of HW Tx scheduling queues */
+ PM_NSTATS = 5, /* # of PM stats */
+ T6_PM_NSTATS = 7, /* # of PM stats in T6 */
+ MBOX_LEN = 64, /* mailbox size in bytes */
+ TRACE_LEN = 112, /* length of trace data and mask */
+ FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
};
enum {
@@ -70,7 +71,9 @@ enum {
/* SGE context types */
enum ctxt_type {
- CTXT_FLM = 2,
+ CTXT_EGRESS,
+ CTXT_INGRESS,
+ CTXT_FLM,
CTXT_CNM,
};
@@ -284,4 +287,14 @@ enum {
#define SGE_TIMESTAMP_V(x) ((__u64)(x) << SGE_TIMESTAMP_S)
#define SGE_TIMESTAMP_G(x) (((__u64)(x) >> SGE_TIMESTAMP_S) & SGE_TIMESTAMP_M)
+#define I2C_DEV_ADDR_A0 0xa0
+#define I2C_DEV_ADDR_A2 0xa2
+#define I2C_PAGE_SIZE 0x100
+#define SFP_DIAG_TYPE_ADDR 0x5c
+#define SFP_DIAG_TYPE_LEN 0x1
+#define SFF_8472_COMP_ADDR 0x5e
+#define SFF_8472_COMP_LEN 0x1
+#define SFF_REV_ADDR 0x1
+#define SFF_REV_LEN 0x1
+
#endif /* __T4_HW_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 7e12f241145b..d0db4427b77e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -107,6 +107,7 @@ enum {
CPL_FW6_MSG = 0xE0,
CPL_FW6_PLD = 0xE1,
+ CPL_TX_TNL_LSO = 0xEC,
CPL_TX_PKT_LSO = 0xED,
CPL_TX_PKT_XT = 0xEE,
@@ -1479,6 +1480,169 @@ struct ulp_txpkt {
#define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S)
#define ULP_TXPKT_RO_F ULP_TXPKT_RO_V(1U)
+enum cpl_tx_tnl_lso_type {
+ TX_TNL_TYPE_OPAQUE,
+ TX_TNL_TYPE_NVGRE,
+ TX_TNL_TYPE_VXLAN,
+ TX_TNL_TYPE_GENEVE,
+};
+
+struct cpl_tx_tnl_lso {
+ __be32 op_to_IpIdSplitOut;
+ __be16 IpIdOffsetOut;
+ __be16 UdpLenSetOut_to_TnlHdrLen;
+ __be64 r1;
+ __be32 Flow_to_TcpHdrLen;
+ __be16 IpIdOffset;
+ __be16 IpIdSplit_to_Mss;
+ __be32 TCPSeqOffset;
+ __be32 EthLenOffset_Size;
+ /* encapsulated CPL (TX_PKT_XT) follows here */
+};
+
+#define CPL_TX_TNL_LSO_OPCODE_S 24
+#define CPL_TX_TNL_LSO_OPCODE_M 0xff
+#define CPL_TX_TNL_LSO_OPCODE_V(x) ((x) << CPL_TX_TNL_LSO_OPCODE_S)
+#define CPL_TX_TNL_LSO_OPCODE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_OPCODE_S) & CPL_TX_TNL_LSO_OPCODE_M)
+
+#define CPL_TX_TNL_LSO_FIRST_S 23
+#define CPL_TX_TNL_LSO_FIRST_M 0x1
+#define CPL_TX_TNL_LSO_FIRST_V(x) ((x) << CPL_TX_TNL_LSO_FIRST_S)
+#define CPL_TX_TNL_LSO_FIRST_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_FIRST_S) & CPL_TX_TNL_LSO_FIRST_M)
+#define CPL_TX_TNL_LSO_FIRST_F CPL_TX_TNL_LSO_FIRST_V(1U)
+
+#define CPL_TX_TNL_LSO_LAST_S 22
+#define CPL_TX_TNL_LSO_LAST_M 0x1
+#define CPL_TX_TNL_LSO_LAST_V(x) ((x) << CPL_TX_TNL_LSO_LAST_S)
+#define CPL_TX_TNL_LSO_LAST_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_LAST_S) & CPL_TX_TNL_LSO_LAST_M)
+#define CPL_TX_TNL_LSO_LAST_F CPL_TX_TNL_LSO_LAST_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_S 21
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_M 0x1
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_ETHHDRLENXOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLENXOUT_S) & \
+ CPL_TX_TNL_LSO_ETHHDRLENXOUT_M)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_F CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPV6OUT_S 20
+#define CPL_TX_TNL_LSO_IPV6OUT_M 0x1
+#define CPL_TX_TNL_LSO_IPV6OUT_V(x) ((x) << CPL_TX_TNL_LSO_IPV6OUT_S)
+#define CPL_TX_TNL_LSO_IPV6OUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPV6OUT_S) & CPL_TX_TNL_LSO_IPV6OUT_M)
+#define CPL_TX_TNL_LSO_IPV6OUT_F CPL_TX_TNL_LSO_IPV6OUT_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLEN_S 16
+#define CPL_TX_TNL_LSO_ETHHDRLEN_M 0xf
+#define CPL_TX_TNL_LSO_ETHHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_ETHHDRLEN_S)
+#define CPL_TX_TNL_LSO_ETHHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLEN_S) & CPL_TX_TNL_LSO_ETHHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLEN_S 4
+#define CPL_TX_TNL_LSO_IPHDRLEN_M 0xfff
+#define CPL_TX_TNL_LSO_IPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLEN_S)
+#define CPL_TX_TNL_LSO_IPHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRLEN_S) & CPL_TX_TNL_LSO_IPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_TCPHDRLEN_S 0
+#define CPL_TX_TNL_LSO_TCPHDRLEN_M 0xf
+#define CPL_TX_TNL_LSO_TCPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TCPHDRLEN_S)
+#define CPL_TX_TNL_LSO_TCPHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TCPHDRLEN_S) & CPL_TX_TNL_LSO_TCPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_MSS_S 0
+#define CPL_TX_TNL_LSO_MSS_M 0x3fff
+#define CPL_TX_TNL_LSO_MSS_V(x) ((x) << CPL_TX_TNL_LSO_MSS_S)
+#define CPL_TX_TNL_LSO_MSS_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_MSS_S) & CPL_TX_TNL_LSO_MSS_M)
+
+#define CPL_TX_TNL_LSO_SIZE_S 0
+#define CPL_TX_TNL_LSO_SIZE_M 0xfffffff
+#define CPL_TX_TNL_LSO_SIZE_V(x) ((x) << CPL_TX_TNL_LSO_SIZE_S)
+#define CPL_TX_TNL_LSO_SIZE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_SIZE_S) & CPL_TX_TNL_LSO_SIZE_M)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_S 16
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_M 0xf
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_ETHHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLENOUT_S) & CPL_TX_TNL_LSO_ETHHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_S 4
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_M 0xfff
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRLENOUT_S) & CPL_TX_TNL_LSO_IPHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_S 3
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRCHKOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRCHKOUT_S) & CPL_TX_TNL_LSO_IPHDRCHKOUT_M)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_F CPL_TX_TNL_LSO_IPHDRCHKOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPLENSETOUT_S 2
+#define CPL_TX_TNL_LSO_IPLENSETOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPLENSETOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPLENSETOUT_S) & CPL_TX_TNL_LSO_IPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_F CPL_TX_TNL_LSO_IPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPIDINCOUT_S 1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPIDINCOUT_S)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPIDINCOUT_S) & CPL_TX_TNL_LSO_IPIDINCOUT_M)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_F CPL_TX_TNL_LSO_IPIDINCOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_S 14
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_M 0x1
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_UDPCHKCLROUT_S)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_UDPCHKCLROUT_S) & \
+ CPL_TX_TNL_LSO_UDPCHKCLROUT_M)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_F CPL_TX_TNL_LSO_UDPCHKCLROUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_S 15
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_M 0x1
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_UDPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_UDPLENSETOUT_S) & \
+ CPL_TX_TNL_LSO_UDPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_F CPL_TX_TNL_LSO_UDPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_TNLTYPE_S 12
+#define CPL_TX_TNL_LSO_TNLTYPE_M 0x3
+#define CPL_TX_TNL_LSO_TNLTYPE_V(x) ((x) << CPL_TX_TNL_LSO_TNLTYPE_S)
+#define CPL_TX_TNL_LSO_TNLTYPE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TNLTYPE_S) & CPL_TX_TNL_LSO_TNLTYPE_M)
+
+#define S_CPL_TX_TNL_LSO_ETHHDRLEN 16
+#define M_CPL_TX_TNL_LSO_ETHHDRLEN 0xf
+#define V_CPL_TX_TNL_LSO_ETHHDRLEN(x) ((x) << S_CPL_TX_TNL_LSO_ETHHDRLEN)
+#define G_CPL_TX_TNL_LSO_ETHHDRLEN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLEN) & M_CPL_TX_TNL_LSO_ETHHDRLEN)
+
+#define CPL_TX_TNL_LSO_TNLHDRLEN_S 0
+#define CPL_TX_TNL_LSO_TNLHDRLEN_M 0xfff
+#define CPL_TX_TNL_LSO_TNLHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TNLHDRLEN_S)
+#define CPL_TX_TNL_LSO_TNLHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TNLHDRLEN_S) & CPL_TX_TNL_LSO_TNLHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPV6_S 20
+#define CPL_TX_TNL_LSO_IPV6_M 0x1
+#define CPL_TX_TNL_LSO_IPV6_V(x) ((x) << CPL_TX_TNL_LSO_IPV6_S)
+#define CPL_TX_TNL_LSO_IPV6_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPV6_S) & CPL_TX_TNL_LSO_IPV6_M)
+#define CPL_TX_TNL_LSO_IPV6_F CPL_TX_TNL_LSO_IPV6_V(1U)
+
#define ULP_TX_SC_MORE_S 23
#define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S)
#define ULP_TX_SC_MORE_F ULP_TX_SC_MORE_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 60cf9e02de5d..51b18035d691 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -183,6 +183,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */
CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
/* T6 adapters:
*/
@@ -206,6 +207,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */
CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */
CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a7cfece72828..a6df73398d17 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -45,6 +45,9 @@
#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
+#define NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES 4
+#define NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES 16
+
#define MYPORT_BASE 0x1c000
#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
@@ -961,6 +964,10 @@
#define MA_EXT_MEMORY1_BAR_A 0x7808
+#define HMA_MUX_S 5
+#define HMA_MUX_V(x) ((x) << HMA_MUX_S)
+#define HMA_MUX_F HMA_MUX_V(1U)
+
#define EXT_MEM1_BASE_S 16
#define EXT_MEM1_BASE_M 0xfffU
#define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M)
@@ -2504,6 +2511,28 @@
#define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
#define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
+#define MPS_RX_VXLAN_TYPE_A 0x11234
+
+#define VXLAN_EN_S 16
+#define VXLAN_EN_V(x) ((x) << VXLAN_EN_S)
+#define VXLAN_EN_F VXLAN_EN_V(1U)
+
+#define VXLAN_S 0
+#define VXLAN_M 0xffffU
+#define VXLAN_V(x) ((x) << VXLAN_S)
+#define VXLAN_G(x) (((x) >> VXLAN_S) & VXLAN_M)
+
+#define MPS_RX_GENEVE_TYPE_A 0x11238
+
+#define GENEVE_EN_S 16
+#define GENEVE_EN_V(x) ((x) << GENEVE_EN_S)
+#define GENEVE_EN_F GENEVE_EN_V(1U)
+
+#define GENEVE_S 0
+#define GENEVE_M 0xffffU
+#define GENEVE_V(x) ((x) << GENEVE_S)
+#define GENEVE_G(x) (((x) >> GENEVE_S) & GENEVE_M)
+
#define MPS_CLS_TCAM_Y_L_A 0xf000
#define MPS_CLS_TCAM_DATA0_A 0xf000
#define MPS_CLS_TCAM_DATA1_A 0xf004
@@ -2530,8 +2559,14 @@
#define DATAPORTNUM_S 12
#define DATAPORTNUM_M 0xfU
+#define DATAPORTNUM_V(x) ((x) << DATAPORTNUM_S)
#define DATAPORTNUM_G(x) (((x) >> DATAPORTNUM_S) & DATAPORTNUM_M)
+#define DATALKPTYPE_S 10
+#define DATALKPTYPE_M 0x3U
+#define DATALKPTYPE_V(x) ((x) << DATALKPTYPE_S)
+#define DATALKPTYPE_G(x) (((x) >> DATALKPTYPE_S) & DATALKPTYPE_M)
+
#define DATADIPHIT_S 8
#define DATADIPHIT_V(x) ((x) << DATADIPHIT_S)
#define DATADIPHIT_F DATADIPHIT_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 57eb4ad3485d..0d83b4064a78 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -513,6 +513,13 @@ struct fw_ulptx_wr {
u64 cookie;
};
+#define FW_ULPTX_WR_DATA_S 28
+#define FW_ULPTX_WR_DATA_M 0x1
+#define FW_ULPTX_WR_DATA_V(x) ((x) << FW_ULPTX_WR_DATA_S)
+#define FW_ULPTX_WR_DATA_G(x) \
+ (((x) >> FW_ULPTX_WR_DATA_S) & FW_ULPTX_WR_DATA_M)
+#define FW_ULPTX_WR_DATA_F FW_ULPTX_WR_DATA_V(1U)
+
struct fw_tp_wr {
__be32 op_to_immdlen;
__be32 flowid_len16;
@@ -828,6 +835,7 @@ enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_MPS = 0x0020,
FW_LDST_ADDRSPC_FUNC = 0x0028,
FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
+ FW_LDST_ADDRSPC_I2C = 0x0038,
};
enum fw_ldst_mps_fid {
@@ -2059,6 +2067,7 @@ struct fw_vi_cmd {
#define FW_VI_MAC_ADD_MAC 0x3FF
#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
#define FW_VI_MAC_MAC_BASED_FREE 0x3FD
+#define FW_VI_MAC_ID_BASED_FREE 0x3FC
#define FW_CLS_TCAM_NUM_ENTRIES 336
enum fw_vi_mac_smac {
@@ -2075,6 +2084,13 @@ enum fw_vi_mac_result {
FW_VI_MAC_R_F_ACL_CHECK
};
+enum fw_vi_mac_entry_types {
+ FW_VI_MAC_TYPE_EXACTMAC,
+ FW_VI_MAC_TYPE_HASHVEC,
+ FW_VI_MAC_TYPE_RAW,
+ FW_VI_MAC_TYPE_EXACTMAC_VNI,
+};
+
struct fw_vi_mac_cmd {
__be32 op_to_viid;
__be32 freemacs_to_len16;
@@ -2086,6 +2102,13 @@ struct fw_vi_mac_cmd {
struct fw_vi_mac_hash {
__be64 hashvec;
} hash;
+ struct fw_vi_mac_raw {
+ __be32 raw_idx_pkd;
+ __be32 data0_pkd;
+ __be32 data1[2];
+ __be64 data0m_pkd;
+ __be32 data1m[2];
+ } raw;
} u;
};
@@ -2095,6 +2118,12 @@ struct fw_vi_mac_cmd {
#define FW_VI_MAC_CMD_FREEMACS_S 31
#define FW_VI_MAC_CMD_FREEMACS_V(x) ((x) << FW_VI_MAC_CMD_FREEMACS_S)
+#define FW_VI_MAC_CMD_ENTRY_TYPE_S 23
+#define FW_VI_MAC_CMD_ENTRY_TYPE_M 0x7
+#define FW_VI_MAC_CMD_ENTRY_TYPE_V(x) ((x) << FW_VI_MAC_CMD_ENTRY_TYPE_S)
+#define FW_VI_MAC_CMD_ENTRY_TYPE_G(x) \
+ (((x) >> FW_VI_MAC_CMD_ENTRY_TYPE_S) & FW_VI_MAC_CMD_ENTRY_TYPE_M)
+
#define FW_VI_MAC_CMD_HASHVECEN_S 23
#define FW_VI_MAC_CMD_HASHVECEN_V(x) ((x) << FW_VI_MAC_CMD_HASHVECEN_S)
#define FW_VI_MAC_CMD_HASHVECEN_F FW_VI_MAC_CMD_HASHVECEN_V(1U)
@@ -2121,6 +2150,12 @@ struct fw_vi_mac_cmd {
#define FW_VI_MAC_CMD_IDX_G(x) \
(((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M)
+#define FW_VI_MAC_CMD_RAW_IDX_S 16
+#define FW_VI_MAC_CMD_RAW_IDX_M 0xffff
+#define FW_VI_MAC_CMD_RAW_IDX_V(x) ((x) << FW_VI_MAC_CMD_RAW_IDX_S)
+#define FW_VI_MAC_CMD_RAW_IDX_G(x) \
+ (((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M)
+
#define FW_RXMODE_MTU_NO_CHG 65535
struct fw_vi_rxmode_cmd {
@@ -2325,14 +2360,22 @@ struct fw_acl_vlan_cmd {
#define FW_ACL_VLAN_CMD_VFN_S 0
#define FW_ACL_VLAN_CMD_VFN_V(x) ((x) << FW_ACL_VLAN_CMD_VFN_S)
-#define FW_ACL_VLAN_CMD_EN_S 31
-#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S)
+#define FW_ACL_VLAN_CMD_EN_S 31
+#define FW_ACL_VLAN_CMD_EN_M 0x1
+#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S)
+#define FW_ACL_VLAN_CMD_EN_G(x) \
+ (((x) >> FW_ACL_VLAN_CMD_EN_S) & FW_ACL_VLAN_CMD_EN_M)
+#define FW_ACL_VLAN_CMD_EN_F FW_ACL_VLAN_CMD_EN_V(1U)
#define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7
#define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S)
-#define FW_ACL_VLAN_CMD_FM_S 6
-#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S)
+#define FW_ACL_VLAN_CMD_FM_S 6
+#define FW_ACL_VLAN_CMD_FM_M 0x1
+#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S)
+#define FW_ACL_VLAN_CMD_FM_G(x) \
+ (((x) >> FW_ACL_VLAN_CMD_FM_S) & FW_ACL_VLAN_CMD_FM_M)
+#define FW_ACL_VLAN_CMD_FM_F FW_ACL_VLAN_CMD_FM_V(1U)
/* old 16-bit port capabilities bitmap (fw_port_cap16_t) */
enum fw_port_cap {
@@ -2828,6 +2871,7 @@ enum fw_port_type {
FW_PORT_TYPE_CR2_QSFP,
FW_PORT_TYPE_SFP28,
FW_PORT_TYPE_KR_SFP28,
+ FW_PORT_TYPE_KR_XLAUI,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 08c6ddb84a04..5883f09e3804 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -92,6 +92,7 @@ struct sge_rspq;
*/
struct port_info {
struct adapter *adapter; /* our adapter */
+ u32 vlan_id; /* vlan id for VST */
u16 viid; /* virtual interface ID */
s16 xact_addr_filt; /* index of our MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index b48361cfdc78..b7e79e64d2ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -791,6 +791,8 @@ static int cxgb4vf_open(struct net_device *dev)
if (err)
goto err_unwind;
+ pi->vlan_id = t4vf_get_vf_vlan_acl(adapter);
+
netif_tx_start_all_queues(dev);
set_bit(pi->port_id, &adapter->open_device_map);
return 0;
@@ -1229,7 +1231,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
else
return PORT_OTHER;
} else if (port_type == FW_PORT_TYPE_KR4_100G ||
- port_type == FW_PORT_TYPE_KR_SFP28) {
+ port_type == FW_PORT_TYPE_KR_SFP28 ||
+ port_type == FW_PORT_TYPE_KR_XLAUI) {
return PORT_NONE;
}
@@ -1323,6 +1326,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
SET_LMM(25000baseKR_Full);
break;
+ case FW_PORT_TYPE_KR_XLAUI:
+ SET_LMM(Backplane);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+ FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
+ break;
+
case FW_PORT_TYPE_CR2_QSFP:
SET_LMM(FIBRE);
SET_LMM(50000baseSR2_Full);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 14d7e673c656..dfce5df7538e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1202,6 +1202,10 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(qidx >= pi->nqsets);
txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
+ if (pi->vlan_id && !skb_vlan_tag_present(skb))
+ __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
+ pi->vlan_id);
+
/*
* Take this opportunity to reclaim any TX Descriptors whose DMA
* transfers have completed.
@@ -1570,6 +1574,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
{
struct adapter *adapter = rxq->rspq.adapter;
struct sge *s = &adapter->sge;
+ struct port_info *pi;
int ret;
struct sk_buff *skb;
@@ -1586,8 +1591,9 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
+ pi = netdev_priv(skb->dev);
- if (pkt->vlan_ex) {
+ if (pkt->vlan_ex && !pi->vlan_id) {
__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
be16_to_cpu(pkt->vlan));
rxq->stats.vlan_ex++;
@@ -1620,6 +1626,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
struct adapter *adapter = rspq->adapter;
struct sge *s = &adapter->sge;
+ struct port_info *pi;
/*
* If this is a good TCP packet and we have Generic Receive Offload
@@ -1644,6 +1651,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
__skb_pull(skb, s->pktshift);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
+ pi = netdev_priv(skb->dev);
rxq->stats.pkts++;
if (csum_ok && !pkt->err_vec &&
@@ -1660,9 +1668,10 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
} else
skb_checksum_none_assert(skb);
- if (pkt->vlan_ex) {
+ if (pkt->vlan_ex && !pi->vlan_id) {
rxq->stats.vlan_ex++;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ be16_to_cpu(pkt->vlan));
}
netif_receive_skb(skb);
@@ -2619,8 +2628,8 @@ void t4vf_sge_stop(struct adapter *adapter)
int t4vf_sge_init(struct adapter *adapter)
{
struct sge_params *sge_params = &adapter->params.sge;
- u32 fl0 = sge_params->sge_fl_buffer_size[0];
- u32 fl1 = sge_params->sge_fl_buffer_size[1];
+ u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
+ u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
/*
@@ -2628,9 +2637,20 @@ int t4vf_sge_init(struct adapter *adapter)
* the Physical Function Driver. Ideally we should be able to deal
* with _any_ configuration. Practice is different ...
*/
- if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
- fl0, fl1);
+ fl_small_pg, fl_large_pg);
return -EINVAL;
}
if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
@@ -2642,8 +2662,8 @@ int t4vf_sge_init(struct adapter *adapter)
/*
* Now translate the adapter parameters into our internal forms.
*/
- if (fl1)
- s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
? 128 : 64);
s->pktshift = PKTSHIFT_G(sge_params->sge_control);
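The acceptance test above uses the classic bit trick: a nonzero power of two has a single set bit, so v & (v - 1) clears it and yields zero. Zero also passes the test, which matches fl_large_pg having been forced to 0 just above when the large buffer is unusable. A minimal sketch of the same predicate, assuming zero means "large pages disabled":

    /* Sketch: true for 0 (disabled) or any power of two, mirroring the
     * check in t4vf_sge_init() above.
     */
    static inline bool fl_large_pg_ok(u32 v)
    {
            return (v & (v - 1)) == 0;
    }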
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 9cf9c56b0f73..712e8f0c71b4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -413,5 +413,6 @@ int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
int t4vf_prep_adapter(struct adapter *);
int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
unsigned int *naddr, u8 *addr);
+int t4vf_get_vf_vlan_acl(struct adapter *adapter);
#endif /* __T4VF_COMMON_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 67aec59a14e6..798695bf8678 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -2147,3 +2147,31 @@ int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
return ret;
}
+
+/**
+ * t4vf_get_vf_vlan_acl - Get the VLAN ID assigned to the VI of this VF
+ * @adapter: The adapter
+ *
+ * Find the VLAN ID assigned to the VF's VI. The VLAN ID comes from the
+ * host OS via a callback into the PF driver.
+ */
+int t4vf_get_vf_vlan_acl(struct adapter *adapter)
+{
+ struct fw_acl_vlan_cmd cmd;
+ int vlan = 0;
+ int ret = 0;
+
+ cmd.op_to_vfn = htonl(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+
+ /* Note: Do not enable the ACL */
+ cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
+
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
+
+ if (!ret)
+ vlan = be16_to_cpu(cmd.vlanid[0]);
+
+ return vlan;
+}
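Taken together, the cxgb4vf changes implement VST (VLAN switch tagging) on the VF side: the PF-assigned VLAN is fetched once at open time via t4vf_get_vf_vlan_acl(), inserted into untagged transmits, and hidden from the stack on receive. A hedged sketch of what the transmit side amounts to; the helper name is illustrative, not from the patch:

    #include <linux/if_vlan.h>

    /* Illustrative only: tag an untagged skb with the PF-assigned VLAN,
     * as t4vf_eth_xmit() now does inline.
     */
    static void vst_tag_untagged(struct sk_buff *skb, u16 vlan_id)
    {
            if (vlan_id && !skb_vlan_tag_present(skb))
                    __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
    }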
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 410a0a95130b..b3e7fafee3df 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1913,3 +1913,7 @@ static struct platform_driver cs89x0_driver = {
module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
#endif /* CONFIG_CS89x0_PLATFORM */
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 network driver");
+MODULE_AUTHOR("Russell Nelson <nelson@crynwr.com>");
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index f910f0f386d6..977d4c2c759d 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -187,6 +187,7 @@ struct net_device * __init mac89x0_probe(int unit)
unsigned long ioaddr;
unsigned short sig;
int err = -ENODEV;
+ struct nubus_rsrc *fres;
if (!MACH_IS_MAC)
return ERR_PTR(-ENODEV);
@@ -207,8 +208,9 @@ struct net_device * __init mac89x0_probe(int unit)
/* We might have to parameterize this later */
slot = 0xE;
/* Get out now if there's a real NuBus card in slot E */
- if (nubus_find_slot(slot, NULL) != NULL)
- goto out;
+ for_each_func_rsrc(fres)
+ if (fres->board->slot == slot)
+ goto out;
/* The pseudo-ISA bits always live at offset 0x300 (gee,
wonder why...) */
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 6a9527004cb1..9b218f0e5a4c 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -43,6 +43,8 @@
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_WQ_NAPI_BUDGET 256
+
#define ENIC_AIC_LARGE_PKT_DIFF 3
struct enic_msix_entry {
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 462d0ce51240..efb9333c7cf8 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -18,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
#include "enic_res.h"
#include "enic.h"
@@ -578,6 +579,16 @@ static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
return __enic_set_rsskey(enic);
}
+static int enic_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ return 0;
+}
+
static const struct ethtool_ops enic_ethtool_ops = {
.get_drvinfo = enic_get_drvinfo,
.get_msglevel = enic_get_msglevel,
@@ -597,6 +608,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
.get_link_ksettings = enic_get_ksettings,
+ .get_ts_info = enic_get_ts_info,
};
void enic_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index e130fb757e7b..f202ba72a811 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -856,6 +856,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
netif_tx_stop_queue(txq);
+ skb_tx_timestamp(skb);
if (!skb->xmit_more || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
@@ -1499,7 +1500,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int cq_wq = enic_cq_wq(enic, 0);
unsigned int intr = enic_legacy_io_intr();
unsigned int rq_work_to_do = budget;
- unsigned int wq_work_to_do = -1; /* no limit */
+ unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
unsigned int work_done, rq_work_done = 0, wq_work_done;
int err;
@@ -1597,7 +1598,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
struct vnic_wq *wq = &enic->wq[wq_index];
unsigned int cq;
unsigned int intr;
- unsigned int wq_work_to_do = -1; /* clean all desc possible */
+ unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
unsigned int wq_work_done;
unsigned int wq_irq;
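Since wq_work_to_do is unsigned, the old -1 meant "clean until nothing is left", which can pin a CPU in softirq under sustained TX load; the new fixed budget of 256 bounds the work done per poll. A hedged sketch of the pattern, with wq_completed() and wq_reclaim_one() as hypothetical stand-ins for the driver's completion-service routines:

    /* Sketch: bound TX completion work per NAPI poll. wq_completed() and
     * wq_reclaim_one() are hypothetical placeholders, not enic functions.
     */
    static unsigned int clean_wq_bounded(struct vnic_wq *wq,
                                         unsigned int budget)
    {
            unsigned int done = 0;

            while (done < budget && wq_completed(wq)) {
                    wq_reclaim_one(wq);
                    done++;
            }
            return done;
    }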
diff --git a/drivers/net/ethernet/cortina/Kconfig b/drivers/net/ethernet/cortina/Kconfig
new file mode 100644
index 000000000000..89bc4579724d
--- /dev/null
+++ b/drivers/net/ethernet/cortina/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+# Cortina ethernet devices
+
+config NET_VENDOR_CORTINA
+ bool "Cortina Gemini devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+if NET_VENDOR_CORTINA
+
+config GEMINI_ETHERNET
+ tristate "Gemini Gigabit Ethernet support"
+ depends on OF
+ depends on HAS_IOMEM
+ select PHYLIB
+ select CRC32
+ ---help---
+ This driver supports StorLink SL351x (Gemini) dual Gigabit Ethernet.
+
+endif # NET_VENDOR_CORTINA
diff --git a/drivers/net/ethernet/cortina/Makefile b/drivers/net/ethernet/cortina/Makefile
new file mode 100644
index 000000000000..4e86d398a89c
--- /dev/null
+++ b/drivers/net/ethernet/cortina/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the Cortina Gemini network device drivers.
+
+obj-$(CONFIG_GEMINI_ETHERNET) += gemini.o
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
new file mode 100644
index 000000000000..5eb999af2c40
--- /dev/null
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -0,0 +1,2593 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Ethernet device driver for Cortina Systems Gemini SoC
+ * Also known as the StorLink SL3512 and SL3516 (SL351x) or Lepus
+ * Net Engine and Gigabit Ethernet MAC (GMAC)
+ * This hardware contains a TCP Offload Engine (TOE) but currently the
+ * driver does not make use of it.
+ *
+ * Authors:
+ * Linus Walleij <linus.walleij@linaro.org>
+ * Tobias Waldvogel <tobias.waldvogel@gmail.com> (OpenWRT)
+ * Michał Mirosław <mirq-linux@rere.qmqm.pl>
+ * Paulius Zaleckas <paulius.zaleckas@gmail.com>
+ * Giuseppe De Robertis <Giuseppe.DeRobertis@ba.infn.it>
+ * Gary Chen & Ch Hsu Storlink Semiconductor
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/phy.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/tcp.h>
+#include <linux/u64_stats_sync.h>
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+#include "gemini.h"
+
+#define DRV_NAME "gmac-gemini"
+#define DRV_VERSION "1.0"
+
+#define HSIZE_8 0x00
+#define HSIZE_16 0x01
+#define HSIZE_32 0x02
+
+#define HBURST_SINGLE 0x00
+#define HBURST_INCR 0x01
+#define HBURST_INCR4 0x02
+#define HBURST_INCR8 0x03
+
+#define HPROT_DATA_CACHE BIT(0)
+#define HPROT_PRIVILIGED BIT(1)
+#define HPROT_BUFFERABLE BIT(2)
+#define HPROT_CACHABLE BIT(3)
+
+#define DEFAULT_RX_COALESCE_NSECS 0
+#define DEFAULT_GMAC_RXQ_ORDER 9
+#define DEFAULT_GMAC_TXQ_ORDER 8
+#define DEFAULT_RX_BUF_ORDER 11
+#define DEFAULT_NAPI_WEIGHT 64
+#define TX_MAX_FRAGS 16
+#define TX_QUEUE_NUM 1 /* max: 6 */
+#define RX_MAX_ALLOC_ORDER 2
+
+#define GMAC0_IRQ0_2 (GMAC0_TXDERR_INT_BIT | GMAC0_TXPERR_INT_BIT | \
+ GMAC0_RXDERR_INT_BIT | GMAC0_RXPERR_INT_BIT)
+#define GMAC0_IRQ0_TXQ0_INTS (GMAC0_SWTQ00_EOF_INT_BIT | \
+ GMAC0_SWTQ00_FIN_INT_BIT)
+#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
+
+#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+
+/**
+ * struct gmac_queue_page - page buffer per-page info
+ * @page: the page backing the freeq fragments in this slot
+ * @mapping: the DMA base address of the page
+ */
+struct gmac_queue_page {
+ struct page *page;
+ dma_addr_t mapping;
+};
+
+struct gmac_txq {
+ struct gmac_txdesc *ring;
+ struct sk_buff **skb;
+ unsigned int cptr;
+ unsigned int noirq_packets;
+};
+
+struct gemini_ethernet;
+
+struct gemini_ethernet_port {
+ u8 id; /* 0 or 1 */
+
+ struct gemini_ethernet *geth;
+ struct net_device *netdev;
+ struct device *dev;
+ void __iomem *dma_base;
+ void __iomem *gmac_base;
+ struct clk *pclk;
+ struct reset_control *reset;
+ int irq;
+ __le32 mac_addr[3];
+
+ void __iomem *rxq_rwptr;
+ struct gmac_rxdesc *rxq_ring;
+ unsigned int rxq_order;
+
+ struct napi_struct napi;
+ struct hrtimer rx_coalesce_timer;
+ unsigned int rx_coalesce_nsecs;
+ unsigned int freeq_refill;
+ struct gmac_txq txq[TX_QUEUE_NUM];
+ unsigned int txq_order;
+ unsigned int irq_every_tx_packets;
+
+ dma_addr_t rxq_dma_base;
+ dma_addr_t txq_dma_base;
+
+ unsigned int msg_enable;
+ spinlock_t config_lock; /* Locks config register */
+
+ struct u64_stats_sync tx_stats_syncp;
+ struct u64_stats_sync rx_stats_syncp;
+ struct u64_stats_sync ir_stats_syncp;
+
+ struct rtnl_link_stats64 stats;
+ u64 hw_stats[RX_STATS_NUM];
+ u64 rx_stats[RX_STATUS_NUM];
+ u64 rx_csum_stats[RX_CHKSUM_NUM];
+ u64 rx_napi_exits;
+ u64 tx_frag_stats[TX_MAX_FRAGS];
+ u64 tx_frags_linearized;
+ u64 tx_hw_csummed;
+};
+
+struct gemini_ethernet {
+ struct device *dev;
+ void __iomem *base;
+ struct gemini_ethernet_port *port0;
+ struct gemini_ethernet_port *port1;
+
+ spinlock_t irq_lock; /* Locks IRQ-related registers */
+ unsigned int freeq_order;
+ unsigned int freeq_frag_order;
+ struct gmac_rxdesc *freeq_ring;
+ dma_addr_t freeq_dma_base;
+ struct gmac_queue_page *freeq_pages;
+ unsigned int num_freeq_pages;
+ spinlock_t freeq_lock; /* Locks queue from reentrance */
+};
+
+#define GMAC_STATS_NUM ( \
+ RX_STATS_NUM + RX_STATUS_NUM + RX_CHKSUM_NUM + 1 + \
+ TX_MAX_FRAGS + 2)
+
+static const char gmac_stats_strings[GMAC_STATS_NUM][ETH_GSTRING_LEN] = {
+ "GMAC_IN_DISCARDS",
+ "GMAC_IN_ERRORS",
+ "GMAC_IN_MCAST",
+ "GMAC_IN_BCAST",
+ "GMAC_IN_MAC1",
+ "GMAC_IN_MAC2",
+ "RX_STATUS_GOOD_FRAME",
+ "RX_STATUS_TOO_LONG_GOOD_CRC",
+ "RX_STATUS_RUNT_FRAME",
+ "RX_STATUS_SFD_NOT_FOUND",
+ "RX_STATUS_CRC_ERROR",
+ "RX_STATUS_TOO_LONG_BAD_CRC",
+ "RX_STATUS_ALIGNMENT_ERROR",
+ "RX_STATUS_TOO_LONG_BAD_ALIGN",
+ "RX_STATUS_RX_ERR",
+ "RX_STATUS_DA_FILTERED",
+ "RX_STATUS_BUFFER_FULL",
+ "RX_STATUS_11",
+ "RX_STATUS_12",
+ "RX_STATUS_13",
+ "RX_STATUS_14",
+ "RX_STATUS_15",
+ "RX_CHKSUM_IP_UDP_TCP_OK",
+ "RX_CHKSUM_IP_OK_ONLY",
+ "RX_CHKSUM_NONE",
+ "RX_CHKSUM_3",
+ "RX_CHKSUM_IP_ERR_UNKNOWN",
+ "RX_CHKSUM_IP_ERR",
+ "RX_CHKSUM_TCP_UDP_ERR",
+ "RX_CHKSUM_7",
+ "RX_NAPI_EXITS",
+ "TX_FRAGS[1]",
+ "TX_FRAGS[2]",
+ "TX_FRAGS[3]",
+ "TX_FRAGS[4]",
+ "TX_FRAGS[5]",
+ "TX_FRAGS[6]",
+ "TX_FRAGS[7]",
+ "TX_FRAGS[8]",
+ "TX_FRAGS[9]",
+ "TX_FRAGS[10]",
+ "TX_FRAGS[11]",
+ "TX_FRAGS[12]",
+ "TX_FRAGS[13]",
+ "TX_FRAGS[14]",
+ "TX_FRAGS[15]",
+ "TX_FRAGS[16+]",
+ "TX_FRAGS_LINEARIZED",
+ "TX_HW_CSUMMED",
+};
+
+static void gmac_dump_dma_state(struct net_device *netdev);
+
+static void gmac_update_config0_reg(struct net_device *netdev,
+ u32 val, u32 vmask)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ reg = readl(port->gmac_base + GMAC_CONFIG0);
+ reg = (reg & ~vmask) | val;
+ writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+}
+
+static void gmac_enable_tx_rx(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ reg = readl(port->gmac_base + GMAC_CONFIG0);
+ reg &= ~CONFIG0_TX_RX_DISABLE;
+ writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+}
+
+static void gmac_disable_tx_rx(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ val = readl(port->gmac_base + GMAC_CONFIG0);
+ val |= CONFIG0_TX_RX_DISABLE;
+ writel(val, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+
+ mdelay(10); /* let GMAC consume packet */
+}
+
+static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ val = readl(port->gmac_base + GMAC_CONFIG0);
+ val &= ~CONFIG0_FLOW_CTL;
+ if (tx)
+ val |= CONFIG0_FLOW_TX;
+ if (rx)
+ val |= CONFIG0_FLOW_RX;
+ writel(val, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+}
+
+static void gmac_speed_set(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ union gmac_status status, old_status;
+ int pause_tx = 0;
+ int pause_rx = 0;
+
+ status.bits32 = readl(port->gmac_base + GMAC_STATUS);
+ old_status.bits32 = status.bits32;
+ status.bits.link = phydev->link;
+ status.bits.duplex = phydev->duplex;
+
+ switch (phydev->speed) {
+ case 1000:
+ status.bits.speed = GMAC_SPEED_1000;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
+ netdev_info(netdev, "connect to RGMII @ 1Gbit\n");
+ break;
+ case 100:
+ status.bits.speed = GMAC_SPEED_100;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+ netdev_info(netdev, "connect to RGMII @ 100 Mbit\n");
+ break;
+ case 10:
+ status.bits.speed = GMAC_SPEED_10;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+ netdev_info(netdev, "connect to RGMII @ 10 Mbit\n");
+ break;
+ default:
+ netdev_warn(netdev, "Unsupported PHY speed (%d)\n",
+ phydev->speed);
+ }
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ u16 lcladv = phy_read(phydev, MII_ADVERTISE);
+ u16 rmtadv = phy_read(phydev, MII_LPA);
+ u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+ if (cap & FLOW_CTRL_RX)
+ pause_rx = 1;
+ if (cap & FLOW_CTRL_TX)
+ pause_tx = 1;
+ }
+
+ gmac_set_flow_control(netdev, pause_tx, pause_rx);
+
+ if (old_status.bits32 == status.bits32)
+ return;
+
+ if (netif_msg_link(port)) {
+ phy_print_status(phydev);
+ netdev_info(netdev, "link flow control: %s\n",
+ phydev->pause
+ ? (phydev->asym_pause ? "tx" : "both")
+ : (phydev->asym_pause ? "rx" : "none")
+ );
+ }
+
+ gmac_disable_tx_rx(netdev);
+ writel(status.bits32, port->gmac_base + GMAC_STATUS);
+ gmac_enable_tx_rx(netdev);
+}
+
+static int gmac_setup_phy(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_status status = { .bits32 = 0 };
+ struct device *dev = port->dev;
+ struct phy_device *phy;
+
+ phy = of_phy_get_and_connect(netdev,
+ dev->of_node,
+ gmac_speed_set);
+ if (!phy)
+ return -ENODEV;
+ netdev->phydev = phy;
+
+ netdev_info(netdev, "connected to PHY \"%s\"\n",
+ phydev_name(phy));
+ phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
+ (unsigned long)phy->phy_id,
+ phy_modes(phy->interface));
+
+ phy->supported &= PHY_GBIT_FEATURES;
+ phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
+ phy->advertising = phy->supported;
+
+ /* set PHY interface type */
+ switch (phy->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+ status.bits.mii_rmii = GMAC_PHY_MII;
+ netdev_info(netdev, "connect to MII\n");
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+ status.bits.mii_rmii = GMAC_PHY_GMII;
+ netdev_info(netdev, "connect to GMII\n");
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ dev_info(dev, "set GMAC0 and GMAC1 to MII/RGMII mode\n");
+ status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
+ netdev_info(netdev, "connect to RGMII\n");
+ break;
+ default:
+ netdev_err(netdev, "Unsupported MII interface\n");
+ phy_disconnect(phy);
+ netdev->phydev = NULL;
+ return -EINVAL;
+ }
+ writel(status.bits32, port->gmac_base + GMAC_STATUS);
+
+ return 0;
+}
+
+static int gmac_pick_rx_max_len(int max_l3_len)
+{
+ /* index = CONFIG_MAXLEN_XXX values */
+ static const int max_len[8] = {
+ 1536, 1518, 1522, 1542,
+ 9212, 10236, 1518, 1518
+ };
+ int i, n = 5;
+
+ max_l3_len += ETH_HLEN + VLAN_HLEN;
+
+ if (max_l3_len > max_len[n])
+ return -1;
+
+ for (i = 0; i < 5; i++) {
+ if (max_len[i] >= max_l3_len && max_len[i] < max_len[n])
+ n = i;
+ }
+
+ return n;
+}
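The selection loop above picks the smallest table entry that still fits the frame. Worked example with the table as given: for an MTU of 1500, max_l3_len becomes 1518 after adding ETH_HLEN (14) and VLAN_HLEN (4); index 0 (1536) fits and replaces the initial candidate 5 (10236), then index 1 (1518) fits exactly and wins, so the function returns 1. Illustrative checks, not part of the driver:

    static void rx_max_len_examples(void)
    {
            WARN_ON(gmac_pick_rx_max_len(1500) != 1);   /* 1518 limit */
            WARN_ON(gmac_pick_rx_max_len(9000) != 4);   /* 9212 jumbo */
            WARN_ON(gmac_pick_rx_max_len(10220) != -1); /* too large */
    }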
+
+static int gmac_init(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_config0 config0 = { .bits = {
+ .dis_tx = 1,
+ .dis_rx = 1,
+ .ipv4_rx_chksum = 1,
+ .ipv6_rx_chksum = 1,
+ .rx_err_detect = 1,
+ .rgmm_edge = 1,
+ .port0_chk_hwq = 1,
+ .port1_chk_hwq = 1,
+ .port0_chk_toeq = 1,
+ .port1_chk_toeq = 1,
+ .port0_chk_classq = 1,
+ .port1_chk_classq = 1,
+ } };
+ union gmac_ahb_weight ahb_weight = { .bits = {
+ .rx_weight = 1,
+ .tx_weight = 1,
+ .hash_weight = 1,
+ .pre_req = 0x1f,
+ .tq_dv_threshold = 0,
+ } };
+ union gmac_tx_wcr0 hw_weigh = { .bits = {
+ .hw_tq3 = 1,
+ .hw_tq2 = 1,
+ .hw_tq1 = 1,
+ .hw_tq0 = 1,
+ } };
+ union gmac_tx_wcr1 sw_weigh = { .bits = {
+ .sw_tq5 = 1,
+ .sw_tq4 = 1,
+ .sw_tq3 = 1,
+ .sw_tq2 = 1,
+ .sw_tq1 = 1,
+ .sw_tq0 = 1,
+ } };
+ union gmac_config1 config1 = { .bits = {
+ .set_threshold = 16,
+ .rel_threshold = 24,
+ } };
+ union gmac_config2 config2 = { .bits = {
+ .set_threshold = 16,
+ .rel_threshold = 32,
+ } };
+ union gmac_config3 config3 = { .bits = {
+ .set_threshold = 0,
+ .rel_threshold = 0,
+ } };
+ union gmac_config0 tmp;
+ u32 val;
+
+ config0.bits.max_len = gmac_pick_rx_max_len(netdev->mtu);
+ tmp.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+ config0.bits.reserved = tmp.bits.reserved;
+ writel(config0.bits32, port->gmac_base + GMAC_CONFIG0);
+ writel(config1.bits32, port->gmac_base + GMAC_CONFIG1);
+ writel(config2.bits32, port->gmac_base + GMAC_CONFIG2);
+ writel(config3.bits32, port->gmac_base + GMAC_CONFIG3);
+
+ val = readl(port->dma_base + GMAC_AHB_WEIGHT_REG);
+ writel(ahb_weight.bits32, port->dma_base + GMAC_AHB_WEIGHT_REG);
+
+ writel(hw_weigh.bits32,
+ port->dma_base + GMAC_TX_WEIGHTING_CTRL_0_REG);
+ writel(sw_weigh.bits32,
+ port->dma_base + GMAC_TX_WEIGHTING_CTRL_1_REG);
+
+ port->rxq_order = DEFAULT_GMAC_RXQ_ORDER;
+ port->txq_order = DEFAULT_GMAC_TXQ_ORDER;
+ port->rx_coalesce_nsecs = DEFAULT_RX_COALESCE_NSECS;
+
+ /* Request a completion interrupt once per quarter of the queue so
+ * that a stopped queue can be woken up in time
+ */
+ port->irq_every_tx_packets = 1 << (port->txq_order - 2);
+
+ return 0;
+}
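A worked example of the interrupt pacing set up at the end of gmac_init(), using the defaults above:

    /* Worked example with the defaults:
     *
     *   ring entries         = 1 << DEFAULT_GMAC_TXQ_ORDER = 256
     *   irq_every_tx_packets = 1 << (8 - 2)                = 64
     *
     * i.e. one TX completion interrupt per quarter of the ring.
     */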
+
+static void gmac_uninit(struct net_device *netdev)
+{
+ if (netdev->phydev)
+ phy_disconnect(netdev->phydev);
+}
+
+static int gmac_setup_txqs(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int n_txq = netdev->num_tx_queues;
+ struct gemini_ethernet *geth = port->geth;
+ size_t entries = 1 << port->txq_order;
+ struct gmac_txq *txq = port->txq;
+ struct gmac_txdesc *desc_ring;
+ size_t len = n_txq * entries;
+ struct sk_buff **skb_tab;
+ void __iomem *rwptr_reg;
+ unsigned int r;
+ int i;
+
+ rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
+ skb_tab = kcalloc(len, sizeof(*skb_tab), GFP_KERNEL);
+ if (!skb_tab)
+ return -ENOMEM;
+
+ desc_ring = dma_alloc_coherent(geth->dev, len * sizeof(*desc_ring),
+ &port->txq_dma_base, GFP_KERNEL);
+
+ if (!desc_ring) {
+ kfree(skb_tab);
+ return -ENOMEM;
+ }
+
+ if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
+ dev_warn(geth->dev, "TX queue base is not aligned\n");
+ dma_free_coherent(geth->dev, len * sizeof(*desc_ring),
+ desc_ring, port->txq_dma_base);
+ kfree(skb_tab);
+ return -ENOMEM;
+ }
+
+ writel(port->txq_dma_base | port->txq_order,
+ port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG);
+
+ for (i = 0; i < n_txq; i++) {
+ txq->ring = desc_ring;
+ txq->skb = skb_tab;
+ txq->noirq_packets = 0;
+
+ r = readw(rwptr_reg);
+ rwptr_reg += 2;
+ writew(r, rwptr_reg);
+ rwptr_reg += 2;
+ txq->cptr = r;
+
+ txq++;
+ desc_ring += entries;
+ skb_tab += entries;
+ }
+
+ return 0;
+}
+
+static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq,
+ unsigned int r)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int m = (1 << port->txq_order) - 1;
+ struct gemini_ethernet *geth = port->geth;
+ unsigned int c = txq->cptr;
+ union gmac_txdesc_0 word0;
+ union gmac_txdesc_1 word1;
+ unsigned int hwchksum = 0;
+ unsigned long bytes = 0;
+ struct gmac_txdesc *txd;
+ unsigned short nfrags;
+ unsigned int errs = 0;
+ unsigned int pkts = 0;
+ unsigned int word3;
+ dma_addr_t mapping;
+
+ if (c == r)
+ return;
+
+ while (c != r) {
+ txd = txq->ring + c;
+ word0 = txd->word0;
+ word1 = txd->word1;
+ mapping = txd->word2.buf_adr;
+ word3 = txd->word3.bits32;
+
+ dma_unmap_single(geth->dev, mapping,
+ word0.bits.buffer_size, DMA_TO_DEVICE);
+
+ if (word3 & EOF_BIT)
+ dev_kfree_skb(txq->skb[c]);
+
+ c++;
+ c &= m;
+
+ if (!(word3 & SOF_BIT))
+ continue;
+
+ if (!word0.bits.status_tx_ok) {
+ errs++;
+ continue;
+ }
+
+ pkts++;
+ bytes += txd->word1.bits.byte_count;
+
+ if (word1.bits32 & TSS_CHECKUM_ENABLE)
+ hwchksum++;
+
+ nfrags = word0.bits.desc_count - 1;
+ if (nfrags) {
+ if (nfrags >= TX_MAX_FRAGS)
+ nfrags = TX_MAX_FRAGS - 1;
+
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ port->tx_frag_stats[nfrags]++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+ }
+ }
+
+ u64_stats_update_begin(&port->ir_stats_syncp);
+ port->stats.tx_errors += errs;
+ port->stats.tx_packets += pkts;
+ port->stats.tx_bytes += bytes;
+ port->tx_hw_csummed += hwchksum;
+ u64_stats_update_end(&port->ir_stats_syncp);
+
+ txq->cptr = c;
+}
+
+static void gmac_cleanup_txqs(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int n_txq = netdev->num_tx_queues;
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *rwptr_reg;
+ unsigned int r, i;
+
+ rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
+ for (i = 0; i < n_txq; i++) {
+ r = readw(rwptr_reg);
+ rwptr_reg += 2;
+ writew(r, rwptr_reg);
+ rwptr_reg += 2;
+
+ gmac_clean_txq(netdev, port->txq + i, r);
+ }
+ writel(0, port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG);
+
+ kfree(port->txq->skb);
+ dma_free_coherent(geth->dev,
+ n_txq * sizeof(*port->txq->ring) << port->txq_order,
+ port->txq->ring, port->txq_dma_base);
+}
+
+static int gmac_setup_rxq(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ struct nontoe_qhdr __iomem *qhdr;
+
+ qhdr = geth->base + TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id);
+ port->rxq_rwptr = &qhdr->word1;
+
+ /* Remap a slew of memory to use for the RX queue */
+ port->rxq_ring = dma_alloc_coherent(geth->dev,
+ sizeof(*port->rxq_ring) << port->rxq_order,
+ &port->rxq_dma_base, GFP_KERNEL);
+ if (!port->rxq_ring)
+ return -ENOMEM;
+ if (port->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK) {
+ dev_warn(geth->dev, "RX queue base is not aligned\n");
+ dma_free_coherent(geth->dev,
+ sizeof(*port->rxq_ring) << port->rxq_order,
+ port->rxq_ring, port->rxq_dma_base);
+ return -ENOMEM;
+ }
+
+ writel(port->rxq_dma_base | port->rxq_order, &qhdr->word0);
+ writel(0, port->rxq_rwptr);
+ return 0;
+}
+
+static struct gmac_queue_page *
+gmac_get_queue_page(struct gemini_ethernet *geth,
+ struct gemini_ethernet_port *port,
+ dma_addr_t addr)
+{
+ struct gmac_queue_page *gpage;
+ dma_addr_t mapping;
+ int i;
+
+ /* Only look for even pages */
+ mapping = addr & PAGE_MASK;
+
+ if (!geth->freeq_pages) {
+ dev_err(geth->dev, "try to get page with no page list\n");
+ return NULL;
+ }
+
+ /* Look up a ring buffer page from virtual mapping */
+ for (i = 0; i < geth->num_freeq_pages; i++) {
+ gpage = &geth->freeq_pages[i];
+ if (gpage->mapping == mapping)
+ return gpage;
+ }
+
+ return NULL;
+}
+
+static void gmac_cleanup_rxq(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ struct gmac_rxdesc *rxd = port->rxq_ring;
+ struct gmac_queue_page *gpage;
+ struct nontoe_qhdr __iomem *qhdr;
+ void __iomem *dma_reg;
+ void __iomem *ptr_reg;
+ dma_addr_t mapping;
+ union dma_rwptr rw;
+ unsigned int r, w;
+
+ qhdr = geth->base +
+ TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id);
+ dma_reg = &qhdr->word0;
+ ptr_reg = &qhdr->word1;
+
+ rw.bits32 = readl(ptr_reg);
+ r = rw.bits.rptr;
+ w = rw.bits.wptr;
+ writew(r, ptr_reg + 2);
+
+ writel(0, dma_reg);
+
+ /* Loop from read pointer to write pointer of the RX queue
+ * and free up all pages by the queue.
+ */
+ while (r != w) {
+ mapping = rxd[r].word2.buf_adr;
+ r++;
+ r &= ((1 << port->rxq_order) - 1);
+
+ if (!mapping)
+ continue;
+
+ /* Freeq pointers are one page off */
+ gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
+ if (!gpage) {
+ dev_err(geth->dev, "could not find page\n");
+ continue;
+ }
+ /* Release the RX queue reference to the page */
+ put_page(gpage->page);
+ }
+
+ dma_free_coherent(geth->dev, sizeof(*port->rxq_ring) << port->rxq_order,
+ port->rxq_ring, port->rxq_dma_base);
+}
+
+static struct page *geth_freeq_alloc_map_page(struct gemini_ethernet *geth,
+ int pn)
+{
+ struct gmac_rxdesc *freeq_entry;
+ struct gmac_queue_page *gpage;
+ unsigned int fpp_order;
+ unsigned int frag_len;
+ dma_addr_t mapping;
+ struct page *page;
+ int i;
+
+ /* First allocate and DMA map a single page */
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ return NULL;
+
+ mapping = dma_map_single(geth->dev, page_address(page),
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(geth->dev, mapping)) {
+ put_page(page);
+ return NULL;
+ }
+
+ /* Then assign the page mapping (physical address) to the buffer address
+ * in the hardware queue. PAGE_SHIFT on ARM is 12 (1 page is 4096 bytes,
+ * 4k), and the default RX frag order is 11 (fragments are up to 2048
+ * bytes, 2k) so fpp_order (fragments per page order) is default 1. Thus
+ * each page normally needs two entries in the queue.
+ */
+ frag_len = 1 << geth->freeq_frag_order; /* Usually 2048 */
+ fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ freeq_entry = geth->freeq_ring + (pn << fpp_order);
+ dev_dbg(geth->dev, "allocate page %d fragment length %d fragments per page %d, freeq entry %p\n",
+ pn, frag_len, (1 << fpp_order), freeq_entry);
+ for (i = (1 << fpp_order); i > 0; i--) {
+ freeq_entry->word2.buf_adr = mapping;
+ freeq_entry++;
+ mapping += frag_len;
+ }
+
+ /* If the freeq entry already has a page mapped, then unmap it. */
+ gpage = &geth->freeq_pages[pn];
+ if (gpage->page) {
+ mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+ dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+ /* This should be the last reference to the page so it gets
+ * released
+ */
+ put_page(gpage->page);
+ }
+
+ /* Then put our new mapping into the page table */
+ dev_dbg(geth->dev, "page %d, DMA addr: %08x, page %p\n",
+ pn, (unsigned int)mapping, page);
+ gpage->mapping = mapping;
+ gpage->page = page;
+
+ return page;
+}
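To make the arithmetic in the comment above concrete, a hedged sketch under the default assumptions (4 KiB pages, fragment order 11):

    /* Illustrative: the defaults give two 2 KiB fragments per 4 KiB page. */
    static void freeq_geometry_example(void)
    {
            unsigned int frag_order = 11;                     /* 2 KiB frags */
            unsigned int frag_len = 1 << frag_order;          /* 2048 */
            unsigned int fpp_order = PAGE_SHIFT - frag_order; /* 1 */
            unsigned int per_page = 1 << fpp_order;           /* 2 */

            /* page n backs freeq entries n * per_page .. n * per_page + 1,
             * at DMA addresses mapping and mapping + frag_len
             */
            (void)frag_len;
            (void)per_page;
    }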
+
+/**
+ * geth_fill_freeq() - Fill the freeq with empty fragments to use
+ * @geth: the ethernet adapter
+ * @refill: whether to reset the queue by filling in all freeq entries or
+ * just top it up; the refill interrupt normally fires when the queue is
+ * half empty
+ */
+static unsigned int geth_fill_freeq(struct gemini_ethernet *geth, bool refill)
+{
+ unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ unsigned int count = 0;
+ unsigned int pn, epn;
+ unsigned long flags;
+ union dma_rwptr rw;
+ unsigned int m_pn;
+
+ /* Mask for page */
+ m_pn = (1 << (geth->freeq_order - fpp_order)) - 1;
+
+ spin_lock_irqsave(&geth->freeq_lock, flags);
+
+ rw.bits32 = readl(geth->base + GLOBAL_SWFQ_RWPTR_REG);
+ pn = (refill ? rw.bits.wptr : rw.bits.rptr) >> fpp_order;
+ epn = (rw.bits.rptr >> fpp_order) - 1;
+ epn &= m_pn;
+
+ /* Loop over the freeq ring buffer entries */
+ while (pn != epn) {
+ struct gmac_queue_page *gpage;
+ struct page *page;
+
+ gpage = &geth->freeq_pages[pn];
+ page = gpage->page;
+
+ dev_dbg(geth->dev, "fill entry %d page ref count %d add %d refs\n",
+ pn, page_ref_count(page), 1 << fpp_order);
+
+ if (page_ref_count(page) > 1) {
+ unsigned int fl = (pn - epn) & m_pn;
+
+ if (fl > 64 >> fpp_order)
+ break;
+
+ page = geth_freeq_alloc_map_page(geth, pn);
+ if (!page)
+ break;
+ }
+
+ /* Add one reference per fragment in the page */
+ page_ref_add(page, 1 << fpp_order);
+ count += 1 << fpp_order;
+ pn++;
+ pn &= m_pn;
+ }
+
+ writew(pn << fpp_order, geth->base + GLOBAL_SWFQ_RWPTR_REG + 2);
+
+ spin_unlock_irqrestore(&geth->freeq_lock, flags);
+
+ return count;
+}
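The loop above uses the page reference count as an in-flight marker: every fragment handed to hardware holds one reference, so a count above 1 means fragments from that page are still out with skbs. Such a busy page is force-replaced with a fresh allocation only while no more than 64 >> fpp_order pages stand between one page before the read pointer and the fill position, i.e. while hardware headroom is below roughly 64 fragments; past that floor the pass simply stops at the first busy page. A hedged reading of the guard, factored out for clarity:

    /* Illustrative: whether a busy page (refcount > 1) should be replaced
     * rather than ending the fill pass. queued_pages corresponds to
     * (pn - epn) & m_pn above; with fpp_order = 1 the floor is
     * 64 >> 1 = 32 pages, i.e. 64 fragments of headroom.
     */
    static bool force_replace_busy_page(unsigned int queued_pages,
                                        unsigned int fpp_order)
    {
            return queued_pages <= (64 >> fpp_order);
    }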
+
+static int geth_setup_freeq(struct gemini_ethernet *geth)
+{
+ unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ unsigned int frag_len = 1 << geth->freeq_frag_order;
+ unsigned int len = 1 << geth->freeq_order;
+ unsigned int pages = len >> fpp_order;
+ union queue_threshold qt;
+ union dma_skb_size skbsz;
+ unsigned int filled;
+ unsigned int pn;
+
+ geth->freeq_ring = dma_alloc_coherent(geth->dev,
+ sizeof(*geth->freeq_ring) << geth->freeq_order,
+ &geth->freeq_dma_base, GFP_KERNEL);
+ if (!geth->freeq_ring)
+ return -ENOMEM;
+ if (geth->freeq_dma_base & ~DMA_Q_BASE_MASK) {
+ dev_warn(geth->dev, "queue ring base is not aligned\n");
+ goto err_freeq;
+ }
+
+ /* Allocate a mapping to page look-up index */
+ geth->freeq_pages = kzalloc(pages * sizeof(*geth->freeq_pages),
+ GFP_KERNEL);
+ if (!geth->freeq_pages)
+ goto err_freeq;
+ geth->num_freeq_pages = pages;
+
+ dev_info(geth->dev, "allocate %d pages for queue\n", pages);
+ for (pn = 0; pn < pages; pn++)
+ if (!geth_freeq_alloc_map_page(geth, pn))
+ goto err_freeq_alloc;
+
+ filled = geth_fill_freeq(geth, false);
+ if (!filled)
+ goto err_freeq_alloc;
+
+ qt.bits32 = readl(geth->base + GLOBAL_QUEUE_THRESHOLD_REG);
+ qt.bits.swfq_empty = 32;
+ writel(qt.bits32, geth->base + GLOBAL_QUEUE_THRESHOLD_REG);
+
+ skbsz.bits.sw_skb_size = 1 << geth->freeq_frag_order;
+ writel(skbsz.bits32, geth->base + GLOBAL_DMA_SKB_SIZE_REG);
+ writel(geth->freeq_dma_base | geth->freeq_order,
+ geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+
+ return 0;
+
+err_freeq_alloc:
+ while (pn > 0) {
+ struct gmac_queue_page *gpage;
+ dma_addr_t mapping;
+
+ --pn;
+ mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+ dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+ gpage = &geth->freeq_pages[pn];
+ put_page(gpage->page);
+ }
+
+ kfree(geth->freeq_pages);
+err_freeq:
+ dma_free_coherent(geth->dev,
+ sizeof(*geth->freeq_ring) << geth->freeq_order,
+ geth->freeq_ring, geth->freeq_dma_base);
+ geth->freeq_ring = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * geth_cleanup_freeq() - cleanup the DMA mappings and free the queue
+ * @geth: the Gemini global ethernet state
+ */
+static void geth_cleanup_freeq(struct gemini_ethernet *geth)
+{
+ unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
+ unsigned int frag_len = 1 << geth->freeq_frag_order;
+ unsigned int len = 1 << geth->freeq_order;
+ unsigned int pages = len >> fpp_order;
+ unsigned int pn;
+
+ writew(readw(geth->base + GLOBAL_SWFQ_RWPTR_REG),
+ geth->base + GLOBAL_SWFQ_RWPTR_REG + 2);
+ writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+
+ for (pn = 0; pn < pages; pn++) {
+ struct gmac_queue_page *gpage;
+ dma_addr_t mapping;
+
+ mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
+ dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
+
+ gpage = &geth->freeq_pages[pn];
+ while (page_ref_count(gpage->page) > 0)
+ put_page(gpage->page);
+ }
+
+ kfree(geth->freeq_pages);
+
+ dma_free_coherent(geth->dev,
+ sizeof(*geth->freeq_ring) << geth->freeq_order,
+ geth->freeq_ring, geth->freeq_dma_base);
+}
+
+/**
+ * geth_resize_freeq() - resize the software queue depth
+ * @port: the port requesting the change
+ *
+ * This gets called at least once during probe() so the device queue gets
+ * "resized" from the hardware defaults. Since both ports/net devices share
+ * the same hardware queue, some synchronization between the ports is
+ * needed.
+ */
+static int geth_resize_freeq(struct gemini_ethernet_port *port)
+{
+ struct gemini_ethernet *geth = port->geth;
+ struct net_device *netdev = port->netdev;
+ struct gemini_ethernet_port *other_port;
+ struct net_device *other_netdev;
+ unsigned int new_size = 0;
+ unsigned int new_order;
+ unsigned long flags;
+ u32 en;
+ int ret;
+
+ if (netdev->dev_id == 0)
+ other_netdev = geth->port1->netdev;
+ else
+ other_netdev = geth->port0->netdev;
+
+ if (other_netdev && netif_running(other_netdev))
+ return -EBUSY;
+
+ new_size = 1 << (port->rxq_order + 1);
+ netdev_dbg(netdev, "port %d size: %d order %d\n",
+ netdev->dev_id,
+ new_size,
+ port->rxq_order);
+ if (other_netdev) {
+ other_port = netdev_priv(other_netdev);
+ new_size += 1 << (other_port->rxq_order + 1);
+ netdev_dbg(other_netdev, "port %d size: %d order %d\n",
+ other_netdev->dev_id,
+ (1 << (other_port->rxq_order + 1)),
+ other_port->rxq_order);
+ }
+
+ new_order = min(15, ilog2(new_size - 1) + 1);
+ dev_dbg(geth->dev, "set shared queue to size %d order %d\n",
+ new_size, new_order);
+ if (geth->freeq_order == new_order)
+ return 0;
+
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
+ /* Disable the software queue IRQs */
+ en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ en &= ~SWFQ_EMPTY_INT_BIT;
+ writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+ /* Drop the old queue */
+ if (geth->freeq_ring)
+ geth_cleanup_freeq(geth);
+
+ /* Allocate a new queue with the desired order */
+ geth->freeq_order = new_order;
+ ret = geth_setup_freeq(geth);
+
+ /* Restart the interrupts - NOTE if this is the first resize
+ * after probe(), this is where the interrupts get turned on
+ * in the first place.
+ */
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ en |= SWFQ_EMPTY_INT_BIT;
+ writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+ return ret;
+}
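The order computation above rounds the combined queue size up to the next power of two, capped at 2^15 entries: ilog2() is a floor, so ilog2(new_size - 1) + 1 is the ceiling of log2 for any new_size >= 2. Worked examples, assuming two ports each contributing 1 << (rxq_order + 1) entries:

    /* Worked examples for the rounding (ilog2() is a floor):
     *
     *   new_size = 2048: ilog2(2047) + 1 = 10 + 1 = 11  -> kept at 2048
     *   new_size = 2049: ilog2(2048) + 1 = 11 + 1 = 12  -> grows to 4096
     *
     * Orders above 15 (32768 entries) are clamped by the min(15, ...).
     */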
+
+static void gmac_tx_irq_enable(struct net_device *netdev,
+ unsigned int txq, int en)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ u32 val, mask;
+
+ netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+
+ mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
+
+ if (en)
+ writel(mask, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ val = en ? val | mask : val & ~mask;
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+}
+
+static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
+{
+ struct netdev_queue *ntxq = netdev_get_tx_queue(netdev, txq_num);
+
+ gmac_tx_irq_enable(netdev, txq_num, 0);
+ netif_tx_wake_queue(ntxq);
+}
+
+static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
+ struct gmac_txq *txq, unsigned short *desc)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct skb_shared_info *skb_si = skb_shinfo(skb);
+ unsigned short m = (1 << port->txq_order) - 1;
+ short frag, last_frag = skb_si->nr_frags - 1;
+ struct gemini_ethernet *geth = port->geth;
+ unsigned int word1, word3, buflen;
+ unsigned short w = *desc;
+ struct gmac_txdesc *txd;
+ skb_frag_t *skb_frag;
+ dma_addr_t mapping;
+ unsigned short mtu;
+ void *buffer;
+
+ mtu = ETH_HLEN;
+ mtu += netdev->mtu;
+ if (skb->protocol == htons(ETH_P_8021Q))
+ mtu += VLAN_HLEN;
+
+ word1 = skb->len;
+ word3 = SOF_BIT;
+
+ if (word1 > mtu) {
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mtu;
+ }
+
+ if (skb->ip_summed != CHECKSUM_NONE) {
+ int tcp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ word1 |= TSS_IP_CHKSUM_BIT;
+ tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
+ } else { /* IPv6 */
+ word1 |= TSS_IPV6_ENABLE_BIT;
+ tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
+ }
+
+ word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
+ }
+
+ frag = -1;
+ while (frag <= last_frag) {
+ if (frag == -1) {
+ buffer = skb->data;
+ buflen = skb_headlen(skb);
+ } else {
+ skb_frag = skb_si->frags + frag;
+ buffer = page_address(skb_frag_page(skb_frag)) +
+ skb_frag->page_offset;
+ buflen = skb_frag->size;
+ }
+
+ if (frag == last_frag) {
+ word3 |= EOF_BIT;
+ txq->skb[w] = skb;
+ }
+
+ mapping = dma_map_single(geth->dev, buffer, buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(geth->dev, mapping))
+ goto map_error;
+
+ txd = txq->ring + w;
+ txd->word0.bits32 = buflen;
+ txd->word1.bits32 = word1;
+ txd->word2.buf_adr = mapping;
+ txd->word3.bits32 = word3;
+
+ word3 &= MTU_SIZE_BIT_MASK;
+ w++;
+ w &= m;
+ frag++;
+ }
+
+ *desc = w;
+ return 0;
+
+map_error:
+ while (w != *desc) {
+ w--;
+ w &= m;
+
+ dma_unmap_page(geth->dev, txq->ring[w].word2.buf_adr,
+ txq->ring[w].word0.bits.buffer_size,
+ DMA_TO_DEVICE);
+ }
+ return -ENOMEM;
+}
+
+static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned short m = (1 << port->txq_order) - 1;
+ struct netdev_queue *ntxq;
+ unsigned short r, w, d;
+ void __iomem *ptr_reg;
+ struct gmac_txq *txq;
+ int txq_num, nfrags;
+ union dma_rwptr rw;
+
+ SKB_FRAG_ASSERT(skb);
+
+ if (skb->len >= 0x10000)
+ goto out_drop_free;
+
+ txq_num = skb_get_queue_mapping(skb);
+ ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE_PTR_REG(txq_num);
+ txq = &port->txq[txq_num];
+ ntxq = netdev_get_tx_queue(netdev, txq_num);
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ rw.bits32 = readl(ptr_reg);
+ r = rw.bits.rptr;
+ w = rw.bits.wptr;
+
+ d = txq->cptr - w - 1;
+ d &= m;
+
+ if (d < nfrags + 2) {
+ gmac_clean_txq(netdev, txq, r);
+ d = txq->cptr - w - 1;
+ d &= m;
+
+ if (d < nfrags + 2) {
+ netif_tx_stop_queue(ntxq);
+
+ d = txq->cptr + nfrags + 16;
+ d &= m;
+ txq->ring[d].word3.bits.eofie = 1;
+ gmac_tx_irq_enable(netdev, txq_num, 1);
+
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ netdev->stats.tx_fifo_errors++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ if (gmac_map_tx_bufs(netdev, skb, txq, &w)) {
+ if (skb_linearize(skb))
+ goto out_drop;
+
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ port->tx_frags_linearized++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+
+ if (gmac_map_tx_bufs(netdev, skb, txq, &w))
+ goto out_drop_free;
+ }
+
+ writew(w, ptr_reg + 2);
+
+ gmac_clean_txq(netdev, txq, r);
+ return NETDEV_TX_OK;
+
+out_drop_free:
+ dev_kfree_skb(skb);
+out_drop:
+ u64_stats_update_begin(&port->tx_stats_syncp);
+ port->stats.tx_dropped++;
+ u64_stats_update_end(&port->tx_stats_syncp);
+ return NETDEV_TX_OK;
+}
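The descriptor accounting in gmac_start_xmit() compares free ring slots against nfrags + 2 (head fragment plus a safety margin). A hedged sketch of the free-slot computation, factored out for clarity; the helper is illustrative:

    /* Illustrative: slots available before the write pointer would run
     * into the first unreclaimed descriptor. With txq_order = 8,
     * mask = 255; cptr = 10, w = 5 gives (10 - 5 - 1) & 255 = 4 slots.
     */
    static unsigned short txq_free_slots(unsigned short cptr,
                                         unsigned short w,
                                         unsigned short mask)
    {
            return (cptr - w - 1) & mask;
    }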
+
+static void gmac_tx_timeout(struct net_device *netdev)
+{
+ netdev_err(netdev, "Tx timeout\n");
+ gmac_dump_dma_state(netdev);
+}
+
+static void gmac_enable_irq(struct net_device *netdev, int enable)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
+ u32 val, mask;
+
+ netdev_info(netdev, "%s device %d %s\n", __func__,
+ netdev->dev_id, enable ? "enable" : "disable");
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
+ mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2);
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+
+ mask = DEFAULT_Q0_INT_BIT << netdev->dev_id;
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+
+ mask = GMAC0_IRQ4_8 << (netdev->dev_id * 8);
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+static void gmac_enable_rx_irq(struct net_device *netdev, int enable)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
+ u32 val, mask;
+
+ netdev_dbg(netdev, "%s device %d %s\n", __func__, netdev->dev_id,
+ enable ? "enable" : "disable");
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ mask = DEFAULT_Q0_INT_BIT << netdev->dev_id;
+
+ val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ val = enable ? (val | mask) : (val & ~mask);
+ writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+static struct sk_buff *gmac_skb_if_good_frame(struct gemini_ethernet_port *port,
+ union gmac_rxdesc_0 word0,
+ unsigned int frame_len)
+{
+ unsigned int rx_csum = word0.bits.chksum_status;
+ unsigned int rx_status = word0.bits.status;
+ struct sk_buff *skb = NULL;
+
+ port->rx_stats[rx_status]++;
+ port->rx_csum_stats[rx_csum]++;
+
+ if (word0.bits.derr || word0.bits.perr ||
+ rx_status || frame_len < ETH_ZLEN ||
+ rx_csum >= RX_CHKSUM_IP_ERR_UNKNOWN) {
+ port->stats.rx_errors++;
+
+ if (frame_len < ETH_ZLEN || RX_ERROR_LENGTH(rx_status))
+ port->stats.rx_length_errors++;
+ if (RX_ERROR_OVER(rx_status))
+ port->stats.rx_over_errors++;
+ if (RX_ERROR_CRC(rx_status))
+ port->stats.rx_crc_errors++;
+ if (RX_ERROR_FRAME(rx_status))
+ port->stats.rx_frame_errors++;
+ return NULL;
+ }
+
+ skb = napi_get_frags(&port->napi);
+ if (!skb)
+ goto update_exit;
+
+ if (rx_csum == RX_CHKSUM_IP_UDP_TCP_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+update_exit:
+ port->stats.rx_bytes += frame_len;
+ port->stats.rx_packets++;
+ return skb;
+}
+
+static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned short m = (1 << port->rxq_order) - 1;
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *ptr_reg = port->rxq_rwptr;
+ unsigned int frame_len, frag_len;
+ struct gmac_rxdesc *rx = NULL;
+ struct gmac_queue_page *gpage;
+ static struct sk_buff *skb;
+ union gmac_rxdesc_0 word0;
+ union gmac_rxdesc_1 word1;
+ union gmac_rxdesc_3 word3;
+ struct page *page = NULL;
+ unsigned int page_offs;
+ unsigned short r, w;
+ union dma_rwptr rw;
+ dma_addr_t mapping;
+ int frag_nr = 0;
+
+ rw.bits32 = readl(ptr_reg);
+ /* Reset interrupt as all packets until here are taken into account */
+ writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
+ geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ r = rw.bits.rptr;
+ w = rw.bits.wptr;
+
+ while (budget && w != r) {
+ rx = port->rxq_ring + r;
+ word0 = rx->word0;
+ word1 = rx->word1;
+ mapping = rx->word2.buf_adr;
+ word3 = rx->word3;
+
+ r++;
+ r &= m;
+
+ frag_len = word0.bits.buffer_size;
+ frame_len = word1.bits.byte_count;
+ page_offs = mapping & ~PAGE_MASK;
+
+ if (!mapping) {
+ netdev_err(netdev,
+ "rxq[%u]: HW BUG: zero DMA desc\n", r);
+ goto err_drop;
+ }
+
+ /* Freeq pointers are one page off */
+ gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
+ if (!gpage) {
+ dev_err(geth->dev, "could not find mapping\n");
+ continue;
+ }
+ page = gpage->page;
+
+ if (word3.bits32 & SOF_BIT) {
+ if (skb) {
+ napi_free_frags(&port->napi);
+ port->stats.rx_dropped++;
+ }
+
+ skb = gmac_skb_if_good_frame(port, word0, frame_len);
+ if (!skb)
+ goto err_drop;
+
+ page_offs += NET_IP_ALIGN;
+ frag_len -= NET_IP_ALIGN;
+ frag_nr = 0;
+
+ } else if (!skb) {
+ put_page(page);
+ continue;
+ }
+
+ if (word3.bits32 & EOF_BIT)
+ frag_len = frame_len - skb->len;
+
+ /* append page frag to skb */
+ if (frag_nr == MAX_SKB_FRAGS)
+ goto err_drop;
+
+ if (frag_len == 0)
+ netdev_err(netdev, "Received fragment with len = 0\n");
+
+ skb_fill_page_desc(skb, frag_nr, page, page_offs, frag_len);
+ skb->len += frag_len;
+ skb->data_len += frag_len;
+ skb->truesize += frag_len;
+ frag_nr++;
+
+ if (word3.bits32 & EOF_BIT) {
+ napi_gro_frags(&port->napi);
+ skb = NULL;
+ --budget;
+ }
+ continue;
+
+err_drop:
+ if (skb) {
+ napi_free_frags(&port->napi);
+ skb = NULL;
+ }
+
+ if (mapping)
+ put_page(page);
+
+ port->stats.rx_dropped++;
+ }
+
+ writew(r, ptr_reg);
+ return budget;
+}
+
+static int gmac_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct gemini_ethernet_port *port = netdev_priv(napi->dev);
+ struct gemini_ethernet *geth = port->geth;
+ unsigned int freeq_threshold;
+ unsigned int received;
+
+ freeq_threshold = 1 << (geth->freeq_order - 1);
+ u64_stats_update_begin(&port->rx_stats_syncp);
+
+ received = gmac_rx(napi->dev, budget);
+ if (received < budget) {
+ napi_gro_flush(napi, false);
+ napi_complete_done(napi, received);
+ gmac_enable_rx_irq(napi->dev, 1);
+ ++port->rx_napi_exits;
+ }
+
+ port->freeq_refill += (budget - received);
+ if (port->freeq_refill > freeq_threshold) {
+ port->freeq_refill -= freeq_threshold;
+ geth_fill_freeq(geth, true);
+ }
+
+ u64_stats_update_end(&port->rx_stats_syncp);
+ return received;
+}
+
+static void gmac_dump_dma_state(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *ptr_reg;
+ u32 reg[5];
+
+ /* Interrupt status */
+ reg[0] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+ reg[1] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ reg[2] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_2_REG);
+ reg[3] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_3_REG);
+ reg[4] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+ netdev_err(netdev, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3], reg[4]);
+
+ /* Interrupt enable */
+ reg[0] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ reg[1] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ reg[2] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
+ reg[3] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG);
+ reg[4] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ netdev_err(netdev, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3], reg[4]);
+
+ /* RX DMA status */
+ reg[0] = readl(port->dma_base + GMAC_DMA_RX_FIRST_DESC_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_RX_CURR_DESC_REG);
+ reg[2] = GET_RPTR(port->rxq_rwptr);
+ reg[3] = GET_WPTR(port->rxq_rwptr);
+ netdev_err(netdev, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ reg[0] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD0_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD1_REG);
+ reg[2] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD2_REG);
+ reg[3] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD3_REG);
+ netdev_err(netdev, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ /* TX DMA status */
+ ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
+
+ reg[0] = readl(port->dma_base + GMAC_DMA_TX_FIRST_DESC_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_TX_CURR_DESC_REG);
+ reg[2] = GET_RPTR(ptr_reg);
+ reg[3] = GET_WPTR(ptr_reg);
+ netdev_err(netdev, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ reg[0] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD0_REG);
+ reg[1] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD1_REG);
+ reg[2] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD2_REG);
+ reg[3] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD3_REG);
+ netdev_err(netdev, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ reg[0], reg[1], reg[2], reg[3]);
+
+ /* FREE queues status */
+ ptr_reg = geth->base + GLOBAL_SWFQ_RWPTR_REG;
+
+ reg[0] = GET_RPTR(ptr_reg);
+ reg[1] = GET_WPTR(ptr_reg);
+
+ ptr_reg = geth->base + GLOBAL_HWFQ_RWPTR_REG;
+
+ reg[2] = GET_RPTR(ptr_reg);
+ reg[3] = GET_WPTR(ptr_reg);
+ netdev_err(netdev, "FQ SW ptr: %u %u, HW ptr: %u %u\n",
+ reg[0], reg[1], reg[2], reg[3]);
+}
+
+static void gmac_update_hw_stats(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int rx_discards, rx_mcast, rx_bcast;
+ struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
+
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ u64_stats_update_begin(&port->ir_stats_syncp);
+
+ rx_discards = readl(port->gmac_base + GMAC_IN_DISCARDS);
+ port->hw_stats[0] += rx_discards;
+ port->hw_stats[1] += readl(port->gmac_base + GMAC_IN_ERRORS);
+ rx_mcast = readl(port->gmac_base + GMAC_IN_MCAST);
+ port->hw_stats[2] += rx_mcast;
+ rx_bcast = readl(port->gmac_base + GMAC_IN_BCAST);
+ port->hw_stats[3] += rx_bcast;
+ port->hw_stats[4] += readl(port->gmac_base + GMAC_IN_MAC1);
+ port->hw_stats[5] += readl(port->gmac_base + GMAC_IN_MAC2);
+
+ port->stats.rx_missed_errors += rx_discards;
+ port->stats.multicast += rx_mcast;
+ port->stats.multicast += rx_bcast;
+
+ writel(GMAC0_MIB_INT_BIT << (netdev->dev_id * 8),
+ geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+ u64_stats_update_end(&port->ir_stats_syncp);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+}
+
+/**
+ * gmac_get_intr_flags() - get pending and enabled interrupt flags for a port
+ * @netdev: the net device for the port to get flags from
+ * @i: the interrupt status register 0..4
+ */
+static u32 gmac_get_intr_flags(struct net_device *netdev, int i)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ struct gemini_ethernet *geth = port->geth;
+ void __iomem *irqif_reg, *irqen_reg;
+ unsigned int offs, val;
+
+ /* Calculate the offset using the stride of the status registers */
+ offs = i * (GLOBAL_INTERRUPT_STATUS_1_REG -
+ GLOBAL_INTERRUPT_STATUS_0_REG);
+
+ irqif_reg = geth->base + GLOBAL_INTERRUPT_STATUS_0_REG + offs;
+ irqen_reg = geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG + offs;
+
+ val = readl(irqif_reg) & readl(irqen_reg);
+ return val;
+}
+
+static enum hrtimer_restart gmac_coalesce_delay_expired(struct hrtimer *timer)
+{
+ struct gemini_ethernet_port *port =
+ container_of(timer, struct gemini_ethernet_port,
+ rx_coalesce_timer);
+
+ napi_schedule(&port->napi);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gmac_irq(int irq, void *data)
+{
+ struct gemini_ethernet_port *port;
+ struct net_device *netdev = data;
+ struct gemini_ethernet *geth;
+ u32 val, orr = 0;
+
+ port = netdev_priv(netdev);
+ geth = port->geth;
+
+ val = gmac_get_intr_flags(netdev, 0);
+ orr |= val;
+
+ if (val & (GMAC0_IRQ0_2 << (netdev->dev_id * 2))) {
+ /* Oh, crap */
+ netdev_err(netdev, "hw failure/sw bug\n");
+ gmac_dump_dma_state(netdev);
+
+ /* don't know how to recover, just reduce losses */
+ gmac_enable_irq(netdev, 0);
+ return IRQ_HANDLED;
+ }
+
+ if (val & (GMAC0_IRQ0_TXQ0_INTS << (netdev->dev_id * 6)))
+ gmac_tx_irq(netdev, 0);
+
+ val = gmac_get_intr_flags(netdev, 1);
+ orr |= val;
+
+ if (val & (DEFAULT_Q0_INT_BIT << netdev->dev_id)) {
+ gmac_enable_rx_irq(netdev, 0);
+
+ if (!port->rx_coalesce_nsecs) {
+ napi_schedule(&port->napi);
+ } else {
+ ktime_t ktime;
+
+ ktime = ktime_set(0, port->rx_coalesce_nsecs);
+ hrtimer_start(&port->rx_coalesce_timer, ktime,
+ HRTIMER_MODE_REL);
+ }
+ }
+
+ val = gmac_get_intr_flags(netdev, 4);
+ orr |= val;
+
+ if (val & (GMAC0_MIB_INT_BIT << (netdev->dev_id * 8)))
+ gmac_update_hw_stats(netdev);
+
+ if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
+ writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
+ geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+ spin_lock(&geth->irq_lock);
+ u64_stats_update_begin(&port->ir_stats_syncp);
+ ++port->stats.rx_fifo_errors;
+ u64_stats_update_end(&port->ir_stats_syncp);
+ spin_unlock(&geth->irq_lock);
+ }
+
+ return orr ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void gmac_start_dma(struct gemini_ethernet_port *port)
+{
+ void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG;
+ union gmac_dma_ctrl dma_ctrl;
+
+ dma_ctrl.bits32 = readl(dma_ctrl_reg);
+ dma_ctrl.bits.rd_enable = 1;
+ dma_ctrl.bits.td_enable = 1;
+ dma_ctrl.bits.loopback = 0;
+ dma_ctrl.bits.drop_small_ack = 0;
+ dma_ctrl.bits.rd_insert_bytes = NET_IP_ALIGN;
+ dma_ctrl.bits.rd_prot = HPROT_DATA_CACHE | HPROT_PRIVILIGED;
+ dma_ctrl.bits.rd_burst_size = HBURST_INCR8;
+ dma_ctrl.bits.rd_bus = HSIZE_8;
+ dma_ctrl.bits.td_prot = HPROT_DATA_CACHE;
+ dma_ctrl.bits.td_burst_size = HBURST_INCR8;
+ dma_ctrl.bits.td_bus = HSIZE_8;
+
+ writel(dma_ctrl.bits32, dma_ctrl_reg);
+}
+
+static void gmac_stop_dma(struct gemini_ethernet_port *port)
+{
+ void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG;
+ union gmac_dma_ctrl dma_ctrl;
+
+ dma_ctrl.bits32 = readl(dma_ctrl_reg);
+ dma_ctrl.bits.rd_enable = 0;
+ dma_ctrl.bits.td_enable = 0;
+ writel(dma_ctrl.bits32, dma_ctrl_reg);
+}
+
+static int gmac_open(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ int err;
+
+ if (!netdev->phydev) {
+ err = gmac_setup_phy(netdev);
+ if (err) {
+ netif_err(port, ifup, netdev,
+ "PHY init failed: %d\n", err);
+ return err;
+ }
+ }
+
+ err = request_irq(netdev->irq, gmac_irq,
+ IRQF_SHARED, netdev->name, netdev);
+ if (err) {
+ netdev_err(netdev, "no IRQ\n");
+ return err;
+ }
+
+ netif_carrier_off(netdev);
+ phy_start(netdev->phydev);
+
+ err = geth_resize_freeq(port);
+ if (err) {
+ netdev_err(netdev, "could not resize freeq\n");
+ goto err_stop_phy;
+ }
+
+ err = gmac_setup_rxq(netdev);
+ if (err) {
+ netdev_err(netdev, "could not setup RXQ\n");
+ goto err_stop_phy;
+ }
+
+ err = gmac_setup_txqs(netdev);
+ if (err) {
+ netdev_err(netdev, "could not setup TXQs\n");
+ gmac_cleanup_rxq(netdev);
+ goto err_stop_phy;
+ }
+
+ napi_enable(&port->napi);
+
+ gmac_start_dma(port);
+ gmac_enable_irq(netdev, 1);
+ gmac_enable_tx_rx(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+
+ netdev_info(netdev, "opened\n");
+
+ return 0;
+
+err_stop_phy:
+ phy_stop(netdev->phydev);
+ free_irq(netdev->irq, netdev);
+ return err;
+}
+
+static int gmac_stop(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ hrtimer_cancel(&port->rx_coalesce_timer);
+ netif_tx_stop_all_queues(netdev);
+ gmac_disable_tx_rx(netdev);
+ gmac_stop_dma(port);
+ napi_disable(&port->napi);
+
+ gmac_enable_irq(netdev, 0);
+ gmac_cleanup_rxq(netdev);
+ gmac_cleanup_txqs(netdev);
+
+ phy_stop(netdev->phydev);
+ free_irq(netdev->irq, netdev);
+
+ gmac_update_hw_stats(netdev);
+ return 0;
+}
+
+static void gmac_set_rx_mode(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_rx_fltr filter = { .bits = {
+ .broadcast = 1,
+ .multicast = 1,
+ .unicast = 1,
+ } };
+ struct netdev_hw_addr *ha;
+ unsigned int bit_nr;
+ u32 mc_filter[2];
+
+ mc_filter[1] = 0;
+ mc_filter[0] = 0;
+
+ if (netdev->flags & IFF_PROMISC) {
+ filter.bits.error = 1;
+ filter.bits.promiscuous = 1;
+ mc_filter[1] = ~0;
+ mc_filter[0] = ~0;
+ } else if (netdev->flags & IFF_ALLMULTI) {
+ mc_filter[1] = ~0;
+ mc_filter[0] = ~0;
+ } else {
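+		/* Hash each multicast address into one of 64 filter bits:
+		 * the bit number is the low 6 bits of the bit-inverted
+		 * CRC-32 of the address.
+		 */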
+ netdev_for_each_mc_addr(ha, netdev) {
+ bit_nr = ~crc32_le(~0, ha->addr, ETH_ALEN) & 0x3f;
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 0x1f);
+ }
+ }
+
+ writel(mc_filter[0], port->gmac_base + GMAC_MCAST_FIL0);
+ writel(mc_filter[1], port->gmac_base + GMAC_MCAST_FIL1);
+ writel(filter.bits32, port->gmac_base + GMAC_RX_FLTR);
+}
+
+static void gmac_write_mac_address(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ __le32 addr[3];
+
+ memset(addr, 0, sizeof(addr));
+ memcpy(addr, netdev->dev_addr, ETH_ALEN);
+
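+	/* The 6-byte station address is spread over three little-endian
+	 * 32-bit registers: ADD0 holds bytes 0-3, ADD1 bytes 4-5 and the
+	 * rest stays zero.
+	 */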
+ writel(le32_to_cpu(addr[0]), port->gmac_base + GMAC_STA_ADD0);
+ writel(le32_to_cpu(addr[1]), port->gmac_base + GMAC_STA_ADD1);
+ writel(le32_to_cpu(addr[2]), port->gmac_base + GMAC_STA_ADD2);
+}
+
+static int gmac_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *sa = addr;
+
+ memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+ gmac_write_mac_address(netdev);
+
+ return 0;
+}
+
+static void gmac_clear_hw_stats(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ readl(port->gmac_base + GMAC_IN_DISCARDS);
+ readl(port->gmac_base + GMAC_IN_ERRORS);
+ readl(port->gmac_base + GMAC_IN_MCAST);
+ readl(port->gmac_base + GMAC_IN_BCAST);
+ readl(port->gmac_base + GMAC_IN_MAC1);
+ readl(port->gmac_base + GMAC_IN_MAC2);
+}
+
+static void gmac_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int start;
+
+ gmac_update_hw_stats(netdev);
+
+ /* Racing with RX NAPI */
+ do {
+ start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+
+ stats->rx_packets = port->stats.rx_packets;
+ stats->rx_bytes = port->stats.rx_bytes;
+ stats->rx_errors = port->stats.rx_errors;
+ stats->rx_dropped = port->stats.rx_dropped;
+
+ stats->rx_length_errors = port->stats.rx_length_errors;
+ stats->rx_over_errors = port->stats.rx_over_errors;
+ stats->rx_crc_errors = port->stats.rx_crc_errors;
+ stats->rx_frame_errors = port->stats.rx_frame_errors;
+
+ } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+
+ /* Racing with MIB and TX completion interrupts */
+ do {
+ start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+
+ stats->tx_errors = port->stats.tx_errors;
+ stats->tx_packets = port->stats.tx_packets;
+ stats->tx_bytes = port->stats.tx_bytes;
+
+ stats->multicast = port->stats.multicast;
+ stats->rx_missed_errors = port->stats.rx_missed_errors;
+ stats->rx_fifo_errors = port->stats.rx_fifo_errors;
+
+ } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+
+ /* Racing with hard_start_xmit */
+ do {
+ start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+
+ stats->tx_dropped = port->stats.tx_dropped;
+
+ } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+
+ stats->rx_dropped += stats->rx_missed_errors;
+}
+
+static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
+{
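+	/* gmac_pick_rx_max_len() maps the MTU to one of the discrete
+	 * CONFIG0_MAXLEN_* frame length codes, or a negative value if
+	 * no supported frame length fits.
+	 */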
+ int max_len = gmac_pick_rx_max_len(new_mtu);
+
+ if (max_len < 0)
+ return -EINVAL;
+
+ gmac_disable_tx_rx(netdev);
+
+ netdev->mtu = new_mtu;
+ gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT,
+ CONFIG0_MAXLEN_MASK);
+
+ netdev_update_features(netdev);
+
+ gmac_enable_tx_rx(netdev);
+
+ return 0;
+}
+
+static netdev_features_t gmac_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
+ features &= ~GMAC_OFFLOAD_FEATURES;
+
+ return features;
+}
+
+static int gmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ int enable = features & NETIF_F_RXCSUM;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&port->config_lock, flags);
+
+ reg = readl(port->gmac_base + GMAC_CONFIG0);
+ reg = enable ? reg | CONFIG0_RX_CHKSUM : reg & ~CONFIG0_RX_CHKSUM;
+ writel(reg, port->gmac_base + GMAC_CONFIG0);
+
+ spin_unlock_irqrestore(&port->config_lock, flags);
+ return 0;
+}
+
+static int gmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ return sset == ETH_SS_STATS ? GMAC_STATS_NUM : 0;
+}
+
+static void gmac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ memcpy(data, gmac_stats_strings, sizeof(gmac_stats_strings));
+}
+
+static void gmac_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *estats, u64 *values)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ unsigned int start;
+ u64 *p;
+ int i;
+
+ gmac_update_hw_stats(netdev);
+
+ /* Racing with MIB interrupt */
+ do {
+ p = values;
+ start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+
+ for (i = 0; i < RX_STATS_NUM; i++)
+ *p++ = port->hw_stats[i];
+
+ } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ values = p;
+
+ /* Racing with RX NAPI */
+ do {
+ p = values;
+ start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+
+ for (i = 0; i < RX_STATUS_NUM; i++)
+ *p++ = port->rx_stats[i];
+ for (i = 0; i < RX_CHKSUM_NUM; i++)
+ *p++ = port->rx_csum_stats[i];
+ *p++ = port->rx_napi_exits;
+
+ } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ values = p;
+
+ /* Racing with TX start_xmit */
+ do {
+ p = values;
+ start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+
+		for (i = 0; i < TX_MAX_FRAGS; i++) {
+			*p++ = port->tx_frag_stats[i];
+			port->tx_frag_stats[i] = 0;
+		}
+		*p++ = port->tx_frags_linearized;
+		*p++ = port->tx_hw_csummed;
+
+ } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+}
+
+static int gmac_get_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (!netdev->phydev)
+ return -ENXIO;
+ phy_ethtool_ksettings_get(netdev->phydev, cmd);
+
+ return 0;
+}
+
+static int gmac_set_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ if (!netdev->phydev)
+ return -ENXIO;
+ return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+}
+
+static int gmac_nway_reset(struct net_device *netdev)
+{
+ if (!netdev->phydev)
+ return -ENXIO;
+ return phy_start_aneg(netdev->phydev);
+}
+
+static void gmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pparam)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_config0 config0;
+
+ config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+
+ pparam->rx_pause = config0.bits.rx_fc_en;
+ pparam->tx_pause = config0.bits.tx_fc_en;
+ pparam->autoneg = true;
+}
+
+static void gmac_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *rp)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ union gmac_config0 config0;
+
+ config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+
+ rp->rx_max_pending = 1 << 15;
+ rp->rx_mini_max_pending = 0;
+ rp->rx_jumbo_max_pending = 0;
+ rp->tx_max_pending = 1 << 15;
+
+ rp->rx_pending = 1 << port->rxq_order;
+ rp->rx_mini_pending = 0;
+ rp->rx_jumbo_pending = 0;
+ rp->tx_pending = 1 << port->txq_order;
+}
+
+static int gmac_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *rp)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+ int err = 0;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
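+	/* The rings are power-of-two sized, so round the requested
+	 * count up to the next power of two, capped at 2^15 entries.
+	 */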
+ if (rp->rx_pending) {
+ port->rxq_order = min(15, ilog2(rp->rx_pending - 1) + 1);
+ err = geth_resize_freeq(port);
+ }
+ if (rp->tx_pending) {
+ port->txq_order = min(15, ilog2(rp->tx_pending - 1) + 1);
+ port->irq_every_tx_packets = 1 << (port->txq_order - 2);
+ }
+
+ return err;
+}
+
+static int gmac_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ ecmd->rx_max_coalesced_frames = 1;
+ ecmd->tx_max_coalesced_frames = port->irq_every_tx_packets;
+ ecmd->rx_coalesce_usecs = port->rx_coalesce_nsecs / 1000;
+
+ return 0;
+}
+
+static int gmac_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ if (ecmd->tx_max_coalesced_frames < 1)
+ return -EINVAL;
+ if (ecmd->tx_max_coalesced_frames >= 1 << port->txq_order)
+ return -EINVAL;
+
+ port->irq_every_tx_packets = ecmd->tx_max_coalesced_frames;
+ port->rx_coalesce_nsecs = ecmd->rx_coalesce_usecs * 1000;
+
+ return 0;
+}
+
+static u32 gmac_get_msglevel(struct net_device *netdev)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ return port->msg_enable;
+}
+
+static void gmac_set_msglevel(struct net_device *netdev, u32 level)
+{
+ struct gemini_ethernet_port *port = netdev_priv(netdev);
+
+ port->msg_enable = level;
+}
+
+static void gmac_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, netdev->dev_id ? "1" : "0");
+}
+
+static const struct net_device_ops gmac_351x_ops = {
+ .ndo_init = gmac_init,
+ .ndo_uninit = gmac_uninit,
+ .ndo_open = gmac_open,
+ .ndo_stop = gmac_stop,
+ .ndo_start_xmit = gmac_start_xmit,
+ .ndo_tx_timeout = gmac_tx_timeout,
+ .ndo_set_rx_mode = gmac_set_rx_mode,
+ .ndo_set_mac_address = gmac_set_mac_address,
+ .ndo_get_stats64 = gmac_get_stats64,
+ .ndo_change_mtu = gmac_change_mtu,
+ .ndo_fix_features = gmac_fix_features,
+ .ndo_set_features = gmac_set_features,
+};
+
+static const struct ethtool_ops gmac_351x_ethtool_ops = {
+ .get_sset_count = gmac_get_sset_count,
+ .get_strings = gmac_get_strings,
+ .get_ethtool_stats = gmac_get_ethtool_stats,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = gmac_get_ksettings,
+ .set_link_ksettings = gmac_set_ksettings,
+ .nway_reset = gmac_nway_reset,
+ .get_pauseparam = gmac_get_pauseparam,
+ .get_ringparam = gmac_get_ringparam,
+ .set_ringparam = gmac_set_ringparam,
+ .get_coalesce = gmac_get_coalesce,
+ .set_coalesce = gmac_set_coalesce,
+ .get_msglevel = gmac_get_msglevel,
+ .set_msglevel = gmac_set_msglevel,
+ .get_drvinfo = gmac_get_drvinfo,
+};
+
+static irqreturn_t gemini_port_irq_thread(int irq, void *data)
+{
+ unsigned long irqmask = SWFQ_EMPTY_INT_BIT;
+ struct gemini_ethernet_port *port = data;
+ struct gemini_ethernet *geth;
+ unsigned long flags;
+
+ geth = port->geth;
+ /* The queue is half empty so refill it */
+ geth_fill_freeq(geth, true);
+
+ spin_lock_irqsave(&geth->irq_lock, flags);
+ /* ACK queue interrupt */
+ writel(irqmask, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+ /* Enable queue interrupt again */
+ irqmask |= readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ writel(irqmask, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t gemini_port_irq(int irq, void *data)
+{
+ struct gemini_ethernet_port *port = data;
+ struct gemini_ethernet *geth;
+ irqreturn_t ret = IRQ_NONE;
+ u32 val, en;
+
+ geth = port->geth;
+ spin_lock(&geth->irq_lock);
+
+ val = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+ en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+ if (val & en & SWFQ_EMPTY_INT_BIT) {
+ /* Disable the queue empty interrupt while we work on
+ * processing the queue. Also disable overrun interrupts
+ * as there is not much we can do about it here.
+ */
+ en &= ~(SWFQ_EMPTY_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT
+ | GMAC1_RX_OVERRUN_INT_BIT);
+ writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ spin_unlock(&geth->irq_lock);
+
+ return ret;
+}
+
+static void gemini_port_remove(struct gemini_ethernet_port *port)
+{
+ if (port->netdev)
+ unregister_netdev(port->netdev);
+ clk_disable_unprepare(port->pclk);
+ geth_cleanup_freeq(port->geth);
+}
+
+static void gemini_ethernet_init(struct gemini_ethernet *geth)
+{
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG);
+ writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
+
+ /* Interrupt config:
+ *
+ * GMAC0 intr bits ------> int0 ----> eth0
+ * GMAC1 intr bits ------> int1 ----> eth1
+ * TOE intr -------------> int1 ----> eth1
+ * Classification Intr --> int0 ----> eth0
+ * Default Q0 -----------> int0 ----> eth0
+ * Default Q1 -----------> int1 ----> eth1
+ * FreeQ intr -----------> int1 ----> eth1
+ */
+ writel(0xCCFC0FC0, geth->base + GLOBAL_INTERRUPT_SELECT_0_REG);
+ writel(0x00F00002, geth->base + GLOBAL_INTERRUPT_SELECT_1_REG);
+ writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_2_REG);
+ writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_3_REG);
+ writel(0xFF000003, geth->base + GLOBAL_INTERRUPT_SELECT_4_REG);
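+	/* Each select bit steers the corresponding interrupt source to
+	 * one of the two interrupt output lines; the magic values above
+	 * implement the routing table in the comment.
+	 */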
+
+	/* The interrupt sources are edge-triggered and latched into these
+	 * status registers; write all-ones to clear any stale events
+	 */
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_2_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_3_REG);
+ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
+
+ /* Set up queue */
+ writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
+ writel(0, geth->base + GLOBAL_HW_FREEQ_BASE_SIZE_REG);
+ writel(0, geth->base + GLOBAL_SWFQ_RWPTR_REG);
+ writel(0, geth->base + GLOBAL_HWFQ_RWPTR_REG);
+
+ geth->freeq_frag_order = DEFAULT_RX_BUF_ORDER;
+ /* This makes the queue resize on probe() so that we
+ * set up and enable the queue IRQ. FIXME: fragile.
+ */
+ geth->freeq_order = 1;
+}
+
+static void gemini_port_save_mac_addr(struct gemini_ethernet_port *port)
+{
+ port->mac_addr[0] =
+ cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD0));
+ port->mac_addr[1] =
+ cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD1));
+ port->mac_addr[2] =
+ cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD2));
+}
+
+static int gemini_ethernet_port_probe(struct platform_device *pdev)
+{
+ char *port_names[2] = { "ethernet0", "ethernet1" };
+ struct gemini_ethernet_port *port;
+ struct device *dev = &pdev->dev;
+ struct gemini_ethernet *geth;
+ struct net_device *netdev;
+ struct resource *gmacres;
+ struct resource *dmares;
+ struct device *parent;
+ unsigned int id;
+ int irq;
+ int ret;
+
+ parent = dev->parent;
+ geth = dev_get_drvdata(parent);
+
+ if (!strcmp(dev_name(dev), "60008000.ethernet-port"))
+ id = 0;
+ else if (!strcmp(dev_name(dev), "6000c000.ethernet-port"))
+ id = 1;
+ else
+ return -ENODEV;
+
+ dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
+
+ netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
+ if (!netdev) {
+ dev_err(dev, "Can't allocate ethernet device #%d\n", id);
+ return -ENOMEM;
+ }
+
+ port = netdev_priv(netdev);
+ SET_NETDEV_DEV(netdev, dev);
+ port->netdev = netdev;
+ port->id = id;
+ port->geth = geth;
+ port->dev = dev;
+
+ /* DMA memory */
+ dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!dmares) {
+ dev_err(dev, "no DMA resource\n");
+ return -ENODEV;
+ }
+ port->dma_base = devm_ioremap_resource(dev, dmares);
+ if (IS_ERR(port->dma_base))
+ return PTR_ERR(port->dma_base);
+
+ /* GMAC config memory */
+ gmacres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!gmacres) {
+ dev_err(dev, "no GMAC resource\n");
+ return -ENODEV;
+ }
+ port->gmac_base = devm_ioremap_resource(dev, gmacres);
+ if (IS_ERR(port->gmac_base))
+ return PTR_ERR(port->gmac_base);
+
+ /* Interrupt */
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "no IRQ\n");
+ return irq ? irq : -ENODEV;
+ }
+ port->irq = irq;
+
+ /* Clock the port */
+ port->pclk = devm_clk_get(dev, "PCLK");
+ if (IS_ERR(port->pclk)) {
+ dev_err(dev, "no PCLK\n");
+ return PTR_ERR(port->pclk);
+ }
+ ret = clk_prepare_enable(port->pclk);
+ if (ret)
+ return ret;
+
+	/* The boot loader may have left a usable ethernet address in the
+	 * registers, so save it
+	 */
+ gemini_port_save_mac_addr(port);
+
+ /* Reset the port */
+ port->reset = devm_reset_control_get_exclusive(dev, NULL);
+	if (IS_ERR(port->reset)) {
+		dev_err(dev, "no reset\n");
+		clk_disable_unprepare(port->pclk);
+		return PTR_ERR(port->reset);
+	}
+ reset_control_reset(port->reset);
+ usleep_range(100, 500);
+
+ /* Assign pointer in the main state container */
+ if (!id)
+ geth->port0 = port;
+ else
+ geth->port1 = port;
+ platform_set_drvdata(pdev, port);
+
+ /* Set up and register the netdev */
+ netdev->dev_id = port->id;
+ netdev->irq = irq;
+ netdev->netdev_ops = &gmac_351x_ops;
+ netdev->ethtool_ops = &gmac_351x_ethtool_ops;
+
+ spin_lock_init(&port->config_lock);
+ gmac_clear_hw_stats(netdev);
+
+ netdev->hw_features = GMAC_OFFLOAD_FEATURES;
+ netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+
+ port->freeq_refill = 0;
+ netif_napi_add(netdev, &port->napi, gmac_napi_poll,
+ DEFAULT_NAPI_WEIGHT);
+
+ if (is_valid_ether_addr((void *)port->mac_addr)) {
+ memcpy(netdev->dev_addr, port->mac_addr, ETH_ALEN);
+ } else {
+ dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n",
+ port->mac_addr[0], port->mac_addr[1],
+ port->mac_addr[2]);
+ dev_info(dev, "using a random ethernet address\n");
+ random_ether_addr(netdev->dev_addr);
+ }
+ gmac_write_mac_address(netdev);
+
+ ret = devm_request_threaded_irq(port->dev,
+ port->irq,
+ gemini_port_irq,
+ gemini_port_irq_thread,
+ IRQF_SHARED,
+ port_names[port->id],
+ port);
+	if (ret) {
+		clk_disable_unprepare(port->pclk);
+		return ret;
+	}
+
+ ret = register_netdev(netdev);
+ if (!ret) {
+ netdev_info(netdev,
+ "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
+ port->irq, &dmares->start,
+ &gmacres->start);
+ ret = gmac_setup_phy(netdev);
+ if (ret)
+ netdev_info(netdev,
+ "PHY init failed, deferring to ifup time\n");
+ return 0;
+ }
+
+	port->netdev = NULL;
+	clk_disable_unprepare(port->pclk);
+	free_netdev(netdev);
+	return ret;
+}
+
+static int gemini_ethernet_port_remove(struct platform_device *pdev)
+{
+ struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
+
+ gemini_port_remove(port);
+ return 0;
+}
+
+static const struct of_device_id gemini_ethernet_port_of_match[] = {
+ {
+ .compatible = "cortina,gemini-ethernet-port",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gemini_ethernet_port_of_match);
+
+static struct platform_driver gemini_ethernet_port_driver = {
+ .driver = {
+ .name = "gemini-ethernet-port",
+ .of_match_table = of_match_ptr(gemini_ethernet_port_of_match),
+ },
+ .probe = gemini_ethernet_port_probe,
+ .remove = gemini_ethernet_port_remove,
+};
+
+static int gemini_ethernet_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gemini_ethernet *geth;
+ unsigned int retry = 5;
+ struct resource *res;
+ u32 val;
+
+ /* Global registers */
+ geth = devm_kzalloc(dev, sizeof(*geth), GFP_KERNEL);
+ if (!geth)
+ return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ geth->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(geth->base))
+ return PTR_ERR(geth->base);
+ geth->dev = dev;
+
+ /* Wait for ports to stabilize */
+ do {
+ udelay(2);
+ val = readl(geth->base + GLOBAL_TOE_VERSION_REG);
+ barrier();
+ } while (!val && --retry);
+ if (!retry) {
+ dev_err(dev, "failed to reset ethernet\n");
+ return -EIO;
+ }
+ dev_info(dev, "Ethernet device ID: 0x%03x, revision 0x%01x\n",
+ (val >> 4) & 0xFFFU, val & 0xFU);
+
+ spin_lock_init(&geth->irq_lock);
+ spin_lock_init(&geth->freeq_lock);
+ gemini_ethernet_init(geth);
+
+ /* The children will use this */
+ platform_set_drvdata(pdev, geth);
+
+ /* Spawn child devices for the two ports */
+ return devm_of_platform_populate(dev);
+}
+
+static int gemini_ethernet_remove(struct platform_device *pdev)
+{
+ struct gemini_ethernet *geth = platform_get_drvdata(pdev);
+
+ gemini_ethernet_init(geth);
+ geth_cleanup_freeq(geth);
+
+ return 0;
+}
+
+static const struct of_device_id gemini_ethernet_of_match[] = {
+ {
+ .compatible = "cortina,gemini-ethernet",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gemini_ethernet_of_match);
+
+static struct platform_driver gemini_ethernet_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(gemini_ethernet_of_match),
+ },
+ .probe = gemini_ethernet_probe,
+ .remove = gemini_ethernet_remove,
+};
+
+static int __init gemini_ethernet_module_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&gemini_ethernet_port_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&gemini_ethernet_driver);
+ if (ret) {
+ platform_driver_unregister(&gemini_ethernet_port_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(gemini_ethernet_module_init);
+
+static void __exit gemini_ethernet_module_exit(void)
+{
+ platform_driver_unregister(&gemini_ethernet_driver);
+ platform_driver_unregister(&gemini_ethernet_port_driver);
+}
+module_exit(gemini_ethernet_module_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("StorLink SL351x (Gemini) ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
new file mode 100644
index 000000000000..0b12f89bf89a
--- /dev/null
+++ b/drivers/net/ethernet/cortina/gemini.h
@@ -0,0 +1,958 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Register definitions for Gemini GMAC Ethernet device driver
+ *
+ * Copyright (C) 2006 Storlink, Corp.
+ * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Copyright (C) 2010 Michał Mirosław <mirq-linux@rere.qmqm.pl>
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ */
+#ifndef _GEMINI_ETHERNET_H
+#define _GEMINI_ETHERNET_H
+
+#include <linux/bitops.h>
+
+/* Base Registers */
+#define TOE_NONTOE_QUE_HDR_BASE 0x2000
+#define TOE_TOE_QUE_HDR_BASE 0x3000
+
+/* Queue ID */
+#define TOE_SW_FREE_QID 0x00
+#define TOE_HW_FREE_QID 0x01
+#define TOE_GMAC0_SW_TXQ0_QID 0x02
+#define TOE_GMAC0_SW_TXQ1_QID 0x03
+#define TOE_GMAC0_SW_TXQ2_QID 0x04
+#define TOE_GMAC0_SW_TXQ3_QID 0x05
+#define TOE_GMAC0_SW_TXQ4_QID 0x06
+#define TOE_GMAC0_SW_TXQ5_QID 0x07
+#define TOE_GMAC0_HW_TXQ0_QID 0x08
+#define TOE_GMAC0_HW_TXQ1_QID 0x09
+#define TOE_GMAC0_HW_TXQ2_QID 0x0A
+#define TOE_GMAC0_HW_TXQ3_QID 0x0B
+#define TOE_GMAC1_SW_TXQ0_QID 0x12
+#define TOE_GMAC1_SW_TXQ1_QID 0x13
+#define TOE_GMAC1_SW_TXQ2_QID 0x14
+#define TOE_GMAC1_SW_TXQ3_QID 0x15
+#define TOE_GMAC1_SW_TXQ4_QID 0x16
+#define TOE_GMAC1_SW_TXQ5_QID 0x17
+#define TOE_GMAC1_HW_TXQ0_QID 0x18
+#define TOE_GMAC1_HW_TXQ1_QID 0x19
+#define TOE_GMAC1_HW_TXQ2_QID 0x1A
+#define TOE_GMAC1_HW_TXQ3_QID 0x1B
+#define TOE_GMAC0_DEFAULT_QID 0x20
+#define TOE_GMAC1_DEFAULT_QID 0x21
+#define TOE_CLASSIFICATION_QID(x) (0x22 + (x)) /* 0x22 ~ 0x2F */
+#define TOE_TOE_QID(x) (0x40 + (x)) /* 0x40 ~ 0x7F */
+
+/* TOE DMA Queue Size should be 2^n, n = 6...12
+ * TOE DMA Queues are the following queue types:
+ * SW Free Queue, HW Free Queue,
+ * GMAC 0/1 SW TX Q0-5, and GMAC 0/1 HW TX Q0-5
+ * The base address and descriptor number are configured at
+ * DMA Queues Descriptor Ring Base Address/Size Register (offset 0x0004)
+ */
+#define GET_WPTR(addr) readw((addr) + 2)
+#define GET_RPTR(addr) readw((addr))
+#define SET_WPTR(addr, data) writew((data), (addr) + 2)
+#define SET_RPTR(addr, data) writew((data), (addr))
+#define __RWPTR_NEXT(x, mask) (((unsigned int)(x) + 1) & (mask))
+#define __RWPTR_PREV(x, mask) (((unsigned int)(x) - 1) & (mask))
+#define __RWPTR_DISTANCE(r, w, mask) (((unsigned int)(w) - (r)) & (mask))
+#define __RWPTR_MASK(order) ((1 << (order)) - 1)
+#define RWPTR_NEXT(x, order) __RWPTR_NEXT((x), __RWPTR_MASK((order)))
+#define RWPTR_PREV(x, order) __RWPTR_PREV((x), __RWPTR_MASK((order)))
+#define RWPTR_DISTANCE(r, w, order) __RWPTR_DISTANCE((r), (w), \
+ __RWPTR_MASK((order)))
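+/* For example, with order = 6 a ring holds 64 descriptors, so
+ * RWPTR_NEXT(63, 6) wraps to 0 and RWPTR_DISTANCE(60, 2, 6) is 6.
+ */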
+
+/* Global registers */
+#define GLOBAL_TOE_VERSION_REG 0x0000
+#define GLOBAL_SW_FREEQ_BASE_SIZE_REG 0x0004
+#define GLOBAL_HW_FREEQ_BASE_SIZE_REG 0x0008
+#define GLOBAL_DMA_SKB_SIZE_REG 0x0010
+#define GLOBAL_SWFQ_RWPTR_REG 0x0014
+#define GLOBAL_HWFQ_RWPTR_REG 0x0018
+#define GLOBAL_INTERRUPT_STATUS_0_REG 0x0020
+#define GLOBAL_INTERRUPT_ENABLE_0_REG 0x0024
+#define GLOBAL_INTERRUPT_SELECT_0_REG 0x0028
+#define GLOBAL_INTERRUPT_STATUS_1_REG 0x0030
+#define GLOBAL_INTERRUPT_ENABLE_1_REG 0x0034
+#define GLOBAL_INTERRUPT_SELECT_1_REG 0x0038
+#define GLOBAL_INTERRUPT_STATUS_2_REG 0x0040
+#define GLOBAL_INTERRUPT_ENABLE_2_REG 0x0044
+#define GLOBAL_INTERRUPT_SELECT_2_REG 0x0048
+#define GLOBAL_INTERRUPT_STATUS_3_REG 0x0050
+#define GLOBAL_INTERRUPT_ENABLE_3_REG 0x0054
+#define GLOBAL_INTERRUPT_SELECT_3_REG 0x0058
+#define GLOBAL_INTERRUPT_STATUS_4_REG 0x0060
+#define GLOBAL_INTERRUPT_ENABLE_4_REG 0x0064
+#define GLOBAL_INTERRUPT_SELECT_4_REG 0x0068
+#define GLOBAL_HASH_TABLE_BASE_REG 0x006C
+#define GLOBAL_QUEUE_THRESHOLD_REG 0x0070
+
+/* GMAC 0/1 DMA/TOE register */
+#define GMAC_DMA_CTRL_REG 0x0000
+#define GMAC_TX_WEIGHTING_CTRL_0_REG 0x0004
+#define GMAC_TX_WEIGHTING_CTRL_1_REG 0x0008
+#define GMAC_SW_TX_QUEUE0_PTR_REG 0x000C
+#define GMAC_SW_TX_QUEUE1_PTR_REG 0x0010
+#define GMAC_SW_TX_QUEUE2_PTR_REG 0x0014
+#define GMAC_SW_TX_QUEUE3_PTR_REG 0x0018
+#define GMAC_SW_TX_QUEUE4_PTR_REG 0x001C
+#define GMAC_SW_TX_QUEUE5_PTR_REG 0x0020
+#define GMAC_SW_TX_QUEUE_PTR_REG(i) (GMAC_SW_TX_QUEUE0_PTR_REG + 4 * (i))
+#define GMAC_HW_TX_QUEUE0_PTR_REG 0x0024
+#define GMAC_HW_TX_QUEUE1_PTR_REG 0x0028
+#define GMAC_HW_TX_QUEUE2_PTR_REG 0x002C
+#define GMAC_HW_TX_QUEUE3_PTR_REG 0x0030
+#define GMAC_HW_TX_QUEUE_PTR_REG(i) (GMAC_HW_TX_QUEUE0_PTR_REG + 4 * (i))
+#define GMAC_DMA_TX_FIRST_DESC_REG 0x0038
+#define GMAC_DMA_TX_CURR_DESC_REG 0x003C
+#define GMAC_DMA_TX_DESC_WORD0_REG 0x0040
+#define GMAC_DMA_TX_DESC_WORD1_REG 0x0044
+#define GMAC_DMA_TX_DESC_WORD2_REG 0x0048
+#define GMAC_DMA_TX_DESC_WORD3_REG 0x004C
+#define GMAC_SW_TX_QUEUE_BASE_REG 0x0050
+#define GMAC_HW_TX_QUEUE_BASE_REG 0x0054
+#define GMAC_DMA_RX_FIRST_DESC_REG 0x0058
+#define GMAC_DMA_RX_CURR_DESC_REG 0x005C
+#define GMAC_DMA_RX_DESC_WORD0_REG 0x0060
+#define GMAC_DMA_RX_DESC_WORD1_REG 0x0064
+#define GMAC_DMA_RX_DESC_WORD2_REG 0x0068
+#define GMAC_DMA_RX_DESC_WORD3_REG 0x006C
+#define GMAC_HASH_ENGINE_REG0 0x0070
+#define GMAC_HASH_ENGINE_REG1 0x0074
+/* matching rule 0 Control register 0 */
+#define GMAC_MR0CR0 0x0078
+#define GMAC_MR0CR1 0x007C
+#define GMAC_MR0CR2 0x0080
+#define GMAC_MR1CR0 0x0084
+#define GMAC_MR1CR1 0x0088
+#define GMAC_MR1CR2 0x008C
+#define GMAC_MR2CR0 0x0090
+#define GMAC_MR2CR1 0x0094
+#define GMAC_MR2CR2 0x0098
+#define GMAC_MR3CR0 0x009C
+#define GMAC_MR3CR1 0x00A0
+#define GMAC_MR3CR2 0x00A4
+/* Support Protocol Register 0 */
+#define GMAC_SPR0 0x00A8
+#define GMAC_SPR1 0x00AC
+#define GMAC_SPR2 0x00B0
+#define GMAC_SPR3 0x00B4
+#define GMAC_SPR4 0x00B8
+#define GMAC_SPR5 0x00BC
+#define GMAC_SPR6 0x00C0
+#define GMAC_SPR7 0x00C4
+/* GMAC Hash/Rx/Tx AHB Weighting register */
+#define GMAC_AHB_WEIGHT_REG 0x00C8
+
+/* TOE GMAC 0/1 register */
+#define GMAC_STA_ADD0 0x0000
+#define GMAC_STA_ADD1 0x0004
+#define GMAC_STA_ADD2 0x0008
+#define GMAC_RX_FLTR 0x000c
+#define GMAC_MCAST_FIL0 0x0010
+#define GMAC_MCAST_FIL1 0x0014
+#define GMAC_CONFIG0 0x0018
+#define GMAC_CONFIG1 0x001c
+#define GMAC_CONFIG2 0x0020
+#define GMAC_CONFIG3 0x0024
+#define GMAC_RESERVED 0x0028
+#define GMAC_STATUS 0x002c
+#define GMAC_IN_DISCARDS 0x0030
+#define GMAC_IN_ERRORS 0x0034
+#define GMAC_IN_MCAST 0x0038
+#define GMAC_IN_BCAST 0x003c
+#define GMAC_IN_MAC1 0x0040 /* for STA 1 MAC Address */
+#define GMAC_IN_MAC2 0x0044 /* for STA 2 MAC Address */
+
+#define RX_STATS_NUM 6
+
+/* DMA Queues description Ring Base Address/Size Register (offset 0x0004) */
+union dma_q_base_size {
+ unsigned int bits32;
+ unsigned int base_size;
+};
+
+#define DMA_Q_BASE_MASK (~0x0f)
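+/* The ring base address and size share a single register: the low four
+ * bits appear to hold the size (as a power-of-two order) and are masked
+ * off by DMA_Q_BASE_MASK to recover the base address.
+ */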
+
+/* DMA SKB Buffer register (offset 0x0008) */
+union dma_skb_size {
+ unsigned int bits32;
+ struct bit_0008 {
+		unsigned int sw_skb_size : 16; /* SW free pool SKB size */
+		unsigned int hw_skb_size : 16; /* HW free pool SKB size */
+ } bits;
+};
+
+/* DMA SW Free Queue Read/Write Pointer Register (offset 0x000c) */
+union dma_rwptr {
+ unsigned int bits32;
+ struct bit_000c {
+ unsigned int rptr : 16; /* Read Ptr, RO */
+ unsigned int wptr : 16; /* Write Ptr, RW */
+ } bits;
+};
+
+/* Interrupt Status Register 0 (offset 0x0020)
+ * Interrupt Mask Register 0 (offset 0x0024)
+ * Interrupt Select Register 0 (offset 0x0028)
+ */
+#define GMAC1_TXDERR_INT_BIT BIT(31)
+#define GMAC1_TXPERR_INT_BIT BIT(30)
+#define GMAC0_TXDERR_INT_BIT BIT(29)
+#define GMAC0_TXPERR_INT_BIT BIT(28)
+#define GMAC1_RXDERR_INT_BIT BIT(27)
+#define GMAC1_RXPERR_INT_BIT BIT(26)
+#define GMAC0_RXDERR_INT_BIT BIT(25)
+#define GMAC0_RXPERR_INT_BIT BIT(24)
+#define GMAC1_SWTQ15_FIN_INT_BIT BIT(23)
+#define GMAC1_SWTQ14_FIN_INT_BIT BIT(22)
+#define GMAC1_SWTQ13_FIN_INT_BIT BIT(21)
+#define GMAC1_SWTQ12_FIN_INT_BIT BIT(20)
+#define GMAC1_SWTQ11_FIN_INT_BIT BIT(19)
+#define GMAC1_SWTQ10_FIN_INT_BIT BIT(18)
+#define GMAC0_SWTQ05_FIN_INT_BIT BIT(17)
+#define GMAC0_SWTQ04_FIN_INT_BIT BIT(16)
+#define GMAC0_SWTQ03_FIN_INT_BIT BIT(15)
+#define GMAC0_SWTQ02_FIN_INT_BIT BIT(14)
+#define GMAC0_SWTQ01_FIN_INT_BIT BIT(13)
+#define GMAC0_SWTQ00_FIN_INT_BIT BIT(12)
+#define GMAC1_SWTQ15_EOF_INT_BIT BIT(11)
+#define GMAC1_SWTQ14_EOF_INT_BIT BIT(10)
+#define GMAC1_SWTQ13_EOF_INT_BIT BIT(9)
+#define GMAC1_SWTQ12_EOF_INT_BIT BIT(8)
+#define GMAC1_SWTQ11_EOF_INT_BIT BIT(7)
+#define GMAC1_SWTQ10_EOF_INT_BIT BIT(6)
+#define GMAC0_SWTQ05_EOF_INT_BIT BIT(5)
+#define GMAC0_SWTQ04_EOF_INT_BIT BIT(4)
+#define GMAC0_SWTQ03_EOF_INT_BIT BIT(3)
+#define GMAC0_SWTQ02_EOF_INT_BIT BIT(2)
+#define GMAC0_SWTQ01_EOF_INT_BIT BIT(1)
+#define GMAC0_SWTQ00_EOF_INT_BIT BIT(0)
+
+/* Interrupt Status Register 1 (offset 0x0030)
+ * Interrupt Mask Register 1 (offset 0x0034)
+ * Interrupt Select Register 1 (offset 0x0038)
+ */
+#define TOE_IQ3_FULL_INT_BIT BIT(31)
+#define TOE_IQ2_FULL_INT_BIT BIT(30)
+#define TOE_IQ1_FULL_INT_BIT BIT(29)
+#define TOE_IQ0_FULL_INT_BIT BIT(28)
+#define TOE_IQ3_INT_BIT BIT(27)
+#define TOE_IQ2_INT_BIT BIT(26)
+#define TOE_IQ1_INT_BIT BIT(25)
+#define TOE_IQ0_INT_BIT BIT(24)
+#define GMAC1_HWTQ13_EOF_INT_BIT BIT(23)
+#define GMAC1_HWTQ12_EOF_INT_BIT BIT(22)
+#define GMAC1_HWTQ11_EOF_INT_BIT BIT(21)
+#define GMAC1_HWTQ10_EOF_INT_BIT BIT(20)
+#define GMAC0_HWTQ03_EOF_INT_BIT BIT(19)
+#define GMAC0_HWTQ02_EOF_INT_BIT BIT(18)
+#define GMAC0_HWTQ01_EOF_INT_BIT BIT(17)
+#define GMAC0_HWTQ00_EOF_INT_BIT BIT(16)
+#define CLASS_RX_INT_BIT(x) BIT((x + 2))
+#define DEFAULT_Q1_INT_BIT BIT(1)
+#define DEFAULT_Q0_INT_BIT BIT(0)
+
+#define TOE_IQ_INT_BITS (TOE_IQ0_INT_BIT | TOE_IQ1_INT_BIT | \
+ TOE_IQ2_INT_BIT | TOE_IQ3_INT_BIT)
+#define TOE_IQ_FULL_BITS (TOE_IQ0_FULL_INT_BIT | TOE_IQ1_FULL_INT_BIT | \
+ TOE_IQ2_FULL_INT_BIT | TOE_IQ3_FULL_INT_BIT)
+#define TOE_IQ_ALL_BITS (TOE_IQ_INT_BITS | TOE_IQ_FULL_BITS)
+#define TOE_CLASS_RX_INT_BITS 0xfffc
+
+/* Interrupt Status Register 2 (offset 0x0040)
+ * Interrupt Mask Register 2 (offset 0x0044)
+ * Interrupt Select Register 2 (offset 0x0048)
+ */
+#define TOE_QL_FULL_INT_BIT(x) BIT(x)
+
+/* Interrupt Status Register 3 (offset 0x0050)
+ * Interrupt Mask Register 3 (offset 0x0054)
+ * Interrupt Select Register 3 (offset 0x0058)
+ */
+#define TOE_QH_FULL_INT_BIT(x) BIT(x - 32)
+
+/* Interrupt Status Register 4 (offset 0x0060)
+ * Interrupt Mask Register 4 (offset 0x0064)
+ * Interrupt Select Register 4 (offset 0x0068)
+ */
+#define GMAC1_RESERVED_INT_BIT BIT(31)
+#define GMAC1_MIB_INT_BIT BIT(30)
+#define GMAC1_RX_PAUSE_ON_INT_BIT BIT(29)
+#define GMAC1_TX_PAUSE_ON_INT_BIT BIT(28)
+#define GMAC1_RX_PAUSE_OFF_INT_BIT BIT(27)
+#define GMAC1_TX_PAUSE_OFF_INT_BIT BIT(26)
+#define GMAC1_RX_OVERRUN_INT_BIT BIT(25)
+#define GMAC1_STATUS_CHANGE_INT_BIT BIT(24)
+#define GMAC0_RESERVED_INT_BIT BIT(23)
+#define GMAC0_MIB_INT_BIT BIT(22)
+#define GMAC0_RX_PAUSE_ON_INT_BIT BIT(21)
+#define GMAC0_TX_PAUSE_ON_INT_BIT BIT(20)
+#define GMAC0_RX_PAUSE_OFF_INT_BIT BIT(19)
+#define GMAC0_TX_PAUSE_OFF_INT_BIT BIT(18)
+#define GMAC0_RX_OVERRUN_INT_BIT BIT(17)
+#define GMAC0_STATUS_CHANGE_INT_BIT BIT(16)
+#define CLASS_RX_FULL_INT_BIT(x) BIT(x + 2)
+#define HWFQ_EMPTY_INT_BIT BIT(1)
+#define SWFQ_EMPTY_INT_BIT BIT(0)
+
+#define GMAC0_INT_BITS (GMAC0_RESERVED_INT_BIT | GMAC0_MIB_INT_BIT | \
+ GMAC0_RX_PAUSE_ON_INT_BIT | \
+ GMAC0_TX_PAUSE_ON_INT_BIT | \
+ GMAC0_RX_PAUSE_OFF_INT_BIT | \
+ GMAC0_TX_PAUSE_OFF_INT_BIT | \
+ GMAC0_RX_OVERRUN_INT_BIT | \
+ GMAC0_STATUS_CHANGE_INT_BIT)
+#define GMAC1_INT_BITS (GMAC1_RESERVED_INT_BIT | GMAC1_MIB_INT_BIT | \
+ GMAC1_RX_PAUSE_ON_INT_BIT | \
+ GMAC1_TX_PAUSE_ON_INT_BIT | \
+ GMAC1_RX_PAUSE_OFF_INT_BIT | \
+ GMAC1_TX_PAUSE_OFF_INT_BIT | \
+ GMAC1_RX_OVERRUN_INT_BIT | \
+ GMAC1_STATUS_CHANGE_INT_BIT)
+
+#define CLASS_RX_FULL_INT_BITS 0xfffc
+
+/* GLOBAL_QUEUE_THRESHOLD_REG (offset 0x0070) */
+union queue_threshold {
+ unsigned int bits32;
+ struct bit_0070_2 {
+ /* 7:0 Software Free Queue Empty Threshold */
+ unsigned int swfq_empty:8;
+ /* 15:8 Hardware Free Queue Empty Threshold */
+ unsigned int hwfq_empty:8;
+ /* 23:16 */
+ unsigned int intrq:8;
+ /* 31:24 */
+ unsigned int toe_class:8;
+ } bits;
+};
+
+/* GMAC DMA Control Register
+ * GMAC0 offset 0x8000
+ * GMAC1 offset 0xC000
+ */
+union gmac_dma_ctrl {
+ unsigned int bits32;
+ struct bit_8000 {
+ /* bit 1:0 Peripheral Bus Width */
+ unsigned int td_bus:2;
+ /* bit 3:2 TxDMA max burst size for every AHB request */
+ unsigned int td_burst_size:2;
+ /* bit 7:4 TxDMA protection control */
+ unsigned int td_prot:4;
+ /* bit 9:8 Peripheral Bus Width */
+ unsigned int rd_bus:2;
+ /* bit 11:10 DMA max burst size for every AHB request */
+ unsigned int rd_burst_size:2;
+ /* bit 15:12 DMA Protection Control */
+ unsigned int rd_prot:4;
+ /* bit 17:16 */
+ unsigned int rd_insert_bytes:2;
+ /* bit 27:18 */
+ unsigned int reserved:10;
+ /* bit 28 1: Drop, 0: Accept */
+ unsigned int drop_small_ack:1;
+ /* bit 29 Loopback TxDMA to RxDMA */
+ unsigned int loopback:1;
+ /* bit 30 Tx DMA Enable */
+ unsigned int td_enable:1;
+ /* bit 31 Rx DMA Enable */
+ unsigned int rd_enable:1;
+ } bits;
+};
+
+/* GMAC Tx Weighting Control Register 0
+ * GMAC0 offset 0x8004
+ * GMAC1 offset 0xC004
+ */
+union gmac_tx_wcr0 {
+ unsigned int bits32;
+ struct bit_8004 {
+ /* bit 5:0 HW TX Queue 3 */
+ unsigned int hw_tq0:6;
+ /* bit 11:6 HW TX Queue 2 */
+ unsigned int hw_tq1:6;
+ /* bit 17:12 HW TX Queue 1 */
+ unsigned int hw_tq2:6;
+ /* bit 23:18 HW TX Queue 0 */
+ unsigned int hw_tq3:6;
+ /* bit 31:24 */
+ unsigned int reserved:8;
+ } bits;
+};
+
+/* GMAC Tx Weighting Control Register 1
+ * GMAC0 offset 0x8008
+ * GMAC1 offset 0xC008
+ */
+union gmac_tx_wcr1 {
+ unsigned int bits32;
+ struct bit_8008 {
+ /* bit 4:0 SW TX Queue 0 */
+ unsigned int sw_tq0:5;
+ /* bit 9:5 SW TX Queue 1 */
+ unsigned int sw_tq1:5;
+ /* bit 14:10 SW TX Queue 2 */
+ unsigned int sw_tq2:5;
+ /* bit 19:15 SW TX Queue 3 */
+ unsigned int sw_tq3:5;
+ /* bit 24:20 SW TX Queue 4 */
+ unsigned int sw_tq4:5;
+ /* bit 29:25 SW TX Queue 5 */
+ unsigned int sw_tq5:5;
+ /* bit 31:30 */
+ unsigned int reserved:2;
+ } bits;
+};
+
+/* GMAC DMA Tx Description Word 0 Register
+ * GMAC0 offset 0x8040
+ * GMAC1 offset 0xC040
+ */
+union gmac_txdesc_0 {
+ unsigned int bits32;
+ struct bit_8040 {
+ /* bit 15:0 Transfer size */
+ unsigned int buffer_size:16;
+ /* bit 21:16 number of descriptors used for the current frame */
+ unsigned int desc_count:6;
+ /* bit 22 Tx Status, 1: Successful 0: Failed */
+ unsigned int status_tx_ok:1;
+ /* bit 28:23 Tx Status, Reserved bits */
+ unsigned int status_rvd:6;
+		/* bit 29 protocol error while processing this descriptor */
+		unsigned int perr:1;
+		/* bit 30 data error while processing this descriptor */
+		unsigned int derr:1;
+ /* bit 31 */
+ unsigned int reserved:1;
+ } bits;
+};
+
+/* GMAC DMA Tx Description Word 1 Register
+ * GMAC0 offset 0x8044
+ * GMAC1 offset 0xC044
+ */
+union gmac_txdesc_1 {
+ unsigned int bits32;
+ struct txdesc_word1 {
+ /* bit 15: 0 Tx Frame Byte Count */
+ unsigned int byte_count:16;
+ /* bit 16 TSS segmentation use MTU setting */
+ unsigned int mtu_enable:1;
+ /* bit 17 IPV4 Header Checksum Enable */
+ unsigned int ip_chksum:1;
+ /* bit 18 IPV6 Tx Enable */
+ unsigned int ipv6_enable:1;
+ /* bit 19 TCP Checksum Enable */
+ unsigned int tcp_chksum:1;
+ /* bit 20 UDP Checksum Enable */
+ unsigned int udp_chksum:1;
+ /* bit 21 Bypass HW offload engine */
+ unsigned int bypass_tss:1;
+ /* bit 22 Don't update IP length field */
+ unsigned int ip_fixed_len:1;
+ /* bit 31:23 Tx Flag, Reserved */
+ unsigned int reserved:9;
+ } bits;
+};
+
+#define TSS_IP_FIXED_LEN_BIT BIT(22)
+#define TSS_BYPASS_BIT BIT(21)
+#define TSS_UDP_CHKSUM_BIT BIT(20)
+#define TSS_TCP_CHKSUM_BIT BIT(19)
+#define TSS_IPV6_ENABLE_BIT BIT(18)
+#define TSS_IP_CHKSUM_BIT BIT(17)
+#define TSS_MTU_ENABLE_BIT BIT(16)
+
+#define TSS_CHECKUM_ENABLE \
+ (TSS_IP_CHKSUM_BIT | TSS_IPV6_ENABLE_BIT | \
+ TSS_TCP_CHKSUM_BIT | TSS_UDP_CHKSUM_BIT)
+
+/* GMAC DMA Tx Description Word 2 Register
+ * GMAC0 offset 0x8048
+ * GMAC1 offset 0xC048
+ */
+union gmac_txdesc_2 {
+ unsigned int bits32;
+ unsigned int buf_adr;
+};
+
+/* GMAC DMA Tx Description Word 3 Register
+ * GMAC0 offset 0x804C
+ * GMAC1 offset 0xC04C
+ */
+union gmac_txdesc_3 {
+ unsigned int bits32;
+ struct txdesc_word3 {
+		/* bit 12:0 MTU size used for TSO segmentation */
+ unsigned int mtu_size:13;
+ /* bit 28:13 */
+ unsigned int reserved:16;
+ /* bit 29 End of frame interrupt enable */
+ unsigned int eofie:1;
+ /* bit 31:30 11: only one, 10: first, 01: last, 00: linking */
+ unsigned int sof_eof:2;
+ } bits;
+};
+
+#define SOF_EOF_BIT_MASK 0x3fffffff
+#define SOF_BIT 0x80000000
+#define EOF_BIT 0x40000000
+#define EOFIE_BIT BIT(29)
+#define MTU_SIZE_BIT_MASK 0x1fff
+
+/* GMAC Tx Descriptor */
+struct gmac_txdesc {
+ union gmac_txdesc_0 word0;
+ union gmac_txdesc_1 word1;
+ union gmac_txdesc_2 word2;
+ union gmac_txdesc_3 word3;
+};
+
+/* GMAC DMA Rx Description Word 0 Register
+ * GMAC0 offset 0x8060
+ * GMAC1 offset 0xC060
+ */
+union gmac_rxdesc_0 {
+ unsigned int bits32;
+ struct bit_8060 {
+		/* bit 15:0 buffer size of this descriptor */
+ unsigned int buffer_size:16;
+ /* bit 21:16 number of descriptors used for the current frame */
+ unsigned int desc_count:6;
+		/* bit 25:22 Status of rx frame */
+ unsigned int status:4;
+ /* bit 28:26 Check Sum Status */
+ unsigned int chksum_status:3;
+		/* bit 29 protocol error while processing this descriptor */
+		unsigned int perr:1;
+		/* bit 30 data error while processing this descriptor */
+		unsigned int derr:1;
+ /* bit 31 TOE/CIS Queue Full dropped packet to default queue */
+ unsigned int drop:1;
+ } bits;
+};
+
+#define GMAC_RXDESC_0_T_derr BIT(30)
+#define GMAC_RXDESC_0_T_perr BIT(29)
+#define GMAC_RXDESC_0_T_chksum_status(x) BIT(x + 26)
+#define GMAC_RXDESC_0_T_status(x) BIT(x + 22)
+#define GMAC_RXDESC_0_T_desc_count(x) BIT(x + 16)
+
+#define RX_CHKSUM_IP_UDP_TCP_OK 0
+#define RX_CHKSUM_IP_OK_ONLY 1
+#define RX_CHKSUM_NONE 2
+#define RX_CHKSUM_IP_ERR_UNKNOWN 4
+#define RX_CHKSUM_IP_ERR 5
+#define RX_CHKSUM_TCP_UDP_ERR 6
+#define RX_CHKSUM_NUM 8
+
+#define RX_STATUS_GOOD_FRAME 0
+#define RX_STATUS_TOO_LONG_GOOD_CRC 1
+#define RX_STATUS_RUNT_FRAME 2
+#define RX_STATUS_SFD_NOT_FOUND 3
+#define RX_STATUS_CRC_ERROR 4
+#define RX_STATUS_TOO_LONG_BAD_CRC 5
+#define RX_STATUS_ALIGNMENT_ERROR 6
+#define RX_STATUS_TOO_LONG_BAD_ALIGN 7
+#define RX_STATUS_RX_ERR 8
+#define RX_STATUS_DA_FILTERED 9
+#define RX_STATUS_BUFFER_FULL 10
+#define RX_STATUS_NUM 16
+
+#define RX_ERROR_LENGTH(s) \
+ ((s) == RX_STATUS_TOO_LONG_GOOD_CRC || \
+ (s) == RX_STATUS_TOO_LONG_BAD_CRC || \
+ (s) == RX_STATUS_TOO_LONG_BAD_ALIGN)
+#define RX_ERROR_OVER(s) \
+ ((s) == RX_STATUS_BUFFER_FULL)
+#define RX_ERROR_CRC(s) \
+ ((s) == RX_STATUS_CRC_ERROR || \
+ (s) == RX_STATUS_TOO_LONG_BAD_CRC)
+#define RX_ERROR_FRAME(s) \
+ ((s) == RX_STATUS_ALIGNMENT_ERROR || \
+ (s) == RX_STATUS_TOO_LONG_BAD_ALIGN)
+#define RX_ERROR_FIFO(s) \
+ (0)
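+/* FIFO errors are not reported through the RX status field: they are
+ * instead counted from the RX overrun interrupt in gmac_irq().
+ */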
+
+/* GMAC DMA Rx Description Word 1 Register
+ * GMAC0 offset 0x8064
+ * GMAC1 offset 0xC064
+ */
+union gmac_rxdesc_1 {
+ unsigned int bits32;
+ struct rxdesc_word1 {
+ /* bit 15: 0 Rx Frame Byte Count */
+ unsigned int byte_count:16;
+ /* bit 31:16 Software ID */
+ unsigned int sw_id:16;
+ } bits;
+};
+
+/* GMAC DMA Rx Description Word 2 Register
+ * GMAC0 offset 0x8068
+ * GMAC1 offset 0xC068
+ */
+union gmac_rxdesc_2 {
+ unsigned int bits32;
+ unsigned int buf_adr;
+};
+
+#define RX_INSERT_NONE 0
+#define RX_INSERT_1_BYTE 1
+#define RX_INSERT_2_BYTE 2
+#define RX_INSERT_3_BYTE 3
+
+/* GMAC DMA Rx Description Word 3 Register
+ * GMAC0 offset 0x806C
+ * GMAC1 offset 0xC06C
+ */
+union gmac_rxdesc_3 {
+ unsigned int bits32;
+ struct rxdesc_word3 {
+ /* bit 7: 0 L3 data offset */
+ unsigned int l3_offset:8;
+ /* bit 15: 8 L4 data offset */
+ unsigned int l4_offset:8;
+ /* bit 23: 16 L7 data offset */
+ unsigned int l7_offset:8;
+ /* bit 24 Duplicated ACK detected */
+ unsigned int dup_ack:1;
+ /* bit 25 abnormal case found */
+ unsigned int abnormal:1;
+ /* bit 26 IPV4 option or IPV6 extension header */
+ unsigned int option:1;
+ /* bit 27 Out of Sequence packet */
+ unsigned int out_of_seq:1;
+ /* bit 28 Control Flag is present */
+ unsigned int ctrl_flag:1;
+ /* bit 29 End of frame interrupt enable */
+ unsigned int eofie:1;
+ /* bit 31:30 11: only one, 10: first, 01: last, 00: linking */
+ unsigned int sof_eof:2;
+ } bits;
+};
+
+/* GMAC Rx Descriptor, this is simply fitted over the queue registers */
+struct gmac_rxdesc {
+ union gmac_rxdesc_0 word0;
+ union gmac_rxdesc_1 word1;
+ union gmac_rxdesc_2 word2;
+ union gmac_rxdesc_3 word3;
+};
+
+/* GMAC Matching Rule Control Register 0
+ * GMAC0 offset 0x8078
+ * GMAC1 offset 0xC078
+ */
+#define MR_L2_BIT BIT(31)
+#define MR_L3_BIT BIT(30)
+#define MR_L4_BIT BIT(29)
+#define MR_L7_BIT BIT(28)
+#define MR_PORT_BIT BIT(27)
+#define MR_PRIORITY_BIT BIT(26)
+#define MR_DA_BIT BIT(23)
+#define MR_SA_BIT BIT(22)
+#define MR_ETHER_TYPE_BIT BIT(21)
+#define MR_VLAN_BIT BIT(20)
+#define MR_PPPOE_BIT BIT(19)
+#define MR_IP_VER_BIT BIT(15)
+#define MR_IP_HDR_LEN_BIT BIT(14)
+#define MR_FLOW_LABLE_BIT BIT(13)
+#define MR_TOS_TRAFFIC_BIT BIT(12)
+#define MR_SPR_BIT(x) BIT(x)
+#define MR_SPR_BITS 0xff
+
+/* GMAC_AHB_WEIGHT registers
+ * GMAC0 offset 0x80C8
+ * GMAC1 offset 0xC0C8
+ */
+union gmac_ahb_weight {
+ unsigned int bits32;
+ struct bit_80C8 {
+ /* 4:0 */
+ unsigned int hash_weight:5;
+ /* 9:5 */
+ unsigned int rx_weight:5;
+ /* 14:10 */
+ unsigned int tx_weight:5;
+ /* 19:15 Rx Data Pre Request FIFO Threshold */
+ unsigned int pre_req:5;
+ /* 24:20 DMA TqCtrl to Start tqDV FIFO Threshold */
+ unsigned int tq_dv_threshold:5;
+ /* 31:25 */
+ unsigned int reserved:7;
+ } bits;
+};
+
+/* GMAC RX FLTR
+ * GMAC0 Offset 0xA00C
+ * GMAC1 Offset 0xE00C
+ */
+union gmac_rx_fltr {
+ unsigned int bits32;
+ struct bit1_000c {
+ /* Enable receive of unicast frames that are sent to STA
+ * address
+ */
+ unsigned int unicast:1;
+ /* Enable receive of multicast frames that pass multicast
+ * filter
+ */
+ unsigned int multicast:1;
+ /* Enable receive of broadcast frames */
+ unsigned int broadcast:1;
+ /* Enable receive of all frames */
+ unsigned int promiscuous:1;
+ /* Enable receive of all error frames */
+ unsigned int error:1;
+ unsigned int reserved:27;
+ } bits;
+};
+
+/* GMAC Configuration 0
+ * GMAC0 Offset 0xA018
+ * GMAC1 Offset 0xE018
+ */
+union gmac_config0 {
+ unsigned int bits32;
+ struct bit1_0018 {
+ /* 0: disable transmit */
+ unsigned int dis_tx:1;
+ /* 1: disable receive */
+ unsigned int dis_rx:1;
+ /* 2: transmit data loopback enable */
+ unsigned int loop_back:1;
+		/* 3: flow control also triggered by Rx queues */
+ unsigned int flow_ctrl:1;
+ /* 4-7: adjust IFG from 96+/-56 */
+ unsigned int adj_ifg:4;
+ /* 8-10 maximum receive frame length allowed */
+ unsigned int max_len:3;
+ /* 11: disable back-off function */
+ unsigned int dis_bkoff:1;
+ /* 12: disable 16 collisions abort function */
+ unsigned int dis_col:1;
+ /* 13: speed up timers in simulation */
+ unsigned int sim_test:1;
+ /* 14: RX flow control enable */
+ unsigned int rx_fc_en:1;
+ /* 15: TX flow control enable */
+ unsigned int tx_fc_en:1;
+ /* 16: RGMII in-band status enable */
+ unsigned int rgmii_en:1;
+ /* 17: IPv4 RX Checksum enable */
+ unsigned int ipv4_rx_chksum:1;
+ /* 18: IPv6 RX Checksum enable */
+ unsigned int ipv6_rx_chksum:1;
+ /* 19: Remove Rx VLAN tag */
+ unsigned int rx_tag_remove:1;
+ /* 20 */
+ unsigned int rgmm_edge:1;
+ /* 21 */
+ unsigned int rxc_inv:1;
+ /* 22 */
+ unsigned int ipv6_exthdr_order:1;
+ /* 23 */
+ unsigned int rx_err_detect:1;
+ /* 24 */
+ unsigned int port0_chk_hwq:1;
+ /* 25 */
+ unsigned int port1_chk_hwq:1;
+ /* 26 */
+ unsigned int port0_chk_toeq:1;
+ /* 27 */
+ unsigned int port1_chk_toeq:1;
+ /* 28 */
+ unsigned int port0_chk_classq:1;
+ /* 29 */
+ unsigned int port1_chk_classq:1;
+ /* 30, 31 */
+ unsigned int reserved:2;
+ } bits;
+};
+
+#define CONFIG0_TX_RX_DISABLE (BIT(1) | BIT(0))
+#define CONFIG0_RX_CHKSUM (BIT(18) | BIT(17))
+#define CONFIG0_FLOW_RX BIT(14)
+#define CONFIG0_FLOW_TX BIT(15)
+#define CONFIG0_FLOW_TX_RX (BIT(14) | BIT(15))
+#define CONFIG0_FLOW_CTL (BIT(14) | BIT(15))
+
+#define CONFIG0_MAXLEN_SHIFT 8
+#define CONFIG0_MAXLEN_MASK (7 << CONFIG0_MAXLEN_SHIFT)
+#define CONFIG0_MAXLEN_1536 0
+#define CONFIG0_MAXLEN_1518 1
+#define CONFIG0_MAXLEN_1522 2
+#define CONFIG0_MAXLEN_1542 3
+#define CONFIG0_MAXLEN_9k 4 /* 9212 */
+#define CONFIG0_MAXLEN_10k 5 /* 10236 */
+#define CONFIG0_MAXLEN_1518__6 6
+#define CONFIG0_MAXLEN_1518__7 7
+
+/* GMAC Configuration 1
+ * GMAC0 Offset 0xA01C
+ * GMAC1 Offset 0xE01C
+ */
+union gmac_config1 {
+ unsigned int bits32;
+ struct bit1_001c {
+ /* Flow control set threshold */
+ unsigned int set_threshold:8;
+ /* Flow control release threshold */
+ unsigned int rel_threshold:8;
+ unsigned int reserved:16;
+ } bits;
+};
+
+#define GMAC_FLOWCTRL_SET_MAX 32
+#define GMAC_FLOWCTRL_SET_MIN 0
+#define GMAC_FLOWCTRL_RELEASE_MAX 32
+#define GMAC_FLOWCTRL_RELEASE_MIN 0
+
+/* GMAC Configuration 2
+ * GMAC0 Offset 0xA020
+ * GMAC1 Offset 0xE020
+ */
+union gmac_config2 {
+ unsigned int bits32;
+ struct bit1_0020 {
+ /* Flow control set threshold */
+ unsigned int set_threshold:16;
+ /* Flow control release threshold */
+ unsigned int rel_threshold:16;
+ } bits;
+};
+
+/* GMAC Configuration 3
+ * GMAC0 Offset 0xA024
+ * GMAC1 Offset 0xE024
+ */
+union gmac_config3 {
+ unsigned int bits32;
+ struct bit1_0024 {
+ /* Flow control set threshold */
+ unsigned int set_threshold:16;
+ /* Flow control release threshold */
+ unsigned int rel_threshold:16;
+ } bits;
+};
+
+/* GMAC STATUS
+ * GMAC0 Offset 0xA02C
+ * GMAC1 Offset 0xE02C
+ */
+union gmac_status {
+ unsigned int bits32;
+ struct bit1_002c {
+ /* Link status */
+ unsigned int link:1;
+		/* Link speed (00: 2.5 MHz/10M, 01: 25 MHz/100M, 10: 125 MHz/1000M) */
+ unsigned int speed:2;
+ /* Duplex mode */
+ unsigned int duplex:1;
+ unsigned int reserved_1:1;
+ /* PHY interface type */
+ unsigned int mii_rmii:2;
+ unsigned int reserved_2:25;
+ } bits;
+};
+
+#define GMAC_SPEED_10 0
+#define GMAC_SPEED_100 1
+#define GMAC_SPEED_1000 2
+
+#define GMAC_PHY_MII 0
+#define GMAC_PHY_GMII 1
+#define GMAC_PHY_RGMII_100_10 2
+#define GMAC_PHY_RGMII_1000 3
+
+/* Queue Header
+ * (1) TOE Queue Header
+ * (2) Non-TOE Queue Header
+ * (3) Interrupt Queue Header
+ *
+ * Memory layout
+ * TOE Queue Header
+ * 0x60003000 +---------------------------+ 0x0000
+ * | TOE Queue 0 Header |
+ * | 8 * 4 Bytes |
+ * +---------------------------+ 0x0020
+ * | TOE Queue 1 Header |
+ * | 8 * 4 Bytes |
+ * +---------------------------+ 0x0040
+ * | ...... |
+ * | |
+ * +---------------------------+
+ *
+ * Non TOE Queue Header
+ * 0x60002000 +---------------------------+ 0x0000
+ * | Default Queue 0 Header |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ 0x0008
+ * | Default Queue 1 Header |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ 0x0010
+ * | Classification Queue 0 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Classification Queue 1 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ (n * 8 + 0x10)
+ * | ... |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ (13 * 8 + 0x10)
+ * | Classification Queue 13 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+ 0x80
+ * | Interrupt Queue 0 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Interrupt Queue 1 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Interrupt Queue 2 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ * | Interrupt Queue 3 |
+ * | 2 * 4 Bytes |
+ * +---------------------------+
+ *
+ */
+#define TOE_QUEUE_HDR_ADDR(n) (TOE_TOE_QUE_HDR_BASE + (n) * 32)
+#define TOE_Q_HDR_AREA_END (TOE_QUEUE_HDR_ADDR(TOE_TOE_QUEUE_MAX + 1))
+#define TOE_DEFAULT_Q_HDR_BASE(x) (TOE_NONTOE_QUE_HDR_BASE + 0x08 * (x))
+#define TOE_CLASS_Q_HDR_BASE (TOE_NONTOE_QUE_HDR_BASE + 0x10)
+#define TOE_INTR_Q_HDR_BASE (TOE_NONTOE_QUE_HDR_BASE + 0x80)
+#define INTERRUPT_QUEUE_HDR_ADDR(n) (TOE_INTR_Q_HDR_BASE + (n) * 8)
+#define NONTOE_Q_HDR_AREA_END (INTERRUPT_QUEUE_HDR_ADDR(TOE_INTR_QUEUE_MAX + 1))
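+/* For example, TOE_QUEUE_HDR_ADDR(2) is 0x3000 + 2 * 32 = 0x3040, the
+ * "TOE Queue 2" slot in the memory layout above.
+ */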
+
+/* NONTOE Queue Header Word 0 */
+union nontoe_qhdr0 {
+ unsigned int bits32;
+ unsigned int base_size;
+};
+
+#define NONTOE_QHDR0_BASE_MASK (~0x0f)
+
+/* NONTOE Queue Header Word 1 */
+union nontoe_qhdr1 {
+ unsigned int bits32;
+ struct bit_nonqhdr1 {
+ /* bit 15:0 */
+ unsigned int rptr:16;
+ /* bit 31:16 */
+ unsigned int wptr:16;
+ } bits;
+};
+
+/* Non-TOE Queue Header */
+struct nontoe_qhdr {
+ union nontoe_qhdr0 word0;
+ union nontoe_qhdr1 word1;
+};
+
+#endif /* _GEMINI_ETHERNET_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 02dd5246dfae..1a49297224ed 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -103,7 +103,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
int i;
- int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
+ int num_entries = ARRAY_SIZE(cmd_priv_map);
u32 cmd_privileges = adapter->cmd_privileges;
for (i = 0; i < num_entries; i++)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c6e859a27ee6..c36c81959198 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4634,6 +4634,14 @@ int be_update_queues(struct be_adapter *adapter)
be_schedule_worker(adapter);
+ /* The IF was destroyed and re-created. We need to clear
+ * all promiscuous flags valid for the destroyed IF.
+	 * Without this, promiscuous mode is not restored during
+ * be_open() because the driver thinks that it is
+ * already enabled in HW.
+ */
+ adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
+
if (netif_running(netdev))
status = be_open(netdev);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 5385074b3b7d..e7381f8ef89d 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -20,7 +20,8 @@
#include <linux/timecounter.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
@@ -195,7 +196,7 @@
* Evidently, ARM SoCs have the FEC block generated in a
* little endian mode so adjust endianness accordingly.
*/
-#if defined(CONFIG_ARM)
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
#define fec32_to_cpu le32_to_cpu
#define fec16_to_cpu le16_to_cpu
#define cpu_to_fec32 cpu_to_le32
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 610573855213..7a7f3a42b2aa 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -195,7 +195,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* account when setting it.
*/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64)
#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE 0
@@ -818,6 +819,12 @@ static void fec_enet_bd_init(struct net_device *dev)
for (i = 0; i < txq->bd.ring_size; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = cpu_to_fec16(0);
+ if (bdp->cbd_bufaddr &&
+ !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
if (txq->tx_skbuff[i]) {
dev_kfree_skb_any(txq->tx_skbuff[i]);
txq->tx_skbuff[i] = NULL;
@@ -1862,6 +1869,8 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
ret = clk_prepare_enable(fep->clk_ref);
if (ret)
goto failed_clk_ref;
+
+ phy_reset_after_clk_enable(ndev->phydev);
} else {
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_enet_out);
@@ -2101,7 +2110,8 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
/* List of registers that can safely be read to dump them with ethtool */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64)
static u32 fec_enet_register_offset[] = {
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2834,6 +2844,7 @@ fec_enet_open(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
+ bool reset_again;
ret = pm_runtime_get_sync(&fep->pdev->dev);
if (ret < 0)
@@ -2844,6 +2855,17 @@ fec_enet_open(struct net_device *ndev)
if (ret)
goto clk_enable;
+ /* During the first fec_enet_open call the PHY isn't probed at this
+ * point. Therefore the phy_reset_after_clk_enable() call within
+ * fec_enet_clk_enable() fails. As we need this reset in order to be
+	 * sure the PHY is working correctly, we check if we need to reset
+	 * again later, once the PHY has been probed.
+ */
+ if (ndev->phydev && ndev->phydev->drv)
+ reset_again = false;
+ else
+ reset_again = true;
+
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -2860,6 +2882,12 @@ fec_enet_open(struct net_device *ndev)
if (ret)
goto err_enet_mii_probe;
+ /* Call phy_reset_after_clk_enable() again if it failed during
+	 * fec_enet_clk_enable() earlier, because the PHY wasn't probed yet.
+ */
+ if (reset_again)
+ phy_reset_after_clk_enable(ndev->phydev);
+
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
@@ -3113,7 +3141,7 @@ static int fec_enet_init(struct net_device *ndev)
unsigned dsize_log2 = __fls(dsize);
WARN_ON(dsize != (1 << dsize_log2));
-#if defined(CONFIG_ARM)
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
fep->rx_align = 0xf;
fep->tx_align = 0xf;
#else
@@ -3463,6 +3491,10 @@ fec_probe(struct platform_device *pdev)
goto failed_regulator;
}
} else {
+ if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto failed_regulator;
+ }
fep->reg_phy = NULL;
}
@@ -3546,8 +3578,9 @@ failed_clk_ipg:
failed_clk:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
-failed_phy:
of_node_put(phy_node);
+failed_phy:
+ dev_id--;
failed_ioremap:
free_netdev(ndev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 7892f2f0c6b5..2c2976a2dda6 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -613,9 +613,11 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void fs_timeout(struct net_device *dev)
+static void fs_timeout_work(struct work_struct *work)
{
- struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
+ timeout_work);
+ struct net_device *dev = fep->ndev;
unsigned long flags;
int wake = 0;
@@ -627,7 +629,6 @@ static void fs_timeout(struct net_device *dev)
phy_stop(dev->phydev);
(*fep->ops->stop)(dev);
(*fep->ops->restart)(dev);
- phy_start(dev->phydev);
}
phy_start(dev->phydev);
@@ -639,6 +640,13 @@ static void fs_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+static void fs_timeout(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ schedule_work(&fep->timeout_work);
+}
+
/*-----------------------------------------------------------------------------
* generic link-change handler - should be sufficient for most cases
*-----------------------------------------------------------------------------*/
@@ -759,6 +767,7 @@ static int fs_enet_close(struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
napi_disable(&fep->napi);
+ cancel_work_sync(&fep->timeout_work);
phy_stop(dev->phydev);
spin_lock_irqsave(&fep->lock, flags);
@@ -1019,6 +1028,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->netdev_ops = &fs_enet_netdev_ops;
ndev->watchdog_timeo = 2 * HZ;
+ INIT_WORK(&fep->timeout_work, fs_timeout_work);
netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
ndev->ethtool_ops = &fs_ethtool_ops;
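
The fs_enet conversion above is the standard fix for sleeping calls (phy_stop()/phy_start() can sleep) inside .ndo_tx_timeout, which runs in atomic context: do the recovery from a work item and only schedule it from the timeout hook, cancelling it synchronously on close as the hunks add. A condensed sketch with hypothetical example_* names:

#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/workqueue.h>

struct example_private {
	struct net_device *ndev;
	struct work_struct timeout_work;
};

static void example_timeout_work(struct work_struct *work)
{
	struct example_private *priv =
		container_of(work, struct example_private, timeout_work);

	/* process context: sleeping calls are allowed here */
	phy_stop(priv->ndev->phydev);
	phy_start(priv->ndev->phydev);
	netif_wake_queue(priv->ndev);
}

static void example_timeout(struct net_device *ndev)
{
	struct example_private *priv = netdev_priv(ndev);

	/* atomic context: just defer the real work */
	schedule_work(&priv->timeout_work);
}
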
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 92e06b37a199..195fae6aec4a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -125,6 +125,7 @@ struct fs_enet_private {
spinlock_t lock; /* during all ops except TX pckt processing */
spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
struct fs_platform_info *fpi;
+ struct work_struct timeout_work;
const struct fs_ops *ops;
int rx_ring, tx_ring;
dma_addr_t ring_mem_addr;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7f837006bb6a..3bdeb295514b 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2932,7 +2932,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
struct sk_buff *skb, bool first)
{
- unsigned int size = lstatus & BD_LENGTH_MASK;
+ int size = lstatus & BD_LENGTH_MASK;
struct page *page = rxb->page;
bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
@@ -2947,11 +2947,16 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
if (last)
size -= skb->len;
- /* in case the last fragment consisted only of the FCS */
+ /* Add the last fragment if it contains something other than
+ * the FCS, otherwise drop it and trim off any part of the FCS
+ * that was already received.
+ */
if (size > 0)
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
rxb->page_offset + RXBUF_ALIGNMENT,
size, GFAR_RXB_TRUESIZE);
+ else if (size < 0)
+ pskb_trim(skb, skb->len + size);
}
/* try reuse page */
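
Aside on the signedness fix above: on the last BD the length field covers the whole frame, so after size -= skb->len the remainder can go negative when the final fragment held nothing but (part of) the FCS; its magnitude is exactly the number of trailing FCS bytes that already landed in earlier fragments and must be trimmed with pskb_trim(). In miniature (hypothetical helper, illustrative numbers):

static int example_last_frag_size(int frame_len, int acc_len)
{
	/* e.g. frame_len 1024, acc_len 1026 -> -2: the last fragment
	 * was pure FCS and two FCS bytes leaked into the previous one,
	 * so the caller trims via pskb_trim(skb, skb->len + size)
	 */
	return frame_len - acc_len;
}

Had size stayed unsigned, the size < 0 branch could never fire and the size > 0 test would wrongly succeed on underflow.
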
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 544114281ea7..9f8d4f8e57e3 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
now = tmr_cnt_read(etsects);
now += delta;
tmr_cnt_write(etsects, now);
+ set_fipers(etsects);
spin_unlock_irqrestore(&etsects->lock, flags);
- set_fipers(etsects);
-
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 30000b6aa7b8..8bcf470ff5f3 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -94,15 +94,6 @@ config HNS3_HCLGE
compatibility layer. The engine is used in the Hisilicon hip08 family of
SoCs and further upcoming SoCs.
-config HNS3_ENET
- tristate "Hisilicon HNS3 Ethernet Device Support"
- depends on 64BIT && PCI
- depends on HNS3 && HNS3_HCLGE
- ---help---
- This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
- family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
- devices and their associated operations.
-
config HNS3_DCB
bool "Hisilicon HNS3 Data Center Bridge Support"
default n
@@ -112,4 +103,23 @@ config HNS3_DCB
If unsure, say N.
+config HNS3_HCLGEVF
+ tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
+ depends on PCI_MSI
+ depends on HNS3
+ depends on HNS3_HCLGE
+ ---help---
+ This selects the HNS3 VF driver's network acceleration engine & its hardware
+ compatibility layer. The engine is used in the Hisilicon hip08 family of
+ SoCs and further upcoming SoCs.
+
+config HNS3_ENET
+ tristate "Hisilicon HNS3 Ethernet Device Support"
+ depends on 64BIT && PCI
+ depends on HNS3
+ ---help---
+ This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for the
+ hip08 family of SoCs. This module depends upon the HNAE3 driver to access
+ HNAE3 devices and their associated operations.
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 8b5cdf490850..cac86e9ae0dd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -1168,7 +1168,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb)
int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status)
{
- if (!mac_cb || !mac_cb->cpld_ctrl)
+ if (!mac_cb)
return 0;
return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 408b63faf9a8..acf29633ec79 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -18,6 +18,7 @@ enum _dsm_op_index {
HNS_OP_LED_SET_FUNC = 0x3,
HNS_OP_GET_PORT_TYPE_FUNC = 0x4,
HNS_OP_GET_SFP_STAT_FUNC = 0x5,
+ HNS_OP_LOCATE_LED_SET_FUNC = 0x6,
};
enum _dsm_rst_type {
@@ -43,12 +44,17 @@ static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
{
- u32 ret;
-
- if (dsaf_dev->sub_ctrl)
- ret = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg);
- else
+ u32 ret = 0;
+ int err;
+
+ if (dsaf_dev->sub_ctrl) {
+ err = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg, &ret);
+ if (err)
+ dev_err(dsaf_dev->dev, "dsaf_read_syscon error %d!\n",
+ err);
+ } else {
ret = dsaf_read_reg(dsaf_dev->sc_base, reg);
+ }
return ret;
}
@@ -81,6 +87,33 @@ static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
ACPI_FREE(obj);
}
+static void hns_dsaf_acpi_locate_ledctrl_by_port(struct hns_mac_cb *mac_cb,
+ u8 op_type, u32 locate,
+ u32 port)
+{
+ union acpi_object obj_args[2], argv4;
+ union acpi_object *obj;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = locate;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 2;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_err(mac_cb->dev, "ledctrl fail, locate:%d port:%d!\n",
+ locate, port);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
+
static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
u16 speed, int data)
{
@@ -160,15 +193,23 @@ static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status)
{
+ u32 val = 0;
+ int ret;
+
+ if (!mac_cb->cpld_ctrl)
+ return 0;
+
switch (status) {
case HNAE_LED_ACTIVE:
- mac_cb->cpld_led_value =
- dsaf_read_syscon(mac_cb->cpld_ctrl,
- mac_cb->cpld_ctrl_reg);
- dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
- CPLD_LED_ON_VALUE);
+ ret = dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ &val);
+ if (ret)
+ return ret;
+
+ dsaf_set_bit(val, DSAF_LED_ANCHOR_B, CPLD_LED_ON_VALUE);
dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
- mac_cb->cpld_led_value);
+ val);
+ mac_cb->cpld_led_value = val;
break;
case HNAE_LED_INACTIVE:
dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
@@ -184,6 +225,30 @@ static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
return 0;
}
+static int cpld_set_led_id_acpi(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
+{
+ switch (status) {
+ case HNAE_LED_ACTIVE:
+ hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+ HNS_OP_LOCATE_LED_SET_FUNC,
+ CPLD_LED_ON_VALUE,
+ mac_cb->mac_id);
+ break;
+ case HNAE_LED_INACTIVE:
+ hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+ HNS_OP_LOCATE_LED_SET_FUNC,
+ CPLD_LED_DEFAULT_VALUE,
+ mac_cb->mac_id);
+ break;
+ default:
+ dev_err(mac_cb->dev, "invalid led state: %d!", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
#define RESET_REQ_OR_DREQ 1
static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
@@ -505,12 +570,19 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
{
+ u32 val = 0;
+ int ret;
+
if (!mac_cb->cpld_ctrl)
return -ENODEV;
- *sfp_prsnt = !dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg
- + MAC_SFP_PORT_OFFSET);
+ ret = dsaf_read_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg + MAC_SFP_PORT_OFFSET,
+ &val);
+ if (ret)
+ return ret;
+ *sfp_prsnt = !val;
return 0;
}
@@ -560,7 +632,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
#define RX_CSR(lane, reg) ((0x4080 + (reg) * 0x0002 + (lane) * 0x0200) * 2)
u64 reg_offset = RX_CSR(lane_id[mac_cb->mac_id], 0);
- int sfp_prsnt;
+ int sfp_prsnt = 0;
int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
if (!mac_cb->phy_dev) {
@@ -572,7 +644,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
}
if (mac_cb->serdes_ctrl) {
- u32 origin;
+ u32 origin = 0;
if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
#define HILINK_ACCESS_SEL_CFG 0x40008
@@ -589,7 +661,10 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
HILINK_ACCESS_SEL_CFG, 3);
}
- origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
+ ret = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset,
+ &origin);
+ if (ret)
+ return ret;
dsaf_set_field(origin, 1ull << 10, 10, en);
dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
@@ -660,7 +735,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
} else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
misc_op->cpld_set_led = hns_cpld_set_led_acpi;
misc_op->cpld_reset_led = cpld_led_reset_acpi;
- misc_op->cpld_set_led_id = cpld_set_led_id;
+ misc_op->cpld_set_led_id = cpld_set_led_id_acpi;
misc_op->dsaf_reset = hns_dsaf_rst_acpi;
misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 46a52d9bb196..886cbbf25761 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1034,12 +1034,9 @@ static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
regmap_write(base, reg, value);
}
-static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg)
+static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
{
- unsigned int val;
-
- regmap_read(base, reg, &val);
- return val;
+ return regmap_read(base, reg, val);
}
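
With dsaf_read_syscon() now returning the regmap_read() status instead of a possibly garbage value, callers move to the read-into-out-parameter style used throughout the hns_dsaf_misc.c hunks above. A minimal sketch of the new calling convention (hypothetical caller):

#include <linux/regmap.h>

static int example_read_field(struct regmap *base, u32 reg, u32 *out)
{
	u32 val = 0;
	int ret;

	ret = dsaf_read_syscon(base, reg, &val);
	if (ret)
		return ret;	/* propagate instead of consuming garbage */

	*out = val;
	return 0;
}
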
#define dsaf_read_dev(a, reg) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index a9349e1f3e51..002534f12b66 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -1,7 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0+
#
# Makefile for the HISILICON network device drivers.
#
obj-$(CONFIG_HNS3) += hns3pf/
+obj-$(CONFIG_HNS3) += hns3vf/
obj-$(CONFIG_HNS3) += hnae3.o
+
+obj-$(CONFIG_HNS3_ENET) += hns3.o
+hns3-objs = hns3_enet.o hns3_ethtool.o
+
+hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
new file mode 100644
index 000000000000..3e9203ea42a6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGE_MBX_H
+#define __HCLGE_MBX_H
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#define HCLGE_MBX_VF_MSG_DATA_NUM 16
+
+enum HCLGE_MBX_OPCODE {
+ HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */
+ HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */
+ HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */
+ HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */
+ HCLGE_MBX_MAP_RING_TO_VECTOR, /* (VF -> PF) map ring-to-vector */
+ HCLGE_MBX_UNMAP_RING_TO_VECTOR, /* (VF -> PF) unmap ring-to-vector */
+ HCLGE_MBX_SET_PROMISC_MODE, /* (VF -> PF) set promiscuous mode */
+ HCLGE_MBX_SET_MACVLAN, /* (VF -> PF) set unicast filter */
+ HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */
+ HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */
+ HCLGE_MBX_GET_TCINFO, /* (VF -> PF) get TC config */
+ HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */
+ HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
+ HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */
+ HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate response to VF */
+ HCLGE_MBX_GET_BDNUM, /* (VF -> PF) get BD num */
+ HCLGE_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */
+ HCLGE_MBX_GET_STREAMID, /* (VF -> PF) get stream id */
+ HCLGE_MBX_SET_AESTART, /* (VF -> PF) start ae */
+ HCLGE_MBX_SET_TSOSTATS, /* (VF -> PF) get tso stats */
+ HCLGE_MBX_LINK_STAT_CHANGE, /* (PF -> VF) link status has changed */
+ HCLGE_MBX_GET_BASE_CONFIG, /* (VF -> PF) get config */
+ HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */
+ HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */
+ HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */
+};
+
+/* below are per-VF mac-vlan subcodes */
+enum hclge_mbx_mac_vlan_subcode {
+ HCLGE_MBX_MAC_VLAN_UC_MODIFY = 0, /* modify UC mac addr */
+ HCLGE_MBX_MAC_VLAN_UC_ADD, /* add a new UC mac addr */
+ HCLGE_MBX_MAC_VLAN_UC_REMOVE, /* remove a UC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */
+};
+
+/* below are per-VF vlan cfg subcodes */
+enum hclge_mbx_vlan_cfg_subcode {
+ HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */
+ HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */
+ HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */
+};
+
+#define HCLGE_MBX_MAX_MSG_SIZE 16
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8
+
+struct hclgevf_mbx_resp_status {
+ struct mutex mbx_mutex; /* protects against contending sync cmd resp */
+ u32 origin_mbx_msg;
+ bool received_resp;
+ int resp_status;
+ u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+};
+
+struct hclge_mbx_vf_to_pf_cmd {
+ u8 rsv;
+ u8 mbx_src_vfid; /* Auto filled by IMP */
+ u8 rsv1[2];
+ u8 msg_len;
+ u8 rsv2[3];
+ u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
+};
+
+struct hclge_mbx_pf_to_vf_cmd {
+ u8 dest_vfid;
+ u8 rsv[3];
+ u8 msg_len;
+ u8 rsv1[3];
+ u16 msg[8];
+};
+
+#define hclge_mbx_ring_ptr_move_crq(crq) \
+ (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
+#endif
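
The crq ring-advance macro just above is the usual modular increment over a descriptor ring; the same arithmetic in standalone form (hypothetical example_crq type):

struct example_crq {
	int next_to_use;
	int desc_num;
};

static void example_ring_ptr_move(struct example_crq *crq)
{
	/* as in hclge_mbx_ring_ptr_move_crq(): with desc_num = 1024 and
	 * next_to_use = 1023, the next move wraps back to slot 0
	 */
	crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num;
}
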
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 5bcb2238acb2..02145f2de820 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -196,9 +196,18 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- int ret = 0;
+ int ret = 0, lock_acquired;
+
+ /* We can deadlock if SRIOV is being enabled in the probe context
+ * and probe then gets called again on the same thread, which happens
+ * when pci_enable_sriov() is called to create VFs from the PF's
+ * probe. Therefore, for simplicity, uniformly defer further probing
+ * in all cases where we detect contention.
+ */
+ lock_acquired = mutex_trylock(&hnae3_common_lock);
+ if (!lock_acquired)
+ return -EPROBE_DEFER;
- mutex_lock(&hnae3_common_lock);
list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
/* Check if there are matched ae_algo */
@@ -211,6 +220,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
if (!ae_dev->ops) {
dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
+ ret = -EOPNOTSUPP;
goto out_err;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 67c59e1039f2..fd06bc78c58e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -133,11 +133,16 @@ struct hnae3_vector_info {
#define HNAE3_RING_TYPE_B 0
#define HNAE3_RING_TYPE_TX 0
#define HNAE3_RING_TYPE_RX 1
+#define HNAE3_RING_GL_IDX_S 0
+#define HNAE3_RING_GL_IDX_M GENMASK(1, 0)
+#define HNAE3_RING_GL_RX 0
+#define HNAE3_RING_GL_TX 1
struct hnae3_ring_chain_node {
struct hnae3_ring_chain_node *next;
u32 tqp_index;
u32 flag;
+ u32 int_gl_idx;
};
#define HNAE3_IS_TX_RING(node) \
@@ -274,10 +279,14 @@ struct hnae3_ae_dev {
* Get firmware version
* get_mdix_mode()
* Get media type of phy
+ * enable_vlan_filter()
+ * Enable vlan filter
* set_vlan_filter()
* Set vlan filter config of Ports
* set_vf_vlan_filter()
* Set vlan filter config of vf
+ * enable_hw_strip_rxvtag()
+ * Enable/disable hardware VLAN tag stripping on received packets
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -347,7 +356,8 @@ struct hnae3_ae_ops {
u32 stringset, u8 *data);
int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
- void (*get_regs)(struct hnae3_handle *handle, void *data);
+ void (*get_regs)(struct hnae3_handle *handle, u32 *version,
+ void *data);
int (*get_regs_len)(struct hnae3_handle *handle);
u32 (*get_rss_key_size)(struct hnae3_handle *handle);
@@ -380,12 +390,23 @@ struct hnae3_ae_ops {
void (*get_mdix_mode)(struct hnae3_handle *handle,
u8 *tp_mdix_ctrl, u8 *tp_mdix);
+ void (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
u16 vlan, u8 qos, __be16 proto);
+ int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
void (*reset_event)(struct hnae3_handle *handle,
enum hnae3_reset_type reset);
+ void (*get_channels)(struct hnae3_handle *handle,
+ struct ethtool_channels *ch);
+ void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
+ u16 *free_tqps, u16 *max_rss_size);
+ int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num);
+ void (*get_flowctrl_adv)(struct hnae3_handle *handle,
+ u32 *flowctrl_adv);
+ int (*set_led_id)(struct hnae3_handle *handle,
+ enum ethtool_phys_id_state status);
};
struct hnae3_dcb_ops {
@@ -435,6 +456,8 @@ struct hnae3_knic_private_info {
u16 num_tqps; /* total number of TQPs in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
const struct hnae3_dcb_ops *dcb_ops;
+
+ u16 int_rl_setting;
};
struct hnae3_roce_private_info {
@@ -452,9 +475,10 @@ struct hnae3_unic_private_info {
struct hnae3_queue **tqp; /* array base of all TQPs of this instance */
};
-#define HNAE3_SUPPORT_MAC_LOOPBACK 1
-#define HNAE3_SUPPORT_PHY_LOOPBACK 2
-#define HNAE3_SUPPORT_SERDES_LOOPBACK 4
+#define HNAE3_SUPPORT_MAC_LOOPBACK BIT(0)
+#define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1)
+#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2)
+#define HNAE3_SUPPORT_VF BIT(3)
struct hnae3_handle {
struct hnae3_client *client;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index 925619a7c50a..eb82700da7d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -93,7 +93,7 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle)
{
struct net_device *dev = handle->kinfo.netdev;
- if (!handle->kinfo.dcb_ops)
+ if ((!handle->kinfo.dcb_ops) || (handle->flags & HNAE3_SUPPORT_VF))
return;
dev->dcbnl_ops = &hns3_dcbnl_ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 59415090ff0f..601b6295d3f8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -52,6 +52,8 @@ static const struct pci_device_id hns3_pci_tbl[] = {
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
/* required last entry */
{0, }
};
@@ -156,43 +158,68 @@ static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
napi_disable(&tqp_vector->napi);
}
-static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
- u32 gl_value)
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 rl_value)
{
- /* this defines the configuration for GL (Interrupt Gap Limiter)
- * GL defines inter interrupt gap.
- * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
- */
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
- writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
-}
+ u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
-static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
- u32 rl_value)
-{
/* this defines the configuration for RL (Interrupt Rate Limiter).
* RL defines the rate of interrupts, i.e. the number of interrupts
* per second. GL and RL are two ways to achieve interrupt coalescing.
*/
- writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+
+ if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable &&
+ !tqp_vector->rx_group.gl_adapt_enable)
+ /* According to the hardware, the range of rl_reg is
+ * 0-59 and the unit is 4 us.
+ */
+ rl_reg |= HNS3_INT_RL_ENABLE_MASK;
+
+ writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+}
+
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value)
+{
+ u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+ writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
+}
+
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value)
+{
+ u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+ writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}
-static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hns3_nic_priv *priv)
{
+ struct hnae3_handle *h = priv->ae_handle;
+
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
*/
- /* Default :enable interrupt coalesce */
- tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+ /* Default: enable interrupt coalescing self-adaptive and GL */
+ tqp_vector->tx_group.gl_adapt_enable = 1;
+ tqp_vector->rx_group.gl_adapt_enable = 1;
+
tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
- hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
- /* for now we are disabling Interrupt RL - we
- * will re-enable later
- */
- hns3_set_vector_coalesc_rl(tqp_vector, 0);
+ tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+
+ hns3_set_vector_coalesce_tx_gl(tqp_vector,
+ tqp_vector->tx_group.int_gl);
+ hns3_set_vector_coalesce_rx_gl(tqp_vector,
+ tqp_vector->rx_group.int_gl);
+
+ /* Default: disable RL */
+ h->kinfo.int_rl_setting = 0;
+ hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
+
tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
}
@@ -245,6 +272,8 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
goto out_start_err;
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
return 0;
out_start_err:
@@ -284,6 +313,9 @@ static void hns3_nic_net_down(struct net_device *netdev)
const struct hnae3_ae_ops *ops;
int i;
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
if (ops->stop)
@@ -721,6 +753,58 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
+static int hns3_fill_desc_vtags(struct sk_buff *skb,
+ struct hns3_enet_ring *tx_ring,
+ u32 *inner_vlan_flag,
+ u32 *out_vlan_flag,
+ u16 *inner_vtag,
+ u16 *out_vtag)
+{
+#define HNS3_TX_VLAN_PRIO_SHIFT 13
+
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ !(tx_ring->tqp->handle->kinfo.netdev->features &
+ NETIF_F_HW_VLAN_CTAG_TX)) {
+ /* When HW VLAN acceleration is turned off, and the stack
+ * sets the protocol to 802.1q, the driver just needs to
+ * set the protocol to the encapsulated ethertype.
+ */
+ skb->protocol = vlan_get_protocol(skb);
+ return 0;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ u16 vlan_tag;
+
+ vlan_tag = skb_vlan_tag_get(skb);
+ vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
+
+ /* Based on the hardware strategy, use out_vtag in the two-layer
+ * tag case and inner_vtag in the single-tag case.
+ */
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+ *out_vtag = vlan_tag;
+ } else {
+ hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+ *inner_vtag = vlan_tag;
+ }
+ } else if (skb->protocol == htons(ETH_P_8021Q)) {
+ struct vlan_ethhdr *vhdr;
+ int rc;
+
+ rc = skb_cow_head(skb, 0);
+ if (rc < 0)
+ return rc;
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
+ << HNS3_TX_VLAN_PRIO_SHIFT);
+ }
+
+ skb->protocol = vlan_get_protocol(skb);
+ return 0;
+}
+
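
The TCI packing in hns3_fill_desc_vtags() above places the VLAN ID in bits 11..0 and skb->priority (the 802.1p PCP) in bits 15..13, hence HNS3_TX_VLAN_PRIO_SHIFT = 13. The same packing in isolation (hypothetical helper):

#include <linux/if_vlan.h>

/* e.g. VID 100 (0x064) with priority 5 -> TCI 0xa064 */
static u16 example_build_tci(u16 vid, u8 prio)
{
	return (vid & VLAN_VID_MASK) | ((prio & 0x7) << 13);
}
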
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
enum hns_desc_type type)
@@ -731,6 +815,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
u16 bdtp_fe_sc_vld_ra_ri = 0;
u32 type_cs_vlan_tso = 0;
struct sk_buff *skb;
+ u16 inner_vtag = 0;
+ u16 out_vtag = 0;
u32 paylen = 0;
u16 mss = 0;
__be16 protocol;
@@ -754,15 +840,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
skb = (struct sk_buff *)priv;
paylen = skb->len;
+ ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
+ &ol_type_vlan_len_msec,
+ &inner_vtag, &out_vtag);
+ if (unlikely(ret))
+ return ret;
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_reset_mac_len(skb);
protocol = skb->protocol;
- /* vlan packet*/
- if (protocol == htons(ETH_P_8021Q)) {
- protocol = vlan_get_protocol(skb);
- skb->protocol = protocol;
- }
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
if (ret)
return ret;
@@ -788,6 +875,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
cpu_to_le32(type_cs_vlan_tso);
desc->tx.paylen = cpu_to_le32(paylen);
desc->tx.mss = cpu_to_le16(mss);
+ desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+ desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
}
/* move ring pointer to next.*/
@@ -1029,25 +1118,50 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
static int hns3_nic_set_features(struct net_device *netdev,
netdev_features_t features)
{
+ netdev_features_t changed = netdev->features ^ features;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int ret;
- if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
- priv->ops.fill_desc = hns3_fill_desc_tso;
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- } else {
- priv->ops.fill_desc = hns3_fill_desc;
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+ if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
+ if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+ priv->ops.fill_desc = hns3_fill_desc_tso;
+ priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
+ } else {
+ priv->ops.fill_desc = hns3_fill_desc;
+ priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ h->ae_algo->ops->enable_vlan_filter) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, true);
+ else
+ h->ae_algo->ops->enable_vlan_filter(h, false);
+ }
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
+ h->ae_algo->ops->enable_hw_strip_rxvtag) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
+ else
+ ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
+
+ if (ret)
+ return ret;
}
netdev->features = features;
return 0;
}
-static void
-hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+static void hns3_nic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
int queue_num = priv->ae_handle->kinfo.num_tqps;
+ struct hnae3_handle *handle = priv->ae_handle;
struct hns3_enet_ring *ring;
unsigned int start;
unsigned int idx;
@@ -1055,6 +1169,13 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
u64 rx_bytes = 0;
u64 tx_pkts = 0;
u64 rx_pkts = 0;
+ u64 tx_drop = 0;
+ u64 rx_drop = 0;
+
+ if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
+ handle->ae_algo->ops->update_stats(handle, &netdev->stats);
for (idx = 0; idx < queue_num; idx++) {
/* fetch the tx stats */
@@ -1063,6 +1184,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin_irq(&ring->syncp);
tx_bytes += ring->stats.tx_bytes;
tx_pkts += ring->stats.tx_pkts;
+ tx_drop += ring->stats.tx_busy;
+ tx_drop += ring->stats.sw_err_cnt;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -1071,6 +1194,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin_irq(&ring->syncp);
rx_bytes += ring->stats.rx_bytes;
rx_pkts += ring->stats.rx_pkts;
+ rx_drop += ring->stats.non_vld_descs;
+ rx_drop += ring->stats.err_pkt_len;
+ rx_drop += ring->stats.l2_err;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
@@ -1086,8 +1212,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
stats->tx_errors = netdev->stats.tx_errors;
- stats->rx_dropped = netdev->stats.rx_dropped;
- stats->tx_dropped = netdev->stats.tx_dropped;
+ stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
+ stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
stats->collisions = netdev->stats.collisions;
stats->rx_over_errors = netdev->stats.rx_over_errors;
stats->rx_frame_errors = netdev->stats.rx_frame_errors;
@@ -1317,6 +1443,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+ netdev->mtu = new_mtu;
+
/* if the netdev was running earlier, bring it up again */
if (if_running && hns3_nic_net_open(netdev))
ret = -EINVAL;
@@ -1476,6 +1604,8 @@ static struct pci_driver hns3_driver = {
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1490,6 +1620,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
@@ -1503,11 +1634,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ if (!(h->flags & HNAE3_SUPPORT_VF))
+ netdev->hw_features |=
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -2083,6 +2218,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
prefetchw(skb->data);
+ /* Based on the hardware strategy, the offloaded tag is stored in
+ * ot_vlan_tag in the two-layer tag case and in vlan_tag in the
+ * single-tag case.
+ */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ if (!(vlan_tag & VLAN_VID_MASK))
+ vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ if (vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+
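
The Rx path above mirrors the Tx rule: the hardware reports an offloaded outer tag in ot_vlan_tag for double-tagged frames and uses vlan_tag for single-tagged ones, and only a tag with non-zero VID bits is delivered to the stack. The selection in isolation (hypothetical helper):

#include <linux/if_vlan.h>

/* returns the tag to hand to the stack, or 0 if there is none */
static u16 example_pick_rx_vlan(u16 ot_vlan_tag, u16 vlan_tag)
{
	if (ot_vlan_tag & VLAN_VID_MASK)
		return ot_vlan_tag;
	if (vlan_tag & VLAN_VID_MASK)
		return vlan_tag;
	return 0;
}
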
bnum = 1;
if (length <= HNS3_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -2301,25 +2452,22 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
- u16 rx_int_gl, tx_int_gl;
- bool rx, tx;
-
- rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
- tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
- rx_int_gl = tqp_vector->rx_group.int_gl;
- tx_int_gl = tqp_vector->tx_group.int_gl;
- if (rx && tx) {
- if (rx_int_gl > tx_int_gl) {
- tqp_vector->tx_group.int_gl = rx_int_gl;
- tqp_vector->tx_group.flow_level =
- tqp_vector->rx_group.flow_level;
- hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
- } else {
- tqp_vector->rx_group.int_gl = tx_int_gl;
- tqp_vector->rx_group.flow_level =
- tqp_vector->tx_group.flow_level;
- hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
- }
+ struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
+ struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
+ bool rx_update, tx_update;
+
+ if (rx_group->gl_adapt_enable) {
+ rx_update = hns3_get_new_int_gl(rx_group);
+ if (rx_update)
+ hns3_set_vector_coalesce_rx_gl(tqp_vector,
+ rx_group->int_gl);
+ }
+
+ if (tx_group->gl_adapt_enable) {
+ tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
+ if (tx_update)
+ hns3_set_vector_coalesce_tx_gl(tqp_vector,
+ tx_group->int_gl);
}
}
@@ -2380,6 +2528,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->tqp_index = tx_ring->tqp->tqp_index;
hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_TX);
+ hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
cur_chain->next = NULL;
@@ -2395,6 +2545,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
chain->tqp_index = tx_ring->tqp->tqp_index;
hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_TX);
+ hnae_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ HNAE3_RING_GL_TX);
cur_chain = chain;
}
@@ -2406,6 +2560,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
cur_chain->tqp_index = rx_ring->tqp->tqp_index;
hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_RX);
+ hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
rx_ring = rx_ring->next;
}
@@ -2419,6 +2575,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
chain->tqp_index = rx_ring->tqp->tqp_index;
hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
HNAE3_RING_TYPE_RX);
+ hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+
cur_chain = chain;
rx_ring = rx_ring->next;
@@ -2507,7 +2666,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector->rx_group.total_packets = 0;
tqp_vector->tx_group.total_bytes = 0;
tqp_vector->tx_group.total_packets = 0;
- hns3_vector_gl_rl_init(tqp_vector);
+ hns3_vector_gl_rl_init(tqp_vector, priv);
tqp_vector->handle = h;
ret = hns3_get_vector_ring_chain(tqp_vector,
@@ -2649,6 +2808,19 @@ err:
return ret;
}
+static void hns3_put_ring_config(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ devm_kfree(priv->dev, priv->ring_data[i].ring);
+ devm_kfree(priv->dev,
+ priv->ring_data[i + h->kinfo.num_tqps].ring);
+ }
+ devm_kfree(priv->dev, priv->ring_data);
+}
+
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
int ret;
@@ -2785,8 +2957,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
h->ae_algo->ops->reset_queue(h, i);
hns3_fini_ring(priv->ring_data[i].ring);
+ devm_kfree(priv->dev, priv->ring_data[i].ring);
hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
+ devm_kfree(priv->dev,
+ priv->ring_data[i + h->kinfo.num_tqps].ring);
}
+ devm_kfree(priv->dev, priv->ring_data);
return 0;
}
@@ -3142,8 +3318,8 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
switch (type) {
case HNAE3_UP_CLIENT:
- ret = hns3_reset_notify_up_enet(handle);
- break;
+ ret = hns3_reset_notify_up_enet(handle);
+ break;
case HNAE3_DOWN_CLIENT:
ret = hns3_reset_notify_down_enet(handle);
break;
@@ -3160,6 +3336,115 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
return ret;
}
+static u16 hns3_get_max_available_channels(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u16 free_tqps, max_rss_size, max_tqps;
+
+ h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
+ max_tqps = h->kinfo.num_tc * max_rss_size;
+
+ return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
+}
+
+static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret;
+
+ ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
+ if (ret)
+ return ret;
+
+ ret = hns3_get_ring_config(priv);
+ if (ret)
+ return ret;
+
+ ret = hns3_nic_init_vector_data(priv);
+ if (ret)
+ goto err_uninit_vector;
+
+ ret = hns3_init_all_ring(priv);
+ if (ret)
+ goto err_put_ring;
+
+ return 0;
+
+err_put_ring:
+ hns3_put_ring_config(priv);
+err_uninit_vector:
+ hns3_nic_uninit_vector_data(priv);
+ return ret;
+}
+
+static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
+{
+ return (new_tqp_num / num_tc) * num_tc;
+}
+
+int hns3_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ bool if_running = netif_running(netdev);
+ u32 new_tqp_num = ch->combined_count;
+ u16 org_tqp_num;
+ int ret;
+
+ if (ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
+ new_tqp_num < kinfo->num_tc) {
+ dev_err(&netdev->dev,
+ "Change tqps fail, the tqp range is from %d to %d",
+ kinfo->num_tc,
+ hns3_get_max_available_channels(netdev));
+ return -EINVAL;
+ }
+
+ new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
+ if (kinfo->num_tqps == new_tqp_num)
+ return 0;
+
+ if (if_running)
+ dev_close(netdev);
+
+ hns3_clear_all_ring(h);
+
+ ret = hns3_nic_uninit_vector_data(priv);
+ if (ret) {
+ dev_err(&netdev->dev,
+ "Unbind vector with tqp fail, nothing is changed");
+ goto open_netdev;
+ }
+
+ hns3_uninit_all_ring(priv);
+
+ org_tqp_num = h->kinfo.num_tqps;
+ ret = hns3_modify_tqp_num(netdev, new_tqp_num);
+ if (ret) {
+ ret = hns3_modify_tqp_num(netdev, org_tqp_num);
+ if (ret) {
+ /* Reverting to the old tqp number failed: fatal error */
+ dev_err(&netdev->dev,
+ "Reverting to old tqp num failed, ret=%d", ret);
+ return ret;
+ }
+ dev_info(&netdev->dev,
+ "Change tqp num fail, Revert to old tqp num");
+ }
+
+open_netdev:
+ if (if_running)
+ dev_open(netdev);
+
+ return ret;
+}
+
static const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 8a9de759957b..213f501b30bb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -451,10 +451,14 @@ enum hns3_link_mode_bits {
HNS3_LM_COUNT = 15
};
-#define HNS3_INT_GL_50K 0x000A
-#define HNS3_INT_GL_20K 0x0019
-#define HNS3_INT_GL_18K 0x001B
-#define HNS3_INT_GL_8K 0x003E
+#define HNS3_INT_GL_MAX 0x1FE0
+#define HNS3_INT_GL_50K 0x0014
+#define HNS3_INT_GL_20K 0x0032
+#define HNS3_INT_GL_18K 0x0036
+#define HNS3_INT_GL_8K 0x007C
+
+#define HNS3_INT_RL_MAX 0x00EC
+#define HNS3_INT_RL_ENABLE_MASK 0x40
struct hns3_enet_ring_group {
/* array of pointers to rings */
@@ -464,6 +468,7 @@ struct hns3_enet_ring_group {
u16 count;
enum hns3_flow_level_range flow_level;
u16 int_gl;
+ u8 gl_adapt_enable;
};
struct hns3_enet_tqp_vector {
@@ -594,7 +599,15 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
+#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1)
+#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
+
+#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2)
+#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
+
void hns3_ethtool_set_ops(struct net_device *netdev);
+int hns3_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch);
bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
@@ -604,6 +617,13 @@ int hns3_clean_rx_ring(
struct hns3_enet_ring *ring, int budget,
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value);
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value);
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 rl_value);
+
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
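
On the conversion helpers above: GL registers count in 2 us units and RL registers in 4 us units, so HNS3_INT_GL_50K (0x14 = 20 us) maps to register value 10, and HNS3_INT_RL_MAX (0xEC = 236 us) maps to 59, the top of the 0-59 range noted in hns3_set_vector_coalesce_rl(). The arithmetic in isolation (hypothetical helpers):

#include <linux/types.h>

static u32 example_gl_usec_to_reg(u32 usecs)
{
	return usecs >> 1;	/* 2 us per GL unit: 20 us -> 10 */
}

static u32 example_rl_usec_to_reg(u32 usecs)
{
	return usecs >> 2;	/* 4 us per RL unit: 236 us -> 59 */
}

Odd GL values and RL values that are not multiples of 4 are first rounded down by hns3_gl_round_down()/hns3_rl_round_down(), which is what the warnings in hns3_check_gl_coalesce_para() report.
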
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index a21470c72da3..b034c7f24eda 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -15,26 +15,25 @@
struct hns3_stats {
char stats_string[ETH_GSTRING_LEN];
- int stats_size;
int stats_offset;
};
/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member) { \
.stats_string = _string, \
- .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \
- .stats_offset = offsetof(struct hns3_enet_ring, stats), \
-} \
+ .stats_offset = offsetof(struct hns3_enet_ring, stats) +\
+ offsetof(struct ring_stats, _member), \
+}
static const struct hns3_stats hns3_txq_stats[] = {
/* Tx per-queue statistics */
- HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt),
- HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt),
- HNS3_TQP_STAT("tx_pkts", tx_pkts),
- HNS3_TQP_STAT("tx_bytes", tx_bytes),
- HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt),
- HNS3_TQP_STAT("tx_restart_queue", restart_queue),
+ HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
+ HNS3_TQP_STAT("tx_dropped", sw_err_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("packets", tx_pkts),
+ HNS3_TQP_STAT("bytes", tx_bytes),
+ HNS3_TQP_STAT("errors", tx_err_cnt),
+ HNS3_TQP_STAT("tx_wake", restart_queue),
HNS3_TQP_STAT("tx_busy", tx_busy),
};
@@ -42,18 +41,18 @@ static const struct hns3_stats hns3_txq_stats[] = {
static const struct hns3_stats hns3_rxq_stats[] = {
/* Rx per-queue statistics */
- HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt),
- HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt),
- HNS3_TQP_STAT("rx_pkts", rx_pkts),
- HNS3_TQP_STAT("rx_bytes", rx_bytes),
- HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt),
- HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt),
- HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len),
- HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs),
- HNS3_TQP_STAT("rx_err_bd_num", err_bd_num),
- HNS3_TQP_STAT("rx_l2_err", l2_err),
- HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err),
+ HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
+ HNS3_TQP_STAT("rx_dropped", sw_err_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("packets", rx_pkts),
+ HNS3_TQP_STAT("bytes", rx_bytes),
+ HNS3_TQP_STAT("errors", rx_err_cnt),
+ HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
+ HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
+ HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
+ HNS3_TQP_STAT("err_bd_num", err_bd_num),
+ HNS3_TQP_STAT("l2_err", l2_err),
+ HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
};
#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
@@ -389,9 +388,9 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
}
static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
- u32 stat_count, u32 num_tqps)
+ u32 stat_count, u32 num_tqps, const char *prefix)
{
-#define MAX_PREFIX_SIZE (8 + 4)
+#define MAX_PREFIX_SIZE (6 + 4)
u32 size_left;
u32 i, j;
u32 n1;
@@ -401,7 +400,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
data[ETH_GSTRING_LEN - 1] = '\0';
/* first, prepend the prefix string */
- n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i);
+ n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_",
+ prefix, i);
n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
size_left = (ETH_GSTRING_LEN - 1) - n1;
@@ -417,14 +417,16 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ const char tx_prefix[] = "txq";
+ const char rx_prefix[] = "rxq";
/* get strings for Tx */
data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
- kinfo->num_tqps);
+ kinfo->num_tqps, tx_prefix);
/* get strings for Rx */
data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
- kinfo->num_tqps);
+ kinfo->num_tqps, rx_prefix);
return data;
}
@@ -455,13 +457,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hns3_enet_ring *ring;
u8 *stat;
- u32 i;
+ int i, j;
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
ring = nic_priv->ring_data[i].ring;
- for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) {
- stat = (u8 *)ring + hns3_txq_stats[i].stats_offset;
+ for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
}
}
@@ -469,8 +471,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
/* get stats for Rx */
for (i = 0; i < kinfo->num_tqps; i++) {
ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
- for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) {
- stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset;
+ for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
}
}
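
The j-index fix above removes a classic nested-loop bug: the inner loop reused the outer counter i, clobbering it so the outer loop over queues terminated after a single pass. In miniature (hypothetical):

#include <linux/types.h>

static u64 example_sum_stats(const u64 (*rings)[4], int nrings)
{
	u64 sum = 0;
	int i, j;

	for (i = 0; i < nrings; i++)
		for (j = 0; j < 4; j++)	/* was "i": broke the outer loop */
			sum += rings[i][j];

	return sum;
}
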
@@ -559,10 +561,23 @@ static void hns3_get_pauseparam(struct net_device *netdev,
&param->rx_pause, &param->tx_pause);
}
+static int hns3_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *param)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->set_pauseparam)
+ return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
+ param->rx_pause,
+ param->tx_pause);
+ return -EOPNOTSUPP;
+}
+
static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ u32 flowctrl_adv = 0;
u32 supported_caps;
u32 advertised_caps;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
@@ -638,6 +653,8 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
if (!cmd->base.autoneg)
advertised_caps &= ~HNS3_LM_AUTONEG_BIT;
+ advertised_caps &= ~HNS3_LM_PAUSE_BIT;
+
/* now, map driver link modes to ethtool link modes */
hns3_driv_to_eth_caps(supported_caps, cmd, false);
hns3_driv_to_eth_caps(advertised_caps, cmd, true);
@@ -650,6 +667,18 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
/* 4.mdio_support */
cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
+ /* 5. get flow control settings */
+ if (h->ae_algo->ops->get_flowctrl_adv)
+ h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv);
+
+ if (flowctrl_adv & ADVERTISED_Pause)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Pause);
+
+ if (flowctrl_adv & ADVERTISED_Asym_Pause)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+
return 0;
}
@@ -730,7 +759,7 @@ static int hns3_get_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = h->kinfo.num_tc * h->kinfo.rss_size;
+ cmd->data = h->kinfo.rss_size;
break;
case ETHTOOL_GRXFH:
return h->ae_algo->ops->get_rss_tuple(h, cmd);
@@ -849,6 +878,241 @@ static int hns3_nway_reset(struct net_device *netdev)
return genphy_restart_aneg(phy);
}
+static void hns3_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_channels)
+ h->ae_algo->ops->get_channels(h, ch);
+}
+
+static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *cmd)
+{
+ struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 queue_num = h->kinfo.num_tqps;
+
+ if (queue >= queue_num) {
+ netdev_err(netdev,
+ "Invalid queue value %d! Queue max id=%d\n",
+ queue, queue_num - 1);
+ return -EINVAL;
+ }
+
+ tx_vector = priv->ring_data[queue].ring->tqp_vector;
+ rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+
+ cmd->use_adaptive_tx_coalesce = tx_vector->tx_group.gl_adapt_enable;
+ cmd->use_adaptive_rx_coalesce = rx_vector->rx_group.gl_adapt_enable;
+
+ cmd->tx_coalesce_usecs = tx_vector->tx_group.int_gl;
+ cmd->rx_coalesce_usecs = rx_vector->rx_group.int_gl;
+
+ cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+ cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+
+ return 0;
+}
+
+static int hns3_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ return hns3_get_coalesce_per_queue(netdev, 0, cmd);
+}
+
+static int hns3_check_gl_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ u32 rx_gl, tx_gl;
+
+ if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ netdev_err(netdev,
+ "Invalid rx-usecs value, rx-usecs range is 0-%d\n",
+ HNS3_INT_GL_MAX);
+ return -EINVAL;
+ }
+
+ if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ netdev_err(netdev,
+ "Invalid tx-usecs value, tx-usecs range is 0-%d\n",
+ HNS3_INT_GL_MAX);
+ return -EINVAL;
+ }
+
+ rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
+ if (rx_gl != cmd->rx_coalesce_usecs) {
+ netdev_info(netdev,
+ "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ cmd->rx_coalesce_usecs, rx_gl);
+ }
+
+ tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
+ if (tx_gl != cmd->tx_coalesce_usecs) {
+ netdev_info(netdev,
+ "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ cmd->tx_coalesce_usecs, tx_gl);
+ }
+
+ return 0;
+}
+
+static int hns3_check_rl_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ u32 rl;
+
+ if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) {
+ netdev_err(netdev,
+ "tx_usecs_high must be same as rx_usecs_high.\n");
+ return -EINVAL;
+ }
+
+ if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) {
+ netdev_err(netdev,
+ "Invalid usecs_high value, usecs_high range is 0-%d\n",
+ HNS3_INT_RL_MAX);
+ return -EINVAL;
+ }
+
+ rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+ if (rl != cmd->rx_coalesce_usecs_high) {
+ netdev_info(netdev,
+ "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
+ cmd->rx_coalesce_usecs_high, rl);
+ }
+
+ return 0;
+}
+
+static int hns3_check_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ int ret;
+
+ ret = hns3_check_gl_coalesce_para(netdev, cmd);
+ if (ret) {
+ netdev_err(netdev,
+ "Check gl coalesce param fail. ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hns3_check_rl_coalesce_para(netdev, cmd);
+ if (ret) {
+ netdev_err(netdev,
+ "Check rl coalesce param fail. ret = %d\n", ret);
+ return ret;
+ }
+
+ if (cmd->use_adaptive_tx_coalesce == 1 ||
+ cmd->use_adaptive_rx_coalesce == 1) {
+ netdev_info(netdev,
+ "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
+ cmd->use_adaptive_tx_coalesce,
+ cmd->use_adaptive_rx_coalesce);
+ }
+
+ return 0;
+}
+
+static void hns3_set_coalesce_per_queue(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ u32 queue)
+{
+ struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int queue_num = h->kinfo.num_tqps;
+
+ tx_vector = priv->ring_data[queue].ring->tqp_vector;
+ rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+
+ tx_vector->tx_group.gl_adapt_enable = cmd->use_adaptive_tx_coalesce;
+ rx_vector->rx_group.gl_adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+ tx_vector->tx_group.int_gl = cmd->tx_coalesce_usecs;
+ rx_vector->rx_group.int_gl = cmd->rx_coalesce_usecs;
+
+ hns3_set_vector_coalesce_tx_gl(tx_vector, tx_vector->tx_group.int_gl);
+ hns3_set_vector_coalesce_rx_gl(rx_vector, rx_vector->rx_group.int_gl);
+
+ hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
+ hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
+}
+
+static int hns3_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u16 queue_num = h->kinfo.num_tqps;
+ int ret;
+ int i;
+
+ ret = hns3_check_coalesce_para(netdev, cmd);
+ if (ret)
+ return ret;
+
+ h->kinfo.int_rl_setting =
+ hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+
+ for (i = 0; i < queue_num; i++)
+ hns3_set_coalesce_per_queue(netdev, cmd, i);
+
+ return 0;
+}
+
+static int hns3_get_regs_len(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_regs_len)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->get_regs_len(h);
+}
+
+static void hns3_get_regs(struct net_device *netdev,
+ struct ethtool_regs *cmd, void *data)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_regs)
+ return;
+
+ h->ae_algo->ops->get_regs(h, &cmd->version, data);
+}
+
+static int hns3_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_led_id)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_led_id(h, state);
+}
+
+static const struct ethtool_ops hns3vf_ethtool_ops = {
+ .get_drvinfo = hns3_get_drvinfo,
+ .get_ringparam = hns3_get_ringparam,
+ .set_ringparam = hns3_set_ringparam,
+ .get_strings = hns3_get_strings,
+ .get_ethtool_stats = hns3_get_stats,
+ .get_sset_count = hns3_get_sset_count,
+ .get_rxnfc = hns3_get_rxnfc,
+ .get_rxfh_key_size = hns3_get_rss_key_size,
+ .get_rxfh_indir_size = hns3_get_rss_indir_size,
+ .get_rxfh = hns3_get_rss,
+ .set_rxfh = hns3_set_rss,
+ .get_link_ksettings = hns3_get_link_ksettings,
+ .get_channels = hns3_get_channels,
+ .get_coalesce = hns3_get_coalesce,
+ .set_coalesce = hns3_set_coalesce,
+};
+
static const struct ethtool_ops hns3_ethtool_ops = {
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
@@ -856,6 +1120,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_ringparam = hns3_get_ringparam,
.set_ringparam = hns3_set_ringparam,
.get_pauseparam = hns3_get_pauseparam,
+ .set_pauseparam = hns3_set_pauseparam,
.get_strings = hns3_get_strings,
.get_ethtool_stats = hns3_get_stats,
.get_sset_count = hns3_get_sset_count,
@@ -868,9 +1133,21 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_link_ksettings = hns3_get_link_ksettings,
.set_link_ksettings = hns3_set_link_ksettings,
.nway_reset = hns3_nway_reset,
+ .get_channels = hns3_get_channels,
+ .set_channels = hns3_set_channels,
+ .get_coalesce = hns3_get_coalesce,
+ .set_coalesce = hns3_set_coalesce,
+ .get_regs_len = hns3_get_regs_len,
+ .get_regs = hns3_get_regs,
+ .set_phys_id = hns3_set_phys_id,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
{
- netdev->ethtool_ops = &hns3_ethtool_ops;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->flags & HNAE3_SUPPORT_VF)
+ netdev->ethtool_ops = &hns3vf_ethtool_ops;
+ else
+ netdev->ethtool_ops = &hns3_ethtool_ops;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index d2b20d01a58c..cb8ddd043476 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0+
#
# Makefile for the HISILICON network device drivers.
#
@@ -5,11 +6,6 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
-
-obj-$(CONFIG_HNS3_ENET) += hns3.o
-hns3-objs = hns3_enet.o hns3_ethtool.o
-
-hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index ce5ed8845042..3fd10a6bec53 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -102,6 +102,10 @@ enum hclge_opcode_type {
HCLGE_OPC_STATS_64_BIT = 0x0030,
HCLGE_OPC_STATS_32_BIT = 0x0031,
HCLGE_OPC_STATS_MAC = 0x0032,
+
+ HCLGE_OPC_QUERY_REG_NUM = 0x0040,
+ HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
+ HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
/* Device management command */
/* MAC commond */
@@ -111,6 +115,7 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
+ HCLGE_OPC_STATS_MAC_TRAFFIC = 0x0314,
/* MACSEC command */
/* PFC/Pause CMD*/
@@ -180,6 +185,10 @@ enum hclge_opcode_type {
/* Promisuous mode command */
HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
+ /* Vlan offload command */
+ HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
+ HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
+
/* Interrupts cmd */
HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
@@ -191,6 +200,7 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
+ HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012,
/* Multicast linear table cmd */
HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020,
@@ -218,6 +228,9 @@ enum hclge_opcode_type {
/* Mailbox cmd */
HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
+
+ /* Led command */
+ HCLGE_OPC_LED_STATUS_CFG = 0xB000,
};
#define HCLGE_TQP_REG_OFFSET 0x80000
@@ -399,6 +412,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0)
#define HCLGE_CFG_DEFAULT_SPEED_S 16
#define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16)
+#define HCLGE_CFG_RSS_SIZE_S 24
+#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
struct hclge_cfg_param_cmd {
__le32 offset;
@@ -549,8 +564,6 @@ struct hclge_config_auto_neg_cmd {
u8 rsv[20];
};
-#define HCLGE_MAC_MIN_MTU 64
-#define HCLGE_MAC_MAX_MTU 9728
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
@@ -587,6 +600,37 @@ struct hclge_mac_vlan_tbl_entry_cmd {
u8 rsv2[6];
};
+#define HCLGE_VLAN_MASK_EN_B 0x0
+struct hclge_mac_vlan_mask_entry_cmd {
+ u8 rsv0[2];
+ u8 vlan_mask;
+ u8 rsv1;
+ u8 mac_mask[6];
+ u8 rsv2[14];
+};
+
+#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0)
+#define HCLGE_MAC_MGR_MASK_MAC_B BIT(1)
+#define HCLGE_MAC_MGR_MASK_ETHERTYPE_B BIT(2)
+#define HCLGE_MAC_ETHERTYPE_LLDP 0x88cc
+
+struct hclge_mac_mgr_tbl_entry_cmd {
+ u8 flags;
+ u8 resp_code;
+ __le16 vlan_tag;
+ __le32 mac_addr_hi32;
+ __le16 mac_addr_lo16;
+ __le16 rsv1;
+ __le16 ethter_type;
+ __le16 egress_port;
+ __le16 egress_queue;
+ u8 sw_port_id_aware;
+ u8 rsv2;
+ u8 i_port_bitmap;
+ u8 i_port_direction;
+ u8 rsv3[2];
+};
+
#define HCLGE_CFG_MTA_MAC_SEL_S 0x0
#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
#define HCLGE_CFG_MTA_MAC_EN_B 0x7
@@ -658,6 +702,47 @@ struct hclge_vlan_filter_vf_cfg_cmd {
u8 vf_bitmap[16];
};
+#define HCLGE_ACCEPT_TAG_B 0
+#define HCLGE_ACCEPT_UNTAG_B 1
+#define HCLGE_PORT_INS_TAG1_EN_B 2
+#define HCLGE_PORT_INS_TAG2_EN_B 3
+#define HCLGE_CFG_NIC_ROCE_SEL_B 4
+struct hclge_vport_vtag_tx_cfg_cmd {
+ u8 vport_vlan_cfg;
+ u8 vf_offset;
+ u8 rsv1[2];
+ __le16 def_vlan_tag1;
+ __le16 def_vlan_tag2;
+ u8 vf_bitmap[8];
+ u8 rsv2[8];
+};
+
+#define HCLGE_REM_TAG1_EN_B 0
+#define HCLGE_REM_TAG2_EN_B 1
+#define HCLGE_SHOW_TAG1_EN_B 2
+#define HCLGE_SHOW_TAG2_EN_B 3
+struct hclge_vport_vtag_rx_cfg_cmd {
+ u8 vport_vlan_cfg;
+ u8 vf_offset;
+ u8 rsv1[6];
+ u8 vf_bitmap[8];
+ u8 rsv2[8];
+};
+
+struct hclge_tx_vlan_type_cfg_cmd {
+ __le16 ot_vlan_type;
+ __le16 in_vlan_type;
+ u8 rsv[20];
+};
+
+struct hclge_rx_vlan_type_cfg_cmd {
+ __le16 ot_fst_vlan_type;
+ __le16 ot_sec_vlan_type;
+ __le16 in_fst_vlan_type;
+ __le16 in_sec_vlan_type;
+ u8 rsv[16];
+};
+
struct hclge_cfg_com_tqp_queue_cmd {
__le16 tqp_id;
__le16 stream_id;
@@ -726,6 +811,23 @@ struct hclge_reset_cmd {
#define HCLGE_NIC_CMQ_DESC_NUM 1024
#define HCLGE_NIC_CMQ_DESC_NUM_S 3
+#define HCLGE_LED_PORT_SPEED_STATE_S 0
+#define HCLGE_LED_PORT_SPEED_STATE_M GENMASK(5, 0)
+#define HCLGE_LED_ACTIVITY_STATE_S 0
+#define HCLGE_LED_ACTIVITY_STATE_M GENMASK(1, 0)
+#define HCLGE_LED_LINK_STATE_S 0
+#define HCLGE_LED_LINK_STATE_M GENMASK(1, 0)
+#define HCLGE_LED_LOCATE_STATE_S 0
+#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0)
+
+struct hclge_set_led_state_cmd {
+ u8 port_speed_led_config;
+ u8 link_led_config;
+ u8 activity_led_config;
+ u8 locate_led_config;
+ u8 rsv[20];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 59ed806a52c3..32bc6f68e297 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -17,10 +17,12 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-
+#include <linux/if_vlan.h>
+#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
+#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"
@@ -34,8 +36,10 @@
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
enum hclge_mta_dmac_sel_type mta_mac_sel,
bool enable);
+static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static int hclge_update_led_status(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -278,8 +282,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
{"mac_tx_undersize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
- {"mac_tx_overrsize_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
+ {"mac_tx_oversize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
{"mac_tx_64_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
{"mac_tx_65_127_oct_pkt_num",
@@ -292,8 +296,24 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
{"mac_tx_1024_1518_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
- {"mac_tx_1519_max_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
+ {"mac_tx_1519_2047_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
+ {"mac_tx_2048_4095_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
+ {"mac_tx_4096_8191_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
+ {"mac_tx_8192_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)},
+ {"mac_tx_8192_9216_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
+ {"mac_tx_9217_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
+ {"mac_tx_12288_16383_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
+ {"mac_tx_1519_max_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
+ {"mac_tx_1519_max_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
{"mac_rx_total_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
{"mac_rx_total_oct_num",
@@ -314,8 +334,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
{"mac_rx_undersize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
- {"mac_rx_overrsize_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
+ {"mac_rx_oversize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
{"mac_rx_64_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
{"mac_rx_65_127_oct_pkt_num",
@@ -328,33 +348,59 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
{"mac_rx_1024_1518_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
- {"mac_rx_1519_max_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},
-
- {"mac_trans_fragment_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
- {"mac_trans_undermin_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
- {"mac_trans_jabber_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
- {"mac_trans_err_all_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
- {"mac_trans_from_app_good_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
- {"mac_trans_from_app_bad_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
- {"mac_rcv_fragment_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
- {"mac_rcv_undermin_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
- {"mac_rcv_jabber_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
- {"mac_rcv_fcs_err_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
- {"mac_rcv_send_app_good_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
- {"mac_rcv_send_app_bad_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
+ {"mac_rx_1519_2047_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
+ {"mac_rx_2048_4095_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
+ {"mac_rx_4096_8191_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
+ {"mac_rx_8192_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)},
+ {"mac_rx_8192_9216_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
+ {"mac_rx_9217_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
+ {"mac_rx_12288_16383_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
+ {"mac_rx_1519_max_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
+ {"mac_rx_1519_max_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
+
+ {"mac_tx_fragment_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
+ {"mac_tx_undermin_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
+ {"mac_tx_jabber_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
+ {"mac_tx_err_all_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
+ {"mac_tx_from_app_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
+ {"mac_tx_from_app_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
+ {"mac_rx_fragment_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
+ {"mac_rx_undermin_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
+ {"mac_rx_jabber_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
+ {"mac_rx_fcs_err_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
+ {"mac_rx_send_app_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
+ {"mac_rx_send_app_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
+};
+
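+/* static entries for the MAC manager table; the only entry matches LLDP
+ * frames (EtherType 0x88cc, destination MAC 01:80:c2:00:00:0e)
+ */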
+static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
+ {
+ .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
+ .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
+ .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
+ .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
+ .i_port_bitmap = 0x1,
+ },
};
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
@@ -460,9 +506,41 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
return 0;
}
+static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev)
+{
+ struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats;
+ struct hclge_desc desc;
+ __le64 *desc_data;
+ int ret;
+
+ /* for fiber ports, we need to query the total rx/tx packet
+ * statistics, which are used to verify data transfer.
+ */
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return 0;
+
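+ /* bail out if a full statistics update is already in progress */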
+ if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get MAC total pkt stats fail, ret = %d\n", ret);
+
+ return ret;
+ }
+
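+ /* the descriptor returns two u64 counters back to back:
+ * total tx packets, then total rx packets
+ */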
+ desc_data = (__le64 *)(&desc.data[0]);
+ mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++);
+ mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data);
+
+ return 0;
+}
+
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
-#define HCLGE_MAC_CMD_NUM 17
+#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4
u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
@@ -524,7 +602,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[1]);
}
for (i = 0; i < kinfo->num_tqps; i++) {
@@ -544,7 +622,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[1]);
}
return 0;
@@ -586,7 +664,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -594,7 +672,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -642,23 +720,22 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
- net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
- net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
- net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+ net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
- net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+ net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
net_stats->rx_length_errors =
hw_stats->mac_stats.mac_rx_undersize_pkt_num;
net_stats->rx_length_errors +=
- hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ hw_stats->mac_stats.mac_rx_oversize_pkt_num;
net_stats->rx_over_errors =
- hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
@@ -698,6 +775,9 @@ static void hclge_update_stats(struct hnae3_handle *handle,
struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
int status;
+ if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+ return;
+
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
@@ -723,6 +803,8 @@ static void hclge_update_stats(struct hnae3_handle *handle,
status);
hclge_update_netstat(hw_stats, net_stats);
+
+ clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
@@ -981,6 +1063,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
HCLGE_CFG_DEFAULT_SPEED_M,
HCLGE_CFG_DEFAULT_SPEED_S);
+ cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_RSS_SIZE_M,
+ HCLGE_CFG_RSS_SIZE_S);
+
for (i = 0; i < ETH_ALEN; i++)
cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
@@ -1058,7 +1144,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->num_vmdq_vport = cfg.vmdq_vport_num;
hdev->base_tqp_pid = 0;
- hdev->rss_size_max = 1;
+ hdev->rss_size_max = cfg.rss_size_max;
hdev->rx_buf_len = cfg.rx_buf_len;
ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
hdev->hw.mac.media_type = cfg.media_type;
@@ -1095,10 +1181,7 @@ static int hclge_configure(struct hclge_dev *hdev)
for (i = 0; i < hdev->tm_info.num_tc; i++)
hnae_set_bit(hdev->hw_tc_map, i, 1);
- if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
- hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
- else
- hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;
+ hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
return ret;
}
@@ -2132,28 +2215,6 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
return 0;
}
-static int hclge_query_autoneg_result(struct hclge_dev *hdev)
-{
- struct hclge_mac *mac = &hdev->hw.mac;
- struct hclge_query_an_speed_dup_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "autoneg result query cmd failed %d.\n", ret);
- return ret;
- }
-
- mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
-
- return 0;
-}
-
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
struct hclge_config_auto_neg_cmd *req;
@@ -2189,15 +2250,45 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
- hclge_query_autoneg_result(hdev);
+ if (phydev)
+ return phydev->autoneg;
return hdev->hw.mac.autoneg;
}
+static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
+ bool mask_vlan,
+ u8 *mac_mask)
+{
+ struct hclge_mac_vlan_mask_entry_cmd *req;
+ struct hclge_desc desc;
+ int status;
+
+ req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
+
+ hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+ mask_vlan ? 1 : 0);
+ ether_addr_copy(req->mac_mask, mac_mask);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
+ status);
+
+ return status;
+}
+
static int hclge_mac_init(struct hclge_dev *hdev)
{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
+ u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ int mtu;
int ret;
ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
@@ -2223,7 +2314,45 @@ static int hclge_mac_init(struct hclge_dev *hdev)
return ret;
}
- return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+ ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set mta filter mode fail ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set default mac_vlan_mask fail ret=%d\n", ret);
+ return ret;
+ }
+
+ if (netdev)
+ mtu = netdev->mtu;
+ else
+ mtu = ETH_DATA_LEN;
+
+ ret = hclge_set_mtu(handle, mtu);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set mtu failed ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->mbx_service_task);
+}
+
+static void hclge_reset_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->rst_service_task);
}
static void hclge_task_schedule(struct hclge_dev *hdev)
@@ -2350,6 +2479,7 @@ static void hclge_service_timer(struct timer_list *t)
struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
mod_timer(&hdev->service_timer, jiffies + HZ);
+ hdev->hw_stats.stats_timer++;
hclge_task_schedule(hdev);
}
@@ -2362,6 +2492,64 @@ static void hclge_service_complete(struct hclge_dev *hdev)
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
+static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+{
+ u32 rst_src_reg;
+ u32 cmdq_src_reg;
+
+ /* fetch the events from their corresponding regs */
+ rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+ cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
+
+ /* Assumption: if by any chance reset and mailbox events are reported
+ * together then we will only process the reset event in this go and
+ * defer the processing of the mailbox events. Since we have not
+ * cleared the RX CMDQ event this time, we will receive another
+ * interrupt from H/W just for the mailbox.
+ */
+
+ /* check for vector0 reset event sources */
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ /* check for vector0 mailbox(=CMDQ RX) event source */
+ if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+ cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
+ *clearval = cmdq_src_reg;
+ return HCLGE_VECTOR0_EVENT_MBX;
+ }
+
+ return HCLGE_VECTOR0_EVENT_OTHER;
+}
+
+static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+ u32 regclr)
+{
+ switch (event_type) {
+ case HCLGE_VECTOR0_EVENT_RST:
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+ hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
+ break;
+ }
+}
+
static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
writel(enable ? 1 : 0, vector->addr);
@@ -2370,10 +2558,38 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
struct hclge_dev *hdev = data;
+ u32 event_cause;
+ u32 clearval;
hclge_enable_vector(&hdev->misc_vector, false);
- if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->service_task);
+ event_cause = hclge_check_event_cause(hdev, &clearval);
+
+ /* vector 0 interrupt is shared with reset and mailbox source events. */
+ switch (event_cause) {
+ case HCLGE_VECTOR0_EVENT_RST:
+ hclge_reset_task_schedule(hdev);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+ /* If we are here then,
+ * 1. Either we are not handling any mbx task and we are not
+ * scheduled as well
+ * OR
+ * 2. We could be handling a mbx task but nothing more is
+ * scheduled.
+ * In both cases, we should schedule mbx task as there are more
+ * mbx messages reported by this interrupt.
+ */
+ hclge_mbx_task_schedule(hdev);
+ break;
+ default:
+ dev_dbg(&hdev->pdev->dev,
+ "received unknown or unhandled event of vector0\n");
+ break;
+ }
+
+ /* we should clear the source of interrupt */
+ hclge_clear_event_cause(hdev, event_cause, clearval);
+ hclge_enable_vector(&hdev->misc_vector, true);
return IRQ_HANDLED;
}
@@ -2404,9 +2620,9 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
hclge_get_misc_vector(hdev);
- ret = devm_request_irq(&hdev->pdev->dev,
- hdev->misc_vector.vector_irq,
- hclge_misc_irq_handle, 0, "hclge_misc", hdev);
+ /* this would be explicitly freed in the end */
+ ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
+ 0, "hclge_misc", hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -2416,6 +2632,12 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
return ret;
}
+static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
+{
+ free_irq(hdev->misc_vector.vector_irq, hdev);
+ hclge_free_vector(hdev, 0);
+}
+
static int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type)
{
@@ -2471,12 +2693,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
cnt++;
}
- /* must clear reset status register to
- * prevent driver detect reset interrupt again
- */
- reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
- hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg);
-
if (cnt >= HCLGE_RESET_WAIT_CNT) {
dev_warn(&hdev->pdev->dev,
"Wait for reset timeout: %d\n", hdev->reset_type);
@@ -2505,12 +2721,12 @@ static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
return ret;
}
-static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
+static void hclge_do_reset(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
u32 val;
- switch (type) {
+ switch (hdev->reset_type) {
case HNAE3_GLOBAL_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
@@ -2526,30 +2742,62 @@ static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF Reset requested\n");
hclge_func_reset_cmd(hdev, 0);
+ /* schedule again to check later */
+ set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&pdev->dev,
- "Unsupported reset type: %d\n", type);
+ "Unsupported reset type: %d\n", hdev->reset_type);
break;
}
}
-static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev)
+static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
+ unsigned long *addr)
{
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
- u32 rst_reg_val;
- rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
- if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val)
+ /* return the highest priority reset level amongst all */
+ if (test_bit(HNAE3_GLOBAL_RESET, addr))
rst_level = HNAE3_GLOBAL_RESET;
- else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val)
+ else if (test_bit(HNAE3_CORE_RESET, addr))
rst_level = HNAE3_CORE_RESET;
- else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val)
+ else if (test_bit(HNAE3_IMP_RESET, addr))
rst_level = HNAE3_IMP_RESET;
+ else if (test_bit(HNAE3_FUNC_RESET, addr))
+ rst_level = HNAE3_FUNC_RESET;
+
+ /* now, clear all other resets */
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_IMP_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
return rst_level;
}
+static void hclge_reset(struct hclge_dev *hdev)
+{
+ /* perform reset of the stack & ae device for a client */
+
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+ if (!hclge_reset_wait(hdev)) {
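+ /* hardware has completed the reset; rebuild the client with rtnl
+ * held so it cannot race with concurrent netdev operations
+ */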
+ rtnl_lock();
+ hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ hclge_reset_ae_dev(hdev->ae_dev);
+ hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ rtnl_unlock();
+ } else {
+ /* schedule again to check pending resets later */
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
+ }
+
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
static void hclge_reset_event(struct hnae3_handle *handle,
enum hnae3_reset_type reset)
{
@@ -2563,14 +2811,9 @@ static void hclge_reset_event(struct hnae3_handle *handle,
case HNAE3_FUNC_RESET:
case HNAE3_CORE_RESET:
case HNAE3_GLOBAL_RESET:
- if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) {
- dev_err(&hdev->pdev->dev, "Already in reset state");
- return;
- }
- hdev->reset_type = reset;
- set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
- set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
- schedule_work(&hdev->service_task);
+ /* request reset & schedule reset task */
+ set_bit(reset, &hdev->reset_request);
+ hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset);
@@ -2580,49 +2823,55 @@ static void hclge_reset_event(struct hnae3_handle *handle,
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
- bool do_reset;
+ /* check if there is any ongoing reset in the hardware. This status can
+ * be checked from reset_pending. If there is, we need to wait for the
+ * hardware to complete the reset.
+ * a. If we are able to figure out in reasonable time that the hardware
+ * has fully reset, we can proceed with the driver and client reset.
+ * b. else, we can come back later to check the status and re-schedule
+ * now.
+ */
+ hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclge_reset(hdev);
- do_reset = hdev->reset_type != HNAE3_NONE_RESET;
+ /* check if we got any *new* reset requests to be honored */
+ hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclge_do_reset(hdev);
- /* Reset is detected by interrupt */
- if (hdev->reset_type == HNAE3_NONE_RESET)
- hdev->reset_type = hclge_detected_reset_event(hdev);
+ hdev->reset_type = HNAE3_NONE_RESET;
+}
- if (hdev->reset_type == HNAE3_NONE_RESET)
+static void hclge_reset_service_task(struct work_struct *work)
+{
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, rst_service_task);
+
+ if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
return;
- switch (hdev->reset_type) {
- case HNAE3_FUNC_RESET:
- case HNAE3_CORE_RESET:
- case HNAE3_GLOBAL_RESET:
- case HNAE3_IMP_RESET:
- hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
- if (do_reset)
- hclge_do_reset(hdev, hdev->reset_type);
- else
- set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
+ hclge_reset_subtask(hdev);
- if (!hclge_reset_wait(hdev)) {
- hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
- hclge_reset_ae_dev(hdev->ae_dev);
- hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- clear_bit(HCLGE_STATE_RESET_INT, &hdev->state);
- }
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
- break;
- default:
- dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n",
- hdev->reset_type);
- break;
- }
- hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}
-static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
+static void hclge_mailbox_service_task(struct work_struct *work)
{
- hclge_reset_subtask(hdev);
- hclge_enable_vector(&hdev->misc_vector, true);
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, mbx_service_task);
+
+ if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+
+ hclge_mbx_handler(hdev);
+
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_service_task(struct work_struct *work)
@@ -2630,10 +2879,20 @@ static void hclge_service_task(struct work_struct *work)
struct hclge_dev *hdev =
container_of(work, struct hclge_dev, service_task);
- hclge_misc_irq_service_task(hdev);
+ /* The total rx/tx packet statistics need to be updated
+ * per second. Both hclge_update_stats_for_all() and
+ * hclge_mac_get_traffic_stats() can do it.
+ */
+ if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
+ hclge_update_stats_for_all(hdev);
+ hdev->hw_stats.stats_timer = 0;
+ } else {
+ hclge_mac_get_traffic_stats(hdev);
+ }
+
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
- hclge_update_stats_for_all(hdev);
+ hclge_update_led_status(hdev);
hclge_service_complete(hdev);
}
@@ -3174,49 +3433,53 @@ err:
return ret;
}
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
- struct hnae3_ring_chain_node *ring_chain)
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+ int vector_id, bool en,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_ctrl_vector_chain_cmd *req;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
- int ret;
+ struct hclge_ctrl_vector_chain_cmd *req
+ = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ enum hclge_cmd_status status;
+ enum hclge_opcode_type op;
+ u16 tqp_type_and_id;
int i;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
-
- req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
+ hclge_cmd_setup_basic_desc(&desc, op, false);
req->int_vector_id = vector_id;
i = 0;
for (node = ring_chain; node; node = node->next) {
- u16 type_and_id = 0;
-
- hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
+ tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
+ hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+ HCLGE_INT_TYPE_S,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
- node->tqp_index);
- hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
+ hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+ HCLGE_TQP_ID_S, node->tqp_index);
+ hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
HCLGE_INT_GL_IDX_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
- req->vfid = vport->vport_id;
-
+ hnae_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S));
+ req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
+ req->vfid = vport->vport_id;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
dev_err(&hdev->pdev->dev,
"Map TQP fail, status is %d.\n",
- ret);
- return ret;
+ status);
+ return -EIO;
}
i = 0;
hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_ADD_RING_TO_VECTOR,
+ op,
false);
req->int_vector_id = vector_id;
}
@@ -3224,21 +3487,21 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
if (i > 0) {
req->int_cause_num = i;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ req->vfid = vport->vport_id;
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
dev_err(&hdev->pdev->dev,
- "Map TQP fail, status is %d.\n", ret);
- return ret;
+ "Map TQP fail, status is %d.\n", status);
+ return -EIO;
}
}
return 0;
}
-static int hclge_map_handle_ring_to_vector(
- struct hnae3_handle *handle, int vector,
- struct hnae3_ring_chain_node *ring_chain)
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -3247,24 +3510,20 @@ static int hclge_map_handle_ring_to_vector(
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&hdev->pdev->dev,
- "Get vector index fail. ret =%d\n", vector_id);
+ "Get vector index fail. vector_id =%d\n", vector_id);
return vector_id;
}
- return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
+ return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}
-static int hclge_unmap_ring_from_vector(
- struct hnae3_handle *handle, int vector,
- struct hnae3_ring_chain_node *ring_chain)
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_ctrl_vector_chain_cmd *req;
- struct hnae3_ring_chain_node *node;
- struct hclge_desc desc;
- int i, vector_id;
- int ret;
+ int vector_id, ret;
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
@@ -3273,54 +3532,17 @@ static int hclge_unmap_ring_from_vector(
return vector_id;
}
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
-
- req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
- req->int_vector_id = vector_id;
-
- i = 0;
- for (node = ring_chain; node; node = node->next) {
- u16 type_and_id = 0;
-
- hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
- node->tqp_index);
- hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
- HCLGE_INT_GL_IDX_S,
- hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-
- req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
- req->vfid = vport->vport_id;
-
- if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
- req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Unmap TQP fail, status is %d.\n",
- ret);
- return ret;
- }
- i = 0;
- hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_DEL_RING_TO_VECTOR,
- false);
- req->int_vector_id = vector_id;
- }
+ ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
+ if (ret) {
+ dev_err(&handle->pdev->dev,
+ "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
+ vector_id,
+ ret);
+ return ret;
}
- if (i > 0) {
- req->int_cause_num = i;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Unmap TQP fail, status is %d.\n", ret);
- return ret;
- }
- }
+ /* Free this MSIX or MSI vector */
+ hclge_free_vector(hdev, vector_id);
return 0;
}
@@ -4077,6 +4299,91 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
return status;
}
+static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
+ u16 cmdq_resp, u8 resp_code)
+{
+#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
+#define HCLGE_ETHERTYPE_ALREADY_ADD 1
+#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
+#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
+
+ int return_status;
+
+ if (cmdq_resp) {
+ dev_err(&hdev->pdev->dev,
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
+ cmdq_resp);
+ return -EIO;
+ }
+
+ switch (resp_code) {
+ case HCLGE_ETHERTYPE_SUCCESS_ADD:
+ case HCLGE_ETHERTYPE_ALREADY_ADD:
+ return_status = 0;
+ break;
+ case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for manager table overflow.\n");
+ return_status = -EIO;
+ break;
+ case HCLGE_ETHERTYPE_KEY_CONFLICT:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for key conflict.\n");
+ return_status = -EIO;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for undefined, code=%d.\n",
+ resp_code);
+ return_status = -EIO;
+ }
+
+ return return_status;
+}
+
+static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
+ const struct hclge_mac_mgr_tbl_entry_cmd *req)
+{
+ struct hclge_desc desc;
+ u8 resp_code;
+ u16 retval;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
+ memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for cmd_send, ret =%d.\n",
+ ret);
+ return ret;
+ }
+
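+ /* the firmware places the response code in byte 1 of the first
+ * data word
+ */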
+ resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc.retval);
+
+ return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
+}
+
+static int init_mgr_tbl(struct hclge_dev *hdev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
+ ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed, ret =%d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4090,6 +4397,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ int ret;
/* mac addr check */
if (is_zero_ether_addr(new_addr) ||
@@ -4101,14 +4409,39 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
return -EINVAL;
}
- hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+ ret = hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "remove old uc mac address fail, ret =%d.\n",
+ ret);
- if (!hclge_add_uc_addr(handle, new_addr)) {
- ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
- return 0;
+ ret = hclge_add_uc_addr(handle, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add uc mac address fail, ret =%d.\n",
+ ret);
+
+ ret = hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "restore uc mac address fail, ret =%d.\n",
+ ret);
+ }
+
+ return -EIO;
}
- return -EIO;
+ ret = hclge_mac_pause_addr_cfg(hdev, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "configure mac pause address fail, ret =%d.\n",
+ ret);
+ return -EIO;
+ }
+
+ ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+
+ return 0;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
@@ -4134,6 +4467,17 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
return 0;
}
+#define HCLGE_FILTER_TYPE_VF 0
+#define HCLGE_FILTER_TYPE_PORT 1
+
+static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
+}
+
int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
bool is_kill, u16 vlan, u8 qos, __be16 proto)
{
@@ -4250,43 +4594,204 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
}
+static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
+{
+ struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
+ struct hclge_vport_vtag_tx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
+
+ req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+ req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
+ req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
+ vcfg->accept_tag ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
+ vcfg->accept_untag ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+ vcfg->insert_tag1_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+ vcfg->insert_tag2_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+
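+ /* mark this vport's bit in the VF bitmap carried by the command */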
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+ req->vf_bitmap[req->vf_offset] =
+ 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send port txvlan cfg command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
+{
+ struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
+ struct hclge_vport_vtag_rx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
+
+ req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+ vcfg->strip_tag1_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+ vcfg->strip_tag2_en ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+ vcfg->vlan1_vlan_prionly ? 1 : 0);
+ hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+ vcfg->vlan2_vlan_prionly ? 1 : 0);
+
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+ req->vf_bitmap[req->vf_offset] =
+ 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send port rxvlan cfg command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
+{
+ struct hclge_rx_vlan_type_cfg_cmd *rx_req;
+ struct hclge_tx_vlan_type_cfg_cmd *tx_req;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
+ rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
+ rx_req->ot_fst_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
+ rx_req->ot_sec_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
+ rx_req->in_fst_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
+ rx_req->in_sec_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Send rxvlan protocol type command fail, ret =%d\n",
+ status);
+ return status;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
+
+ tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
+ tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
+ tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send txvlan protocol type command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
-#define HCLGE_VLAN_TYPE_VF_TABLE 0
-#define HCLGE_VLAN_TYPE_PORT_TABLE 1
+#define HCLGE_DEF_VLAN_TYPE 0x8100
+
struct hnae3_handle *handle;
+ struct hclge_vport *vport;
int ret;
+ int i;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
- true);
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
if (ret)
return ret;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
- true);
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
if (ret)
return ret;
+ hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+
+ ret = hclge_set_vlan_protocol_type(hdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vport->txvlan_cfg.accept_tag = true;
+ vport->txvlan_cfg.accept_untag = true;
+ vport->txvlan_cfg.insert_tag1_en = false;
+ vport->txvlan_cfg.insert_tag2_en = false;
+ vport->txvlan_cfg.default_tag1 = 0;
+ vport->txvlan_cfg.default_tag2 = 0;
+
+ ret = hclge_set_vlan_tx_offload_cfg(vport);
+ if (ret)
+ return ret;
+
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en = true;
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+ ret = hclge_set_vlan_rx_offload_cfg(vport);
+ if (ret)
+ return ret;
+ }
+
handle = &hdev->vport[0].nic;
return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
+static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en = enable;
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+ return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_config_max_frm_size_cmd *req;
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
+ int max_frm_size;
int ret;
- if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
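+ /* a frame carries the MTU payload plus the L2 header, one VLAN tag
+ * and the FCS
+ */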
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+ max_frm_size > HCLGE_MAC_MAX_FRAME)
return -EINVAL;
- hdev->mps = new_mtu;
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- req->max_frm_size = cpu_to_le16(new_mtu);
+ req->max_frm_size = cpu_to_le16(max_frm_size);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -4294,6 +4799,8 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
return ret;
}
+ hdev->mps = max_frm_size;
+
return 0;
}
@@ -4341,7 +4848,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
-static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -4392,6 +4899,100 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
return hdev->fw_version;
}
+static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
+ u32 *flowctrl_adv)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
+ (phydev->advertising & ADVERTISED_Asym_Pause);
+}
+
+static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ if (rx_en)
+ phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+
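+ /* the XOR below flips Asym_Pause: rx-only advertises Pause|Asym,
+ * tx-only advertises Asym only, rx+tx advertises symmetric Pause
+ */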
+ if (tx_en)
+ phydev->advertising ^= ADVERTISED_Asym_Pause;
+}
+
+static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+ int ret;
+
+ if (rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
+ else if (rx_en && !tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
+ else if (!rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
+ else
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
+ return 0;
+
+ ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+
+ return 0;
+}
+
+int hclge_cfg_flowctrl(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ u16 remote_advertising = 0;
+ u16 local_advertising = 0;
+ u32 rx_pause, tx_pause;
+ u8 flowctl;
+
+ if (!phydev->link || !phydev->autoneg)
+ return 0;
+
+ if (phydev->advertising & ADVERTISED_Pause)
+ local_advertising = ADVERTISE_PAUSE_CAP;
+
+ if (phydev->advertising & ADVERTISED_Asym_Pause)
+ local_advertising |= ADVERTISE_PAUSE_ASYM;
+
+ if (phydev->pause)
+ remote_advertising = LPA_PAUSE_CAP;
+
+ if (phydev->asym_pause)
+ remote_advertising |= LPA_PAUSE_ASYM;
+
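+ /* resolve TX/RX pause from the local and link-partner
+ * advertisements (IEEE 802.3 Annex 28B)
+ */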
+ flowctl = mii_resolve_flowctrl_fdx(local_advertising,
+ remote_advertising);
+ tx_pause = flowctl & FLOW_CTRL_TX;
+ rx_pause = flowctl & FLOW_CTRL_RX;
+
+ if (phydev->duplex == HCLGE_MAC_HALF) {
+ tx_pause = 0;
+ rx_pause = 0;
+ }
+
+ return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
+}
+
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
u32 *rx_en, u32 *tx_en)
{
@@ -4421,6 +5022,41 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
}
}
+static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
+ u32 rx_en, u32 tx_en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ u32 fc_autoneg;
+
+ /* Only support flow control negotiation for netdev with
+ * phy attached for now.
+ */
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ fc_autoneg = hclge_get_autoneg(handle);
+ if (auto_neg != fc_autoneg) {
+ dev_info(&hdev->pdev->dev,
+ "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+ dev_info(&hdev->pdev->dev,
+ "Priority flow control enabled. Cannot set link flow control.\n");
+ return -EOPNOTSUPP;
+ }
+
+ hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+
+ if (!fc_autoneg)
+ return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
+
+ return phy_start_aneg(phydev);
+}
+
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed, u8 *duplex)
{
@@ -4661,6 +5297,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
hdev->reset_type = HNAE3_NONE_RESET;
+ hdev->reset_request = 0;
+ hdev->reset_pending = 0;
ae_dev->priv = hdev;
ret = hclge_pci_init(hdev);
@@ -4768,16 +5406,28 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = init_mgr_tbl(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
+ return ret;
+ }
+
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
INIT_WORK(&hdev->service_task, hclge_service_task);
+ INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
+ INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
@@ -4889,25 +5539,471 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
del_timer_sync(&hdev->service_timer);
if (hdev->service_task.func)
cancel_work_sync(&hdev->service_task);
+ if (hdev->rst_service_task.func)
+ cancel_work_sync(&hdev->rst_service_task);
+ if (hdev->mbx_service_task.func)
+ cancel_work_sync(&hdev->mbx_service_task);
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
- hclge_free_vector(hdev, 0);
hclge_destroy_cmd_queue(&hdev->hw);
+ hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
ae_dev->priv = NULL;
}
+static u32 hclge_get_max_channels(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+static void hclge_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ ch->max_combined = hclge_get_max_channels(handle);
+ ch->other_count = 1;
+ ch->max_other = 1;
+ ch->combined_count = vport->alloc_tqps;
+}
+
+static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
+ u16 *free_tqps, u16 *max_rss_size)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u16 temp_tqps = 0;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ if (!hdev->htqp[i].alloced)
+ temp_tqps++;
+ }
+ *free_tqps = temp_tqps;
+ *max_rss_size = hdev->rss_size_max;
+}
+
+static void hclge_release_tqp(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_tqp *tqp =
+ container_of(kinfo->tqp[i], struct hclge_tqp, q);
+
+ tqp->q.handle = NULL;
+ tqp->q.tqp_index = 0;
+ tqp->alloced = false;
+ }
+
+ devm_kfree(&hdev->pdev->dev, kinfo->tqp);
+ kinfo->tqp = NULL;
+}
+
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int cur_rss_size = kinfo->rss_size;
+ int cur_tqps = kinfo->num_tqps;
+ u16 tc_offset[HCLGE_MAX_TC_NUM];
+ u16 tc_valid[HCLGE_MAX_TC_NUM];
+ u16 tc_size[HCLGE_MAX_TC_NUM];
+ u16 roundup_size;
+ u32 *rss_indir;
+ int ret, i;
+
+ hclge_release_tqp(vport);
+
+ ret = hclge_knic_setup(vport, new_tqps_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_map_tqp_to_vport(hdev, vport);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_tm_schd_init(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ roundup_size = roundup_pow_of_two(kinfo->rss_size);
+ roundup_size = ilog2(roundup_size);
+ /* Set the RSS TC mode according to the new RSS size */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = kinfo->rss_size * i;
+ }
+ ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ if (ret)
+ return ret;
+
+ /* Reinitialize the RSS indirection table according to the new RSS size */
+ rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ if (!rss_indir)
+ return -ENOMEM;
+
+ for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ rss_indir[i] = i % kinfo->rss_size;
+
+ ret = hclge_set_rss(handle, rss_indir, NULL, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
+ ret);
+
+ kfree(rss_indir);
+
+ if (!ret)
+ dev_info(&hdev->pdev->dev,
+ "Channels changed, rss_size from %d to %d, tqps from %d to %d",
+ cur_rss_size, kinfo->rss_size,
+ cur_tqps, kinfo->rss_size * kinfo->num_tc);
+
+ return ret;
+}
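/*
 * Editor's note: a standalone sketch (not part of the patch) of the
 * round-robin indirection refill performed above after a channel change.
 * The 512-entry table size is an assumed stand-in for
 * HCLGE_RSS_IND_TBL_SIZE, used for illustration only.
 */
#include <stdio.h>

#define IND_TBL_SIZE 512 /* assumed stand-in for HCLGE_RSS_IND_TBL_SIZE */

int main(void)
{
	unsigned int rss_indir[IND_TBL_SIZE];
	unsigned int rss_size = 8; /* example rss_size after the change */
	unsigned int i;

	/* spread the hash buckets evenly over the new queue set */
	for (i = 0; i < IND_TBL_SIZE; i++)
		rss_indir[i] = i % rss_size;

	printf("entry 0 -> q%u, entry 9 -> q%u\n", rss_indir[0], rss_indir[9]);
	return 0;
}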
+
+static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
+ u32 *regs_num_64_bit)
+{
+ struct hclge_desc desc;
+ u32 total_num;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query register number cmd failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ *regs_num_32_bit = le32_to_cpu(desc.data[0]);
+ *regs_num_64_bit = le32_to_cpu(desc.data[1]);
+
+ total_num = *regs_num_32_bit + *regs_num_64_bit;
+ if (!total_num)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_32_BIT_REG_RTN_DATANUM 8
+
+ struct hclge_desc *desc;
+ u32 *reg_val = data;
+ __le32 *desc_data;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
+ cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 32 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le32 *)(&desc[i].data[0]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
+ } else {
+ desc_data = (__le32 *)(&desc[i]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le32_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
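/*
 * Editor's note: a standalone sketch (not part of the patch) of the
 * descriptor arithmetic used above. The first descriptor returns
 * DATANUM - 2 register values (two data words are reserved), each later
 * descriptor returns DATANUM, hence DIV_ROUND_UP(regs_num + 2, DATANUM).
 */
#include <stdio.h>

#define DATANUM 8 /* HCLGE_32_BIT_REG_RTN_DATANUM */

int main(void)
{
	unsigned int regs_num = 20; /* example register count */
	unsigned int cmd_num = (regs_num + 2 + DATANUM - 1) / DATANUM;
	unsigned int capacity = (DATANUM - 2) + (cmd_num - 1) * DATANUM;

	/* 20 registers -> 3 descriptors; capacity 6 + 8 + 8 = 22 >= 20 */
	printf("cmd_num=%u capacity=%u\n", cmd_num, capacity);
	return 0;
}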
+
+static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_64_BIT_REG_RTN_DATANUM 4
+
+ struct hclge_desc *desc;
+ u64 *reg_val = data;
+ __le64 *desc_data;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
+ cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 64 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le64 *)(&desc[i].data[0]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
+ } else {
+ desc_data = (__le64 *)(&desc[i]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le64_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
+
+static int hclge_get_regs_len(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ int ret;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return -EOPNOTSUPP;
+ }
+
+ return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+}
+
+static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ int ret;
+
+ *version = hdev->fw_version;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return;
+ }
+
+ ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get 32 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+
+ data = (u32 *)data + regs_num_32_bit;
+ ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
+ data);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Get 64 bit register failed, ret = %d.\n", ret);
+}
+
+static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
+ u8 act_led_status, u8 link_led_status,
+ u8 locate_led_status)
+{
+ struct hclge_set_led_state_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
+
+ req = (struct hclge_set_led_state_cmd *)desc.data;
+ hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M,
+ HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status);
+ hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M,
+ HCLGE_LED_ACTIVITY_STATE_S, act_led_status);
+ hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M,
+ HCLGE_LED_LINK_STATE_S, link_led_status);
+ hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
+ HCLGE_LED_LOCATE_STATE_S, locate_led_status);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Send set led state cmd error, ret =%d\n", ret);
+
+ return ret;
+}
+
+enum hclge_led_status {
+ HCLGE_LED_OFF,
+ HCLGE_LED_ON,
+ HCLGE_LED_NO_CHANGE = 0xFF,
+};
+
+static int hclge_set_led_id(struct hnae3_handle *handle,
+ enum ethtool_phys_id_state status)
+{
+#define BLINK_FREQUENCY 2
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int ret = 0;
+
+ if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return -EOPNOTSUPP;
+
+ switch (status) {
+ case ETHTOOL_ID_ACTIVE:
+ ret = hclge_set_led_status_sfp(hdev,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_ON);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ ret = hclge_set_led_status_sfp(hdev,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_NO_CHANGE,
+ HCLGE_LED_OFF);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+enum hclge_led_port_speed {
+ HCLGE_SPEED_LED_FOR_1G,
+ HCLGE_SPEED_LED_FOR_10G,
+ HCLGE_SPEED_LED_FOR_25G,
+ HCLGE_SPEED_LED_FOR_40G,
+ HCLGE_SPEED_LED_FOR_50G,
+ HCLGE_SPEED_LED_FOR_100G,
+};
+
+static u8 hclge_led_get_speed_status(u32 speed)
+{
+ u8 speed_led;
+
+ switch (speed) {
+ case HCLGE_MAC_SPEED_1G:
+ speed_led = HCLGE_SPEED_LED_FOR_1G;
+ break;
+ case HCLGE_MAC_SPEED_10G:
+ speed_led = HCLGE_SPEED_LED_FOR_10G;
+ break;
+ case HCLGE_MAC_SPEED_25G:
+ speed_led = HCLGE_SPEED_LED_FOR_25G;
+ break;
+ case HCLGE_MAC_SPEED_40G:
+ speed_led = HCLGE_SPEED_LED_FOR_40G;
+ break;
+ case HCLGE_MAC_SPEED_50G:
+ speed_led = HCLGE_SPEED_LED_FOR_50G;
+ break;
+ case HCLGE_MAC_SPEED_100G:
+ speed_led = HCLGE_SPEED_LED_FOR_100G;
+ break;
+ default:
+ speed_led = HCLGE_LED_NO_CHANGE;
+ }
+
+ return speed_led;
+}
+
+static int hclge_update_led_status(struct hclge_dev *hdev)
+{
+ u8 port_speed_status, link_status, activity_status;
+ u64 rx_pkts, tx_pkts;
+
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return 0;
+
+ port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed);
+
+ rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num;
+ tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num;
+ if (rx_pkts != hdev->rx_pkts_for_led ||
+ tx_pkts != hdev->tx_pkts_for_led)
+ activity_status = HCLGE_LED_ON;
+ else
+ activity_status = HCLGE_LED_OFF;
+ hdev->rx_pkts_for_led = rx_pkts;
+ hdev->tx_pkts_for_led = tx_pkts;
+
+ if (hdev->hw.mac.link)
+ link_status = HCLGE_LED_ON;
+ else
+ link_status = HCLGE_LED_OFF;
+
+ return hclge_set_led_status_sfp(hdev, port_speed_status,
+ activity_status, link_status,
+ HCLGE_LED_NO_CHANGE);
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
.init_client_instance = hclge_init_client_instance,
.uninit_client_instance = hclge_uninit_client_instance,
- .map_ring_to_vector = hclge_map_handle_ring_to_vector,
- .unmap_ring_from_vector = hclge_unmap_ring_from_vector,
+ .map_ring_to_vector = hclge_map_ring_to_vector,
+ .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
.get_vector = hclge_get_vector,
.set_promisc_mode = hclge_set_promisc_mode,
.set_loopback = hclge_set_loopback,
@@ -4934,6 +6030,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.get_pauseparam = hclge_get_pauseparam,
+ .set_pauseparam = hclge_set_pauseparam,
.set_mtu = hclge_set_mtu,
.reset_queue = hclge_reset_tqp,
.get_stats = hclge_get_stats,
@@ -4942,9 +6039,18 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_sset_count = hclge_get_sset_count,
.get_fw_version = hclge_get_fw_version,
.get_mdix_mode = hclge_get_mdix_mode,
+ .enable_vlan_filter = hclge_enable_vlan_filter,
.set_vlan_filter = hclge_set_port_vlan_filter,
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
+ .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
+ .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
+ .set_channels = hclge_set_channels,
+ .get_channels = hclge_get_channels,
+ .get_flowctrl_adv = hclge_get_flowctrl_adv,
+ .get_regs_len = hclge_get_regs_len,
+ .get_regs = hclge_get_regs,
+ .set_led_id = hclge_set_led_id,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 7027814ea5d7..d99a76a9557c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -79,6 +79,10 @@
#define HCLGE_PHY_MDIX_STATUS_B (6)
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11)
+/* Factor used to calculate offset and bitmap of VF num */
+#define HCLGE_VF_NUM_PER_CMD 64
+#define HCLGE_VF_NUM_PER_BYTE 8
+
/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_GLOBAL_RESET_REG 0x20A00
@@ -92,6 +96,16 @@
#define HCLGE_VECTOR0_CORERESET_INT_B 6
#define HCLGE_VECTOR0_IMPRESET_INT_B 7
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
+
+#define HCLGE_MAC_DEFAULT_FRAME \
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+#define HCLGE_MAC_MIN_FRAME 64
+#define HCLGE_MAC_MAX_FRAME 9728
+
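/*
 * Editor's note: a standalone sketch (not part of the patch) expanding
 * HCLGE_MAC_DEFAULT_FRAME with the standard Ethernet header constants:
 * ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) + ETH_DATA_LEN (1500).
 */
#include <stdio.h>

int main(void)
{
	unsigned int default_frame = 14 + 4 + 4 + 1500;

	printf("HCLGE_MAC_DEFAULT_FRAME = %u\n", default_frame); /* 1522 */
	return 0;
}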
enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING,
HCLGE_STATE_DOWN,
@@ -99,12 +113,20 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_REMOVING,
HCLGE_STATE_SERVICE_INITED,
HCLGE_STATE_SERVICE_SCHED,
+ HCLGE_STATE_RST_SERVICE_SCHED,
+ HCLGE_STATE_RST_HANDLING,
+ HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
- HCLGE_STATE_MBX_IRQ,
- HCLGE_STATE_RESET_INT,
+ HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_MAX
};
+enum hclge_evt_cause {
+ HCLGE_VECTOR0_EVENT_RST,
+ HCLGE_VECTOR0_EVENT_MBX,
+ HCLGE_VECTOR0_EVENT_OTHER,
+};
+
#define HCLGE_MPF_ENBALE 1
struct hclge_caps {
u16 num_tqp;
@@ -208,6 +230,7 @@ struct hclge_cfg {
u8 tc_num;
u16 tqp_desc_num;
u16 rx_buf_len;
+ u16 rss_size_max;
u8 phy_addr;
u8 media_type;
u8 mac_addr[ETH_ALEN];
@@ -364,14 +387,23 @@ struct hclge_mac_stats {
u64 mac_tx_multi_pkt_num;
u64 mac_tx_broad_pkt_num;
u64 mac_tx_undersize_pkt_num;
- u64 mac_tx_overrsize_pkt_num;
+ u64 mac_tx_oversize_pkt_num;
u64 mac_tx_64_oct_pkt_num;
u64 mac_tx_65_127_oct_pkt_num;
u64 mac_tx_128_255_oct_pkt_num;
u64 mac_tx_256_511_oct_pkt_num;
u64 mac_tx_512_1023_oct_pkt_num;
u64 mac_tx_1024_1518_oct_pkt_num;
- u64 mac_tx_1519_max_oct_pkt_num;
+ u64 mac_tx_1519_2047_oct_pkt_num;
+ u64 mac_tx_2048_4095_oct_pkt_num;
+ u64 mac_tx_4096_8191_oct_pkt_num;
+ u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
+ u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC */
+ u64 mac_tx_12288_16383_oct_pkt_num;
+ u64 mac_tx_1519_max_good_oct_pkt_num;
+ u64 mac_tx_1519_max_bad_oct_pkt_num;
+
u64 mac_rx_total_pkt_num;
u64 mac_rx_total_oct_num;
u64 mac_rx_good_pkt_num;
@@ -382,33 +414,52 @@ struct hclge_mac_stats {
u64 mac_rx_multi_pkt_num;
u64 mac_rx_broad_pkt_num;
u64 mac_rx_undersize_pkt_num;
- u64 mac_rx_overrsize_pkt_num;
+ u64 mac_rx_oversize_pkt_num;
u64 mac_rx_64_oct_pkt_num;
u64 mac_rx_65_127_oct_pkt_num;
u64 mac_rx_128_255_oct_pkt_num;
u64 mac_rx_256_511_oct_pkt_num;
u64 mac_rx_512_1023_oct_pkt_num;
u64 mac_rx_1024_1518_oct_pkt_num;
- u64 mac_rx_1519_max_oct_pkt_num;
-
- u64 mac_trans_fragment_pkt_num;
- u64 mac_trans_undermin_pkt_num;
- u64 mac_trans_jabber_pkt_num;
- u64 mac_trans_err_all_pkt_num;
- u64 mac_trans_from_app_good_pkt_num;
- u64 mac_trans_from_app_bad_pkt_num;
- u64 mac_rcv_fragment_pkt_num;
- u64 mac_rcv_undermin_pkt_num;
- u64 mac_rcv_jabber_pkt_num;
- u64 mac_rcv_fcs_err_pkt_num;
- u64 mac_rcv_send_app_good_pkt_num;
- u64 mac_rcv_send_app_bad_pkt_num;
+ u64 mac_rx_1519_2047_oct_pkt_num;
+ u64 mac_rx_2048_4095_oct_pkt_num;
+ u64 mac_rx_4096_8191_oct_pkt_num;
+ u64 mac_rx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
+ u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_rx_12288_16383_oct_pkt_num;
+ u64 mac_rx_1519_max_good_oct_pkt_num;
+ u64 mac_rx_1519_max_bad_oct_pkt_num;
+
+ u64 mac_tx_fragment_pkt_num;
+ u64 mac_tx_undermin_pkt_num;
+ u64 mac_tx_jabber_pkt_num;
+ u64 mac_tx_err_all_pkt_num;
+ u64 mac_tx_from_app_good_pkt_num;
+ u64 mac_tx_from_app_bad_pkt_num;
+ u64 mac_rx_fragment_pkt_num;
+ u64 mac_rx_undermin_pkt_num;
+ u64 mac_rx_jabber_pkt_num;
+ u64 mac_rx_fcs_err_pkt_num;
+ u64 mac_rx_send_app_good_pkt_num;
+ u64 mac_rx_send_app_bad_pkt_num;
};
+#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
struct hclge_hw_stats {
struct hclge_mac_stats mac_stats;
struct hclge_64_bit_stats all_64_bit_stats;
struct hclge_32_bit_stats all_32_bit_stats;
+ u32 stats_timer;
+};
+
+struct hclge_vlan_type_cfg {
+ u16 rx_ot_fst_vlan_type;
+ u16 rx_ot_sec_vlan_type;
+ u16 rx_in_fst_vlan_type;
+ u16 rx_in_sec_vlan_type;
+ u16 tx_ot_vlan_type;
+ u16 tx_in_vlan_type;
};
struct hclge_dev {
@@ -420,6 +471,8 @@ struct hclge_dev {
unsigned long state;
enum hnae3_reset_type reset_type;
+ unsigned long reset_request; /* reset has been requested */
+ unsigned long reset_pending; /* client reset is pending to be serviced */
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
@@ -469,6 +522,8 @@ struct hclge_dev {
unsigned long service_timer_previous;
struct timer_list service_timer;
struct work_struct service_task;
+ struct work_struct rst_service_task;
+ struct work_struct mbx_service_task;
bool cur_promisc;
int num_alloc_vfs; /* Actual number of VFs allocated */
@@ -493,6 +548,29 @@ struct hclge_dev {
enum hclge_mta_dmac_sel_type mta_mac_sel_type;
bool enable_mta; /* Multicast filter enable */
bool accept_mta_mc; /* Whether to accept mta filter multicast */
+
+ struct hclge_vlan_type_cfg vlan_type_cfg;
+
+ u64 rx_pkts_for_led;
+ u64 tx_pkts_for_led;
+};
+
+/* VPort level vlan tag configuration for TX direction */
+struct hclge_tx_vtag_cfg {
+ bool accept_tag; /* Whether to accept tagged packets from the host */
+ bool accept_untag; /* Whether to accept untagged packets from the host */
+ bool insert_tag1_en; /* Whether to insert the inner vlan tag */
+ bool insert_tag2_en; /* Whether to insert the outer vlan tag */
+ u16 default_tag1; /* The default inner vlan tag to insert */
+ u16 default_tag2; /* The default outer vlan tag to insert */
+};
+
+/* VPort level vlan tag configuration for RX direction */
+struct hclge_rx_vtag_cfg {
+ bool strip_tag1_en; /* Whether to strip the inner vlan tag */
+ bool strip_tag2_en; /* Whether to strip the outer vlan tag */
+ bool vlan1_vlan_prionly; /* Inner VLAN tag up to descriptor enable */
+ bool vlan2_vlan_prionly; /* Outer VLAN tag up to descriptor enable */
};
struct hclge_vport {
@@ -507,6 +585,9 @@ struct hclge_vport {
u16 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
+ struct hclge_tx_vtag_cfg txvlan_cfg;
+ struct hclge_rx_vtag_cfg rxvlan_cfg;
+
int vport_id;
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
@@ -529,8 +610,10 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
u8 func_id,
bool enable);
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector,
- struct hnae3_ring_chain_node *ring_chain);
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+ int vector_id, bool en,
+ struct hnae3_ring_chain_node *ring_chain);
+
static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
@@ -544,4 +627,8 @@ int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid,
int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
+
+void hclge_mbx_handler(struct hclge_dev *hdev);
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
+int hclge_cfg_flowctrl(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
new file mode 100644
index 000000000000..f38fc5ce9f51
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
+ * receives a mailbox message from VF.
+ * @vport: pointer to struct hclge_vport
+ * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
+ * message
+ * @resp_status: indicates to the VF whether its request succeeded (0) or failed.
+ */
+static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
+ int resp_status,
+ u8 *resp_data, u16 resp_data_len)
+{
+ struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+ struct hclge_dev *hdev = vport->back;
+ enum hclge_cmd_status status;
+ struct hclge_desc desc;
+
+ resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+ if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "PF fail to gen resp to VF len %d exceeds max len %d\n",
+ resp_data_len,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE);
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+ resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
+ resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
+
+ resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
+ resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
+ resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
+ resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;
+
+ if (resp_data && resp_data_len > 0)
+ memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(=%d) to send response to VF\n", status);
+
+ return status;
+}
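/*
 * Editor's note: a standalone sketch (not part of the patch) of the PF-to-VF
 * response layout built above: msg[0] is the response opcode, msg[1] and
 * msg[2] echo the VF's request code and subcode, msg[3] carries pass/fail,
 * and response data starts at msg[4]. The opcode value is an assumed
 * stand-in for HCLGE_MBX_PF_VF_RESP, used for illustration only.
 */
#include <stdio.h>
#include <string.h>

#define PF_VF_RESP 0xFE /* assumed stand-in for HCLGE_MBX_PF_VF_RESP */

int main(void)
{
	unsigned char req[2] = { 0x10, 0x01 }; /* example request code/subcode */
	unsigned char data[2] = { 0xAB, 0xCD }; /* example response payload */
	unsigned char msg[16] = { 0 };

	msg[0] = PF_VF_RESP;
	msg[1] = req[0];
	msg[2] = req[1];
	msg[3] = 0; /* 0 means the request succeeded */
	memcpy(&msg[4], data, sizeof(data));

	printf("resp: op=%#x code=%#x sub=%#x status=%u\n",
	       msg[0], msg[1], msg[2], msg[3]);
	return 0;
}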
+
+static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
+ u16 mbx_opcode, u8 dest_vfid)
+{
+ struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+ struct hclge_dev *hdev = vport->back;
+ enum hclge_cmd_status status;
+ struct hclge_desc desc;
+
+ resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+ resp_pf_to_vf->dest_vfid = dest_vfid;
+ resp_pf_to_vf->msg_len = msg_len;
+ resp_pf_to_vf->msg[0] = mbx_opcode;
+
+ memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(=%d) to send mailbox message to VF\n",
+ status);
+
+ return status;
+}
+
+static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
+{
+ struct hnae3_ring_chain_node *chain_tmp, *chain;
+
+ chain = head->next;
+
+ while (chain) {
+ chain_tmp = chain->next;
+ kzfree(chain);
+ chain = chain_tmp;
+ }
+}
+
+/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message
+ * msg[0]: opcode
+ * msg[1]: <not relevant to this function>
+ * msg[2]: ring_num
+ * msg[3]: first ring type (TX|RX)
+ * msg[4]: first tqp id
+ * msg[5] ~ msg[14]: other ring type and tqp id
+ */
+static int hclge_get_ring_chain_from_mbx(
+ struct hclge_mbx_vf_to_pf_cmd *req,
+ struct hnae3_ring_chain_node *ring_chain,
+ struct hclge_vport *vport)
+{
+#define HCLGE_RING_NODE_VARIABLE_NUM 3
+#define HCLGE_RING_MAP_MBX_BASIC_MSG_NUM 3
+ struct hnae3_ring_chain_node *cur_chain, *new_chain;
+ int ring_num;
+ int i;
+
+ ring_num = req->msg[2];
+
+ hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+ ring_chain->tqp_index =
+ hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
+ hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ req->msg[5]);
+
+ cur_chain = ring_chain;
+
+ for (i = 1; i < ring_num; i++) {
+ new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
+ if (!new_chain)
+ goto err;
+
+ hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_RING_MAP_MBX_BASIC_MSG_NUM]);
+
+ new_chain->tqp_index =
+ hclge_get_queue_id(vport->nic.kinfo.tqp
+ [req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 1]]);
+
+ hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+ HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 2]);
+
+ cur_chain->next = new_chain;
+ cur_chain = new_chain;
+ }
+
+ return 0;
+err:
+ hclge_free_vector_ring_chain(ring_chain);
+ return -ENOMEM;
+}
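/*
 * Editor's note: a standalone sketch (not part of the patch) decoding the
 * ring layout documented above: msg[2] holds ring_num, the first ring uses
 * msg[3..5] (type, tqp id, GL index), and ring i (i >= 1) sits at
 * msg[3 * i + 3 .. 3 * i + 5].
 */
#include <stdio.h>

int main(void)
{
	/* opcode, vector id, ring_num = 2, then (type, tqp, gl) per ring */
	unsigned char msg[15] = { 0, 0, 2, 1, 5, 0, 0, 6, 1 };
	unsigned int ring_num = msg[2];
	unsigned int i;

	for (i = 0; i < ring_num; i++)
		printf("ring %u: type=%u tqp=%u gl=%u\n", i,
		       msg[3 * i + 3], msg[3 * i + 4], msg[3 * i + 5]);
	return 0;
}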
+
+static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+ struct hnae3_ring_chain_node ring_chain;
+ int vector_id = req->msg[1];
+ int ret;
+
+ memset(&ring_chain, 0, sizeof(ring_chain));
+ ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+ if (ret)
+ return ret;
+
+ ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
+ if (ret)
+ return ret;
+
+ hclge_free_vector_ring_chain(&ring_chain);
+
+ return 0;
+}
+
+static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+ bool en = req->msg[1] ? true : false;
+ struct hclge_promisc_param param;
+
+ /* always enable broadcast promisc bit */
+ hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
+ return hclge_cmd_set_promisc_mode(vport->back, &param);
+}
+
+static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+ struct hclge_dev *hdev = vport->back;
+ int status;
+
+ if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
+ const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);
+
+ hclge_rm_uc_addr_common(vport, old_addr);
+ status = hclge_add_uc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
+ status = hclge_add_uc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
+ status = hclge_rm_uc_addr_common(vport, mac_addr);
+ } else {
+ dev_err(&hdev->pdev->dev,
+ "failed to set unicast mac addr, unknown subcode %d\n",
+ mbx_req->msg[1]);
+ return -EIO;
+ }
+
+ if (gen_resp)
+ hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+ return 0;
+}
+
+static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+ struct hclge_dev *hdev = vport->back;
+ int status;
+
+ if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
+ status = hclge_add_mc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
+ status = hclge_rm_mc_addr_common(vport, mac_addr);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) {
+ u8 func_id = vport->vport_id;
+ bool enable = mbx_req->msg[2];
+
+ status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
+ } else {
+ dev_err(&hdev->pdev->dev,
+ "failed to set mcast mac addr, unknown subcode %d\n",
+ mbx_req->msg[1]);
+ return -EIO;
+ }
+
+ if (gen_resp)
+ hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+ return 0;
+}
+
+static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ struct hclge_dev *hdev = vport->back;
+ int status = 0;
+
+ if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
+ u16 vlan, proto;
+ bool is_kill;
+
+ is_kill = !!mbx_req->msg[2];
+ memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
+ memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
+ status = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+ is_kill, vlan, 0,
+ cpu_to_be16(proto));
+ }
+
+ if (gen_resp)
+ status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+ return status;
+}
+
+static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map,
+ sizeof(u8));
+
+ return ret;
+}
+
+static int hclge_get_vf_queue_info(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+#define HCLGE_TQPS_RSS_INFO_LEN 8
+ u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
+ struct hclge_dev *hdev = vport->back;
+
+ /* get the queue related info */
+ memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
+ memcpy(&resp_data[2], &hdev->rss_size_max, sizeof(u16));
+ memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16));
+ memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16));
+
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+ HCLGE_TQPS_RSS_INFO_LEN);
+}
+
+static int hclge_get_link_info(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ struct hclge_dev *hdev = vport->back;
+ u16 link_status;
+ u8 msg_data[2];
+ u8 dest_vfid;
+
+ /* mac.link can only be 0 or 1 */
+ link_status = (u16)hdev->hw.mac.link;
+ memcpy(&msg_data[0], &link_status, sizeof(u16));
+ dest_vfid = mbx_req->mbx_src_vfid;
+
+ /* send this requested info to VF */
+ return hclge_send_mbx_msg(vport, msg_data, sizeof(u8),
+ HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
+}
+
+static void hclge_reset_vf_queue(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ u16 queue_id;
+
+ memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+
+ hclge_reset_tqp(&vport->nic, queue_id);
+}
+
+void hclge_mbx_handler(struct hclge_dev *hdev)
+{
+ struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclge_vport *vport;
+ struct hclge_desc *desc;
+ int ret;
+
+ /* handle all the mailbox requests in the queue */
+ while (hnae_get_bit(crq->desc[crq->next_to_use].flag,
+ HCLGE_CMDQ_RX_OUTVLD_B)) {
+ desc = &crq->desc[crq->next_to_use];
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
+
+ vport = &hdev->vport[req->mbx_src_vfid];
+
+ switch (req->msg[0]) {
+ case HCLGE_MBX_MAP_RING_TO_VECTOR:
+ ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
+ req);
+ break;
+ case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
+ ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
+ req);
+ break;
+ case HCLGE_MBX_SET_PROMISC_MODE:
+ ret = hclge_set_vf_promisc_mode(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF promisc mode\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_UNICAST:
+ ret = hclge_set_vf_uc_mac_addr(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF UC MAC Addr\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_MULTICAST:
+ ret = hclge_set_vf_mc_mac_addr(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF MC MAC Addr\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_VLAN:
+ ret = hclge_set_vf_vlan_cfg(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to config VF's VLAN\n",
+ ret);
+ break;
+ case HCLGE_MBX_GET_QINFO:
+ ret = hclge_get_vf_queue_info(vport, req, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to get Q info for VF\n",
+ ret);
+ break;
+ case HCLGE_MBX_GET_TCINFO:
+ ret = hclge_get_vf_tcinfo(vport, req, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to get TC info for VF\n",
+ ret);
+ break;
+ case HCLGE_MBX_GET_LINK_STATUS:
+ ret = hclge_get_link_info(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to get link stat for VF\n",
+ ret);
+ break;
+ case HCLGE_MBX_QUEUE_RESET:
+ hclge_reset_vf_queue(vport, req);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %d\n",
+ req->msg[0]);
+ break;
+ }
+ hclge_mbx_ring_ptr_move_crq(crq);
+ }
+
+ /* Write back the CMDQ_RQ head pointer; the M7 needs this pointer */
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 7069e9408d7d..c1dea3a47bdd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -17,6 +17,7 @@
#define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \
SUPPORTED_TP | \
SUPPORTED_Pause | \
+ SUPPORTED_Asym_Pause | \
PHY_10BT_FEATURES | \
PHY_100BT_FEATURES | \
PHY_1000BT_FEATURES)
@@ -183,6 +184,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
+
+ ret = hclge_cfg_flowctrl(hdev);
+ if (ret)
+ netdev_err(netdev, "failed to configure flow control.\n");
}
int hclge_mac_start_phy(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 7bfa2e5497cb..36bd79a77940 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -23,8 +23,8 @@ enum hclge_shaper_level {
HCLGE_SHAPER_LVL_PF = 1,
};
-#define HCLGE_SHAPER_BS_U_DEF 1
-#define HCLGE_SHAPER_BS_S_DEF 4
+#define HCLGE_SHAPER_BS_U_DEF 5
+#define HCLGE_SHAPER_BS_S_DEF 20
#define HCLGE_ETHER_MAX_RATE 100000
@@ -112,7 +112,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
return 0;
}
-static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
struct hclge_desc desc;
@@ -138,6 +138,46 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
+ u8 pause_trans_gap, u16 pause_trans_time)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
+
+ ether_addr_copy(pause_param->mac_addr, addr);
+ pause_param->pause_trans_gap = pause_trans_gap;
+ pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+ u16 trans_time;
+ u8 trans_gap;
+ int ret;
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ trans_gap = pause_param->pause_trans_gap;
+ trans_time = le16_to_cpu(pause_param->pause_trans_time);
+
+ return hclge_mac_pause_param_cfg(hdev, mac_addr, trans_gap,
+ trans_time);
+}
+
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
u8 tc;
@@ -1056,6 +1096,15 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
return hclge_tm_schd_mode_hw(hdev);
}
+static int hclge_mac_pause_param_setup_hw(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ return hclge_mac_pause_param_cfg(hdev, mac->mac_addr,
+ HCLGE_DEFAULT_PAUSE_TRANS_GAP,
+ HCLGE_DEFAULT_PAUSE_TRANS_TIME);
+}
+
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
u8 enable_bitmap = 0;
@@ -1102,8 +1151,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
int ret;
u8 i;
- if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
- return hclge_mac_pause_setup_hw(hdev);
+ if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+ ret = hclge_mac_pause_setup_hw(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_mac_pause_param_setup_hw(hdev);
+ }
/* Only DCB-supported dev supports qset back pressure and pfc cmd */
if (!hnae3_dev_dcb_supported(hdev))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index bf59961918ab..5401e7559437 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -18,6 +18,9 @@
#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0)
+#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0xFF
+#define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF
+
/* SP or DWRR */
#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
@@ -99,6 +102,13 @@ struct hclge_pfc_en_cmd {
u8 pri_en_bitmap;
};
+struct hclge_cfg_pause_param_cmd {
+ u8 mac_addr[ETH_ALEN];
+ u8 pause_trans_gap;
+ u8 rsvd;
+ __le16 pause_trans_time;
+};
+
struct hclge_port_shapping_cmd {
__le32 port_shapping_para;
};
@@ -118,4 +128,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_map_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev);
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
+int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
new file mode 100644
index 000000000000..fb93bbd35845
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+
+obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
new file mode 100644
index 000000000000..85985e731311
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
+#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
+ DMA_TO_DEVICE : DMA_FROM_DEVICE)
+#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
+
+static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+ int used;
+
+ used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
+ return ring->desc_num - used - 1;
+}
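/*
 * Editor's note: a standalone sketch (not part of the patch) of the
 * free-slot arithmetic above. One slot is deliberately kept empty so that
 * next_to_use == next_to_clean always means "ring empty", never "full".
 */
#include <stdio.h>

static int ring_space(int ntu, int ntc, int desc_num)
{
	int used = (ntu - ntc + desc_num) % desc_num;

	return desc_num - used - 1;
}

int main(void)
{
	/* wrap-around case: used = (10 - 1020 + 1024) % 1024 = 14 */
	printf("space = %d\n", ring_space(10, 1020, 1024)); /* 1009 */
	return 0;
}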
+
+static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
+{
+ struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
+ u16 ntc = csq->next_to_clean;
+ struct hclgevf_desc *desc;
+ int clean = 0;
+ u32 head;
+
+ desc = &csq->desc[ntc];
+ head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+ while (head != ntc) {
+ memset(desc, 0, sizeof(*desc));
+ ntc++;
+ if (ntc == csq->desc_num)
+ ntc = 0;
+ desc = &csq->desc[ntc];
+ clean++;
+ }
+ csq->next_to_clean = ntc;
+
+ return clean;
+}
+
+static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
+{
+ u32 head;
+
+ head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+
+ return head == hw->cmq.csq.next_to_use;
+}
+
+static bool hclgevf_is_special_opcode(u16 opcode)
+{
+ u16 spec_opcode[] = {0x30, 0x31, 0x32};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
+ if (spec_opcode[i] == opcode)
+ return true;
+ }
+
+ return false;
+}
+
+static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclgevf_desc);
+
+ ring->desc = kzalloc(size, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
+ size, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
+{
+ dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
+ ring->desc_num * sizeof(ring->desc[0]),
+ hclgevf_ring_to_dma_dir(ring));
+
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+}
+
+static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
+ struct hclgevf_cmq_ring *ring)
+{
+ struct hclgevf_hw *hw = &hdev->hw;
+ int ring_type = ring->flag;
+ u32 reg_val;
+ int ret;
+
+ ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+ spin_lock_init(&ring->lock);
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+ ring->dev = hdev;
+
+ /* allocate CSQ/CRQ descriptor */
+ ret = hclgevf_alloc_cmd_desc(ring);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
+ (ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
+ return ret;
+ }
+
+ /* initialize the hardware registers with csq/crq dma-address,
+ * descriptor number, head & tail pointers
+ */
+ switch (ring_type) {
+ case HCLGEVF_TYPE_CSQ:
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+ break;
+ case HCLGEVF_TYPE_CRQ:
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+ break;
+ }
+
+ return 0;
+}
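/*
 * Editor's note: a standalone sketch (not part of the patch). The register
 * programming above splits the 64-bit DMA base address into two 32-bit
 * registers; the high half is computed as (addr >> 31) >> 1 rather than
 * addr >> 32 so the shift stays well-defined even when dma_addr_t is only
 * 32 bits wide.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x123456789ULL; /* example descriptor base address */
	uint32_t lo = (uint32_t)dma;
	uint32_t hi = (uint32_t)((dma >> 31) >> 1);

	printf("lo=%#x hi=%#x\n", lo, hi); /* lo=0x23456789 hi=0x1 */
	return 0;
}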
+
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+ enum hclgevf_opcode_type opcode, bool is_read)
+{
+ memset(desc, 0, sizeof(struct hclgevf_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
+ HCLGEVF_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
+}
+
+/* hclgevf_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send routine for the command queue: it posts the
+ * descriptors to the queue, waits for completion, and cleans the queue.
+ */
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
+{
+ struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+ struct hclgevf_desc *desc_to_use;
+ bool complete = false;
+ u32 timeout = 0;
+ int handle = 0;
+ int status = 0;
+ u16 retval;
+ u16 opcode;
+ int ntc;
+
+ spin_lock_bh(&hw->cmq.csq.lock);
+
+ if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ /* Record the location of desc in the ring for this time
+ * which will be used for the hardware write-back
+ */
+ ntc = hw->cmq.csq.next_to_use;
+ opcode = le16_to_cpu(desc[0].opcode);
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+
+ /* Write to hardware */
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
+ hw->cmq.csq.next_to_use);
+
+ /* If the command is synchronous, wait for the firmware write-back;
+ * if multiple descriptors are sent, use the first one to check completion
+ */
+ if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
+ do {
+ if (hclgevf_cmd_csq_done(hw))
+ break;
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ }
+
+ if (hclgevf_cmd_csq_done(hw)) {
+ complete = true;
+ handle = 0;
+
+ while (handle < num) {
+ /* Get the result of hardware write back */
+ desc_to_use = &hw->cmq.csq.desc[ntc];
+ desc[handle] = *desc_to_use;
+
+ if (likely(!hclgevf_is_special_opcode(opcode)))
+ retval = le16_to_cpu(desc[handle].retval);
+ else
+ retval = le16_to_cpu(desc[0].retval);
+
+ if ((enum hclgevf_cmd_return_status)retval ==
+ HCLGEVF_CMD_EXEC_SUCCESS)
+ status = 0;
+ else
+ status = -EIO;
+ hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
+ ntc++;
+ handle++;
+ if (ntc == hw->cmq.csq.desc_num)
+ ntc = 0;
+ }
+ }
+
+ if (!complete)
+ status = -EAGAIN;
+
+ /* Clean the command send queue */
+ handle = hclgevf_cmd_csq_clean(hw);
+ if (handle != num) {
+ dev_warn(&hdev->pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ }
+
+ spin_unlock_bh(&hw->cmq.csq.lock);
+
+ return status;
+}
+
+static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
+ u32 *version)
+{
+ struct hclgevf_query_version_cmd *resp;
+ struct hclgevf_desc desc;
+ int status;
+
+ resp = (struct hclgevf_query_version_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
+ status = hclgevf_cmd_send(hw, &desc, 1);
+ if (!status)
+ *version = le32_to_cpu(resp->firmware);
+
+ return status;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+ u32 version;
+ int ret;
+
+ /* setup Tx write back timeout */
+ hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+
+ /* setup queue CSQ/CRQ rings */
+ hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
+ ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize CSQ ring\n", ret);
+ return ret;
+ }
+
+ hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
+ ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize CRQ ring\n", ret);
+ goto err_csq;
+ }
+
+ /* get firmware version */
+ ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to query firmware version\n", ret);
+ goto err_crq;
+ }
+ hdev->fw_version = version;
+
+ dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+
+ return 0;
+err_crq:
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+err_csq:
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+
+ return ret;
+}
+
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
+{
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
new file mode 100644
index 000000000000..2caca9317f8c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_CMD_H
+#define __HCLGEVF_CMD_H
+#include <linux/io.h>
+#include <linux/types.h>
+#include "hnae3.h"
+
+#define HCLGEVF_CMDQ_TX_TIMEOUT 200
+#define HCLGEVF_CMDQ_RX_INVLD_B 0
+#define HCLGEVF_CMDQ_RX_OUTVLD_B 1
+
+struct hclgevf_hw;
+struct hclgevf_dev;
+
+struct hclgevf_desc {
+ __le16 opcode;
+ __le16 flag;
+ __le16 retval;
+ __le16 rsv;
+ __le32 data[6];
+};
+
+struct hclgevf_desc_cb {
+ dma_addr_t dma;
+ void *va;
+ u32 length;
+};
+
+struct hclgevf_cmq_ring {
+ dma_addr_t desc_dma_addr;
+ struct hclgevf_desc *desc;
+ struct hclgevf_desc_cb *desc_cb;
+ struct hclgevf_dev *dev;
+ u32 head;
+ u32 tail;
+
+ u16 buf_size;
+ u16 desc_num;
+ int next_to_use;
+ int next_to_clean;
+ u8 flag;
+ spinlock_t lock; /* Command queue lock */
+};
+
+enum hclgevf_cmd_return_status {
+ HCLGEVF_CMD_EXEC_SUCCESS = 0,
+ HCLGEVF_CMD_NO_AUTH = 1,
+ HCLGEVF_CMD_NOT_EXEC = 2,
+ HCLGEVF_CMD_QUEUE_FULL = 3,
+};
+
+enum hclgevf_cmd_status {
+ HCLGEVF_STATUS_SUCCESS = 0,
+ HCLGEVF_ERR_CSQ_FULL = -1,
+ HCLGEVF_ERR_CSQ_TIMEOUT = -2,
+ HCLGEVF_ERR_CSQ_ERROR = -3
+};
+
+struct hclgevf_cmq {
+ struct hclgevf_cmq_ring csq;
+ struct hclgevf_cmq_ring crq;
+ u16 tx_timeout; /* Tx timeout */
+ enum hclgevf_cmd_status last_status;
+};
+
+#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT 0
+#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT 1
+#define HCLGEVF_CMD_FLAG_NEXT_SHIFT 2
+#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT 3
+#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT 4
+#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT 5
+
+#define HCLGEVF_CMD_FLAG_IN BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_OUT BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_NEXT BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
+#define HCLGEVF_CMD_FLAG_WR BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
+#define HCLGEVF_CMD_FLAG_NO_INTR BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
+#define HCLGEVF_CMD_FLAG_ERR_INTR BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)
+
+enum hclgevf_opcode_type {
+ /* Generic command */
+ HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
+ /* TQP command */
+ HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
+ HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
+ HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ /* RSS cmd */
+ HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+ HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
+ HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
+ /* Mailbox cmd */
+ HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
+};
+
+#define HCLGEVF_TQP_REG_OFFSET 0x80000
+#define HCLGEVF_TQP_REG_SIZE 0x200
+
+struct hclgevf_tqp_map {
+ __le16 tqp_id; /* Absolute tqp id in this pf */
+ u8 tqp_vf; /* VF id */
+#define HCLGEVF_TQP_MAP_TYPE_PF 0
+#define HCLGEVF_TQP_MAP_TYPE_VF 1
+#define HCLGEVF_TQP_MAP_TYPE_B 0
+#define HCLGEVF_TQP_MAP_EN_B 1
+ u8 tqp_flag; /* Indicate it's pf or vf tqp */
+ __le16 tqp_vid; /* Virtual id in this pf/vf */
+ u8 rsv[18];
+};
+
+#define HCLGEVF_VECTOR_ELEMENTS_PER_CMD 10
+
+enum hclgevf_int_type {
+ HCLGEVF_INT_TX = 0,
+ HCLGEVF_INT_RX,
+ HCLGEVF_INT_EVENT,
+};
+
+struct hclgevf_ctrl_vector_chain {
+ u8 int_vector_id;
+ u8 int_cause_num;
+#define HCLGEVF_INT_TYPE_S 0
+#define HCLGEVF_INT_TYPE_M 0x3
+#define HCLGEVF_TQP_ID_S 2
+#define HCLGEVF_TQP_ID_M (0x3fff << HCLGEVF_TQP_ID_S)
+ __le16 tqp_type_and_id[HCLGEVF_VECTOR_ELEMENTS_PER_CMD];
+ u8 vfid;
+ u8 resv;
+};
+
+struct hclgevf_query_version_cmd {
+ __le32 firmware;
+ __le32 firmware_rsv[5];
+};
+
+#define HCLGEVF_RSS_HASH_KEY_OFFSET 4
+#define HCLGEVF_RSS_HASH_KEY_NUM 16
+struct hclgevf_rss_config_cmd {
+ u8 hash_config;
+ u8 rsv[7];
+ u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
+};
+
+struct hclgevf_rss_input_tuple_cmd {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_stcp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_stcp_en;
+ u8 ipv6_fragment_en;
+ u8 rsv[16];
+};
+
+#define HCLGEVF_RSS_CFG_TBL_SIZE 16
+
+struct hclgevf_rss_indirection_table_cmd {
+ u16 start_table_index;
+ u16 rss_set_bitmap;
+ u8 rsv[4];
+ u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
+};
+
+#define HCLGEVF_RSS_TC_OFFSET_S 0
+#define HCLGEVF_RSS_TC_OFFSET_M (0x3ff << HCLGEVF_RSS_TC_OFFSET_S)
+#define HCLGEVF_RSS_TC_SIZE_S 12
+#define HCLGEVF_RSS_TC_SIZE_M (0x7 << HCLGEVF_RSS_TC_SIZE_S)
+#define HCLGEVF_RSS_TC_VALID_B 15
+#define HCLGEVF_MAX_TC_NUM 8
+struct hclgevf_rss_tc_mode_cmd {
+ u16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
+ u8 rsv[8];
+};
+
+#define HCLGEVF_LINK_STS_B 0
+#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B)
+struct hclgevf_link_status_cmd {
+ u8 status;
+ u8 rsv[23];
+};
+
+#define HCLGEVF_RING_ID_MASK 0x3ff
+#define HCLGEVF_TQP_ENABLE_B 0
+
+struct hclgevf_cfg_com_tqp_queue_cmd {
+ __le16 tqp_id;
+ __le16 stream_id;
+ u8 enable;
+ u8 rsv[19];
+};
+
+struct hclgevf_cfg_tx_queue_pointer_cmd {
+ __le16 tqp_id;
+ __le16 tx_tail;
+ __le16 tx_head;
+ __le16 fbd_num;
+ __le16 ring_offset;
+ u8 rsv[14];
+};
+
+#define HCLGEVF_TYPE_CRQ 0
+#define HCLGEVF_TYPE_CSQ 1
+#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
+#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004
+#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008
+#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014
+#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018
+#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701c
+#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
+#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
+#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
+#define HCLGEVF_NIC_CMQ_EN_B 16
+#define HCLGEVF_NIC_CMQ_ENABLE BIT(HCLGEVF_NIC_CMQ_EN_B)
+#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
+#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
+#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
+
+static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+ writel(value, base + reg);
+}
+
+static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
+{
+ u8 __iomem *reg_addr = READ_ONCE(base);
+
+ return readl(reg_addr + reg);
+}
+
+#define hclgevf_write_dev(a, reg, value) \
+ hclgevf_write_reg((a)->io_base, (reg), (value))
+#define hclgevf_read_dev(a, reg) \
+ hclgevf_read_reg((a)->io_base, (reg))
+
+#define HCLGEVF_SEND_SYNC(flag) \
+ ((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev);
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+ enum hclgevf_opcode_type opcode,
+ bool is_read);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
new file mode 100644
index 000000000000..0d89965f7928
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -0,0 +1,1505 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+#define HCLGEVF_NAME "hclgevf"
+
+static struct hnae3_ae_algo ae_algovf;
+
+static const struct pci_device_id ae_algovf_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+ /* required last entry */
+ {0, }
+};
+
+static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
+ struct hnae3_handle *handle)
+{
+ return container_of(handle, struct hclgevf_dev, nic);
+}
+
+static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_queue *queue;
+ struct hclgevf_desc desc;
+ struct hclgevf_tqp *tqp;
+ int status;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ queue = handle->kinfo.tqp[i];
+ tqp = container_of(queue, struct hclgevf_tqp, q);
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_QUERY_RX_STATUS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Query tqp stat fail, status = %d,queue = %d\n",
+ status, i);
+ return status;
+ }
+ tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Query tqp stat fail, status = %d,queue = %d\n",
+ status, i);
+ return status;
+ }
+ tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+ }
+
+ return 0;
+}
+
+static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_tqp *tqp;
+ u64 *buff = data;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
+ }
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
+ }
+
+ return buff;
+}
+
+static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->num_tqps * 2;
+}
+
+static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 *buff = data;
+ int i = 0;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
+ struct hclgevf_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
+ tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
+ struct hclgevf_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
+ tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ return buff;
+}
+
+static void hclgevf_update_stats(struct hnae3_handle *handle,
+ struct net_device_stats *net_stats)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int status;
+
+ status = hclgevf_tqps_update_stats(handle);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF update of TQPS stats fail, status = %d.\n",
+ status);
+}
+
+static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+ if (strset == ETH_SS_TEST)
+ return -EOPNOTSUPP;
+ else if (strset == ETH_SS_STATS)
+ return hclgevf_tqps_get_sset_count(handle, strset);
+
+ return 0;
+}
+
+static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
+ u8 *data)
+{
+ u8 *p = (u8 *)data;
+
+ if (strset == ETH_SS_STATS)
+ p = hclgevf_tqps_get_strings(handle, p);
+}
+
+static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ hclgevf_tqps_get_stats(handle, data);
+}
+
+static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
+{
+ u8 resp_msg;
+ int status;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
+ true, &resp_msg, sizeof(u8));
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get TC info from PF failed %d",
+ status);
+ return status;
+ }
+
+ hdev->hw_tc_map = resp_msg;
+
+ return 0;
+}
+
+static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_TQPS_RSS_INFO_LEN 8
+ u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
+ int status;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
+ true, resp_msg,
+ HCLGEVF_TQPS_RSS_INFO_LEN);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get tqp info from PF failed %d",
+ status);
+ return status;
+ }
+
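+ /* resp_msg carries four u16 values back to back:
+ * num_tqps, rss_size_max, num_desc and rx_buf_len
+ */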
+ memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
+ memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
+ memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
+ memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));
+
+ return 0;
+}
+
+static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_tqp *tqp;
+ int i;
+
+ hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
+ sizeof(struct hclgevf_tqp), GFP_KERNEL);
+ if (!hdev->htqp)
+ return -ENOMEM;
+
+ tqp = hdev->htqp;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ tqp->dev = &hdev->pdev->dev;
+ tqp->index = i;
+
+ tqp->q.ae_algo = &ae_algovf;
+ tqp->q.buf_size = hdev->rx_buf_len;
+ tqp->q.desc_num = hdev->num_desc;
+ tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
+ i * HCLGEVF_TQP_REG_SIZE;
+
+ tqp++;
+ }
+
+ return 0;
+}
+
+static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hnae3_knic_private_info *kinfo;
+ u16 new_tqps = hdev->num_tqps;
+ int i;
+
+ kinfo = &nic->kinfo;
+ kinfo->num_tc = 0;
+ kinfo->num_desc = hdev->num_desc;
+ kinfo->rx_buf_len = hdev->rx_buf_len;
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
+ if (hdev->hw_tc_map & BIT(i))
+ kinfo->num_tc++;
+
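+ /* e.g. with num_tqps = 16, rss_size_max = 16 and two TCs mapped,
+ * rss_size = min(16, 16 / 2) = 8 and num_tqps = min(8 * 2, 16) = 16
+ */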
+ kinfo->rss_size
+ = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
+ new_tqps = kinfo->rss_size * kinfo->num_tc;
+ kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
+
+ kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
+ sizeof(struct hnae3_queue *), GFP_KERNEL);
+ if (!kinfo->tqp)
+ return -ENOMEM;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ hdev->htqp[i].q.handle = &hdev->nic;
+ hdev->htqp[i].q.tqp_index = i;
+ kinfo->tqp[i] = &hdev->htqp[i].q;
+ }
+
+ return 0;
+}
+
+static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
+{
+ int status;
+ u8 resp_msg;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
+ 0, false, &resp_msg, sizeof(u8));
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF failed to fetch link status(%d) from PF", status);
+}
+
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
+{
+ struct hnae3_handle *handle = &hdev->nic;
+ struct hnae3_client *client;
+
+ client = handle->client;
+
+ if (link_state != hdev->hw.mac.link) {
+ client->ops->link_status_change(handle, !!link_state);
+ hdev->hw.mac.link = link_state;
+ }
+}
+
+static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ int ret;
+
+ nic->ae_algo = &ae_algovf;
+ nic->pdev = hdev->pdev;
+ nic->numa_node_mask = hdev->numa_node_mask;
+ nic->flags |= HNAE3_SUPPORT_VF;
+
+ if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
+ dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
+ hdev->ae_dev->dev_type);
+ return -EINVAL;
+ }
+
+ ret = hclgevf_knic_setup(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
+ ret);
+ return ret;
+}
+
+static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
+{
+ hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
+ hdev->num_msi_left += 1;
+ hdev->num_msi_used -= 1;
+}
+
+static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
+ struct hnae3_vector_info *vector_info)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_vector_info *vector = vector_info;
+ int alloc = 0;
+ int i, j;
+
+ vector_num = min(hdev->num_msi_left, vector_num);
+
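+ /* vector 0 is reserved for the misc. interrupt, so the search
+ * below starts at HCLGEVF_MISC_VECTOR_NUM + 1 and the register
+ * offset is computed from (i - 1)
+ */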
+ for (j = 0; j < vector_num; j++) {
+ for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
+ if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
+ vector->vector = pci_irq_vector(hdev->pdev, i);
+ vector->io_addr = hdev->hw.io_base +
+ HCLGEVF_VECTOR_REG_BASE +
+ (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
+ hdev->vector_status[i] = 0;
+ hdev->vector_irq[i] = vector->vector;
+
+ vector++;
+ alloc++;
+
+ break;
+ }
+ }
+ }
+ hdev->num_msi_left -= alloc;
+ hdev->num_msi_used += alloc;
+
+ return alloc;
+}
+
+static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
+{
+ int i;
+
+ for (i = 0; i < hdev->num_msi; i++)
+ if (vector == hdev->vector_irq[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
+{
+ return HCLGEVF_RSS_KEY_SIZE;
+}
+
+static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
+{
+ return HCLGEVF_RSS_IND_TBL_SIZE;
+}
+
+static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
+{
+ const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
+ struct hclgevf_rss_indirection_table_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+ int i, j;
+
+ req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
+
+ for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
+ false);
+ req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
+ req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
+ for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
+ req->rss_result[j] =
+ indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to set RSS indirection table\n",
+ status);
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
+{
+ struct hclgevf_rss_tc_mode_cmd *req;
+ u16 tc_offset[HCLGEVF_MAX_TC_NUM];
+ u16 tc_valid[HCLGEVF_MAX_TC_NUM];
+ u16 tc_size[HCLGEVF_MAX_TC_NUM];
+ struct hclgevf_desc desc;
+ u16 roundup_size;
+ int status;
+ int i;
+
+ req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
+
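+ /* tc_size is programmed as a power-of-two exponent, e.g.
+ * rss_size = 6 gives roundup_pow_of_two(6) = 8 and ilog2(8) = 3
+ */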
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+ tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
+ tc_size[i] = roundup_size;
+ tc_offset[i] = rss_size * i;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+ hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+ (tc_valid[i] & 0x1));
+ hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+ HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
+ hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+ HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+ }
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to set rss tc mode\n", status);
+
+ return status;
+}
+
+static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
+ u8 *key)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_config_cmd *req;
+ int lkup_times = key ? 3 : 1;
+ struct hclgevf_desc desc;
+ int key_offset;
+ int key_size;
+ int status;
+
+ req = (struct hclgevf_rss_config_cmd *)desc.data;
+ lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);
+
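+ /* the hash key is read back in up to three chunks of
+ * HCLGEVF_RSS_HASH_KEY_NUM bytes each, the last chunk holding
+ * whatever remains of the HCLGEVF_RSS_KEY_SIZE byte key
+ */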
+ for (key_offset = 0; key_offset < lkup_times; key_offset++) {
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_RSS_GENERIC_CONFIG,
+ true);
+ req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get hardware RSS cfg, status = %d\n",
+ status);
+ return status;
+ }
+
+ if (key_offset == 2)
+ key_size =
+ HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
+ else
+ key_size = HCLGEVF_RSS_HASH_KEY_NUM;
+
+ if (key)
+ memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
+ req->hash_key,
+ key_size);
+ }
+
+ if (hash) {
+ if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
+ *hash = ETH_RSS_HASH_TOP;
+ else
+ *hash = ETH_RSS_HASH_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i;
+
+ if (indir)
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ indir[i] = rss_cfg->rss_indirection_tbl[i];
+
+ return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
+}
+
+static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i;
+
+ /* update the shadow RSS table with user specified qids */
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ rss_cfg->rss_indirection_tbl[i] = indir[i];
+
+ /* update the hardware */
+ return hclgevf_set_rss_indir_table(hdev);
+}
+
+static int hclgevf_get_tc_size(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+
+ return rss_cfg->rss_size;
+}
+
+static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+#define HCLGEVF_RING_NODE_VARIABLE_NUM 3
+#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM 3
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_ring_chain_node *node;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclgevf_desc desc;
+ int i, vector_id;
+ int status;
+ u8 type;
+
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ type = en ?
+ HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR;
+ req->msg[0] = type;
+ req->msg[1] = vector_id; /* vector_id is the VF-local vector index */
+
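+ /* each ring consumes HCLGEVF_RING_NODE_VARIABLE_NUM message bytes
+ * after the 3 byte header, i.e. for the k-th chained ring:
+ * msg[3 + 3 * k] = ring type
+ * msg[3 + 3 * k + 1] = tqp index
+ * msg[3 + 3 * k + 2] = GL index
+ */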
+ i = 0;
+ for (node = ring_chain; node; node = node->next) {
+ i++;
+ /* msg[2] holds the number of rings carried in this message */
+ req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] =
+ hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
+ req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] =
+ node->tqp_index;
+ req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 2] =
+ hnae_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S);
+
+ if (i == (HCLGE_MBX_VF_MSG_DATA_NUM -
+ HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) /
+ HCLGEVF_RING_NODE_VARIABLE_NUM) {
+ req->msg[2] = i;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Map TQP fail, status is %d.\n",
+ status);
+ return status;
+ }
+ i = 0;
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_MBX_VF_TO_PF,
+ false);
+ req->msg[0] = type;
+ req->msg[1] = vector_id;
+ }
+ }
+
+ if (i > 0) {
+ req->msg[2] = i;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Map TQP fail, status is %d.\n", status);
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
+}
+
+static int hclgevf_unmap_ring_from_vector(
+ struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret, vector_id;
+
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
+ if (ret) {
+ dev_err(&handle->pdev->dev,
+ "Unmap ring from vector fail. vector=%d, ret =%d\n",
+ vector_id,
+ ret);
+ return ret;
+ }
+
+ hclgevf_free_vector(hdev, vector);
+
+ return 0;
+}
+
+static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
+{
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
+ req->msg[1] = en;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Set promisc mode fail, status is %d.\n", status);
+
+ return status;
+}
+
+static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ hclgevf_cmd_set_promisc_mode(hdev, en);
+}
+
+static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
+ int stream_id, bool enable)
+{
+ struct hclgevf_cfg_com_tqp_queue_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+
+ req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
+ false);
+ req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
+ req->stream_id = cpu_to_le16(stream_id);
+ req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "TQP enable fail, status =%d.\n", status);
+
+ return status;
+}
+
+static int hclgevf_get_queue_id(struct hnae3_queue *queue)
+{
+ struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
+
+ return tqp->index;
+}
+
+static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_queue *queue;
+ struct hclgevf_tqp *tqp;
+ int i;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ queue = handle->kinfo.tqp[i];
+ tqp = container_of(queue, struct hclgevf_tqp, q);
+ memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+ }
+}
+
+static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg[2] = {0};
+
+ msg[0] = en;
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
+ msg, 1, false, NULL, 0);
+}
+
+static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ ether_addr_copy(p, hdev->hw.mac.mac_addr);
+}
+
+static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
+ u8 *new_mac_addr = (u8 *)p;
+ u8 msg_data[ETH_ALEN * 2];
+ int status;
+
+ ether_addr_copy(msg_data, new_mac_addr);
+ ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_MODIFY,
+ msg_data, ETH_ALEN * 2,
+ false, NULL, 0);
+ if (!status)
+ ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
+
+ return status;
+}
+
+static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_ADD,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_REMOVE,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_ADD,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_REMOVE,
+ addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
+ __be16 proto, u16 vlan_id,
+ bool is_kill)
+{
+#define HCLGEVF_VLAN_MBX_MSG_LEN 5
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
+
+ if (vlan_id > 4095)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
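+ /* msg_data layout: [0] = is_kill, [1..2] = vlan_id, [3..4] = proto */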
+ msg_data[0] = is_kill;
+ memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
+ memcpy(&msg_data[3], &proto, sizeof(proto));
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_FILTER, msg_data,
+ HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+}
+
+static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data[2];
+
+ memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
+
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false,
+ NULL, 0);
+}
+
+static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->fw_version;
+}
+
+static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_misc_vector *vector = &hdev->misc_vector;
+
+ vector->vector_irq = pci_irq_vector(hdev->pdev,
+ HCLGEVF_MISC_VECTOR_NUM);
+ vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
+ /* vector status always valid for Vector 0 */
+ hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
+ hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
+
+ hdev->num_msi_left -= 1;
+ hdev->num_msi_used += 1;
+}
+
+static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
+{
+ if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->mbx_service_task);
+}
+
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
+{
+ if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
+ !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->service_task);
+}
+
+static void hclgevf_service_timer(struct timer_list *t)
+{
+ struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
+
+ mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
+
+ hclgevf_task_schedule(hdev);
+}
+
+static void hclgevf_mailbox_service_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev;
+
+ hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
+
+ if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
+
+ hclgevf_mbx_handler(hdev);
+
+ clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclgevf_service_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev;
+
+ hdev = container_of(work, struct hclgevf_dev, service_task);
+
+ /* request the link status from the PF. the PF will be able to push
+ * such updates to the VF in the future, so this may be removed later
+ */
+ hclgevf_request_link_info(hdev);
+
+ clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+}
+
+static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
+{
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
+}
+
+static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
+{
+ u32 cmdq_src_reg;
+
+ /* fetch the events from their corresponding regs */
+ cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
+ HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+
+ /* check for vector0 mailbox(=CMDQ RX) event source */
+ if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+ cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+ *clearval = cmdq_src_reg;
+ return true;
+ }
+
+ dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
+
+ return false;
+}
+
+static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
+{
+ writel(en ? 1 : 0, vector->addr);
+}
+
+static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+{
+ struct hclgevf_dev *hdev = data;
+ u32 clearval;
+
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ if (!hclgevf_check_event_cause(hdev, &clearval))
+ goto skip_sched;
+
+ /* schedule the VF mailbox service task, if not already scheduled */
+ hclgevf_mbx_task_schedule(hdev);
+
+ hclgevf_clear_event_cause(hdev, clearval);
+
+skip_sched:
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ return IRQ_HANDLED;
+}
+
+static int hclgevf_configure(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+ /* get queue configuration from PF */
+ ret = hclgevf_get_queue_info(hdev);
+ if (ret)
+ return ret;
+ /* get tc configuration from PF */
+ return hclgevf_get_tc_info(hdev);
+}
+
+static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *roce = &hdev->roce;
+ struct hnae3_handle *nic = &hdev->nic;
+
+ roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;
+
+ if (hdev->num_msi_left < roce->rinfo.num_vectors ||
+ hdev->num_msi_left == 0)
+ return -EINVAL;
+
+ roce->rinfo.base_vector =
+ hdev->vector_status[hdev->num_msi_used];
+
+ roce->rinfo.netdev = nic->kinfo.netdev;
+ roce->rinfo.roce_io_base = hdev->hw.io_base;
+
+ roce->pdev = nic->pdev;
+ roce->ae_algo = nic->ae_algo;
+ roce->numa_node_mask = nic->numa_node_mask;
+
+ return 0;
+}
+
+static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i, ret;
+
+ rss_cfg->rss_size = hdev->rss_size_max;
+
+ /* Initialize RSS indirect table for each vport */
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
+
+ ret = hclgevf_set_rss_indir_table(hdev);
+ if (ret)
+ return ret;
+
+ return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
+}
+
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+{
+ /* other VLAN configuration (e.g. VLAN TX/RX offload) will also be
+ * added here later
+ */
+ return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
+ false);
+}
+
+static int hclgevf_ae_start(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i, queue_id;
+
+ for (i = 0; i < handle->kinfo.num_tqps; i++) {
+ /* ring enable */
+ queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+ if (queue_id < 0) {
+ dev_warn(&hdev->pdev->dev,
+ "Get invalid queue id, ignore it\n");
+ continue;
+ }
+
+ hclgevf_tqp_enable(hdev, queue_id, 0, true);
+ }
+
+ /* reset tqp stats */
+ hclgevf_reset_tqp_stats(handle);
+
+ hclgevf_request_link_info(hdev);
+
+ clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ mod_timer(&hdev->service_timer, jiffies + HZ);
+
+ return 0;
+}
+
+static void hclgevf_ae_stop(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i, queue_id;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ /* Ring disable */
+ queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+ if (queue_id < 0) {
+ dev_warn(&hdev->pdev->dev,
+ "Get invalid queue id, ignore it\n");
+ continue;
+ }
+
+ hclgevf_tqp_enable(hdev, queue_id, 0, false);
+ }
+
+ /* reset tqp stats */
+ hclgevf_reset_tqp_stats(handle);
+}
+
+static void hclgevf_state_init(struct hclgevf_dev *hdev)
+{
+ /* setup tasks for the MBX */
+ INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
+ clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+
+ /* setup tasks for service timer */
+ timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
+
+ INIT_WORK(&hdev->service_task, hclgevf_service_task);
+ clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+
+ mutex_init(&hdev->mbx_resp.mbx_mutex);
+
+ /* bring the device down */
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+}
+
+static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
+{
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+ if (hdev->service_timer.function)
+ del_timer_sync(&hdev->service_timer);
+ if (hdev->service_task.func)
+ cancel_work_sync(&hdev->service_task);
+ if (hdev->mbx_service_task.func)
+ cancel_work_sync(&hdev->mbx_service_task);
+
+ mutex_destroy(&hdev->mbx_resp.mbx_mutex);
+}
+
+static int hclgevf_init_msi(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int vectors;
+ int i;
+
+ hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;
+
+ vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (vectors < 0) {
+ dev_err(&pdev->dev,
+ "failed(%d) to allocate MSI/MSI-X vectors\n",
+ vectors);
+ return vectors;
+ }
+ if (vectors < hdev->num_msi)
+ dev_warn(&hdev->pdev->dev,
+ "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ hdev->num_msi, vectors);
+
+ hdev->num_msi = vectors;
+ hdev->num_msi_left = vectors;
+ hdev->base_msi_vector = pdev->irq;
+
+ hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(u16), GFP_KERNEL);
+ if (!hdev->vector_status) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hdev->num_msi; i++)
+ hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
+
+ hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(int), GFP_KERNEL);
+ if (!hdev->vector_irq) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ pci_free_irq_vectors(pdev);
+}
+
+static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
+{
+ int ret = 0;
+
+ hclgevf_get_misc_vector(hdev);
+
+ ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
+ 0, "hclgevf_cmd", hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
+ hdev->misc_vector.vector_irq);
+ return ret;
+ }
+
+ /* enable misc. vector(vector 0) */
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ return ret;
+}
+
+static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
+{
+ /* disable misc vector(vector 0) */
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ free_irq(hdev->misc_vector.vector_irq, hdev);
+ hclgevf_free_vector(hdev, 0);
+}
+
+static int hclgevf_init_instance(struct hclgevf_dev *hdev,
+ struct hnae3_client *client)
+{
+ int ret;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ hdev->nic_client = client;
+ hdev->nic.client = client;
+
+ ret = client->ops->init_instance(&hdev->nic);
+ if (ret)
+ return ret;
+
+ if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+ struct hnae3_client *rc = hdev->roce_client;
+
+ ret = hclgevf_init_roce_base_info(hdev);
+ if (ret)
+ return ret;
+ ret = rc->ops->init_instance(&hdev->roce);
+ if (ret)
+ return ret;
+ }
+ break;
+ case HNAE3_CLIENT_UNIC:
+ hdev->nic_client = client;
+ hdev->nic.client = client;
+
+ ret = client->ops->init_instance(&hdev->nic);
+ if (ret)
+ return ret;
+ break;
+ case HNAE3_CLIENT_ROCE:
+ hdev->roce_client = client;
+ hdev->roce.client = client;
+
+ if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+ ret = hclgevf_init_roce_base_info(hdev);
+ if (ret)
+ return ret;
+
+ ret = client->ops->init_instance(&hdev->roce);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
+ struct hnae3_client *client)
+{
+ /* un-init roce, if it exists */
+ if (hdev->roce_client)
+ hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
+
+ /* un-init nic/unic, if this was not called by roce client */
+ if ((client->ops->uninit_instance) &&
+ (client->type != HNAE3_CLIENT_ROCE))
+ client->ops->uninit_instance(&hdev->nic, 0);
+}
+
+static int hclgevf_register_client(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ return hclgevf_init_instance(hdev, client);
+}
+
+static void hclgevf_unregister_client(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ hclgevf_uninit_instance(hdev, client);
+}
+
+static int hclgevf_pci_init(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclgevf_hw *hw;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable PCI device\n");
+ goto err_no_drvdata;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
+ goto err_disable_device;
+ }
+
+ ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+ hw = &hdev->hw;
+ hw->hdev = hdev;
+ hw->io_base = pci_iomap(pdev, 2, 0);
+ if (!hw->io_base) {
+ dev_err(&pdev->dev, "can't map configuration register space\n");
+ ret = -ENOMEM;
+ goto err_clr_master;
+ }
+
+ return 0;
+
+err_clr_master:
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_no_drvdata:
+ pci_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ pci_iounmap(pdev, hdev->hw.io_base);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct pci_dev *pdev = ae_dev->pdev;
+ struct hclgevf_dev *hdev;
+ int ret;
+
+ hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ hdev->pdev = pdev;
+ hdev->ae_dev = ae_dev;
+ ae_dev->priv = hdev;
+
+ ret = hclgevf_pci_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "PCI initialization failed\n");
+ return ret;
+ }
+
+ ret = hclgevf_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
+ goto err_irq_init;
+ }
+
+ hclgevf_state_init(hdev);
+
+ ret = hclgevf_misc_irq_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
+ ret);
+ goto err_misc_irq_init;
+ }
+
+ ret = hclgevf_cmd_init(hdev);
+ if (ret)
+ goto err_cmd_init;
+
+ ret = hclgevf_configure(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_alloc_tqps(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_set_handle_info(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
+ goto err_config;
+ }
+
+ /* Initialize VF's MTA */
+ hdev->accept_mta_mc = true;
+ ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to set mta filter mode\n", ret);
+ goto err_config;
+ }
+
+ /* Initialize RSS for this VF */
+ ret = hclgevf_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize RSS\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+ goto err_config;
+ }
+
+ pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
+
+ return 0;
+
+err_config:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_init:
+ hclgevf_misc_irq_uninit(hdev);
+err_misc_irq_init:
+ hclgevf_state_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+err_irq_init:
+ hclgevf_pci_uninit(hdev);
+ return ret;
+}
+
+static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ hclgevf_cmd_uninit(hdev);
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_state_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ hclgevf_pci_uninit(hdev);
+ ae_dev->priv = NULL;
+}
+
+static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hnae3_knic_private_info *kinfo = &nic->kinfo;
+
+ return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+/**
+ * hclgevf_get_channels - Get the current channels enabled and max supported.
+ * @handle: hardware information for network interface
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void hclgevf_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ ch->max_combined = hclgevf_get_max_channels(hdev);
+ ch->other_count = 0;
+ ch->max_other = 0;
+ ch->combined_count = hdev->num_tqps;
+}
+
+static const struct hnae3_ae_ops hclgevf_ops = {
+ .init_ae_dev = hclgevf_init_ae_dev,
+ .uninit_ae_dev = hclgevf_uninit_ae_dev,
+ .init_client_instance = hclgevf_register_client,
+ .uninit_client_instance = hclgevf_unregister_client,
+ .start = hclgevf_ae_start,
+ .stop = hclgevf_ae_stop,
+ .map_ring_to_vector = hclgevf_map_ring_to_vector,
+ .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
+ .get_vector = hclgevf_get_vector,
+ .reset_queue = hclgevf_reset_tqp,
+ .set_promisc_mode = hclgevf_set_promisc_mode,
+ .get_mac_addr = hclgevf_get_mac_addr,
+ .set_mac_addr = hclgevf_set_mac_addr,
+ .add_uc_addr = hclgevf_add_uc_addr,
+ .rm_uc_addr = hclgevf_rm_uc_addr,
+ .add_mc_addr = hclgevf_add_mc_addr,
+ .rm_mc_addr = hclgevf_rm_mc_addr,
+ .get_stats = hclgevf_get_stats,
+ .update_stats = hclgevf_update_stats,
+ .get_strings = hclgevf_get_strings,
+ .get_sset_count = hclgevf_get_sset_count,
+ .get_rss_key_size = hclgevf_get_rss_key_size,
+ .get_rss_indir_size = hclgevf_get_rss_indir_size,
+ .get_rss = hclgevf_get_rss,
+ .set_rss = hclgevf_set_rss,
+ .get_tc_size = hclgevf_get_tc_size,
+ .get_fw_version = hclgevf_get_fw_version,
+ .set_vlan_filter = hclgevf_set_vlan_filter,
+ .get_channels = hclgevf_get_channels,
+};
+
+static struct hnae3_ae_algo ae_algovf = {
+ .ops = &hclgevf_ops,
+ .name = HCLGEVF_NAME,
+ .pdev_id_table = ae_algovf_pci_tbl,
+};
+
+static int hclgevf_init(void)
+{
+ pr_info("%s is initializing\n", HCLGEVF_NAME);
+
+ return hnae3_register_ae_algo(&ae_algovf);
+}
+
+static void hclgevf_exit(void)
+{
+ hnae3_unregister_ae_algo(&ae_algovf);
+}
+module_init(hclgevf_init);
+module_exit(hclgevf_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HCLGEVF Driver");
+MODULE_VERSION(HCLGEVF_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
new file mode 100644
index 000000000000..a63bee4a3674
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_MAIN_H
+#define __HCLGEVF_MAIN_H
+#include <linux/fs.h>
+#include <linux/types.h>
+#include "hclge_mbx.h"
+#include "hclgevf_cmd.h"
+#include "hnae3.h"
+
+#define HCLGEVF_MOD_VERSION "v1.0"
+#define HCLGEVF_DRIVER_NAME "hclgevf"
+
+#define HCLGEVF_ROCEE_VECTOR_NUM 0
+#define HCLGEVF_MISC_VECTOR_NUM 0
+
+#define HCLGEVF_INVALID_VPORT 0xffff
+
+/* The actual number depends upon the total number of VFs created by
+ * the physical function, but the maximum number of possible vectors
+ * per VF is {VFn(1-32), VECTn(32 + 1)}.
+ */
+#define HCLGEVF_MAX_VF_VECTOR_NUM (32 + 1)
+
+#define HCLGEVF_VECTOR_REG_BASE 0x20000
+#define HCLGEVF_MISC_VECTOR_REG_BASE 0x20400
+#define HCLGEVF_VECTOR_REG_OFFSET 0x4
+#define HCLGEVF_VECTOR_VF_OFFSET 0x100000
+
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
+
+#define HCLGEVF_TQP_RESET_TRY_TIMES 10
+
+#define HCLGEVF_RSS_IND_TBL_SIZE 512
+#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
+#define HCLGEVF_RSS_KEY_SIZE 40
+#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ 0
+#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
+#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
+#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
+#define HCLGEVF_RSS_CFG_TBL_NUM \
+ (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
+
+/* states of hclgevf device & tasks */
+enum hclgevf_states {
+ /* device states */
+ HCLGEVF_STATE_DOWN,
+ HCLGEVF_STATE_DISABLED,
+ /* task states */
+ HCLGEVF_STATE_SERVICE_SCHED,
+ HCLGEVF_STATE_MBX_SERVICE_SCHED,
+ HCLGEVF_STATE_MBX_HANDLING,
+};
+
+#define HCLGEVF_MPF_ENABLE 1
+
+struct hclgevf_mac {
+ u8 mac_addr[ETH_ALEN];
+ int link;
+};
+
+struct hclgevf_hw {
+ void __iomem *io_base;
+ int num_vec;
+ struct hclgevf_cmq cmq;
+ struct hclgevf_mac mac;
+ void *hdev; /* hclgevf device this hw block belongs to */
+};
+
+/* TQP stats */
+struct hclgevf_tqp_stats {
+ /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
+ u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
+ /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
+ u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
+};
+
+struct hclgevf_tqp {
+ struct device *dev; /* device for DMA mapping */
+ struct hnae3_queue q;
+ struct hclgevf_tqp_stats tqp_stats;
+ u16 index; /* global index in a NIC controller */
+
+ bool alloced;
+};
+
+struct hclgevf_cfg {
+ u8 vmdq_vport_num;
+ u8 tc_num;
+ u16 tqp_desc_num;
+ u16 rx_buf_len;
+ u8 phy_addr;
+ u8 media_type;
+ u8 mac_addr[ETH_ALEN];
+ u32 numa_node_map;
+};
+
+struct hclgevf_rss_cfg {
+ u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
+ u32 hash_algo;
+ u32 rss_size;
+ u8 hw_tc_map;
+ u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+};
+
+struct hclgevf_misc_vector {
+ u8 __iomem *addr;
+ int vector_irq;
+};
+
+struct hclgevf_dev {
+ struct pci_dev *pdev;
+ struct hnae3_ae_dev *ae_dev;
+ struct hclgevf_hw hw;
+ struct hclgevf_misc_vector misc_vector;
+ struct hclgevf_rss_cfg rss_cfg;
+ unsigned long state;
+
+ u32 fw_version;
+ u16 num_tqps; /* num task queue pairs of this VF */
+
+ u16 alloc_rss_size; /* allocated RSS task queue */
+ u16 rss_size_max; /* HW defined max RSS task queue */
+
+ u16 num_alloc_vport; /* num vports this driver supports */
+ u32 numa_node_mask;
+ u16 rx_buf_len;
+ u16 num_desc;
+ u8 hw_tc_map;
+
+ u16 num_msi;
+ u16 num_msi_left;
+ u16 num_msi_used;
+ u32 base_msi_vector;
+ u16 *vector_status;
+ int *vector_irq;
+
+ bool accept_mta_mc; /* whether to accept mta filter multicast */
+ struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
+
+ struct timer_list service_timer;
+ struct work_struct service_task;
+ struct work_struct mbx_service_task;
+
+ struct hclgevf_tqp *htqp;
+
+ struct hnae3_handle nic;
+ struct hnae3_handle roce;
+
+ struct hnae3_client *nic_client;
+ struct hnae3_client *roce_client;
+ u32 flag;
+};
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
+ const u8 *msg_data, u8 msg_len, bool need_resp,
+ u8 *resp_data, u16 resp_len);
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
new file mode 100644
index 000000000000..e39cad285fa9
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_mbx.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
+{
+ /* this function should be called with mbx_resp.mbx_mutex held
+ * to protect received_resp from race conditions
+ */
+ hdev->mbx_resp.received_resp = false;
+ hdev->mbx_resp.origin_mbx_msg = 0;
+ hdev->mbx_resp.resp_status = 0;
+ memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
+}
+
+/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a
+ * mailbox message to PF.
+ * @hdev: pointer to struct hclgevf_dev
+ * @code0: the message opcode VF sent to PF
+ * @code1: the message sub-opcode VF sent to PF
+ * @resp_data: pointer to the buffer to store the response data
+ * @resp_len: the length of resp_data, in bytes
+ */
+static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
+ u8 *resp_data, u16 resp_len)
+{
+#define HCLGEVF_MAX_TRY_TIMES 500
+#define HCLGEVF_SLEEP_USECOND 1000
+ struct hclgevf_mbx_resp_status *mbx_resp;
+ u16 r_code0, r_code1;
+ int i = 0;
+
+ if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "VF mbx response len(=%d) exceeds maximum(=%d)\n",
+ resp_len,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE);
+ return -EINVAL;
+ }
+
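+ /* poll for the PF response for at most 500 * 1000us = ~500ms */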
+ while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+ udelay(HCLGEVF_SLEEP_USECOND);
+ i++;
+ }
+
+ if (i >= HCLGEVF_MAX_TRY_TIMES) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not get mbx resp(=%d) from PF in %d tries\n",
+ hdev->mbx_resp.received_resp, i);
+ return -EIO;
+ }
+
+ mbx_resp = &hdev->mbx_resp;
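+ /* origin_mbx_msg packs the original message code in the upper 16
+ * bits and the subcode in the low bits (see hclgevf_mbx_handler)
+ */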
+ r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16);
+ r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff);
+ if (resp_data)
+ memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);
+
+ if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not match resp code (code0=%d, code1=%d), resp_status=%d\n",
+ code0, code1, mbx_resp->resp_status);
+ hclgevf_reset_mbx_resp_status(hdev);
+ return -EIO;
+ }
+
+ hclgevf_reset_mbx_resp_status(hdev);
+
+ return 0;
+}
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
+ const u8 *msg_data, u8 msg_len, bool need_resp,
+ u8 *resp_data, u16 resp_len)
+{
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+ /* first two bytes are reserved for code & subcode */
+ if (msg_len > (HCLGE_MBX_MAX_MSG_SIZE - 2)) {
+ dev_err(&hdev->pdev->dev,
+ "VF send mbx msg fail, msg len %d exceeds max len %d\n",
+ msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+ return -EINVAL;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ req->msg[0] = code;
+ req->msg[1] = subcode;
+ memcpy(&req->msg[2], msg_data, msg_len);
+
+ /* synchronous send */
+ if (need_resp) {
+ mutex_lock(&hdev->mbx_resp.mbx_mutex);
+ hclgevf_reset_mbx_resp_status(hdev);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to send mbx message to PF\n",
+ status);
+ mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+ return status;
+ }
+
+ status = hclgevf_get_mbx_resp(hdev, code, subcode, resp_data,
+ resp_len);
+ mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+ } else {
+ /* asynchronous send */
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to send mbx message to PF\n",
+ status);
+ return status;
+ }
+ }
+
+ return status;
+}
+
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_mbx_resp_status *resp;
+ struct hclge_mbx_pf_to_vf_cmd *req;
+ struct hclgevf_cmq_ring *crq;
+ struct hclgevf_desc *desc;
+ u16 link_status, flag;
+ u8 *temp;
+ int i;
+
+ resp = &hdev->mbx_resp;
+ crq = &hdev->hw.cmq.crq;
+
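+ /* process CRQ descriptors until we hit one whose OUTVLD bit is not
+ * set, i.e. one the PF has not filled in yet
+ */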
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ while (hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B)) {
+ desc = &crq->desc[crq->next_to_use];
+ req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
+
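+ /* for HCLGE_MBX_PF_VF_RESP: msg[1..2] echo the original code and
+ * subcode, msg[3] is the response status and msg[4..] carries the
+ * additional response data
+ */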
+ switch (req->msg[0]) {
+ case HCLGE_MBX_PF_VF_RESP:
+ if (resp->received_resp)
+ dev_warn(&hdev->pdev->dev,
+ "VF mbx resp flag not clear(%d)\n",
+ req->msg[1]);
+ resp->received_resp = true;
+
+ resp->origin_mbx_msg = (req->msg[1] << 16);
+ resp->origin_mbx_msg |= req->msg[2];
+ resp->resp_status = req->msg[3];
+
+ temp = (u8 *)&req->msg[4];
+ for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
+ resp->additional_info[i] = *temp;
+ temp++;
+ }
+ break;
+ case HCLGE_MBX_LINK_STAT_CHANGE:
+ link_status = le16_to_cpu(req->msg[1]);
+
+ /* update the upper layer with the new link status */
+ hclgevf_update_link_status(hdev, link_status);
+
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "VF received unsupported(%d) mbx msg from PF\n",
+ req->msg[0]);
+ break;
+ }
+ hclge_mbx_ring_ptr_move_crq(crq);
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ }
+
+ /* Write back the CMDQ_RQ head pointer; the M7 firmware needs it */
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
+ crq->next_to_use);
+}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 7feff2450ed6..354c0982847b 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -199,18 +199,18 @@ static void __emac_set_multicast_list(struct emac_instance *dev);
static inline int emac_phy_supports_gige(int phy_mode)
{
- return phy_mode == PHY_MODE_GMII ||
- phy_mode == PHY_MODE_RGMII ||
- phy_mode == PHY_MODE_SGMII ||
- phy_mode == PHY_MODE_TBI ||
- phy_mode == PHY_MODE_RTBI;
+ return phy_interface_mode_is_rgmii(phy_mode) ||
+ phy_mode == PHY_INTERFACE_MODE_GMII ||
+ phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_TBI ||
+ phy_mode == PHY_INTERFACE_MODE_RTBI;
}
static inline int emac_phy_gpcs(int phy_mode)
{
- return phy_mode == PHY_MODE_SGMII ||
- phy_mode == PHY_MODE_TBI ||
- phy_mode == PHY_MODE_RTBI;
+ return phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_TBI ||
+ phy_mode == PHY_INTERFACE_MODE_RTBI;
}
static inline void emac_tx_enable(struct emac_instance *dev)
@@ -494,6 +494,9 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
case 16384:
ret |= EMAC_MR1_RFS_16K;
break;
+ case 8192:
+ ret |= EMAC4_MR1_RFS_8K;
+ break;
case 4096:
ret |= EMAC_MR1_RFS_4K;
break;
@@ -516,6 +519,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
case 16384:
ret |= EMAC4_MR1_TFS_16K;
break;
+ case 8192:
+ ret |= EMAC4_MR1_TFS_8K;
+ break;
case 4096:
ret |= EMAC4_MR1_TFS_4K;
break;
@@ -2865,7 +2871,7 @@ static int emac_init_config(struct emac_instance *dev)
/* PHY mode needs some decoding */
dev->phy_mode = of_get_phy_mode(np);
if (dev->phy_mode < 0)
- dev->phy_mode = PHY_MODE_NA;
+ dev->phy_mode = PHY_INTERFACE_MODE_NA;
/* Check EMAC version */
if (of_device_is_compatible(np, "ibm,emac4sync")) {
@@ -3168,7 +3174,7 @@ static int emac_probe(struct platform_device *ofdev)
printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
ndev->name, dev->cell_index, np, ndev->dev_addr);
- if (dev->phy_mode == PHY_MODE_SGMII)
+ if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII)
printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
if (dev->phy.address >= 0)
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index 5afcc27ceebb..e2f80cca9bed 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -104,19 +104,6 @@ struct emac_regs {
} u1;
};
-/*
- * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY)
- */
-#define PHY_MODE_NA PHY_INTERFACE_MODE_NA
-#define PHY_MODE_MII PHY_INTERFACE_MODE_MII
-#define PHY_MODE_RMII PHY_INTERFACE_MODE_RMII
-#define PHY_MODE_SMII PHY_INTERFACE_MODE_SMII
-#define PHY_MODE_RGMII PHY_INTERFACE_MODE_RGMII
-#define PHY_MODE_TBI PHY_INTERFACE_MODE_TBI
-#define PHY_MODE_GMII PHY_INTERFACE_MODE_GMII
-#define PHY_MODE_RTBI PHY_INTERFACE_MODE_RTBI
-#define PHY_MODE_SGMII PHY_INTERFACE_MODE_SGMII
-
/* EMACx_MR0 */
#define EMAC_MR0_RXI 0x80000000
#define EMAC_MR0_TXI 0x40000000
@@ -151,9 +138,11 @@ struct emac_regs {
#define EMAC4_MR1_RFS_2K 0x00100000
#define EMAC4_MR1_RFS_4K 0x00180000
+#define EMAC4_MR1_RFS_8K 0x00200000
#define EMAC4_MR1_RFS_16K 0x00280000
#define EMAC4_MR1_TFS_2K 0x00020000
#define EMAC4_MR1_TFS_4K 0x00030000
+#define EMAC4_MR1_TFS_8K 0x00040000
#define EMAC4_MR1_TFS_16K 0x00050000
#define EMAC4_MR1_TR 0x00008000
#define EMAC4_MR1_MWSW_001 0x00001000
@@ -242,7 +231,7 @@ struct emac_regs {
#define EMAC_STACR_PHYE 0x00004000
#define EMAC_STACR_STAC_MASK 0x00003000
#define EMAC_STACR_STAC_READ 0x00001000
#define EMAC_STACR_STAC_WRITE 0x00002000
#define EMAC_STACR_OPBC_MASK 0x00000C00
#define EMAC_STACR_OPBC_50 0x00000000
#define EMAC_STACR_OPBC_66 0x00000400
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index 35865d05fccd..aa070c063e48 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -96,7 +96,7 @@ int emac_mii_reset_gpcs(struct mii_phy *phy)
if ((val & BMCR_ISOLATE) && limit > 0)
gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
- if (limit > 0 && phy->mode == PHY_MODE_SGMII) {
+ if (limit > 0 && phy->mode == PHY_INTERFACE_MODE_SGMII) {
/* Configure GPCS interface to recommended setting for SGMII */
gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */
gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */
@@ -313,16 +313,16 @@ static int cis8201_init(struct mii_phy *phy)
epcr &= ~EPCR_MODE_MASK;
switch (phy->mode) {
- case PHY_MODE_TBI:
+ case PHY_INTERFACE_MODE_TBI:
epcr |= EPCR_TBI_MODE;
break;
- case PHY_MODE_RTBI:
+ case PHY_INTERFACE_MODE_RTBI:
epcr |= EPCR_RTBI_MODE;
break;
- case PHY_MODE_GMII:
+ case PHY_INTERFACE_MODE_GMII:
epcr |= EPCR_GMII_MODE;
break;
- case PHY_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII:
default:
epcr |= EPCR_RGMII_MODE;
}
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index c4a1ac38bba8..00f5999de3cf 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -52,43 +52,28 @@
/* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */
static inline int rgmii_valid_mode(int phy_mode)
{
- return phy_mode == PHY_MODE_GMII ||
- phy_mode == PHY_MODE_MII ||
- phy_mode == PHY_MODE_RGMII ||
- phy_mode == PHY_MODE_TBI ||
- phy_mode == PHY_MODE_RTBI;
-}
-
-static inline const char *rgmii_mode_name(int mode)
-{
- switch (mode) {
- case PHY_MODE_RGMII:
- return "RGMII";
- case PHY_MODE_TBI:
- return "TBI";
- case PHY_MODE_GMII:
- return "GMII";
- case PHY_MODE_MII:
- return "MII";
- case PHY_MODE_RTBI:
- return "RTBI";
- default:
- BUG();
- }
+ return phy_interface_mode_is_rgmii(phy_mode) ||
+ phy_mode == PHY_INTERFACE_MODE_GMII ||
+ phy_mode == PHY_INTERFACE_MODE_MII ||
+ phy_mode == PHY_INTERFACE_MODE_TBI ||
+ phy_mode == PHY_INTERFACE_MODE_RTBI;
}
static inline u32 rgmii_mode_mask(int mode, int input)
{
switch (mode) {
- case PHY_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
return RGMII_FER_RGMII(input);
- case PHY_MODE_TBI:
+ case PHY_INTERFACE_MODE_TBI:
return RGMII_FER_TBI(input);
- case PHY_MODE_GMII:
+ case PHY_INTERFACE_MODE_GMII:
return RGMII_FER_GMII(input);
- case PHY_MODE_MII:
+ case PHY_INTERFACE_MODE_MII:
return RGMII_FER_MII(input);
- case PHY_MODE_RTBI:
+ case PHY_INTERFACE_MODE_RTBI:
return RGMII_FER_RTBI(input);
default:
BUG();
@@ -115,7 +100,7 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode)
out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
printk(KERN_NOTICE "%pOF: input %d in %s mode\n",
- ofdev->dev.of_node, input, rgmii_mode_name(mode));
+ ofdev->dev.of_node, input, phy_modes(mode));
++dev->users;
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 89c42d362292..fdcc734541fe 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -49,20 +49,20 @@
*/
static inline int zmii_valid_mode(int mode)
{
- return mode == PHY_MODE_MII ||
- mode == PHY_MODE_RMII ||
- mode == PHY_MODE_SMII ||
- mode == PHY_MODE_NA;
+ return mode == PHY_INTERFACE_MODE_MII ||
+ mode == PHY_INTERFACE_MODE_RMII ||
+ mode == PHY_INTERFACE_MODE_SMII ||
+ mode == PHY_INTERFACE_MODE_NA;
}
static inline const char *zmii_mode_name(int mode)
{
switch (mode) {
- case PHY_MODE_MII:
+ case PHY_INTERFACE_MODE_MII:
return "MII";
- case PHY_MODE_RMII:
+ case PHY_INTERFACE_MODE_RMII:
return "RMII";
- case PHY_MODE_SMII:
+ case PHY_INTERFACE_MODE_SMII:
return "SMII";
default:
BUG();
@@ -72,11 +72,11 @@ static inline const char *zmii_mode_name(int mode)
static inline u32 zmii_mode_mask(int mode, int input)
{
switch (mode) {
- case PHY_MODE_MII:
+ case PHY_INTERFACE_MODE_MII:
return ZMII_FER_MII(input);
- case PHY_MODE_RMII:
+ case PHY_INTERFACE_MODE_RMII:
return ZMII_FER_RMII(input);
- case PHY_MODE_SMII:
+ case PHY_INTERFACE_MODE_SMII:
return ZMII_FER_SMII(input);
default:
return 0;
@@ -106,27 +106,27 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode)
* Please, always specify PHY mode in your board port to avoid
* any surprises.
*/
- if (dev->mode == PHY_MODE_NA) {
- if (*mode == PHY_MODE_NA) {
+ if (dev->mode == PHY_INTERFACE_MODE_NA) {
+ if (*mode == PHY_INTERFACE_MODE_NA) {
u32 r = dev->fer_save;
ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r);
if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1)))
- dev->mode = PHY_MODE_MII;
+ dev->mode = PHY_INTERFACE_MODE_MII;
else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1)))
- dev->mode = PHY_MODE_RMII;
+ dev->mode = PHY_INTERFACE_MODE_RMII;
else
- dev->mode = PHY_MODE_SMII;
- } else
+ dev->mode = PHY_INTERFACE_MODE_SMII;
+ } else {
dev->mode = *mode;
-
+ }
printk(KERN_NOTICE "%pOF: bridge in %s mode\n",
ofdev->dev.of_node,
zmii_mode_name(dev->mode));
} else {
/* All inputs must use the same mode */
- if (*mode != PHY_MODE_NA && *mode != dev->mode) {
+ if (*mode != PHY_INTERFACE_MODE_NA && *mode != dev->mode) {
printk(KERN_ERR
"%pOF: invalid mode %d specified for input %d\n",
ofdev->dev.of_node, *mode, input);
@@ -246,7 +246,7 @@ static int zmii_probe(struct platform_device *ofdev)
mutex_init(&dev->lock);
dev->ofdev = ofdev;
- dev->mode = PHY_MODE_NA;
+ dev->mode = PHY_INTERFACE_MODE_NA;
rc = -ENXIO;
if (of_address_to_resource(np, 0, &regs)) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1dc4aef37d3a..8c3058d5d191 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -59,6 +59,7 @@
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
+#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -410,6 +411,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
struct ibmvnic_rx_pool *rx_pool;
int rx_scrqs;
int i, j, rc;
+ u64 *size_array;
+
+ size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
for (i = 0; i < rx_scrqs; i++) {
@@ -417,7 +422,17 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
- rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
+ if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
+ free_long_term_buff(adapter, &rx_pool->long_term_buff);
+ rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ rc = alloc_long_term_buff(adapter,
+ &rx_pool->long_term_buff,
+ rx_pool->size *
+ rx_pool->buff_size);
+ } else {
+ rc = reset_long_term_buff(adapter,
+ &rx_pool->long_term_buff);
+ }
+
if (rc)
return rc;
@@ -439,14 +454,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
- int rx_scrqs;
int i, j;
if (!adapter->rx_pool)
return;
- rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- for (i = 0; i < rx_scrqs; i++) {
+ for (i = 0; i < adapter->num_active_rx_pools; i++) {
rx_pool = &adapter->rx_pool[i];
netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -469,6 +482,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
kfree(adapter->rx_pool);
adapter->rx_pool = NULL;
+ adapter->num_active_rx_pools = 0;
}
static int init_rx_pools(struct net_device *netdev)
@@ -493,6 +507,8 @@ static int init_rx_pools(struct net_device *netdev)
return -1;
}
+ adapter->num_active_rx_pools = 0;
+
for (i = 0; i < rxadd_subcrqs; i++) {
rx_pool = &adapter->rx_pool[i];
@@ -536,6 +552,8 @@ static int init_rx_pools(struct net_device *netdev)
rx_pool->next_free = 0;
}
+ adapter->num_active_rx_pools = rxadd_subcrqs;
+
return 0;
}
@@ -586,13 +604,12 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_tx_pool *tx_pool;
- int i, tx_scrqs;
+ int i;
if (!adapter->tx_pool)
return;
- tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
- for (i = 0; i < tx_scrqs; i++) {
+ for (i = 0; i < adapter->num_active_tx_pools; i++) {
netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
tx_pool = &adapter->tx_pool[i];
kfree(tx_pool->tx_buff);
@@ -603,6 +620,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
kfree(adapter->tx_pool);
adapter->tx_pool = NULL;
+ adapter->num_active_tx_pools = 0;
}
static int init_tx_pools(struct net_device *netdev)
@@ -619,6 +637,8 @@ static int init_tx_pools(struct net_device *netdev)
if (!adapter->tx_pool)
return -1;
+ adapter->num_active_tx_pools = 0;
+
for (i = 0; i < tx_subcrqs; i++) {
tx_pool = &adapter->tx_pool[i];
@@ -666,6 +686,8 @@ static int init_tx_pools(struct net_device *netdev)
tx_pool->producer_index = 0;
}
+ adapter->num_active_tx_pools = tx_subcrqs;
+
return 0;
}
@@ -756,6 +778,12 @@ static int ibmvnic_login(struct net_device *netdev)
}
} while (adapter->renegotiate);
+ /* handle pending MAC address changes after successful login */
+ if (adapter->mac_change_pending) {
+ __ibmvnic_set_mac(netdev, &adapter->desired.mac);
+ adapter->mac_change_pending = false;
+ }
+
return 0;
}
@@ -854,7 +882,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
if (adapter->vpd->buff)
len = adapter->vpd->len;
- reinit_completion(&adapter->fw_done);
+ init_completion(&adapter->fw_done);
crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
crq.get_vpd_size.cmd = GET_VPD_SIZE;
ibmvnic_send_crq(adapter, &crq);
@@ -916,6 +944,13 @@ static int init_resources(struct ibmvnic_adapter *adapter)
if (!adapter->vpd)
return -ENOMEM;
+ /* Vital Product Data (VPD) */
+ rc = ibmvnic_get_vpd(adapter);
+ if (rc) {
+ netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
+ return rc;
+ }
+
adapter->map_id = 1;
adapter->napi = kcalloc(adapter->req_rx_queues,
sizeof(struct napi_struct), GFP_KERNEL);
@@ -989,15 +1024,10 @@ static int __ibmvnic_open(struct net_device *netdev)
static int ibmvnic_open(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int rc, vpd;
+ int rc;
mutex_lock(&adapter->reset_lock);
- if (adapter->mac_change_pending) {
- __ibmvnic_set_mac(netdev, &adapter->desired.mac);
- adapter->mac_change_pending = false;
- }
-
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
if (rc) {
@@ -1017,11 +1047,6 @@ static int ibmvnic_open(struct net_device *netdev)
rc = __ibmvnic_open(netdev);
netif_carrier_on(netdev);
- /* Vital Product Data (VPD) */
- vpd = ibmvnic_get_vpd(adapter);
- if (vpd)
- netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
-
mutex_unlock(&adapter->reset_lock);
return rc;
@@ -1153,6 +1178,9 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
hdr_len[2] = tcp_hdrlen(skb);
else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
hdr_len[2] = sizeof(struct udphdr);
+ } else if (skb->protocol == htons(ETH_P_ARP)) {
+ hdr_len[1] = arp_hdr_len(skb->dev);
+ hdr_len[2] = 0;
}
memset(hdr_data, 0, 120);
@@ -1275,6 +1303,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned char *dst;
u64 *handle_array;
int index = 0;
+ u8 proto = 0;
int ret = 0;
if (adapter->resetting) {
@@ -1363,17 +1392,18 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
}
if (skb->protocol == htons(ETH_P_IP)) {
- if (ip_hdr(skb)->version == 4)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
- else if (ip_hdr(skb)->version == 6)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
-
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
- else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
+ proto = ip_hdr(skb)->protocol;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
+ proto = ipv6_hdr(skb)->nexthdr;
}
+ if (proto == IPPROTO_TCP)
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
+ else if (proto == IPPROTO_UDP)
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
hdrs += 2;
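[Editor's note: the rewrite above fixes two problems in the old block: it read ip_hdr() even for IPv6 frames (whose EtherType is ETH_P_IPV6, so the old version check could never see 6), and its else-branch flagged every non-TCP packet as UDP. A minimal sketch of the corrected extraction, with a hypothetical helper name:

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>

/* Pick the L4 protocol from the header that matches the EtherType. */
static u8 demo_l4_proto(const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return ip_hdr(skb)->protocol;
	if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_hdr(skb)->nexthdr;
	return 0;	/* neither IPv4 nor IPv6 */
}
]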
@@ -1386,7 +1416,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
/* determine if l2/3/4 headers are sent to firmware */
if ((*hdrs >> 7) & 1 &&
(skb->protocol == htons(ETH_P_IP) ||
- skb->protocol == htons(ETH_P_IPV6))) {
+ skb->protocol == htons(ETH_P_IPV6) ||
+ skb->protocol == htons(ETH_P_ARP))) {
build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
tx_crq.v1.n_crq_elem = num_entries;
tx_buff->indir_arr[0] = tx_crq;
@@ -1517,25 +1548,29 @@ static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
+
+ init_completion(&adapter->fw_done);
ibmvnic_send_crq(adapter, &crq);
+ wait_for_completion(&adapter->fw_done);
/* netdev->dev_addr is changed in handle_change_mac_rsp function */
- return 0;
+ return adapter->fw_done_rc ? -EIO : 0;
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
+ int rc;
- if (adapter->state != VNIC_OPEN) {
+ if (adapter->state == VNIC_PROBED) {
memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
adapter->mac_change_pending = true;
return 0;
}
- __ibmvnic_set_mac(netdev, addr);
+ rc = __ibmvnic_set_mac(netdev, addr);
- return 0;
+ return rc;
}
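[Editor's note: the change above turns a fire-and-forget CRQ into a synchronous request: arm the completion before sending, block until the response handler signals it, then surface the firmware return code. A sketch of that request/response shape with hypothetical demo_* names:

#include <linux/completion.h>
#include <linux/errno.h>

struct demo_adapter {
	struct completion fw_done;
	int fw_done_rc;
};

static void demo_send(struct demo_adapter *a) { /* queue the CRQ here */ }

static int demo_request(struct demo_adapter *a)
{
	init_completion(&a->fw_done);	/* arm before sending: no lost wakeup */
	demo_send(a);
	wait_for_completion(&a->fw_done);
	return a->fw_done_rc ? -EIO : 0;
}

/* Called from the response path, mirroring handle_change_mac_rsp(). */
static void demo_response(struct demo_adapter *a, int rc)
{
	a->fw_done_rc = rc;
	complete(&a->fw_done);
}
]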
/**
@@ -1545,6 +1580,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
static int do_reset(struct ibmvnic_adapter *adapter,
struct ibmvnic_rwi *rwi, u32 reset_state)
{
+ u64 old_num_rx_queues, old_num_tx_queues;
struct net_device *netdev = adapter->netdev;
int i, rc;
@@ -1554,6 +1590,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
netif_carrier_off(netdev);
adapter->reset_reason = rwi->reset_reason;
+ old_num_rx_queues = adapter->req_rx_queues;
+ old_num_tx_queues = adapter->req_tx_queues;
+
if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
rc = ibmvnic_reenable_crq_queue(adapter);
if (rc)
@@ -1598,6 +1637,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rc = init_resources(adapter);
if (rc)
return rc;
+ } else if (adapter->req_rx_queues != old_num_rx_queues ||
+ adapter->req_tx_queues != old_num_tx_queues) {
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
+ init_rx_pools(netdev);
+ init_tx_pools(netdev);
} else {
rc = reset_tx_pools(adapter);
if (rc)
@@ -2448,6 +2493,12 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
struct ibmvnic_sub_crq_queue *scrq = instance;
struct ibmvnic_adapter *adapter = scrq->adapter;
+ /* When booting a kdump kernel we can hit pending interrupts
+ * prior to completing driver initialization.
+ */
+ if (unlikely(adapter->state != VNIC_OPEN))
+ return IRQ_NONE;
+
adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
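[Editor's note: the guard added above is the usual defense for inherited interrupts: until the device reaches its operational state, answer IRQ_NONE so the core can treat the line as spurious. A hedged sketch; the DEMO_* names are invented:

#include <linux/interrupt.h>

enum demo_state { DEMO_PROBED, DEMO_OPEN };

struct demo_dev {
	enum demo_state state;
};

static irqreturn_t demo_irq(int irq, void *data)
{
	struct demo_dev *d = data;

	/* kdump kernels can inherit asserted interrupts before init ends */
	if (unlikely(d->state != DEMO_OPEN))
		return IRQ_NONE;

	return IRQ_HANDLED;	/* real handling would go here */
}
]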
@@ -3345,7 +3396,11 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
return;
}
+ adapter->ip_offload_ctrl.len =
+ cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
+ adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
+ adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
@@ -3518,8 +3573,8 @@ static void handle_error_indication(union ibmvnic_crq *crq,
ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}
-static void handle_change_mac_rsp(union ibmvnic_crq *crq,
- struct ibmvnic_adapter *adapter)
+static int handle_change_mac_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct device *dev = &adapter->vdev->dev;
@@ -3528,10 +3583,13 @@ static void handle_change_mac_rsp(union ibmvnic_crq *crq,
rc = crq->change_mac_addr_rsp.rc.code;
if (rc) {
dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
- return;
+ goto out;
}
memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
ETH_ALEN);
+out:
+ complete(&adapter->fw_done);
+ return rc;
}
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
@@ -3585,7 +3643,17 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
*req_value,
(long int)be64_to_cpu(crq->request_capability_rsp.
number), name);
- *req_value = be64_to_cpu(crq->request_capability_rsp.number);
+
+ if (be16_to_cpu(crq->request_capability_rsp.capability) ==
+ REQ_MTU) {
+ pr_err("mtu of %llu is not supported. Reverting.\n",
+ *req_value);
+ *req_value = adapter->fallback.mtu;
+ } else {
+ *req_value =
+ be64_to_cpu(crq->request_capability_rsp.number);
+ }
+
ibmvnic_send_req_caps(adapter, 1);
return;
default:
@@ -3981,7 +4049,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
break;
case CHANGE_MAC_ADDR_RSP:
netdev_dbg(netdev, "Got MAC address change Response\n");
- handle_change_mac_rsp(crq, adapter);
+ adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
break;
case ERROR_INDICATION:
netdev_dbg(netdev, "Got Error Indication\n");
@@ -4285,7 +4353,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
- IBMVNIC_MAX_TX_QUEUES);
+ IBMVNIC_MAX_QUEUES);
if (!netdev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4487f1e2c266..fe21a6e2ddae 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -39,7 +39,7 @@
#define IBMVNIC_RX_WEIGHT 16
/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
#define IBMVNIC_BUFFS_PER_POOL 100
-#define IBMVNIC_MAX_TX_QUEUES 5
+#define IBMVNIC_MAX_QUEUES 10
#define IBMVNIC_TSO_BUF_SZ 65536
#define IBMVNIC_TSO_BUFS 64
@@ -1091,6 +1091,8 @@ struct ibmvnic_adapter {
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
u8 map_id;
+ u64 num_active_rx_pools;
+ u64 num_active_tx_pools;
struct tasklet_struct tasklet;
enum vnic_state state;
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index d7bdea79e9fa..8fd2458060a0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -331,7 +331,8 @@ struct e1000_adapter {
enum e1000_state_t {
__E1000_TESTING,
__E1000_RESETTING,
- __E1000_DOWN
+ __E1000_DOWN,
+ __E1000_DISABLED
};
#undef pr_fmt
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 3b3983a1ffbb..dc71e87c3260 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1838,8 +1838,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
p = (char *)adapter + stat->stat_offset;
break;
default:
- WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
- stat->type, i);
+ netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n",
+ stat->type, i);
continue;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1982f7917a8d..3dd4aeb2706d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
- struct e1000_adapter *adapter;
+ struct e1000_adapter *adapter = NULL;
struct e1000_hw *hw;
static int cards_found;
@@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 tmp = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
int bars, need_ioport;
+ bool disable_dev = false;
/* do not allocate ioport bars when not needed */
need_ioport = e1000_is_need_ioport(pdev);
@@ -1259,11 +1260,13 @@ err_mdio_ioremap:
iounmap(hw->ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
err_ioremap:
+ disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev, bars);
err_pci_reg:
- pci_disable_device(pdev);
+ if (!adapter || disable_dev)
+ pci_disable_device(pdev);
return err;
}
@@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ bool disable_dev;
e1000_down_and_stop(adapter);
e1000_release_manageability(adapter);
@@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev, adapter->bars);
+ disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
- pci_disable_device(pdev);
+ if (disable_dev)
+ pci_disable_device(pdev);
}
/**
@@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (netif_running(netdev))
e1000_free_irq(adapter);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+ pci_disable_device(pdev);
return 0;
}
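[Editor's note: the e1000 hunks above all rely on the same idiom: test_and_set_bit() atomically returns the previous bit value, so only the first path to run sees the bit clear and actually calls pci_disable_device(); resume paths clear the bit again. A hedged sketch:

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/pci.h>

#define DEMO_DISABLED	0	/* bit index in the flags word */

static void demo_disable_once(struct pci_dev *pdev, unsigned long *flags)
{
	/* returns the old bit value: only the first caller gets false */
	if (!test_and_set_bit(DEMO_DISABLED, flags))
		pci_disable_device(pdev);
}

static void demo_mark_enabled(unsigned long *flags)
{
	smp_mb__before_atomic();	/* order prior stores, as in resume */
	clear_bit(DEMO_DISABLED, flags);
}
]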
@@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev)
pr_err("Cannot enable PCI device from suspend\n");
return err;
}
+
+ /* flush memory to make sure state is correct */
+ smp_mb__before_atomic();
+ clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
e1000_down(adapter);
- pci_disable_device(pdev);
+
+ if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+ pci_disable_device(pdev);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
pr_err("Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
+
+ /* flush memory to make sure state is correct */
+ smp_mb__before_atomic();
+ clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index d6d4ed7acf03..31277d3bb7dc 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1367,6 +1367,9 @@ out:
* Checks to see if the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists.
+ *
+ * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
+ * up).
**/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
@@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* Change or Rx Sequence Error interrupt.
*/
if (!mac->get_link_status)
- return 0;
+ return 1;
/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
@@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* different link partner.
*/
ret_val = e1000e_config_fc_after_link_up(hw);
- if (ret_val)
+ if (ret_val) {
e_dbg("Error configuring flow control\n");
+ return ret_val;
+ }
- return ret_val;
+ return 1;
}
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
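[Editor's note: the new kernel-doc above pins down a tri-state convention: negative for errors, 0 for link down, 1 for link up, so callers must compare against specific values rather than treat the result as a boolean. An illustrative sketch with invented names:

#include <linux/types.h>

struct demo_hw {
	bool link_up;
};

static s32 demo_read_phy(struct demo_hw *hw) { return 0; }

/* <0 = error code, 0 = link down, 1 = link up */
static s32 demo_check_link(struct demo_hw *hw)
{
	s32 ret = demo_read_phy(hw);

	if (ret)
		return ret;
	return hw->link_up ? 1 : 0;
}
]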
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 9f18d39bdc8f..1298b69f990b 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3303,9 +3303,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_IS_ICH) {
u32 rxdctl = er32(RXDCTL(0));
- ew32(RXDCTL(0), rxdctl | 0x3);
+ ew32(RXDCTL(0), rxdctl | 0x3 | BIT(8));
}
+ dev_info(&adapter->pdev->dev,
+ "Some CPU C-states have been disabled in order to enable jumbo frames\n");
pm_qos_update_request(&adapter->pm_qos_req, lat);
} else {
pm_qos_update_request(&adapter->pm_qos_req,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index ea3ab24265ee..760cfa52d02c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -353,7 +353,7 @@ int fm10k_iov_resume(struct pci_dev *pdev)
struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
/* allocate all but the last GLORT to the VFs */
- if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT))
+ if (i == (~hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT))
break;
/* assign GLORT to VF, and restrict it to multicast */
@@ -511,7 +511,7 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
return err;
/* allocate VFs if not already allocated */
- if (num_vfs && (num_vfs != current_vfs)) {
+ if (num_vfs && num_vfs != current_vfs) {
/* Disable completer abort error reporting as
* the VFs can trigger this any time they read a queue
* that they don't own.
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 538b42d5c187..8e12aae065d8 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -446,13 +446,13 @@ static void fm10k_type_trans(struct fm10k_ring *rx_ring,
skb->protocol = eth_type_trans(skb, dev);
+ /* Record Rx queue, or update macvlan statistics */
if (!l2_accel)
- return;
-
- /* update MACVLAN statistics */
- macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1,
- !!(rx_desc->w.hdr_info &
- cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK)));
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ else
+ macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
+ (skb->pkt_type == PACKET_BROADCAST) ||
+ (skb->pkt_type == PACKET_MULTICAST));
}
/**
@@ -479,8 +479,6 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
- skb_record_rx_queue(skb, rx_ring->queue_index);
-
FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;
if (rx_desc->w.vlan) {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index adc62fb38c49..a38ae5c54da3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -934,8 +934,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
if (vid >= VLAN_N_VID)
return -EINVAL;
- /* Verify we have permission to add VLANs */
- if (hw->mac.vlan_override)
+ /* Verify that we have permission to add VLANs. If this is a request
+ * to remove a VLAN, we still want to allow the user to remove the
+ * VLAN device. In that case, we need to clear the bit in the
+ * active_vlans bitmask.
+ */
+ if (set && hw->mac.vlan_override)
return -EACCES;
/* update active_vlans bitmask */
@@ -954,6 +958,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
rx_ring->vid &= ~FM10K_VLAN_CLEAR;
}
+ /* If our VLAN has been overridden, there is no reason to send VLAN
+ * removal requests as they will be silently ignored.
+ */
+ if (hw->mac.vlan_override)
+ return 0;
+
/* Do not remove default VLAN ID related entries from VLAN and MAC
* tables
*/
@@ -1040,14 +1050,13 @@ static int __fm10k_uc_sync(struct net_device *dev,
const unsigned char *addr, bool sync)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_hw *hw = &interface->hw;
u16 vid, glort = interface->glort;
s32 err;
if (!is_valid_ether_addr(addr))
return -EADDRNOTAVAIL;
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
+ for (vid = fm10k_find_next_vlan(interface, 0);
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
err = fm10k_queue_mac_request(interface, glort,
@@ -1106,14 +1115,13 @@ static int __fm10k_mc_sync(struct net_device *dev,
const unsigned char *addr, bool sync)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_hw *hw = &interface->hw;
u16 vid, glort = interface->glort;
s32 err;
if (!is_multicast_ether_addr(addr))
return -EADDRNOTAVAIL;
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
+ for (vid = fm10k_find_next_vlan(interface, 0);
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
err = fm10k_queue_mac_request(interface, glort,
@@ -1157,10 +1165,12 @@ static void fm10k_set_rx_mode(struct net_device *dev)
/* update xcast mode first, but only if it changed */
if (interface->xcast_mode != xcast_mode) {
- /* update VLAN table */
+ /* update VLAN table when entering promiscuous mode */
if (xcast_mode == FM10K_XCAST_MODE_PROMISC)
fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL,
0, true);
+
+ /* clear VLAN table when exiting promiscuous mode */
if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
fm10k_clear_unused_vlans(interface);
@@ -1182,9 +1192,10 @@ static void fm10k_set_rx_mode(struct net_device *dev)
void fm10k_restore_rx_state(struct fm10k_intfc *interface)
{
+ struct fm10k_l2_accel *l2_accel = interface->l2_accel;
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
- int xcast_mode;
+ int xcast_mode, i;
u16 vid, glort;
/* record glort for this interface */
@@ -1211,11 +1222,8 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL, 0,
xcast_mode == FM10K_XCAST_MODE_PROMISC);
- /* Add filter for VLAN 0 */
- fm10k_queue_vlan_request(interface, 0, 0, true);
-
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
+ for (vid = fm10k_find_next_vlan(interface, 0);
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
fm10k_queue_vlan_request(interface, vid, 0, true);
@@ -1234,6 +1242,24 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
__dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
+ /* synchronize macvlan addresses */
+ if (l2_accel) {
+ for (i = 0; i < l2_accel->size; i++) {
+ struct net_device *sdev = l2_accel->macvlan[i];
+
+ if (!sdev)
+ continue;
+
+ glort = l2_accel->dglort + 1 + i;
+
+ hw->mac.ops.update_xcast_mode(hw, glort,
+ FM10K_XCAST_MODE_MULTI);
+ fm10k_queue_mac_request(interface, glort,
+ sdev->dev_addr,
+ hw->mac.default_vid, true);
+ }
+ }
+
fm10k_mbx_unlock(interface);
/* record updated xcast mode state */
@@ -1490,7 +1516,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
hw->mac.ops.update_xcast_mode(hw, glort,
FM10K_XCAST_MODE_MULTI);
fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
- 0, true);
+ hw->mac.default_vid, true);
}
fm10k_mbx_unlock(interface);
@@ -1530,7 +1556,7 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
hw->mac.ops.update_xcast_mode(hw, glort,
FM10K_XCAST_MODE_NONE);
fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
- 0, false);
+ hw->mac.default_vid, false);
}
fm10k_mbx_unlock(interface);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 7f605221a686..a434fecfdfeb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2463,7 +2463,6 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
return err;
}
-#ifdef CONFIG_PM
/**
* fm10k_resume - Generic PM resume hook
* @dev: generic device structure
@@ -2472,7 +2471,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
* suspend or hibernation. This function does not need to handle lower PCIe
* device state as the stack takes care of that for us.
**/
-static int fm10k_resume(struct device *dev)
+static int __maybe_unused fm10k_resume(struct device *dev)
{
struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
struct net_device *netdev = interface->netdev;
@@ -2499,7 +2498,7 @@ static int fm10k_resume(struct device *dev)
* system suspend or hibernation. This function does not need to handle lower
* PCIe device state as the stack takes care of that for us.
**/
-static int fm10k_suspend(struct device *dev)
+static int __maybe_unused fm10k_suspend(struct device *dev)
{
struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
struct net_device *netdev = interface->netdev;
@@ -2511,8 +2510,6 @@ static int fm10k_suspend(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
-
/**
* fm10k_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -2643,11 +2640,9 @@ static struct pci_driver fm10k_driver = {
.id_table = fm10k_pci_tbl,
.probe = fm10k_probe,
.remove = fm10k_remove,
-#ifdef CONFIG_PM
.driver = {
.pm = &fm10k_pm_ops,
},
-#endif /* CONFIG_PM */
.sriov_configure = fm10k_iov_configure,
.err_handler = &fm10k_err_handler
};
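[Editor's note: the fm10k diff above swaps #ifdef CONFIG_PM guards for __maybe_unused: the callbacks are always compiled (catching bitrot on !CONFIG_PM builds), and the compiler silently drops them when the pm ops table does not reference them. A minimal hypothetical shape:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused demo_suspend(struct device *dev)
{
	return 0;	/* quiesce the device here */
}

static int __maybe_unused demo_resume(struct device *dev)
{
	return 0;	/* bring the device back here */
}

/* expands to an empty table when CONFIG_PM_SLEEP is off */
static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);
]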
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 425d814aed4d..d6406fc31ffb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -866,7 +866,7 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
/* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
* used here to indicate to the VF that it will not have privilege to
* write VLAN_TABLE. All policy is enforced on the PF but this allows
- * the VF to correctly report errors to userspace rqeuests.
+ * the VF to correctly report errors to userspace requests.
*/
if (vf_info->pf_vid)
vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index e019baa905c5..46e9f4e0a02c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -508,39 +508,40 @@ struct i40e_pf {
#define I40E_HW_PORT_ID_VALID BIT(17)
#define I40E_HW_RESTART_AUTONEG BIT(18)
- u32 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40E_FLAG_MSI_ENABLED BIT(1)
-#define I40E_FLAG_MSIX_ENABLED BIT(2)
-#define I40E_FLAG_RSS_ENABLED BIT(3)
-#define I40E_FLAG_VMDQ_ENABLED BIT(4)
-#define I40E_FLAG_FILTER_SYNC BIT(5)
-#define I40E_FLAG_SRIOV_ENABLED BIT(6)
-#define I40E_FLAG_DCB_CAPABLE BIT(7)
-#define I40E_FLAG_DCB_ENABLED BIT(8)
-#define I40E_FLAG_FD_SB_ENABLED BIT(9)
-#define I40E_FLAG_FD_ATR_ENABLED BIT(10)
-#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT(11)
-#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT(12)
-#define I40E_FLAG_MFP_ENABLED BIT(13)
-#define I40E_FLAG_UDP_FILTER_SYNC BIT(14)
-#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(15)
-#define I40E_FLAG_VEB_MODE_ENABLED BIT(16)
-#define I40E_FLAG_VEB_STATS_ENABLED BIT(17)
-#define I40E_FLAG_LINK_POLLING_ENABLED BIT(18)
-#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(19)
-#define I40E_FLAG_TEMP_LINK_POLLING BIT(20)
-#define I40E_FLAG_LEGACY_RX BIT(21)
-#define I40E_FLAG_PTP BIT(22)
-#define I40E_FLAG_IWARP_ENABLED BIT(23)
-#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT(24)
-#define I40E_FLAG_CLIENT_L2_CHANGE BIT(25)
-#define I40E_FLAG_CLIENT_RESET BIT(26)
-#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(27)
-#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(28)
-#define I40E_FLAG_TC_MQPRIO BIT(29)
-#define I40E_FLAG_FD_SB_INACTIVE BIT(30)
-#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(31)
+ u64 flags;
+#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(0)
+#define I40E_FLAG_MSI_ENABLED BIT_ULL(1)
+#define I40E_FLAG_MSIX_ENABLED BIT_ULL(2)
+#define I40E_FLAG_RSS_ENABLED BIT_ULL(3)
+#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(4)
+#define I40E_FLAG_FILTER_SYNC BIT_ULL(5)
+#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(6)
+#define I40E_FLAG_DCB_CAPABLE BIT_ULL(7)
+#define I40E_FLAG_DCB_ENABLED BIT_ULL(8)
+#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(9)
+#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(10)
+#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(11)
+#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(12)
+#define I40E_FLAG_MFP_ENABLED BIT_ULL(13)
+#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(14)
+#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(15)
+#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(16)
+#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(17)
+#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(18)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(19)
+#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(20)
+#define I40E_FLAG_LEGACY_RX BIT_ULL(21)
+#define I40E_FLAG_PTP BIT_ULL(22)
+#define I40E_FLAG_IWARP_ENABLED BIT_ULL(23)
+#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(24)
+#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(25)
+#define I40E_FLAG_CLIENT_RESET BIT_ULL(26)
+#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT_ULL(27)
+#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT_ULL(28)
+#define I40E_FLAG_TC_MQPRIO BIT_ULL(29)
+#define I40E_FLAG_FD_SB_INACTIVE BIT_ULL(30)
+#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT_ULL(31)
+#define I40E_FLAG_DISABLE_FW_LLDP BIT_ULL(32)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
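[Editor's note: widening pf->flags to u64 is why every mask above becomes BIT_ULL(): BIT(32) would shift a 32-bit-wide 1UL off the end on 32-bit kernels. Readers and writers must then use 64-bit atomics as well, which is what the later cmpxchg64() change in i40e_set_priv_flags() does. A compact, hypothetical illustration:

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/types.h>

static u64 demo_flags;

#define DEMO_FLAG_NEW	BIT_ULL(32)	/* would overflow plain BIT() */

static bool demo_set_new_flag(void)
{
	u64 old = READ_ONCE(demo_flags);

	/* publish all 64 bits atomically; fails if someone raced us */
	return cmpxchg64(&demo_flags, old, old | DEMO_FLAG_NEW) == old;
}
]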
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9af74253c3f7..e78971605e0b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -907,10 +907,15 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
- "AQTX: Writeback timeout.\n");
- status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
}
asq_send_command_error:
@@ -971,7 +976,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
}
/* set next_to_use to head */
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
@@ -1027,7 +1032,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
- i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode));
+ i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index b0188b8f91ba..a852775d3059 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -198,13 +198,14 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- /* Pipeline Personalization Profile */
+ /* Dynamic Device Personalization */
i40e_aqc_opc_write_personalization_profile = 0x0270,
i40e_aqc_opc_get_personalization_profile_list = 0x0271,
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_set_dcb_parameters = 0x0303,
/* TX scheduler */
i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
@@ -1594,7 +1595,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
struct i40e_aqc_write_personalization_profile {
u8 flags;
u8 reserved[3];
@@ -1605,7 +1606,7 @@ struct i40e_aqc_write_personalization_profile {
I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
__le32 error_offset;
__le32 error_info;
__le32 addr_high;
@@ -1614,8 +1615,8 @@ struct i40e_aqc_write_ppp_resp {
struct i40e_aqc_get_applied_profiles {
u8 flags;
-#define I40E_AQC_GET_PPP_GET_CONF 0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+#define I40E_AQC_GET_DDP_GET_CONF 0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
u8 rsv[3];
__le32 reserved;
__le32 addr_high;
@@ -2231,8 +2232,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
*/
struct i40e_aqc_nvm_update {
u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
u8 module_pointer;
__le16 length;
__le32 offset;
@@ -2492,6 +2497,17 @@ struct i40e_aqc_lldp_start {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+ u8 command;
+#define I40E_AQ_DCB_SET_AGENT 0x1
+#define I40E_DCB_VALID 0x1
+ u8 valid_flags;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
+
/* Get CEE DCBX Oper Config (0x0A07)
* uses the generic descriptor struct
* returns below as indirect response
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 1b1e2acbd07f..0de9610c1d8d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -378,11 +378,11 @@ void i40e_client_subtask(struct i40e_pf *pf)
if (!client || !cdev)
return;
- /* Here we handle client opens. If the client is down, but
- * the netdev is up, then open the client.
+ /* Here we handle client opens. If the client is down, and
+ * the netdev is registered, then open the client.
*/
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
- if (!test_bit(__I40E_VSI_DOWN, vsi->state) &&
+ if (vsi->netdev_registered &&
client->ops && client->ops->open) {
set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
ret = client->ops->open(&cdev->lan_info, client);
@@ -393,17 +393,19 @@ void i40e_client_subtask(struct i40e_pf *pf)
i40e_client_del_instance(pf);
}
}
- } else {
- /* Likewise for client close. If the client is up, but the netdev
- * is down, then close the client.
- */
- if (test_bit(__I40E_VSI_DOWN, vsi->state) &&
- client->ops && client->ops->close) {
- clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
- client->ops->close(&cdev->lan_info, client, false);
- i40e_client_release_qvlist(&cdev->lan_info);
- }
}
+
+ /* enable/disable PE TCP_ENA flag based on netdev down/up
+ */
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
+ 0, 0, 0,
+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
+ else
+ i40e_client_update_vsi_ctxt(&cdev->lan_info, client,
+ 0, 0,
+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE,
+ I40E_CLIENT_VSI_FLAG_TCP_ENABLE);
}
/**
@@ -717,13 +719,13 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
return -ENOENT;
}
- if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
- (flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+ if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
+ (flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
ctxt.info.valid_sections =
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
- } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
- !(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+ } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE) &&
+ !(flag & I40E_CLIENT_VSI_FLAG_TCP_ENABLE)) {
ctxt.info.valid_sections =
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index 15b21a5315b5..ba55c889e4c5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -132,7 +132,7 @@ struct i40e_info {
#define I40E_CLIENT_RESET_LEVEL_PF 1
#define I40E_CLIENT_RESET_LEVEL_CORE 2
-#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE BIT(1)
+#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1)
struct i40e_ops {
/* setup_q_vector_list enables queues with a particular vector */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 095965f268bd..ef5a868aae46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -278,6 +278,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
return "I40E_NOT_SUPPORTED";
case I40E_ERR_FIRMWARE_API_VERSION:
return "I40E_ERR_FIRMWARE_API_VERSION";
+ case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -1486,6 +1488,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
continue;
default:
break;
@@ -1534,6 +1537,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
continue;
default:
break;
@@ -1544,9 +1548,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
- if (mode == I40E_LINK_ACTIVITY)
- blink = false;
-
if (blink)
gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else
@@ -3465,13 +3466,14 @@ exit:
* @length: length of the section to be written (in bytes from the offset)
* @data: command buffer (size [bytes] = length)
* @last_command: tells if this is the last command in a series
+ * @preservation_flags: Preservation mode flags
* @cmd_details: pointer to command details structure or NULL
*
* Update the NVM using the admin queue commands
**/
i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
- bool last_command,
+ bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -3490,6 +3492,16 @@ i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ }
cmd->module_pointer = module_pointer;
cmd->offset = cpu_to_le32(offset);
cmd->length = cpu_to_le16(length);
@@ -3629,7 +3641,34 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
cmd->command = I40E_AQ_LLDP_AGENT_START;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+/**
+ * i40e_aq_set_dcb_parameters
+ * @hw: pointer to the hw struct
+ * @dcb_enable: True if DCB configuration needs to be applied
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ **/
+enum i40e_status_code
+i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_dcb_parameters *cmd =
+ (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_dcb_parameters);
+
+ if (dcb_enable) {
+ cmd->valid_flags = I40E_DCB_VALID;
+ cmd->command = I40E_AQ_DCB_SET_AGENT;
+ }
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -5236,7 +5275,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
}
/**
- * i40e_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
@@ -5246,7 +5285,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
@@ -5255,7 +5294,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
- struct i40e_aqc_write_ppp_resp *resp;
+ struct i40e_aqc_write_ddp_resp *resp;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -5271,7 +5310,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
- resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
@@ -5282,14 +5321,14 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
}
/**
- * i40e_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -5364,11 +5403,6 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 offset = 0, info = 0;
u32 i;
- if (!track_id) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
- return I40E_NOT_SUPPORTED;
- }
-
dev_cnt = profile->device_table_count;
for (i = 0; i < dev_cnt; i++) {
@@ -5378,7 +5412,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
break;
}
if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
return I40E_ERR_DEVICE_NOT_SUPPORTED;
}
@@ -5397,7 +5431,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
sizeof(struct i40e_profile_section_header);
/* Write profile */
- status = i40e_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
@@ -5439,10 +5473,10 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
sec->section.offset);
pinfo->track_id = track_id;
pinfo->version = profile->version;
- pinfo->op = I40E_PPP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
- status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
track_id, &offset, &info, NULL);
return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 5f6cf7212d4f..2f5bee713fef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -126,6 +126,10 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
+ I40E_PF_STAT("priority_xon_rx", stats.priority_xon_rx),
+ I40E_PF_STAT("priority_xoff_rx", stats.priority_xoff_rx),
+ I40E_PF_STAT("priority_xon_tx", stats.priority_xon_tx),
+ I40E_PF_STAT("priority_xoff_tx", stats.priority_xoff_tx),
I40E_PF_STAT("rx_size_64", stats.rx_size_64),
I40E_PF_STAT("rx_size_127", stats.rx_size_127),
I40E_PF_STAT("rx_size_255", stats.rx_size_255),
@@ -229,6 +233,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
I40E_PRIV_FLAG("disable-source-pruning",
I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
+ I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -1585,6 +1590,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/
rx_rings[i].desc = NULL;
rx_rings[i].rx_bi = NULL;
+ /* Clear cloned XDP RX-queue info before setup call */
+ memset(&rx_rings[i].xdp_rxq, 0, sizeof(rx_rings[i].xdp_rxq));
/* this is to allow wr32 to have something to write to
* during early allocation of Rx buffers
*/
@@ -2299,6 +2306,8 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
struct ethtool_coalesce *ec,
int queue)
{
+ struct i40e_ring *rx_ring = vsi->rx_rings[queue];
+ struct i40e_ring *tx_ring = vsi->tx_rings[queue];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_q_vector *q_vector;
@@ -2306,26 +2315,26 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
- vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
- vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
+ rx_ring->rx_itr_setting = ec->rx_coalesce_usecs;
+ tx_ring->tx_itr_setting = ec->tx_coalesce_usecs;
if (ec->use_adaptive_rx_coalesce)
- vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC;
else
- vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+ rx_ring->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
if (ec->use_adaptive_tx_coalesce)
- vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC;
else
- vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+ tx_ring->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
- q_vector = vsi->rx_rings[queue]->q_vector;
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting);
+ q_vector = rx_ring->q_vector;
+ q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
- q_vector = vsi->tx_rings[queue]->q_vector;
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting);
+ q_vector = tx_ring->q_vector;
+ q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
@@ -2740,16 +2749,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
no_input_set:
if (input_set & I40E_L3_SRC_MASK)
- fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFF);
+ fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
if (input_set & I40E_L3_DST_MASK)
- fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFF);
+ fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
if (input_set & I40E_L4_SRC_MASK)
- fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFFFFFF);
+ fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);
if (input_set & I40E_L4_DST_MASK)
- fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFFFFFF);
+ fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
fsp->ring_cookie = RX_CLS_FLOW_DISC;
@@ -3800,6 +3809,16 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
i40e_write_fd_input_set(pf, index, new_mask);
+ /* IP_USER_FLOW filters match both IPv4/Other and IPv4/Fragmented
+ * frames. If we're programming the input set for IPv4/Other, we also
+ * need to program the IPv4/Fragmented input set. Since we don't have
+ * separate support, we'll always assume and enforce that the two flow
+ * types must have matching input sets.
+ */
+ if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ new_mask);
+
/* Add the new offset and update table, if necessary */
if (new_flex_offset) {
err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
@@ -3822,6 +3841,87 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
}
/**
+ * i40e_match_fdir_filter - Return true if two filters match
+ * @a: pointer to filter struct
+ * @b: pointer to filter struct
+ *
+ * Returns true if the two filters match exactly the same criteria. I.e. they
+ * match the same flow type and have the same parameters. We don't need to
+ * check any input-set since all filters of the same flow type must use the
+ * same input set.
+ **/
+static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a,
+ struct i40e_fdir_filter *b)
+{
+ /* The filters do not match if any of these criteria differ. */
+ if (a->dst_ip != b->dst_ip ||
+ a->src_ip != b->src_ip ||
+ a->dst_port != b->dst_port ||
+ a->src_port != b->src_port ||
+ a->flow_type != b->flow_type ||
+ a->ip4_proto != b->ip4_proto)
+ return false;
+
+ return true;
+}
+
+/**
+ * i40e_disallow_matching_filters - Check that new filters differ
+ * @vsi: pointer to the targeted VSI
+ * @input: new filter to check
+ *
+ * Due to hardware limitations, it is not possible for two filters that match
+ * similar criteria to be programmed at the same time. This is true for a few
+ * reasons:
+ *
+ * (a) all filters matching a particular flow type must use the same input
+ * set, that is they must match the same criteria.
+ * (b) different flow types will never match the same packet, as the flow type
+ * is decided by hardware before checking which rules apply.
+ * (c) hardware has no way to distinguish which order filters apply in.
+ *
+ * Due to this, we can't really support using the location data to order
+ * filters in the hardware parsing. It is technically possible for the user to
+ * request two filters matching the same criteria but which select different
+ * queues. In this case, rather than keep both filters in the list, we reject
+ * the 2nd filter when the user requests adding it.
+ *
+ * This avoids needing to track location for programming the filter to
+ * hardware, and ensures that we avoid some strange scenarios involving
+ * deleting filters which match the same criteria.
+ **/
+static int i40e_disallow_matching_filters(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_fdir_filter *rule;
+ struct hlist_node *node2;
+
+ /* Loop through every filter, and check that it doesn't match */
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ /* Don't check the filters match if they share the same fd_id,
+ * since the new filter is actually just updating the target
+ * of the old filter.
+ */
+ if (rule->fd_id == input->fd_id)
+ continue;
+
+ /* If any filters match, then print a warning message to the
+ * kernel message buffer and bail out.
+ */
+ if (i40e_match_fdir_filter(rule, input)) {
+ dev_warn(&pf->pdev->dev,
+ "Existing user defined filter %d already matches this flow.\n",
+ rule->fd_id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
* i40e_add_fdir_ethtool - Add/Remove Flow Director filters
* @vsi: pointer to the targeted VSI
* @cmd: command to get or set RX flow classification rules
@@ -3933,19 +4033,25 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->flex_offset = userdef.flex_offset;
}
- ret = i40e_add_del_fdir(vsi, input, true);
+ /* Avoid programming two filters with identical match criteria. */
+ ret = i40e_disallow_matching_filters(vsi, input);
if (ret)
- goto free_input;
+ goto free_filter_memory;
/* Add the input filter to the fdir_input_list, possibly replacing
* a previous filter. Do not free the input structure after adding it
* to the list as this would cause a use-after-free bug.
*/
i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
-
+ ret = i40e_add_del_fdir(vsi, input, true);
+ if (ret)
+ goto remove_sw_rule;
return 0;
-free_input:
+remove_sw_rule:
+ hlist_del(&input->fdir_node);
+ pf->fdir_pf_active_filters--;
+free_filter_memory:
kfree(input);
return ret;
}
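[Editor's note: the reordering above establishes a validate, link into the software list, program hardware, unwind-on-failure sequence, so the list never retains a rule the hardware rejected. A hedged sketch of that shape; the demo_* names are invented:

#include <linux/list.h>

struct demo_rule {
	struct hlist_node node;
};

struct demo_table {
	struct hlist_head rules;
	int active;
};

static int demo_program_hw(struct demo_table *t, struct demo_rule *r)
{
	return 0;	/* stand-in for the hardware write */
}

static int demo_add_rule(struct demo_table *t, struct demo_rule *r)
{
	int err;

	hlist_add_head(&r->node, &t->rules);
	t->active++;

	err = demo_program_hw(t, r);
	if (err) {
		hlist_del(&r->node);	/* roll back software state */
		t->active--;
	}
	return err;
}
]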
@@ -4258,7 +4364,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- u32 orig_flags, new_flags, changed_flags;
+ u64 orig_flags, new_flags, changed_flags;
u32 i, j;
orig_flags = READ_ONCE(pf->flags);
@@ -4309,13 +4415,32 @@ flags_complete:
!(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
return -EOPNOTSUPP;
+ /* Disable FW LLDP not supported if NPAR active or if FW
+ * API version < 1.7
+ */
+ if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (pf->hw.func_caps.npar_enable) {
+ dev_warn(&pf->pdev->dev,
+ "Unable to stop FW LLDP if NPAR active\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (pf->hw.aq.api_maj_ver < 1 ||
+ (pf->hw.aq.api_maj_ver == 1 &&
+ pf->hw.aq.api_min_ver < 7)) {
+ dev_warn(&pf->pdev->dev,
+ "FW ver does not support stopping FW LLDP\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
/* Compare and exchange the new flags into place. If we failed, that
* is if cmpxchg returns anything but the old value, this means that
* something else has modified the flags variable since we copied it
* originally. We'll just punt with an error and log something in the
* message buffer.
*/
- if (cmpxchg(&pf->flags, orig_flags, new_flags) != orig_flags) {
+ if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) {
dev_warn(&pf->pdev->dev,
"Unable to update pf->flags as it was modified by another thread...\n");
return -EAGAIN;
@@ -4354,12 +4479,37 @@ flags_complete:
}
}
+ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ struct i40e_dcbx_config *dcbcfg;
+ int i;
+
+ i40e_aq_stop_lldp(&pf->hw, true, NULL);
+ i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
+ /* reset local_dcbx_config to default */
+ dcbcfg = &pf->hw.local_dcbx_config;
+ dcbcfg->etscfg.willing = 1;
+ dcbcfg->etscfg.maxtcs = 0;
+ dcbcfg->etscfg.tcbwtable[0] = 100;
+ for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etscfg.tcbwtable[i] = 0;
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ dcbcfg->etscfg.prioritytable[i] = 0;
+ dcbcfg->etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
+ dcbcfg->pfc.willing = 1;
+ dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ } else {
+ i40e_aq_start_lldp(&pf->hw, NULL);
+ }
+ }
+
/* Issue reset to cause things to take effect, as additional bits
* are added we will need to create a mask of bits requiring reset
*/
if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
I40E_FLAG_LEGACY_RX |
- I40E_FLAG_SOURCE_PRUNING_DISABLED))
+ I40E_FLAG_SOURCE_PRUNING_DISABLED |
+ I40E_FLAG_DISABLE_FW_LLDP))
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 321d8be80871..f95ce9b5e4fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -47,8 +47,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 1
-#define DRV_VERSION_BUILD 14
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
else
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
+ /* Copy the address first, so that we avoid a possible race with
+ * .set_rx_mode(). If we copy after changing the address in the filter
+ * list, we might open ourselves to a narrow race window where
+ * .set_rx_mode could delete our dev_addr filter and prevent traffic
+ * from passing.
+ */
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, netdev->dev_addr);
i40e_add_mac_filter(vsi, addr->sa_data);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
@@ -1811,6 +1818,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
num_tc_qps = qcount / numtc;
num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
+ /* Do not allow more TC queue pairs to be used than MSI-X vectors exist */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
+
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
/* See if the given TC is enabled for the given VSI */
@@ -1923,6 +1934,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ /* Under some circumstances, we might receive a request to delete
+ * our own device address from our uc list. Because we store the
+ * device address in the VSI's MAC/VLAN filter list, we need to ignore
+ * such requests and not delete our device address from this list.
+ */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
i40e_del_mac_filter(vsi, addr);
return 0;
@@ -4107,6 +4126,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
q_vector->num_ringpairs = num_ringpairs;
+ q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
q_vector->rx.count = 0;
q_vector->tx.count = 0;
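
Caching q_vector->reg_idx when rings are mapped removes the repeated v_idx + base_vector - 1 arithmetic from the hot paths touched later in this series. The minus one reflects that, in MSI-X mode, vector 0 is reserved for miscellaneous (non-queue) interrupts; a small sketch under that assumption:

#include <assert.h>
#include <stdint.h>

/* Vector 0 is the misc/admin vector, so queue vectors start at
 * base_vector and their register index is shifted down by one.
 * Illustrative helper, not the driver's API.
 */
static uint16_t queue_vector_reg_idx(uint16_t v_idx, uint16_t base_vector)
{
	return v_idx + base_vector - 1;
}

int main(void)
{
	/* With base_vector = 1, queue vector 0 maps to register 0. */
	assert(queue_vector_reg_idx(0, 1) == 0);
	assert(queue_vector_reg_idx(3, 1) == 3);
	return 0;
}
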
@@ -4862,104 +4882,6 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
#endif
/**
- * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
- * @q_idx: TX queue number
- * @vsi: Pointer to VSI struct
- *
- * This function checks specified queue for given VSI. Detects hung condition.
- * We proactively detect hung TX queues by checking if interrupts are disabled
- * but there are pending descriptors. If it appears hung, attempt to recover
- * by triggering a SW interrupt.
- **/
-static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
-{
- struct i40e_ring *tx_ring = NULL;
- struct i40e_pf *pf;
- u32 val, tx_pending;
- int i;
-
- pf = vsi->back;
-
- /* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
- if (q_idx == vsi->tx_rings[i]->queue_index) {
- tx_ring = vsi->tx_rings[i];
- break;
- }
- }
- }
-
- if (!tx_ring)
- return;
-
- /* Read interrupt register */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
- val = rd32(&pf->hw,
- I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
- tx_ring->vsi->base_vector - 1));
- else
- val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
-
- tx_pending = i40e_get_tx_pending(tx_ring);
-
- /* Interrupts are disabled and TX pending is non-zero,
- * trigger the SW interrupt (don't wait). Worst case
- * there will be one extra interrupt which may result
- * into not cleaning any queues because queues are cleaned.
- */
- if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
- i40e_force_wb(vsi, tx_ring->q_vector);
-}
-
-/**
- * i40e_detect_recover_hung - Function to detect and recover hung_queues
- * @pf: pointer to PF struct
- *
- * LAN VSI has netdev and netdev has TX queues. This function is to check
- * each of those TX queues if they are hung, trigger recovery by issuing
- * SW interrupt.
- **/
-static void i40e_detect_recover_hung(struct i40e_pf *pf)
-{
- struct net_device *netdev;
- struct i40e_vsi *vsi;
- unsigned int i;
-
- /* Only for LAN VSI */
- vsi = pf->vsi[pf->lan_vsi];
-
- if (!vsi)
- return;
-
- /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
- if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
- return;
-
- /* Make sure type is MAIN VSI */
- if (vsi->type != I40E_VSI_MAIN)
- return;
-
- netdev = vsi->netdev;
- if (!netdev)
- return;
-
- /* Bail out if netif_carrier is not OK */
- if (!netif_carrier_ok(netdev))
- return;
-
- /* Go thru' TX queues for netdev */
- for (i = 0; i < netdev->num_tx_queues; i++) {
- struct netdev_queue *q;
-
- q = netdev_get_tx_queue(netdev, i);
- if (q)
- i40e_detect_recover_hung_queue(i, vsi);
- }
-}
-
-/**
* i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
* @pf: pointer to PF
*
@@ -5327,6 +5249,8 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
struct i40e_vsi_context ctxt;
int ret = 0;
int i;
@@ -5344,10 +5268,40 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
if (ret) {
- dev_info(&vsi->back->pdev->dev,
+ struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+
+ dev_info(&pf->pdev->dev,
"Failed configuring TC map %d for VSI %d\n",
enabled_tc, vsi->seid);
- goto out;
+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
+ &bw_config, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed querying vsi bw info, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ goto out;
+ }
+ if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
+ u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
+
+ if (!valid_tc)
+ valid_tc = bw_config.tc_valid_bits;
+ /* Always enable TC0, no matter what */
+ valid_tc |= 1;
+ dev_info(&pf->pdev->dev,
+ "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
+ enabled_tc, bw_config.tc_valid_bits, valid_tc);
+ enabled_tc = valid_tc;
+ }
+
+ ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Unable to configure TC map %d for VSI %d\n",
+ enabled_tc, vsi->seid);
+ goto out;
+ }
}
/* Update Queue Pairs Mapping for currently enabled UPs */
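
The recovery path above intersects the requested TC bitmap with what firmware reports valid, falls back to the firmware's map when the intersection is empty, and always keeps TC0 (bit 0) set. The same selection logic in isolation:

#include <stdint.h>
#include <stdio.h>

/* Pick a usable TC bitmap given the requested map and the map the
 * firmware reports as valid. TC0 is always forced on.
 */
static uint8_t pick_valid_tc(uint8_t requested, uint8_t fw_valid)
{
	uint8_t valid = requested & fw_valid;

	if (!valid)		/* nothing overlaps: trust the firmware */
		valid = fw_valid;
	return valid | 1;	/* always enable TC0 */
}

int main(void)
{
	/* Requested TCs 1+2 (0x06), firmware only validates TC0+1 (0x03):
	 * the overlap is TC1, plus forced TC0 -> 0x03.
	 */
	printf("0x%02x\n", pick_valid_tc(0x06, 0x03));
	return 0;
}
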
@@ -5387,13 +5341,12 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
/* Update the VSI after updating the VSI queue-mapping
* information
*/
- ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
- dev_info(&vsi->back->pdev->dev,
+ dev_info(&pf->pdev->dev,
"Update vsi tc config failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
/* update the local VSI info with updated queue map */
@@ -5403,11 +5356,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
/* Update current VSI BW information */
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
- dev_info(&vsi->back->pdev->dev,
+ dev_info(&pf->pdev->dev,
"Failed updating vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -6038,8 +5990,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
/* Set Bit 7 to be valid */
mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
- /* Set L4type to both TCP and UDP support */
- mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
+ /* Set L4type for TCP support */
+ mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
/* Set cloud filter mode */
mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
@@ -6373,8 +6325,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
int err = 0;
- /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
- if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
+ /* Do not enable DCB for SW1 and SW2 images even if the FW is capable
+ * Also do not enable DCBx if FW LLDP agent is disabled
+ */
+ if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
+ (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
goto out;
/* Get the initial DCB configuration */
@@ -6401,6 +6356,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
+ } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
+ pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
} else {
dev_info(&pf->pdev->dev,
"Query for DCB configuration failed, err %s aq_err %s\n",
@@ -6969,18 +6927,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
is_valid_ether_addr(filter->src_mac)) ||
(is_multicast_ether_addr(filter->dst_mac) &&
is_multicast_ether_addr(filter->src_mac)))
- return -EINVAL;
+ return -EOPNOTSUPP;
- /* Make sure port is specified, otherwise bail out, for channel
- * specific cloud filter needs 'L4 port' to be non-zero
+ /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
+ * ports are not currently supported via the big buffer method.
*/
- if (!filter->dst_port)
- return -EINVAL;
+ if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
+ return -EOPNOTSUPP;
/* adding filter using src_port/src_ip is not supported at this stage */
if (filter->src_port || filter->src_ipv4 ||
!ipv6_addr_any(&filter->ip.v6.src_ip6))
- return -EINVAL;
+ return -EOPNOTSUPP;
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter.element);
@@ -6991,7 +6949,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
is_multicast_ether_addr(filter->src_mac)) {
/* MAC + IP : unsupported mode */
if (filter->dst_ipv4)
- return -EINVAL;
+ return -EOPNOTSUPP;
/* since we validated that L4 port must be valid before
* we get here, start with respective "flags" value
@@ -7356,7 +7314,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
if (tc < 0) {
dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
@@ -7490,9 +7448,6 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
{
struct i40e_vsi *vsi = np->vsi;
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return i40e_configure_clsflower(vsi, cls_flower);
@@ -7510,6 +7465,9 @@ static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct i40e_netdev_priv *np = cb_priv;
+ if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_CLSFLOWER:
return i40e_setup_tc_cls_flower(np, type_data);
@@ -7727,6 +7685,9 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
/* Reprogram the default input set for Other/IPv4 */
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
+
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
/**
@@ -9060,6 +9021,17 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
vsi->uplink_seid);
return ret;
}
+ /* Reconfigure TX queues using QTX_CTL register */
+ ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "failed to configure TX rings for channel %u\n",
+ ch->seid);
+ return ret;
+ }
+ /* update 'next_base_queue' */
+ vsi->next_base_queue = vsi->next_base_queue +
+ ch->num_queue_pairs;
if (ch->max_tx_rate) {
u64 credits = ch->max_tx_rate;
@@ -9263,6 +9235,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
goto end_core_reset;
}
+ /* Enable FW to write a default DCB config on link-up */
+ i40e_aq_set_dcb_parameters(hw, true, NULL);
+
#ifdef CONFIG_I40E_DCB
ret = i40e_init_pf_dcb(pf);
if (ret) {
@@ -9680,7 +9655,7 @@ static void i40e_service_task(struct work_struct *work)
if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
return;
- i40e_detect_recover_hung(pf);
+ i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
i40e_sync_filters_subtask(pf);
i40e_reset_subtask(pf);
i40e_handle_mdd_event(pf);
@@ -10447,10 +10422,9 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
/* set up vector assignment tracking */
size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
pf->irq_pile = kzalloc(size, GFP_KERNEL);
- if (!pf->irq_pile) {
- dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
+ if (!pf->irq_pile)
return -ENOMEM;
- }
+
pf->irq_pile->num_entries = vectors;
pf->irq_pile->search_hint = 0;
@@ -10768,8 +10742,13 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
/* Determine the RSS size of the VSI */
if (!vsi->rss_size) {
u16 qcount;
-
- qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+ /* If the firmware does something weird during VSI init, we
+ * could end up with zero TCs. Check for that to avoid
+ * divide-by-zero. It probably won't pass traffic, but it also
+ * won't panic.
+ */
+ qcount = vsi->num_queue_pairs /
+ (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
}
if (!vsi->rss_size)
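
The guarded division is a cheap defensive pattern: if firmware reports zero traffic classes, dividing by numtc-or-one degrades gracefully instead of crashing. As a standalone helper with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* Queue count per TC, tolerating a bogus zero TC count from firmware. */
static uint16_t queues_per_tc(uint16_t num_queue_pairs, uint8_t numtc)
{
	return num_queue_pairs / (numtc ? numtc : 1);
}

int main(void)
{
	printf("%u\n", queues_per_tc(16, 4)); /* 4 */
	printf("%u\n", queues_per_tc(16, 0)); /* 16, not a crash */
	return 0;
}
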
@@ -10957,7 +10936,7 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
ret = i40e_aq_update_nvm(&pf->hw,
I40E_SR_NVM_CONTROL_WORD,
0x10, sizeof(nvm_word),
- &nvm_word, true, NULL);
+ &nvm_word, true, 0, NULL);
/* Save off last admin queue command status before releasing
* the NVM
*/
@@ -11098,13 +11077,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->hw.aq.fw_maj_ver >= 6)
pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
- if (pf->hw.func_caps.vmdq) {
+ if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
pf->flags |= I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
}
- if (pf->hw.func_caps.iwarp) {
+ if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
pf->flags |= I40E_FLAG_IWARP_ENABLED;
/* IWARP needs one extra vector for CQP just like MISC.*/
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
@@ -13581,6 +13560,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
+
+ /* Enable FW to write default DCB config on link-up */
+ i40e_aq_set_dcb_parameters(hw, true, NULL);
+
#ifdef CONFIG_I40E_DCB
err = i40e_init_pf_dcb(pf);
if (err) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 7689c2ee0d46..76a5cb04e4fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -239,8 +239,9 @@ read_nvm_exit:
*
* Reads a buffer of 16 bit words from the Shadow RAM using the admin command.
**/
-static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 words, void *data,
+static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data,
bool last_command)
{
i40e_status ret_code = I40E_ERR_NVM;
@@ -389,7 +390,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
i40e_status ret_code;
- u16 read_size = *words;
+ u16 read_size;
bool last_cmd = false;
u16 words_read = 0;
u16 i = 0;
@@ -496,7 +497,8 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
- data, last_command, &cmd_details);
+ data, last_command, 0,
+ &cmd_details);
return ret_code;
}
@@ -677,6 +679,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
static inline u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -686,6 +691,12 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val)
return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
+static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
+{
+ return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
+ I40E_NVM_PRESERVATION_FLAGS_SHIFT);
+}
+
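
The new helper pulls a two-bit preservation field out of the NVM update config word, next to the existing module (bits 0-7) and transaction (bits 8-11) fields; the shift and mask constants are added to i40e_type.h later in this patch. A userspace decode of the layout:

#include <stdint.h>
#include <stdio.h>

#define NVM_MOD_PNT_MASK		0xFFu
#define NVM_TRANS_SHIFT			8
#define NVM_TRANS_MASK			(0xFu << NVM_TRANS_SHIFT)
#define NVM_PRESERVATION_FLAGS_SHIFT	12
#define NVM_PRESERVATION_FLAGS_MASK	(0x3u << NVM_PRESERVATION_FLAGS_SHIFT)

int main(void)
{
	/* Example config word: module 0x10, transaction SA (0x3),
	 * preservation flags "selected" (0x1).
	 */
	uint32_t config = 0x10 | (0x3u << NVM_TRANS_SHIFT) |
			  (0x1u << NVM_PRESERVATION_FLAGS_SHIFT);

	printf("module 0x%02x trans 0x%x preserve 0x%x\n",
	       config & NVM_MOD_PNT_MASK,
	       (config & NVM_TRANS_MASK) >> NVM_TRANS_SHIFT,
	       (config & NVM_PRESERVATION_FLAGS_MASK) >>
			NVM_PRESERVATION_FLAGS_SHIFT);
	return 0;
}
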
static const char * const i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_INVALID",
"I40E_NVMUPD_READ_CON",
@@ -703,6 +714,7 @@ static const char * const i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_STATUS",
"I40E_NVMUPD_EXEC_AQ",
"I40E_NVMUPD_GET_AQ_RESULT",
+ "I40E_NVMUPD_GET_AQ_EVENT",
};
/**
@@ -798,9 +810,9 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
* the wait info and return before doing anything else
*/
if (cmd->offset == 0xffff) {
- i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+ i40e_nvmupd_clear_wait_state(hw);
status = 0;
- goto exit;
+ break;
}
status = I40E_ERR_NOT_READY;
@@ -815,7 +827,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
*perrno = -ESRCH;
break;
}
-exit:
+
mutex_unlock(&hw->aq.arq_mutex);
return status;
}
@@ -944,6 +956,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
break;
+ case I40E_NVMUPD_GET_AQ_EVENT:
+ status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
+ break;
+
default:
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: bad cmd %s in init state\n",
@@ -1118,38 +1134,53 @@ retry:
}
/**
- * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
* @hw: pointer to the hardware structure
- * @opcode: the event that just happened
**/
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
- if (opcode == hw->nvm_wait_opcode) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
- if (hw->nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->nvm_release_on_done = false;
- }
- hw->nvm_wait_opcode = 0;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n",
+ hw->nvm_wait_opcode);
- if (hw->aq.arq_last_status) {
- hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
- return;
- }
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
+ }
+ hw->nvm_wait_opcode = 0;
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
- default:
- break;
- }
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc)
+{
+ u32 aq_desc_len = sizeof(struct i40e_aq_desc);
+
+ if (opcode == hw->nvm_wait_opcode) {
+ memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
+ i40e_nvmupd_clear_wait_state(hw);
}
}
@@ -1205,6 +1236,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
else if (module == 0)
upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
break;
+ case I40E_NVM_AQE:
+ upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
+ break;
}
break;
@@ -1267,6 +1301,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
u32 aq_data_len;
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ if (cmd->offset == 0xffff)
+ return 0;
+
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -1302,6 +1339,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
}
}
+ if (cmd->offset)
+ memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
+
/* and away we go! */
status = i40e_asq_send_command(hw, aq_desc, buff,
buff_size, &cmd_details);
@@ -1311,6 +1351,7 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ return status;
}
/* should we wait for a followup event? */
@@ -1392,6 +1433,40 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
}
/**
+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
+
+ /* check copylength range */
+ if (cmd->data_size > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, aq_total_len);
+ cmd->data_size = aq_total_len;
+ }
+
+ memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
+
+ return 0;
+}
+
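
i40e_nvmupd_get_aq_event trims the caller's requested size to the stored descriptor plus its trailing data rather than reading past the buffer. The clamp-then-copy pattern on its own, with hypothetical names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy up to @avail bytes of @src into @dst, clamping the caller's
 * requested @want. Returns the number of bytes actually copied.
 */
static size_t copy_clamped(void *dst, const void *src,
			   size_t want, size_t avail)
{
	if (want > avail) {
		fprintf(stderr, "copy length %zu too big, trimming to %zu\n",
			want, avail);
		want = avail;
	}
	memcpy(dst, src, want);
	return want;
}

int main(void)
{
	uint8_t src[32] = { 0xAB }, dst[64];

	printf("copied %zu bytes\n",
	       copy_clamped(dst, src, sizeof(dst), sizeof(src)));
	return 0;
}
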
+/**
* i40e_nvmupd_nvm_read - Read NVM
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -1486,18 +1561,20 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
i40e_status status = 0;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
+ u8 preservation_flags;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
+ preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
status = i40e_aq_update_nvm(hw, module, cmd->offset,
(u16)cmd->data_size, bytes, last,
- &cmd_details);
+ preservation_flags, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 3bb6659db822..83798b7841b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -214,7 +214,7 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
- bool last_command,
+ bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
@@ -225,6 +225,10 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+ bool dcb_enable,
+ struct i40e_asq_cmd_details
+ *cmd_details);
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
@@ -333,7 +337,9 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc);
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
@@ -343,6 +349,37 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
return i40e_ptype_lookup[ptype];
}
+/**
+ * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
+ * @link_speed: the speed to convert
+ *
+ * Returns the link_speed in terms of the virtchnl interface, for use in
+ * converting link_speed as reported by the AdminQ into the format used for
+ * talking to virtchnl devices. If we can't represent the link speed properly,
+ * report LINK_SPEED_UNKNOWN.
+ **/
+static inline enum virtchnl_link_speed
+i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+{
+ switch (link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ return VIRTCHNL_LINK_SPEED_100MB;
+ case I40E_LINK_SPEED_1GB:
+ return VIRTCHNL_LINK_SPEED_1GB;
+ case I40E_LINK_SPEED_10GB:
+ return VIRTCHNL_LINK_SPEED_10GB;
+ case I40E_LINK_SPEED_40GB:
+ return VIRTCHNL_LINK_SPEED_40GB;
+ case I40E_LINK_SPEED_20GB:
+ return VIRTCHNL_LINK_SPEED_20GB;
+ case I40E_LINK_SPEED_25GB:
+ return VIRTCHNL_LINK_SPEED_25GB;
+ case I40E_LINK_SPEED_UNKNOWN:
+ default:
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
+ }
+}
+
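
The inline helper makes the AdminQ-to-virtchnl speed mapping explicit; the i40e_virtchnl_pf.c hunk further down replaces a raw enum cast with a call to it, so unknown speeds degrade to VIRTCHNL_LINK_SPEED_UNKNOWN instead of leaking a mismatched bit pattern. A reduced sketch with hypothetical enums standing in for the two kernel types:

#include <stdio.h>

/* Illustrative stand-ins for the AdminQ and virtchnl speed enums;
 * the real values live in i40e_type.h and virtchnl.h.
 */
enum aq_speed { AQ_SPEED_UNKNOWN, AQ_SPEED_1GB, AQ_SPEED_10GB };
enum vc_speed { VC_SPEED_UNKNOWN, VC_SPEED_1GB, VC_SPEED_10GB };

static enum vc_speed vc_link_speed(enum aq_speed s)
{
	switch (s) {
	case AQ_SPEED_1GB:	return VC_SPEED_1GB;
	case AQ_SPEED_10GB:	return VC_SPEED_10GB;
	case AQ_SPEED_UNKNOWN:	/* fall through */
	default:		return VC_SPEED_UNKNOWN;
	}
}

int main(void)
{
	/* Anything the map doesn't know about degrades to UNKNOWN
	 * instead of leaking a mismatched bit pattern to the VF. */
	printf("%d\n", vc_link_speed((enum aq_speed)42));
	return 0;
}
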
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers*/
@@ -400,13 +437,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
-i40e_status i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ struct i40e_asq_cmd_details *
+ cmd_details);
+i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details);
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 5f9cac55aa55..afb72e711d43 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -95,6 +95,7 @@ enum i40e_status_code {
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
+ I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4566d66ffc7c..e554aa6cf070 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -27,6 +27,7 @@
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include <linux/bpf_trace.h>
+#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"
@@ -725,6 +726,59 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring)
return 0;
}
+/**
+ * i40e_detect_recover_hung - Function to detect and recover hung queues
+ * @vsi: pointer to vsi struct with tx queues
+ *
+ * VSI has netdev and netdev has TX queues. This function checks each of
+ * those TX queues for a hang and triggers recovery by issuing a SW interrupt.
+ **/
+void i40e_detect_recover_hung(struct i40e_vsi *vsi)
+{
+ struct i40e_ring *tx_ring = NULL;
+ struct net_device *netdev;
+ unsigned int i;
+ int packets;
+
+ if (!vsi)
+ return;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ netdev = vsi->netdev;
+ if (!netdev)
+ return;
+
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ tx_ring = vsi->tx_rings[i];
+ if (tx_ring && tx_ring->desc) {
+ /* If packet counter has not changed the queue is
+ * likely stalled, so force an interrupt for this
+ * queue.
+ *
+ * prev_pkt_ctr would be negative if there was no
+ * pending work.
+ */
+ packets = tx_ring->stats.packets & INT_MAX;
+ if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+ i40e_force_wb(vsi, tx_ring->q_vector);
+ continue;
+ }
+
+ /* Memory barrier between read of packet count and call
+ * to i40e_get_tx_pending()
+ */
+ smp_rmb();
+ tx_ring->tx_stats.prev_pkt_ctr =
+ i40e_get_tx_pending(tx_ring) ? packets : -1;
+ }
+ }
+}
+
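
The rewritten detector drops the interrupt-register read of the deleted code: it snapshots the ring's packet counter and, on the next service-task pass, forces a writeback interrupt if the counter has not moved while work was still pending. A userspace model of that two-sample heuristic:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Minimal model of one TX ring's hang-detection state. */
struct ring {
	unsigned long long packets;	/* total completed packets */
	int prev_pkt_ctr;		/* snapshot, -1 = no pending work */
	int pending;			/* descriptors still queued */
};

/* Returns true when a forced interrupt (i40e_force_wb) would fire. */
static bool poll_ring(struct ring *r)
{
	int packets = (int)(r->packets & INT_MAX);

	if (r->prev_pkt_ctr == packets)
		return true;		/* stalled: counter never moved */

	/* Re-arm: remember the counter only if work is pending. */
	r->prev_pkt_ctr = r->pending ? packets : -1;
	return false;
}

int main(void)
{
	struct ring r = { .packets = 100, .prev_pkt_ctr = -1, .pending = 1 };

	poll_ring(&r);			     /* arms the snapshot */
	printf("hung: %d\n", poll_ring(&r)); /* no progress -> 1 */
	return 0;
}
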
#define WB_STRIDE 4
/**
@@ -902,7 +956,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
wr32(&vsi->back->hw,
- I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
+ I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
val);
} else {
val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
@@ -929,8 +983,7 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
/* allow 00 to be written to the index */
wr32(&vsi->back->hw,
- I40E_PFINT_DYN_CTLN(q_vector->v_idx +
- vsi->base_vector - 1), val);
+ I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
} else {
u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
@@ -1162,6 +1215,7 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+ tx_ring->tx_stats.prev_pkt_ctr = -1;
return 0;
err:
@@ -1236,6 +1290,8 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
i40e_clean_rx_ring(rx_ring);
+ if (rx_ring->vsi->type == I40E_VSI_MAIN)
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
rx_ring->xdp_prog = NULL;
kfree(rx_ring->rx_bi);
rx_ring->rx_bi = NULL;
@@ -1256,6 +1312,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ int err = -ENOMEM;
int bi_size;
/* warn if we are about to overwrite the pointer */
@@ -1283,13 +1340,21 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ /* XDP RX-queue info only needed for RX rings exposed to XDP */
+ if (rx_ring->vsi->type == I40E_VSI_MAIN) {
+ err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->queue_index);
+ if (err < 0)
+ goto err;
+ }
+
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
return 0;
err:
kfree(rx_ring->rx_bi);
rx_ring->rx_bi = NULL;
- return -ENOMEM;
+ return err;
}
/**
@@ -2068,11 +2133,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
bool failure = false, xdp_xmit = false;
+ struct xdp_buff xdp;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
- struct xdp_buff xdp;
unsigned int size;
u16 vlan_tag;
u8 rx_ptype;
@@ -2243,7 +2310,6 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_hw *hw = &vsi->back->hw;
bool rx = false, tx = false;
u32 rxval, txval;
- int vector;
int idx = q_vector->v_idx;
int rx_itr_setting, tx_itr_setting;
@@ -2253,8 +2319,6 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
return;
}
- vector = (q_vector->v_idx + vsi->base_vector);
-
/* avoid dynamic calculation if in countdown mode OR if
* all dynamic is disabled
*/
@@ -2303,12 +2367,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval |= BIT(31);
/* don't check _DOWN because interrupt isn't being enabled */
- wr32(hw, INTREG(vector - 1), rxval);
+ wr32(hw, INTREG(q_vector->reg_idx), rxval);
}
enable_int:
if (!test_bit(__I40E_VSI_DOWN, vsi->state))
- wr32(hw, INTREG(vector - 1), txval);
+ wr32(hw, INTREG(q_vector->reg_idx), txval);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;
@@ -3047,10 +3111,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > I40E_MAX_DATA_PER_TXD) {
+ int align_pad = -(stale->page_offset) &
+ (I40E_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > I40E_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -3058,7 +3142,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
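
The -(offset) & (SIZE - 1) expression computes, for power-of-two SIZE, the number of bytes from offset up to the next SIZE boundary; peeling that pad off first lets the loop subtract whole aligned descriptors. Verifying the identity in isolation:

#include <assert.h>
#include <stdio.h>

#define READ_REQ_SIZE 4096u	/* power of two, like I40E_MAX_READ_REQ_SIZE */

/* Bytes from 'off' up to the next READ_REQ_SIZE boundary (0 if aligned). */
static unsigned int align_pad(unsigned int off)
{
	return -off & (READ_REQ_SIZE - 1);
}

int main(void)
{
	assert(align_pad(0) == 0);
	assert(align_pad(1) == READ_REQ_SIZE - 1);
	assert(align_pad(4000) == 96);
	printf("ok\n");
	return 0;
}
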
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index fbae1182e2ea..701b708628b0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -27,6 +27,8 @@
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
+#include <net/xdp.h>
+
/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
@@ -277,7 +279,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
}
/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define I40E_MIN_DESC_PENDING 4
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
@@ -331,6 +333,7 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+ int prev_pkt_ctr;
};
struct i40e_rx_queue_stats {
@@ -428,6 +431,7 @@ struct i40e_ring {
*/
struct i40e_channel *ch;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
@@ -498,6 +502,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring);
+void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 0e8568719b4e..cd294e6a8587 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -402,6 +402,7 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_STATUS,
I40E_NVMUPD_EXEC_AQ,
I40E_NVMUPD_GET_AQ_RESULT,
+ I40E_NVMUPD_GET_AQ_EVENT,
};
enum i40e_nvmupd_state {
@@ -421,15 +422,21 @@ enum i40e_nvmupd_state {
#define I40E_NVM_MOD_PNT_MASK 0xFF
-#define I40E_NVM_TRANS_SHIFT 8
-#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_CON 0x0
-#define I40E_NVM_SNT 0x1
-#define I40E_NVM_LCB 0x2
-#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA 0x4
-#define I40E_NVM_CSUM 0x8
-#define I40E_NVM_EXEC 0xf
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+ (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_AQE 0xe
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -611,6 +618,7 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
@@ -1502,19 +1510,19 @@ struct i40e_lldp_variables {
#define I40E_FLEX_57_SHIFT 6
#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization(DDP) */
+struct i40e_ddp_version {
u8 major;
u8 minor;
u8 update;
u8 draft;
};
-#define I40E_PPP_NAME_SIZE 32
+#define I40E_DDP_NAME_SIZE 32
/* Package header */
struct i40e_package_header {
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 segment_count;
u32 segment_offset[1];
};
@@ -1526,16 +1534,16 @@ struct i40e_generic_seg_header {
#define SEGMENT_TYPE_I40E 0x00000011
#define SEGMENT_TYPE_X722 0x00000012
u32 type;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 size;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 track_id;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_device_id_entry {
@@ -1545,8 +1553,8 @@ struct i40e_device_id_entry {
struct i40e_profile_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
- char name[I40E_PPP_NAME_SIZE];
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
u32 device_table_count;
struct i40e_device_id_entry device_table[1];
};
@@ -1573,11 +1581,11 @@ struct i40e_profile_section_header {
struct i40e_profile_info {
u32 track_id;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u8 op;
-#define I40E_PPP_ADD_TRACKID 0x01
-#define I40E_PPP_REMOVE_TRACKID 0x02
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
u8 reserved[7];
- u8 name[I40E_PPP_NAME_SIZE];
+ u8 name[I40E_DDP_NAME_SIZE];
};
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 36cb8e068e85..e9309fb9084b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -81,12 +81,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
- (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+ (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
} else {
pfe.event_data.link_event.link_status =
ls->link_info & I40E_AQ_LINK_UP;
pfe.event_data.link_event.link_speed =
- (enum virtchnl_link_speed)ls->link_speed;
+ i40e_virtchnl_link_speed(ls->link_speed);
}
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
@@ -2749,6 +2749,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
ret = i40e_vc_get_vf_resources_msg(vf, msg);
+ i40e_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
i40e_vc_reset_vf_msg(vf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 8b0d4b255dea..d1aab6b8bfb1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -837,10 +837,15 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
- "AQTX: Writeback timeout.\n");
- status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
}
asq_send_command_error:
@@ -901,7 +906,7 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
}
/* set next_to_use to head */
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 06b04572c518..815de8d9c3fb 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -198,13 +198,14 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- /* Pipeline Personalization Profile */
+ /* Dynamic Device Personalization */
i40e_aqc_opc_write_personalization_profile = 0x0270,
i40e_aqc_opc_get_personalization_profile_list = 0x0271,
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_set_dcb_parameters = 0x0303,
/* TX scheduler */
i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
@@ -1562,7 +1563,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
struct i40e_aqc_write_personalization_profile {
u8 flags;
u8 reserved[3];
@@ -1573,7 +1574,7 @@ struct i40e_aqc_write_personalization_profile {
I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
__le32 error_offset;
__le32 error_info;
__le32 addr_high;
@@ -1582,8 +1583,8 @@ struct i40e_aqc_write_ppp_resp {
struct i40e_aqc_get_applied_profiles {
u8 flags;
-#define I40E_AQC_GET_PPP_GET_CONF 0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+#define I40E_AQC_GET_DDP_GET_CONF 0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
u8 rsv[3];
__le32 reserved;
__le32 addr_high;
@@ -2196,8 +2197,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
*/
struct i40e_aqc_nvm_update {
u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
u8 module_pointer;
__le16 length;
__le32 offset;
@@ -2457,6 +2462,17 @@ struct i40e_aqc_lldp_start {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+ u8 command;
+#define I40E_AQ_DCB_SET_AGENT 0x1
+#define I40E_DCB_VALID 0x1
+ u8 valid_flags;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
+
/* Apply MIB changes (0x0A07)
* uses the generic struct as it contains no data
*/
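
I40E_CHECK_CMD_LENGTH exists because a direct AdminQ command must fill exactly the 16 raw parameter bytes of a descriptor, which is why the new struct is padded by hand to 16 bytes. The same guarantee as a C11 static assertion, with a mirrored (illustrative) struct:

#include <assert.h>
#include <stdint.h>

/* Mirror of the on-wire layout added above; 1 + 1 + 14 = 16 bytes,
 * the size of an AQ descriptor's raw parameter area.
 */
struct aqc_set_dcb_parameters {
	uint8_t command;
	uint8_t valid_flags;
	uint8_t reserved[14];
};

/* Fails the build, rather than corrupting a descriptor at runtime,
 * if the struct ever drifts from 16 bytes.
 */
static_assert(sizeof(struct aqc_set_dcb_parameters) == 16,
	      "AQ direct command must be 16 bytes");

int main(void) { return 0; }
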
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7d70bf69b249..67bf5cebb76f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -284,6 +284,8 @@ const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
return "I40E_NOT_SUPPORTED";
case I40E_ERR_FIRMWARE_API_VERSION:
return "I40E_ERR_FIRMWARE_API_VERSION";
+ case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -1202,7 +1204,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
}
/**
- * i40evf_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40evf_aq_write_ddp - Write dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
@@ -1212,7 +1214,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
struct i40e_asq_cmd_details *cmd_details)
@@ -1221,7 +1223,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
- struct i40e_aqc_write_ppp_resp *resp;
+ struct i40e_aqc_write_ddp_resp *resp;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc,
@@ -1237,7 +1239,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
if (!status) {
- resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
if (error_offset)
*error_offset = le32_to_cpu(resp->error_offset);
if (error_info)
@@ -1248,16 +1250,16 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
}
/**
- * i40evf_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
* @cmd_details: pointer to command details structure or NULL
**/
enum
-i40e_status_code i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details)
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_applied_profiles *cmd =
@@ -1330,11 +1332,6 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 offset = 0, info = 0;
u32 i;
- if (!track_id) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
- return I40E_NOT_SUPPORTED;
- }
-
dev_cnt = profile->device_table_count;
for (i = 0; i < dev_cnt; i++) {
@@ -1344,7 +1341,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
break;
}
if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
return I40E_ERR_DEVICE_NOT_SUPPORTED;
}
@@ -1363,7 +1360,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
sizeof(struct i40e_profile_section_header);
/* Write profile */
- status = i40evf_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
@@ -1405,10 +1402,10 @@ i40evf_add_pinfo_to_list(struct i40e_hw *hw,
sec->section.offset);
pinfo->track_id = track_id;
pinfo->version = profile->version;
- pinfo->op = I40E_PPP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
- status = i40evf_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end,
track_id, &offset, &info, NULL);
return status;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index b624b5994075..47c429931a57 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -131,13 +131,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
-i40e_status i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ struct i40e_asq_cmd_details *
+ cmd_details);
+i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details);
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40evf_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
index 7fa7a41915c1..5b222246e08b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -95,6 +95,7 @@ enum i40e_status_code {
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
+ I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 50864f99446d..357d6051281f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -148,6 +148,59 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
return 0;
}
+/**
+ * i40evf_detect_recover_hung - Function to detect and recover hung queues
+ * @vsi: pointer to vsi struct with tx queues
+ *
+ * VSI has netdev and netdev has TX queues. This function checks each of
+ * those TX queues for a hang and triggers recovery by issuing a SW interrupt.
+ **/
+void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
+{
+ struct i40e_ring *tx_ring = NULL;
+ struct net_device *netdev;
+ unsigned int i;
+ int packets;
+
+ if (!vsi)
+ return;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ netdev = vsi->netdev;
+ if (!netdev)
+ return;
+
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ for (i = 0; i < vsi->back->num_active_queues; i++) {
+ tx_ring = &vsi->back->tx_rings[i];
+ if (tx_ring && tx_ring->desc) {
+ /* If packet counter has not changed the queue is
+ * likely stalled, so force an interrupt for this
+ * queue.
+ *
+ * prev_pkt_ctr would be negative if there was no
+ * pending work.
+ */
+ packets = tx_ring->stats.packets & INT_MAX;
+ if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+ i40evf_force_wb(vsi, tx_ring->q_vector);
+ continue;
+ }
+
+ /* Memory barrier between read of packet count and call
+ * to i40evf_get_tx_pending()
+ */
+ smp_rmb();
+ tx_ring->tx_stats.prev_pkt_ctr =
+ i40evf_get_tx_pending(tx_ring, false) ? packets : -1;
+ }
+ }
+}
+
#define WB_STRIDE 4
/**
@@ -316,8 +369,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
wr32(&vsi->back->hw,
- I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
- vsi->base_vector - 1), val);
+ I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
q_vector->arm_wb_state = true;
}
@@ -336,7 +388,7 @@ void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
/* allow 00 to be written to the index */;
wr32(&vsi->back->hw,
- I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
+ I40E_VFINT_DYN_CTLN1(q_vector->reg_idx),
val);
}
@@ -469,6 +521,7 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+ tx_ring->tx_stats.prev_pkt_ctr = -1;
return 0;
err:
@@ -1444,12 +1497,9 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_hw *hw = &vsi->back->hw;
bool rx = false, tx = false;
u32 rxval, txval;
- int vector;
int idx = q_vector->v_idx;
int rx_itr_setting, tx_itr_setting;
- vector = (q_vector->v_idx + vsi->base_vector);
-
/* avoid dynamic calculation if in countdown mode OR if
* all dynamic is disabled
*/
@@ -1498,12 +1548,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval |= BIT(31);
/* don't check _DOWN because interrupt isn't being enabled */
- wr32(hw, INTREG(vector - 1), rxval);
+ wr32(hw, INTREG(q_vector->reg_idx), rxval);
}
enable_int:
if (!test_bit(__I40E_VSI_DOWN, vsi->state))
- wr32(hw, INTREG(vector - 1), txval);
+ wr32(hw, INTREG(q_vector->reg_idx), txval);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;
@@ -2012,10 +2062,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > I40E_MAX_DATA_PER_TXD) {
+ int align_pad = -(stale->page_offset) &
+ (I40E_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > I40E_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -2023,7 +2093,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 8d26c85d12e1..7798a6645c3f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -260,7 +260,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
}
/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define I40E_MIN_DESC_PENDING 4
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
@@ -313,6 +313,7 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+ int prev_pkt_ctr;
u64 tx_lost_interrupt;
};
@@ -467,6 +468,7 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
int i40evf_napi_poll(struct napi_struct *napi, int budget);
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+void i40evf_detect_recover_hung(struct i40e_vsi *vsi);
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40evf_chk_linearize(struct sk_buff *skb);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 213b773dfad6..54951c84a481 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -361,6 +361,7 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_STATUS,
I40E_NVMUPD_EXEC_AQ,
I40E_NVMUPD_GET_AQ_RESULT,
+ I40E_NVMUPD_GET_AQ_EVENT,
};
enum i40e_nvmupd_state {
@@ -380,15 +381,21 @@ enum i40e_nvmupd_state {
#define I40E_NVM_MOD_PNT_MASK 0xFF
-#define I40E_NVM_TRANS_SHIFT 8
-#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_CON 0x0
-#define I40E_NVM_SNT 0x1
-#define I40E_NVM_LCB 0x2
-#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA 0x4
-#define I40E_NVM_CSUM 0x8
-#define I40E_NVM_EXEC 0xf
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+ (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_AQE 0xe
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -561,6 +568,7 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
@@ -1422,19 +1430,19 @@ enum i40e_reset_type {
#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \
I40E_FD_INSET_FLEX_WORD57_SHIFT)
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization(DDP) */
+struct i40e_ddp_version {
u8 major;
u8 minor;
u8 update;
u8 draft;
};
-#define I40E_PPP_NAME_SIZE 32
+#define I40E_DDP_NAME_SIZE 32
/* Package header */
struct i40e_package_header {
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 segment_count;
u32 segment_offset[1];
};
@@ -1446,16 +1454,16 @@ struct i40e_generic_seg_header {
#define SEGMENT_TYPE_I40E 0x00000011
#define SEGMENT_TYPE_X722 0x00000012
u32 type;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 size;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u32 track_id;
- char name[I40E_PPP_NAME_SIZE];
+ char name[I40E_DDP_NAME_SIZE];
};
struct i40e_device_id_entry {
@@ -1465,8 +1473,8 @@ struct i40e_device_id_entry {
struct i40e_profile_segment {
struct i40e_generic_seg_header header;
- struct i40e_ppp_version version;
- char name[I40E_PPP_NAME_SIZE];
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
u32 device_table_count;
struct i40e_device_id_entry device_table[1];
};
@@ -1493,11 +1501,11 @@ struct i40e_profile_section_header {
struct i40e_profile_info {
u32 track_id;
- struct i40e_ppp_version version;
+ struct i40e_ddp_version version;
u8 op;
-#define I40E_PPP_ADD_TRACKID 0x01
-#define I40E_PPP_REMOVE_TRACKID 0x02
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
u8 reserved[7];
- u8 name[I40E_PPP_NAME_SIZE];
+ u8 name[I40E_DDP_NAME_SIZE];
};
#endif /* _I40E_TYPE_H_ */
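Per the masks above, the new preservation flags occupy bits 13:12 of the same configuration word that carries the transaction type in bits 11:8. An illustrative (not driver-verbatim) composition of a "write activate, preserve selected fields" request:

/* Illustrative only: build an NVM config word from the masks defined above. */
u32 config = (I40E_NVM_SA << I40E_NVM_TRANS_SHIFT) |
	     (I40E_NVM_PRESERVATION_FLAGS_SELECTED <<
	      I40E_NVM_PRESERVATION_FLAGS_SHIFT);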
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index de0af521d602..9690c1ea019e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -114,14 +114,14 @@ struct i40e_q_vector {
struct i40evf_adapter *adapter;
struct i40e_vsi *vsi;
struct napi_struct napi;
- unsigned long reg_idx;
struct i40e_ring_container rx;
struct i40e_ring_container tx;
u32 ring_mask;
u8 num_ringpairs; /* total number of ring pairs in vector */
#define ITR_COUNTDOWN_START 100
u8 itr_countdown; /* when 0 or 1 update ITR */
- int v_idx; /* vector index in list */
+ u16 v_idx; /* index in the vsi->q_vector array. */
+ u16 reg_idx; /* register index of the interrupt */
char name[IFNAMSIZ + 15];
bool arm_wb_state;
cpumask_t affinity_mask;
@@ -187,6 +187,7 @@ enum i40evf_state_t {
enum i40evf_critical_section_t {
__I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
__I40EVF_IN_CLIENT_TASK,
+ __I40EVF_IN_REMOVE_TASK, /* device being removed */
};
/* board specific private data structure */
@@ -199,6 +200,9 @@ struct i40evf_adapter {
wait_queue_head_t down_waitqueue;
struct i40e_q_vector *q_vectors;
struct list_head vlan_filter_list;
+ struct list_head mac_filter_list;
+ /* Lock to protect accesses to MAC and VLAN lists */
+ spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
int num_active_queues;
int num_req_queues;
@@ -206,7 +210,6 @@ struct i40evf_adapter {
/* TX */
struct i40e_ring *tx_rings;
u32 tx_timeout_count;
- struct list_head mac_filter_list;
u32 tx_desc_count;
/* RX */
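Every filter-list access converted by this patch follows the same shape around the new lock; the _bh variants are used because the lists can also be reached with BH disabled (for example from the watchdog path). The recurring pattern, as the hunks below show:

spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry(f, &adapter->mac_filter_list, list) {
	/* mark filters for add/remove while the list cannot change */
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);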
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index da006fa3fec1..e2d8aa19d205 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -512,31 +512,31 @@ static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
struct ethtool_coalesce *ec,
int queue)
{
+ struct i40e_ring *rx_ring = &adapter->rx_rings[queue];
+ struct i40e_ring *tx_ring = &adapter->tx_rings[queue];
struct i40e_vsi *vsi = &adapter->vsi;
struct i40e_hw *hw = &adapter->hw;
struct i40e_q_vector *q_vector;
u16 vector;
- adapter->rx_rings[queue].rx_itr_setting = ec->rx_coalesce_usecs;
- adapter->tx_rings[queue].tx_itr_setting = ec->tx_coalesce_usecs;
+ rx_ring->rx_itr_setting = ec->rx_coalesce_usecs;
+ tx_ring->tx_itr_setting = ec->tx_coalesce_usecs;
- if (ec->use_adaptive_rx_coalesce)
- adapter->rx_rings[queue].rx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- adapter->rx_rings[queue].rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+ rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ if (!ec->use_adaptive_rx_coalesce)
+ rx_ring->rx_itr_setting ^= I40E_ITR_DYNAMIC;
- if (ec->use_adaptive_tx_coalesce)
- adapter->tx_rings[queue].tx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- adapter->tx_rings[queue].tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+ tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ if (!ec->use_adaptive_tx_coalesce)
+ tx_ring->tx_itr_setting ^= I40E_ITR_DYNAMIC;
- q_vector = adapter->rx_rings[queue].q_vector;
- q_vector->rx.itr = ITR_TO_REG(adapter->rx_rings[queue].rx_itr_setting);
+ q_vector = rx_ring->q_vector;
+ q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
- q_vector = adapter->tx_rings[queue].q_vector;
- q_vector->tx.itr = ITR_TO_REG(adapter->tx_rings[queue].tx_itr_setting);
+ q_vector = tx_ring->q_vector;
+ q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
vector = vsi->base_vector + q_vector->v_idx;
wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 7b2a4eba92e2..16989ad2ca90 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -45,8 +45,8 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 3
-#define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 1
+#define DRV_VERSION_MINOR 2
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -276,37 +276,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
if (mask & BIT(i - 1)) {
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
- }
- }
-}
-
-/**
- * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
- * @adapter: board private structure
- * @mask: bitmap of vectors to trigger
- **/
-static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
-{
- struct i40e_hw *hw = &adapter->hw;
- int i;
- u32 dyn_ctl;
-
- if (mask & 1) {
- dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
- dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
- }
- for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & BIT(i)) {
- dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
- dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
}
}
}
@@ -337,15 +307,10 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
struct net_device *netdev = data;
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
- u32 val;
/* handle non-queue interrupts, these reads clear the registers */
- val = rd32(hw, I40E_VFINT_ICR01);
- val = rd32(hw, I40E_VFINT_ICR0_ENA1);
-
- val = rd32(hw, I40E_VFINT_DYN_CTL01) |
- I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
- wr32(hw, I40E_VFINT_DYN_CTL01, val);
+ rd32(hw, I40E_VFINT_ICR01);
+ rd32(hw, I40E_VFINT_ICR0_ENA1);
/* schedule work on the private workqueue */
schedule_work(&adapter->adminq_task);
@@ -706,7 +671,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
* @adapter: board private structure
* @vlan: vlan tag
*
- * Returns ptr to the filter object or NULL
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
**/
static struct
i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
@@ -731,14 +697,8 @@ static struct
i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
struct i40evf_vlan_filter *f = NULL;
- int count = 50;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- goto out;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_vlan(adapter, vlan);
if (!f) {
@@ -755,8 +715,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
}
clearout:
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-out:
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return f;
}
@@ -768,21 +727,16 @@ out:
static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
struct i40evf_vlan_filter *f;
- int count = 50;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- return;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_vlan(adapter, vlan);
if (f) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
@@ -824,7 +778,8 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
* @adapter: board private structure
* @macaddr: the MAC address
*
- * Returns ptr to the filter object or NULL
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
**/
static struct
i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
@@ -854,26 +809,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
u8 *macaddr)
{
struct i40evf_mac_filter *f;
- int count = 50;
if (!macaddr)
return NULL;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0)
- return NULL;
- }
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
f = i40evf_find_filter(adapter, macaddr);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (!f) {
- clear_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section);
- return NULL;
- }
+ if (!f)
+ goto clearout;
ether_addr_copy(f->macaddr, macaddr);
@@ -884,7 +830,8 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
f->remove = false;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+clearout:
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return f;
}
@@ -911,12 +858,16 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
return -EPERM;
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
f = i40evf_find_filter(adapter, hw->mac.addr);
if (f) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
f = i40evf_add_filter(adapter, addr->sa_data);
if (f) {
ether_addr_copy(hw->mac.addr, addr->sa_data);
@@ -937,7 +888,6 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
struct netdev_hw_addr *uca;
struct netdev_hw_addr *mca;
struct netdev_hw_addr *ha;
- int count = 50;
/* add addr if not already in the filter list */
netdev_for_each_uc_addr(uca, netdev) {
@@ -947,16 +897,8 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
i40evf_add_filter(adapter, mca->addr);
}
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section)) {
- udelay(1);
- if (--count == 0) {
- dev_err(&adapter->pdev->dev,
- "Failed to get lock in %s\n", __func__);
- return;
- }
- }
- /* remove filter if not in netdev list */
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
netdev_for_each_mc_addr(mca, netdev)
if (ether_addr_equal(mca->addr, f->macaddr))
@@ -995,7 +937,7 @@ bottom_of_search_loop:
adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
@@ -1058,6 +1000,8 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
/**
* i40evf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
**/
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
@@ -1075,6 +1019,8 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter)
/**
* i40e_down - Shutdown the connection processing
* @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
**/
void i40evf_down(struct i40evf_adapter *adapter)
{
@@ -1084,16 +1030,14 @@ void i40evf_down(struct i40evf_adapter *adapter)
if (adapter->state <= __I40EVF_DOWN_PENDING)
return;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
- &adapter->crit_section))
- usleep_range(500, 1000);
-
netif_carrier_off(netdev);
netif_tx_disable(netdev);
adapter->link_up = false;
i40evf_napi_disable_all(adapter);
i40evf_irq_disable(adapter);
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
@@ -1102,6 +1046,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
f->remove = true;
}
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __I40EVF_RESETTING) {
/* cancel any current operation */
@@ -1115,7 +1062,6 @@ void i40evf_down(struct i40evf_adapter *adapter)
adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
}
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
@@ -1441,6 +1387,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
q_vector->adapter = adapter;
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
+ q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
netif_napi_add(adapter->netdev, &q_vector->napi,
i40evf_napi_poll, NAPI_POLL_WEIGHT);
@@ -1770,13 +1717,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
watchdog_done:
- if (adapter->state == __I40EVF_RUNNING) {
- i40evf_irq_enable_queues(adapter, ~0);
- i40evf_fire_sw_int(adapter, 0xFF);
- } else {
- i40evf_fire_sw_int(adapter, 0x1);
- }
-
+ if (adapter->state == __I40EVF_RUNNING)
+ i40evf_detect_recover_hung(&adapter->vsi);
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
if (adapter->state == __I40EVF_REMOVE)
@@ -1796,7 +1738,11 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
- if (netif_running(adapter->netdev)) {
+ /* We don't use netif_running() because it may be true prior to
+ * ndo_open() returning, so we can't assume it means all our open
+ * tasks have finished, since we're not holding the rtnl_lock here.
+ */
+ if (adapter->state == __I40EVF_RUNNING) {
set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev);
@@ -1808,6 +1754,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
i40evf_free_all_rx_resources(adapter);
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* Delete all of the filters, both MAC and VLAN. */
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
list_del(&f->list);
@@ -1819,6 +1767,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
kfree(fv);
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
i40evf_free_queues(adapter);
@@ -1854,6 +1804,13 @@ static void i40evf_reset_task(struct work_struct *work)
struct i40evf_mac_filter *f;
u32 reg_val;
int i = 0, err;
+ bool running;
+
+ /* When the device is being removed it doesn't make sense to run the
+ * reset task; just return in that case.
+ */
+ if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section))
+ return;
while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
&adapter->crit_section))
@@ -1913,7 +1870,13 @@ static void i40evf_reset_task(struct work_struct *work)
}
continue_reset:
- if (netif_running(netdev)) {
+ /* We don't use netif_running() because it may be true prior to
+ * ndo_open() returning, so we can't assume it means all our open
+ * tasks have finished, since we're not holding the rtnl_lock here.
+ */
+ running = (adapter->state == __I40EVF_RUNNING);
+
+ if (running) {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
@@ -1948,6 +1911,8 @@ continue_reset:
adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;
@@ -1956,15 +1921,19 @@ continue_reset:
list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
vlf->add = true;
}
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
- clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
i40evf_misc_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
- if (netif_running(adapter->netdev)) {
+ /* We were running when the reset started, so we need to restore some
+ * state here.
+ */
+ if (running) {
/* allocate transmit descriptors */
err = i40evf_setup_all_tx_resources(adapter);
if (err)
@@ -1993,9 +1962,13 @@ continue_reset:
adapter->state = __I40EVF_DOWN;
wake_up(&adapter->down_waitqueue);
}
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
return;
reset_err:
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
i40evf_close(netdev);
}
@@ -2239,8 +2212,14 @@ static int i40evf_open(struct net_device *netdev)
return -EIO;
}
- if (adapter->state != __I40EVF_DOWN)
- return -EBUSY;
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
+
+ if (adapter->state != __I40EVF_DOWN) {
+ err = -EBUSY;
+ goto err_unlock;
+ }
/* allocate transmit descriptors */
err = i40evf_setup_all_tx_resources(adapter);
@@ -2264,6 +2243,8 @@ static int i40evf_open(struct net_device *netdev)
i40evf_irq_enable(adapter, true);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
return 0;
err_req_irq:
@@ -2273,6 +2254,8 @@ err_setup_rx:
i40evf_free_all_rx_resources(adapter);
err_setup_tx:
i40evf_free_all_tx_resources(adapter);
+err_unlock:
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
return err;
}
@@ -2296,6 +2279,9 @@ static int i40evf_close(struct net_device *netdev)
if (adapter->state <= __I40EVF_DOWN_PENDING)
return 0;
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
@@ -2305,6 +2291,8 @@ static int i40evf_close(struct net_device *netdev)
adapter->state = __I40EVF_DOWN_PENDING;
i40evf_free_traffic_irqs(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory. Resources are cleared in
* i40evf_virtchnl_completion() after we get confirmation from the PF
@@ -2357,13 +2345,19 @@ static int i40evf_set_features(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- if (!VLAN_ALLOWED(adapter))
+ /* Don't allow changing the VLAN_RX flag when a VLAN is set for the VF;
+ * return an error in this case.
+ */
+ if (VLAN_ALLOWED(adapter)) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ adapter->aq_required |=
+ I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+ else
+ adapter->aq_required |=
+ I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+ } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
return -EINVAL;
-
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
- else
- adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+ }
return 0;
}
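The (netdev->features ^ features) test in the else branch above is the usual idiom for detecting that a request toggles one specific feature bit: XOR leaves set only the bits that differ between the current and requested masks. Spelled out:

netdev_features_t changed = netdev->features ^ features;	/* bits being toggled */

if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
	/* user space asked to flip VLAN RX stripping */
}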
@@ -2943,6 +2937,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
+ spin_lock_init(&adapter->mac_vlan_list_lock);
+
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -2985,6 +2981,10 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(netdev);
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
+
if (netif_running(netdev)) {
rtnl_lock();
i40evf_down(adapter);
@@ -2993,6 +2993,8 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
retval = pci_save_state(pdev);
if (retval)
return retval;
@@ -3066,7 +3068,8 @@ static void i40evf_remove(struct pci_dev *pdev)
struct i40evf_mac_filter *f, *ftmp;
struct i40e_hw *hw = &adapter->hw;
int err;
-
+ /* Indicate that we are in the remove path so reset_task does not run */
+ set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);
cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
cancel_delayed_work_sync(&adapter->client_task);
@@ -3101,8 +3104,6 @@ static void i40evf_remove(struct pci_dev *pdev)
if (adapter->watchdog_timer.function)
del_timer_sync(&adapter->watchdog_timer);
- flush_scheduled_work();
-
i40evf_free_rss(adapter);
if (hw->aq.asq.count)
@@ -3118,6 +3119,7 @@ static void i40evf_remove(struct pci_dev *pdev)
i40evf_free_all_rx_resources(adapter);
i40evf_free_queues(adapter);
kfree(adapter->vf_res);
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
/* If we got removed before an up/down sequence, we've got a filter
* hanging out there that we need to get rid of.
*/
@@ -3130,6 +3132,8 @@ static void i40evf_remove(struct pci_dev *pdev)
kfree(f);
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 46c8b8a3907c..50ce0d6c09ef 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -433,12 +433,16 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
adapter->current_op);
return;
}
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->add)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
@@ -456,8 +460,10 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
veal = kzalloc(len, GFP_KERNEL);
- if (!veal)
+ if (!veal) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
@@ -472,6 +478,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
@@ -498,12 +507,16 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
adapter->current_op);
return;
}
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->remove)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
@@ -520,8 +533,10 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
more = true;
}
veal = kzalloc(len, GFP_KERNEL);
- if (!veal)
+ if (!veal) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
@@ -537,6 +552,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
@@ -564,12 +582,15 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
return;
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->add)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
@@ -586,8 +607,10 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
more = true;
}
vvfl = kzalloc(len, GFP_KERNEL);
- if (!vvfl)
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
@@ -602,6 +625,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -628,12 +654,15 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
return;
}
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->remove)
count++;
}
if (!count) {
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
@@ -650,8 +679,10 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
more = true;
}
vvfl = kzalloc(len, GFP_KERNEL);
- if (!vvfl)
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
+ }
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
@@ -667,6 +698,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -705,8 +739,10 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
}
if (!flags) {
- adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
+ adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON |
+ I40EVF_FLAG_ALLMULTI_ON);
+ adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC |
+ I40EVF_FLAG_AQ_RELEASE_ALLMULTI);
dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
}
@@ -965,23 +1001,34 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode == VIRTCHNL_OP_EVENT) {
struct virtchnl_pf_event *vpe =
(struct virtchnl_pf_event *)msg;
+ bool link_up = vpe->event_data.link_event.link_status;
switch (vpe->event) {
case VIRTCHNL_EVENT_LINK_CHANGE:
adapter->link_speed =
vpe->event_data.link_event.link_speed;
- if (adapter->link_up !=
- vpe->event_data.link_event.link_status) {
- adapter->link_up =
- vpe->event_data.link_event.link_status;
- if (adapter->link_up) {
- netif_tx_start_all_queues(netdev);
- netif_carrier_on(netdev);
- } else {
- netif_tx_stop_all_queues(netdev);
- netif_carrier_off(netdev);
- }
- i40evf_print_link_message(adapter);
+
+ /* we've already got the right link status, bail */
+ if (adapter->link_up == link_up)
+ break;
+
+ /* If we get a link-up message and start queues before
+ * our queues are configured it will trigger a Tx hang.
+ * In that case, just ignore the link status message;
+ * we'll get another one after we enable queues and are
+ * actually prepared to send traffic.
+ */
+ if (link_up && adapter->state != __I40EVF_RUNNING)
+ break;
+
+ adapter->link_up = link_up;
+ if (link_up) {
+ netif_tx_start_all_queues(netdev);
+ netif_carrier_on(netdev);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
}
+ i40evf_print_link_message(adapter);
break;
case VIRTCHNL_EVENT_RESET_IMPENDING:
dev_info(&adapter->pdev->dev, "PF reset warning received\n");
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 92845692087a..1c6b8d9176a8 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -690,6 +690,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
+unsigned int igb_get_max_rss_queues(struct igb_adapter *);
#ifdef CONFIG_IGB_HWMON
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index d06a8db514d4..606e6761758f 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -3338,37 +3338,7 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
static unsigned int igb_max_channels(struct igb_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
- unsigned int max_combined = 0;
-
- switch (hw->mac.type) {
- case e1000_i211:
- max_combined = IGB_MAX_RX_QUEUES_I211;
- break;
- case e1000_82575:
- case e1000_i210:
- max_combined = IGB_MAX_RX_QUEUES_82575;
- break;
- case e1000_i350:
- if (!!adapter->vfs_allocated_count) {
- max_combined = 1;
- break;
- }
- /* fall through */
- case e1000_82576:
- if (!!adapter->vfs_allocated_count) {
- max_combined = 2;
- break;
- }
- /* fall through */
- case e1000_82580:
- case e1000_i354:
- default:
- max_combined = IGB_MAX_RX_QUEUES;
- break;
- }
-
- return max_combined;
+ return igb_get_max_rss_queues(adapter);
}
static void igb_get_channels(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c208753ff5b7..b88fae785369 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1744,6 +1744,20 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
* value = idleSlope * 61034
* ----------------- (E6)
* 1000000
+ *
+ * NOTE: For i210, given the above, we can see that idleslope
+ * is represented in 16.38431 kbps units by the value at
+ * the TQAVCC register (1Gbps / 61034), which reduces
+ * the granularity for idleslope increments.
+ * For instance, if you want to configure a 2576kbps
+ * idleslope, the value to be written on the register
+ * would have to be 157.23. If rounded down, you end
+ * up with less bandwidth available than originally
+ * required (~2572 kbps). If rounded up, you end up
+ * with a higher bandwidth (~2589 kbps). The approach
+ * we take below is to always round up the calculated
+ * value, so the resulting bandwidth might be slightly
+ * higher for some configurations.
*/
value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
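Working the comment's 2576 kbps case through the formula makes the rounding behaviour concrete:

/* 2576 * 61034 / 1000000 = 157.22..., so DIV_ROUND_UP_ULL() yields 158;
 * 158 units at ~16.384 kbps each reserve ~2589 kbps, never less than asked.
 */
u32 value = DIV_ROUND_UP_ULL(2576ULL * 61034, 1000000);	/* == 158 */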
@@ -3200,8 +3214,6 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
/* if allocation failed then we do not support SR-IOV */
if (!adapter->vf_data) {
adapter->vfs_allocated_count = 0;
- dev_err(&pdev->dev,
- "Unable to allocate memory for VF Data Storage\n");
err = -ENOMEM;
goto out;
}
@@ -3373,10 +3385,10 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
#endif /* CONFIG_PCI_IOV */
}
-static void igb_init_queue_configuration(struct igb_adapter *adapter)
+unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 max_rss_queues;
+ unsigned int max_rss_queues;
/* Determine the maximum number of RSS queues supported. */
switch (hw->mac.type) {
@@ -3407,6 +3419,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
break;
}
+ return max_rss_queues;
+}
+
+static void igb_init_queue_configuration(struct igb_adapter *adapter)
+{
+ u32 max_rss_queues;
+
+ max_rss_queues = igb_get_max_rss_queues(adapter);
adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
igb_set_flag_queue_pairs(adapter, max_rss_queues);
@@ -3676,7 +3696,7 @@ static int __igb_close(struct net_device *netdev, bool suspending)
int igb_close(struct net_device *netdev)
{
- if (netif_device_present(netdev))
+ if (netif_device_present(netdev) || netdev->dismantle)
return __igb_close(netdev, false);
return 0;
}
@@ -8718,7 +8738,8 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
/* Indicate to hardware the Address is Valid. */
if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
- rar_high |= E1000_RAH_AV;
+ if (is_valid_ether_addr(addr))
+ rar_high |= E1000_RAH_AV;
if (hw->mac.type == e1000_82575)
rar_high |= E1000_RAH_POOL_1 *
@@ -8756,17 +8777,36 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
+
+ if (vf >= adapter->vfs_allocated_count)
+ return -EINVAL;
+
+ /* Setting the VF MAC to 0 clears the IGB_VF_FLAG_PF_SET_MAC
+ * flag and allows the MAC to be overwritten via the VF netdev.
+ * This gives libvirt a way to restore the original MAC after
+ * unbinding vfio-pci and reloading igbvf after shutting down
+ * a VM.
+ */
+ if (is_zero_ether_addr(mac)) {
+ adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
+ dev_info(&adapter->pdev->dev,
+ "remove administratively set MAC on VF %d\n",
+ vf);
+ } else if (is_valid_ether_addr(mac)) {
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
+ mac, vf);
+ dev_info(&adapter->pdev->dev,
+ "Reload the VF driver to make this change effective.");
+ /* Generate additional warning if PF is down */
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF MAC address has been set, but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before attempting to use the VF device.\n");
+ }
+ } else {
return -EINVAL;
- adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
- dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
- dev_info(&adapter->pdev->dev,
- "Reload the VF driver to make this change effective.");
- if (test_bit(__IGB_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev,
- "The VF MAC address has been set, but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev,
- "Bring the PF device up before attempting to use the VF device.\n");
}
return igb_set_vf_mac(adapter, vf, mac);
}
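In practice this means an administrator can hand MAC control back to the VF with a zero address, e.g. a hypothetical "ip link set <pf-iface> vf 0 mac 00:00:00:00:00:00", before unbinding vfio-pci and reloading igbvf.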
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 841c2a083349..0746b19ec6d3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -643,6 +643,10 @@ static void igb_ptp_tx_work(struct work_struct *work)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
adapter->tx_hwtstamp_timeouts++;
+ /* Clear the tx valid bit in TSYNCTXCTL register to enable
+ * interrupt
+ */
+ rd32(E1000_TXSTMPH);
dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
return;
}
@@ -717,6 +721,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
*/
void igb_ptp_tx_hang(struct igb_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
IGB_PTP_TX_TIMEOUT);
@@ -736,6 +741,10 @@ void igb_ptp_tx_hang(struct igb_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
adapter->tx_hwtstamp_timeouts++;
+ /* Clear the tx valid bit in TSYNCTXCTL register to enable
+ * interrupt
+ */
+ rd32(E1000_TXSTMPH);
dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 35e6fa643c7e..8319465eb38d 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -42,3 +42,4 @@ ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
+ixgbe-$(CONFIG_XFRM_OFFLOAD) += ixgbe_ipsec.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 468c3555a629..c1e3a0039ea5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -52,7 +52,9 @@
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif
+#include "ixgbe_ipsec.h"
+#include <net/xdp.h>
#include <net/busy_poll.h>
/* common prefix used by pr_<> macros */
@@ -170,10 +172,11 @@ enum ixgbe_tx_flags {
IXGBE_TX_FLAGS_CC = 0x08,
IXGBE_TX_FLAGS_IPV4 = 0x10,
IXGBE_TX_FLAGS_CSUM = 0x20,
+ IXGBE_TX_FLAGS_IPSEC = 0x40,
/* software defined flags */
- IXGBE_TX_FLAGS_SW_VLAN = 0x40,
- IXGBE_TX_FLAGS_FCOE = 0x80,
+ IXGBE_TX_FLAGS_SW_VLAN = 0x80,
+ IXGBE_TX_FLAGS_FCOE = 0x100,
};
/* VLAN info */
@@ -332,7 +335,6 @@ struct ixgbe_ring {
struct net_device *netdev; /* netdev ring belongs to */
struct bpf_prog *xdp_prog;
struct device *dev; /* device for DMA mapping */
- struct ixgbe_fwd_adapter *l2_accel_priv;
void *desc; /* descriptor ring memory */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
@@ -371,6 +373,7 @@ struct ixgbe_ring {
struct ixgbe_tx_queue_stats tx_stats;
struct ixgbe_rx_queue_stats rx_stats;
};
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
@@ -395,8 +398,7 @@ enum ixgbe_ring_f_enum {
#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
-#define IXGBE_MAX_MACVLANS 31
-#define IXGBE_MAX_DCBMACVLANS 8
+#define IXGBE_MAX_MACVLANS 63
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
@@ -629,15 +631,18 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
#define IXGBE_FLAG2_RX_LEGACY BIT(16)
+#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
/* Tx fast path data */
int num_tx_queues;
u16 tx_itr_setting;
u16 tx_work_limit;
+ u64 tx_ipsec;
/* Rx fast path data */
int num_rx_queues;
u16 rx_itr_setting;
+ u64 rx_ipsec;
/* Port number used to identify VXLAN traffic */
__be16 vxlan_port;
@@ -674,6 +679,7 @@ struct ixgbe_adapter {
struct ieee_ets *ixgbe_ieee_ets;
struct ixgbe_dcb_config dcb_cfg;
struct ixgbe_dcb_config temp_dcb_cfg;
+ u8 hw_tcs;
u8 dcb_set_bitmap;
u8 dcbx_cap;
enum ixgbe_fc_mode last_lfc_mode;
@@ -721,8 +727,7 @@ struct ixgbe_adapter {
u16 bridge_mode;
- u16 eeprom_verh;
- u16 eeprom_verl;
+ char eeprom_id[NVM_VER_SIZE];
u16 eeprom_cap;
u32 interrupt_event;
@@ -766,7 +771,8 @@ struct ixgbe_adapter {
#endif /*CONFIG_DEBUG_FS*/
u8 default_up;
- unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
+ /* Bitmask indicating in use pools */
+ DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);
#define IXGBE_MAX_LINK_HANDLE 10
struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
@@ -780,6 +786,10 @@ struct ixgbe_adapter {
#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 *rss_key;
+
+#ifdef CONFIG_XFRM
+ struct ixgbe_ipsec *ipsec;
+#endif /* CONFIG_XFRM */
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -1010,4 +1020,24 @@ void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
+#ifdef CONFIG_XFRM_OFFLOAD
+void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
+void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
+void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
+void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
+int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
+ struct ixgbe_ipsec_tx_data *itd);
+#else
+static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }
+static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }
+static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }
+static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb) { }
+static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *first,
+ struct ixgbe_ipsec_tx_data *itd) { return 0; }
+#endif /* CONFIG_XFRM_OFFLOAD */
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 8a32eb7d47b9..a0ebd9ecf243 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -431,6 +431,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
/**
* ixgbe_start_mac_link_82598 - Configures MAC link settings
* @hw: pointer to hardware structure
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
@@ -1054,7 +1055,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
* @byte_offset: byte offset at address 0xA2
- * @eeprom_data: value read
+ * @sff8472_data: value read
*
* Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
**/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index d602637ccc40..4dfc81dbee4b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -221,7 +221,7 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
/**
* prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
* @hw: pointer to hardware structure
- * @reg_val: value to write to AUTOC
+ * @autoc: value to write to AUTOC
* @locked: bool to indicate whether the SW/FW lock was already taken by
* previous proc_autoc_read_82599.
*
@@ -1310,10 +1310,11 @@ do { \
/**
* ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
- * @stream: input bitstream to compute the hash on
+ * @input: input bitstream to compute the hash on
+ * @common: compressed common input dword
*
* This function is almost identical to the function above but contains
- * several optomizations such as unwinding all of the loops, letting the
+ * several optimizations such as unwinding all of the loops, letting the
* compiler work out all of the conditional ifs since the keys are static
* defines, and computing two keys at once since the hashed dword stream
* will be the same for both keys.
@@ -1446,7 +1447,7 @@ do { \
/**
* ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
- * @atr_input: input bitstream to compute the hash on
+ * @input: input bitstream to compute the hash on
* @input_mask: mask for the input bitstream
*
* This function serves two main purposes. First it applies the input_mask
@@ -2078,6 +2079,7 @@ reset_pipeline_out:
* ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: address to read from
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -2131,6 +2133,7 @@ release_i2c_access:
* ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: address to write to
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 9bef255f6a18..61188f343955 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1613,6 +1613,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
/**
* ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
* @hw: pointer to hardware structure
+ * @count: number of bits to shift
**/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
@@ -1667,7 +1668,7 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
/**
* ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
* @hw: pointer to hardware structure
- * @eecd: EECD's current value
+ * @eec: EEC's current value
**/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
@@ -2037,7 +2038,7 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
/**
* ixgbe_set_mta - Set bit-vector in multicast table
* @hw: pointer to hardware structure
- * @hash_value: Multicast address hash value
+ * @mc_addr: Multicast address
*
* Sets the bit-vector in the multicast table.
**/
@@ -3086,6 +3087,8 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
* ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
+ * @vlvf_bypass: true to find the vlanid only; false to also return the first
+ * empty slot if the vlanid is not found
*
* return the VLVF index where this VLAN id should be placed
*
@@ -3476,7 +3479,7 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
* ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
* @hw: pointer to hardware structure
* @enable: enable or disable switch for VLAN anti-spoofing
- * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
*
**/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
@@ -4028,6 +4031,118 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
return 0;
}
+/**
+ * ixgbe_get_orom_version - Return option ROM version from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * If a valid option ROM version is found, nvm_ver->or_valid is set to true;
+ * otherwise nvm_ver->or_valid is false.
+ **/
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
+
+ nvm_ver->or_valid = false;
+ /* The option ROM may or may not be present. Start with the pointer. */
+ hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
+
+ /* make sure offset is valid */
+ if (offset == 0x0 || offset == NVM_INVALID_PTR)
+ return;
+
+ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
+ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
+
+ /* make sure the option ROM block exists and is valid */
+ if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
+ eeprom_cfg_blkl == NVM_VER_INVALID ||
+ eeprom_cfg_blkh == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->or_valid = true;
+ nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
+ nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
+ (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
+ nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
+}
+
+/**
+ * ixgbe_get_oem_prod_version - Return OEM product version from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * If a valid OEM product version is found, nvm_ver->oem_valid is set to true;
+ * otherwise nvm_ver->oem_valid is false.
+ **/
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 rel_num, prod_ver, mod_len, cap, offset;
+
+ nvm_ver->oem_valid = false;
+ hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
+
+ /* Return if offset to OEM Product Version block is invalid */
+ if (offset == 0x0 || offset == NVM_INVALID_PTR)
+ return;
+
+ /* Read product version block */
+ hw->eeprom.ops.read(hw, offset, &mod_len);
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
+
+ /* Return if OEM product version block is invalid */
+ if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
+ (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
+ return;
+
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
+
+ /* Return if version is invalid */
+ if ((rel_num | prod_ver) == 0x0 ||
+ rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
+ nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
+ nvm_ver->oem_release = rel_num;
+ nvm_ver->oem_valid = true;
+}
+
+/**
+ * ixgbe_get_etk_id - Return Etrack ID from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * word read errors will return 0xFFFF
+ **/
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 etk_id_l, etk_id_h;
+
+ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
+ etk_id_l = NVM_VER_INVALID;
+ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
+ etk_id_h = NVM_VER_INVALID;
+
+ /* The word order for the version format is determined by high order
+ * word bit 15.
+ */
+ if ((etk_id_h & NVM_ETK_VALID) == 0) {
+ nvm_ver->etk_id = etk_id_h;
+ nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
+ } else {
+ nvm_ver->etk_id = etk_id_l;
+ nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
+ }
+}
+
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
u32 rxctrl;
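A quick worked example of the Etrack word-order rule above, assuming NVM_ETK_SHIFT spans one 16-bit EEPROM word (an assumption; the define is not shown in this hunk):

/* Hypothetical words: etk_id_l = 0x5678, etk_id_h = 0x8001. Bit 15
 * (NVM_ETK_VALID) is set in the high word, so the else branch runs and
 * the ID assembles low-word-first:
 */
u32 etk_id = 0x5678 | (0x8001u << NVM_ETK_SHIFT);	/* 0x80015678 if shift == 16 */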
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index a01409e2e06c..4d4c02366cb3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -139,6 +139,12 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 072ef3b5fc61..aaea8282bfd2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -39,6 +39,10 @@
* are the smallest unit programmable into the underlying
* hardware. The IEEE 802.1Qaz specification do not use bandwidth
* groups so this is much simplified from the CEE case.
+ * @bw: bandwidth index by traffic class
+ * @refill: refill credits index by traffic class
+ * @max: max credits by traffic class
+ * @max_frame: maximum frame size
*/
static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
__u16 *max, int max_frame)
@@ -72,8 +76,10 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
/**
* ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
- * @ixgbe_dcb_config: Struct containing DCB settings.
- * @direction: Configuring either Tx or Rx.
+ * @hw: pointer to hardware structure
+ * @dcb_config: Struct containing DCB settings
+ * @max_frame: Maximum frame size
+ * @direction: Configuring either Tx or Rx
*
* This function calculates the credits allocated to each traffic class.
* It should be called only after the rules are checked by
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index b79e93a5b699..f94c7e82a30b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -34,7 +34,9 @@
/**
* ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
@@ -91,7 +93,10 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
@@ -137,7 +142,10 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
@@ -184,7 +192,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_pfc_82598 - Config priority flow control
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
*
* Configure Priority Flow Control for each traffic class.
*/
@@ -269,7 +277,11 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_hw_config_82598 - Config and enable DCB
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure dcb settings and enable dcb mode.
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 1011d644978f..1eed6811e914 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -38,6 +38,7 @@
* @max: max credits index by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
+ * @prio_tc: priority to tc assignments indexed by priority
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
@@ -148,6 +149,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
* @max: max credits index by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
+ * @prio_tc: priority to tc assignments indexed by priority
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
@@ -344,11 +346,12 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_hw_config_82599 - Configure and enable DCB
* @hw: pointer to hardware structure
+ * @pfc_en: enabled pfc bitmask
* @refill: refill credits index by traffic class
* @max: max credits index by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
- * @pfc_en: enabled pfc bitmask
+ * @prio_tc: priority to tc assignments indexed by priority
*
* Configure dcb settings and enable dcb mode.
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 78c52375acc6..b33f3f87e4b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -571,7 +571,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
return -EINVAL;
- if (max_tc != netdev_get_num_tc(dev)) {
+ if (max_tc != adapter->hw_tcs) {
err = ixgbe_setup_tc(dev, max_tc);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 5e2c1e35e517..ad54080488ee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -249,7 +249,7 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
/**
* ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries
- * @pf: the pf that is stopping
+ * @adapter: the adapter that is exiting
**/
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0aad1c2a3667..221f15803480 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -115,6 +115,8 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
+ {"tx_ipsec", IXGBE_STAT(tx_ipsec)},
+ {"rx_ipsec", IXGBE_STAT(rx_ipsec)},
#ifdef IXGBE_FCOE
{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
@@ -1014,16 +1016,13 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 nvm_track_id;
strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ixgbe_driver_version,
sizeof(drvinfo->version));
- nvm_track_id = (adapter->eeprom_verh << 16) |
- adapter->eeprom_verl;
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
- nvm_track_id);
+ strlcpy(drvinfo->fw_version, adapter->eeprom_id,
+ sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
@@ -1156,6 +1155,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
memcpy(&temp_ring[i], adapter->rx_ring[i],
sizeof(struct ixgbe_ring));
+ /* Clear copied XDP RX-queue info */
+ memset(&temp_ring[i].xdp_rxq, 0,
+ sizeof(temp_ring[i].xdp_rxq));
+
temp_ring[i].count = new_rx_count;
err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
if (err) {
@@ -3082,26 +3085,9 @@ static int ixgbe_get_ts_info(struct net_device *dev,
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
- /* fallthrough */
+ break;
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
-
- if (adapter->ptp_clock)
- info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
-
- info->tx_types =
- BIT(HWTSTAMP_TX_OFF) |
- BIT(HWTSTAMP_TX_ON);
-
info->rx_filters |=
BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
@@ -3110,13 +3096,31 @@ static int ixgbe_get_ts_info(struct net_device *dev,
default:
return ethtool_op_get_ts_info(dev, info);
}
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types =
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
return 0;
}
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
unsigned int max_combined;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
/* We only support one q_vector without MSI-X */
@@ -3173,7 +3177,7 @@ static void ixgbe_get_channels(struct net_device *dev,
return;
/* same thing goes for being DCB enabled */
- if (netdev_get_num_tc(dev) > 1)
+ if (adapter->hw_tcs > 1)
return;
/* if ATR is disabled we can exit */
@@ -3219,7 +3223,7 @@ static int ixgbe_set_channels(struct net_device *dev,
#endif
/* use setup TC to update any traffic class queue mapping */
- return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+ return ixgbe_setup_tc(dev, adapter->hw_tcs);
}
static int ixgbe_get_module_info(struct net_device *dev,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index a23c2b5411a0..7a09a40e4472 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -150,6 +150,7 @@ skip_ddpinv:
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
+ * @target_mode: 1 to setup target mode, 0 to setup initiator mode
*
* Returns : 1 for success and 0 for no ddp
*/
@@ -1034,11 +1035,8 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
ixgbe_driver_name,
ixgbe_driver_version);
/* Firmware Version */
- snprintf(info->firmware_version,
- sizeof(info->firmware_version),
- "0x%08x",
- (adapter->eeprom_verh << 16) |
- adapter->eeprom_verl);
+ strlcpy(info->firmware_version, adapter->eeprom_id,
+ sizeof(info->firmware_version));
/* Model */
if (hw->mac.type == ixgbe_mac_82599EB) {
@@ -1066,7 +1064,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
/**
* ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
*
* Return : TC that FCoE is mapped to
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
new file mode 100644
index 000000000000..93eacddb6704
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -0,0 +1,941 @@
+/*******************************************************************************
+ *
+ * Intel 10 Gigabit PCI Express Linux driver
+ * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "ixgbe.h"
+#include <net/xfrm.h>
+#include <crypto/aead.h>
+
+/**
+ * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @key: key byte array
+ * @salt: salt bytes
+ **/
+static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
+ u32 key[], u32 salt)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
+ IXGBE_WRITE_FLUSH(hw);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
+ reg &= IXGBE_RXTXIDX_IPS_EN;
+ reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_set_rx_item - set an Rx table item
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @tbl: table selector
+ *
+ * Trigger the device to store into a particular Rx table the
+ * data that has already been loaded into the input register
+ **/
+static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
+ enum ixgbe_ipsec_tbl_sel tbl)
+{
+ u32 reg;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
+ reg &= IXGBE_RXTXIDX_IPS_EN;
+ reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
+ idx << IXGBE_RXTXIDX_IDX_SHIFT |
+ IXGBE_RXTXIDX_WRITE;
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @spi: security parameter index
+ * @key: key byte array
+ * @salt: salt bytes
+ * @mode: rx decrypt control bits
+ * @ip_idx: index into IP table for related IP address
+ **/
+static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
+ u32 key[], u32 salt, u32 mode, u32 ip_idx)
+{
+ int i;
+
+ /* store the SPI (in bigendian) and IPidx */
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
+ IXGBE_WRITE_FLUSH(hw);
+
+ ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);
+
+ /* store the key, salt, and mode */
+ for (i = 0; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i]));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
+ IXGBE_WRITE_FLUSH(hw);
+
+ ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
+}
+
+/**
+ * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
+ * @hw: hw specific details
+ * @idx: register index to write
+ * @addr: IP address byte array
+ **/
+static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
+{
+ int i;
+
+ /* store the ip address */
+ for (i = 0; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i]));
+ IXGBE_WRITE_FLUSH(hw);
+
+ ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
+}
+
+/**
+ * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
+ * @adapter: board private structure
+ **/
+static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 buf[4] = {0, 0, 0, 0};
+ u16 idx;
+
+ /* disable Rx and Tx SA lookup */
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
+
+ /* scrub the tables - split the loops for the max of the IP table */
+ for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
+ ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
+ ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
+ ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
+ }
+ for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
+ ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
+ ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
+ }
+
+ ipsec->num_rx_sa = 0;
+ ipsec->num_tx_sa = 0;
+}
+
+/**
+ * ixgbe_ipsec_stop_data
+ * @adapter: board private structure
+ **/
+static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool link = adapter->link_up;
+ u32 t_rdy, r_rdy;
+ u32 limit;
+ u32 reg;
+
+ /* halt data paths */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ reg |= IXGBE_SECTXCTRL_TX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ reg |= IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* If the Tx FIFO still has data but there is no link,
+ * the Tx security block can't drain. Enable MAC loopback
+ * before clearing the block.
+ */
+ if (!link) {
+ reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ reg |= IXGBE_MACC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ reg |= IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+ IXGBE_WRITE_FLUSH(hw);
+ mdelay(3);
+ }
+
+ /* wait for the paths to empty */
+ limit = 20;
+ do {
+ mdelay(10);
+ t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
+ IXGBE_SECTXSTAT_SECTX_RDY;
+ r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
+ IXGBE_SECRXSTAT_SECRX_RDY;
+ } while (!t_rdy && !r_rdy && limit--);
+
+ /* undo loopback if we played with it earlier */
+ if (!link) {
+ reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ reg &= ~IXGBE_MACC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ reg &= ~IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+ IXGBE_WRITE_FLUSH(hw);
+ }
+}
+
+/**
+ * ixgbe_ipsec_stop_engine
+ * @adapter: board private structure
+ **/
+static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+
+ ixgbe_ipsec_stop_data(adapter);
+
+ /* disable Rx and Tx SA lookup */
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
+
+ /* disable the Rx and Tx engines and full packet store-n-forward */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ reg |= IXGBE_SECTXCTRL_SECTX_DIS;
+ reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ reg |= IXGBE_SECRXCTRL_SECRX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
+
+ /* restore the "tx security buffer almost full threshold" to 0x250 */
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);
+
+ /* Set minimum IFG between packets back to the default 0x1 */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg = (reg & 0xfffffff0) | 0x1;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+ /* final set for normal (no ipsec offload) processing */
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_start_engine
+ * @adapter: board private structure
+ *
+ * NOTE: this increases power consumption whether or not the offload is in use
+ **/
+static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+
+ ixgbe_ipsec_stop_data(adapter);
+
+ /* Set minimum IFG between packets to 3 */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg = (reg & 0xfffffff0) | 0x3;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+ /* Set "tx security buffer almost full threshold" to 0x15 so that the
+ * almost full indication is generated only after buffer contains at
+ * least an entire jumbo packet.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
+ reg = (reg & 0xfffffc00) | 0x15;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);
+
+ /* restart the data paths by clearing the DISABLE bits */
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);
+
+ /* enable Rx and Tx SA lookup */
+ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
+ * @adapter: board private structure
+ **/
+void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
+ return;
+
+ /* clean up and restart the engine */
+ ixgbe_ipsec_stop_engine(adapter);
+ ixgbe_ipsec_clear_hw_tables(adapter);
+ ixgbe_ipsec_start_engine(adapter);
+
+ /* reload the IP addrs */
+ for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
+ struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];
+
+ if (ipsa->used)
+ ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
+ }
+
+ /* reload the Rx and Tx keys */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+ struct rx_sa *rsa = &ipsec->rx_tbl[i];
+ struct tx_sa *tsa = &ipsec->tx_tbl[i];
+
+ if (rsa->used)
+ ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
+ rsa->key, rsa->salt,
+ rsa->mode, rsa->iptbl_ind);
+
+ if (tsa->used)
+ ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
+ }
+}
+
+/**
+ * ixgbe_ipsec_find_empty_idx - find the first unused SA table index
+ * @ipsec: pointer to ipsec struct
+ * @rxtable: true if we need to look in the Rx table
+ *
+ * Returns the first unused index in either the Rx or Tx SA table
+ **/
+static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
+{
+ u32 i;
+
+ if (rxtable) {
+ if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
+ return -ENOSPC;
+
+ /* search rx sa table */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+ if (!ipsec->rx_tbl[i].used)
+ return i;
+ }
+ } else {
+ if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
+ return -ENOSPC;
+
+ /* search tx sa table */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+ if (!ipsec->tx_tbl[i].used)
+ return i;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+/**
+ * ixgbe_ipsec_find_rx_state - find the state that matches
+ * @ipsec: pointer to ipsec struct
+ * @daddr: inbound address to match
+ * @proto: protocol to match
+ * @spi: SPI to match
+ * @ip4: true if using an ipv4 address
+ *
+ * Returns a pointer to the matching SA state information
+ **/
+static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
+ __be32 *daddr, u8 proto,
+ __be32 spi, bool ip4)
+{
+ struct rx_sa *rsa;
+ struct xfrm_state *ret = NULL;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
+ if (spi == rsa->xs->id.spi &&
+ ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
+ (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
+ sizeof(rsa->xs->id.daddr.a6)))) &&
+ proto == rsa->xs->id.proto) {
+ ret = rsa->xs;
+ xfrm_state_hold(ret);
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @xs: pointer to xfrm_state struct
+ * @mykey: pointer to key array to populate
+ * @mysalt: pointer to salt value to populate
+ *
+ * This copies the protocol keys and salt to our own data tables. The
+ * 82599 family only supports the one algorithm.
+ **/
+static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
+ u32 *mykey, u32 *mysalt)
+{
+ struct net_device *dev = xs->xso.dev;
+ unsigned char *key_data;
+ char *alg_name = NULL;
+ const char aes_gcm_name[] = "rfc4106(gcm(aes))";
+ int key_len;
+
+ if (xs->aead) {
+ key_data = &xs->aead->alg_key[0];
+ key_len = xs->aead->alg_key_len;
+ alg_name = xs->aead->alg_name;
+ } else {
+ netdev_err(dev, "Unsupported IPsec algorithm\n");
+ return -EINVAL;
+ }
+
+ if (strcmp(alg_name, aes_gcm_name)) {
+ netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
+ aes_gcm_name);
+ return -EINVAL;
+ }
+
+ /* The key bytes come down in a bigendian array of bytes, so
+ * we don't need to do any byteswapping.
+ * A key_len of 160 bits covers the 16 byte key plus the 4 byte salt.
+ */
+ if (key_len == 160) {
+ *mysalt = ((u32 *)key_data)[4];
+ } else if (key_len != 128) {
+ netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
+ return -EINVAL;
+ } else {
+ netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
+ *mysalt = 0;
+ }
+ memcpy(mykey, key_data, 16);
+
+ return 0;
+}
+
+/**
+ * ixgbe_ipsec_add_sa - program device with a security association
+ * @xs: pointer to transformer state struct
+ **/
+static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
+{
+ struct net_device *dev = xs->xso.dev;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int checked, match, first;
+ u16 sa_idx;
+ int ret;
+ int i;
+
+ if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+ netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
+ xs->id.proto);
+ return -EINVAL;
+ }
+
+ if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ struct rx_sa rsa;
+
+ if (xs->calg) {
+ netdev_err(dev, "Compression offload not supported\n");
+ return -EINVAL;
+ }
+
+ /* find the first unused index */
+ ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
+ if (ret < 0) {
+ netdev_err(dev, "No space for SA in Rx table!\n");
+ return ret;
+ }
+ sa_idx = (u16)ret;
+
+ memset(&rsa, 0, sizeof(rsa));
+ rsa.used = true;
+ rsa.xs = xs;
+
+ if (rsa.xs->id.proto == IPPROTO_ESP)
+ rsa.decrypt = xs->ealg || xs->aead;
+
+ /* get the key and salt */
+ ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+ if (ret) {
+ netdev_err(dev, "Failed to get key data for Rx SA table\n");
+ return ret;
+ }
+
+ /* get ip for rx sa table */
+ if (xs->props.family == AF_INET6)
+ memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
+ else
+ memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);
+
+ /* The HW does not have a 1:1 mapping from keys to IP addrs, so
+ * check for a matching IP addr entry in the table. If the addr
+ * already exists, use it; else find an unused slot and add the
+ * addr. If one does not exist and there are no unused table
+ * entries, fail the request.
+ */
+
+ /* Find an existing match or first not used, and stop looking
+ * after we've checked all we know we have.
+ */
+ checked = 0;
+ match = -1;
+ first = -1;
+ for (i = 0;
+ i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
+ (checked < ipsec->num_rx_sa || first < 0);
+ i++) {
+ if (ipsec->ip_tbl[i].used) {
+ if (!memcmp(ipsec->ip_tbl[i].ipaddr,
+ rsa.ipaddr, sizeof(rsa.ipaddr))) {
+ match = i;
+ break;
+ }
+ checked++;
+ } else if (first < 0) {
+ first = i; /* track the first empty seen */
+ }
+ }
+
+ if (ipsec->num_rx_sa == 0)
+ first = 0;
+
+ if (match >= 0) {
+ /* addrs are the same, we should use this one */
+ rsa.iptbl_ind = match;
+ ipsec->ip_tbl[match].ref_cnt++;
+
+ } else if (first >= 0) {
+ /* no matches, but here's an empty slot */
+ rsa.iptbl_ind = first;
+
+ memcpy(ipsec->ip_tbl[first].ipaddr,
+ rsa.ipaddr, sizeof(rsa.ipaddr));
+ ipsec->ip_tbl[first].ref_cnt = 1;
+ ipsec->ip_tbl[first].used = true;
+
+ ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);
+
+ } else {
+ /* no match and no empty slot */
+ netdev_err(dev, "No space for SA in Rx IP SA table\n");
+ memset(&rsa, 0, sizeof(rsa));
+ return -ENOSPC;
+ }
+
+ rsa.mode = IXGBE_RXMOD_VALID;
+ if (rsa.xs->id.proto == IPPROTO_ESP)
+ rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
+ if (rsa.decrypt)
+ rsa.mode |= IXGBE_RXMOD_DECRYPT;
+ if (rsa.xs->props.family == AF_INET6)
+ rsa.mode |= IXGBE_RXMOD_IPV6;
+
+ /* the preparations worked, so save the info */
+ memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));
+
+ ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
+ rsa.salt, rsa.mode, rsa.iptbl_ind);
+ xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;
+
+ ipsec->num_rx_sa++;
+
+ /* hash the new entry for faster search in Rx path */
+ hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
+ rsa.xs->id.spi);
+ } else {
+ struct tx_sa tsa;
+
+ /* find the first unused index */
+ ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
+ if (ret < 0) {
+ netdev_err(dev, "No space for SA in Tx table\n");
+ return ret;
+ }
+ sa_idx = (u16)ret;
+
+ memset(&tsa, 0, sizeof(tsa));
+ tsa.used = true;
+ tsa.xs = xs;
+
+ if (xs->id.proto == IPPROTO_ESP)
+ tsa.encrypt = xs->ealg || xs->aead;
+
+ ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+ if (ret) {
+ netdev_err(dev, "Failed to get key data for Tx SA table\n");
+ memset(&tsa, 0, sizeof(tsa));
+ return ret;
+ }
+
+ /* the preparations worked, so save the info */
+ memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));
+
+ ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);
+
+ xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;
+
+ ipsec->num_tx_sa++;
+ }
+
+ /* enable the engine if not already warmed up */
+ if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
+ ixgbe_ipsec_start_engine(adapter);
+ adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_ipsec_del_sa - clear out this specific SA
+ * @xs: pointer to transformer state struct
+ **/
+static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
+{
+ struct net_device *dev = xs->xso.dev;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 zerobuf[4] = {0, 0, 0, 0};
+ u16 sa_idx;
+
+ if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ struct rx_sa *rsa;
+ u8 ipi;
+
+ sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
+ rsa = &ipsec->rx_tbl[sa_idx];
+
+ if (!rsa->used) {
+ netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
+ sa_idx, xs->xso.offload_handle);
+ return;
+ }
+
+ ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
+ hash_del_rcu(&rsa->hlist);
+
+ /* if the IP table entry is referenced by only this SA,
+ * i.e. ref_cnt is only 1, clear the IP table entry as well
+ */
+ ipi = rsa->iptbl_ind;
+ if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
+ ipsec->ip_tbl[ipi].ref_cnt--;
+
+ if (!ipsec->ip_tbl[ipi].ref_cnt) {
+ memset(&ipsec->ip_tbl[ipi], 0,
+ sizeof(struct rx_ip_sa));
+ ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf);
+ }
+ }
+
+ memset(rsa, 0, sizeof(struct rx_sa));
+ ipsec->num_rx_sa--;
+ } else {
+ sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
+
+ if (!ipsec->tx_tbl[sa_idx].used) {
+ netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
+ sa_idx, xs->xso.offload_handle);
+ return;
+ }
+
+ ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
+ memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
+ ipsec->num_tx_sa--;
+ }
+
+ /* if there are no SAs left, stop the engine to save energy */
+ if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
+ adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
+ ixgbe_ipsec_stop_engine(adapter);
+ }
+}
+
+/**
+ * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
+ * @skb: current data packet
+ * @xs: pointer to transformer state struct
+ **/
+static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+{
+ if (xs->props.family == AF_INET) {
+ /* Offload with IPv4 options is not supported yet */
+ if (ip_hdr(skb)->ihl != 5)
+ return false;
+ } else {
+ /* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ixgbe_ipsec_free - called by xfrm garbage collections
+ * @xs: pointer to transformer state struct
+ *
+ * We have no garbage to collect, so this function wouldn't need to
+ * exist, but the XFRM code doesn't check whether the callback exists
+ * before calling it.
+ **/
+static void ixgbe_ipsec_free(struct xfrm_state *xs)
+{
+}
+
+static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
+ .xdo_dev_state_add = ixgbe_ipsec_add_sa,
+ .xdo_dev_state_delete = ixgbe_ipsec_del_sa,
+ .xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
+ .xdo_dev_state_free = ixgbe_ipsec_free,
+};
+
+/**
+ * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
+ * @tx_ring: outgoing context
+ * @first: current data packet
+ * @itd: ipsec Tx data for later use in building context descriptor
+ **/
+int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *first,
+ struct ixgbe_ipsec_tx_data *itd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct xfrm_state *xs;
+ struct tx_sa *tsa;
+
+ if (unlikely(!first->skb->sp->len)) {
+ netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
+ __func__, first->skb->sp->len);
+ return 0;
+ }
+
+ xs = xfrm_input_state(first->skb);
+ if (unlikely(!xs)) {
+ netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
+ __func__, xs);
+ return 0;
+ }
+
+ itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
+ if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
+ netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
+ __func__, itd->sa_idx, xs->xso.offload_handle);
+ return 0;
+ }
+
+ tsa = &ipsec->tx_tbl[itd->sa_idx];
+ if (unlikely(!tsa->used)) {
+ netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
+ __func__, itd->sa_idx);
+ return 0;
+ }
+
+ first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;
+
+ itd->flags = 0;
+ if (xs->id.proto == IPPROTO_ESP) {
+ itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ if (first->protocol == htons(ETH_P_IP))
+ itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;
+ itd->trailer_len = xs->props.trailer_len;
+ }
+ if (tsa->encrypt)
+ itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;
+
+ return 1;
+}
+
+/**
+ * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
+ * @rx_ring: receiving ring
+ * @rx_desc: receive data descriptor
+ * @skb: current data packet
+ *
+ * Determine if there was an ipsec encapsulation noticed, and if so set up
+ * the resulting status for later in the receive stack.
+ **/
+void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+ __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
+ IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct xfrm_offload *xo = NULL;
+ struct xfrm_state *xs = NULL;
+ struct ipv6hdr *ip6 = NULL;
+ struct iphdr *ip4 = NULL;
+ void *daddr;
+ __be32 spi;
+ u8 *c_hdr;
+ u8 proto;
+
+ /* Find the ip and crypto headers in the data.
+ * We can assume there is no vlan header in the way: the hw
+ * wouldn't recognize the IPsec packet with one, and vlan
+ * devices don't currently support xfrm offload anyway.
+ */
+ if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
+ ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
+ daddr = &ip4->daddr;
+ c_hdr = (u8 *)ip4 + ip4->ihl * 4;
+ } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
+ ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+ daddr = &ip6->daddr;
+ c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
+ } else {
+ return;
+ }
+
+ switch (pkt_info & ipsec_pkt_types) {
+ case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
+ spi = ((struct ip_auth_hdr *)c_hdr)->spi;
+ proto = IPPROTO_AH;
+ break;
+ case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
+ spi = ((struct ip_esp_hdr *)c_hdr)->spi;
+ proto = IPPROTO_ESP;
+ break;
+ default:
+ return;
+ }
+
+ xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
+ if (unlikely(!xs))
+ return;
+
+ skb->sp = secpath_dup(skb->sp);
+ if (unlikely(!skb->sp))
+ return;
+
+ skb->sp->xvec[skb->sp->len++] = xs;
+ skb->sp->olen++;
+ xo = xfrm_offload(skb);
+ xo->flags = CRYPTO_DONE;
+ xo->status = CRYPTO_SUCCESS;
+
+ adapter->rx_ipsec++;
+}
+
+/**
+ * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
+ * @adapter: board private structure
+ **/
+void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ipsec *ipsec;
+ size_t size;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ return;
+
+ ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
+ if (!ipsec)
+ goto err1;
+ hash_init(ipsec->rx_sa_list);
+
+ size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
+ ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
+ if (!ipsec->rx_tbl)
+ goto err2;
+
+ size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
+ ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
+ if (!ipsec->tx_tbl)
+ goto err2;
+
+ size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
+ ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
+ if (!ipsec->ip_tbl)
+ goto err2;
+
+ ipsec->num_rx_sa = 0;
+ ipsec->num_tx_sa = 0;
+
+ adapter->ipsec = ipsec;
+ ixgbe_ipsec_stop_engine(adapter);
+ ixgbe_ipsec_clear_hw_tables(adapter);
+
+ adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
+ adapter->netdev->features |= NETIF_F_HW_ESP;
+ adapter->netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+ return;
+
+err2:
+ kfree(ipsec->ip_tbl);
+ kfree(ipsec->rx_tbl);
+ kfree(ipsec->tx_tbl);
+err1:
+ kfree(adapter->ipsec);
+ netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
+}
+
+/**
+ * ixgbe_stop_ipsec_offload - tear down the ipsec offload
+ * @adapter: board private structure
+ **/
+void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+
+ adapter->ipsec = NULL;
+ if (ipsec) {
+ kfree(ipsec->ip_tbl);
+ kfree(ipsec->rx_tbl);
+ kfree(ipsec->tx_tbl);
+ kfree(ipsec);
+ }
+}
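
As a standalone illustration (not driver code) of the key layout
ixgbe_ipsec_parse_proto_keys() accepts: for rfc4106(gcm(aes)), a 160-bit
alg_key_len means 16 key bytes followed by a 4-byte salt, taken in the
order they arrive; the names below are local to the sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* split a 20-byte rfc4106 key blob into the AES-128 key and the salt,
 * mirroring the 160-bit branch of ixgbe_ipsec_parse_proto_keys() */
static void split_keymat(const uint8_t blob[20], uint32_t key[4],
			 uint32_t *salt)
{
	memcpy(key, blob, 16);		/* bytes 0-15: key, no byteswap */
	memcpy(salt, blob + 16, 4);	/* bytes 16-19: salt */
}

int main(void)
{
	uint8_t blob[20];
	uint32_t key[4], salt;
	int i;

	for (i = 0; i < 20; i++)
		blob[i] = (uint8_t)(0xa0 + i);
	split_keymat(blob, key, &salt);
	printf("first key byte 0x%02x, first salt byte 0x%02x\n",
	       ((uint8_t *)key)[0], ((uint8_t *)&salt)[0]);
	return 0;
}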
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
new file mode 100644
index 000000000000..da3ce7849e85
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
@@ -0,0 +1,93 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program. If not, see <http://www.gnu.org/licenses/>.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_IPSEC_H_
+#define _IXGBE_IPSEC_H_
+
+#define IXGBE_IPSEC_MAX_SA_COUNT 1024
+#define IXGBE_IPSEC_MAX_RX_IP_COUNT 128
+#define IXGBE_IPSEC_BASE_RX_INDEX 0
+#define IXGBE_IPSEC_BASE_TX_INDEX IXGBE_IPSEC_MAX_SA_COUNT
+
+#define IXGBE_RXTXIDX_IPS_EN 0x00000001
+#define IXGBE_RXIDX_TBL_SHIFT 1
+enum ixgbe_ipsec_tbl_sel {
+ ips_rx_ip_tbl = 0x01,
+ ips_rx_spi_tbl = 0x02,
+ ips_rx_key_tbl = 0x03,
+};
+
+#define IXGBE_RXTXIDX_IDX_SHIFT 3
+#define IXGBE_RXTXIDX_READ 0x40000000
+#define IXGBE_RXTXIDX_WRITE 0x80000000
+
+#define IXGBE_RXMOD_VALID 0x00000001
+#define IXGBE_RXMOD_PROTO_ESP 0x00000004
+#define IXGBE_RXMOD_DECRYPT 0x00000008
+#define IXGBE_RXMOD_IPV6 0x00000010
+
+struct rx_sa {
+ struct hlist_node hlist;
+ struct xfrm_state *xs;
+ __be32 ipaddr[4];
+ u32 key[4];
+ u32 salt;
+ u32 mode;
+ u8 iptbl_ind;
+ bool used;
+ bool decrypt;
+};
+
+struct rx_ip_sa {
+ __be32 ipaddr[4];
+ u32 ref_cnt;
+ bool used;
+};
+
+struct tx_sa {
+ struct xfrm_state *xs;
+ u32 key[4];
+ u32 salt;
+ bool encrypt;
+ bool used;
+};
+
+struct ixgbe_ipsec_tx_data {
+ u32 flags;
+ u16 trailer_len;
+ u16 sa_idx;
+};
+
+struct ixgbe_ipsec {
+ u16 num_rx_sa;
+ u16 num_tx_sa;
+ struct rx_ip_sa *ip_tbl;
+ struct rx_sa *rx_tbl;
+ struct tx_sa *tx_tbl;
+ DECLARE_HASHTABLE(rx_sa_list, 10);
+};
+#endif /* _IXGBE_IPSEC_H_ */
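
The offload_handle handed back to the xfrm layer is simply the SA table
slot plus one of the base offsets above; a standalone sketch of the round
trip, with constants mirroring the header and names local to the sketch:

#include <assert.h>

#define MAX_SA_COUNT	1024		/* mirrors IXGBE_IPSEC_MAX_SA_COUNT */
#define BASE_RX_INDEX	0		/* mirrors IXGBE_IPSEC_BASE_RX_INDEX */
#define BASE_TX_INDEX	MAX_SA_COUNT	/* mirrors IXGBE_IPSEC_BASE_TX_INDEX */

int main(void)
{
	unsigned long rx_handle = 5 + BASE_RX_INDEX;	/* Rx SA in slot 5 */
	unsigned long tx_handle = 7 + BASE_TX_INDEX;	/* Tx SA in slot 7 */

	/* handles below BASE_TX_INDEX refer to the Rx SA table */
	assert(rx_handle < BASE_TX_INDEX);
	assert(rx_handle - BASE_RX_INDEX == 5);
	assert(tx_handle >= BASE_TX_INDEX);
	assert(tx_handle - BASE_TX_INDEX == 7);
	return 0;
}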
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 8e2a957aca18..4242f0213e46 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -46,8 +46,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
int i;
- u16 reg_idx;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u16 reg_idx, pool;
+ u8 tcs = adapter->hw_tcs;
/* verify we have DCB queueing enabled before proceeding */
if (tcs <= 1)
@@ -58,12 +58,16 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
return false;
/* start at VMDq register offset for SR-IOV enabled setups */
+ pool = 0;
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
- for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+ for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
/* If we are greater than indices move to next pool */
- if ((reg_idx & ~vmdq->mask) >= tcs)
+ if ((reg_idx & ~vmdq->mask) >= tcs) {
+ pool++;
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ }
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
}
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
@@ -92,6 +96,7 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = adapter->netdev;
reg_idx++;
}
@@ -111,9 +116,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
unsigned int *tx, unsigned int *rx)
{
- struct net_device *dev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u8 num_tcs = netdev_get_num_tc(dev);
+ u8 num_tcs = adapter->hw_tcs;
*tx = 0;
*rx = 0;
@@ -168,10 +172,9 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
**/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
- struct net_device *dev = adapter->netdev;
+ u8 num_tcs = adapter->hw_tcs;
unsigned int tx_idx, rx_idx;
int tc, offset, rss_i, i;
- u8 num_tcs = netdev_get_num_tc(dev);
/* verify we have DCB queueing enabled before proceeding */
if (num_tcs <= 1)
@@ -184,6 +187,7 @@ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
adapter->tx_ring[offset + i]->reg_idx = tx_idx;
adapter->rx_ring[offset + i]->reg_idx = rx_idx;
+ adapter->rx_ring[offset + i]->netdev = adapter->netdev;
adapter->tx_ring[offset + i]->dcb_tc = tc;
adapter->rx_ring[offset + i]->dcb_tc = tc;
}
@@ -208,14 +212,15 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
+ u16 reg_idx, pool;
int i;
- u16 reg_idx;
/* only proceed if VMDq is enabled */
if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
return false;
/* start at VMDq register offset for SR-IOV enabled setups */
+ pool = 0;
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
@@ -224,15 +229,20 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
break;
#endif
/* If we are greater than indices move to next pool */
- if ((reg_idx & ~vmdq->mask) >= rss->indices)
+ if ((reg_idx & ~vmdq->mask) >= rss->indices) {
+ pool++;
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ }
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
}
#ifdef IXGBE_FCOE
/* FCoE uses a linear block of queues so just assigning 1:1 */
- for (; i < adapter->num_rx_queues; i++, reg_idx++)
+ for (; i < adapter->num_rx_queues; i++, reg_idx++) {
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = adapter->netdev;
+ }
#endif
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
@@ -269,8 +279,10 @@ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
int i, reg_idx;
- for (i = 0; i < adapter->num_rx_queues; i++)
+ for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->rx_ring[i]->reg_idx = i;
+ adapter->rx_ring[i]->netdev = adapter->netdev;
+ }
for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
adapter->tx_ring[i]->reg_idx = reg_idx;
for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
@@ -340,7 +352,7 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
#ifdef IXGBE_FCOE
u16 fcoe_i = 0;
#endif
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
/* verify we have DCB queueing enabled before proceeding */
if (tcs <= 1)
@@ -350,6 +362,9 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
+ /* limit VMDq instances on the PF by number of Tx queues */
+ vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);
+
/* Add starting offset to total pool count */
vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
@@ -437,7 +452,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
int tcs;
/* Map queue offset and counts onto allocated tx queues */
- tcs = netdev_get_num_tc(dev);
+ tcs = adapter->hw_tcs;
/* verify we have DCB queueing enabled before proceeding */
if (tcs <= 1)
@@ -512,12 +527,14 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
#ifdef IXGBE_FCOE
u16 fcoe_i = 0;
#endif
- bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
/* only proceed if SR-IOV is enabled */
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
+ /* limit l2fwd RSS based on total Tx queue limit */
+ rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);
+
/* Add starting offset to total pool count */
vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
@@ -525,7 +542,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
/* 64 pool mode with 2 queues per pool */
- if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
+ if (vmdq_i > 32) {
vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
rss_m = IXGBE_RSS_2Q_MASK;
rss_i = min_t(u16, rss_i, 2);
@@ -602,6 +619,10 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
}
#endif
+ /* populate TC0 for use by pool 0 */
+ netdev_set_tc_queue(adapter->netdev, 0,
+ adapter->num_rx_queues_per_pool, 0);
+
return true;
}
@@ -701,7 +722,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
adapter->num_xdp_queues = 0;
- adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_pools = 1;
adapter->num_rx_queues_per_pool = 1;
#ifdef CONFIG_IXGBE_DCB
@@ -834,7 +855,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
int node = NUMA_NO_NODE;
int cpu = -1;
int ring_count, size;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
ring_count = txr_count + rxr_count + xdp_count;
size = sizeof(struct ixgbe_q_vector) +
@@ -917,11 +938,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count;
- if (adapter->num_rx_pools > 1)
- ring->queue_index =
- txr_idx % adapter->num_rx_queues_per_pool;
- else
- ring->queue_index = txr_idx;
+ ring->queue_index = txr_idx;
/* assign ring to adapter */
adapter->tx_ring[txr_idx] = ring;
@@ -991,11 +1008,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
#endif /* IXGBE_FCOE */
/* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count;
- if (adapter->num_rx_pools > 1)
- ring->queue_index =
- rxr_idx % adapter->num_rx_queues_per_pool;
- else
- ring->queue_index = rxr_idx;
+ ring->queue_index = rxr_idx;
/* assign ring to adapter */
adapter->rx_ring[rxr_idx] = ring;
@@ -1171,7 +1184,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
*/
/* Disable DCB unless we only have a single traffic class */
- if (netdev_get_num_tc(adapter->netdev) > 1) {
+ if (adapter->hw_tcs > 1) {
e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
netdev_reset_tc(adapter->netdev);
@@ -1183,6 +1196,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->dcb_cfg.pfc_mode_enable = false;
}
+ adapter->hw_tcs = 0;
adapter->dcb_cfg.num_tcs.pg_tcs = 1;
adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
@@ -1268,7 +1282,7 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
}
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
- u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
+ u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
struct ixgbe_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
@@ -1282,7 +1296,7 @@ void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+ context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 62a18914f00f..0da5aa2c8aba 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -192,6 +192,13 @@ static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
+static const struct net_device_ops ixgbe_netdev_ops;
+
+static bool netif_is_ixgbe(struct net_device *dev)
+{
+ return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
+}
+
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
{
@@ -1064,24 +1071,12 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
- struct ixgbe_adapter *adapter;
- struct ixgbe_hw *hw;
- u32 head, tail;
+ unsigned int head, tail;
- if (ring->l2_accel_priv)
- adapter = ring->l2_accel_priv->real_adapter;
- else
- adapter = netdev_priv(ring->netdev);
+ head = ring->next_to_clean;
+ tail = ring->next_to_use;
- hw = &adapter->hw;
- head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
- tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
-
- if (head != tail)
- return (head < tail) ?
- tail - head : (tail + ring->count - head);
-
- return 0;
+ return ((head <= tail) ? tail : tail + ring->count) - head;
}
static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
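
The rewritten ixgbe_get_tx_pending() above drops the TDH/TDT register
reads and derives the count purely from the software indices; a standalone
check of the wraparound arithmetic (function name local to the sketch):

#include <assert.h>

/* same expression as the new ixgbe_get_tx_pending() */
static unsigned int pending(unsigned int head, unsigned int tail,
			    unsigned int count)
{
	return ((head <= tail) ? tail : tail + count) - head;
}

int main(void)
{
	assert(pending(2, 5, 8) == 3);	/* no wrap: 5 - 2 */
	assert(pending(6, 1, 8) == 3);	/* tail wrapped: (1 + 8) - 6 */
	assert(pending(4, 4, 8) == 0);	/* ring empty */
	return 0;
}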
@@ -1133,6 +1128,9 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
/**
* ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
+ * @netdev: network interface device structure
+ * @queue_index: Tx queue to set
+ * @maxrate: desired maximum transmit bitrate
**/
static int ixgbe_tx_maxrate(struct net_device *netdev,
int queue_index, u32 maxrate)
@@ -1173,7 +1171,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
unsigned int budget = q_vector->tx.work_limit;
unsigned int i = tx_ring->next_to_clean;
@@ -1204,6 +1202,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
/* update the statistics for this packet */
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
+ if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
+ total_ipsec++;
/* free the skb */
if (ring_is_xdp(tx_ring))
@@ -1266,6 +1266,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
u64_stats_update_end(&tx_ring->syncp);
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+ adapter->tx_ipsec += total_ipsec;
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
@@ -1754,9 +1755,18 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
- skb_record_rx_queue(skb, rx_ring->queue_index);
+ if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
+ ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
skb->protocol = eth_type_trans(skb, dev);
+
+ /* record Rx queue, or update MACVLAN statistics */
+ if (netif_is_ixgbe(dev))
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ else
+ macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
+ (skb->pkt_type == PACKET_BROADCAST) ||
+ (skb->pkt_type == PACKET_MULTICAST));
}
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -1921,10 +1931,13 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
if (IS_ERR(skb))
return true;
- /* verify that the packet does not have any known errors */
- if (unlikely(ixgbe_test_staterr(rx_desc,
- IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
- !(netdev->features & NETIF_F_RXALL))) {
+ /* Verify netdev is present, and that packet does not have any
+ * errors that would be unacceptable to the netdev.
+ */
+ if (!netdev ||
+ (unlikely(ixgbe_test_staterr(rx_desc,
+ IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
+ !(netdev->features & NETIF_F_RXALL)))) {
dev_kfree_skb_any(skb);
return true;
}
@@ -2021,8 +2034,8 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
* ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
+ * @size: size of data in rx_buffer
*
* This function will add the data contained in rx_buffer->page to the skb.
* This is done either through a direct copy if the data in the buffer is
@@ -2318,12 +2331,14 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
bool xdp_xmit = false;
+ struct xdp_buff xdp;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
- struct xdp_buff xdp;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
@@ -2515,13 +2530,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
-enum latency_range {
- lowest_latency = 0,
- low_latency = 1,
- bulk_latency = 2,
- latency_invalid = 255
-};
-
/**
* ixgbe_update_itr - update the dynamic ITR value based on statistics
* @q_vector: structure containing interrupt and ring information
@@ -2534,8 +2542,6 @@ enum latency_range {
* based on theoretical maximum wire speed and thresholds were set based
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
- * this functionality is controlled by the InterruptThrottleRate module
- * parameter (see ixgbe_param.c)
**/
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring_container *ring_container)
@@ -3020,6 +3026,8 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
/**
* ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
+ * @queues: enable irqs for queues
+ * @flush: flush register write
**/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
bool flush)
@@ -3475,6 +3483,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
/**
* ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
+ * @adapter: board private structure
*
**/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
@@ -3586,7 +3595,7 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rttdcs, mtqc;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (hw->mac.type == ixgbe_mac_82598EB)
return;
@@ -3853,16 +3862,20 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
struct ixgbe_hw *hw = &adapter->hw;
u32 vfreta = 0;
- unsigned int pf_pool = adapter->num_vfs;
/* Write redirection table to HW */
for (i = 0; i < reta_entries; i++) {
+ u16 pool = adapter->num_rx_pools;
+
vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
- if ((i & 3) == 3) {
- IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
+ if ((i & 3) != 3)
+ continue;
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
vfreta);
- vfreta = 0;
- }
+ vfreta = 0;
}
}
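
ixgbe_store_vfreta() packs four one-byte redirection entries into each
32-bit register and now replays each finished register once per pool; a
standalone sketch of just the packing step (the register write is stubbed
out with a printf):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t tbl[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	uint32_t reg = 0;
	unsigned int i;

	for (i = 0; i < 8; i++) {
		reg |= (uint32_t)tbl[i] << ((i & 0x3) * 8);
		if ((i & 3) != 3)
			continue;
		/* in the driver this is an IXGBE_PFVFRETA write per pool */
		printf("RETA[%u] = 0x%08x\n", i >> 2, reg);
		reg = 0;
	}
	return 0;
}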
@@ -3899,13 +3912,17 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
- unsigned int pf_pool = adapter->num_vfs;
int i, j;
/* Fill out hash function seeds */
- for (i = 0; i < 10; i++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
- *(adapter->rss_key + i));
+ for (i = 0; i < 10; i++) {
+ u16 pool = adapter->num_rx_pools;
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
+ *(adapter->rss_key + i));
+ }
/* Fill out the redirection table */
for (i = 0, j = 0; i < 64; i++, j++) {
@@ -3933,7 +3950,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
if (adapter->ring_feature[RING_F_RSS].mask)
mrqc = IXGBE_MRQC_RSSEN;
} else {
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
if (tcs > 4)
@@ -3971,7 +3988,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
if ((hw->mac.type >= ixgbe_mac_X550) &&
(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
- unsigned int pf_pool = adapter->num_vfs;
+ u16 pool = adapter->num_rx_pools;
/* Enable VF RSS mode */
mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
@@ -3981,7 +3998,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
ixgbe_setup_vfreta(adapter);
vfmrqc = IXGBE_MRQC_RSSEN;
vfmrqc |= rss_field;
- IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
+
+ while (pool--)
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PFVFMRQC(VMDQ_P(pool)),
+ vfmrqc);
} else {
ixgbe_setup_reta(adapter);
mrqc |= rss_field;
@@ -3991,8 +4012,8 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
/**
* ixgbe_configure_rscctl - enable RSC for the indicated ring
- * @adapter: address of board private structure
- * @index: index of ring to set
+ * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
**/
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
@@ -4112,11 +4133,15 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
rxdctl &= ~0x3FFFFF;
rxdctl |= 0x080420;
#if (PAGE_SIZE < 8192)
- } else {
+ /* RXDCTL.RLPML does not work on 82599 */
+ } else if (hw->mac.type != ixgbe_mac_82599EB) {
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
- /* Limit the maximum frame size so we don't overrun the skb */
+ /* Limit the maximum frame size so we don't overrun the skb.
+ * This can happen in SRIOV mode when the MTU of the VF is
+ * higher than the MTU of the PF.
+ */
if (ring_uses_build_skb(ring) &&
!test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
@@ -4144,7 +4169,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int rss_i = adapter->ring_feature[RING_F_RSS].indices;
- u16 pool;
+ u16 pool = adapter->num_rx_pools;
/* PSRTYPE must be initialized in non 82598 adapters */
u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
@@ -4161,7 +4186,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
else if (rss_i > 1)
psrtype |= 1u << 29;
- for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
+ while (pool--)
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
@@ -4488,8 +4513,9 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (ring->l2_accel_priv)
+ if (!netif_is_ixgbe(ring->netdev))
continue;
+
j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl &= ~IXGBE_RXDCTL_VME;
@@ -4525,8 +4551,9 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (ring->l2_accel_priv)
+ if (!netif_is_ixgbe(ring->netdev))
continue;
+
j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
@@ -4846,9 +4873,11 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
return -ENOMEM;
}
+
/**
* ixgbe_write_uc_addr_list - write unicast addresses to RAR table
* @netdev: network interface device structure
+ * @vfn: pool to associate with unicast addresses
*
* Writes unicast address list to the RAR table.
* Returns: -ENOMEM on failure/insufficient address space
@@ -5195,7 +5224,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int num_tc = netdev_get_num_tc(adapter->netdev);
+ int num_tc = adapter->hw_tcs;
int i;
if (!num_tc)
@@ -5218,7 +5247,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int hdrm;
- u8 tc = netdev_get_num_tc(adapter->netdev);
+ u8 tc = adapter->hw_tcs;
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -5277,29 +5306,6 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}
-static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
-{
- struct ixgbe_adapter *adapter = vadapter->real_adapter;
- int rss_i = adapter->num_rx_queues_per_pool;
- struct ixgbe_hw *hw = &adapter->hw;
- u16 pool = vadapter->pool;
- u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_L2HDR |
- IXGBE_PSRTYPE_IPV6HDR;
-
- if (hw->mac.type == ixgbe_mac_82598EB)
- return;
-
- if (rss_i > 3)
- psrtype |= 2u << 29;
- else if (rss_i > 1)
- psrtype |= 1u << 29;
-
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
-}
-
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
@@ -5352,96 +5358,45 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
rx_ring->next_to_use = 0;
}
-static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
- struct ixgbe_ring *rx_ring)
-{
- struct ixgbe_adapter *adapter = vadapter->real_adapter;
- int index = rx_ring->queue_index + vadapter->rx_base_queue;
-
- /* shutdown specific queue receive and wait for dma to settle */
- ixgbe_disable_rx_queue(adapter, rx_ring);
- usleep_range(10000, 20000);
- ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
- ixgbe_clean_rx_ring(rx_ring);
- rx_ring->l2_accel_priv = NULL;
-}
-
-static int ixgbe_fwd_ring_down(struct net_device *vdev,
- struct ixgbe_fwd_adapter *accel)
-{
- struct ixgbe_adapter *adapter = accel->real_adapter;
- unsigned int rxbase = accel->rx_base_queue;
- unsigned int txbase = accel->tx_base_queue;
- int i;
-
- netif_tx_stop_all_queues(vdev);
-
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
- adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
- }
-
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
- adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
- }
-
-
- return 0;
-}
-
static int ixgbe_fwd_ring_up(struct net_device *vdev,
struct ixgbe_fwd_adapter *accel)
{
struct ixgbe_adapter *adapter = accel->real_adapter;
- unsigned int rxbase, txbase, queues;
- int i, baseq, err = 0;
+ int i, baseq, err;
- if (!test_bit(accel->pool, &adapter->fwd_bitmask))
+ if (!test_bit(accel->pool, adapter->fwd_bitmask))
return 0;
baseq = accel->pool * adapter->num_rx_queues_per_pool;
- netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+ netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
accel->pool, adapter->num_rx_pools,
- baseq, baseq + adapter->num_rx_queues_per_pool,
- adapter->fwd_bitmask);
+ baseq, baseq + adapter->num_rx_queues_per_pool);
accel->netdev = vdev;
- accel->rx_base_queue = rxbase = baseq;
- accel->tx_base_queue = txbase = baseq;
+ accel->rx_base_queue = baseq;
+ accel->tx_base_queue = baseq;
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
- ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+ adapter->rx_ring[baseq + i]->netdev = vdev;
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- adapter->rx_ring[rxbase + i]->netdev = vdev;
- adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
- ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
- }
+ /* Guarantee all rings are updated before we update the
+ * MAC address filter.
+ */
+ wmb();
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- adapter->tx_ring[txbase + i]->netdev = vdev;
- adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
+ /* ixgbe_add_mac_filter will return an index if it succeeds, so we
+ * need to only treat it as an error value if it is negative.
+ */
+ err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
+ VMDQ_P(accel->pool));
+ if (err >= 0) {
+ ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+ return 0;
}
- queues = min_t(unsigned int,
- adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
- err = netif_set_real_num_tx_queues(vdev, queues);
- if (err)
- goto fwd_queue_err;
-
- err = netif_set_real_num_rx_queues(vdev, queues);
- if (err)
- goto fwd_queue_err;
-
- if (is_valid_ether_addr(vdev->dev_addr))
- ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
+ adapter->rx_ring[baseq + i]->netdev = NULL;
- ixgbe_fwd_psrtype(accel);
- ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
- return err;
-fwd_queue_err:
- ixgbe_fwd_ring_down(vdev, accel);
return err;
}
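
Note: ixgbe_fwd_ring_up() above relies on ixgbe_add_mac_filter() returning the RAR index on success and a negative errno on failure, so only negative values are errors. A minimal sketch of that calling convention (illustrative, not the driver's exact code):

	int ret;

	ret = ixgbe_add_mac_filter(adapter, vdev->dev_addr, VMDQ_P(accel->pool));
	if (ret < 0)
		return ret;	/* e.g. -ENOMEM when no RAR slot is free */
	/* ret >= 0 is the RAR slot the address landed in, not an error */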
@@ -5480,6 +5435,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_set_rx_mode(adapter->netdev);
ixgbe_restore_vlan(adapter);
+ ixgbe_ipsec_restore(adapter);
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
@@ -5917,21 +5873,6 @@ static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
spin_unlock(&adapter->fdir_perfect_lock);
}
-static int ixgbe_disable_macvlan(struct net_device *upper, void *data)
-{
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *vlan = netdev_priv(upper);
-
- if (vlan->fwd_priv) {
- netif_tx_stop_all_queues(upper);
- netif_carrier_off(upper);
- netif_tx_disable(upper);
- }
- }
-
- return 0;
-}
-
void ixgbe_down(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -5961,10 +5902,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- /* disable any upper devices */
- netdev_walk_all_upper_dev_rcu(adapter->netdev,
- ixgbe_disable_macvlan, NULL);
-
ixgbe_irq_disable(adapter);
ixgbe_napi_disable_all(adapter);
@@ -6119,6 +6056,7 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
/**
* ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
* @adapter: board private structure to initialize
+ * @ii: pointer to ixgbe_info for device
*
* ixgbe_sw_init initializes the Adapter private data structure.
* Fields are initialized based on PCI device information and
@@ -6153,6 +6091,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_FDIR].limit = fdir;
adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+ adapter->ring_feature[RING_F_VMDQ].limit = 1;
#ifdef CONFIG_IXGBE_DCA
adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
#endif
@@ -6302,7 +6241,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
}
/* PF holds first pool slot */
- set_bit(0, &adapter->fwd_bitmask);
+ set_bit(0, adapter->fwd_bitmask);
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
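
Note: set_bit() now takes adapter->fwd_bitmask directly rather than &adapter->fwd_bitmask because the field is converted elsewhere in this series from a single unsigned long into a real bitmap. Assumed shape of the change (the 64-bit size here is an illustration, not taken from this hunk):

	/* before: at most BITS_PER_LONG pools */
	unsigned long fwd_bitmask;

	/* after: array-backed bitmap, passed to set_bit()/clear_bit()/
	 * find_first_zero_bit() without taking its address
	 */
	DECLARE_BITMAP(fwd_bitmask, 64);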
@@ -6402,6 +6341,7 @@ err_setup_tx:
/**
* ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: pointer to ixgbe_adapter
* @rx_ring: rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
@@ -6444,6 +6384,11 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
+ rx_ring->queue_index) < 0)
+ goto err;
+
rx_ring->xdp_prog = adapter->xdp_prog;
return 0;
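
Note: the xdp_rxq_info_reg() added here must be balanced by the xdp_rxq_info_unreg() added in the ixgbe_free_rx_resources() hunk below. A minimal sketch of the pairing, using a hypothetical ring type:

	#include <net/xdp.h>

	struct my_ring {
		struct xdp_rxq_info xdp_rxq;
	};

	static int my_ring_setup(struct my_ring *r, struct net_device *dev, u32 qidx)
	{
		/* register the Rx queue with the XDP core before first use */
		return xdp_rxq_info_reg(&r->xdp_rxq, dev, qidx);
	}

	static void my_ring_free(struct my_ring *r)
	{
		/* must run before the ring memory is reused or freed */
		xdp_rxq_info_unreg(&r->xdp_rxq);
	}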
@@ -6541,6 +6486,7 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
ixgbe_clean_rx_ring(rx_ring);
rx_ring->xdp_prog = NULL;
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
@@ -6646,20 +6592,12 @@ int ixgbe_open(struct net_device *netdev)
goto err_req_irq;
/* Notify the stack of the actual queue counts. */
- if (adapter->num_rx_pools > 1)
- queues = adapter->num_rx_queues_per_pool;
- else
- queues = adapter->num_tx_queues;
-
+ queues = adapter->num_tx_queues;
err = netif_set_real_num_tx_queues(netdev, queues);
if (err)
goto err_set_queues;
- if (adapter->num_rx_pools > 1 &&
- adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
- queues = IXGBE_MAX_L2A_QUEUES;
- else
- queues = adapter->num_rx_queues;
+ queues = adapter->num_rx_queues;
err = netif_set_real_num_rx_queues(netdev, queues);
if (err)
goto err_set_queues;
@@ -6783,7 +6721,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 ctrl, fctrl;
+ u32 ctrl;
u32 wufc = adapter->wol;
#ifdef CONFIG_PM
int retval = 0;
@@ -6808,18 +6746,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
hw->mac.ops.stop_link_on_d3(hw);
if (wufc) {
+ u32 fctrl;
+
ixgbe_set_rx_mode(netdev);
/* enable the optics for 82599 SFP+ fiber as we can WoL */
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
- /* turn on all-multi mode if wake on multicast is enabled */
- if (wufc & IXGBE_WUFC_MC) {
- fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- fctrl |= IXGBE_FCTRL_MPE;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- }
+ /* enable the reception of multicast packets */
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
ctrl |= IXGBE_CTRL_GIO_DIS;
@@ -7216,7 +7154,6 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_update_link - update the link status
* @adapter: pointer to the device adapter structure
- * @link_speed: pointer to a u32 to store the link_speed
**/
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
@@ -7273,18 +7210,6 @@ static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
#endif
}
-static int ixgbe_enable_macvlan(struct net_device *upper, void *data)
-{
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *vlan = netdev_priv(upper);
-
- if (vlan->fwd_priv)
- netif_tx_wake_all_queues(upper);
- }
-
- return 0;
-}
-
/**
* ixgbe_watchdog_link_is_up - update netif_carrier status and
* print link up message
@@ -7338,6 +7263,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case IXGBE_LINK_SPEED_10GB_FULL:
speed_str = "10 Gbps";
break;
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ speed_str = "5 Gbps";
+ break;
case IXGBE_LINK_SPEED_2_5GB_FULL:
speed_str = "2.5 Gbps";
break;
@@ -7365,12 +7293,6 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
/* enable transmits */
netif_tx_wake_all_queues(adapter->netdev);
- /* enable any upper devices */
- rtnl_lock();
- netdev_walk_all_upper_dev_rcu(adapter->netdev,
- ixgbe_enable_macvlan, NULL);
- rtnl_unlock();
-
/* update the default user priority for VFs */
ixgbe_update_default_up(adapter);
@@ -7655,6 +7577,7 @@ sfp_out:
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ u32 cap_speed;
u32 speed;
bool autoneg = false;
@@ -7667,16 +7590,14 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
- speed = hw->phy.autoneg_advertised;
- if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
- hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
+ hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
- /* setup the highest link when no autoneg */
- if (!autoneg) {
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- speed = IXGBE_LINK_SPEED_10GB_FULL;
- }
- }
+ /* advertise highest capable link speed */
+ if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
+ speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL);
if (hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw, speed, true);
@@ -7688,7 +7609,7 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_service_timer - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list structure
**/
static void ixgbe_service_timer(struct timer_list *t)
{
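
Note: the @t parameter reflects the tree-wide timer conversion: callbacks now receive the struct timer_list pointer and recover their container with from_timer(), a container_of() keyed on the timer field name. Sketch of the assumed body (simplified; the service_timer field name follows the conversion convention):

	static void ixgbe_service_timer(struct timer_list *t)
	{
		struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);

		/* re-arm the timer and kick the service task */
		mod_timer(&adapter->service_timer, HZ * 2 + jiffies);
		ixgbe_service_event_schedule(adapter);
	}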
@@ -7888,10 +7809,12 @@ static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
}
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
- struct ixgbe_tx_buffer *first)
+ struct ixgbe_tx_buffer *first,
+ struct ixgbe_ipsec_tx_data *itd)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
+ u32 fceof_saidx = 0;
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
@@ -7932,7 +7855,12 @@ no_csum:
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
+ if (first->tx_flags & IXGBE_TX_FLAGS_IPSEC) {
+ fceof_saidx |= itd->sa_idx;
+ type_tucmd |= itd->flags | itd->trailer_len;
+ }
+
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
}
#define IXGBE_SET_FLAG(_input, _flag, _result) \
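
Aside: the body of IXGBE_SET_FLAG() is elided by this hunk boundary; the macro translates a flag bit in _input into a differently positioned _result bit without branching, scaling by the constant power-of-two ratio between the two masks. An illustrative equivalent (an assumption based on the macro's use here, not quoted from the source):

	static inline u32 set_flag(u32 input, u32 flag, u32 result)
	{
		return (flag <= result) ? (input & flag) * (result / flag)
					: (input & flag) / (flag / result);
	}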
@@ -7975,11 +7903,16 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
IXGBE_TX_FLAGS_CSUM,
IXGBE_ADVTXD_POPTS_TXSM);
- /* enble IPv4 checksum for TSO */
+ /* enable IPv4 checksum for TSO */
olinfo_status |= IXGBE_SET_FLAG(tx_flags,
IXGBE_TX_FLAGS_IPV4,
IXGBE_ADVTXD_POPTS_IXSM);
+ /* enable IPsec */
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_IPSEC,
+ IXGBE_ADVTXD_POPTS_IPSEC);
+
/*
* Check Context must be set if Tx switch is enabled, which it
* always is for case where virtual functions are running
@@ -8332,14 +8265,19 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
-#ifdef IXGBE_FCOE
struct ixgbe_adapter *adapter;
- struct ixgbe_ring_feature *f;
int txq;
+#ifdef IXGBE_FCOE
+ struct ixgbe_ring_feature *f;
#endif
- if (fwd_adapter)
- return skb->queue_mapping + fwd_adapter->tx_base_queue;
+ if (fwd_adapter) {
+ adapter = netdev_priv(dev);
+ txq = reciprocal_scale(skb_get_hash(skb),
+ adapter->num_rx_queues_per_pool);
+
+ return txq + fwd_adapter->tx_base_queue;
+ }
#ifdef IXGBE_FCOE
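
Note: reciprocal_scale() maps the 32-bit flow hash onto [0, num_rx_queues_per_pool) with a multiply-and-shift instead of a modulo. Its definition in linux/kernel.h is effectively:

	static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
	{
		/* result is always < ep_ro, with no division */
		return (u32)(((u64)val * ep_ro) >> 32);
	}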
@@ -8438,6 +8376,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
u32 tx_flags = 0;
unsigned short f;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
__be16 protocol = skb->protocol;
u8 hdr_len = 0;
@@ -8542,11 +8481,16 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
}
#endif /* IXGBE_FCOE */
+
+#ifdef CONFIG_XFRM_OFFLOAD
+ if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
+ goto out_drop;
+#endif
tso = ixgbe_tso(tx_ring, first, &hdr_len);
if (tso < 0)
goto out_drop;
else if (!tso)
- ixgbe_tx_csum(tx_ring, first);
+ ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
/* add the ATR filter if ATR is on */
if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
@@ -8671,7 +8615,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
/**
* ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
* netdev->dev_addrs
- * @netdev: network interface device structure
+ * @dev: network interface device structure
*
* Returns non-zero on failure
**/
@@ -8695,7 +8639,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
/**
* ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding
* netdev->dev_addrs
- * @netdev: network interface device structure
+ * @dev: network interface device structure
*
* Returns non-zero on failure
**/
@@ -8862,14 +8806,13 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
/**
* ixgbe_setup_tc - configure net_device for multiple traffic classes
*
- * @netdev: net device to configure
+ * @dev: net device to configure
* @tc: number of traffic classes to enable
*/
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- bool pools;
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
@@ -8878,10 +8821,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
return -EINVAL;
- pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
- if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
- return -EBUSY;
-
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
@@ -8898,6 +8837,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
netdev_set_num_tc(dev, tc);
ixgbe_set_prio_tc_map(adapter);
+ adapter->hw_tcs = tc;
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
@@ -8907,10 +8847,19 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
} else {
netdev_reset_tc(dev);
+ /* To support macvlan offload we have to use num_tc to
+ * restrict the queues that can be used by the device.
+ * By doing this we can avoid reporting a false number of
+ * queues.
+ */
+ if (!tc && adapter->num_rx_pools > 1)
+ netdev_set_num_tc(dev, 1);
+
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->hw_tcs = tc;
adapter->temp_dcb_cfg.pfc_mode_enable = false;
adapter->dcb_cfg.pfc_mode_enable = false;
@@ -9044,6 +8993,7 @@ static int get_macvlan_queue(struct net_device *upper, void *_data)
static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
u8 *queue, u64 *action)
{
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
unsigned int num_vfs = adapter->num_vfs, vf;
struct upper_walk_data data;
struct net_device *upper;
@@ -9052,11 +9002,7 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
for (vf = 0; vf < num_vfs; ++vf) {
upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
if (upper->ifindex == ifindex) {
- if (adapter->num_rx_pools > 1)
- *queue = vf * 2;
- else
- *queue = vf * adapter->num_rx_queues_per_pool;
-
+ *queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
*action = vf + 1;
*action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
return 0;
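
Note: __ALIGN_MASK(1, ~vmdq->mask) aligns 1 up to the VMDq pool stride, i.e. it derives queues-per-pool from the ring-feature mask rather than the old hard-coded 2/num_rx_queues_per_pool split. Worked example, assuming an 82599-style 8-queues-per-pool mask of 0x78:

	/* __ALIGN_MASK(x, mask) is (((x) + (mask)) & ~(mask)), linux/kernel.h */
	u32 mask = 0x78;			/* assumed 8Q VMDq mask */
	u32 stride = (1 + ~mask) & mask;	/* 0xffffff88 & 0x78 = 8 */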
@@ -9101,9 +9047,11 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
/* Redirect to a VF or an offloaded macvlan */
if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
+ struct net_device *dev = tcf_mirred_dev(a);
- err = handle_redirect_action(adapter, ifindex, queue,
+ if (!dev)
+ return -EINVAL;
+ err = handle_redirect_action(adapter, dev->ifindex, queue,
action);
if (err == 0)
return err;
@@ -9362,9 +9310,6 @@ free_jump:
static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
struct tc_cls_u32_offload *cls_u32)
{
- if (cls_u32->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_u32->command) {
case TC_CLSU32_NEW_KNODE:
case TC_CLSU32_REPLACE_KNODE:
@@ -9386,7 +9331,7 @@ static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct ixgbe_adapter *adapter = cb_priv;
- if (!tc_can_offload(adapter->netdev))
+ if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
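
Note: tc_cls_can_offload_and_chain0() folds the chain_index test (dropped from ixgbe_setup_tc_cls_u32() in the hunk above) into the offload-capability check. Assumed semantics of the helper:

	/* offload only on offload-capable devices, and only for chain 0 */
	static bool can_offload_and_chain0(struct net_device *dev,
					   struct tc_cls_common_offload *common)
	{
		return tc_can_offload(dev) && !common->chain_index;
	}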
@@ -9444,7 +9389,7 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
struct net_device *netdev = adapter->netdev;
rtnl_lock();
- ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
+ ixgbe_setup_tc(netdev, adapter->hw_tcs);
rtnl_unlock();
}
@@ -9520,7 +9465,7 @@ static int ixgbe_set_features(struct net_device *netdev,
/* We cannot enable ATR if SR-IOV is enabled */
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
/* We cannot enable ATR if we have 2 or more tcs */
- (netdev_get_num_tc(netdev) > 1) ||
+ (adapter->hw_tcs > 1) ||
/* We cannot enable ATR if RSS is disabled */
(adapter->ring_feature[RING_F_RSS].limit <= 1) ||
/* A sample rate of 0 indicates ATR disabled */
@@ -9695,8 +9640,8 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/**
* ixgbe_configure_bridge_mode - set various bridge modes
- * @adapter - the private structure
- * @mode - requested bridge mode
+ * @adapter: the private structure
+ * @mode: requested bridge mode
*
* Configure some settings required for various bridge modes.
**/
@@ -9821,6 +9766,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
struct ixgbe_fwd_adapter *fwd_adapter = NULL;
struct ixgbe_adapter *adapter = netdev_priv(pdev);
int used_pools = adapter->num_vfs + adapter->num_rx_pools;
+ int tcs = adapter->hw_tcs ? : 1;
unsigned int limit;
int pool, err;
@@ -9831,24 +9777,8 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
return ERR_PTR(-EINVAL);
-#ifdef CONFIG_RPS
- if (vdev->num_rx_queues != vdev->num_tx_queues) {
- netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
- vdev->name);
- return ERR_PTR(-EINVAL);
- }
-#endif
- /* Check for hardware restriction on number of rx/tx queues */
- if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
- vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
- netdev_info(pdev,
- "%s: Supports RX/TX Queue counts 1,2, and 4\n",
- pdev->name);
- return ERR_PTR(-EINVAL);
- }
-
if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
+ adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
(adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
return ERR_PTR(-EBUSY);
@@ -9856,53 +9786,68 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
if (!fwd_adapter)
return ERR_PTR(-ENOMEM);
- pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
- adapter->num_rx_pools++;
- set_bit(pool, &adapter->fwd_bitmask);
- limit = find_last_bit(&adapter->fwd_bitmask, 32);
+ pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+ set_bit(pool, adapter->fwd_bitmask);
+ limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
/* Enable VMDq flag so device will be set in VM mode */
adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
- adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
- /* Force reinit of ring allocation with VMDQ enabled */
- err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
- if (err)
- goto fwd_add_err;
fwd_adapter->pool = pool;
fwd_adapter->real_adapter = adapter;
- if (netif_running(pdev)) {
+ /* Force reinit of ring allocation with VMDQ enabled */
+ err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+
+ if (!err && netif_running(pdev))
err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
- if (err)
- goto fwd_add_err;
- netif_tx_start_all_queues(vdev);
- }
- return fwd_adapter;
-fwd_add_err:
+ if (!err)
+ return fwd_adapter;
+
/* unwind counter and free adapter struct */
netdev_info(pdev,
"%s: dfwd hardware acceleration failed\n", vdev->name);
- clear_bit(pool, &adapter->fwd_bitmask);
- adapter->num_rx_pools--;
+ clear_bit(pool, adapter->fwd_bitmask);
kfree(fwd_adapter);
return ERR_PTR(err);
}
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
- struct ixgbe_fwd_adapter *fwd_adapter = priv;
- struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
- unsigned int limit;
+ struct ixgbe_fwd_adapter *accel = priv;
+ struct ixgbe_adapter *adapter = accel->real_adapter;
+ unsigned int rxbase = accel->rx_base_queue;
+ unsigned int limit, i;
+
+ /* delete unicast filter associated with offloaded interface */
+ ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
+ VMDQ_P(accel->pool));
+
+ /* disable ability to receive packets for this pool */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0);
- clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
- adapter->num_rx_pools--;
+ /* Allow remaining Rx packets to get flushed out of the
+ * Rx FIFO before we drop the netdev for the ring.
+ */
+ usleep_range(10000, 20000);
- limit = find_last_bit(&adapter->fwd_bitmask, 32);
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
+ struct ixgbe_q_vector *qv = ring->q_vector;
+
+ /* Make sure we aren't processing any packets and clear
+ * netdev to shut down the ring.
+ */
+ if (netif_running(adapter->netdev))
+ napi_synchronize(&qv->napi);
+ ring->netdev = NULL;
+ }
+
+ clear_bit(accel->pool, adapter->fwd_bitmask);
+ limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
- ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
/* go back to full RSS if we're done with our VMQs */
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
@@ -9914,13 +9859,13 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
adapter->ring_feature[RING_F_RSS].limit = rss;
}
- ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
- netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
- fwd_adapter->pool, adapter->num_rx_pools,
- fwd_adapter->rx_base_queue,
- fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
- adapter->fwd_bitmask);
- kfree(fwd_adapter);
+ ixgbe_setup_tc(pdev, adapter->hw_tcs);
+ netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
+ accel->pool, adapter->num_rx_pools,
+ accel->rx_base_queue,
+ accel->rx_base_queue +
+ adapter->num_rx_queues_per_pool);
+ kfree(accel);
}
#define IXGBE_MAX_MAC_HDR_LEN 127
@@ -9954,6 +9899,12 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
features &= ~NETIF_F_TSO;
+#ifdef CONFIG_XFRM_OFFLOAD
+ /* IPsec offload doesn't get along well with others *yet* */
+ if (skb->sp)
+ features &= ~(NETIF_F_TSO | NETIF_F_HW_CSUM);
+#endif
+
return features;
}
@@ -9987,7 +9938,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
/* If transitioning XDP modes reconfigure rings */
if (!!prog != !!old_prog) {
- int err = ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+ int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
if (err) {
rcu_assign_pointer(adapter->xdp_prog, old_prog);
@@ -10164,7 +10115,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
* ixgbe_wol_supported - Check whether device supports WoL
* @adapter: the adapter private structure
* @device_id: the device ID
- * @subdev_id: the subsystem device ID
+ * @subdevice_id: the subsystem device ID
*
* This function is used by probe and ethtool to determine
* which devices have WoL support
@@ -10233,6 +10184,41 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
}
/**
+ * ixgbe_set_fw_version - Set FW version
+ * @adapter: the adapter private structure
+ *
+ * This function is used by probe and ethtool to determine the FW version
+ * format to display. The FW version is taken from the EEPROM/NVM.
+ */
+static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+ if (nvm_ver.oem_valid) {
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
+ nvm_ver.oem_release);
+ return;
+ }
+
+ ixgbe_get_etk_id(hw, &nvm_ver);
+ ixgbe_get_orom_version(hw, &nvm_ver);
+
+ if (nvm_ver.or_valid) {
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
+ nvm_ver.or_build, nvm_ver.or_patch);
+ return;
+ }
+
+ /* Set ETrack ID format */
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "0x%08x", nvm_ver.etk_id);
+}
+
+/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in ixgbe_pci_tbl
@@ -10508,6 +10494,7 @@ skip_sriov:
NETIF_F_FCOE_MTU;
}
#endif /* IXGBE_FCOE */
+ ixgbe_init_ipsec_offload(adapter);
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
netdev->hw_features |= NETIF_F_LRO;
@@ -10568,8 +10555,7 @@ skip_sriov:
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
/* save off EEPROM version number */
- hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
- hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
+ ixgbe_set_fw_version(adapter);
/* pick up the PCI bus settings for reporting later */
if (ixgbe_pcie_from_parent(hw))
@@ -10744,6 +10730,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ ixgbe_stop_ipsec_offload(adapter);
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 654a402f0e9e..91bde90f9265 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -378,7 +378,7 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
/**
* ixgbe_get_phy_type_from_id - Get the phy type
- * @hw: pointer to hardware structure
+ * @phy_id: hardware phy id
*
**/
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
@@ -489,6 +489,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
* the SWFW lock
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
@@ -564,6 +565,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* using the SWFW lock - this function is needed in most cases
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
@@ -763,6 +765,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
+ * @autoneg_wait_to_complete: unused
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
@@ -861,6 +864,8 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
/**
* ixgbe_check_phy_link_tnx - Determine link and speed status
* @hw: pointer to hardware structure
+ * @speed: link speed
+ * @link_up: status of link
*
* Reads the VS1 register to determine if link is up and the current speed for
* the PHY.
@@ -1667,7 +1672,7 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
* @hw: pointer to hardware structure
* @byte_offset: byte offset at address 0xA2
- * @eeprom_data: value read
+ * @sff8472_data: value read
*
* Performs byte read operation to SFP module's SFF-8472 data over I2C
**/
@@ -1714,6 +1719,7 @@ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
* ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: device address
* @data: value read
* @lock: true if to take and release semaphore
*
@@ -1804,6 +1810,7 @@ fail:
* ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: device address
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -1820,6 +1827,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: device address
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -1836,6 +1844,7 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: device address
* @data: value to write
* @lock: true if to take and release semaphore
*
@@ -1904,6 +1913,7 @@ fail:
* ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: device address
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
@@ -1920,6 +1930,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: device address
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ae312c45696a..f6cc9166082a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -166,7 +166,7 @@
/**
* ixgbe_ptp_setup_sdp_x540
- * @hw: the hardware private structure
+ * @adapter: private adapter structure
*
* this function enables or disables the clock out feature on SDP0 for
* the X540 device. It will create a 1-second periodic output that can
@@ -299,7 +299,7 @@ static u64 ixgbe_ptp_read_82599(const struct cyclecounter *cc)
* ixgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp
* @adapter: private adapter structure
* @hwtstamp: stack timestamp structure
- * @systim: unsigned 64bit system time value
+ * @timestamp: unsigned 64bit system time value
*
* We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value
* which can be used by the stack's ptp functions.
@@ -1015,7 +1015,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
/**
* ixgbe_ptp_set_ts_config - user entry point for timestamp mode
* @adapter: pointer to adapter struct
- * @ifreq: ioctl data
+ * @ifr: ioctl data
*
* Set hardware to requested mode. If unsupported, return an error with no
* changes. Otherwise, store the mode for future reference.
@@ -1338,7 +1338,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
/**
* ixgbe_ptp_suspend - stop PTP work items
- * @ adapter: pointer to adapter struct
+ * @adapter: pointer to adapter struct
*
* this function suspends PTP activity, and prevents more PTP work from being
* generated, but does not destroy the PTP clock device.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 112d24c6c9ce..27a70a52f3c9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -78,12 +78,9 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
int i;
- adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
-
/* Enable VMDq flag so device will be set in VM mode */
- adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
- if (!adapter->ring_feature[RING_F_VMDQ].limit)
- adapter->ring_feature[RING_F_VMDQ].limit = 1;
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED;
/* Allocate memory for per VF control structures */
adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
@@ -227,9 +224,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 gpie;
- u32 vmdctl;
int rss;
/* set num VFs to 0 to prevent access to vfinfo */
@@ -271,18 +265,6 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
pci_disable_sriov(adapter->pdev);
#endif
- /* turn off device IOV mode */
- IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
- gpie &= ~IXGBE_GPIE_VTMODE_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
- /* set default pool back to 0 */
- vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
- IXGBE_WRITE_FLUSH(hw);
-
/* Disable VMDq flag so device will be set in VM mode */
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
@@ -305,10 +287,9 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
- int err = 0;
- u8 num_tc;
- int i;
int pre_existing_vfs = pci_num_vf(dev);
+ int err = 0, num_rx_pools, i, limit;
+ u8 num_tc;
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
err = ixgbe_disable_sriov(adapter);
@@ -330,23 +311,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* than we have available pools. The PCI bus driver already checks for
* other values out of range.
*/
- num_tc = netdev_get_num_tc(adapter->netdev);
-
- if (num_tc > 4) {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
- return -EPERM;
- }
- } else if ((num_tc > 1) && (num_tc <= 4)) {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
- return -EPERM;
- }
- } else {
- if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
- e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
- return -EPERM;
- }
+ num_tc = adapter->hw_tcs;
+ num_rx_pools = adapter->num_rx_pools;
+ limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
+ (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
+
+ if (num_vfs > (limit - num_rx_pools)) {
+ e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
+ num_tc, num_rx_pools - 1, limit - num_rx_pools);
+ return -EPERM;
}
err = __ixgbe_enable_sriov(adapter, num_vfs);
@@ -378,13 +351,15 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
int err;
#ifdef CONFIG_PCI_IOV
u32 current_flags = adapter->flags;
+ int prev_num_vf = pci_num_vf(dev);
#endif
err = ixgbe_disable_sriov(adapter);
/* Only reinit if no error and state changed */
#ifdef CONFIG_PCI_IOV
- if (!err && current_flags != adapter->flags)
+ if (!err && (current_flags != adapter->flags ||
+ prev_num_vf != pci_num_vf(dev)))
ixgbe_sriov_reinit(adapter);
#endif
@@ -738,7 +713,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
- u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+ u8 num_tcs = adapter->hw_tcs;
/* remove VLAN filters belonging to this VF */
ixgbe_clear_vf_vlans(adapter, vf);
@@ -946,7 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
{
u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
- u8 tcs = netdev_get_num_tc(adapter->netdev);
+ u8 tcs = adapter->hw_tcs;
if (adapter->vfinfo[vf].pf_vlan || tcs) {
e_warn(drv,
@@ -1034,7 +1009,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
struct net_device *dev = adapter->netdev;
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
unsigned int default_tc = 0;
- u8 num_tcs = netdev_get_num_tc(dev);
+ u8 num_tcs = adapter->hw_tcs;
/* verify the PF is supporting the correct APIs */
switch (adapter->vfinfo[vf].vf_api) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index ffa0ee5cd0f5..ca45359686d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -235,6 +235,45 @@ struct ixgbe_thermal_sensor_data {
struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
};
+#define NVM_OROM_OFFSET 0x17
+#define NVM_OROM_BLK_LOW 0x83
+#define NVM_OROM_BLK_HI 0x84
+#define NVM_OROM_PATCH_MASK 0xFF
+#define NVM_OROM_SHIFT 8
+
+#define NVM_VER_MASK 0x00FF /* version mask */
+#define NVM_VER_SHIFT 8 /* version bit shift */
+#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */
+#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */
+#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */
+#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */
+#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */
+#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */
+#define NVM_ETK_OFF_LOW 0x2D /* version low order word */
+#define NVM_ETK_OFF_HI 0x2E /* version high order word */
+#define NVM_ETK_SHIFT 16 /* high version word shift */
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETK_VALID 0x8000
+#define NVM_INVALID_PTR 0xFFFF
+#define NVM_VER_SIZE 32 /* version string size */
+
+struct ixgbe_nvm_version {
+ u32 etk_id;
+ u8 nvm_major;
+ u16 nvm_minor;
+ u8 nvm_id;
+
+ bool oem_valid;
+ u8 oem_major;
+ u8 oem_minor;
+ u16 oem_release;
+
+ bool or_valid;
+ u8 or_major;
+ u16 or_build;
+ u8 or_patch;
+};
+
/* Interrupt Registers */
#define IXGBE_EICR 0x00800
#define IXGBE_EICS 0x00808
@@ -2321,11 +2360,6 @@ enum {
#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
-#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000
-#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
/* Multiple Transmit Queue Command Register */
#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
@@ -2377,6 +2411,9 @@ enum {
#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_IPSEC_INV_PROTOCOL 0x08000000 /* overlap ERR_PE */
+#define IXGBE_RXDADV_ERR_IPSEC_INV_LENGTH 0x10000000 /* overlap ERR_OSE */
+#define IXGBE_RXDADV_ERR_IPSEC_AUTH_FAILED 0x18000000
#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
@@ -2398,6 +2435,7 @@ enum {
#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE 1588 Time Stamp */
+#define IXGBE_RXDADV_STAT_SECP 0x00020000 /* IPsec/MACsec pkt found */
/* PSRTYPE bit definitions */
#define IXGBE_PSRTYPE_TCPHDR 0x00000010
@@ -2464,13 +2502,6 @@ enum {
#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
-/* Security Processing bit Indication */
-#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000
-#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
-#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
-
/* Masks to determine if packets should be dropped due to frame errors */
#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
IXGBE_RXD_ERR_CE | \
@@ -2484,6 +2515,8 @@ enum {
IXGBE_RXDADV_ERR_LE | \
IXGBE_RXDADV_ERR_PE | \
IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_IPSEC_INV_PROTOCOL | \
+ IXGBE_RXDADV_ERR_IPSEC_INV_LENGTH | \
IXGBE_RXDADV_ERR_USE)
/* Multicast bit mask */
@@ -2862,7 +2895,7 @@ union ixgbe_adv_rx_desc {
/* Context descriptors */
struct ixgbe_adv_tx_context_desc {
__le32 vlan_macip_lens;
- __le32 seqnum_seed;
+ __le32 fceof_saidx;
__le32 type_tucmd_mlhl;
__le32 mss_l4len_idx;
};
@@ -2893,6 +2926,7 @@ struct ixgbe_adv_tx_context_desc {
IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
@@ -2908,7 +2942,6 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */
#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/
-#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index cb7da5f9c4da..f470d0204771 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -949,7 +949,7 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
u16 length, bufsz, i, start;
u16 *local_buffer;
- bufsz = sizeof(buf) / sizeof(buf[0]);
+ bufsz = ARRAY_SIZE(buf);
/* Read a chunk at the pointer location */
if (!buffer) {
@@ -1642,10 +1642,12 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
}
/**
- * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP.
- * @hw: pointer to hardware structure
+ * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP.
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ * @autoneg_wait_to_complete: unused
*
- * Configures the extern PHY and the integrated KR PHY for SFP support.
+ * Configures the extern PHY and the integrated KR PHY for SFP support.
*/
static s32
ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
@@ -1737,6 +1739,8 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
/**
* ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP
* @hw: pointer to hardware structure
+ * @speed: link speed
+ * @autoneg_wait_to_complete: unused
*
* Configure the integrated PHY for native SFP support.
*/
@@ -1784,6 +1788,8 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/**
* ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
* @hw: pointer to hardware structure
+ * @speed: link speed
+ * @autoneg_wait_to_complete: unused
*
* Configure the integrated PHY for SFP support.
*/
@@ -1859,7 +1865,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg_wait_to_complete: true when waiting for completion is needed
+ * @autoneg_wait: true when waiting for completion is needed
*
* Setup internal/external PHY link speed based on link speed, then set
* external PHY auto advertised link speed.
@@ -1943,6 +1949,8 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
/**
* ixgbe_setup_sgmii - Set up link for sgmii
* @hw: pointer to hardware structure
+ * @speed: unused
+ * @autoneg_wait_to_complete: unused
*/
static s32
ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
@@ -2014,6 +2022,8 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
/**
* ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs
* @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ * @autoneg_wait: true when waiting for completion is needed
*/
static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait)
@@ -3735,6 +3745,7 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* ixgbe_read_phy_reg_x550a - Reads specified PHY register
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
*
* Reads a value from a specified PHY register using the SWFW lock and PHY
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index ff9d05f308ee..4400e49090b4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -75,6 +75,9 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
IXGBEVF_NETDEV_STAT(multicast),
IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
+ IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
+ IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
+ IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
};
#define IXGBEVF_QUEUE_STATS_LEN ( \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 581f44bbd7b3..f6952425c87d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -62,7 +62,12 @@ struct ixgbevf_tx_buffer {
struct ixgbevf_rx_buffer {
dma_addr_t dma;
struct page *page;
- unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
};
struct ixgbevf_stats {
@@ -79,6 +84,7 @@ struct ixgbevf_tx_queue_stats {
struct ixgbevf_rx_queue_stats {
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
+ u64 alloc_rx_page;
u64 csum_err;
};
@@ -260,6 +266,9 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+#define IXGBEVF_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
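
Note: DMA_ATTR_SKIP_CPU_SYNC makes map and unmap skip the implicit cache sync, leaving the driver to sync exactly the region it touches (the dma_sync_single_range_for_device()/_for_cpu() calls added in ixgbevf_main.c below). A hedged sketch of the split:

	static dma_addr_t map_rx_page(struct device *dev, struct page *page)
	{
		/* map once for the page lifetime; no sync at map time */
		return dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
					  DMA_FROM_DEVICE,
					  DMA_ATTR_SKIP_CPU_SYNC |
					  DMA_ATTR_WEAK_ORDERING);
	}

	static void give_to_device(struct device *dev, dma_addr_t dma,
				   unsigned int off, unsigned int len)
	{
		/* hand just this buffer region back to the hardware */
		dma_sync_single_range_for_device(dev, dma, off, len,
						 DMA_FROM_DEVICE);
	}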
/* board specific private data structure */
struct ixgbevf_adapter {
/* this field must be first, see ixgbevf_process_skb_fields */
@@ -287,8 +296,9 @@ struct ixgbevf_adapter {
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
int num_msix_vectors;
- u32 alloc_rx_page_failed;
- u32 alloc_rx_buff_failed;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+ u64 alloc_rx_page;
struct msix_entry *msix_entries;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1f4a69134ade..9b3d43d28106 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -206,28 +206,6 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
}
}
-static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
- struct ixgbevf_tx_buffer *tx_buffer)
-{
- if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
- if (dma_unmap_len(tx_buffer, len))
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buffer, len)) {
- dma_unmap_page(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- }
- tx_buffer->next_to_watch = NULL;
- tx_buffer->skb = NULL;
- dma_unmap_len_set(tx_buffer, len, 0);
- /* tx_buffer must be completely set up in the transmit path */
-}
-
static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
return ring->stats.packets;
@@ -349,7 +327,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
DMA_TO_DEVICE);
/* clear tx_buffer data */
- tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
/* unmap remaining buffers */
@@ -554,7 +531,6 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
* ixgbevf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: current socket buffer containing buffer in progress
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -596,8 +572,8 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
@@ -605,13 +581,15 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
if (dma_mapping_error(rx_ring->dev, dma)) {
__free_page(page);
- rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
+ bi->pagecnt_bias = 1;
+ rx_ring->rx_stats.alloc_rx_page++;
return true;
}
@@ -640,6 +618,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
break;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ IXGBEVF_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if pkt_addr didn't change
* because each write-back erases this info.
*/
@@ -654,8 +638,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
i -= rx_ring->count;
}
- /* clear the hdr_addr for the next_to_use descriptor */
- rx_desc->read.hdr_addr = 0;
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
@@ -742,12 +726,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
new_buff->page = old_buff->page;
new_buff->dma = old_buff->dma;
new_buff->page_offset = old_buff->page_offset;
-
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
- new_buff->page_offset,
- IXGBEVF_RX_BUFSZ,
- DMA_FROM_DEVICE);
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
static inline bool ixgbevf_page_is_reserved(struct page *page)
@@ -755,6 +734,45 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
+static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
+ struct page *page,
+ const unsigned int truesize)
+{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
+ /* avoid re-using remote pages */
+ if (unlikely(ixgbevf_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_ref_count(page) != pagecnt_bias))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
+
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+ return false;
+
+#endif
+
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
+
+ return true;
+}
+
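
Note: pagecnt_bias implements deferred page refcounting: one real page_ref_add() buys a large batch of logical references, and each Rx frag handed to the stack consumes a unit of the local bias instead of costing an atomic refcount operation. The reuse test above depends on this invariant (sketch, not driver code):

	/* the driver is the sole owner iff the real refcount equals the bias */
	static bool page_is_exclusively_ours(struct page *page, u16 pagecnt_bias)
	{
		return page_ref_count(page) == pagecnt_bias;
	}

When the bias is nearly drained, page_ref_add(page, USHRT_MAX) restocks USHRT_MAX references in a single atomic operation.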
/**
* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
@@ -772,12 +790,12 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
**/
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *rx_buffer,
+ u16 size,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
unsigned char *va = page_address(page) + rx_buffer->page_offset;
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
@@ -796,7 +814,6 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
return true;
/* this page cannot be reused so discard it */
- put_page(page);
return false;
}
@@ -816,32 +833,7 @@ add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
(unsigned long)va & ~PAGE_MASK, size, truesize);
- /* avoid re-using remote pages */
- if (unlikely(ixgbevf_page_is_reserved(page)))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
-
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
-
-#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
-
- if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
- return false;
-
-#endif
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- page_ref_inc(page);
-
- return true;
+ return ixgbevf_can_reuse_rx_page(rx_buffer, page, truesize);
}
static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
@@ -850,11 +842,19 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
{
struct ixgbevf_rx_buffer *rx_buffer;
struct page *page;
+ u16 size = le16_to_cpu(rx_desc->wb.upper.length);
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
page = rx_buffer->page;
prefetchw(page);
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
if (likely(!skb)) {
void *page_addr = page_address(page) +
rx_buffer->page_offset;
@@ -880,21 +880,18 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
prefetchw(skb->data);
}
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- IXGBEVF_RX_BUFSZ,
- DMA_FROM_DEVICE);
-
/* pull page into skb */
- if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
/* hand second half of page back to the ring */
ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ /* We are not reusing the buffer so unmap it and free
+ * any references we are holding to it
+ */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE,
+ IXGBEVF_RX_DMA_ATTR);
+ __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
}
/* clear contents of buffer_info */
@@ -931,7 +928,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
- if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+ if (!rx_desc->wb.upper.length)
break;
/* This memory barrier is needed to keep us from reading
@@ -944,8 +941,10 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
/* exit if we failed to retrieve a buffer */
- if (!skb)
+ if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
break;
+ }
cleaned_count++;
@@ -1554,6 +1553,10 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
txdctl |= (1u << 8) | /* HTHRESH = 1 */
32; /* PTHRESH = 32 */
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct ixgbevf_tx_buffer) * ring->count);
+
clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
@@ -1722,6 +1725,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
struct ixgbevf_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
+ union ixgbe_adv_rx_desc *rx_desc;
u64 rdba = ring->dma;
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
@@ -1750,6 +1754,14 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct ixgbevf_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = IXGBEVF_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
/* reset ntu and ntc to place SW in sync with hardware */

ring->next_to_clean = 0;
ring->next_to_use = 0;
@@ -1896,10 +1908,6 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
unsigned int flags = netdev->flags;
int xcast_mode;
- xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
- (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
- IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
-
/* request the most inclusive mode we need */
if (flags & IFF_PROMISC)
xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
@@ -2108,9 +2116,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
- unsigned long size;
- unsigned int i;
+ u16 i = rx_ring->next_to_clean;
/* Free Rx ring sk_buff */
if (rx_ring->skb) {
@@ -2118,29 +2124,39 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
rx_ring->skb = NULL;
}
- /* ring already cleared, nothing to do */
- if (!rx_ring->rx_buffer_info)
- return;
-
/* Free all the Rx ring pages */
- for (i = 0; i < rx_ring->count; i++) {
+ while (i != rx_ring->next_to_alloc) {
struct ixgbevf_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[i];
- if (rx_buffer->dma)
- dma_unmap_page(dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
- rx_buffer->dma = 0;
- if (rx_buffer->page)
- __free_page(rx_buffer->page);
- rx_buffer->page = NULL;
- }
- size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_buffer_info, 0, size);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IXGBEVF_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev,
+ rx_buffer->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ IXGBEVF_RX_DMA_ATTR);
+
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
}
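
The rewritten cleanup no longer scans the whole ring; it walks only the slots between next_to_clean and next_to_alloc, wrapping at the ring boundary. The traversal pattern in isolation, with invented indices:

#include <stdio.h>

#define RING_COUNT 8u

int main(void)
{
	unsigned int next_to_clean = 6, next_to_alloc = 3;
	unsigned int i = next_to_clean;

	/* Visits only the populated slots, wrapping past the end. */
	while (i != next_to_alloc) {
		printf("freeing slot %u\n", i);
		i++;
		if (i == RING_COUNT)
			i = 0;
	}
	return 0;
}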
/**
@@ -2149,23 +2165,57 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
**/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
- struct ixgbevf_tx_buffer *tx_buffer_info;
- unsigned long size;
- unsigned int i;
+ u16 i = tx_ring->next_to_clean;
+ struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
- if (!tx_ring->tx_buffer_info)
- return;
+ while (i != tx_ring->next_to_use) {
+ union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
- /* Free all the Tx ring sk_buffs */
- for (i = 0; i < tx_ring->count; i++) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* check for eop_desc to determine the end of the packet */
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
}
- size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
- memset(tx_ring->tx_buffer_info, 0, size);
+ /* reset next_to_use and next_to_clean */
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
- memset(tx_ring->desc, 0, tx_ring->size);
}
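
Tx cleanup now walks packet by packet: each head slot owns the skb and the header mapping, and next_to_watch marks the end-of-packet descriptor whose trailing slots hold only fragment mappings. A simplified standalone model of that walk (ring layout and indices are made up):

#include <stdio.h>

#define RING 8u

struct tx_buf { unsigned int eop; }; /* heads record their eop slot */

int main(void)
{
	/* Two packets queued: head at 1 with eop 3, head at 4 with eop 4. */
	struct tx_buf ring[RING] = { [1] = { 3 }, [4] = { 4 } };
	unsigned int i = 1, next_to_use = 5;

	while (i != next_to_use) {
		unsigned int eop = ring[i].eop;

		printf("free skb and unmap header at %u\n", i);
		while (i != eop) {           /* unmap remaining frag slots */
			if (++i == RING)
				i = 0;
			printf("  unmap frag at %u\n", i);
		}
		if (++i == RING)             /* step past eop to next head */
			i = 0;
	}
	return 0;
}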
/**
@@ -2717,6 +2767,8 @@ out:
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+ u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
int i;
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
@@ -2737,15 +2789,23 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
adapter->stats.vfmprc);
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->hw_csum_rx_error +=
- adapter->rx_ring[i]->hw_csum_rx_error;
- adapter->rx_ring[i]->hw_csum_rx_error = 0;
+ struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
+
+ hw_csum_rx_error += rx_ring->rx_stats.csum_err;
+ alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+ alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+ alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
}
+
+ adapter->hw_csum_rx_error = hw_csum_rx_error;
+ adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+ adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+ adapter->alloc_rx_page = alloc_rx_page;
}
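
Accumulating into locals and publishing once means each adapter-wide counter is written with a single store rather than read-modify-written per ring. In miniature:

#include <stdio.h>

struct ring_stats { unsigned long long csum_err; };

int main(void)
{
	struct ring_stats rings[4] = { { 3 }, { 0 }, { 7 }, { 1 } };
	unsigned long long csum_err = 0;

	/* Sum into a local... */
	for (int i = 0; i < 4; i++)
		csum_err += rings[i].csum_err;

	/* ...then publish the total with one store. */
	printf("hw_csum_rx_error = %llu\n", csum_err);
	return 0;
}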
/**
* ixgbevf_service_timer - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list struct
**/
static void ixgbevf_service_timer(struct timer_list *t)
{
@@ -2888,7 +2948,7 @@ static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_watchdog_subtask - worker thread to bring link up
- * @work: pointer to work_struct containing our data
+ * @adapter: board private structure
**/
static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
{
@@ -2985,7 +3045,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
int size;
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vzalloc(size);
+ tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
@@ -3045,7 +3105,7 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
int size;
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vzalloc(size);
+ rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
@@ -3487,34 +3547,37 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
const u8 hdr_len)
{
- dma_addr_t dma;
struct sk_buff *skb = first->skb;
struct ixgbevf_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int data_len = skb->data_len;
- unsigned int size = skb_headlen(skb);
- unsigned int paylen = skb->len - hdr_len;
+ struct skb_frag_struct *frag;
+ dma_addr_t dma;
+ unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
- __le32 cmd_type;
+ __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
u16 i = tx_ring->next_to_use;
tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
- cmd_type = ixgbevf_tx_cmd_type(tx_flags);
+ ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
- /* record length, and DMA address */
- dma_unmap_len_set(first, len, size);
- dma_unmap_addr_set(first, dma, dma);
+ tx_buffer = first;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- for (;;) {
while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len =
cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
@@ -3525,12 +3588,12 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
dma += IXGBE_MAX_DATA_PER_TXD;
size -= IXGBE_MAX_DATA_PER_TXD;
tx_desc->read.buffer_addr = cpu_to_le64(dma);
- tx_desc->read.olinfo_status = 0;
}
if (likely(!data_len))
@@ -3544,23 +3607,15 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
size = skb_frag_size(frag);
data_len -= size;
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
tx_buffer = &tx_ring->tx_buffer_info[i];
- dma_unmap_len_set(tx_buffer, len, size);
- dma_unmap_addr_set(tx_buffer, dma, dma);
-
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
- tx_desc->read.olinfo_status = 0;
-
- frag++;
}
/* write last descriptor with RS and EOP bits */
@@ -3594,18 +3649,32 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
return;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
+ tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
- for (;;) {
+ while (tx_buffer != first) {
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ if (i-- == 0)
+ i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
- if (tx_buffer == first)
- break;
- if (i == 0)
- i = tx_ring->count;
- i--;
}
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+
tx_ring->next_to_use = i;
}
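
The dma_error path unwinds backwards from the failure point to the packet head, using a post-decrement to wrap an unsigned index below zero. The wrap idiom on its own, with an invented failure point:

#include <stdio.h>

#define RING 4u

int main(void)
{
	unsigned int i = 1, first = 3; /* invented failure point and head */

	while (i != first) {
		printf("unmap frag at slot %u\n", i);
		/* i-- yields the old value; on 0 the unsigned index
		 * wraps, and adding RING lands on the last slot.
		 */
		if (i-- == 0)
			i += RING;
	}
	printf("unmap header and free skb at slot %u\n", i);
	return 0;
}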
@@ -4368,6 +4437,7 @@ static void __exit ixgbevf_exit_module(void)
/**
* ixgbevf_get_hw_dev_name - return device name string
* used by hardware layer to print debugging information
+ * @hw: pointer to private hardware struct
**/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c25006ce9af..38d3a327c1bc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -146,6 +146,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
/**
* Hyper-V variant; the VF/PF communication is through the PCI
* config space.
+ * @hw: pointer to private hardware struct
*/
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
@@ -285,7 +286,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
ether_addr_copy(msg_addr, addr);
ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
+ ARRAY_SIZE(msgbuf));
if (!ret_val) {
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -303,7 +304,7 @@ static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
/**
* ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
- * @adapter: pointer to the port handle
+ * @hw: pointer to hardware structure
* @reta: buffer to fill with RETA contents.
* @num_rx_queues: Number of Rx queues configured for this port
*
@@ -455,8 +456,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
ether_addr_copy(msg_addr, addr);
ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
-
+ ARRAY_SIZE(msgbuf));
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* if nacked the address was rejected, use "perm_addr" */
@@ -536,6 +536,8 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @netdev: unused
*/
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
struct net_device *netdev)
@@ -571,7 +573,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
msgbuf[1] = xcast_mode;
err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
+ ARRAY_SIZE(msgbuf));
if (err)
return err;
@@ -584,6 +586,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @xcast_mode: unused
*/
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
@@ -609,7 +613,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
+ ARRAY_SIZE(msgbuf));
if (err)
goto mbx_err;
@@ -626,6 +630,10 @@ mbx_err:
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @vlan: unused
+ * @vind: unused
+ * @vlan_on: unused
*/
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
@@ -655,7 +663,7 @@ static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
* @hw: pointer to hardware structure
* @speed: pointer to link speed
* @link_up: true if link is up, false otherwise
- * @autoneg_wait_to_complete: true when waiting for completion is needed
+ * @autoneg_wait_to_complete: unused
*
* Reads the links register to determine if link is up and the current speed
**/
@@ -740,6 +748,10 @@ out:
/**
* Hyper-V variant; there is no mailbox communication.
+ * @hw: pointer to private hardware struct
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: unused
*/
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
@@ -813,7 +825,7 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
msgbuf[1] = max_size;
ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
- sizeof(msgbuf) / sizeof(u32));
+ ARRAY_SIZE(msgbuf));
if (ret_val)
return ret_val;
if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
@@ -859,8 +871,7 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
msg[1] = api;
msg[2] = 0;
- err = ixgbevf_write_msg_read_ack(hw, msg, msg,
- sizeof(msg) / sizeof(u32));
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -911,8 +922,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
msg[0] = IXGBE_VF_GET_QUEUE;
msg[1] = msg[2] = msg[3] = msg[4] = 0;
- err = ixgbevf_write_msg_read_ack(hw, msg, msg,
- sizeof(msg) / sizeof(u32));
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
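
ARRAY_SIZE(msgbuf) is preferred over the open-coded sizeof division because it derives the element size from the array itself (the kernel's version additionally rejects pointers via __must_be_array). A userspace rendering of the simpler half of that:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	unsigned int msgbuf[3]; /* stand-in for a mailbox buffer */

	/* Equal today, but only ARRAY_SIZE() stays correct if the
	 * element type of msgbuf ever changes.
	 */
	printf("%zu %zu\n", sizeof(msgbuf) / sizeof(unsigned int),
	       ARRAY_SIZE(msgbuf));
	return 0;
}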
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index da6fb825afea..ebe5c9148935 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -60,7 +60,7 @@ config MVNETA
depends on ARCH_MVEBU || COMPILE_TEST
depends on HAS_DMA
select MVMDIO
- select FIXED_PHY
+ select PHYLINK
---help---
This driver supports the network interface units in the
Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index bc93b69cfd1e..25e9a551cc8c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -28,7 +28,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
-#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
@@ -189,6 +189,7 @@
#define MVNETA_GMAC_CTRL_0 0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
+#define MVNETA_GMAC0_PORT_1000BASE_X BIT(1)
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
@@ -204,13 +205,19 @@
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
+#define MVNETA_GMAC_AN_COMPLETE BIT(11)
+#define MVNETA_GMAC_SYNC_OK BIT(14)
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
+#define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3)
+#define MVNETA_GMAC_INBAND_RESTART_AN BIT(4)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
+#define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8)
+#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
@@ -237,6 +244,12 @@
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+#define MVNETA_LPI_CTRL_0 0x2cc0
+#define MVNETA_LPI_CTRL_1 0x2cc4
+#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
+#define MVNETA_LPI_CTRL_2 0x2cc8
+#define MVNETA_LPI_STATUS 0x2ccc
+
#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
/* Descriptor ring Macros */
@@ -313,6 +326,11 @@
#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
+enum {
+ ETHTOOL_STAT_EEE_WAKEUP,
+ ETHTOOL_MAX_STATS,
+};
+
struct mvneta_statistic {
unsigned short offset;
unsigned short type;
@@ -321,6 +339,7 @@ struct mvneta_statistic {
#define T_REG_32 32
#define T_REG_64 64
+#define T_SW 1
static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x3000, T_REG_64, "good_octets_received", },
@@ -355,6 +374,7 @@ static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x304c, T_REG_32, "broadcast_frames_sent", },
{ 0x3054, T_REG_32, "fc_sent", },
{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
+ { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
};
struct mvneta_pcpu_stats {
@@ -407,20 +427,20 @@ struct mvneta_port {
u16 tx_ring_size;
u16 rx_ring_size;
- struct mii_bus *mii_bus;
phy_interface_t phy_interface;
- struct device_node *phy_node;
- unsigned int link;
- unsigned int duplex;
- unsigned int speed;
+ struct device_node *dn;
unsigned int tx_csum_limit;
- unsigned int use_inband_status:1;
+ struct phylink *phylink;
struct mvneta_bm *bm_priv;
struct mvneta_bm_pool *pool_long;
struct mvneta_bm_pool *pool_short;
int bm_win_id;
+ bool eee_enabled;
+ bool eee_active;
+ bool tx_lpi_enabled;
+
u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
@@ -1273,44 +1293,6 @@ static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
-static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
-{
- u32 val;
-
- if (enable) {
- val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
- MVNETA_GMAC_FORCE_LINK_DOWN |
- MVNETA_GMAC_AN_FLOW_CTRL_EN);
- val |= MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_AN_SPEED_EN |
- MVNETA_GMAC_AN_DUPLEX_EN;
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-
- val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
- val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
- } else {
- val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_AN_SPEED_EN |
- MVNETA_GMAC_AN_DUPLEX_EN);
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-
- val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
- val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
-
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
- }
-}
-
static void mvneta_percpu_unmask_interrupt(void *arg)
{
struct mvneta_port *pp = arg;
@@ -1463,7 +1445,6 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
val &= ~MVNETA_PHY_POLLING_ENABLE;
mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
- mvneta_set_autoneg(pp, pp->use_inband_status);
mvneta_set_ucast_table(pp, -1);
mvneta_set_special_mcast_table(pp, -1);
mvneta_set_other_mcast_table(pp, -1);
@@ -1958,9 +1939,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
if (!mvneta_rxq_desc_is_first_last(rx_status) ||
(rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+ mvneta_rx_error(pp, rx_desc);
err_drop_frame:
dev->stats.rx_errors++;
- mvneta_rx_error(pp, rx_desc);
/* leave the descriptor untouched */
continue;
}
@@ -2688,26 +2669,11 @@ static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mvneta_fixed_link_update(struct mvneta_port *pp,
- struct phy_device *phy)
+static void mvneta_link_change(struct mvneta_port *pp)
{
- struct fixed_phy_status status;
- struct fixed_phy_status changed = {};
u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
- status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
- if (gmac_stat & MVNETA_GMAC_SPEED_1000)
- status.speed = SPEED_1000;
- else if (gmac_stat & MVNETA_GMAC_SPEED_100)
- status.speed = SPEED_100;
- else
- status.speed = SPEED_10;
- status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
- changed.link = 1;
- changed.speed = 1;
- changed.duplex = 1;
- fixed_phy_update_state(phy, &status, &changed);
- return 0;
+ phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
}
/* NAPI handler
@@ -2723,7 +2689,6 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
u32 cause_rx_tx;
int rx_queue;
struct mvneta_port *pp = netdev_priv(napi->dev);
- struct net_device *ndev = pp->dev;
struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
if (!netif_running(pp->dev)) {
@@ -2737,12 +2702,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
- if (pp->use_inband_status && (cause_misc &
- (MVNETA_CAUSE_PHY_STATUS_CHANGE |
- MVNETA_CAUSE_LINK_CHANGE |
- MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
- mvneta_fixed_link_update(pp, ndev->phydev);
- }
+
+ if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE))
+ mvneta_link_change(pp);
}
/* Release Tx descriptors */
@@ -3011,7 +2974,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
int queue;
- for (queue = 0; queue < txq_number; queue++)
+ for (queue = 0; queue < rxq_number; queue++)
mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}
@@ -3056,7 +3019,6 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
static void mvneta_start_dev(struct mvneta_port *pp)
{
int cpu;
- struct net_device *ndev = pp->dev;
mvneta_max_rx_size_set(pp, pp->pkt_size);
mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -3081,19 +3043,17 @@ static void mvneta_start_dev(struct mvneta_port *pp)
mvreg_write(pp, MVNETA_INTR_MISC_MASK,
MVNETA_CAUSE_PHY_STATUS_CHANGE |
- MVNETA_CAUSE_LINK_CHANGE |
- MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ MVNETA_CAUSE_LINK_CHANGE);
- phy_start(ndev->phydev);
+ phylink_start(pp->phylink);
netif_tx_start_all_queues(pp->dev);
}
static void mvneta_stop_dev(struct mvneta_port *pp)
{
unsigned int cpu;
- struct net_device *ndev = pp->dev;
- phy_stop(ndev->phydev);
+ phylink_stop(pp->phylink);
if (!pp->neta_armada3700) {
for_each_online_cpu(cpu) {
@@ -3247,103 +3207,260 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
return 0;
}
-static void mvneta_adjust_link(struct net_device *ndev)
+static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ /* We only support QSGMII, SGMII, 802.3z and RGMII modes */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_QSGMII &&
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(state->interface) &&
+ !phy_interface_mode_is_rgmii(state->interface)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ /* Allow all the expected bits */
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+
+ /* Asymmetric pause is unsupported */
+ phylink_set(mask, Pause);
+ /* Half-duplex at speeds higher than 100Mbit is unsupported */
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+
+ if (!phy_interface_mode_is_8023z(state->interface)) {
+ /* 10M and 100M are only supported in non-802.3z mode */
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ }
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
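
The validate callback builds a mask of what the MAC can do in the requested interface mode and intersects both the supported and advertising bitmaps with it. The same intersection step in a toy model (bit assignments here are invented, not real ethtool link-mode indices):

#include <stdio.h>

enum { M_10_FULL = 1 << 0, M_100_FULL = 1 << 1,
       M_1000_FULL = 1 << 2, M_PAUSE = 1 << 3 };

int main(void)
{
	unsigned int mask = M_1000_FULL | M_PAUSE; /* 802.3z: no 10/100 */
	unsigned int supported = M_10_FULL | M_100_FULL |
				 M_1000_FULL | M_PAUSE;
	unsigned int advertising = M_100_FULL | M_1000_FULL;

	supported &= mask;   /* what we report as supported */
	advertising &= mask; /* what we actually advertise */
	printf("supported=%#x advertising=%#x\n", supported, advertising);
	return 0;
}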
+
+static int mvneta_mac_link_state(struct net_device *ndev,
+ struct phylink_link_state *state)
{
struct mvneta_port *pp = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
- int status_change = 0;
+ u32 gmac_stat;
- if (phydev->link) {
- if ((pp->speed != phydev->speed) ||
- (pp->duplex != phydev->duplex)) {
- u32 val;
+ gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
- val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
- MVNETA_GMAC_CONFIG_GMII_SPEED |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+ if (gmac_stat & MVNETA_GMAC_SPEED_1000)
+ state->speed = SPEED_1000;
+ else if (gmac_stat & MVNETA_GMAC_SPEED_100)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
- if (phydev->duplex)
- val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+ state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
+ state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
+ state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
- if (phydev->speed == SPEED_1000)
- val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
- else if (phydev->speed == SPEED_100)
- val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+ state->pause = 0;
+ if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
+ state->pause |= MLO_PAUSE_RX;
+ if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
+ state->pause |= MLO_PAUSE_TX;
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ return 1;
+}
- pp->duplex = phydev->duplex;
- pp->speed = phydev->speed;
- }
+static void mvneta_mac_an_restart(struct net_device *ndev)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+ gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+ gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
+}
+
+static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+ u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+ u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+
+ new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
+ new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
+ MVNETA_GMAC2_PORT_RESET);
+ new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_INBAND_RESTART_AN |
+ MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
+ MVNETA_GMAC_CONFIG_FLOW_CTRL |
+ MVNETA_GMAC_AN_FLOW_CTRL_EN |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX |
+ MVNETA_GMAC_AN_DUPLEX_EN);
+
+ /* Even though it might look weird, when we're configured in
+ * SGMII or QSGMII mode, the RGMII bit needs to be set.
+ */
+ new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
+
+ if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
+ state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(state->interface))
+ new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
+
+ if (phylink_test(state->advertising, Pause))
+ new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
+ if (state->pause & MLO_PAUSE_TXRX_MASK)
+ new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
+ if (!phylink_autoneg_inband(mode)) {
+ /* Phy or fixed speed */
+ if (state->duplex)
+ new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ if (state->speed == SPEED_1000)
+ new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+ else if (state->speed == SPEED_100)
+ new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
+ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII mode receives the state from the PHY */
+ new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
+ new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+ MVNETA_GMAC_FORCE_LINK_PASS)) |
+ MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN;
+ } else {
+ /* 802.3z negotiation - only 1000base-X */
+ new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
+ new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+ MVNETA_GMAC_FORCE_LINK_PASS)) |
+ MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ /* The MAC only supports FD mode */
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ if (state->pause & MLO_PAUSE_AN && state->an_enabled)
+ new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
}
- if (phydev->link != pp->link) {
- if (!phydev->link) {
- pp->duplex = -1;
- pp->speed = 0;
- }
+ /* Armada 370 documentation says we can only change the port mode
+ * and in-band enable when the link is down, so force it down
+ * while making these changes. We also do this for GMAC_CTRL2.
+ */
+ if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
+ (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
+ (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+ (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
+ MVNETA_GMAC_FORCE_LINK_DOWN);
+ }
- pp->link = phydev->link;
- status_change = 1;
+ if (new_ctrl0 != gmac_ctrl0)
+ mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
+ if (new_ctrl2 != gmac_ctrl2)
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
+ if (new_clk != gmac_clk)
+ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
+ if (new_an != gmac_an)
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
+
+ if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
+ while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
+ MVNETA_GMAC2_PORT_RESET) != 0)
+ continue;
}
+}
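
mac_config computes a new value for every GMAC register up front and only touches the hardware when a value actually changed, which also keeps the forced-link-down sequencing out of the common no-change case. A minimal model of that read-modify-write-if-changed pattern (reg[] stands in for MMIO):

#include <stdio.h>

static unsigned int reg[2] = { 0x0040, 0x1000 };

static void write_if_changed(int idx, unsigned int new)
{
	if (reg[idx] != new) {
		printf("reg%d: %#06x -> %#06x\n", idx, reg[idx], new);
		reg[idx] = new;
	}
}

int main(void)
{
	unsigned int new0 = reg[0] & ~0x0040u; /* clear a mode bit */
	unsigned int new1 = reg[1];            /* left untouched */

	new0 |= 0x0002;            /* set another bit */
	write_if_changed(0, new0); /* exactly one write */
	write_if_changed(1, new1); /* skipped: no change */
	return 0;
}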
- if (status_change) {
- if (phydev->link) {
- if (!pp->use_inband_status) {
- u32 val = mvreg_read(pp,
- MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
- val |= MVNETA_GMAC_FORCE_LINK_PASS;
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
- val);
- }
- mvneta_port_up(pp);
- } else {
- if (!pp->use_inband_status) {
- u32 val = mvreg_read(pp,
- MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
- val |= MVNETA_GMAC_FORCE_LINK_DOWN;
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
- val);
- }
- mvneta_port_down(pp);
- }
- phy_print_status(phydev);
+static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
+{
+ u32 lpi_ctl1;
+
+ lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+ if (enable)
+ lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
+ else
+ lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
+ mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
+}
+
+static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val;
+
+ mvneta_port_down(pp);
+
+ if (!phylink_autoneg_inband(mode)) {
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+ val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
+
+ pp->eee_active = false;
+ mvneta_set_eee(pp, false);
}
-static int mvneta_mdio_probe(struct mvneta_port *pp)
+static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode,
+ struct phy_device *phy)
{
- struct phy_device *phy_dev;
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val;
- phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
- pp->phy_interface);
- if (!phy_dev) {
- netdev_err(pp->dev, "could not find the PHY\n");
- return -ENODEV;
+ if (!phylink_autoneg_inband(mode)) {
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+ val |= MVNETA_GMAC_FORCE_LINK_PASS;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
- phy_ethtool_get_wol(phy_dev, &wol);
- device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+ mvneta_port_up(pp);
- phy_dev->supported &= PHY_GBIT_FEATURES;
- phy_dev->advertising = phy_dev->supported;
+ if (phy && pp->eee_enabled) {
+ pp->eee_active = phy_init_eee(phy, 0) >= 0;
+ mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
+ }
+}
- pp->link = 0;
- pp->duplex = 0;
- pp->speed = 0;
+static const struct phylink_mac_ops mvneta_phylink_ops = {
+ .validate = mvneta_validate,
+ .mac_link_state = mvneta_mac_link_state,
+ .mac_an_restart = mvneta_mac_an_restart,
+ .mac_config = mvneta_mac_config,
+ .mac_link_down = mvneta_mac_link_down,
+ .mac_link_up = mvneta_mac_link_up,
+};
- return 0;
+static int mvneta_mdio_probe(struct mvneta_port *pp)
+{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
+
+ if (err)
+ netdev_err(pp->dev, "could not attach PHY: %d\n", err);
+
+ phylink_ethtool_get_wol(pp->phylink, &wol);
+ device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+
+ return err;
}
static void mvneta_mdio_remove(struct mvneta_port *pp)
{
- struct net_device *ndev = pp->dev;
-
- phy_disconnect(ndev->phydev);
+ phylink_disconnect_phy(pp->phylink);
}
/* Electing a CPU must be done in an atomic way: it should be done
@@ -3451,8 +3568,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
mvreg_write(pp, MVNETA_INTR_MISC_MASK,
MVNETA_CAUSE_PHY_STATUS_CHANGE |
- MVNETA_CAUSE_LINK_CHANGE |
- MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ MVNETA_CAUSE_LINK_CHANGE);
netif_tx_start_all_queues(pp->dev);
spin_unlock(&pp->lock);
return 0;
@@ -3493,8 +3609,7 @@ static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
mvreg_write(pp, MVNETA_INTR_MISC_MASK,
MVNETA_CAUSE_PHY_STATUS_CHANGE |
- MVNETA_CAUSE_LINK_CHANGE |
- MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ MVNETA_CAUSE_LINK_CHANGE);
netif_tx_start_all_queues(pp->dev);
return 0;
}
@@ -3622,10 +3737,9 @@ static int mvneta_stop(struct net_device *dev)
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- if (!dev->phydev)
- return -ENOTSUPP;
+ struct mvneta_port *pp = netdev_priv(dev);
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
+ return phylink_mii_ioctl(pp->phylink, ifr, cmd);
}
/* Ethtool methods */
@@ -3636,44 +3750,25 @@ mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd)
{
struct mvneta_port *pp = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
-
- if (!phydev)
- return -ENODEV;
-
- if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
- u32 val;
-
- mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE);
-
- if (cmd->base.autoneg == AUTONEG_DISABLE) {
- val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
- MVNETA_GMAC_CONFIG_GMII_SPEED |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX);
- if (phydev->duplex)
- val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
-
- if (phydev->speed == SPEED_1000)
- val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
- else if (phydev->speed == SPEED_100)
- val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+ return phylink_ethtool_ksettings_set(pp->phylink, cmd);
+}
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
- }
+/* Get link ksettings for ethtools */
+static int
+mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct mvneta_port *pp = netdev_priv(ndev);
- pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE);
- netdev_info(pp->dev, "autoneg status set to %i\n",
- pp->use_inband_status);
+ return phylink_ethtool_ksettings_get(pp->phylink, cmd);
+}
- if (netif_running(ndev)) {
- mvneta_port_down(pp);
- mvneta_port_up(pp);
- }
- }
+static int mvneta_ethtool_nway_reset(struct net_device *dev)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
- return phy_ethtool_ksettings_set(ndev->phydev, cmd);
+ return phylink_ethtool_nway_reset(pp->phylink);
}
/* Set interrupt coalescing for ethtools */
@@ -3765,6 +3860,22 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
return 0;
}
+static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(pp->phylink, pause);
+}
+
+static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(pp->phylink, pause);
+}
+
static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
u8 *data)
{
@@ -3781,26 +3892,35 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
const struct mvneta_statistic *s;
void __iomem *base = pp->base;
- u32 high, low, val;
- u64 val64;
+ u32 high, low;
+ u64 val;
int i;
for (i = 0, s = mvneta_statistics;
s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
s++, i++) {
+ val = 0;
+
switch (s->type) {
case T_REG_32:
val = readl_relaxed(base + s->offset);
- pp->ethtool_stats[i] += val;
break;
case T_REG_64:
/* Docs say to read low 32-bit then high */
low = readl_relaxed(base + s->offset);
high = readl_relaxed(base + s->offset + 4);
- val64 = (u64)high << 32 | low;
- pp->ethtool_stats[i] += val64;
+ val = (u64)high << 32 | low;
+ break;
+ case T_SW:
+ switch (s->offset) {
+ case ETHTOOL_STAT_EEE_WAKEUP:
+ val = phylink_get_eee_err(pp->phylink);
+ break;
+ }
break;
}
+
+ pp->ethtool_stats[i] += val;
}
}
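
Per the driver's comment, the 64-bit MIB counters must be read low word first; the two halves are then recombined. In isolation (register values invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t low = 0x89abcdef;  /* readl_relaxed(base + off)     */
	uint32_t high = 0x01234567; /* readl_relaxed(base + off + 4) */
	uint64_t val = (uint64_t)high << 32 | low;

	printf("counter = %#018llx\n", (unsigned long long)val);
	return 0;
}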
@@ -3935,28 +4055,81 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
static void mvneta_ethtool_get_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
- wol->supported = 0;
- wol->wolopts = 0;
+ struct mvneta_port *pp = netdev_priv(dev);
- if (dev->phydev)
- phy_ethtool_get_wol(dev->phydev, wol);
+ phylink_ethtool_get_wol(pp->phylink, wol);
}
static int mvneta_ethtool_set_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
+ struct mvneta_port *pp = netdev_priv(dev);
int ret;
- if (!dev->phydev)
- return -EOPNOTSUPP;
-
- ret = phy_ethtool_set_wol(dev->phydev, wol);
+ ret = phylink_ethtool_set_wol(pp->phylink, wol);
if (!ret)
device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
return ret;
}
+static int mvneta_ethtool_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ return phylink_ethtool_get_module_info(pp->phylink, modinfo);
+}
+
+static int mvneta_ethtool_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *buf)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ return phylink_ethtool_get_module_eeprom(pp->phylink, ee, buf);
+}
+
+static int mvneta_ethtool_get_eee(struct net_device *dev,
+ struct ethtool_eee *eee)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ u32 lpi_ctl0;
+
+ lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+
+ eee->eee_enabled = pp->eee_enabled;
+ eee->eee_active = pp->eee_active;
+ eee->tx_lpi_enabled = pp->tx_lpi_enabled;
+ eee->tx_lpi_timer = lpi_ctl0 >> 8;
+
+ return phylink_ethtool_get_eee(pp->phylink, eee);
+}
+
+static int mvneta_ethtool_set_eee(struct net_device *dev,
+ struct ethtool_eee *eee)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ u32 lpi_ctl0;
+
+ /* The Armada 37x documents do not give limits for this other than
+ * it being an 8-bit register.
+ */
+ if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
+ return -EINVAL;
+
+ lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+ lpi_ctl0 &= ~(0xff << 8);
+ lpi_ctl0 |= eee->tx_lpi_timer << 8;
+ mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
+
+ pp->eee_enabled = eee->eee_enabled;
+ pp->tx_lpi_enabled = eee->tx_lpi_enabled;
+
+ mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
+
+ return phylink_ethtool_set_eee(pp->phylink, eee);
+}
+
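
The tx_lpi_timer lives in bits 15:8 of LPI_CTRL_0, so the setter clears that field and inserts the new 8-bit value. The field update by itself, with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int lpi_ctl0 = 0x0000a5ff; /* pretend register value */
	unsigned int timer = 0x40;          /* new tx_lpi_timer, <= 255 */

	lpi_ctl0 &= ~(0xffu << 8); /* clear bits 15:8 */
	lpi_ctl0 |= timer << 8;    /* insert the timer */
	printf("LPI_CTRL_0 = %#010x\n", lpi_ctl0); /* 0x000040ff */
	return 0;
}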
static const struct net_device_ops mvneta_netdev_ops = {
.ndo_open = mvneta_open,
.ndo_stop = mvneta_stop,
@@ -3970,13 +4143,15 @@ static const struct net_device_ops mvneta_netdev_ops = {
};
static const struct ethtool_ops mvneta_eth_tool_ops = {
- .nway_reset = phy_ethtool_nway_reset,
+ .nway_reset = mvneta_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.set_coalesce = mvneta_ethtool_set_coalesce,
.get_coalesce = mvneta_ethtool_get_coalesce,
.get_drvinfo = mvneta_ethtool_get_drvinfo,
.get_ringparam = mvneta_ethtool_get_ringparam,
.set_ringparam = mvneta_ethtool_set_ringparam,
+ .get_pauseparam = mvneta_ethtool_get_pauseparam,
+ .set_pauseparam = mvneta_ethtool_set_pauseparam,
.get_strings = mvneta_ethtool_get_strings,
.get_ethtool_stats = mvneta_ethtool_get_stats,
.get_sset_count = mvneta_ethtool_get_sset_count,
@@ -3984,10 +4159,14 @@ static const struct ethtool_ops mvneta_eth_tool_ops = {
.get_rxnfc = mvneta_ethtool_get_rxnfc,
.get_rxfh = mvneta_ethtool_get_rxfh,
.set_rxfh = mvneta_ethtool_set_rxfh,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .get_link_ksettings = mvneta_ethtool_get_link_ksettings,
.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
.get_wol = mvneta_ethtool_get_wol,
.set_wol = mvneta_ethtool_set_wol,
+ .get_module_info = mvneta_ethtool_get_module_info,
+ .get_module_eeprom = mvneta_ethtool_get_module_eeprom,
+ .get_eee = mvneta_ethtool_get_eee,
+ .set_eee = mvneta_ethtool_set_eee,
};
/* Initialize hw */
@@ -4087,42 +4266,16 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
- u32 ctrl;
-
/* MAC Cause register should be cleared */
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
- ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
- /* Even though it might look weird, when we're configured in
- * SGMII or QSGMII mode, the RGMII bit needs to be set.
- */
- switch(phy_mode) {
- case PHY_INTERFACE_MODE_QSGMII:
+ if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
- ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
- break;
- case PHY_INTERFACE_MODE_SGMII:
+ else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_1000BASEX)
mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
- ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- ctrl |= MVNETA_GMAC2_PORT_RGMII;
- break;
- default:
+ else if (!phy_interface_mode_is_rgmii(phy_mode))
return -EINVAL;
- }
-
- /* Cancel Port Reset */
- ctrl &= ~MVNETA_GMAC2_PORT_RESET;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
-
- while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
- MVNETA_GMAC2_PORT_RESET) != 0)
- continue;
return 0;
}
@@ -4132,14 +4285,13 @@ static int mvneta_probe(struct platform_device *pdev)
{
struct resource *res;
struct device_node *dn = pdev->dev.of_node;
- struct device_node *phy_node;
struct device_node *bm_node;
struct mvneta_port *pp;
struct net_device *dev;
+ struct phylink *phylink;
const char *dt_mac_addr;
char hw_mac_addr[ETH_ALEN];
const char *mac_from;
- const char *managed;
int tx_csum_limit;
int phy_mode;
int err;
@@ -4155,31 +4307,18 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_free_netdev;
}
- phy_node = of_parse_phandle(dn, "phy", 0);
- if (!phy_node) {
- if (!of_phy_is_fixed_link(dn)) {
- dev_err(&pdev->dev, "no PHY specified\n");
- err = -ENODEV;
- goto err_free_irq;
- }
-
- err = of_phy_register_fixed_link(dn);
- if (err < 0) {
- dev_err(&pdev->dev, "cannot register fixed PHY\n");
- goto err_free_irq;
- }
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- phy_node = of_node_get(dn);
- }
-
phy_mode = of_get_phy_mode(dn);
if (phy_mode < 0) {
dev_err(&pdev->dev, "incorrect phy-mode\n");
err = -EINVAL;
- goto err_put_phy_node;
+ goto err_free_irq;
+ }
+
+ phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
+ &mvneta_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_free_irq;
}
dev->tx_queue_len = MVNETA_MAX_TXD;
@@ -4190,12 +4329,9 @@ static int mvneta_probe(struct platform_device *pdev)
pp = netdev_priv(dev);
spin_lock_init(&pp->lock);
- pp->phy_node = phy_node;
+ pp->phylink = phylink;
pp->phy_interface = phy_mode;
-
- err = of_property_read_string(dn, "managed", &managed);
- pp->use_inband_status = (err == 0 &&
- strcmp(managed, "in-band-status") == 0);
+ pp->dn = dn;
pp->rxq_def = rxq_def;
@@ -4217,7 +4353,7 @@ static int mvneta_probe(struct platform_device *pdev)
pp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pp->clk)) {
err = PTR_ERR(pp->clk);
- goto err_put_phy_node;
+ goto err_free_phylink;
}
clk_prepare_enable(pp->clk);
@@ -4354,14 +4490,6 @@ static int mvneta_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pp->dev);
- if (pp->use_inband_status) {
- struct phy_device *phy = of_phy_find_device(dn);
-
- mvneta_fixed_link_update(pp, phy);
-
- put_device(&phy->mdio.dev);
- }
-
return 0;
err_netdev:
@@ -4378,10 +4506,9 @@ err_free_ports:
err_clk:
clk_disable_unprepare(pp->clk_bus);
clk_disable_unprepare(pp->clk);
-err_put_phy_node:
- of_node_put(phy_node);
- if (of_phy_is_fixed_link(dn))
- of_phy_deregister_fixed_link(dn);
+err_free_phylink:
+ if (pp->phylink)
+ phylink_destroy(pp->phylink);
err_free_irq:
irq_dispose_mapping(dev->irq);
err_free_netdev:
@@ -4393,7 +4520,6 @@ err_free_netdev:
static int mvneta_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
- struct device_node *dn = pdev->dev.of_node;
struct mvneta_port *pp = netdev_priv(dev);
unregister_netdev(dev);
@@ -4401,10 +4527,8 @@ static int mvneta_remove(struct platform_device *pdev)
clk_disable_unprepare(pp->clk);
free_percpu(pp->ports);
free_percpu(pp->stats);
- if (of_phy_is_fixed_link(dn))
- of_phy_deregister_fixed_link(dn);
irq_dispose_mapping(dev->irq);
- of_node_put(pp->phy_node);
+ phylink_destroy(pp->phylink);
free_netdev(dev);
if (pp->bm_priv) {
@@ -4422,8 +4546,10 @@ static int mvneta_suspend(struct device *device)
struct net_device *dev = dev_get_drvdata(device);
struct mvneta_port *pp = netdev_priv(dev);
+ rtnl_lock();
if (netif_running(dev))
mvneta_stop(dev);
+ rtnl_unlock();
netif_device_detach(dev);
clk_disable_unprepare(pp->clk_bus);
clk_disable_unprepare(pp->clk);
@@ -4456,14 +4582,13 @@ static int mvneta_resume(struct device *device)
return err;
}
- if (pp->use_inband_status)
- mvneta_fixed_link_update(pp, dev->phydev);
-
netif_device_attach(dev);
+ rtnl_lock();
if (netif_running(dev)) {
mvneta_open(dev);
mvneta_set_rx_mode(dev);
}
+ rtnl_unlock();
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 634b2f41cc9e..a1d7b88cf083 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -10,6 +10,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -454,11 +455,11 @@
/* Various constants */
/* Coalescing */
-#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
+#define MVPP2_TXDONE_COAL_PKTS_THRESH 64
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_TXDONE_COAL_USEC 1000
#define MVPP2_RX_COAL_PKTS 32
-#define MVPP2_RX_COAL_USEC 100
+#define MVPP2_RX_COAL_USEC 64
/* The two bytes Marvell header. Either contains a special value used
* by Marvell switches when a specific hardware mode is enabled (not
@@ -504,10 +505,12 @@
#define MVPP2_DEFAULT_RXQ 4
/* Max number of Rx descriptors */
-#define MVPP2_MAX_RXD 128
+#define MVPP2_MAX_RXD_MAX 1024
+#define MVPP2_MAX_RXD_DFLT 128
/* Max number of Tx descriptors */
-#define MVPP2_MAX_TXD 1024
+#define MVPP2_MAX_TXD_MAX 2048
+#define MVPP2_MAX_TXD_DFLT 1024
/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK 64
@@ -863,7 +866,7 @@ struct mvpp2 {
/* List of pointers to port structures */
int port_count;
- struct mvpp2_port **port_list;
+ struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
/* Aggregated TXQs */
struct mvpp2_tx_queue *aggr_txqs;
@@ -930,6 +933,9 @@ struct mvpp2_port {
struct mvpp2 *priv;
+ /* Firmware node associated to the port */
+ struct fwnode_handle *fwnode;
+
/* Per-port registers' base address */
void __iomem *base;
void __iomem *stats_base;
@@ -5802,6 +5808,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->reserved_num = 0;
txq_pcpu->txq_put_index = 0;
txq_pcpu->txq_get_index = 0;
+ txq_pcpu->tso_headers = NULL;
txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
@@ -5829,10 +5836,13 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
kfree(txq_pcpu->buffs);
- dma_free_coherent(port->dev->dev.parent,
- txq_pcpu->size * TSO_HEADER_SIZE,
- txq_pcpu->tso_headers,
- txq_pcpu->tso_headers_dma);
+ if (txq_pcpu->tso_headers)
+ dma_free_coherent(port->dev->dev.parent,
+ txq_pcpu->size * TSO_HEADER_SIZE,
+ txq_pcpu->tso_headers,
+ txq_pcpu->tso_headers_dma);
+
+ txq_pcpu->tso_headers = NULL;
}
if (txq->descs)
@@ -6832,13 +6842,13 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
if (ring->rx_pending == 0 || ring->tx_pending == 0)
return -EINVAL;
- if (ring->rx_pending > MVPP2_MAX_RXD)
- new_rx_pending = MVPP2_MAX_RXD;
+ if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
+ new_rx_pending = MVPP2_MAX_RXD_MAX;
else if (!IS_ALIGNED(ring->rx_pending, 16))
new_rx_pending = ALIGN(ring->rx_pending, 16);
- if (ring->tx_pending > MVPP2_MAX_TXD)
- new_tx_pending = MVPP2_MAX_TXD;
+ if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
+ new_tx_pending = MVPP2_MAX_TXD_MAX;
else if (!IS_ALIGNED(ring->tx_pending, 32))
new_tx_pending = ALIGN(ring->tx_pending, 32);
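
Out-of-range requests are clamped and unaligned ones rounded up: Rx rings to a multiple of 16 descriptors, Tx rings to a multiple of 32. ALIGN() does the rounding with the usual power-of-two trick:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	printf("%u %u %u\n",
	       ALIGN(100u, 16u),   /* 112 */
	       ALIGN(128u, 16u),   /* 128: already aligned */
	       ALIGN(1000u, 32u)); /* 1024 */
	return 0;
}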
@@ -7318,9 +7328,10 @@ static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
{
struct mvpp2_port *port = netdev_priv(dev);
- c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
- c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
- c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
+ c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
+ c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
+ c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
+ c->tx_coalesce_usecs = port->tx_time_coal;
return 0;
}
@@ -7340,8 +7351,8 @@ static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
{
struct mvpp2_port *port = netdev_priv(dev);
- ring->rx_max_pending = MVPP2_MAX_RXD;
- ring->tx_max_pending = MVPP2_MAX_TXD;
+ ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
+ ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
ring->rx_pending = port->rx_ring_size;
ring->tx_pending = port->tx_ring_size;
}
@@ -7492,7 +7503,10 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
strncpy(irqname, "rx-shared", sizeof(irqname));
}
- v->irq = of_irq_get_byname(port_node, irqname);
+ if (port_node)
+ v->irq = of_irq_get_byname(port_node, irqname);
+ else
+ v->irq = fwnode_irq_get(port->fwnode, i);
if (v->irq <= 0) {
ret = -EINVAL;
goto err;
@@ -7704,17 +7718,16 @@ static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
}
static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
- struct device_node *port_node,
+ struct fwnode_handle *fwnode,
char **mac_from)
{
struct mvpp2_port *port = netdev_priv(dev);
char hw_mac_addr[ETH_ALEN] = {0};
- const char *dt_mac_addr;
+ char fw_mac_addr[ETH_ALEN];
- dt_mac_addr = of_get_mac_address(port_node);
- if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
- *mac_from = "device tree";
- ether_addr_copy(dev->dev_addr, dt_mac_addr);
+ if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
+ *mac_from = "firmware node";
+ ether_addr_copy(dev->dev_addr, fw_mac_addr);
return;
}
@@ -7733,13 +7746,14 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
- struct device_node *port_node,
- struct mvpp2 *priv, int index)
+ struct fwnode_handle *port_fwnode,
+ struct mvpp2 *priv)
{
struct device_node *phy_node;
- struct phy *comphy;
+ struct phy *comphy = NULL;
struct mvpp2_port *port;
struct mvpp2_port_pcpu *port_pcpu;
+ struct device_node *port_node = to_of_node(port_fwnode);
struct net_device *dev;
struct resource *res;
char *mac_from = "";
@@ -7750,7 +7764,12 @@ static int mvpp2_port_probe(struct platform_device *pdev,
int phy_mode;
int err, i, cpu;
- has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
+ if (port_node) {
+ has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
+ } else {
+ has_tx_irqs = true;
+ queue_mode = MVPP2_QDIST_MULTI_MODE;
+ }
if (!has_tx_irqs)
queue_mode = MVPP2_QDIST_SINGLE_MODE;
@@ -7765,36 +7784,43 @@ static int mvpp2_port_probe(struct platform_device *pdev,
if (!dev)
return -ENOMEM;
- phy_node = of_parse_phandle(port_node, "phy", 0);
- phy_mode = of_get_phy_mode(port_node);
+ if (port_node)
+ phy_node = of_parse_phandle(port_node, "phy", 0);
+ else
+ phy_node = NULL;
+
+ phy_mode = fwnode_get_phy_mode(port_fwnode);
if (phy_mode < 0) {
dev_err(&pdev->dev, "incorrect phy mode\n");
err = phy_mode;
goto err_free_netdev;
}
- comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
- if (IS_ERR(comphy)) {
- if (PTR_ERR(comphy) == -EPROBE_DEFER) {
- err = -EPROBE_DEFER;
- goto err_free_netdev;
+ if (port_node) {
+ comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
+ if (IS_ERR(comphy)) {
+ if (PTR_ERR(comphy) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_free_netdev;
+ }
+ comphy = NULL;
}
- comphy = NULL;
}
- if (of_property_read_u32(port_node, "port-id", &id)) {
+ if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
err = -EINVAL;
dev_err(&pdev->dev, "missing port-id value\n");
goto err_free_netdev;
}
- dev->tx_queue_len = MVPP2_MAX_TXD;
+ dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
dev->watchdog_timeo = 5 * HZ;
dev->netdev_ops = &mvpp2_netdev_ops;
dev->ethtool_ops = &mvpp2_eth_tool_ops;
port = netdev_priv(dev);
port->dev = dev;
+ port->fwnode = port_fwnode;
port->ntxqs = ntxqs;
port->nrxqs = nrxqs;
port->priv = priv;
@@ -7804,7 +7830,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
if (err)
goto err_free_netdev;
- port->link_irq = of_irq_get_byname(port_node, "link");
+ if (port_node)
+ port->link_irq = of_irq_get_byname(port_node, "link");
+ else
+ port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
if (port->link_irq == -EPROBE_DEFER) {
err = -EPROBE_DEFER;
goto err_deinit_qvecs;
@@ -7813,7 +7842,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
/* the link irq is optional */
port->link_irq = 0;
- if (of_property_read_bool(port_node, "marvell,loopback"))
+ if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
port->flags |= MVPP2_F_LOOPBACK;
port->id = id;
@@ -7838,8 +7867,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
MVPP21_MIB_COUNTERS_OFFSET +
port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
} else {
- if (of_property_read_u32(port_node, "gop-port-id",
- &port->gop_id)) {
+ if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
+ &port->gop_id)) {
err = -EINVAL;
dev_err(&pdev->dev, "missing gop-port-id value\n");
goto err_deinit_qvecs;
@@ -7869,10 +7898,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mutex_init(&port->gather_stats_lock);
INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
- mvpp2_port_copy_mac_addr(dev, priv, port_node, &mac_from);
+ mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
- port->tx_ring_size = MVPP2_MAX_TXD;
- port->rx_ring_size = MVPP2_MAX_RXD;
+ port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
+ port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
SET_NETDEV_DEV(dev, &pdev->dev);
err = mvpp2_port_init(port);
@@ -7927,7 +7956,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
- priv->port_list[index] = port;
+ priv->port_list[priv->port_count++] = port;
+
return 0;
err_free_port_pcpu:
@@ -8186,8 +8216,9 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
static int mvpp2_probe(struct platform_device *pdev)
{
- struct device_node *dn = pdev->dev.of_node;
- struct device_node *port_node;
+ const struct acpi_device_id *acpi_id;
+ struct fwnode_handle *fwnode = pdev->dev.fwnode;
+ struct fwnode_handle *port_fwnode;
struct mvpp2 *priv;
struct resource *res;
void __iomem *base;
@@ -8198,8 +8229,14 @@ static int mvpp2_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->hw_version =
- (unsigned long)of_device_get_match_data(&pdev->dev);
+ if (has_acpi_companion(&pdev->dev)) {
+ acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+ &pdev->dev);
+ priv->hw_version = (unsigned long)acpi_id->driver_data;
+ } else {
+ priv->hw_version =
+ (unsigned long)of_device_get_match_data(&pdev->dev);
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
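The probe path above open-codes match-data retrieval once per firmware interface. Assuming the bus-agnostic helper device_get_match_data() is available (it landed in the same development cycle), the whole block collapses to one call; a sketch:

#include <linux/property.h>

static unsigned long example_hw_version(struct device *dev)
{
	/* resolves .data (OF table) or .driver_data (ACPI table) */
	return (unsigned long)device_get_match_data(dev);
}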
@@ -8213,10 +8250,23 @@ static int mvpp2_probe(struct platform_device *pdev)
return PTR_ERR(priv->lms_base);
} else {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (has_acpi_companion(&pdev->dev)) {
+ /* If the MDIO memory region is declared in
+ * ACPI, the OS may already consider it
+ * 'in use'. Because it overlaps the second
+ * region of the network controller, make
+ * sure it is released before requesting it
+ * again. The mvpp2 driver takes care to avoid
+ * concurrent access to this memory region.
+ */
+ release_resource(res);
+ }
priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->iface_base))
return PTR_ERR(priv->iface_base);
+ }
+ if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
priv->sysctrl_base =
syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"marvell,system-controller");
@@ -8242,32 +8292,34 @@ static int mvpp2_probe(struct platform_device *pdev)
else
priv->max_port_rxqs = 32;
- priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
- if (IS_ERR(priv->pp_clk))
- return PTR_ERR(priv->pp_clk);
- err = clk_prepare_enable(priv->pp_clk);
- if (err < 0)
- return err;
-
- priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
- if (IS_ERR(priv->gop_clk)) {
- err = PTR_ERR(priv->gop_clk);
- goto err_pp_clk;
- }
- err = clk_prepare_enable(priv->gop_clk);
- if (err < 0)
- goto err_pp_clk;
+ if (dev_of_node(&pdev->dev)) {
+ priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
+ if (IS_ERR(priv->pp_clk))
+ return PTR_ERR(priv->pp_clk);
+ err = clk_prepare_enable(priv->pp_clk);
+ if (err < 0)
+ return err;
- if (priv->hw_version == MVPP22) {
- priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
- if (IS_ERR(priv->mg_clk)) {
- err = PTR_ERR(priv->mg_clk);
- goto err_gop_clk;
+ priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
+ if (IS_ERR(priv->gop_clk)) {
+ err = PTR_ERR(priv->gop_clk);
+ goto err_pp_clk;
}
-
- err = clk_prepare_enable(priv->mg_clk);
+ err = clk_prepare_enable(priv->gop_clk);
if (err < 0)
- goto err_gop_clk;
+ goto err_pp_clk;
+
+ if (priv->hw_version == MVPP22) {
+ priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
+ if (IS_ERR(priv->mg_clk)) {
+ err = PTR_ERR(priv->mg_clk);
+ goto err_gop_clk;
+ }
+
+ err = clk_prepare_enable(priv->mg_clk);
+ if (err < 0)
+ goto err_gop_clk;
+ }
priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
if (IS_ERR(priv->axi_clk)) {
@@ -8280,10 +8332,14 @@ static int mvpp2_probe(struct platform_device *pdev)
if (err < 0)
goto err_gop_clk;
}
- }
- /* Get system's tclk rate */
- priv->tclk = clk_get_rate(priv->pp_clk);
+ /* Get system's tclk rate */
+ priv->tclk = clk_get_rate(priv->pp_clk);
+ } else if (device_property_read_u32(&pdev->dev, "clock-frequency",
+ &priv->tclk)) {
+ dev_err(&pdev->dev, "missing clock-frequency value\n");
+ return -EINVAL;
+ }
if (priv->hw_version == MVPP22) {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
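Clock handling is where the two firmware interfaces diverge most: DT models the clock tree with consumer handles, while the ACPI description advertises only a fixed rate through a "clock-frequency" _DSD property, as the hunk above shows. A reduced sketch of the rate lookup (prepare/enable and unwinding trimmed; names illustrative):

#include <linux/clk.h>
#include <linux/property.h>

static int example_get_tclk(struct device *dev, u32 *rate)
{
	struct clk *clk;

	if (dev->of_node) {		/* DT: a real clock consumer */
		clk = devm_clk_get(dev, "pp_clk");
		if (IS_ERR(clk))
			return PTR_ERR(clk);
		*rate = clk_get_rate(clk);
		return 0;
	}

	/* ACPI: no clock objects, read the advertised rate instead */
	return device_property_read_u32(dev, "clock-frequency", rate);
}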
@@ -8306,30 +8362,19 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_mg_clk;
}
- priv->port_count = of_get_available_child_count(dn);
+ /* Initialize ports */
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ err = mvpp2_port_probe(pdev, port_fwnode, priv);
+ if (err < 0)
+ goto err_port_probe;
+ }
+
if (priv->port_count == 0) {
dev_err(&pdev->dev, "no ports enabled\n");
err = -ENODEV;
goto err_mg_clk;
}
- priv->port_list = devm_kcalloc(&pdev->dev, priv->port_count,
- sizeof(*priv->port_list),
- GFP_KERNEL);
- if (!priv->port_list) {
- err = -ENOMEM;
- goto err_mg_clk;
- }
-
- /* Initialize ports */
- i = 0;
- for_each_available_child_of_node(dn, port_node) {
- err = mvpp2_port_probe(pdev, port_node, priv, i);
- if (err < 0)
- goto err_port_probe;
- i++;
- }
-
/* Statistics must be gathered regularly because some of them (like
* packets counters) are 32-bit registers and could overflow quite
* quickly. For instance, a 10Gb link used at full bandwidth with the
@@ -8350,7 +8395,7 @@ static int mvpp2_probe(struct platform_device *pdev)
err_port_probe:
i = 0;
- for_each_available_child_of_node(dn, port_node) {
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
if (priv->port_list[i])
mvpp2_port_remove(priv->port_list[i]);
i++;
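Both the probe loop and the error-unwind loop above use fwnode child iteration, which behaves identically for DT nodes and ACPI companions; note that port_count is now accumulated while probing rather than pre-counted with of_get_available_child_count(). A sketch of the idiom (example_port_probe() is hypothetical):

#include <linux/property.h>

static int example_port_probe(struct fwnode_handle *child);

static int example_probe_ports(struct device *dev)
{
	struct fwnode_handle *child;
	int err, count = 0;

	fwnode_for_each_available_child_node(dev_fwnode(dev), child) {
		err = example_port_probe(child);
		if (err) {
			/* drop the reference held by the iterator */
			fwnode_handle_put(child);
			return err;
		}
		count++;
	}

	return count ? 0 : -ENODEV;
}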
@@ -8369,14 +8414,14 @@ err_pp_clk:
static int mvpp2_remove(struct platform_device *pdev)
{
struct mvpp2 *priv = platform_get_drvdata(pdev);
- struct device_node *dn = pdev->dev.of_node;
- struct device_node *port_node;
+ struct fwnode_handle *fwnode = pdev->dev.fwnode;
+ struct fwnode_handle *port_fwnode;
int i = 0;
flush_workqueue(priv->stats_queue);
destroy_workqueue(priv->stats_queue);
- for_each_available_child_of_node(dn, port_node) {
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
if (priv->port_list[i]) {
mutex_destroy(&priv->port_list[i]->gather_stats_lock);
mvpp2_port_remove(priv->port_list[i]);
@@ -8399,6 +8444,9 @@ static int mvpp2_remove(struct platform_device *pdev)
aggr_txq->descs_dma);
}
+ if (is_acpi_node(port_fwnode))
+ return 0;
+
clk_disable_unprepare(priv->axi_clk);
clk_disable_unprepare(priv->mg_clk);
clk_disable_unprepare(priv->pp_clk);
@@ -8420,12 +8468,19 @@ static const struct of_device_id mvpp2_match[] = {
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
+static const struct acpi_device_id mvpp2_acpi_match[] = {
+ { "MRVL0110", MVPP22 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
+
static struct platform_driver mvpp2_driver = {
.probe = mvpp2_probe,
.remove = mvpp2_remove,
.driver = {
.name = MVPP2_DRIVER_NAME,
.of_match_table = mvpp2_match,
+ .acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
},
};
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 6e423f098a60..31efc47c847e 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4081,7 +4081,6 @@ static void skge_remove(struct pci_dev *pdev)
if (hw->ports > 1) {
skge_write32(hw, B0_IMSK, 0);
skge_read32(hw, B0_IMSK);
- free_irq(pdev->irq, hw);
}
spin_unlock_irq(&hw->hw_lock);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 9efe1771423c..9fe85300e7b6 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4287,7 +4287,7 @@ static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
return -ETIMEDOUT;
}
- mdelay(1);
+ msleep(1);
}
return 0;
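mdelay() busy-waits; sky2_vpd_wait() runs in process context, so msleep() lets other work run between polls. The general shape of such a bounded poll loop, as a sketch (register accessor and busy bit are illustrative; note that msleep(1) may sleep considerably longer than 1 ms, and usleep_range() is the finer-grained alternative):

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>

static int example_wait_not_busy(void __iomem *reg, u16 busy_bit)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(100);

	while (readw(reg) & busy_bit) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		msleep(1);	/* sleeps, unlike mdelay() */
	}
	return 0;
}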
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 54adfd967858..29826dd15204 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1952,20 +1952,23 @@ static int mtk_hw_init(struct mtk_eth *eth)
}
regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
- /* Set GE2 driving and slew rate */
- regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
+ if (eth->pctl) {
+ /* Set GE2 driving and slew rate */
+ regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
- /* set GE2 TDSEL */
- regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
+ /* set GE2 TDSEL */
+ regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
- /* set GE2 TUNE */
- regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
-
- /* GE1, Force 1000M/FD, FC ON */
- mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
+ /* set GE2 TUNE */
+ regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+ }
- /* GE2, Force 1000M/FD, FC ON */
- mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
+ /* Set link-down as the default for each GMAC. Each GMAC's own
+ * MCR is set up with the appropriate value when
+ * mtk_phy_link_adjust() is invoked.
+ */
+ for (i = 0; i < MTK_MAC_COUNT; i++)
+ mtk_w32(eth, 0, MTK_MAC_MCR(i));
/* Indicates CDM to parse the MTK special tag from CPU
* which also is working out for untag packets.
@@ -2537,11 +2540,13 @@ static int mtk_probe(struct platform_device *pdev)
}
}
- eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "mediatek,pctl");
- if (IS_ERR(eth->pctl)) {
- dev_err(&pdev->dev, "no pctl regmap found\n");
- return PTR_ERR(eth->pctl);
+ if (eth->soc->required_pctl) {
+ eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,pctl");
+ if (IS_ERR(eth->pctl)) {
+ dev_err(&pdev->dev, "no pctl regmap found\n");
+ return PTR_ERR(eth->pctl);
+ }
}
for (i = 0; i < 3; i++) {
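With MT7622 the pctl regmap becomes optional, so the lookup is gated on per-SoC match data (see the mtk_soc_data hunks below) instead of failing the probe unconditionally. A sketch of the optional-syscon idiom (field names from this patch):

#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

/* eth->pctl stays NULL on SoCs that need no pin setup; every user
 * must therefore test it before calling regmap_write().
 */
static int example_get_pctl(struct mtk_eth *eth, struct device_node *np)
{
	if (!eth->soc->required_pctl)
		return 0;

	eth->pctl = syscon_regmap_lookup_by_phandle(np, "mediatek,pctl");
	return PTR_ERR_OR_ZERO(eth->pctl);
}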
@@ -2667,17 +2672,20 @@ static int mtk_remove(struct platform_device *pdev)
static const struct mtk_soc_data mt2701_data = {
.caps = MTK_GMAC1_TRGMII,
- .required_clks = MT7623_CLKS_BITMAP
+ .required_clks = MT7623_CLKS_BITMAP,
+ .required_pctl = true,
};
static const struct mtk_soc_data mt7622_data = {
.caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW,
- .required_clks = MT7622_CLKS_BITMAP
+ .required_clks = MT7622_CLKS_BITMAP,
+ .required_pctl = false,
};
static const struct mtk_soc_data mt7623_data = {
.caps = MTK_GMAC1_TRGMII,
- .required_clks = MT7623_CLKS_BITMAP
+ .required_clks = MT7623_CLKS_BITMAP,
+ .required_pctl = true,
};
const struct of_device_id of_mtk_match[] = {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index a3af4660de81..672b8c353c47 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -573,10 +573,13 @@ struct mtk_rx_ring {
* @caps Flags shown the extra capability for the SoC
* @required_clks Flags shown the bitmap for required clocks on
* the target SoC
+ * @required_pctl A bool value indicating whether the SoC requires
+ * the extra setup for the pins used by the GMAC.
*/
struct mtk_soc_data {
u32 caps;
u32 required_clks;
+ bool required_pctl;
};
/* currently no SoC has more than 2 macs */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 5f41dc92aa68..1a0c3bf86ead 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -310,6 +310,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
}
switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_VENDOR:
case IEEE_8021QAZ_TSA_STRICT:
break;
case IEEE_8021QAZ_TSA_ETS:
@@ -347,6 +348,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
/* higher TC means higher priority => lower pg */
for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_VENDOR:
+ pg[i] = MLX4_EN_TC_VENDOR;
+ tc_tx_bw[i] = MLX4_EN_BW_MAX;
+ break;
case IEEE_8021QAZ_TSA_STRICT:
pg[i] = num_strict++;
tc_tx_bw[i] = MLX4_EN_BW_MAX;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bf1f04164885..ebc1f566a4d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1094,12 +1094,21 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
if (param->rx_jumbo_pending || param->rx_mini_pending)
return -EINVAL;
+ if (param->rx_pending < MLX4_EN_MIN_RX_SIZE) {
+ en_warn(priv, "%s: rx_pending (%d) < min (%d)\n",
+ __func__, param->rx_pending,
+ MLX4_EN_MIN_RX_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending < MLX4_EN_MIN_TX_SIZE) {
+ en_warn(priv, "%s: tx_pending (%d) < min (%lu)\n",
+ __func__, param->tx_pending,
+ MLX4_EN_MIN_TX_SIZE);
+ return -EINVAL;
+ }
+
rx_size = roundup_pow_of_two(param->rx_pending);
- rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
- rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
tx_size = roundup_pow_of_two(param->tx_pending);
- tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
- tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
priv->rx_ring[0]->size) &&
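The ethtool change above replaces silent clamping with an explicit error: a request below the hardware minimum now returns -EINVAL instead of a quietly adjusted ring size. A sketch of the resulting policy (constants as in the hunk):

#include <linux/ethtool.h>

static int example_check_ringparam(const struct ethtool_ringparam *p)
{
	if (p->rx_pending < MLX4_EN_MIN_RX_SIZE ||
	    p->tx_pending < MLX4_EN_MIN_TX_SIZE)
		return -EINVAL;	/* reject rather than round up */

	/* accepted sizes are still rounded up to a power of two */
	return 0;
}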
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 99051a294fa6..8fc51bc29003 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2172,8 +2172,9 @@ static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
prof->rx_ring_size, priv->stride,
- node))
+ node, i))
goto err;
+
}
#ifdef CONFIG_RFS_ACCEL
@@ -3336,6 +3337,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
+ u8 prio;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
+ priv->ets.prio_tc[prio] = prio;
+ priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR;
+ }
+
priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
priv->flags |= MLX4_EN_DCB_ENABLED;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index e0eb695318e6..1fa4849a6f56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -188,7 +188,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct net_device *dev = mdev->pndev[port];
struct mlx4_en_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
- struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_cmd_mailbox *mailbox, *mailbox_priority;
u64 in_mod = reset << 8 | port;
int err;
int i, counter_index;
@@ -198,6 +198,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
+
+ mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev);
+ if (IS_ERR(mailbox_priority)) {
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ return PTR_ERR(mailbox_priority);
+ }
+
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
@@ -206,6 +213,28 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
mlx4_en_stats = mailbox->buf;
+ memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
+ counter_index = mlx4_get_default_counter_index(mdev->dev, port);
+ err = mlx4_get_counter_stats(mdev->dev, counter_index,
+ &tmp_counter_stats, reset);
+
+ /* 0xff bytes indicate an invalid value */
+ memset(mailbox_priority->buf, 0xff,
+ sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
+
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
+ memset(mailbox_priority->buf, 0,
+ sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
+ err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma,
+ in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
+ 0, MLX4_CMD_DUMP_ETH_STATS,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+ if (err)
+ goto out;
+ }
+
+ flowstats = mailbox_priority->buf;
+
spin_lock_bh(&priv->stats_lock);
mlx4_en_fold_software_stats(dev);
@@ -345,31 +374,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);
- spin_unlock_bh(&priv->stats_lock);
-
- memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
- counter_index = mlx4_get_default_counter_index(mdev->dev, port);
- err = mlx4_get_counter_stats(mdev->dev, counter_index,
- &tmp_counter_stats, reset);
-
- /* 0xffs indicates invalid value */
- memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
-
- if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
- memset(mailbox->buf, 0,
- sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
- err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
- in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
- 0, MLX4_CMD_DUMP_ETH_STATS,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
- if (err)
- goto out;
- }
-
- flowstats = mailbox->buf;
-
- spin_lock_bh(&priv->stats_lock);
-
if (tmp_counter_stats.counter_mode == 0) {
priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes);
priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes);
@@ -410,6 +414,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 85e28efcda33..b4d144e67514 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -262,7 +262,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
- u32 size, u16 stride, int node)
+ u32 size, u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rx_ring *ring;
@@ -286,6 +286,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
+ if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
+ goto err_ring;
+
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
ring->rx_info = vzalloc_node(tmp, node);
@@ -293,7 +296,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->rx_info = vzalloc(tmp);
if (!ring->rx_info) {
err = -ENOMEM;
- goto err_ring;
+ goto err_xdp_info;
}
}
@@ -317,6 +320,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
err_info:
vfree(ring->rx_info);
ring->rx_info = NULL;
+err_xdp_info:
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
err_ring:
kfree(ring);
*pring = NULL;
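xdp_rxq_info_reg() must complete before the ring can hand out xdp_buff structures pointing at it, and the unwind order above mirrors the setup order. A condensed sketch of the lifecycle (the 4.16-era three-argument signature is assumed; the buffer-allocation helper is hypothetical):

#include <net/xdp.h>

static int example_alloc_ring_buffers(struct mlx4_en_rx_ring *ring);

static int example_ring_setup(struct mlx4_en_rx_ring *ring,
			      struct net_device *dev, int qidx)
{
	int err;

	err = xdp_rxq_info_reg(&ring->xdp_rxq, dev, qidx);
	if (err < 0)
		return err;

	err = example_alloc_ring_buffers(ring);
	if (err) {
		/* unregister in reverse order of setup */
		xdp_rxq_info_unreg(&ring->xdp_rxq);
		return err;
	}
	return 0;
}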
@@ -440,6 +445,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
lockdep_is_held(&mdev->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
@@ -617,6 +623,10 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
return 0;
}
#endif
+
+/* We reach this function only after checking that at least one of
+ * the (IPv4 | IPv6) bits is set in cqe->status.
+ */
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
netdev_features_t dev_features)
{
@@ -632,13 +642,11 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
hdr += sizeof(struct vlan_hdr);
}
- if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
- return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
#endif
- return 0;
+ return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
}
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
@@ -650,6 +658,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int cq_ring = cq->ring;
bool doorbell_pending;
struct mlx4_cqe *cqe;
+ struct xdp_buff xdp;
int polled = 0;
int index;
@@ -664,6 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
+ xdp.rxq = &ring->xdp_rxq;
doorbell_pending = 0;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -748,7 +758,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* read bytes but not past the end of the frag.
*/
if (xdp_prog) {
- struct xdp_buff xdp;
dma_addr_t dma;
void *orig_data;
u32 act;
@@ -814,33 +823,33 @@ xdp_drop_no_cnt:
if (likely(dev->features & NETIF_F_RXCSUM)) {
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) {
- if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
- cqe->checksum == cpu_to_be16(0xffff)) {
- bool l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
- (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
-
- ip_summed = CHECKSUM_UNNECESSARY;
- hash_type = PKT_HASH_TYPE_L4;
- if (l2_tunnel)
- skb->csum_level = 1;
- ring->csum_ok++;
- } else {
+ bool l2_tunnel;
+
+ if (!((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+ cqe->checksum == cpu_to_be16(0xffff)))
goto csum_none;
- }
+
+ l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+ (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+ ip_summed = CHECKSUM_UNNECESSARY;
+ hash_type = PKT_HASH_TYPE_L4;
+ if (l2_tunnel)
+ skb->csum_level = 1;
+ ring->csum_ok++;
} else {
- if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
- (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
- MLX4_CQE_STATUS_IPV6))) {
- if (check_csum(cqe, skb, va, dev->features)) {
- goto csum_none;
- } else {
- ip_summed = CHECKSUM_COMPLETE;
- hash_type = PKT_HASH_TYPE_L3;
- ring->csum_complete++;
- }
- } else {
+ if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
+ (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+#if IS_ENABLED(CONFIG_IPV6)
+ MLX4_CQE_STATUS_IPV6))))
+#else
+ 0))))
+#endif
goto csum_none;
- }
+ if (check_csum(cqe, skb, va, dev->features))
+ goto csum_none;
+ ip_summed = CHECKSUM_COMPLETE;
+ hash_type = PKT_HASH_TYPE_L3;
+ ring->csum_complete++;
}
} else {
csum_none:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 88699b181946..946d9db7c8c2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -185,7 +185,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
if (priv->mdev->dev->caps.flags &
MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
buf[3] = mlx4_en_test_registers(priv);
- if (priv->port_up)
+ if (priv->port_up && dev->mtu >= MLX4_SELFTEST_LB_MIN_MTU)
buf[4] = mlx4_en_test_loopback(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 1856e279a7e0..f470ae37d937 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -46,6 +46,7 @@
#endif
#include <linux/cpu_rmap.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/xdp.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
@@ -153,6 +154,9 @@
#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
+#define PREAMBLE_LEN 8
+#define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
+ ETH_HLEN + PREAMBLE_LEN)
#define MLX4_EN_MIN_MTU 46
/* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
@@ -353,6 +357,7 @@ struct mlx4_en_rx_ring {
unsigned long dropped;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
+ struct xdp_rxq_info xdp_rxq;
};
struct mlx4_en_cq {
@@ -476,6 +481,7 @@ struct mlx4_en_frag_info {
#define MLX4_EN_BW_MIN 1
#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
+#define MLX4_EN_TC_VENDOR 0
#define MLX4_EN_TC_ETS 7
enum dcb_pfc_type {
@@ -716,7 +722,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
- u32 size, u16 stride, int node);
+ u32 size, u16 stride, int node, int queue_index);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index c7c0764991c9..2e84f10f59ba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -1103,30 +1103,16 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey)
{
- struct mlx4_cmd_mailbox *mailbox;
- int err;
-
if (!fmr->maps)
return;
- fmr->maps = 0;
+ /* To unmap: it is sufficient to take back ownership from HW */
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox)) {
- err = PTR_ERR(mailbox);
- pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
- return;
- }
+ /* Make sure MPT status is visible */
+ wmb();
- err = mlx4_HW2SW_MPT(dev, NULL,
- key_to_hw_index(fmr->mr.key) &
- (dev->caps.num_mpts - 1));
- mlx4_free_cmd_mailbox(dev, mailbox);
- if (err) {
- pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
- return;
- }
- fmr->mr.enabled = MLX4_MPT_EN_SW;
+ fmr->maps = 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
@@ -1136,6 +1122,22 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
if (fmr->maps)
return -EBUSY;
+ if (fmr->mr.enabled == MLX4_MPT_EN_HW) {
+ /* If the FMR was enabled and then unmapped,
+ * give ownership of the MPT back to HW so that
+ * the HW2SW_MPT command will succeed.
+ */
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
+ /* Make sure MPT status is visible before changing MPT fields */
+ wmb();
+ fmr->mpt->length = 0;
+ fmr->mpt->start = 0;
+ /* Make sure MPT data is visible before changing MPT status */
+ wmb();
+ *(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
+ /* Make sure MPT status is visible */
+ wmb();
+ }
ret = mlx4_mr_free(dev, &fmr->mr);
if (ret)
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 769598f7b6c8..3aaf4bad6c5a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -287,6 +287,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
u64 in_param = 0;
int err;
+ if (!cnt)
+ return;
+
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, base_qpn);
set_param_h(&in_param, cnt);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 04304dd894c6..606a0e0beeae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -611,7 +611,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
MLX4_MAX_PORTS;
else
res_alloc->guaranteed[t] = 0;
- res_alloc->res_free -= res_alloc->guaranteed[t];
break;
default:
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 19b21b40ab07..c805769d92a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,7 +14,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \
+ en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
en_arfs.o en_fs_ethtool.o en_selftest.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 1fffdebbc9e8..e9a1fbcc4adf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -362,7 +362,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
case MLX5_CMD_OP_ALLOC_Q_COUNTER:
case MLX5_CMD_OP_QUERY_Q_COUNTER:
- case MLX5_CMD_OP_SET_RATE_LIMIT:
+ case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
case MLX5_CMD_OP_QUERY_RATE_LIMIT:
case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
@@ -505,7 +505,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
- MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+ MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c0872b3284cb..4c9360b25532 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -44,8 +44,11 @@
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
+#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
+#include <net/xdp.h>
+#include <linux/net_dim.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
@@ -82,6 +85,9 @@
max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
+#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
+ (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
+ MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
@@ -223,12 +229,6 @@ enum mlx5e_priv_flag {
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif
-struct mlx5e_cq_moder {
- u16 usec;
- u16 pkts;
- u8 cq_period_mode;
-};
-
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
@@ -239,8 +239,8 @@ struct mlx5e_params {
u16 num_channels;
u8 num_tc;
bool rx_cqe_compress_def;
- struct mlx5e_cq_moder rx_cq_moderation;
- struct mlx5e_cq_moder tx_cq_moderation;
+ struct net_dim_cq_moder rx_cq_moderation;
+ struct net_dim_cq_moder tx_cq_moderation;
bool lro_en;
u32 lro_wqe_sz;
u16 tx_max_inline;
@@ -250,7 +250,7 @@ struct mlx5e_params {
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
bool vlan_strip_disable;
bool scatter_fcs_en;
- bool rx_am_enabled;
+ bool rx_dim_enabled;
u32 lro_timeout;
u32 pflags;
struct bpf_prog *xdp_prog;
@@ -469,32 +469,6 @@ struct mlx5e_mpw_info {
u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
};
-struct mlx5e_rx_am_stats {
- int ppms; /* packets per msec */
- int bpms; /* bytes per msec */
- int epms; /* events per msec */
-};
-
-struct mlx5e_rx_am_sample {
- ktime_t time;
- u32 pkt_ctr;
- u32 byte_ctr;
- u16 event_ctr;
-};
-
-struct mlx5e_rx_am { /* Adaptive Moderation */
- u8 state;
- struct mlx5e_rx_am_stats prev_stats;
- struct mlx5e_rx_am_sample start_sample;
- struct work_struct work;
- u8 profile_ix;
- u8 mode;
- u8 tune_state;
- u8 steps_right;
- u8 steps_left;
- u8 tired;
-};
-
/* a single cache unit is capable to serve one napi call (for non-striding rq)
* or a MPWQE (for striding rq).
*/
@@ -555,7 +529,7 @@ struct mlx5e_rq {
unsigned long state;
int ix;
- struct mlx5e_rx_am am; /* Adaptive Moderation */
+ struct net_dim dim; /* Dynamic Interrupt Moderation */
/* XDP */
struct bpf_prog *xdp_prog;
@@ -568,6 +542,9 @@ struct mlx5e_rq {
u32 rqn;
struct mlx5_core_dev *mdev;
struct mlx5_core_mkey umr_mkey;
+
+ /* XDP read-mostly */
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
struct mlx5e_channel {
@@ -584,12 +561,14 @@ struct mlx5e_channel {
/* data path - accessed per napi poll */
struct irq_desc *irq_desc;
+ struct mlx5e_ch_stats stats;
/* control */
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
struct hwtstamp_config *tstamp;
int ix;
+ int cpu;
};
struct mlx5e_channels {
@@ -651,6 +630,7 @@ struct mlx5e_tc_table {
struct rhashtable ht;
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ DECLARE_HASHTABLE(hairpin_tbl, 8);
};
struct mlx5e_vlan_table {
@@ -718,6 +698,11 @@ enum {
MLX5E_ARFS_FT_LEVEL
};
+enum {
+ MLX5E_TC_FT_LEVEL = 0,
+ MLX5E_TC_TTC_FT_LEVEL,
+};
+
struct mlx5e_ethtool_table {
struct mlx5_flow_table *ft;
int num_rules;
@@ -856,11 +841,7 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
-void mlx5e_rx_am(struct mlx5e_rq *rq);
-void mlx5e_rx_am_work(struct work_struct *work);
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
-
-void mlx5e_update_stats(struct mlx5e_priv *priv, bool full);
+void mlx5e_update_stats(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
@@ -891,7 +872,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
-void mlx5e_timestamp_set(struct mlx5e_priv *priv);
+void mlx5e_timestamp_init(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
bool is_rss;
@@ -935,8 +916,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
-void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params, u8 rq_type);
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ u8 rq_type);
static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
@@ -1049,11 +1031,26 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
+struct ttc_params {
+ struct mlx5_flow_table_attr ft_attr;
+ u32 any_tt_tirn;
+ u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_ttc_table *inner_ttc;
+};
+
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv);
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv);
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc);
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
u32 underlay_qpn, u32 *tisn);
@@ -1066,6 +1063,8 @@ int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
+int mlx5e_bits_invert(unsigned long a, int size);
+
/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo);
@@ -1105,4 +1104,5 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 max_channels);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
+void mlx5e_rx_dim_work(struct work_struct *work);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index c6d90b6dd80e..3d46ef48d5b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -274,6 +274,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
struct ieee_ets *ets)
{
+ bool have_ets_tc = false;
int bw_sum = 0;
int i;
@@ -288,11 +289,14 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
}
/* Validate Bandwidth Sum */
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+ have_ets_tc = true;
bw_sum += ets->tc_tx_bw[i];
+ }
+ }
- if (bw_sum != 0 && bw_sum != 100) {
+ if (have_ets_tc && bw_sum != 100) {
netdev_err(netdev,
"Failed to validate ETS: BW sum is illegal\n");
return -EINVAL;
@@ -918,8 +922,9 @@ static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
- int i;
struct ieee_ets ets;
+ int err;
+ int i;
if (!MLX5_CAP_GEN(priv->mdev, ets))
return;
@@ -932,11 +937,16 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
ets.prio_tc[i] = i;
}
- /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
- ets.prio_tc[0] = 1;
- ets.prio_tc[1] = 0;
+ if (ets.ets_cap > 1) {
+ /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
+ ets.prio_tc[0] = 1;
+ ets.prio_tc[1] = 0;
+ }
- mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+ err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+ if (err)
+ netdev_err(priv->netdev,
+ "%s, Failed to init ETS: %d\n", __func__, err);
}
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
new file mode 100644
index 000000000000..602851ab5b14
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/net_dim.h>
+#include "en.h"
+
+void mlx5e_rx_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim,
+ work);
+ struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+ struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+ dim->profile_ix);
+
+ mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
+ cur_profile.usec, cur_profile.pkts);
+
+ dim->state = NET_DIM_START_MEASURE;
+}
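en_dim.c implements only the consumer side: the work item net_dim() schedules once it settles on a new moderation profile. The producer side feeds samples from NAPI poll; a hedged sketch based on the include/linux/net_dim.h API this series moves to (the event_ctr field on the CQ is assumed, as added elsewhere in the series):

#include <linux/net_dim.h>

static void example_napi_dim_update(struct mlx5e_rq *rq)
{
	struct net_dim_sample sample;

	/* snapshot event/packet/byte counters for this poll cycle */
	net_dim_sample(rq->cq.event_ctr, rq->stats.packets,
		       rq->stats.bytes, &sample);

	/* compares against the last sample; may queue rq->dim.work */
	net_dim(&rq->dim, sample);
}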
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 23425f028405..cc8048f68f11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -207,8 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
return;
mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_update_stats(priv, true);
+ mlx5e_update_stats(priv);
mutex_unlock(&priv->state_lock);
for (i = 0; i < mlx5e_num_stats_grps; i++)
@@ -296,7 +295,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct mlx5e_channels new_channels = {};
u32 rx_pending_wqes;
u32 min_rq_size;
- u32 max_rq_size;
u8 log_rq_size;
u8 log_sq_size;
u32 num_mtts;
@@ -315,8 +313,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
1 << mlx5_min_log_rq_size(rq_wq_type));
- max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
- 1 << mlx5_max_log_rq_size(rq_wq_type));
rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
param->rx_pending);
@@ -326,12 +322,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
min_rq_size);
return -EINVAL;
}
- if (param->rx_pending > max_rq_size) {
- netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n",
- __func__, param->rx_pending,
- max_rq_size);
- return -EINVAL;
- }
num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
@@ -347,12 +337,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
return -EINVAL;
}
- if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
- netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n",
- __func__, param->tx_pending,
- 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
- return -EINVAL;
- }
log_rq_size = order_base_2(rx_pending_wqes);
log_sq_size = order_base_2(param->tx_pending);
@@ -480,7 +464,7 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec;
coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
- coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled;
+ coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
return 0;
}
@@ -534,7 +518,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
- new_channels.params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
+ new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -542,7 +526,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
}
/* we are opened */
- reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled;
+ reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
if (!reset) {
mlx5e_set_priv_channels_coalesce(priv, coal);
priv->channels.params = new_channels.params;
@@ -1523,8 +1507,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
new_channels.params = priv->channels.params;
MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
- mlx5e_set_rq_type_params(priv->mdev, &new_channels.params,
- new_channels.params.rq_wq_type);
+ new_channels.params.mpwqe_log_stride_sz =
+ MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val);
+ new_channels.params.mpwqe_log_num_strides =
+ MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -1536,6 +1522,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
return err;
mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n",
+ MLX5E_GET_PFLAG(&priv->channels.params,
+ MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index def513484845..f64dda2bed31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -806,25 +806,25 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
return err ? ERR_PTR(err) : rule;
}
-static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
+static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
+ struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
struct mlx5_flow_destination dest = {};
- struct mlx5e_ttc_table *ttc;
struct mlx5_flow_handle **rules;
struct mlx5_flow_table *ft;
int tt;
int err;
- ttc = &priv->fs.ttc;
ft = ttc->ft.t;
rules = ttc->rules;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
if (tt == MLX5E_TT_ANY)
- dest.tir_num = priv->direct_tir[0].tirn;
+ dest.tir_num = params->any_tt_tirn;
else
- dest.tir_num = priv->indir_tir[tt].tirn;
+ dest.tir_num = params->indir_tirn[tt];
rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
ttc_rules[tt].etype,
ttc_rules[tt].proto);
@@ -832,12 +832,12 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
goto del_rules;
}
- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
return 0;
rules = ttc->tunnel_rules;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.inner_ttc.ft.t;
+ dest.ft = params->inner_ttc->ft.t;
for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
ttc_tunnel_rules[tt].etype,
@@ -977,25 +977,25 @@ mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
return err ? ERR_PTR(err) : rule;
}
-static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
+static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
+ struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rules;
- struct mlx5e_ttc_table *ttc;
struct mlx5_flow_table *ft;
int err;
int tt;
- ttc = &priv->fs.inner_ttc;
ft = ttc->ft.t;
rules = ttc->rules;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
if (tt == MLX5E_TT_ANY)
- dest.tir_num = priv->direct_tir[0].tirn;
+ dest.tir_num = params->any_tt_tirn;
else
- dest.tir_num = priv->inner_indir_tir[tt].tirn;
+ dest.tir_num = params->indir_tirn[tt];
rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
ttc_rules[tt].etype,
@@ -1075,21 +1075,42 @@ err:
return err;
}
-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
+ struct ttc_params *ttc_params)
+{
+ ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
+ ttc_params->inner_ttc = &priv->fs.inner_ttc;
+}
+
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
+{
+ struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+
+ ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
+ ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
+ ft_attr->prio = MLX5E_NIC_PRIO;
+}
+
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
+{
+ struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+
+ ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
+ ft_attr->level = MLX5E_TTC_FT_LEVEL;
+ ft_attr->prio = MLX5E_NIC_PRIO;
+}
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
- struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
- struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft = &ttc->ft;
int err;
if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
return 0;
- ft_attr.max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
- ft_attr.level = MLX5E_INNER_TTC_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
-
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1100,7 +1121,7 @@ int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
if (err)
goto err;
- err = mlx5e_generate_inner_ttc_table_rules(priv);
+ err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
if (err)
goto err;
@@ -1111,10 +1132,9 @@ err:
return err;
}
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc)
{
- struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
-
if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
return;
@@ -1122,27 +1142,21 @@ void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
mlx5e_destroy_flow_table(&ttc->ft);
}
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+ struct mlx5e_ttc_table *ttc)
{
- struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
-
mlx5e_cleanup_ttc_rules(ttc);
mlx5e_destroy_flow_table(&ttc->ft);
}
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+ struct mlx5e_ttc_table *ttc)
{
bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
- struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
- struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft = &ttc->ft;
int err;
- ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
- ft_attr.level = MLX5E_TTC_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
-
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1153,7 +1167,7 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
if (err)
goto err;
- err = mlx5e_generate_ttc_table_rules(priv);
+ err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
if (err)
goto err;
@@ -1474,7 +1488,8 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
- int err;
+ struct ttc_params ttc_params = {};
+ int tt, err;
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
@@ -1489,14 +1504,23 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_inner_ttc_table(priv);
+ mlx5e_set_ttc_basic_params(priv, &ttc_params);
+ mlx5e_set_inner_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
+
+ err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
- err = mlx5e_create_ttc_table(priv);
+ mlx5e_set_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+
+ err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
@@ -1524,9 +1548,9 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
err_destroy_l2_table:
mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
- mlx5e_destroy_ttc_table(priv);
+ mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -1537,8 +1561,8 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_l2_table(priv);
- mlx5e_destroy_ttc_table(priv);
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
mlx5e_arfs_destroy_tables(priv);
mlx5e_ethtool_cleanup_steering(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d2b057a3e512..47bab842c5ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -71,11 +71,6 @@ struct mlx5e_channel_param {
struct mlx5e_cq_param icosq_cq;
};
-static int mlx5e_get_node(struct mlx5e_priv *priv, int ix)
-{
- return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix);
-}
-
static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_GEN(mdev, striding_rq) &&
@@ -83,8 +78,8 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, reg_umr_sq);
}
-void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params, u8 rq_type)
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params, u8 rq_type)
{
params->rq_wq_type = rq_type;
params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@ -93,10 +88,8 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
params->log_rq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
- params->mpwqe_log_stride_sz =
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
- MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
- MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
+ params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
params->mpwqe_log_stride_sz;
break;
@@ -120,13 +113,14 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
-static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
{
u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
!params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
MLX5_WQ_TYPE_LINKED_LIST;
- mlx5e_set_rq_type_params(mdev, params, rq_type);
+ mlx5e_init_rq_type_params(mdev, params, rq_type);
}
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -179,182 +173,23 @@ unlock:
rtnl_unlock();
}
-static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
-{
- struct mlx5e_sw_stats temp, *s = &temp;
- struct mlx5e_rq_stats *rq_stats;
- struct mlx5e_sq_stats *sq_stats;
- int i, j;
-
- memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->channels.num; i++) {
- struct mlx5e_channel *c = priv->channels.c[i];
-
- rq_stats = &c->rq.stats;
-
- s->rx_packets += rq_stats->packets;
- s->rx_bytes += rq_stats->bytes;
- s->rx_lro_packets += rq_stats->lro_packets;
- s->rx_lro_bytes += rq_stats->lro_bytes;
- s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
- s->rx_csum_none += rq_stats->csum_none;
- s->rx_csum_complete += rq_stats->csum_complete;
- s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
- s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
- s->rx_xdp_drop += rq_stats->xdp_drop;
- s->rx_xdp_tx += rq_stats->xdp_tx;
- s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
- s->rx_wqe_err += rq_stats->wqe_err;
- s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
- s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
- s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
- s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
- s->rx_page_reuse += rq_stats->page_reuse;
- s->rx_cache_reuse += rq_stats->cache_reuse;
- s->rx_cache_full += rq_stats->cache_full;
- s->rx_cache_empty += rq_stats->cache_empty;
- s->rx_cache_busy += rq_stats->cache_busy;
- s->rx_cache_waive += rq_stats->cache_waive;
-
- for (j = 0; j < priv->channels.params.num_tc; j++) {
- sq_stats = &c->sq[j].stats;
-
- s->tx_packets += sq_stats->packets;
- s->tx_bytes += sq_stats->bytes;
- s->tx_tso_packets += sq_stats->tso_packets;
- s->tx_tso_bytes += sq_stats->tso_bytes;
- s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
- s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
- s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
- s->tx_queue_stopped += sq_stats->stopped;
- s->tx_queue_wake += sq_stats->wake;
- s->tx_queue_dropped += sq_stats->dropped;
- s->tx_xmit_more += sq_stats->xmit_more;
- s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
- s->tx_csum_none += sq_stats->csum_none;
- s->tx_csum_partial += sq_stats->csum_partial;
- }
- }
-
- s->link_down_events_phy = MLX5_GET(ppcnt_reg,
- priv->stats.pport.phy_counters,
- counter_set.phys_layer_cntrs.link_down_events);
- memcpy(&priv->stats.sw, s, sizeof(*s));
-}
-
-static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
-{
- int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- u32 *out = (u32 *)priv->stats.vport.query_vport_out;
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
- struct mlx5_core_dev *mdev = priv->mdev;
-
- MLX5_SET(query_vport_counter_in, in, opcode,
- MLX5_CMD_OP_QUERY_VPORT_COUNTER);
- MLX5_SET(query_vport_counter_in, in, op_mod, 0);
- MLX5_SET(query_vport_counter_in, in, other_vport, 0);
-
- mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
-{
- struct mlx5e_pport_stats *pstats = &priv->stats.pport;
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
- int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
- int prio;
- void *out;
-
- MLX5_SET(ppcnt_reg, in, local_port, 1);
-
- out = pstats->IEEE_802_3_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- if (!full)
- return;
-
- out = pstats->RFC_2863_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- out = pstats->RFC_2819_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- out = pstats->phy_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
- if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
- out = pstats->phy_statistical_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- }
-
- if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
- out = pstats->eth_ext_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- }
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
- for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
- out = pstats->per_prio_counters[prio];
- MLX5_SET(ppcnt_reg, in, prio_tc, prio);
- mlx5_core_access_reg(mdev, in, sz, out, sz,
- MLX5_REG_PPCNT, 0, 0);
- }
-}
-
-static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
-{
- struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
- u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
- int err;
-
- if (!priv->q_counter)
- return;
-
- err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
- if (err)
- return;
-
- qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
-}
-
-static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
+void mlx5e_update_stats(struct mlx5e_priv *priv)
{
- struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
- int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
- void *out;
-
- if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
- return;
-
- out = pcie_stats->pcie_perf_counters;
- MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-}
+ int i;
-void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
-{
- if (full) {
- mlx5e_update_pcie_counters(priv);
- mlx5e_ipsec_update_stats(priv);
- }
- mlx5e_update_pport_counters(priv, full);
- mlx5e_update_vport_counters(priv);
- mlx5e_update_q_counter(priv);
- mlx5e_update_sw_counters(priv);
+ for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
+ if (mlx5e_stats_grps[i].update_stats)
+ mlx5e_stats_grps[i].update_stats(priv);
}
static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
- mlx5e_update_stats(priv, false);
+ int i;
+
+ for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
+ if (mlx5e_stats_grps[i].update_stats_mask &
+ MLX5E_NDO_UPDATE_STATS)
+ mlx5e_stats_grps[i].update_stats(priv);
}
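
The two loops above replace the old monolithic update path: each entry in mlx5e_stats_grps carries an optional update_stats callback, and update_stats_mask gates which callbacks run from the ndo_get_stats path. A minimal standalone sketch of that dispatch pattern follows; the group names and table contents are made up for illustration (the real table lives in en_stats.c):

    /* Standalone sketch of the stats-group dispatch introduced above.
     * Compile with any C compiler; demo_* names are illustrative. */
    #include <stdio.h>

    #define NDO_UPDATE_STATS (1 << 0)

    struct stats_grp {
            unsigned int update_stats_mask;
            void (*update_stats)(void *priv);
    };

    static void demo_update_sw(void *priv)    { (void)priv; puts("sw counters"); }
    static void demo_update_vport(void *priv) { (void)priv; puts("vport counters"); }

    static const struct stats_grp grps[] = {
            { .update_stats_mask = NDO_UPDATE_STATS, .update_stats = demo_update_sw },
            { .update_stats = demo_update_vport }, /* not run from ndo_get_stats */
    };

    static void update_ndo_stats(void *priv)
    {
            int i;

            /* Reverse iteration mirrors the patch; presumably so the
             * software-counter group (index 0) runs last. */
            for (i = (int)(sizeof(grps) / sizeof(grps[0])) - 1; i >= 0; i--)
                    if (grps[i].update_stats &&
                        (grps[i].update_stats_mask & NDO_UPDATE_STATS))
                            grps[i].update_stats(priv);
    }

    int main(void)
    {
            update_ndo_stats(NULL);
            return 0;
    }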
void mlx5e_update_stats_work(struct work_struct *work)
@@ -444,17 +279,16 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
int mtt_sz = mlx5e_get_wqe_mtt_sz();
int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
- int node = mlx5e_get_node(c->priv, c->ix);
int i;
rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
- GFP_KERNEL, node);
+ GFP_KERNEL, cpu_to_node(c->cpu));
if (!rq->mpwqe.info)
goto err_out;
/* We allocate more than mtt_sz as we will align the pointer */
- rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz,
- GFP_KERNEL, node);
+ rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+ cpu_to_node(c->cpu));
if (unlikely(!rq->mpwqe.mtt_no_align))
goto err_free_wqe_info;
@@ -562,7 +396,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
int err;
int i;
- rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+ rqp->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
&rq->wq_ctrl);
@@ -589,6 +423,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
+ if (err < 0)
+ goto err_rq_wq_destroy;
+
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq->buff.headroom = params->rq_headroom;
@@ -629,8 +467,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
default: /* MLX5_WQ_TYPE_LINKED_LIST */
rq->wqe.frag_info =
kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
- GFP_KERNEL,
- mlx5e_get_node(c->priv, c->ix));
+ GFP_KERNEL, cpu_to_node(c->cpu));
if (!rq->wqe.frag_info) {
err = -ENOMEM;
goto err_rq_wq_destroy;
@@ -682,8 +519,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
wqe->data.lkey = rq->mkey_be;
}
- INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
- rq->am.mode = params->rx_cq_moderation.cq_period_mode;
+ INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
+
+ switch (params->rx_cq_moderation.cq_period_mode) {
+ case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+ rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ break;
+ case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+ default:
+ rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
+
rq->page_cache.head = 0;
rq->page_cache.tail = 0;
@@ -695,6 +541,7 @@ err_destroy_umr_mkey:
err_rq_wq_destroy:
if (rq->xdp_prog)
bpf_prog_put(rq->xdp_prog);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
mlx5_wq_destroy(&rq->wq_ctrl);
return err;
@@ -707,6 +554,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
if (rq->xdp_prog)
bpf_prog_put(rq->xdp_prog);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_mpwqe_info(rq);
@@ -927,7 +776,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (err)
goto err_destroy_rq;
- if (params->rx_am_enabled)
+ if (params->rx_dim_enabled)
c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
return 0;
@@ -960,7 +809,7 @@ static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- cancel_work_sync(&rq->am.work);
+ cancel_work_sync(&rq->dim.work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_free_rq(rq);
@@ -1000,13 +849,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
- param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
if (err)
return err;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+ err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
@@ -1053,13 +902,13 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
- param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
if (err)
return err;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix));
+ err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
@@ -1126,13 +975,13 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
- param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
if (err)
return err;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+ err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
@@ -1504,8 +1353,8 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
struct mlx5_core_dev *mdev = c->priv->mdev;
int err;
- param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix);
- param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+ param->wq.buf_numa_node = cpu_to_node(c->cpu);
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
param->eq_ix = c->ix;
err = mlx5e_alloc_cq_common(mdev, param, cq);
@@ -1573,7 +1422,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
- struct mlx5e_cq_moder moder,
+ struct net_dim_cq_moder moder,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
@@ -1604,6 +1453,11 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
mlx5e_free_cq(cq);
}
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+ return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
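
With mlx5e_get_node() gone, each channel now resolves a CPU from its completion vector's IRQ affinity mask (mlx5e_get_cpu() above) and derives the NUMA node via cpu_to_node() at every allocation site. A rough userspace analogue of that cpu-to-node lookup, using libnuma's numa_node_of_cpu() in place of the kernel's cpu_to_node(); the affinity mask value is made up:

    /* Userspace analogue of cpumask_first() + cpu_to_node().
     * Build with: cc demo.c -lnuma */
    #include <stdio.h>
    #include <numa.h>

    static int first_cpu(unsigned long mask)
    {
            return mask ? __builtin_ctzl(mask) : -1;   /* lowest set bit */
    }

    int main(void)
    {
            unsigned long irq_affinity = 0x0c;      /* hypothetical: CPUs 2-3 */
            int cpu = first_cpu(irq_affinity);

            if (cpu < 0 || numa_available() < 0)
                    return 1;

            /* Channel allocations would then target this node, as the
             * kzalloc_node(..., cpu_to_node(c->cpu)) calls do in the patch. */
            printf("cpu %d -> node %d\n", cpu, numa_node_of_cpu(cpu));
            return 0;
    }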
+
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
@@ -1750,14 +1604,15 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
{
- struct mlx5e_cq_moder icocq_moder = {0, 0};
+ struct net_dim_cq_moder icocq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
+ int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
unsigned int irq;
int err;
int eqn;
- c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix));
+ c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@@ -1765,6 +1620,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->mdev = priv->mdev;
c->tstamp = &priv->tstamp;
c->ix = ix;
+ c->cpu = cpu;
c->pdev = &priv->mdev->pdev->dev;
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
@@ -1853,8 +1709,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_rq(&c->rq);
- netif_set_xps_queue(c->netdev,
- mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix);
+ netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2001,7 +1856,7 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
mlx5e_build_common_cq_param(priv, param);
- param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
@@ -2205,7 +2060,7 @@ static int mlx5e_rx_hash_fn(int hfunc)
MLX5_RX_HASH_FN_INVERTED_XOR8;
}
-static int mlx5e_bits_invert(unsigned long a, int size)
+int mlx5e_bits_invert(unsigned long a, int size)
{
int inv = 0;
int i;
@@ -2671,7 +2526,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
netif_carrier_on(netdev);
}
-void mlx5e_timestamp_set(struct mlx5e_priv *priv)
+void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -2692,7 +2547,6 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_activate_priv_channels(priv);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
- mlx5e_timestamp_set(priv);
if (priv->profile->update_stats)
queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -2768,6 +2622,9 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
if (err)
return err;
+ /* Mark as unused; "Drop-RQ" packets never reach XDP */
+ xdp_rxq_info_unused(&rq->xdp_rxq);
+
rq->mdev = mdev;
return 0;
@@ -3087,9 +2944,6 @@ out:
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return mlx5e_configure_flower(priv, cls_flower);
@@ -3107,7 +2961,7 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct mlx5e_priv *priv = cb_priv;
- if (!tc_can_offload(priv->netdev))
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -3221,12 +3075,12 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
return 0;
}
-#define MLX5E_SET_FEATURE(netdev, feature, enable) \
+#define MLX5E_SET_FEATURE(features, feature, enable) \
do { \
if (enable) \
- netdev->features |= feature; \
+ *features |= feature; \
else \
- netdev->features &= ~feature; \
+ *features &= ~feature; \
} while (0)
typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
@@ -3349,6 +3203,7 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
#endif
static int mlx5e_handle_feature(struct net_device *netdev,
+ netdev_features_t *features,
netdev_features_t wanted_features,
netdev_features_t feature,
mlx5e_feature_handler feature_handler)
@@ -3367,34 +3222,40 @@ static int mlx5e_handle_feature(struct net_device *netdev,
return err;
}
- MLX5E_SET_FEATURE(netdev, feature, enable);
+ MLX5E_SET_FEATURE(features, feature, enable);
return 0;
}
static int mlx5e_set_features(struct net_device *netdev,
netdev_features_t features)
{
+ netdev_features_t oper_features = netdev->features;
int err;
- err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
- set_feature_lro);
- err |= mlx5e_handle_feature(netdev, features,
+ err = mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_LRO, set_feature_lro);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
- set_feature_tc_num_filters);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
- set_feature_rx_all);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
- set_feature_rx_fcs);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
- set_feature_rx_vlan);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_HW_TC, set_feature_tc_num_filters);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_RXALL, set_feature_rx_all);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_RXFCS, set_feature_rx_fcs);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
- set_feature_arfs);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_NTUPLE, set_feature_arfs);
#endif
- return err ? -EINVAL : 0;
+ if (err) {
+ netdev->features = oper_features;
+ return -EINVAL;
+ }
+
+ return 0;
}
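
The rework above threads a local oper_features copy through every handler so that a failure partway through the chain still publishes exactly the bits that took effect, instead of the old behavior of mutating netdev->features in place. A standalone sketch of that accumulate-then-commit pattern; the feature bits and handlers are invented for illustration:

    /* Sketch of the accumulate-then-commit flow from mlx5e_set_features. */
    #include <stdio.h>

    typedef unsigned long features_t;

    #define F_LRO   (1UL << 0)
    #define F_RXALL (1UL << 1)

    #define SET_FEATURE(features, feature, enable)          \
            do {                                            \
                    if (enable)                             \
                            *(features) |= (feature);       \
                    else                                    \
                            *(features) &= ~(feature);      \
            } while (0)

    static int handle_feature(features_t *oper, features_t wanted,
                              features_t feature, int (*handler)(int enable))
    {
            int enable = !!(wanted & feature);
            int err;

            if (!((*oper ^ wanted) & feature))      /* no change requested */
                    return 0;

            err = handler(enable);
            if (err)
                    return err;

            SET_FEATURE(oper, feature, enable);     /* record only on success */
            return 0;
    }

    static int set_lro(int enable)   { (void)enable; return 0; }
    static int set_rxall(int enable) { (void)enable; return -1; } /* fails */

    int main(void)
    {
            features_t dev_features = F_LRO;        /* current state */
            features_t wanted = F_RXALL;            /* LRO off, RXALL on */
            features_t oper = dev_features;
            int err;

            err  = handle_feature(&oper, wanted, F_LRO, set_lro);
            err |= handle_feature(&oper, wanted, F_RXALL, set_rxall);

            /* On partial failure, publish what actually changed. */
            dev_features = oper;
            printf("err=%d features=0x%lx\n", err, dev_features);
            return 0;
    }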
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
@@ -3679,6 +3540,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
struct sk_buff *skb,
netdev_features_t features)
{
+ unsigned int offset = 0;
struct udphdr *udph;
u8 proto;
u16 port;
@@ -3688,7 +3550,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
proto = ip_hdr(skb)->protocol;
break;
case htons(ETH_P_IPV6):
- proto = ipv6_hdr(skb)->nexthdr;
+ proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
break;
default:
goto out;
@@ -3733,26 +3595,62 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
return features;
}
+static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
+ struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int irqn_not_used, eqn;
+ struct mlx5_eq *eq;
+ u32 eqe_count;
+
+ if (mlx5_vector2eqn(mdev, sq->cq.mcq.vector, &eqn, &irqn_not_used))
+ return false;
+
+ eq = mlx5_eqn2eq(mdev, eqn);
+ if (IS_ERR(eq))
+ return false;
+
+ netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
+ eqn, eq->cons_index, eq->irqn);
+
+ eqe_count = mlx5_eq_poll_irq_disabled(eq);
+ if (!eqe_count)
+ return false;
+
+ netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
+ sq->channel->stats.eq_rearm++;
+ return true;
+}
+
static void mlx5e_tx_timeout(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- bool sched_work = false;
+ bool reopen_channels = false;
int i;
netdev_err(dev, "TX timeout detected\n");
for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
+ struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
struct mlx5e_txqsq *sq = priv->txq2sq[i];
- if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+ if (!netif_xmit_stopped(dev_queue))
continue;
- sched_work = true;
- clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
- i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+ netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
+ i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+ jiffies_to_usecs(jiffies - dev_queue->trans_start));
+
+ /* If we recovered a lost interrupt, the TX timeout will most
+ * likely be resolved; skip reopening the channels.
+ */
+ if (!mlx5e_tx_timeout_eq_recover(dev, sq)) {
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ reopen_channels = true;
+ }
}
- if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state))
schedule_work(&priv->tx_timeout_work);
}
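
mlx5e_tx_timeout_eq_recover() turns the timeout handler into two-tier recovery: first poll the SQ's EQ with the IRQ masked, and only if no stale EQEs turn up (i.e. the interrupt was not simply lost) fall back to reopening the channels. The control flow, condensed into a standalone stub with the mlx5 EQ poll replaced by a fake:

    /* Condensed decision flow of the TX-timeout handler above. */
    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int poll_eq_irq_disabled(void)
    {
            return 3;       /* stub: pretend 3 stale EQEs were consumed */
    }

    static bool tx_timeout_eq_recover(void)
    {
            unsigned int eqes = poll_eq_irq_disabled();

            if (!eqes)
                    return false;   /* EQ empty: not a lost interrupt */

            printf("recovered %u eqes by polling\n", eqes);
            return true;            /* missed IRQ; SQ should drain now */
    }

    int main(void)
    {
            bool reopen_channels = false;

            if (!tx_timeout_eq_recover())
                    reopen_channels = true; /* heavy-weight fallback */

            printf("reopen_channels=%d\n", reopen_channels);
            return 0;
    }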
@@ -4039,9 +3937,18 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
params->rx_cq_moderation.usec =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
- if (params->rx_am_enabled)
- params->rx_cq_moderation =
- mlx5e_am_get_def_profile(cq_period_mode);
+ if (params->rx_dim_enabled) {
+ switch (cq_period_mode) {
+ case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+ params->rx_cq_moderation =
+ net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE);
+ break;
+ case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+ default:
+ params->rx_cq_moderation =
+ net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ }
+ }
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
params->rx_cq_moderation.cq_period_mode ==
@@ -4103,7 +4010,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
@@ -4140,6 +4047,8 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+
+ mlx5e_timestamp_init(priv);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
@@ -4308,9 +4217,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_ipsec_cleanup(priv);
mlx5e_vxlan_cleanup(priv);
-
- if (priv->channels.params.xdp_prog)
- bpf_prog_put(priv->channels.params.xdp_prog);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2c43606c26b5..363d8dcb7f17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -190,6 +190,63 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
return 0;
}
+static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_rep_sq *rep_sq, *tmp;
+ struct mlx5e_rep_priv *rpriv;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return;
+
+ rpriv = mlx5e_rep_to_rep_priv(rep);
+ list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
+ mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+ list_del(&rep_sq->list);
+ kfree(rep_sq);
+ }
+}
+
+static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ u16 *sqns_array, int sqns_num)
+{
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_rep_sq *rep_sq;
+ int err;
+ int i;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return 0;
+
+ rpriv = mlx5e_rep_to_rep_priv(rep);
+ for (i = 0; i < sqns_num; i++) {
+ rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
+ if (!rep_sq) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ /* Add re-inject rule to the PF/representor sqs */
+ flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
+ rep->vport,
+ sqns_array[i]);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ kfree(rep_sq);
+ goto out_err;
+ }
+ rep_sq->send_to_vport_rule = flow_rule;
+ list_add(&rep_sq->list, &rpriv->vport_sqs_list);
+ }
+ return 0;
+
+out_err:
+ mlx5e_sqs2vport_stop(esw, rep);
+ return err;
+}
+
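The new mlx5e_sqs2vport_start/stop pair moves send-to-vport rule bookkeeping out of the eswitch and into the rep's own vport_sqs_list: one tracking node per rule, with the whole list unwound if any mid-loop allocation or rule insertion fails. A standalone sketch of that pattern using a plain singly linked list in place of the kernel list_head; the flow-rule handle is faked as an int:

    /* List-tracked resources with unwind-on-partial-failure. */
    #include <stdlib.h>
    #include <stdio.h>

    struct rep_sq {
            int rule;               /* stand-in for a flow-rule handle */
            struct rep_sq *next;
    };

    static struct rep_sq *sq_list;

    static void sqs2vport_stop(void)
    {
            struct rep_sq *rep_sq;

            while ((rep_sq = sq_list)) {
                    sq_list = rep_sq->next;
                    printf("deleting rule %d\n", rep_sq->rule);
                    free(rep_sq);
            }
    }

    static int sqs2vport_start(const int *sqns, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    struct rep_sq *rep_sq = calloc(1, sizeof(*rep_sq));

                    if (!rep_sq)
                            goto out_err;
                    rep_sq->rule = sqns[i]; /* pretend rule add succeeded */
                    rep_sq->next = sq_list;
                    sq_list = rep_sq;
            }
            return 0;

    out_err:
            sqs2vport_stop();       /* unwind everything added so far */
            return -1;
    }

    int main(void)
    {
            int sqns[] = { 10, 11 };

            if (!sqs2vport_start(sqns, 2))
                    sqs2vport_stop();
            return 0;
    }
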
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -210,7 +267,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
sqs[num_sqs++] = c->sq[tc].sqn;
}
- err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
+ err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
kfree(sqs);
out:
@@ -225,7 +282,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- mlx5_eswitch_sqs2vport_stop(esw, rep);
+ mlx5e_sqs2vport_stop(esw, rep);
}
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
@@ -238,7 +295,7 @@ static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
#endif
unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
DELAY_PROBE_TIME);
- struct net_device *netdev = rpriv->rep->netdev;
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
@@ -259,7 +316,7 @@ static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
neigh_update.neigh_stats_work.work);
- struct net_device *netdev = rpriv->rep->netdev;
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_neigh_hash_entry *nhe;
@@ -355,7 +412,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
neigh_update.netevent_nb);
struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- struct net_device *netdev = rpriv->rep->netdev;
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_neigh_hash_entry *nhe = NULL;
struct mlx5e_neigh m_neigh = {};
@@ -483,7 +540,7 @@ out_err:
static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
- struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
unregister_netevent_notifier(&neigh_update->netevent_nb);
@@ -662,9 +719,6 @@ static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return mlx5e_configure_flower(priv, cls_flower);
@@ -682,7 +736,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
{
struct mlx5e_priv *priv = cb_priv;
- if (!tc_can_offload(priv->netdev))
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -827,7 +881,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
- params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
@@ -877,6 +931,8 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
mlx5e_build_rep_params(mdev, &priv->channels.params);
mlx5e_build_rep_netdev(netdev);
+
+ mlx5e_timestamp_init(priv);
}
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
@@ -904,7 +960,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
err = PTR_ERR(flow_rule);
goto err_destroy_direct_tirs;
}
- rep->vport_rx_rule = flow_rule;
+ rpriv->vport_rx_rule = flow_rule;
err = mlx5e_tc_init(priv);
if (err)
@@ -913,7 +969,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
return 0;
err_del_flow_rule:
- mlx5_del_flow_rules(rep->vport_rx_rule);
+ mlx5_del_flow_rules(rpriv->vport_rx_rule);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
@@ -924,10 +980,9 @@ err_destroy_direct_rqts:
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
mlx5e_tc_cleanup(priv);
- mlx5_del_flow_rules(rep->vport_rx_rule);
+ mlx5_del_flow_rules(rpriv->vport_rx_rule);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_direct_rqts(priv);
}
@@ -967,10 +1022,10 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
/* e-Switch vport representors */
static int
-mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(rep->netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
int err;
@@ -992,10 +1047,10 @@ err_remove_sqs:
}
static void
-mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(rep->netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_remove_sqs_fwd_rules(priv);
@@ -1008,8 +1063,9 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
}
static int
-mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
+ struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5e_rep_priv *rpriv;
struct net_device *netdev;
struct mlx5e_priv *upriv;
@@ -1019,7 +1075,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
if (!rpriv)
return -ENOMEM;
- netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv);
+ netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
rep->vport);
@@ -1027,8 +1083,10 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
return -EINVAL;
}
- rep->netdev = netdev;
+ rpriv->netdev = netdev;
rpriv->rep = rep;
+ rep->rep_if[REP_ETH].priv = rpriv;
+ INIT_LIST_HEAD(&rpriv->vport_sqs_list);
err = mlx5e_attach_netdev(netdev_priv(netdev));
if (err) {
@@ -1044,7 +1102,8 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
- upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
+ upriv = netdev_priv(uplink_rpriv->netdev);
err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
upriv);
if (err)
@@ -1076,16 +1135,19 @@ err_destroy_netdev:
}
static void
-mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
- struct net_device *netdev = rep->netdev;
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *uplink_rpriv;
void *ppriv = priv->ppriv;
struct mlx5e_priv *upriv;
- unregister_netdev(rep->netdev);
- upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+ unregister_netdev(netdev);
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
+ REP_ETH);
+ upriv = netdev_priv(uplink_rpriv->netdev);
tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
upriv);
mlx5e_rep_neigh_cleanup(rpriv);
@@ -1100,18 +1162,13 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(mdev);
int vport;
- u8 mac[ETH_ALEN];
-
- mlx5_query_nic_vport_mac_address(mdev, 0, mac);
for (vport = 1; vport < total_vfs; vport++) {
- struct mlx5_eswitch_rep rep;
+ struct mlx5_eswitch_rep_if rep_if = {};
- rep.load = mlx5e_vport_rep_load;
- rep.unload = mlx5e_vport_rep_unload;
- rep.vport = vport;
- ether_addr_copy(rep.hw_id, mac);
- mlx5_eswitch_register_vport_rep(esw, vport, &rep);
+ rep_if.load = mlx5e_vport_rep_load;
+ rep_if.unload = mlx5e_vport_rep_unload;
+ mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
}
}
@@ -1123,21 +1180,24 @@ static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
int vport;
for (vport = 1; vport < total_vfs; vport++)
- mlx5_eswitch_unregister_vport_rep(esw, vport);
+ mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
}
void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_eswitch *esw = mdev->priv.eswitch;
- struct mlx5_eswitch_rep rep;
+ struct mlx5_eswitch_rep_if rep_if;
+ struct mlx5e_rep_priv *rpriv;
+
+ rpriv = priv->ppriv;
+ rpriv->netdev = priv->netdev;
- mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
- rep.load = mlx5e_nic_rep_load;
- rep.unload = mlx5e_nic_rep_unload;
- rep.vport = FDB_UPLINK_VPORT;
- rep.netdev = priv->netdev;
- mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport*/
+ rep_if.load = mlx5e_nic_rep_load;
+ rep_if.unload = mlx5e_nic_rep_unload;
+ rep_if.priv = rpriv;
+ INIT_LIST_HEAD(&rpriv->vport_sqs_list);
+ mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport */
mlx5e_rep_register_vf_vports(priv); /* VFs vports */
}
@@ -1148,7 +1208,7 @@ void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
- mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/
+ mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF*/
}
void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 5659ed9f51e6..b9b481f2833a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -56,8 +56,17 @@ struct mlx5e_neigh_update_table {
struct mlx5e_rep_priv {
struct mlx5_eswitch_rep *rep;
struct mlx5e_neigh_update_table neigh_update;
+ struct net_device *netdev;
+ struct mlx5_flow_handle *vport_rx_rule;
+ struct list_head vport_sqs_list;
};
+static inline
+struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
+{
+ return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+}
+
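The new accessor reflects the ownership flip in this patch: the rep no longer carries a netdev pointer; instead each rep interface (REP_ETH here) stashes its private struct in rep->rep_if[] and recovers it through this helper. A compile-and-run toy version with simplified stand-in types:

    /* Per-interface rep private data, simplified. */
    #include <stdio.h>

    enum rep_type { REP_ETH, REP_IB, NUM_REP_TYPES };

    struct rep_if {
            void *priv;     /* owned by the interface that registered it */
    };

    struct eswitch_rep {
            struct rep_if rep_if[NUM_REP_TYPES];
    };

    struct eth_rep_priv {
            const char *netdev_name;
    };

    static struct eth_rep_priv *rep_to_eth_priv(struct eswitch_rep *rep)
    {
            return rep->rep_if[REP_ETH].priv;
    }

    int main(void)
    {
            struct eth_rep_priv rpriv = { .netdev_name = "eth_rep0" };
            struct eswitch_rep rep = { .rep_if[REP_ETH] = { .priv = &rpriv } };

            printf("%s\n", rep_to_eth_priv(&rep)->netdev_name);
            return 0;
    }
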
struct mlx5e_neigh {
struct net_device *dev;
union {
@@ -124,6 +133,11 @@ struct mlx5e_encap_entry {
int encap_size;
};
+struct mlx5e_rep_sq {
+ struct mlx5_flow_handle *send_to_vport_rule;
+ struct list_head list;
+};
+
void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev);
void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 5b499c7a698f..0d4bb0688faa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -495,8 +495,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
mlx5_cqwq_pop(&cq->wq);
if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
- WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
- cqe->op_own);
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own);
return;
}
@@ -506,9 +506,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
}
if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
- WARN_ONCE(true,
- "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
- icowi->opcode);
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode);
}
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
@@ -632,7 +631,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
- if (is_last_ethertype_ip(skb, &network_depth)) {
+ if (likely(is_last_ethertype_ip(skb, &network_depth))) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
if (network_depth > ETH_HLEN)
@@ -812,6 +811,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
xdp.data_hard_start = va;
+ xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(prog, &xdp);
switch (act) {
@@ -1175,7 +1175,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
+ struct hwtstamp_config *tstamp;
struct net_device *netdev;
+ struct mlx5e_priv *priv;
char *pseudo_header;
u32 qpn;
u8 *dgid;
@@ -1194,6 +1196,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
return;
}
+ priv = mlx5i_epriv(netdev);
+ tstamp = &priv->tstamp;
+
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
if ((!g) || dgid[0] != 0xff)
@@ -1214,7 +1219,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+ if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
skb_hwtstamps(skb)->hwtstamp =
mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
deleted file mode 100644
index e401d9d245f3..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "en.h"
-
-/* Adaptive moderation profiles */
-#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define MLX5E_RX_AM_DEF_PROFILE_CQE 1
-#define MLX5E_RX_AM_DEF_PROFILE_EQE 1
-#define MLX5E_PARAMS_AM_NUM_PROFILES 5
-
-/* All profiles sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */
-#define MLX5_AM_EQE_PROFILES { \
- {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-}
-
-#define MLX5_AM_CQE_PROFILES { \
- {2, 256}, \
- {8, 128}, \
- {16, 64}, \
- {32, 64}, \
- {64, 64} \
-}
-
-static const struct mlx5e_cq_moder
-profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
- MLX5_AM_EQE_PROFILES,
- MLX5_AM_CQE_PROFILES,
-};
-
-static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
-{
- struct mlx5e_cq_moder cq_moder;
-
- cq_moder = profile[cq_period_mode][ix];
- cq_moder.cq_period_mode = cq_period_mode;
- return cq_moder;
-}
-
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
-{
- int default_profile_ix;
-
- if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE;
- else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
- default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
-
- return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix);
-}
-
-/* Adaptive moderation logic */
-enum {
- MLX5E_AM_START_MEASURE,
- MLX5E_AM_MEASURE_IN_PROGRESS,
- MLX5E_AM_APPLY_NEW_PROFILE,
-};
-
-enum {
- MLX5E_AM_PARKING_ON_TOP,
- MLX5E_AM_PARKING_TIRED,
- MLX5E_AM_GOING_RIGHT,
- MLX5E_AM_GOING_LEFT,
-};
-
-enum {
- MLX5E_AM_STATS_WORSE,
- MLX5E_AM_STATS_SAME,
- MLX5E_AM_STATS_BETTER,
-};
-
-enum {
- MLX5E_AM_STEPPED,
- MLX5E_AM_TOO_TIRED,
- MLX5E_AM_ON_EDGE,
-};
-
-static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
-{
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- return true;
- case MLX5E_AM_GOING_RIGHT:
- return (am->steps_left > 1) && (am->steps_right == 1);
- default: /* MLX5E_AM_GOING_LEFT */
- return (am->steps_right > 1) && (am->steps_left == 1);
- }
-}
-
-static void mlx5e_am_turn(struct mlx5e_rx_am *am)
-{
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- break;
- case MLX5E_AM_GOING_RIGHT:
- am->tune_state = MLX5E_AM_GOING_LEFT;
- am->steps_left = 0;
- break;
- case MLX5E_AM_GOING_LEFT:
- am->tune_state = MLX5E_AM_GOING_RIGHT;
- am->steps_right = 0;
- break;
- }
-}
-
-static int mlx5e_am_step(struct mlx5e_rx_am *am)
-{
- if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2))
- return MLX5E_AM_TOO_TIRED;
-
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- case MLX5E_AM_PARKING_TIRED:
- break;
- case MLX5E_AM_GOING_RIGHT:
- if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
- return MLX5E_AM_ON_EDGE;
- am->profile_ix++;
- am->steps_right++;
- break;
- case MLX5E_AM_GOING_LEFT:
- if (am->profile_ix == 0)
- return MLX5E_AM_ON_EDGE;
- am->profile_ix--;
- am->steps_left++;
- break;
- }
-
- am->tired++;
- return MLX5E_AM_STEPPED;
-}
-
-static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am)
-{
- am->steps_right = 0;
- am->steps_left = 0;
- am->tired = 0;
- am->tune_state = MLX5E_AM_PARKING_ON_TOP;
-}
-
-static void mlx5e_am_park_tired(struct mlx5e_rx_am *am)
-{
- am->steps_right = 0;
- am->steps_left = 0;
- am->tune_state = MLX5E_AM_PARKING_TIRED;
-}
-
-static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
-{
- am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT :
- MLX5E_AM_GOING_RIGHT;
- mlx5e_am_step(am);
-}
-
-#define IS_SIGNIFICANT_DIFF(val, ref) \
- (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
-
-static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
- struct mlx5e_rx_am_stats *prev)
-{
- if (!prev->bpms)
- return curr->bpms ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_SAME;
-
- if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
- return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
- return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
- return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
- MLX5E_AM_STATS_WORSE;
-
- return MLX5E_AM_STATS_SAME;
-}
-
-static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats,
- struct mlx5e_rx_am *am)
-{
- int prev_state = am->tune_state;
- int prev_ix = am->profile_ix;
- int stats_res;
- int step_res;
-
- switch (am->tune_state) {
- case MLX5E_AM_PARKING_ON_TOP:
- stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
- if (stats_res != MLX5E_AM_STATS_SAME)
- mlx5e_am_exit_parking(am);
- break;
-
- case MLX5E_AM_PARKING_TIRED:
- am->tired--;
- if (!am->tired)
- mlx5e_am_exit_parking(am);
- break;
-
- case MLX5E_AM_GOING_RIGHT:
- case MLX5E_AM_GOING_LEFT:
- stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
- if (stats_res != MLX5E_AM_STATS_BETTER)
- mlx5e_am_turn(am);
-
- if (mlx5e_am_on_top(am)) {
- mlx5e_am_park_on_top(am);
- break;
- }
-
- step_res = mlx5e_am_step(am);
- switch (step_res) {
- case MLX5E_AM_ON_EDGE:
- mlx5e_am_park_on_top(am);
- break;
- case MLX5E_AM_TOO_TIRED:
- mlx5e_am_park_tired(am);
- break;
- }
-
- break;
- }
-
- if ((prev_state != MLX5E_AM_PARKING_ON_TOP) ||
- (am->tune_state != MLX5E_AM_PARKING_ON_TOP))
- am->prev_stats = *curr_stats;
-
- return am->profile_ix != prev_ix;
-}
-
-static void mlx5e_am_sample(struct mlx5e_rq *rq,
- struct mlx5e_rx_am_sample *s)
-{
- s->time = ktime_get();
- s->pkt_ctr = rq->stats.packets;
- s->byte_ctr = rq->stats.bytes;
- s->event_ctr = rq->cq.event_ctr;
-}
-
-#define MLX5E_AM_NEVENTS 64
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
-#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
-
-static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
- struct mlx5e_rx_am_sample *end,
- struct mlx5e_rx_am_stats *curr_stats)
-{
- /* u32 holds up to 71 minutes, should be enough */
- u32 delta_us = ktime_us_delta(end->time, start->time);
- u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
- u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
- start->byte_ctr);
-
- if (!delta_us)
- return;
-
- curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
- curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
- curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
- delta_us);
-}
-
-void mlx5e_rx_am_work(struct work_struct *work)
-{
- struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am,
- work);
- struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
- struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
-
- mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
- cur_profile.usec, cur_profile.pkts);
-
- am->state = MLX5E_AM_START_MEASURE;
-}
-
-void mlx5e_rx_am(struct mlx5e_rq *rq)
-{
- struct mlx5e_rx_am *am = &rq->am;
- struct mlx5e_rx_am_sample end_sample;
- struct mlx5e_rx_am_stats curr_stats;
- u16 nevents;
-
- switch (am->state) {
- case MLX5E_AM_MEASURE_IN_PROGRESS:
- nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
- am->start_sample.event_ctr);
- if (nevents < MLX5E_AM_NEVENTS)
- break;
- mlx5e_am_sample(rq, &end_sample);
- mlx5e_am_calc_stats(&am->start_sample, &end_sample,
- &curr_stats);
- if (mlx5e_am_decision(&curr_stats, am)) {
- am->state = MLX5E_AM_APPLY_NEW_PROFILE;
- schedule_work(&am->work);
- break;
- }
- /* fall through */
- case MLX5E_AM_START_MEASURE:
- mlx5e_am_sample(rq, &am->start_sample);
- am->state = MLX5E_AM_MEASURE_IN_PROGRESS;
- break;
- case MLX5E_AM_APPLY_NEW_PROFILE:
- break;
- }
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 1f1f8af87d4d..5a4608281f38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -238,15 +238,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
int err = 0;
/* Temporarily enable local_lb */
- if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
- mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
- if (!lbtp->local_lb)
- mlx5_nic_vport_update_local_lb(priv->mdev, true);
+ err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
+ if (err)
+ return err;
+
+ if (!lbtp->local_lb) {
+ err = mlx5_nic_vport_update_local_lb(priv->mdev, true);
+ if (err)
+ return err;
}
err = mlx5e_refresh_tirs(priv, true);
if (err)
- return err;
+ goto out;
lbtp->loopback_ok = false;
init_completion(&lbtp->comp);
@@ -256,16 +260,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
lbtp->pt.dev = priv->netdev;
lbtp->pt.af_packet_priv = lbtp;
dev_add_pack(&lbtp->pt);
+
+ return 0;
+
+out:
+ if (!lbtp->local_lb)
+ mlx5_nic_vport_update_local_lb(priv->mdev, false);
+
return err;
}
static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
struct mlx5e_lbt_priv *lbtp)
{
- if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
- if (!lbtp->local_lb)
- mlx5_nic_vport_update_local_lb(priv->mdev, false);
- }
+ if (!lbtp->local_lb)
+ mlx5_nic_vport_update_local_lb(priv->mdev, false);
dev_remove_pack(&lbtp->pt);
mlx5e_refresh_tirs(priv, false);
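
The selftest now unconditionally saves the local-loopback state, enables it only when needed, and restores it both in cleanup and on the new error path when refreshing TIRs fails. The save/toggle/restore flow as a standalone stub, with the mlx5 vport calls replaced by fakes and refresh_tirs() forced to fail:

    /* Save, toggle only if needed, restore only what we changed. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool hw_local_lb;        /* stand-in for device state */

    static int query_local_lb(bool *v)  { *v = hw_local_lb; return 0; }
    static int update_local_lb(bool v)  { hw_local_lb = v;  return 0; }
    static int refresh_tirs(bool lb)    { (void)lb; return -1; } /* fails */

    static int loopback_setup(bool *saved_lb)
    {
            int err;

            err = query_local_lb(saved_lb);
            if (err)
                    return err;

            if (!*saved_lb) {
                    err = update_local_lb(true);
                    if (err)
                            return err;
            }

            err = refresh_tirs(true);
            if (err)
                    goto out;

            return 0;

    out:
            if (!*saved_lb)         /* restore only what we changed */
                    update_local_lb(false);
            return err;
    }

    int main(void)
    {
            bool saved = false;

            printf("setup: %d, lb restored to %d\n",
                   loopback_setup(&saved), hw_local_lb);
            return 0;
    }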
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index b74ddc7984bc..5f0f3493d747 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -71,6 +71,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
@@ -99,6 +100,72 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
+static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_sw_stats temp, *s = &temp;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_sq_stats *sq_stats;
+ struct mlx5e_ch_stats *ch_stats;
+ int i, j;
+
+ memset(s, 0, sizeof(*s));
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
+
+ rq_stats = &c->rq.stats;
+ ch_stats = &c->stats;
+
+ s->rx_packets += rq_stats->packets;
+ s->rx_bytes += rq_stats->bytes;
+ s->rx_lro_packets += rq_stats->lro_packets;
+ s->rx_lro_bytes += rq_stats->lro_bytes;
+ s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
+ s->rx_csum_none += rq_stats->csum_none;
+ s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
+ s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
+ s->rx_xdp_drop += rq_stats->xdp_drop;
+ s->rx_xdp_tx += rq_stats->xdp_tx;
+ s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
+ s->rx_wqe_err += rq_stats->wqe_err;
+ s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+ s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
+ s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
+ s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+ s->rx_page_reuse += rq_stats->page_reuse;
+ s->rx_cache_reuse += rq_stats->cache_reuse;
+ s->rx_cache_full += rq_stats->cache_full;
+ s->rx_cache_empty += rq_stats->cache_empty;
+ s->rx_cache_busy += rq_stats->cache_busy;
+ s->rx_cache_waive += rq_stats->cache_waive;
+ s->ch_eq_rearm += ch_stats->eq_rearm;
+
+ for (j = 0; j < priv->channels.params.num_tc; j++) {
+ sq_stats = &c->sq[j].stats;
+
+ s->tx_packets += sq_stats->packets;
+ s->tx_bytes += sq_stats->bytes;
+ s->tx_tso_packets += sq_stats->tso_packets;
+ s->tx_tso_bytes += sq_stats->tso_bytes;
+ s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+ s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
+ s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
+ s->tx_queue_stopped += sq_stats->stopped;
+ s->tx_queue_wake += sq_stats->wake;
+ s->tx_queue_dropped += sq_stats->dropped;
+ s->tx_xmit_more += sq_stats->xmit_more;
+ s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+ s->tx_csum_none += sq_stats->csum_none;
+ s->tx_csum_partial += sq_stats->csum_partial;
+ }
+ }
+
+ s->link_down_events_phy = MLX5_GET(ppcnt_reg,
+ priv->stats.pport.phy_counters,
+ counter_set.phys_layer_cntrs.link_down_events);
+ memcpy(&priv->stats.sw, s, sizeof(*s));
+}
+
static const struct counter_desc q_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};
@@ -128,6 +195,22 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
+static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
+ int err;
+
+ if (!priv->q_counter)
+ return;
+
+ err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
+ if (err)
+ return;
+
+ qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+}
+
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
{ "rx_vport_unicast_packets",
@@ -200,6 +283,19 @@ static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ u32 *out = (u32 *)priv->stats.vport.query_vport_out;
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+ MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+ MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+}
+
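Each group's fill_strings/fill_stats callback (not all shown here) walks a counter_desc table pairing a counter name with its byte offset in the stats struct, so the new per-group update_stats callbacks only have to refresh the backing struct. A minimal standalone version of that descriptor-driven readout; the struct and macro names are simplified stand-ins for the mlx5e ones:

    /* Offset-table stats readout, as used by the counter_desc arrays. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sw_stats {
            uint64_t rx_packets;
            uint64_t tx_packets;
    };

    struct counter_desc {
            const char *name;
            size_t offset;
    };

    #define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

    static const struct counter_desc sw_desc[] = {
            DECLARE_STAT(struct sw_stats, rx_packets),
            DECLARE_STAT(struct sw_stats, tx_packets),
    };

    int main(void)
    {
            struct sw_stats s = { .rx_packets = 5, .tx_packets = 7 };
            size_t i;

            /* fill_stats: read each counter through its recorded offset. */
            for (i = 0; i < sizeof(sw_desc) / sizeof(sw_desc[0]); i++)
                    printf("%s: %llu\n", sw_desc[i].name,
                           (unsigned long long)*(const uint64_t *)
                                   ((const char *)&s + sw_desc[i].offset));
            return 0;
    }
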
#define PPORT_802_3_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
@@ -252,6 +348,20 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->IEEE_802_3_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_2863_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
@@ -289,6 +399,20 @@ static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->RFC_2863_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_2819_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
@@ -336,6 +460,20 @@ static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->RFC_2819_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
@@ -376,6 +514,27 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
return idx;
}
+static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->phy_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
+ return;
+
+ out = pstats->phy_statistical_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PPORT_ETH_EXT_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
@@ -418,6 +577,23 @@ static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *out;
+
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
+ return;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ out = pstats->eth_ext_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+}
+
#define PCIE_PERF_OFF(c) \
MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
@@ -505,6 +681,22 @@ static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
+ void *out;
+
+ if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
+ return;
+
+ out = pcie_stats->pcie_perf_counters;
+ MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+}
+
#define PPORT_PER_PRIO_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_per_prio_grp_data_layout.c##_high)
@@ -656,6 +848,47 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
return idx;
}
+static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
+{
+ return mlx5e_grp_per_prio_traffic_get_num_stats(priv) +
+ mlx5e_grp_per_prio_pfc_get_num_stats(priv);
+}
+
+static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
+ idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
+ return idx;
+}
+
+static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
+ idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
+ return idx;
+}
+
+static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ int prio;
+ void *out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ out = pstats->per_prio_counters[prio];
+ MLX5_SET(ppcnt_reg, in, prio_tc, prio);
+ mlx5_core_access_reg(mdev, in, sz, out, sz,
+ MLX5_REG_PPCNT, 0, 0);
+ }
+}
+
static const struct counter_desc mlx5e_pme_status_desc[] = {
{ "module_unplug", 8 },
};
@@ -723,6 +956,11 @@ static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}
+static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_ipsec_update_stats(priv);
+}
+
static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
@@ -767,12 +1005,18 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
};
+static const struct counter_desc ch_stats_desc[] = {
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
+};
+
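Because MLX5E_DECLARE_CH_STAT embeds "ch%d_" in the format string, the single descriptor above yields one ethtool counter per channel; for example, with three channels the strings filled in below come out as:

/* ch0_eq_rearm
 * ch1_eq_rearm
 * ch2_eq_rearm
 */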
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
+#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
return (NUM_RQ_STATS * priv->channels.num) +
+ (NUM_CH_STATS * priv->channels.num) +
(NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
}
@@ -785,6 +1029,11 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
return idx;
for (i = 0; i < priv->channels.num; i++)
+ for (j = 0; j < NUM_CH_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ ch_stats_desc[j].format, i);
+
+ for (i = 0; i < priv->channels.num; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
@@ -808,6 +1057,12 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
for (i = 0; i < channels->num; i++)
+ for (j = 0; j < NUM_CH_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&channels->c[i]->stats,
+ ch_stats_desc, j);
+
+ for (i = 0; i < channels->num; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
@@ -823,61 +1078,71 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+/* The stats groups are listed here in reverse order relative to their update_stats() calls */
const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
{
.get_num_stats = mlx5e_grp_sw_get_num_stats,
.fill_strings = mlx5e_grp_sw_fill_strings,
.fill_stats = mlx5e_grp_sw_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_sw_update_stats,
},
{
.get_num_stats = mlx5e_grp_q_get_num_stats,
.fill_strings = mlx5e_grp_q_fill_strings,
.fill_stats = mlx5e_grp_q_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_q_update_stats,
},
{
.get_num_stats = mlx5e_grp_vport_get_num_stats,
.fill_strings = mlx5e_grp_vport_fill_strings,
.fill_stats = mlx5e_grp_vport_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_vport_update_stats,
},
{
.get_num_stats = mlx5e_grp_802_3_get_num_stats,
.fill_strings = mlx5e_grp_802_3_fill_strings,
.fill_stats = mlx5e_grp_802_3_fill_stats,
+ .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
+ .update_stats = mlx5e_grp_802_3_update_stats,
},
{
.get_num_stats = mlx5e_grp_2863_get_num_stats,
.fill_strings = mlx5e_grp_2863_fill_strings,
.fill_stats = mlx5e_grp_2863_fill_stats,
+ .update_stats = mlx5e_grp_2863_update_stats,
},
{
.get_num_stats = mlx5e_grp_2819_get_num_stats,
.fill_strings = mlx5e_grp_2819_fill_strings,
.fill_stats = mlx5e_grp_2819_fill_stats,
+ .update_stats = mlx5e_grp_2819_update_stats,
},
{
.get_num_stats = mlx5e_grp_phy_get_num_stats,
.fill_strings = mlx5e_grp_phy_fill_strings,
.fill_stats = mlx5e_grp_phy_fill_stats,
+ .update_stats = mlx5e_grp_phy_update_stats,
},
{
.get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
.fill_strings = mlx5e_grp_eth_ext_fill_strings,
.fill_stats = mlx5e_grp_eth_ext_fill_stats,
+ .update_stats = mlx5e_grp_eth_ext_update_stats,
},
{
.get_num_stats = mlx5e_grp_pcie_get_num_stats,
.fill_strings = mlx5e_grp_pcie_fill_strings,
.fill_stats = mlx5e_grp_pcie_fill_stats,
+ .update_stats = mlx5e_grp_pcie_update_stats,
},
{
- .get_num_stats = mlx5e_grp_per_prio_traffic_get_num_stats,
- .fill_strings = mlx5e_grp_per_prio_traffic_fill_strings,
- .fill_stats = mlx5e_grp_per_prio_traffic_fill_stats,
- },
- {
- .get_num_stats = mlx5e_grp_per_prio_pfc_get_num_stats,
- .fill_strings = mlx5e_grp_per_prio_pfc_fill_strings,
- .fill_stats = mlx5e_grp_per_prio_pfc_fill_stats,
+ .get_num_stats = mlx5e_grp_per_prio_get_num_stats,
+ .fill_strings = mlx5e_grp_per_prio_fill_strings,
+ .fill_stats = mlx5e_grp_per_prio_fill_stats,
+ .update_stats = mlx5e_grp_per_prio_update_stats,
},
{
.get_num_stats = mlx5e_grp_pme_get_num_stats,
@@ -888,6 +1153,7 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
.get_num_stats = mlx5e_grp_ipsec_get_num_stats,
.fill_strings = mlx5e_grp_ipsec_fill_strings,
.fill_stats = mlx5e_grp_ipsec_fill_stats,
+ .update_stats = mlx5e_grp_ipsec_update_stats,
},
{
.get_num_stats = mlx5e_grp_channels_get_num_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index d679e21f686e..0b3320a2b072 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -44,6 +44,7 @@
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)
struct counter_desc {
char format[ETH_GSTRING_LEN];
@@ -88,6 +89,7 @@ struct mlx5e_sw_stats {
u64 rx_cache_empty;
u64 rx_cache_busy;
u64 rx_cache_waive;
+ u64 ch_eq_rearm;
/* Special handling counters */
u64 link_down_events_phy;
@@ -192,6 +194,10 @@ struct mlx5e_sq_stats {
u64 dropped;
};
+struct mlx5e_ch_stats {
+ u64 eq_rearm;
+};
+
struct mlx5e_stats {
struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt;
@@ -201,11 +207,17 @@ struct mlx5e_stats {
struct mlx5e_pcie_stats pcie;
};
+enum {
+ MLX5E_NDO_UPDATE_STATS = BIT(0x1),
+};
+
struct mlx5e_priv;
struct mlx5e_stats_grp {
+ u16 update_stats_mask;
int (*get_num_stats)(struct mlx5e_priv *priv);
int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
+ void (*update_stats)(struct mlx5e_priv *priv);
};
extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
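For context, a sketch of how this table is consumed on the update path: iterating in reverse (matching the ordering comment above mlx5e_stats_grps[]) and honoring update_stats_mask when the update is driven from the ndo. This approximates the driver's update routine and assumes mlx5e_num_stats_grps holds the array length:

static void mlx5e_update_stats_sketch(struct mlx5e_priv *priv, bool full)
{
	int i;

	/* reverse order, per the comment above mlx5e_stats_grps[] */
	for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
		if (mlx5e_stats_grps[i].update_stats &&
		    (full || (mlx5e_stats_grps[i].update_stats_mask &
			      MLX5E_NDO_UPDATE_STATS)))
			mlx5e_stats_grps[i].update_stats(priv);
}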
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 55979ec2e88a..fd98b0dc610f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -51,17 +51,22 @@
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
+#include "fs_core.h"
struct mlx5_nic_flow_attr {
u32 action;
u32 flow_tag;
u32 mod_hdr_id;
+ u32 hairpin_tirn;
+ struct mlx5_flow_table *hairpin_ft;
};
enum {
MLX5E_TC_FLOW_ESWITCH = BIT(0),
MLX5E_TC_FLOW_NIC = BIT(1),
MLX5E_TC_FLOW_OFFLOADED = BIT(2),
+ MLX5E_TC_FLOW_HAIRPIN = BIT(3),
+ MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(4),
};
struct mlx5e_tc_flow {
@@ -71,6 +76,7 @@ struct mlx5e_tc_flow {
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap ID */
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
+ struct list_head hairpin; /* flows sharing the same hairpin */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -93,6 +99,32 @@ enum {
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
+struct mlx5e_hairpin {
+ struct mlx5_hairpin *pair;
+
+ struct mlx5_core_dev *func_mdev;
+ struct mlx5e_priv *func_priv;
+ u32 tdn;
+ u32 tirn;
+
+ int num_channels;
+ struct mlx5e_rqt indir_rqt;
+ u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_ttc_table ttc;
+};
+
+struct mlx5e_hairpin_entry {
+ /* a node of a hash table which keeps all the hairpin entries */
+ struct hlist_node hairpin_hlist;
+
+ /* flows sharing the same hairpin */
+ struct list_head flows;
+
+ u16 peer_vhca_id;
+ u8 prio;
+ struct mlx5e_hairpin *hp;
+};
+
struct mod_hdr_key {
int num_actions;
void *actions;
@@ -222,6 +254,417 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
}
}
+static
+struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
+{
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+
+ netdev = __dev_get_by_index(net, ifindex);
+ priv = netdev_priv(netdev);
+ return priv->mdev;
+}
+
+static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
+ void *tirc;
+ int err;
+
+ err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
+ if (err)
+ goto alloc_tdn_err;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
+ MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+
+ err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
+ if (err)
+ goto create_tir_err;
+
+ return 0;
+
+create_tir_err:
+ mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+alloc_tdn_err:
+ return err;
+}
+
+static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
+{
+ mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
+ mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+}
+
+static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
+{
+ u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
+ struct mlx5e_priv *priv = hp->func_priv;
+ int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
+
+ mlx5e_build_default_indir_rqt(indirection_rqt, sz,
+ hp->num_channels);
+
+ for (i = 0; i < sz; i++) {
+ ix = i;
+ if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR)
+ ix = mlx5e_bits_invert(i, ilog2(sz));
+ ix = indirection_rqt[ix];
+ rqn = hp->pair->rqn[ix];
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
+ }
+}
+
+static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
+{
+ int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
+ struct mlx5e_priv *priv = hp->func_priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *rqtc;
+ u32 *in;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+ mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+
+ err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
+ if (!err)
+ hp->indir_rqt.enabled = true;
+
+ kvfree(in);
+ return err;
+}
+
+static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
+{
+ struct mlx5e_priv *priv = hp->func_priv;
+ u32 in[MLX5_ST_SZ_DW(create_tir_in)];
+ int tt, i, err;
+ void *tirc;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
+ mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
+
+ err = mlx5_core_create_tir(hp->func_mdev, in,
+ MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
+ if (err) {
+ mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
+ goto err_destroy_tirs;
+ }
+ }
+ return 0;
+
+err_destroy_tirs:
+ for (i = 0; i < tt; i++)
+ mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
+ return err;
+}
+
+static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
+{
+ int tt;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
+}
+
+static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
+ struct ttc_params *ttc_params)
+{
+ struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+ int tt;
+
+ memset(ttc_params, 0, sizeof(*ttc_params));
+
+ ttc_params->any_tt_tirn = hp->tirn;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
+
+ ft_attr->max_fte = MLX5E_NUM_TT;
+ ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
+ ft_attr->prio = MLX5E_TC_PRIO;
+}
+
+static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
+{
+ struct mlx5e_priv *priv = hp->func_priv;
+ struct ttc_params ttc_params;
+ int err;
+
+ err = mlx5e_hairpin_create_indirect_rqt(hp);
+ if (err)
+ return err;
+
+ err = mlx5e_hairpin_create_indirect_tirs(hp);
+ if (err)
+ goto err_create_indirect_tirs;
+
+ mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
+ err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
+ if (err)
+ goto err_create_ttc_table;
+
+ netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
+ hp->num_channels, hp->ttc.ft.t->id);
+
+ return 0;
+
+err_create_ttc_table:
+ mlx5e_hairpin_destroy_indirect_tirs(hp);
+err_create_indirect_tirs:
+ mlx5e_destroy_rqt(priv, &hp->indir_rqt);
+
+ return err;
+}
+
+static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
+{
+ struct mlx5e_priv *priv = hp->func_priv;
+
+ mlx5e_destroy_ttc_table(priv, &hp->ttc);
+ mlx5e_hairpin_destroy_indirect_tirs(hp);
+ mlx5e_destroy_rqt(priv, &hp->indir_rqt);
+}
+
+static struct mlx5e_hairpin *
+mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
+ int peer_ifindex)
+{
+ struct mlx5_core_dev *func_mdev, *peer_mdev;
+ struct mlx5e_hairpin *hp;
+ struct mlx5_hairpin *pair;
+ int err;
+
+ hp = kzalloc(sizeof(*hp), GFP_KERNEL);
+ if (!hp)
+ return ERR_PTR(-ENOMEM);
+
+ func_mdev = priv->mdev;
+ peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+
+ pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
+ if (IS_ERR(pair)) {
+ err = PTR_ERR(pair);
+ goto create_pair_err;
+ }
+ hp->pair = pair;
+ hp->func_mdev = func_mdev;
+ hp->func_priv = priv;
+ hp->num_channels = params->num_channels;
+
+ err = mlx5e_hairpin_create_transport(hp);
+ if (err)
+ goto create_transport_err;
+
+ if (hp->num_channels > 1) {
+ err = mlx5e_hairpin_rss_init(hp);
+ if (err)
+ goto rss_init_err;
+ }
+
+ return hp;
+
+rss_init_err:
+ mlx5e_hairpin_destroy_transport(hp);
+create_transport_err:
+ mlx5_core_hairpin_destroy(hp->pair);
+create_pair_err:
+ kfree(hp);
+ return ERR_PTR(err);
+}
+
+static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
+{
+ if (hp->num_channels > 1)
+ mlx5e_hairpin_rss_cleanup(hp);
+ mlx5e_hairpin_destroy_transport(hp);
+ mlx5_core_hairpin_destroy(hp->pair);
+ kvfree(hp);
+}
+
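+ /* Pack the 16-bit peer vhca id and the 3-bit prio into one distinct hash
+ * key. Example (illustrative values): peer_vhca_id = 0x0002, prio = 3
+ * -> key = (0x0002 << 16) | 3 = 0x00020003
+ */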
+static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
+{
+ return (peer_vhca_id << 16 | prio);
+}
+
+static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
+ u16 peer_vhca_id, u8 prio)
+{
+ struct mlx5e_hairpin_entry *hpe;
+ u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
+
+ hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
+ hairpin_hlist, hash_key) {
+ if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
+ return hpe;
+ }
+
+ return NULL;
+}
+
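+ /* VLAN PCP is a 3-bit field (0..7); 8 is an out-of-range sentinel meaning
+ * the flow spec carries no usable priority match
+ */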
+#define UNKNOWN_MATCH_PRIO 8
+
+static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec, u8 *match_prio)
+{
+ void *headers_c, *headers_v;
+ u8 prio_val, prio_mask = 0;
+ bool vlan_present;
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
+ netdev_warn(priv->netdev,
+ "only PCP trust state supported for hairpin\n");
+ return -EOPNOTSUPP;
+ }
+#endif
+ headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+
+ vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
+ if (vlan_present) {
+ prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
+ prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
+ }
+
+ if (!vlan_present || !prio_mask) {
+ prio_val = UNKNOWN_MATCH_PRIO;
+ } else if (prio_mask != 0x7) {
+ netdev_warn(priv->netdev,
+ "masked priority match not supported for hairpin\n");
+ return -EOPNOTSUPP;
+ }
+
+ *match_prio = prio_val;
+ return 0;
+}
+
+static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ int peer_ifindex = parse_attr->mirred_ifindex;
+ struct mlx5_hairpin_params params;
+ struct mlx5_core_dev *peer_mdev;
+ struct mlx5e_hairpin_entry *hpe;
+ struct mlx5e_hairpin *hp;
+ u64 link_speed64;
+ u32 link_speed;
+ u8 match_prio;
+ u16 peer_id;
+ int err;
+
+ peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+ if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
+ netdev_warn(priv->netdev, "hairpin is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+ err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
+ if (err)
+ return err;
+ hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
+ if (hpe)
+ goto attach_flow;
+
+ hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
+ if (!hpe)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&hpe->flows);
+ hpe->peer_vhca_id = peer_id;
+ hpe->prio = match_prio;
+
+ params.log_data_size = 15;
+ params.log_data_size = min_t(u8, params.log_data_size,
+ MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
+ params.log_data_size = max_t(u8, params.log_data_size,
+ MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
+
+ params.log_num_packets = params.log_data_size -
+ MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
+ params.log_num_packets = min_t(u8, params.log_num_packets,
+ MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
+
+ params.q_counter = priv->q_counter;
+ /* set one hairpin pair per 50Gbs share of the link */
+ mlx5e_get_max_linkspeed(priv->mdev, &link_speed);
+ link_speed = max_t(u32, link_speed, 50000);
+ link_speed64 = link_speed;
+ do_div(link_speed64, 50000);
+ params.num_channels = link_speed64;
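+ /* e.g. a 100Gbs link: max(100000, 50000) / 50000 = 2 channels;
+ * a 25Gbs link: max(25000, 50000) / 50000 = 1 channel
+ */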
+
+ hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
+ if (IS_ERR(hp)) {
+ err = PTR_ERR(hp);
+ goto create_hairpin_err;
+ }
+
+ netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
+ hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
+ hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
+
+ hpe->hp = hp;
+ hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
+ hash_hairpin_info(peer_id, match_prio));
+
+attach_flow:
+ if (hpe->hp->num_channels > 1) {
+ flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
+ flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
+ } else {
+ flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
+ }
+ list_add(&flow->hairpin, &hpe->flows);
+
+ return 0;
+
+create_hairpin_err:
+ kfree(hpe);
+ return err;
+}
+
+static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
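+ /* Snapshot the successor before unlinking: when this was the last flow
+ * on its hairpin entry, "next" is the list head embedded in struct
+ * mlx5e_hairpin_entry, so list_empty(next) detects the last flow and
+ * list_entry(next, ...) recovers the owning entry.
+ */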
+ struct list_head *next = flow->hairpin.next;
+
+ list_del(&flow->hairpin);
+
+ /* no more hairpin flows for us, release the hairpin pair */
+ if (list_empty(next)) {
+ struct mlx5e_hairpin_entry *hpe;
+
+ hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
+
+ netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
+ hpe->hp->pair->peer_mdev->priv.name);
+
+ mlx5e_hairpin_destroy(hpe->hp);
+ hash_del(&hpe->hairpin_hlist);
+ kfree(hpe);
+ }
+}
+
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -229,7 +672,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_core_dev *dev = priv->mdev;
- struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flow_tag = attr->flow_tag,
@@ -238,18 +681,37 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule;
bool table_created = false;
- int err;
+ int err, dest_ix = 0;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.vlan.ft.t;
- } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(dev, true);
- if (IS_ERR(counter))
- return ERR_CAST(counter);
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
+ err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_add_hairpin_flow;
+ }
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = attr->hairpin_ft;
+ } else {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest[dest_ix].tir_num = attr->hairpin_tirn;
+ }
+ dest_ix++;
+ } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = priv->fs.vlan.ft.t;
+ dest_ix++;
+ }
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter = counter;
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ counter = mlx5_fc_create(dev, true);
+ if (IS_ERR(counter)) {
+ rule = ERR_CAST(counter);
+ goto err_fc_create;
+ }
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[dest_ix].counter = counter;
+ dest_ix++;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
@@ -279,7 +741,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
MLX5E_TC_PRIO,
tc_tbl_size,
MLX5E_TC_TABLE_NUM_GROUPS,
- 0, 0);
+ MLX5E_TC_FT_LEVEL, 0);
if (IS_ERR(priv->fs.tc.t)) {
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
@@ -292,7 +754,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
- &flow_act, &dest, 1);
+ &flow_act, dest, dest_ix);
if (IS_ERR(rule))
goto err_add_rule;
@@ -309,7 +771,10 @@ err_create_ft:
mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
mlx5_fc_destroy(dev, counter);
-
+err_fc_create:
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ mlx5e_hairpin_flow_del(priv, flow);
+err_add_hairpin_flow:
return rule;
}
@@ -330,6 +795,9 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
+
+ if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ mlx5e_hairpin_flow_del(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -617,7 +1085,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
FLOW_DISSECTOR_KEY_ENC_PORTS,
f->mask);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ struct net_device *up_dev = uplink_rpriv->netdev;
struct mlx5e_priv *up_priv = netdev_priv(up_dev);
/* Full udp dst port must be given */
@@ -1421,6 +1890,20 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
return true;
}
+static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+{
+ struct mlx5_core_dev *fmdev, *pmdev;
+ u16 func_id, peer_id;
+
+ fmdev = priv->mdev;
+ pmdev = peer_priv->mdev;
+
+ func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
+ peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+
+ return (func_id == peer_id);
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
@@ -1465,6 +1948,23 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EOPNOTSUPP;
}
+ if (is_tcf_mirred_egress_redirect(a)) {
+ struct net_device *peer_dev = tcf_mirred_dev(a);
+
+ if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
+ same_hw_devs(priv, netdev_priv(peer_dev))) {
+ parse_attr->mirred_ifindex = peer_dev->ifindex;
+ flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ } else {
+ netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
+ peer_dev->name);
+ return -EINVAL;
+ }
+ continue;
+ }
+
if (is_tcf_skbedit_mark(a)) {
u32 mark = tcf_skbedit_mark(a);
@@ -1507,6 +2007,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
int *out_ttl)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *uplink_rpriv;
struct rtable *rt;
struct neighbour *n = NULL;
@@ -1520,9 +2021,10 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
#else
return -EOPNOTSUPP;
#endif
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
/* if the egress device isn't on the same HW e-switch, we use the uplink */
if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
- *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ *out_dev = uplink_rpriv->netdev;
else
*out_dev = rt->dst.dev;
@@ -1547,6 +2049,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
struct dst_entry *dst;
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int ret;
@@ -1557,9 +2060,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
*out_ttl = ip6_dst_hoplimit(dst);
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
/* if the egress device isn't on the same HW e-switch, we use the uplink */
if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
- *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ *out_dev = uplink_rpriv->netdev;
else
*out_dev = dst->dev;
#else
@@ -1859,7 +2363,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
+ REP_ETH);
+ struct net_device *up_dev = uplink_rpriv->netdev;
unsigned short family = ip_tunnel_info_af(tun_info);
struct mlx5e_priv *up_priv = netdev_priv(up_dev);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
@@ -1982,11 +2488,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
struct mlx5e_priv *out_priv;
- out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
+ out_dev = tcf_mirred_dev(a);
if (switchdev_port_same_parent_id(priv->netdev,
out_dev)) {
@@ -1996,7 +2501,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
} else if (encap) {
- parse_attr->mirred_ifindex = ifindex;
+ parse_attr->mirred_ifindex = out_dev->ifindex;
parse_attr->tun_info = *info;
attr->parse_attr = parse_attr;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
@@ -2182,6 +2687,7 @@ int mlx5e_tc_init(struct mlx5e_priv *priv)
struct mlx5e_tc_table *tc = &priv->fs.tc;
hash_init(tc->mod_hdr_tbl);
+ hash_init(tc->hairpin_tbl);
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index ab92298eafc3..f292bb346985 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -78,8 +78,14 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
mlx5e_cq_arm(&c->sq[i].cq);
- if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM))
- mlx5e_rx_am(&c->rq);
+ if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) {
+ struct net_dim_sample dim_sample;
+ net_dim_sample(c->rq.cq.event_ctr,
+ c->rq.stats.packets,
+ c->rq.stats.bytes,
+ &dim_sample);
+ net_dim(&c->rq.dim, dim_sample);
+ }
mlx5e_cq_arm(&c->rq.cq);
mlx5e_cq_arm(&c->icosq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 60771865c99c..05c1157bc9f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -417,7 +417,11 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
mlx5_cq_completion(dev, cqn);
break;
-
+ case MLX5_EVENT_TYPE_DCT_DRAINED:
+ rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
+ rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
+ mlx5_rsc_event(dev, rsn, eqe->type);
+ break;
case MLX5_EVENT_TYPE_PATH_MIG:
case MLX5_EVENT_TYPE_COMM_EST:
case MLX5_EVENT_TYPE_SQ_DRAINED:
@@ -466,7 +470,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
break;
case MLX5_EVENT_TYPE_CQ_ERROR:
cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
- mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
+ mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
cqn, eqe->data.cq_err.syndrome);
mlx5_cq_event(dev, cqn, eqe->type);
break;
@@ -530,6 +534,24 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
return IRQ_HANDLED;
}
+/* Some architectures don't latch interrupts when they are disabled, so
+ * polling via mlx5_eq_poll_irq_disabled() can itself lose interrupts while
+ * trying to recover lost ones. Use it only as a last resort.
+ */
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq)
+{
+ u32 count_eqe;
+
+ disable_irq(eq->irqn);
+ count_eqe = eq->cons_index;
+ mlx5_eq_int(eq->irqn, eq);
+ count_eqe = eq->cons_index - count_eqe;
+ enable_irq(eq->irqn);
+
+ return count_eqe;
+}
+
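A plausible caller, sketched under assumptions rather than taken from this patch: a recovery path polls the EQ with its IRQ masked and, if any EQEs were consumed, accounts a recovered event, which is what the new ch%d_eq_rearm counter introduced above exists to count.

/* Hypothetical recovery helper; "eq" and "stats" stand in for a channel's
 * completion EQ and its mlx5e_ch_stats.
 */
static void mlx5e_eq_recover_sketch(struct mlx5_eq *eq,
				    struct mlx5e_ch_stats *stats)
{
	u32 eqe_count = mlx5_eq_poll_irq_disabled(eq);

	if (eqe_count)		/* missed interrupts were recovered */
		stats->eq_rearm++;
}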
static void init_eq_buf(struct mlx5_eq *eq)
{
struct mlx5_eqe *eqe;
@@ -715,6 +737,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, fpga))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR);
+ if (MLX5_CAP_GEN_MAX(dev, dct))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
@@ -775,7 +800,7 @@ err1:
return err;
}
-int mlx5_stop_eqs(struct mlx5_core_dev *dev)
+void mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
int err;
@@ -784,22 +809,26 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, pg)) {
err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
if (err)
- return err;
+ mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
+ err);
}
#endif
err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
if (err)
- return err;
+ mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
+ err);
- mlx5_destroy_unmap_eq(dev, &table->async_eq);
+ err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
+ if (err)
+ mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
+ err);
mlx5_cmd_use_polling(dev);
err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
if (err)
- mlx5_cmd_use_events(dev);
-
- return err;
+ mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
+ err);
}
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index bbb140f517c4..5ecf2cddc16d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "fs_core.h"
#define UPLINK_VPORT 0xFFFF
@@ -867,9 +868,10 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
- root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ vport->vport);
if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+ esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
return -EOPNOTSUPP;
}
@@ -984,9 +986,10 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
- root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ vport->vport);
if (!root_ns) {
- esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+ esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
return -EOPNOTSUPP;
}
@@ -1121,8 +1124,12 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_fc *counter = vport->ingress.drop_counter;
+ struct mlx5_flow_destination drop_ctr_dst = {0};
+ struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
+ int dest_num = 0;
int err = 0;
u8 *smac_v;
@@ -1186,9 +1193,18 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ /* Attach ingress drop flow counter */
+ if (counter) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter = counter;
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
vport->ingress.drop_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
- &flow_act, NULL, 0);
+ &flow_act, dst, dest_num);
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
esw_warn(esw->dev,
@@ -1208,8 +1224,12 @@ out:
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_fc *counter = vport->egress.drop_counter;
+ struct mlx5_flow_destination drop_ctr_dst = {0};
+ struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
+ int dest_num = 0;
int err = 0;
esw_vport_cleanup_egress_rules(esw, vport);
@@ -1260,9 +1280,18 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
/* Drop others rule (star rule) */
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ /* Attach egress drop flow counter */
+ if (counter) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ drop_ctr_dst.counter = counter;
+ dst = &drop_ctr_dst;
+ dest_num++;
+ }
vport->egress.drop_rule =
mlx5_add_flow_rules(vport->egress.acl, spec,
- &flow_act, NULL, 0);
+ &flow_act, dst, dest_num);
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
esw_warn(esw->dev,
@@ -1290,7 +1319,7 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- &tsar_ctx,
+ tsar_ctx,
&esw->qos.root_tsar_id);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
@@ -1333,20 +1362,20 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
if (vport->qos.enabled)
return -EEXIST;
- MLX5_SET(scheduling_context, &sched_ctx, element_type,
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
- vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+ vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
element_attributes);
MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
- MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
esw->qos.root_tsar_id);
- MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
initial_max_rate);
- MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- &sched_ctx,
+ sched_ctx,
&vport->qos.esw_tsar_ix);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
@@ -1392,22 +1421,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
if (!vport->qos.enabled)
return -EIO;
- MLX5_SET(scheduling_context, &sched_ctx, element_type,
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
- vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+ vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
element_attributes);
MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
- MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
esw->qos.root_tsar_id);
- MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
max_rate);
- MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
err = mlx5_modify_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- &sched_ctx,
+ sched_ctx,
vport->qos.esw_tsar_ix,
bitmask);
if (err) {
@@ -1455,6 +1484,41 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
}
}
+static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
+{
+ struct mlx5_core_dev *dev = vport->dev;
+
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
+ vport->ingress.drop_counter = mlx5_fc_create(dev, false);
+ if (IS_ERR(vport->ingress.drop_counter)) {
+ esw_warn(dev,
+ "vport[%d] configure ingress drop rule counter failed\n",
+ vport->vport);
+ vport->ingress.drop_counter = NULL;
+ }
+ }
+
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
+ vport->egress.drop_counter = mlx5_fc_create(dev, false);
+ if (IS_ERR(vport->egress.drop_counter)) {
+ esw_warn(dev,
+ "vport[%d] configure egress drop rule counter failed\n",
+ vport->vport);
+ vport->egress.drop_counter = NULL;
+ }
+ }
+}
+
+static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
+{
+ struct mlx5_core_dev *dev = vport->dev;
+
+ if (vport->ingress.drop_counter)
+ mlx5_fc_destroy(dev, vport->ingress.drop_counter);
+ if (vport->egress.drop_counter)
+ mlx5_fc_destroy(dev, vport->egress.drop_counter);
+}
+
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
@@ -1481,6 +1545,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
if (!vport_num)
vport->info.trusted = true;
+ /* create steering drop counters for ingress and egress ACLs */
+ if (vport_num && esw->mode == SRIOV_LEGACY)
+ esw_vport_create_drop_counters(vport);
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -1519,6 +1587,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
+ esw_vport_destroy_drop_counters(vport);
}
esw->enabled_vports--;
mutex_unlock(&esw->state_lock);
@@ -1644,13 +1713,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
- esw->offloads.vport_reps =
- kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
- GFP_KERNEL);
- if (!esw->offloads.vport_reps) {
- err = -ENOMEM;
+ err = esw_offloads_init_reps(esw);
+ if (err)
goto abort;
- }
hash_init(esw->offloads.encap_tbl);
hash_init(esw->offloads.mod_hdr_tbl);
@@ -1681,8 +1746,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
+ esw_offloads_cleanup_reps(esw);
kfree(esw->vports);
- kfree(esw->offloads.vport_reps);
kfree(esw);
return err;
}
@@ -1696,7 +1761,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
- kfree(esw->offloads.vport_reps);
+ esw_offloads_cleanup_reps(esw);
kfree(esw->vports);
kfree(esw);
}
@@ -2018,12 +2083,36 @@ unlock:
return err;
}
+static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
+ int vport_idx,
+ struct mlx5_vport_drop_stats *stats)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_vport *vport = &esw->vports[vport_idx];
+ u64 bytes = 0;
+ u16 idx = 0;
+
+ if (!vport->enabled || esw->mode != SRIOV_LEGACY)
+ return;
+
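+ /* ACL directions are named from the e-switch side, so they map inverted
+ * onto the VF: egress-ACL drops (traffic towards the VF) become the VF's
+ * rx_dropped, ingress-ACL drops (traffic from the VF) its tx_dropped.
+ */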
+ if (vport->egress.drop_counter) {
+ idx = vport->egress.drop_counter->id;
+ mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes);
+ }
+
+ if (vport->ingress.drop_counter) {
+ idx = vport->ingress.drop_counter->id;
+ mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
+ }
+}
+
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+ struct mlx5_vport_drop_stats stats = {0};
int err = 0;
u32 *out;
@@ -2078,6 +2167,10 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
+ mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+ vf_stats->rx_dropped = stats.rx_dropped;
+ vf_stats->tx_dropped = stats.tx_dropped;
+
free_out:
kvfree(out);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 565c8b7a399a..2fa037066b2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -45,6 +45,11 @@ enum {
SRIOV_OFFLOADS
};
+enum {
+ REP_ETH,
+ NUM_REP_TYPES,
+};
+
#ifdef CONFIG_MLX5_ESWITCH
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -68,6 +73,7 @@ struct vport_ingress {
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allow_rule;
struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
};
struct vport_egress {
@@ -76,6 +82,12 @@ struct vport_egress {
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allowed_vlan;
struct mlx5_flow_handle *drop_rule;
+ struct mlx5_fc *drop_counter;
+};
+
+struct mlx5_vport_drop_stats {
+ u64 rx_dropped;
+ u64 tx_dropped;
};
struct mlx5_vport_info {
@@ -133,25 +145,21 @@ struct mlx5_eswitch_fdb {
};
};
-struct mlx5_esw_sq {
- struct mlx5_flow_handle *send_to_vport_rule;
- struct list_head list;
+struct mlx5_eswitch_rep;
+struct mlx5_eswitch_rep_if {
+ int (*load)(struct mlx5_core_dev *dev,
+ struct mlx5_eswitch_rep *rep);
+ void (*unload)(struct mlx5_eswitch_rep *rep);
+ void *priv;
+ bool valid;
};
struct mlx5_eswitch_rep {
- int (*load)(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
- void (*unload)(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
+ struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
u16 vport;
u8 hw_id[ETH_ALEN];
- struct net_device *netdev;
-
- struct mlx5_flow_handle *vport_rx_rule;
- struct list_head vport_sqs_list;
u16 vlan;
u32 vlan_refcount;
- bool valid;
};
struct mlx5_esw_offload {
@@ -197,6 +205,8 @@ struct mlx5_eswitch {
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
+int esw_offloads_init_reps(struct mlx5_eswitch *esw);
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -221,6 +231,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport,
+ u32 sqn);
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
@@ -257,12 +271,6 @@ struct mlx5_esw_flow_attr {
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep,
- u16 *sqns_array, int sqns_num);
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep);
-
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
@@ -272,10 +280,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index,
- struct mlx5_eswitch_rep *rep);
+ struct mlx5_eswitch_rep_if *rep_if,
+ u8 rep_type);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport_index);
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
+ int vport_index,
+ u8 rep_type);
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 1143d80119bd..99f583a15cc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -130,7 +130,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
rep = &esw->offloads.vport_reps[vf_vport];
- if (!rep->valid)
+ if (!rep->rep_if[REP_ETH].valid)
continue;
err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -302,7 +302,7 @@ out:
return err;
}
-static struct mlx5_flow_handle *
+struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
struct mlx5_flow_act flow_act = {0};
@@ -339,57 +339,9 @@ out:
return flow_rule;
}
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep)
-{
- struct mlx5_esw_sq *esw_sq, *tmp;
-
- if (esw->mode != SRIOV_OFFLOADS)
- return;
-
- list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
- mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
- list_del(&esw_sq->list);
- kfree(esw_sq);
- }
-}
-
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep,
- u16 *sqns_array, int sqns_num)
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
- struct mlx5_flow_handle *flow_rule;
- struct mlx5_esw_sq *esw_sq;
- int err;
- int i;
-
- if (esw->mode != SRIOV_OFFLOADS)
- return 0;
-
- for (i = 0; i < sqns_num; i++) {
- esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
- if (!esw_sq) {
- err = -ENOMEM;
- goto out_err;
- }
-
- /* Add re-inject rule to the PF/representor sqs */
- flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
- rep->vport,
- sqns_array[i]);
- if (IS_ERR(flow_rule)) {
- err = PTR_ERR(flow_rule);
- kfree(esw_sq);
- goto out_err;
- }
- esw_sq->send_to_vport_rule = flow_rule;
- list_add(&esw_sq->list, &rep->vport_sqs_list);
- }
- return 0;
-
-out_err:
- mlx5_eswitch_sqs2vport_stop(esw, rep);
- return err;
+ mlx5_del_flow_rules(rule);
}
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
@@ -732,12 +684,111 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
return err;
}
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+{
+ kfree(esw->offloads.vport_reps);
+}
+
+int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+{
+ int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_esw_offload *offloads;
+ struct mlx5_eswitch_rep *rep;
+ u8 hw_id[ETH_ALEN];
+ int vport;
+
+ esw->offloads.vport_reps = kcalloc(total_vfs,
+ sizeof(struct mlx5_eswitch_rep),
+ GFP_KERNEL);
+ if (!esw->offloads.vport_reps)
+ return -ENOMEM;
+
+ offloads = &esw->offloads;
+ mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
+
+ for (vport = 0; vport < total_vfs; vport++) {
+ rep = &offloads->vport_reps[vport];
+
+ rep->vport = vport;
+ ether_addr_copy(rep->hw_id, hw_id);
+ }
+
+ offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
+
+ return 0;
+}
+
+static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
+ u8 rep_type)
+{
+ struct mlx5_eswitch_rep *rep;
+ int vport;
+
+ for (vport = nvports - 1; vport >= 0; vport--) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->rep_if[rep_type].valid)
+ continue;
+
+ rep->rep_if[rep_type].unload(rep);
+ }
+}
+
+static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
+{
+ u8 rep_type = NUM_REP_TYPES;
+
+ while (rep_type-- > 0)
+ esw_offloads_unload_reps_type(esw, nvports, rep_type);
+}
+
+static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
+ u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int vport;
int err;
+ for (vport = 0; vport < nvports; vport++) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->rep_if[rep_type].valid)
+ continue;
+
+ err = rep->rep_if[rep_type].load(esw->dev, rep);
+ if (err)
+ goto err_reps;
+ }
+
+ return 0;
+
+err_reps:
+ esw_offloads_unload_reps_type(esw, vport, rep_type);
+ return err;
+}
+
+static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
+{
+ u8 rep_type = 0;
+ int err;
+
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ err = esw_offloads_load_reps_type(esw, nvports, rep_type);
+ if (err)
+ goto err_reps;
+ }
+
+ return err;
+
+err_reps:
+ while (rep_type-- > 0)
+ esw_offloads_unload_reps_type(esw, nvports, rep_type);
+ return err;
+}
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+{
+ int err;
+
/* disable PF RoCE so missed packets don't go through RoCE steering */
mlx5_dev_list_lock();
mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
@@ -755,25 +806,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
if (err)
goto create_fg_err;
- for (vport = 0; vport < nvports; vport++) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->valid)
- continue;
-
- err = rep->load(esw, rep);
- if (err)
- goto err_reps;
- }
+ err = esw_offloads_load_reps(esw, nvports);
+ if (err)
+ goto err_reps;
return 0;
err_reps:
- for (vport--; vport >= 0; vport--) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->valid)
- continue;
- rep->unload(esw, rep);
- }
esw_destroy_vport_rx_group(esw);
create_fg_err:
@@ -814,16 +853,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
- struct mlx5_eswitch_rep *rep;
- int vport;
-
- for (vport = nvports - 1; vport >= 0; vport--) {
- rep = &esw->offloads.vport_reps[vport];
- if (!rep->valid)
- continue;
- rep->unload(esw, rep);
- }
-
+ esw_offloads_unload_reps(esw, nvports);
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);
@@ -1120,27 +1150,23 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
int vport_index,
- struct mlx5_eswitch_rep *__rep)
+ struct mlx5_eswitch_rep_if *__rep_if,
+ u8 rep_type)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
- struct mlx5_eswitch_rep *rep;
-
- rep = &offloads->vport_reps[vport_index];
+ struct mlx5_eswitch_rep_if *rep_if;
- memset(rep, 0, sizeof(*rep));
+ rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];
- rep->load = __rep->load;
- rep->unload = __rep->unload;
- rep->vport = __rep->vport;
- rep->netdev = __rep->netdev;
- ether_addr_copy(rep->hw_id, __rep->hw_id);
+ rep_if->load = __rep_if->load;
+ rep_if->unload = __rep_if->unload;
+ rep_if->priv = __rep_if->priv;
- INIT_LIST_HEAD(&rep->vport_sqs_list);
- rep->valid = true;
+ rep_if->valid = true;
}
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport_index)
+ int vport_index, u8 rep_type)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
@@ -1148,17 +1174,17 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
rep = &offloads->vport_reps[vport_index];
if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
- rep->unload(esw, rep);
+ rep->rep_if[rep_type].unload(rep);
- rep->valid = false;
+ rep->rep_if[rep_type].valid = false;
}
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
rep = &offloads->vport_reps[UPLINK_REP_INDEX];
- return rep->netdev;
+ return rep->rep_if[rep_type].priv;
}
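To show how the per-type interface is meant to be used, a registration sketch for the ethernet rep type. The mlx5e_vport_rep_load/unload callbacks are assumed names standing in for the rep implementation, which lives outside this hunk:

static void mlx5e_register_vf_reps_sketch(struct mlx5_eswitch *esw,
					  int total_vfs)
{
	int vport;

	for (vport = 1; vport < total_vfs; vport++) {
		struct mlx5_eswitch_rep_if rep_if = {};

		rep_if.load = mlx5e_vport_rep_load;	/* assumed callback */
		rep_if.unload = mlx5e_vport_rep_unload;	/* assumed callback */
		mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
	}
}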
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index c4392f741c5f..e6175f8ac0e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -688,7 +688,7 @@ static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
- MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn);
MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
@@ -727,7 +727,7 @@ static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
MLX5_SET(qpc, qpc, next_rcv_psn,
MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn));
MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
- MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32));
MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
@@ -888,7 +888,8 @@ struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index,
MLX5_ROCE_VERSION_2,
MLX5_ROCE_L3_TYPE_IPV6,
- remote_ip, remote_mac, true, 0);
+ remote_ip, remote_mac, true, 0,
+ MLX5_FPGA_PORT_NUM);
if (err) {
mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err);
ret = ERR_PTR(err);
@@ -954,7 +955,7 @@ err_cq:
mlx5_fpga_conn_destroy_cq(conn);
err_gid:
mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL,
- NULL, false, 0);
+ NULL, false, 0, MLX5_FPGA_PORT_NUM);
err_rsvd_gid:
mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index);
err:
@@ -982,7 +983,7 @@ void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
mlx5_fpga_conn_destroy_cq(conn);
mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0,
- NULL, NULL, false, 0);
+ NULL, NULL, false, 0, MLX5_FPGA_PORT_NUM);
mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index);
kfree(conn);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
index 3c11d6e2160a..14962969c5ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
@@ -66,6 +66,9 @@ static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size,
u8 actual_size;
int err;
+ if (!size)
+ return -EINVAL;
+
if (!fdev->mdev)
return -ENOTCONN;
@@ -95,6 +98,9 @@ static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size,
u8 actual_size;
int err;
+ if (!size)
+ return -EINVAL;
+
if (!fdev->mdev)
return -ENOTCONN;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index c70fd663a633..c025c98700e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -89,6 +89,9 @@
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
+#define KERNEL_NIC_TC_NUM_PRIOS 1
+#define KERNEL_NIC_TC_NUM_LEVELS 2
+
#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
@@ -134,7 +137,7 @@ static struct init_tree_node {
ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
ETHTOOL_PRIO_NUM_LEVELS))),
ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
+ ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
KERNEL_NIC_PRIO_NUM_LEVELS))),
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
@@ -174,6 +177,8 @@ static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
+static void del_sw_prio(struct fs_node *node);
+static void del_sw_ns(struct fs_node *node);
/* Deleting a rule (destination) is a special case that
 * requires locking the FTE for the entire deletion process.
 */
@@ -408,6 +413,16 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
return NULL;
}
+static void del_sw_ns(struct fs_node *node)
+{
+ kfree(node);
+}
+
+static void del_sw_prio(struct fs_node *node)
+{
+ kfree(node);
+}
+
static void del_hw_flow_table(struct fs_node *node)
{
struct mlx5_flow_table *ft;
@@ -2014,16 +2029,6 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
return &steering->fdb_root_ns->ns;
else
return NULL;
- case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (steering->esw_egress_root_ns)
- return &steering->esw_egress_root_ns->ns;
- else
- return NULL;
- case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (steering->esw_ingress_root_ns)
- return &steering->esw_ingress_root_ns->ns;
- else
- return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
if (steering->sniffer_rx_root_ns)
return &steering->sniffer_rx_root_ns->ns;
@@ -2054,6 +2059,33 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
+struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ int vport)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+ if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
+ return NULL;
+
+ switch (type) {
+ case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+ if (steering->esw_egress_root_ns &&
+ steering->esw_egress_root_ns[vport])
+ return &steering->esw_egress_root_ns[vport]->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+ if (steering->esw_ingress_root_ns &&
+ steering->esw_ingress_root_ns[vport])
+ return &steering->esw_ingress_root_ns[vport]->ns;
+ else
+ return NULL;
+ default:
+ return NULL;
+ }
+}
+
static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
unsigned int prio, int num_levels)
{
@@ -2064,7 +2096,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
return ERR_PTR(-ENOMEM);
fs_prio->node.type = FS_TYPE_PRIO;
- tree_init_node(&fs_prio->node, NULL, NULL);
+ tree_init_node(&fs_prio->node, NULL, del_sw_prio);
tree_add_node(&fs_prio->node, &ns->node);
fs_prio->num_levels = num_levels;
fs_prio->prio = prio;
@@ -2090,7 +2122,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
return ERR_PTR(-ENOMEM);
fs_init_namespace(ns);
- tree_init_node(&ns->node, NULL, NULL);
+ tree_init_node(&ns->node, NULL, del_sw_ns);
tree_add_node(&ns->node, &prio->node);
list_add_tail(&ns->node.list, &prio->node.children);
@@ -2331,13 +2363,41 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
clean_tree(&root_ns->ns.node);
}
+static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int i;
+
+ if (!steering->esw_egress_root_ns)
+ return;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+ cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+ kfree(steering->esw_egress_root_ns);
+}
+
+static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int i;
+
+ if (!steering->esw_ingress_root_ns)
+ return;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+ cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+ kfree(steering->esw_ingress_root_ns);
+}
+
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
cleanup_root_ns(steering->root_ns);
- cleanup_root_ns(steering->esw_egress_root_ns);
- cleanup_root_ns(steering->esw_ingress_root_ns);
+ cleanup_egress_acls_root_ns(dev);
+ cleanup_ingress_acls_root_ns(dev);
cleanup_root_ns(steering->fdb_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
@@ -2406,34 +2466,86 @@ out_err:
return PTR_ERR(prio);
}
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
struct fs_prio *prio;
- steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
- if (!steering->esw_egress_root_ns)
+ steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+ if (!steering->esw_egress_root_ns[vport])
return -ENOMEM;
/* create 1 prio */
- prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
- MLX5_TOTAL_VPORTS(steering->dev));
+ prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
return PTR_ERR_OR_ZERO(prio);
}
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
struct fs_prio *prio;
- steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
- if (!steering->esw_ingress_root_ns)
+ steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+ if (!steering->esw_ingress_root_ns[vport])
return -ENOMEM;
/* create 1 prio */
- prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
- MLX5_TOTAL_VPORTS(steering->dev));
+ prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
return PTR_ERR_OR_ZERO(prio);
}
+static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int err;
+ int i;
+
+ steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+ sizeof(*steering->esw_egress_root_ns),
+ GFP_KERNEL);
+ if (!steering->esw_egress_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+ err = init_egress_acl_root_ns(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+
+ return 0;
+
+cleanup_root_ns:
+ for (i--; i >= 0; i--)
+ cleanup_root_ns(steering->esw_egress_root_ns[i]);
+ kfree(steering->esw_egress_root_ns);
+ return err;
+}
+
+static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int err;
+ int i;
+
+ steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+ sizeof(*steering->esw_ingress_root_ns),
+ GFP_KERNEL);
+ if (!steering->esw_ingress_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+ err = init_ingress_acl_root_ns(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+
+ return 0;
+
+cleanup_root_ns:
+ for (i--; i >= 0; i--)
+ cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+ kfree(steering->esw_ingress_root_ns);
+ return err;
+}
+
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering;
@@ -2476,12 +2588,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = init_egress_acl_root_ns(steering);
+ err = init_egress_acls_root_ns(dev);
if (err)
goto err;
}
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = init_ingress_acl_root_ns(steering);
+ err = init_ingress_acls_root_ns(dev);
if (err)
goto err;
}
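
Since the egress/ingress ACL root namespaces are now arrays indexed by vport, ACL namespace lookup moves to the dedicated mlx5_get_flow_vport_acl_namespace() added above. A hedged sketch of a caller creating a per-vport ingress ACL table; the sizing values are illustrative and mlx5_create_vport_flow_table() is assumed to keep its existing (ns, prio, max_fte, level, vport) signature:

static struct mlx5_flow_table *
create_vport_ingress_acl(struct mlx5_core_dev *dev, u16 vport)
{
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_vport_acl_namespace(dev,
					       MLX5_FLOW_NAMESPACE_ESW_INGRESS,
					       vport);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	/* prio 0, 4 entries, level 0 - all illustrative */
	return mlx5_create_vport_flow_table(ns, 0, 4, 0, vport);
}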
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 397d24a621a4..05262708f14b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -71,8 +71,8 @@ struct mlx5_flow_steering {
struct kmem_cache *ftes_cache;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
- struct mlx5_flow_root_namespace *esw_egress_root_ns;
- struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+ struct mlx5_flow_root_namespace **esw_egress_root_ns;
+ struct mlx5_flow_root_namespace **esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
};
@@ -233,6 +233,8 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
unsigned long delay);
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval);
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 89d1f8650033..b7ab929d5f8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -312,6 +312,12 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
}
}
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes)
+{
+ return mlx5_cmd_fc_query(dev, id, packets, bytes);
+}
+
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse)
{
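
mlx5_fc_query() gives callers that hold a raw counter id an absolute, uncached snapshot straight from firmware, complementing the cached accessor here. A minimal sketch:

static void log_fc_snapshot(struct mlx5_core_dev *dev, u16 counter_id)
{
	u64 packets, bytes;

	/* counter_id is assumed to come from an earlier mlx5_fc_create() */
	if (!mlx5_fc_query(dev, counter_id, &packets, &bytes))
		mlx5_core_dbg(dev, "fc %u: %llu pkts / %llu bytes\n",
			      counter_id, packets, bytes);
}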
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 5ef1b56b6a96..9d11e92fb541 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -195,12 +195,20 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return 0;
}
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
{
u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0};
+ int i;
MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
+
+ if (MLX5_CAP_GEN(dev, sw_owner_id)) {
+ for (i = 0; i < 4; i++)
+ MLX5_ARRAY_SET(init_hca_in, in, sw_owner_id, i,
+ sw_owner_id[i]);
+ }
+
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
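
INIT_HCA now carries a 128-bit sw_owner_id, written as four 32-bit words when the sw_owner_id capability is present, so firmware can tie HCA resources to the software instance that initialized them. main.c (further down) seeds it once at module init with get_random_bytes(); a condensed sketch of that pattern:

static int init_hca_with_random_owner(struct mlx5_core_dev *dev)
{
	u32 sw_owner_id[4];

	/* one random id per driver instance; all four words are used */
	get_random_bytes(sw_owner_id, sizeof(sw_owner_id));
	return mlx5_cmd_init_hca(dev, sw_owner_id);
}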
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 1a0e797ad001..21d29f7936f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -241,7 +241,7 @@ static void print_health_info(struct mlx5_core_dev *dev)
u32 fw;
int i;
- /* If the syndrom is 0, the device is OK and no need to print buffer */
+ /* If the syndrome is 0, the device is OK and no need to print buffer */
if (!ioread8(&h->synd))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 6f338a9219c8..90cb50fe17fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -254,4 +254,5 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
const struct ethtool_ops mlx5i_pkey_ethtool_ops = {
.get_drvinfo = mlx5i_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_ts_info = mlx5i_get_ts_info,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index d2a66dc4adc6..f953378bd13d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -41,7 +41,6 @@
static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_open = mlx5i_open,
@@ -57,7 +56,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
/* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
- mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
+ mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
/* RQ size in ipoib by default is 512 */
params->log_rq_size = is_kdump_kernel() ?
@@ -86,6 +85,8 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
mlx5i_build_nic_params(mdev, &priv->channels.params);
+ mlx5e_timestamp_init(priv);
+
/* netdev init */
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
@@ -187,7 +188,7 @@ int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp
MLX5_QP_ENHANCED_ULP_STATELESS_MODE);
addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
- MLX5_SET(ads, addr_path, port, 1);
+ MLX5_SET(ads, addr_path, vhca_port_num, 1);
MLX5_SET(ads, addr_path, grh, 1);
ret = mlx5_core_create_qp(mdev, qp, in, inlen);
@@ -240,7 +241,8 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
- int err;
+ struct ttc_params ttc_params = {};
+ int tt, err;
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
@@ -255,14 +257,23 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_inner_ttc_table(priv);
+ mlx5e_set_ttc_basic_params(priv, &ttc_params);
+ mlx5e_set_inner_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
+
+ err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
- err = mlx5e_create_ttc_table(priv);
+ mlx5e_set_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+
+ err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
@@ -272,7 +283,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
return 0;
err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -281,8 +292,8 @@ err_destroy_arfs_tables:
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
- mlx5e_destroy_ttc_table(priv);
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
+ mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
mlx5e_arfs_destroy_tables(priv);
}
@@ -396,7 +407,7 @@ int mlx5i_dev_init(struct net_device *dev)
return 0;
}
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -450,7 +461,6 @@ static int mlx5i_open(struct net_device *netdev)
mlx5e_refresh_tirs(epriv, false);
mlx5e_activate_priv_channels(epriv);
- mlx5e_timestamp_set(epriv);
mutex_unlock(&epriv->state_lock);
return 0;
@@ -485,7 +495,7 @@ static int mlx5i_close(struct net_device *netdev)
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
mlx5i_uninit_underlay_qp(epriv);
mlx5e_deactivate_priv_channels(epriv);
- mlx5e_close_channels(&epriv->channels);;
+ mlx5e_close_channels(&epriv->channels);
unlock:
mutex_unlock(&epriv->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 49008022c306..6d9053bcbe95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -76,9 +76,10 @@ int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn);
/* Get the net-device corresponding to the given underlay QPN */
struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
-/* Shared ndo functionts */
+/* Shared ndo functions */
int mlx5i_dev_init(struct net_device *dev);
void mlx5i_dev_cleanup(struct net_device *dev);
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Parent profile functions */
void mlx5i_init(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 531b02cc979b..b69e9d847a6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -140,6 +140,7 @@ static int mlx5i_pkey_close(struct net_device *netdev);
static int mlx5i_pkey_dev_init(struct net_device *dev);
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev);
static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu);
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_open = mlx5i_pkey_open,
@@ -147,6 +148,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_init = mlx5i_pkey_dev_init,
.ndo_uninit = mlx5i_pkey_dev_cleanup,
.ndo_change_mtu = mlx5i_pkey_change_mtu,
+ .ndo_do_ioctl = mlx5i_pkey_ioctl,
};
/* Child NDOs */
@@ -174,6 +176,11 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
return mlx5i_dev_init(dev);
}
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ return mlx5i_ioctl(dev, ifr, cmd);
+}
+
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
{
return mlx5i_dev_cleanup(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index f26f97fe4666..582b2f18010a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -137,6 +137,17 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
+static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
+ bool reset, void *out, int out_size)
+{
+ u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };
+
+ MLX5_SET(query_cong_statistics_in, in, opcode,
+ MLX5_CMD_OP_QUERY_CONG_STATISTICS);
+ MLX5_SET(query_cong_statistics_in, in, clear, reset);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
+}
+
static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
{
return dev->priv.lag;
@@ -633,3 +644,48 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
/* If bonded, we do not add an IB device for PF1. */
return false;
}
+
+int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
+ u64 *values,
+ int num_counters,
+ size_t *offsets)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
+ struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
+ struct mlx5_lag *ldev;
+ int num_ports;
+ int ret, i, j;
+ void *out;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ memset(values, 0, sizeof(*values) * num_counters);
+
+ mutex_lock(&lag_mutex);
+ ldev = mlx5_lag_dev_get(dev);
+ if (ldev && mlx5_lag_is_bonded(ldev)) {
+ num_ports = MLX5_MAX_PORTS;
+ mdev[0] = ldev->pf[0].dev;
+ mdev[1] = ldev->pf[1].dev;
+ } else {
+ num_ports = 1;
+ mdev[0] = dev;
+ }
+
+ for (i = 0; i < num_ports; ++i) {
+ ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen);
+ if (ret)
+ goto unlock;
+
+ for (j = 0; j < num_counters; ++j)
+ values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
+ }
+
+unlock:
+ mutex_unlock(&lag_mutex);
+ kvfree(out);
+ return ret;
+}
+EXPORT_SYMBOL(mlx5_lag_query_cong_counters);
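
mlx5_lag_query_cong_counters() issues QUERY_CONG_STATISTICS per LAG port and sums the requested fields, so an IB device bonded over two ports reports aggregate congestion counters. Callers pass byte offsets into the query_cong_statistics_out layout; a hedged single-counter sketch (the field choice is illustrative):

static int query_cnp_handled(struct mlx5_core_dev *dev, u64 *val)
{
	size_t offsets[] = {
		MLX5_BYTE_OFF(query_cong_statistics_out,
			      rp_cnp_handled_high),
	};

	return mlx5_lag_query_cong_counters(dev, val, 1, offsets);
}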
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index fa8aed62b231..e159243e0fcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -31,6 +31,8 @@
*/
#include <linux/clocksource.h>
+#include <linux/highmem.h>
+#include <rdma/mlx5-abi.h>
#include "en.h"
enum {
@@ -71,6 +73,28 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
return mlx5_read_internal_timer(mdev) & cc->mask;
}
+static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
+ struct mlx5_clock *clock = &mdev->clock;
+ u32 sign;
+
+ if (!clock_info)
+ return;
+
+ sign = smp_load_acquire(&clock_info->sign);
+ smp_store_mb(clock_info->sign,
+ sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
+
+ clock_info->cycles = clock->tc.cycle_last;
+ clock_info->mult = clock->cycles.mult;
+ clock_info->nsec = clock->tc.nsec;
+ clock_info->frac = clock->tc.frac;
+
+ smp_store_release(&clock_info->sign,
+ sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
+}
+
static void mlx5_pps_out(struct work_struct *work)
{
struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
@@ -109,6 +133,7 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
write_lock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}
@@ -123,6 +148,7 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
write_lock_irqsave(&clock->lock, flags);
timecounter_init(&clock->tc, &clock->cycles, ns);
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
return 0;
@@ -152,6 +178,7 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
write_lock_irqsave(&clock->lock, flags);
timecounter_adjtime(&clock->tc, delta);
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
return 0;
@@ -179,6 +206,7 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
timecounter_read(&clock->tc);
clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
clock->nominal_c_mult + diff;
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
return 0;
@@ -423,9 +451,13 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
+ ptp_event.index = pin;
+ ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
+ be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) {
ptp_event.type = PTP_CLOCK_PPSUSR;
- ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp);
+ ptp_event.pps_times.ts_real =
+ ns_to_timespec64(ptp_event.timestamp);
} else {
ptp_event.type = PTP_CLOCK_EXTTS;
}
@@ -470,6 +502,7 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
clock->cycles.shift);
clock->nominal_c_mult = clock->cycles.mult;
clock->cycles.mask = CLOCKSOURCE_MASK(41);
+ clock->mdev = mdev;
timecounter_init(&clock->tc, &clock->cycles,
ktime_to_ns(ktime_get_real()));
@@ -482,6 +515,25 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
do_div(ns, NSEC_PER_SEC / 2 / HZ);
clock->overflow_period = ns;
+ mdev->clock_info_page = alloc_page(GFP_KERNEL);
+ if (mdev->clock_info_page) {
+ mdev->clock_info = kmap(mdev->clock_info_page);
+ if (!mdev->clock_info) {
+ __free_page(mdev->clock_info_page);
+ mlx5_core_warn(mdev, "failed to map clock page\n");
+ } else {
+ mdev->clock_info->sign = 0;
+ mdev->clock_info->nsec = clock->tc.nsec;
+ mdev->clock_info->cycles = clock->tc.cycle_last;
+ mdev->clock_info->mask = clock->cycles.mask;
+ mdev->clock_info->mult = clock->nominal_c_mult;
+ mdev->clock_info->shift = clock->cycles.shift;
+ mdev->clock_info->frac = clock->tc.frac;
+ mdev->clock_info->overflow_period =
+ clock->overflow_period;
+ }
+ }
+
INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
if (clock->overflow_period)
@@ -521,5 +573,12 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
cancel_work_sync(&clock->pps_info.out_work);
cancel_delayed_work_sync(&clock->overflow_work);
+
+ if (mdev->clock_info) {
+ kunmap(mdev->clock_info_page);
+ __free_page(mdev->clock_info_page);
+ mdev->clock_info = NULL;
+ }
+
kfree(clock->ptp_info.pin_config);
}
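
The new clock-info page exports the timecounter state for userspace timestamp conversion, protected by a seqlock-style sign word: the writer ORs in MLX5_IB_CLOCK_INFO_KERNEL_UPDATING before touching the fields and releases with sign + 2 * UPDATING, so a stable snapshot has the flag clear and an unchanged sign across the read. A hedged reader-side sketch (assuming the flag occupies the low bit, as the writer's stepping implies):

static u64 clock_info_read_nsec(struct mlx5_ib_clock_info *ci)
{
	u32 sign;
	u64 nsec;

	do {
		sign = smp_load_acquire(&ci->sign);
		if (sign & MLX5_IB_CLOCK_INFO_KERNEL_UPDATING)
			continue;	/* writer mid-update, retry */
		nsec = ci->nsec;	/* real users also read cycles/mult/frac */
		smp_rmb();		/* pairs with the writer's release */
	} while (READ_ONCE(ci->sign) != sign);

	return nsec;
}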
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
index 573f59f46d41..7722a3f9bb68 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
@@ -121,7 +121,7 @@ EXPORT_SYMBOL_GPL(mlx5_core_reserved_gids_count);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
- const u8 *mac, bool vlan, u16 vlan_id)
+ const u8 *mac, bool vlan, u16 vlan_id, u8 port_num)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
@@ -148,6 +148,9 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
memcpy(addr_l3_addr, gid, gidsz);
}
+ if (MLX5_CAP_GEN(dev, num_vhca_ports) > 0)
+ MLX5_SET(set_roce_address_in, in, vhca_port_num, port_num);
+
MLX5_SET(set_roce_address_in, in, roce_address_index, index);
MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 5f323442cc5a..2ef641c91c26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -75,6 +75,8 @@ static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+static u32 sw_owner_id[4];
+
enum {
MLX5_ATOMIC_REQ_MODE_BE = 0x0,
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
@@ -317,11 +319,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
struct mlx5_eq_table *table = &priv->eq_table;
- struct irq_affinity irqdesc = {
- .pre_vectors = MLX5_EQ_VEC_COMP_BASE,
- };
int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
int nvec;
+ int err;
nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
MLX5_EQ_VEC_COMP_BASE;
@@ -331,22 +331,23 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
if (!priv->irq_info)
- goto err_free_msix;
+ return -ENOMEM;
- nvec = pci_alloc_irq_vectors_affinity(dev->pdev,
+ nvec = pci_alloc_irq_vectors(dev->pdev,
MLX5_EQ_VEC_COMP_BASE + 1, nvec,
- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
- &irqdesc);
- if (nvec < 0)
- return nvec;
+ PCI_IRQ_MSIX);
+ if (nvec < 0) {
+ err = nvec;
+ goto err_free_irq_info;
+ }
table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
return 0;
-err_free_msix:
+err_free_irq_info:
kfree(priv->irq_info);
- return -ENOMEM;
+ return err;
}
static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev)
@@ -552,6 +553,15 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
cache_line_128byte,
cache_line_size() == 128 ? 1 : 0);
+ if (MLX5_CAP_GEN_MAX(dev, dct))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);
+
+ if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
+ MLX5_SET(cmd_hca_cap,
+ set_hca_cap,
+ num_vhca_ports,
+ MLX5_CAP_GEN_MAX(dev, num_vhca_ports));
+
err = set_caps(dev, set_ctx, set_sz,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
@@ -582,8 +592,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
int ret = 0;
/* Disable local_lb by default */
- if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
- MLX5_CAP_GEN(dev, disable_local_lb))
+ if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
ret = mlx5_nic_vport_update_local_lb(dev, false);
return ret;
@@ -622,6 +631,63 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
return (u64)timer_l | (u64)timer_h1 << 32;
}
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+
+ if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+ mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+ return -ENOMEM;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
+ priv->irq_info[i].mask);
+
+ if (IS_ENABLED(CONFIG_SMP) &&
+ irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+ mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
+
+ return 0;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+
+ irq_set_affinity_hint(irq, NULL);
+ free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+ err = mlx5_irq_set_affinity_hint(mdev, i);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ for (i--; i >= 0; i--)
+ mlx5_irq_clear_affinity_hint(mdev, i);
+
+ return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+ mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn)
{
@@ -1052,7 +1118,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto reclaim_boot_pages;
}
- err = mlx5_cmd_init_hca(dev);
+ err = mlx5_cmd_init_hca(dev, sw_owner_id);
if (err) {
dev_err(&pdev->dev, "init hca failed\n");
goto err_pagealloc_stop;
@@ -1068,9 +1134,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_stop_poll;
}
- if (boot && mlx5_init_once(dev, priv)) {
- dev_err(&pdev->dev, "sw objs init failed\n");
- goto err_stop_poll;
+ if (boot) {
+ err = mlx5_init_once(dev, priv);
+ if (err) {
+ dev_err(&pdev->dev, "sw objs init failed\n");
+ goto err_stop_poll;
+ }
}
err = mlx5_alloc_irq_vectors(dev);
@@ -1080,8 +1149,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
dev->priv.uar = mlx5_get_uars_page(dev);
- if (!dev->priv.uar) {
+ if (IS_ERR(dev->priv.uar)) {
dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
+ err = PTR_ERR(dev->priv.uar);
goto err_disable_msix;
}
@@ -1097,6 +1167,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_stop_eqs;
}
+ err = mlx5_irq_set_affinity_hints(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+ goto err_affinity_hints;
+ }
+
err = mlx5_init_fs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init flow steering\n");
@@ -1154,6 +1230,9 @@ err_sriov:
mlx5_cleanup_fs(dev);
err_fs:
+ mlx5_irq_clear_affinity_hints(dev);
+
+err_affinity_hints:
free_comp_eqs(dev);
err_stop_eqs:
@@ -1222,6 +1301,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_sriov_detach(dev);
mlx5_cleanup_fs(dev);
+ mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_put_uars_page(dev, priv->uar);
@@ -1574,6 +1654,8 @@ static int __init init(void)
{
int err;
+ get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
+
mlx5_core_verify_params();
mlx5_register_debugfs();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index ff4a0b889a6f..394552f36fcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -86,7 +86,7 @@ enum {
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
@@ -116,6 +116,7 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
void mlx5_cq_tasklet_cb(unsigned long data);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index db9e665ab104..02d6c5b5d502 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -98,6 +98,11 @@ static u64 sq_allowed_event_types(void)
return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}
+static u64 dct_allowed_event_types(void)
+{
+ return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
+}
+
static bool is_event_type_allowed(int rsc_type, int event_type)
{
switch (rsc_type) {
@@ -107,6 +112,8 @@ static bool is_event_type_allowed(int rsc_type, int event_type)
return BIT(event_type) & rq_allowed_event_types();
case MLX5_EVENT_QUEUE_TYPE_SQ:
return BIT(event_type) & sq_allowed_event_types();
+ case MLX5_EVENT_QUEUE_TYPE_DCT:
+ return BIT(event_type) & dct_allowed_event_types();
default:
WARN(1, "Event arrived for unknown resource type");
return false;
@@ -116,6 +123,7 @@ static bool is_event_type_allowed(int rsc_type, int event_type)
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
+ struct mlx5_core_dct *dct;
struct mlx5_core_qp *qp;
if (!common)
@@ -134,7 +142,11 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
qp = (struct mlx5_core_qp *)common;
qp->event(qp, event_type);
break;
-
+ case MLX5_RES_DCT:
+ dct = (struct mlx5_core_dct *)common;
+ if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
+ complete(&dct->drained);
+ break;
default:
mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
}
@@ -142,9 +154,9 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
mlx5_core_put_rsc(common);
}
-static int create_qprqsq_common(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
- int rsc_type)
+static int create_resource_common(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp,
+ int rsc_type)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
int err;
@@ -165,8 +177,8 @@ static int create_qprqsq_common(struct mlx5_core_dev *dev,
return 0;
}
-static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp)
+static void destroy_resource_common(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
unsigned long flags;
@@ -179,6 +191,40 @@ static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
wait_for_completion(&qp->common.free);
}
+int mlx5_core_create_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct,
+ u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
+ u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
+ u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+ int err;
+
+ init_completion(&dct->drained);
+ MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
+
+ err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ if (err) {
+ mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
+ return err;
+ }
+
+ qp->qpn = MLX5_GET(create_dct_out, out, dctn);
+ err = create_resource_common(dev, qp, MLX5_RES_DCT);
+ if (err)
+ goto err_cmd;
+
+ return 0;
+err_cmd:
+ MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
+ mlx5_cmd_exec(dev, (void *)&din, sizeof(din),
+ (void *)&dout, sizeof(dout));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
+
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
u32 *in, int inlen)
@@ -197,7 +243,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
qp->qpn = MLX5_GET(create_qp_out, out, qpn);
mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
- err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
+ err = create_resource_common(dev, qp, MLX5_RES_QP);
if (err)
goto err_cmd;
@@ -213,13 +259,54 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
err_cmd:
memset(din, 0, sizeof(din));
memset(dout, 0, sizeof(dout));
- MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
- MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
+static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+
+ MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
+ MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
+ return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+ (void *)&out, sizeof(out));
+}
+
+int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+ int err;
+
+ err = mlx5_core_drain_dct(dev, dct);
+ if (err) {
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ goto destroy;
+ } else {
+ mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
+ return err;
+ }
+ }
+ wait_for_completion(&dct->drained);
+destroy:
+ destroy_resource_common(dev, &dct->mqp);
+ MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
+ err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+ (void *)&out, sizeof(out));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
+
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp)
{
@@ -229,7 +316,7 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
mlx5_debug_qp_remove(dev, qp);
- destroy_qprqsq_common(dev, qp);
+ destroy_resource_common(dev, qp);
MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
@@ -405,6 +492,20 @@ int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
+int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+
+ MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
+ MLX5_SET(query_dct_in, in, dctn, qp->qpn);
+
+ return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+ (void *)out, outlen);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
+
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
@@ -441,7 +542,7 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
rq->qpn = rqn;
- err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
+ err = create_resource_common(dev, rq, MLX5_RES_RQ);
if (err)
goto err_destroy_rq;
@@ -457,7 +558,7 @@ EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *rq)
{
- destroy_qprqsq_common(dev, rq);
+ destroy_resource_common(dev, rq);
mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
@@ -473,7 +574,7 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
sq->qpn = sqn;
- err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
+ err = create_resource_common(dev, sq, MLX5_RES_SQ);
if (err)
goto err_destroy_sq;
@@ -489,7 +590,7 @@ EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *sq)
{
- destroy_qprqsq_common(dev, sq);
+ destroy_resource_common(dev, sq);
mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
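
The DCT support above enforces a drain-before-destroy discipline: mlx5_core_destroy_dct() first sends DRAIN_DCT, waits for the MLX5_EVENT_TYPE_DCT_DRAINED event to complete(&dct->drained), and only then issues DESTROY_DCT. A hedged lifecycle sketch; real users (the mlx5 IB driver) populate many more dct_context_entry fields:

static int dct_example(struct mlx5_core_dev *dev, u32 cqn)
{
	u32 in[MLX5_ST_SZ_DW(create_dct_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(query_dct_out)] = {0};
	struct mlx5_core_dct dct = {};
	void *dctc;
	int err;

	dctc = MLX5_ADDR_OF(create_dct_in, in, dct_context_entry);
	MLX5_SET(dctc, dctc, cqn, cqn);		/* illustrative attribute */

	err = mlx5_core_create_dct(dev, &dct, in, sizeof(in));
	if (err)
		return err;

	err = mlx5_core_dct_query(dev, &dct, out, sizeof(out));

	/* drains, waits for the DRAINED event, then destroys */
	mlx5_core_destroy_dct(dev, &dct);
	return err;
}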
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index e651e4c02867..d3c33e9eea72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -125,16 +125,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
return ret_entry;
}
-static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
+static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
u32 rate, u16 index)
{
- u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
- MLX5_SET(set_rate_limit_in, in, opcode,
- MLX5_CMD_OP_SET_RATE_LIMIT);
- MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
- MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
+ MLX5_SET(set_pp_rate_limit_in, in, opcode,
+ MLX5_CMD_OP_SET_PP_RATE_LIMIT);
+ MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
+ MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
@@ -173,7 +173,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
entry->refcount++;
} else {
/* new rate limit */
- err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
+ err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);
if (err) {
mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
rate, err);
@@ -209,7 +209,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
entry->refcount--;
if (!entry->refcount) {
/* need to remove rate */
- mlx5_set_rate_limit_cmd(dev, 0, entry->index);
+ mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);
entry->rate = 0;
}
@@ -262,8 +262,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
/* Clear all configured rates */
for (i = 0; i < table->max_size; i++)
if (table->rl_entry[i].rate)
- mlx5_set_rate_limit_cmd(dev, 0,
- table->rl_entry[i].index);
+ mlx5_set_pp_rate_limit_cmd(dev, 0,
+ table->rl_entry[i].index);
kfree(dev->priv.rl_table.rl_entry);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 5e128d7a9ffd..9e38343a951f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -398,3 +398,217 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
+
+static int mlx5_hairpin_create_rq(struct mlx5_core_dev *mdev,
+ struct mlx5_hairpin_params *params, u32 *rqn)
+{
+ u32 in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
+ void *rqc, *wq;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(rqc, rqc, hairpin, 1);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, counter_set_id, params->q_counter);
+
+ MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+ MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets);
+
+ return mlx5_core_create_rq(mdev, in, MLX5_ST_SZ_BYTES(create_rq_in), rqn);
+}
+
+static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev,
+ struct mlx5_hairpin_params *params, u32 *sqn)
+{
+ u32 in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
+ void *sqc, *wq;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(sqc, sqc, hairpin, 1);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+
+ MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+ MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets);
+
+ return mlx5_core_create_sq(mdev, in, MLX5_ST_SZ_BYTES(create_sq_in), sqn);
+}
+
+static int mlx5_hairpin_create_queues(struct mlx5_hairpin *hp,
+ struct mlx5_hairpin_params *params)
+{
+ int i, j, err;
+
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_create_rq(hp->func_mdev, params, &hp->rqn[i]);
+ if (err)
+ goto out_err_rq;
+ }
+
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_create_sq(hp->peer_mdev, params, &hp->sqn[i]);
+ if (err)
+ goto out_err_sq;
+ }
+
+ return 0;
+
+out_err_sq:
+ for (j = 0; j < i; j++)
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[j]);
+ i = hp->num_channels;
+out_err_rq:
+ for (j = 0; j < i; j++)
+ mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[j]);
+ return err;
+}
+
+static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ for (i = 0; i < hp->num_channels; i++) {
+ mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+ }
+}
+
+static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
+ int curr_state, int next_state,
+ u16 peer_vhca, u32 peer_sq)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
+ void *rqc;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ if (next_state == MLX5_RQC_STATE_RDY) {
+ MLX5_SET(rqc, rqc, hairpin_peer_sq, peer_sq);
+ MLX5_SET(rqc, rqc, hairpin_peer_vhca, peer_vhca);
+ }
+
+ MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+ MLX5_SET(rqc, rqc, state, next_state);
+
+ return mlx5_core_modify_rq(func_mdev, rqn,
+ in, MLX5_ST_SZ_BYTES(modify_rq_in));
+}
+
+static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
+ int curr_state, int next_state,
+ u16 peer_vhca, u32 peer_rq)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+ void *sqc;
+
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+ if (next_state == MLX5_RQC_STATE_RDY) {
+ MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
+ MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
+ }
+
+ MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+ MLX5_SET(sqc, sqc, state, next_state);
+
+ return mlx5_core_modify_sq(peer_mdev, sqn,
+ in, MLX5_ST_SZ_BYTES(modify_sq_in));
+}
+
+static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
+{
+ int i, j, err;
+
+ /* set peer SQs */
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i],
+ MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
+ MLX5_CAP_GEN(hp->func_mdev, vhca_id), hp->rqn[i]);
+ if (err)
+ goto err_modify_sq;
+ }
+
+ /* set func RQs */
+ for (i = 0; i < hp->num_channels; i++) {
+ err = mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i],
+ MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY,
+ MLX5_CAP_GEN(hp->peer_mdev, vhca_id), hp->sqn[i]);
+ if (err)
+ goto err_modify_rq;
+ }
+
+ return 0;
+
+err_modify_rq:
+ for (j = 0; j < i; j++)
+ mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[j], MLX5_RQC_STATE_RDY,
+ MLX5_RQC_STATE_RST, 0, 0);
+ i = hp->num_channels;
+err_modify_sq:
+ for (j = 0; j < i; j++)
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[j], MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+ return err;
+}
+
+static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
+{
+ int i;
+
+ /* unset func RQs */
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
+ MLX5_RQC_STATE_RST, 0, 0);
+
+ /* unset peer SQs */
+ for (i = 0; i < hp->num_channels; i++)
+ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RST, 0, 0);
+}
+
+struct mlx5_hairpin *
+mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+ struct mlx5_core_dev *peer_mdev,
+ struct mlx5_hairpin_params *params)
+{
+ struct mlx5_hairpin *hp;
+ int size, err;
+
+ size = sizeof(*hp) + params->num_channels * 2 * sizeof(u32);
+ hp = kzalloc(size, GFP_KERNEL);
+ if (!hp)
+ return ERR_PTR(-ENOMEM);
+
+ hp->func_mdev = func_mdev;
+ hp->peer_mdev = peer_mdev;
+ hp->num_channels = params->num_channels;
+
+ hp->rqn = (void *)hp + sizeof(*hp);
+ hp->sqn = hp->rqn + params->num_channels;
+
+ /* alloc and pair func --> peer hairpin */
+ err = mlx5_hairpin_create_queues(hp, params);
+ if (err)
+ goto err_create_queues;
+
+ err = mlx5_hairpin_pair_queues(hp);
+ if (err)
+ goto err_pair_queues;
+
+ return hp;
+
+err_pair_queues:
+ mlx5_hairpin_destroy_queues(hp);
+err_create_queues:
+ kfree(hp);
+ return ERR_PTR(err);
+}
+
+void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
+{
+ mlx5_hairpin_unpair_queues(hp);
+ mlx5_hairpin_destroy_queues(hp);
+ kfree(hp);
+}
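
Hairpin wires an RQ on the receiving function to an SQ on the peer function so packets are forwarded device-internally, never traversing host memory; creation allocates both queue sets, then pairs them by exchanging vhca_id and queue numbers while moving RST to RDY. A hedged setup sketch with illustrative sizing:

static int hairpin_example(struct mlx5_core_dev *func_mdev,
			   struct mlx5_core_dev *peer_mdev)
{
	struct mlx5_hairpin_params params = {
		.log_data_size   = 16,	/* illustrative buffer size */
		.log_num_packets = 7,	/* illustrative */
		.q_counter       = 0,	/* illustrative */
		.num_channels    = 1,
	};
	struct mlx5_hairpin *hp;

	hp = mlx5_core_hairpin_create(func_mdev, peer_mdev, &params);
	if (IS_ERR(hp))
		return PTR_ERR(hp);

	/* steer RX flows to hp->rqn[0]; TX emerges from hp->sqn[0] */

	mlx5_core_hairpin_destroy(hp);
	return 0;
}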
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 222b25908d01..8b97066dd1f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -168,18 +168,16 @@ struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
struct mlx5_uars_page *ret;
mutex_lock(&mdev->priv.bfregs.reg_head.lock);
- if (list_empty(&mdev->priv.bfregs.reg_head.list)) {
- ret = alloc_uars_page(mdev, false);
- if (IS_ERR(ret)) {
- ret = NULL;
- goto out;
- }
- list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
- } else {
+ if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
struct mlx5_uars_page, list);
kref_get(&ret->ref_count);
+ goto out;
}
+ ret = alloc_uars_page(mdev, false);
+ if (IS_ERR(ret))
+ goto out;
+ list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
out:
mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index d653b0025b13..dfe36cf6fbea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -36,6 +36,9 @@
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
+/* Mutex to hold while enabling or disabling RoCE */
+static DEFINE_MUTEX(mlx5_roce_en_lock);
+
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u32 *out, int outlen)
{
@@ -908,23 +911,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
void *in;
int err;
- mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable");
+ if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
+ !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
+ return 0;
+
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(modify_nic_vport_context_in, in,
- field_select.disable_mc_local_lb, 1);
- MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.disable_mc_local_lb, !enable);
-
- MLX5_SET(modify_nic_vport_context_in, in,
- field_select.disable_uc_local_lb, 1);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.disable_uc_local_lb, !enable);
+ if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.disable_mc_local_lb, 1);
+
+ if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.disable_uc_local_lb, 1);
+
err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ if (!err)
+ mlx5_core_dbg(mdev, "%s local_lb\n",
+ enable ? "enable" : "disable");
+
kvfree(in);
return err;
}
@@ -988,17 +1001,35 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
- if (atomic_inc_return(&mdev->roce.roce_en) != 1)
- return 0;
- return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
+ int err = 0;
+
+ mutex_lock(&mlx5_roce_en_lock);
+ if (!mdev->roce.roce_en)
+ err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
+
+ if (!err)
+ mdev->roce.roce_en++;
+ mutex_unlock(&mlx5_roce_en_lock);
+
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
- if (atomic_dec_return(&mdev->roce.roce_en) != 0)
- return 0;
- return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
+ int err = 0;
+
+ mutex_lock(&mlx5_roce_en_lock);
+ if (mdev->roce.roce_en) {
+ mdev->roce.roce_en--;
+ if (mdev->roce.roce_en == 0)
+ err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
+
+ if (err)
+ mdev->roce.roce_en++;
+ }
+ mutex_unlock(&mlx5_roce_en_lock);
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
@@ -1100,3 +1131,61 @@ ex:
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
+
+int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
+ struct mlx5_core_dev *port_mdev)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *in;
+ int err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ err = mlx5_nic_vport_enable_roce(port_mdev);
+ if (err)
+ goto free;
+
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id,
+ MLX5_CAP_GEN(master_mdev, vhca_id));
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliation_criteria,
+ MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
+
+ err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
+ if (err)
+ mlx5_nic_vport_disable_roce(port_mdev);
+
+free:
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);
+
+int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *in;
+ int err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id, 0);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliation_criteria, 0);
+
+ err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
+ if (!err)
+ mlx5_nic_vport_disable_roce(port_mdev);
+
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
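
Vport affiliation underpins dual-port RoCE: the slave port enables RoCE, then points its NIC vport context at the master's vhca_id, after which the master's IB device drives both ports; unaffiliation zeroes the fields and drops the RoCE reference. A hedged caller sketch:

static int bind_slave_port(struct mlx5_core_dev *master_mdev,
			   struct mlx5_core_dev *port_mdev)
{
	int err;

	/* devices are assumed to be pre-matched (same system image GUID) */
	err = mlx5_nic_vport_affiliate_multiport(master_mdev, port_mdev);
	if (err)
		return err;

	/* ... dual-port operation via the master's IB device ... */

	return mlx5_nic_vport_unaffiliate_multiport(port_mdev);
}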
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 07a9ba6cfc70..2f74953e4561 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
struct mlx5e_vxlan *vxlan;
- spin_lock(&vxlan_db->lock);
+ spin_lock_bh(&vxlan_db->lock);
vxlan = radix_tree_lookup(&vxlan_db->tree, port);
- spin_unlock(&vxlan_db->lock);
+ spin_unlock_bh(&vxlan_db->lock);
return vxlan;
}
@@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
struct mlx5e_vxlan *vxlan;
int err;
- if (mlx5e_vxlan_lookup_port(priv, port))
+ mutex_lock(&priv->state_lock);
+ vxlan = mlx5e_vxlan_lookup_port(priv, port);
+ if (vxlan) {
+ atomic_inc(&vxlan->refcount);
goto free_work;
+ }
if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
goto free_work;
@@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
goto err_delete_port;
vxlan->udp_port = port;
+ atomic_set(&vxlan->refcount, 1);
- spin_lock_irq(&vxlan_db->lock);
+ spin_lock_bh(&vxlan_db->lock);
err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
- spin_unlock_irq(&vxlan_db->lock);
+ spin_unlock_bh(&vxlan_db->lock);
if (err)
goto err_free;
@@ -113,35 +118,39 @@ err_free:
err_delete_port:
mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
free_work:
+ mutex_unlock(&priv->state_lock);
kfree(vxlan_work);
}
-static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
{
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+ u16 port = vxlan_work->port;
struct mlx5e_vxlan *vxlan;
+ bool remove = false;
- spin_lock_irq(&vxlan_db->lock);
- vxlan = radix_tree_delete(&vxlan_db->tree, port);
- spin_unlock_irq(&vxlan_db->lock);
-
+ mutex_lock(&priv->state_lock);
+ spin_lock_bh(&vxlan_db->lock);
+ vxlan = radix_tree_lookup(&vxlan_db->tree, port);
if (!vxlan)
- return;
-
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
-
- kfree(vxlan);
-}
+ goto out_unlock;
-static void mlx5e_vxlan_del_port(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- u16 port = vxlan_work->port;
+ if (atomic_dec_and_test(&vxlan->refcount)) {
+ radix_tree_delete(&vxlan_db->tree, port);
+ remove = true;
+ }
- __mlx5e_vxlan_core_del_port(priv, port);
+out_unlock:
+ spin_unlock_bh(&vxlan_db->lock);
+ if (remove) {
+ mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+ kfree(vxlan);
+ }
+ mutex_unlock(&priv->state_lock);
kfree(vxlan_work);
}
@@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
struct mlx5e_vxlan *vxlan;
unsigned int port = 0;
- spin_lock_irq(&vxlan_db->lock);
+ /* Lockless since we are the only radix-tree consumer and the wq is disabled */
while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
port = vxlan->udp_port;
- spin_unlock_irq(&vxlan_db->lock);
- __mlx5e_vxlan_core_del_port(priv, (u16)port);
- spin_lock_irq(&vxlan_db->lock);
+ radix_tree_delete(&vxlan_db->tree, port);
+ mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+ kfree(vxlan);
}
- spin_unlock_irq(&vxlan_db->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index 5def12c048e3..5ef6ae7d568a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -36,6 +36,7 @@
#include "en.h"
struct mlx5e_vxlan {
+ atomic_t refcount;
u16 udp_port;
};
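The vxlan.c/vxlan.h hunks above add a refcount to each offloaded UDP port, so a repeated add only takes a reference and the hardware command is undone only when the last user deletes the port. A minimal sketch of that add/del logic, assuming a plain array in place of the radix tree and illustrative names throughout:

#include <stdio.h>
#include <stdlib.h>

struct vxlan_entry {
	unsigned short udp_port;
	int refcount;
};

#define MAX_PORTS 4
static struct vxlan_entry *table[MAX_PORTS];

static struct vxlan_entry *lookup(unsigned short port)
{
	for (int i = 0; i < MAX_PORTS; i++)
		if (table[i] && table[i]->udp_port == port)
			return table[i];
	return NULL;
}

static int add_port(unsigned short port)
{
	struct vxlan_entry *e = lookup(port);

	if (e) {		/* already offloaded: just take a reference */
		e->refcount++;
		return 0;
	}
	e = calloc(1, sizeof(*e));
	if (!e)
		return -1;
	e->udp_port = port;
	e->refcount = 1;	/* a real driver would issue the HW add here */
	for (int i = 0; i < MAX_PORTS; i++)
		if (!table[i]) {
			table[i] = e;
			return 0;
		}
	free(e);
	return -1;
}

static void del_port(unsigned short port)
{
	struct vxlan_entry *e = lookup(port);

	if (!e)
		return;
	if (--e->refcount)
		return;		/* other users remain; keep the offload */
	for (int i = 0; i < MAX_PORTS; i++)
		if (table[i] == e)
			table[i] = NULL;
	free(e);		/* last reference: undo the HW offload here */
}

int main(void)
{
	add_port(4789);
	add_port(4789);		/* refcount -> 2, no duplicate entry */
	del_port(4789);		/* entry stays */
	del_port(4789);		/* entry removed */
	printf("port present: %s\n", lookup(4789) ? "yes" : "no");
	return 0;
}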
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f3315bc874ad..3529b545675d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -113,6 +113,7 @@ struct mlxsw_core {
struct mlxsw_thermal *thermal;
struct mlxsw_core_port *ports;
unsigned int max_ports;
+ bool reload_fail;
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
};
@@ -962,7 +963,28 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
pool_type, p_cur, p_max);
}
+static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus;
+ int err;
+
+ if (!mlxsw_bus->reset)
+ return -EOPNOTSUPP;
+
+ mlxsw_core_bus_device_unregister(mlxsw_core, true);
+ mlxsw_bus->reset(mlxsw_core->bus_priv);
+ err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
+ mlxsw_core->bus,
+ mlxsw_core->bus_priv, true,
+ devlink);
+ if (err)
+ mlxsw_core->reload_fail = true;
+ return err;
+}
+
static const struct devlink_ops mlxsw_devlink_ops = {
+ .reload = mlxsw_devlink_core_bus_device_reload,
.port_type_set = mlxsw_devlink_port_type_set,
.port_split = mlxsw_devlink_port_split,
.port_unsplit = mlxsw_devlink_port_unsplit,
@@ -980,23 +1002,26 @@ static const struct devlink_ops mlxsw_devlink_ops = {
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
- void *bus_priv)
+ void *bus_priv, bool reload,
+ struct devlink *devlink)
{
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
- struct devlink *devlink;
size_t alloc_size;
int err;
mlxsw_driver = mlxsw_core_driver_get(device_kind);
if (!mlxsw_driver)
return -EINVAL;
- alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
- devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
- if (!devlink) {
- err = -ENOMEM;
- goto err_devlink_alloc;
+
+ if (!reload) {
+ alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+ devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
+ if (!devlink) {
+ err = -ENOMEM;
+ goto err_devlink_alloc;
+ }
}
mlxsw_core = devlink_priv(devlink);
@@ -1012,6 +1037,12 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_bus_init;
+ if (mlxsw_driver->resources_register && !reload) {
+ err = mlxsw_driver->resources_register(mlxsw_core);
+ if (err)
+ goto err_register_resources;
+ }
+
err = mlxsw_ports_init(mlxsw_core);
if (err)
goto err_ports_init;
@@ -1032,9 +1063,11 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_emad_init;
- err = devlink_register(devlink, mlxsw_bus_info->dev);
- if (err)
- goto err_devlink_register;
+ if (!reload) {
+ err = devlink_register(devlink, mlxsw_bus_info->dev);
+ if (err)
+ goto err_devlink_register;
+ }
err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
if (err)
@@ -1057,7 +1090,8 @@ err_driver_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
err_hwmon_init:
- devlink_unregister(devlink);
+ if (!reload)
+ devlink_unregister(devlink);
err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
@@ -1067,26 +1101,40 @@ err_alloc_lag_mapping:
err_ports_init:
mlxsw_bus->fini(bus_priv);
err_bus_init:
- devlink_free(devlink);
+ if (!reload)
+ devlink_resources_unregister(devlink, NULL);
+err_register_resources:
+ if (!reload)
+ devlink_free(devlink);
err_devlink_alloc:
mlxsw_core_driver_put(device_kind);
return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
-void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
+ bool reload)
{
const char *device_kind = mlxsw_core->bus_info->device_kind;
struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ if (mlxsw_core->reload_fail)
+ goto reload_fail;
+
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
- devlink_unregister(devlink);
+ if (!reload)
+ devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core);
+ if (!reload)
+ devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ if (reload)
+ return;
+reload_fail:
devlink_free(devlink);
mlxsw_core_driver_put(device_kind);
}
@@ -1791,6 +1839,22 @@ void mlxsw_core_flush_owq(void)
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size)
+{
+ struct mlxsw_driver *driver = mlxsw_core->driver;
+
+ if (!driver->kvd_sizes_get)
+ return -EINVAL;
+
+ return driver->kvd_sizes_get(mlxsw_core, profile,
+ p_single_size, p_double_size,
+ p_linear_size);
+}
+EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
+
static int __init mlxsw_core_module_init(void)
{
int err;
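The core.c hunk set threads a `reload` flag through device register/unregister so the devlink instance survives a bus reset, and remembers a failed re-registration via `reload_fail` so the final unregister only frees it. A rough standalone model of that flow, with illustrative names and a no-op in place of the real bus reset:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct core {
	bool reload_fail;	/* set when a reload's bring-up failed */
};

static int device_register(struct core *c, bool reload)
{
	/* on a reload the top-level object and its registration are reused */
	printf("bring-up (reload=%d)\n", reload);
	return 0;	/* a real bring-up could fail */
}

static void device_unregister(struct core *c, bool reload)
{
	if (c->reload_fail)
		goto free_only;		/* nothing was brought up; just free */
	printf("teardown (reload=%d)\n", reload);
	if (reload)
		return;			/* keep the object across the reset */
free_only:
	free(c);
}

static int core_reload(struct core *c)
{
	int err;

	device_unregister(c, true);
	/* the bus reset would happen here */
	err = device_register(c, true);
	if (err)
		c->reload_fail = true;	/* final unregister only frees */
	return err;
}

int main(void)
{
	struct core *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	device_register(c, false);
	core_reload(c);
	device_unregister(c, false);
	return 0;
}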
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6e966af72fc4..5ddafd74dc00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -66,8 +66,9 @@ void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
- void *bus_priv);
-void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+ void *bus_priv, bool reload,
+ struct devlink *devlink);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
struct mlxsw_tx_info {
u8 local_port;
@@ -308,10 +309,20 @@ struct mlxsw_driver {
u32 *p_cur, u32 *p_max);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
+ int (*resources_register)(struct mlxsw_core *mlxsw_core);
+ int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size);
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
};
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size);
+
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
@@ -332,6 +343,7 @@ struct mlxsw_bus {
const struct mlxsw_config_profile *profile,
struct mlxsw_res *res);
void (*fini)(void *bus_priv);
+ void (*reset)(void *bus_priv);
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 6a979a09ab72..b698fb481b2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -310,9 +310,33 @@ struct mlxsw_afa_block {
struct mlxsw_afa_set *first_set;
struct mlxsw_afa_set *cur_set;
unsigned int cur_act_index; /* In current set. */
- struct list_head fwd_entry_ref_list;
+ struct list_head resource_list; /* List of resources held by actions
+ * in this block.
+ */
};
+struct mlxsw_afa_resource {
+ struct list_head list;
+ void (*destructor)(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource);
+};
+
+static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ list_add(&resource->list, &block->resource_list);
+}
+
+static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
+{
+ struct mlxsw_afa_resource *resource, *tmp;
+
+ list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
+ list_del(&resource->list);
+ resource->destructor(block, resource);
+ }
+}
+
struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
{
struct mlxsw_afa_block *block;
@@ -320,7 +344,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (!block)
return NULL;
- INIT_LIST_HEAD(&block->fwd_entry_ref_list);
+ INIT_LIST_HEAD(&block->resource_list);
block->afa = mlxsw_afa;
/* At least one action set is always present, so just create it here */
@@ -336,8 +360,6 @@ err_first_set_create:
}
EXPORT_SYMBOL(mlxsw_afa_block_create);
-static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block);
-
void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
{
struct mlxsw_afa_set *set = block->first_set;
@@ -348,7 +370,7 @@ void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
mlxsw_afa_set_put(block->afa, set);
set = next_set;
} while (set);
- mlxsw_afa_fwd_entry_refs_destroy(block);
+ mlxsw_afa_resources_destroy(block);
kfree(block);
}
EXPORT_SYMBOL(mlxsw_afa_block_destroy);
@@ -489,10 +511,29 @@ static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
}
struct mlxsw_afa_fwd_entry_ref {
- struct list_head list;
+ struct mlxsw_afa_resource resource;
struct mlxsw_afa_fwd_entry *fwd_entry;
};
+static void
+mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
+{
+ mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
+ kfree(fwd_entry_ref);
+}
+
+static void
+mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
+
+ fwd_entry_ref = container_of(resource, struct mlxsw_afa_fwd_entry_ref,
+ resource);
+ mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+}
+
static struct mlxsw_afa_fwd_entry_ref *
mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
{
@@ -509,7 +550,8 @@ mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
goto err_fwd_entry_get;
}
fwd_entry_ref->fwd_entry = fwd_entry;
- list_add(&fwd_entry_ref->list, &block->fwd_entry_ref_list);
+ fwd_entry_ref->resource.destructor = mlxsw_afa_fwd_entry_ref_destructor;
+ mlxsw_afa_resource_add(block, &fwd_entry_ref->resource);
return fwd_entry_ref;
err_fwd_entry_get:
@@ -517,23 +559,51 @@ err_fwd_entry_get:
return ERR_PTR(err);
}
+struct mlxsw_afa_counter {
+ struct mlxsw_afa_resource resource;
+ u32 counter_index;
+};
+
static void
-mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
- struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
+mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_counter *counter)
{
- list_del(&fwd_entry_ref->list);
- mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
- kfree(fwd_entry_ref);
+ block->afa->ops->counter_index_put(block->afa->ops_priv,
+ counter->counter_index);
+ kfree(counter);
+}
+
+static void
+mlxsw_afa_counter_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_counter *counter;
+
+ counter = container_of(resource, struct mlxsw_afa_counter, resource);
+ mlxsw_afa_counter_destroy(block, counter);
}
-static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block)
+static struct mlxsw_afa_counter *
+mlxsw_afa_counter_create(struct mlxsw_afa_block *block)
{
- struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
- struct mlxsw_afa_fwd_entry_ref *tmp;
+ struct mlxsw_afa_counter *counter;
+ int err;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ err = block->afa->ops->counter_index_get(block->afa->ops_priv,
+ &counter->counter_index);
+ if (err)
+ goto err_counter_index_get;
+ counter->resource.destructor = mlxsw_afa_counter_destructor;
+ mlxsw_afa_resource_add(block, &counter->resource);
+ return counter;
- list_for_each_entry_safe(fwd_entry_ref, tmp,
- &block->fwd_entry_ref_list, list)
- mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+err_counter_index_get:
+ kfree(counter);
+ return ERR_PTR(err);
}
#define MLXSW_AFA_ONE_ACTION_LEN 32
@@ -690,6 +760,16 @@ MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);
*/
MLXSW_ITEM32(afa, trapdisc, trap_id, 0x04, 0, 9);
+/* afa_trapdisc_mirror_agent
+ * Mirror agent.
+ */
+MLXSW_ITEM32(afa, trapdisc, mirror_agent, 0x08, 29, 3);
+
+/* afa_trapdisc_mirror_enable
+ * Mirror enable.
+ */
+MLXSW_ITEM32(afa, trapdisc, mirror_enable, 0x08, 24, 1);
+
static inline void
mlxsw_afa_trapdisc_pack(char *payload,
enum mlxsw_afa_trapdisc_trap_action trap_action,
@@ -701,6 +781,14 @@ mlxsw_afa_trapdisc_pack(char *payload,
mlxsw_afa_trapdisc_trap_id_set(payload, trap_id);
}
+static inline void
+mlxsw_afa_trapdisc_mirror_pack(char *payload, bool mirror_enable,
+ u8 mirror_agent)
+{
+ mlxsw_afa_trapdisc_mirror_enable_set(payload, mirror_enable);
+ mlxsw_afa_trapdisc_mirror_agent_set(payload, mirror_agent);
+}
+
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
{
char *act = mlxsw_afa_block_append_action(block,
@@ -746,6 +834,104 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);
+struct mlxsw_afa_mirror {
+ struct mlxsw_afa_resource resource;
+ int span_id;
+ u8 local_in_port;
+ u8 local_out_port;
+ bool ingress;
+};
+
+static void
+mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_mirror *mirror)
+{
+ block->afa->ops->mirror_del(block->afa->ops_priv,
+ mirror->local_in_port,
+ mirror->local_out_port,
+ mirror->ingress);
+ kfree(mirror);
+}
+
+static void
+mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_mirror *mirror;
+
+ mirror = container_of(resource, struct mlxsw_afa_mirror, resource);
+ mlxsw_afa_mirror_destroy(block, mirror);
+}
+
+static struct mlxsw_afa_mirror *
+mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
+ u8 local_in_port, u8 local_out_port,
+ bool ingress)
+{
+ struct mlxsw_afa_mirror *mirror;
+ int err;
+
+ mirror = kzalloc(sizeof(*mirror), GFP_KERNEL);
+ if (!mirror)
+ return ERR_PTR(-ENOMEM);
+
+ err = block->afa->ops->mirror_add(block->afa->ops_priv,
+ local_in_port, local_out_port,
+ ingress, &mirror->span_id);
+ if (err)
+ goto err_mirror_add;
+
+ mirror->ingress = ingress;
+ mirror->local_out_port = local_out_port;
+ mirror->local_in_port = local_in_port;
+ mirror->resource.destructor = mlxsw_afa_mirror_destructor;
+ mlxsw_afa_resource_add(block, &mirror->resource);
+ return mirror;
+
+err_mirror_add:
+ kfree(mirror);
+ return ERR_PTR(err);
+}
+
+static int
+mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
+ u8 mirror_agent)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_TRAPDISC_CODE,
+ MLXSW_AFA_TRAPDISC_SIZE);
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
+ MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
+ mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
+ return 0;
+}
+
+int
+mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
+ u8 local_in_port, u8 local_out_port, bool ingress)
+{
+ struct mlxsw_afa_mirror *mirror;
+ int err;
+
+ mirror = mlxsw_afa_mirror_create(block, local_in_port, local_out_port,
+ ingress);
+ if (IS_ERR(mirror))
+ return PTR_ERR(mirror);
+
+ err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id);
+ if (err)
+ goto err_append_allocated_mirror;
+
+ return 0;
+
+err_append_allocated_mirror:
+ mlxsw_afa_mirror_destroy(block, mirror);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_mirror);
+
/* Forwarding Action
* -----------------
* Forwarding Action can be used to implement Policy Based Switching (PBS)
@@ -853,11 +1039,10 @@ mlxsw_afa_polcnt_pack(char *payload,
mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
}
-int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
- u32 counter_index)
+int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
+ u32 counter_index)
{
- char *act = mlxsw_afa_block_append_action(block,
- MLXSW_AFA_POLCNT_CODE,
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
MLXSW_AFA_POLCNT_SIZE);
if (!act)
return -ENOBUFS;
@@ -865,6 +1050,32 @@ int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
counter_index);
return 0;
}
+EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter);
+
+int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
+ u32 *p_counter_index)
+{
+ struct mlxsw_afa_counter *counter;
+ u32 counter_index;
+ int err;
+
+ counter = mlxsw_afa_counter_create(block);
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
+ counter_index = counter->counter_index;
+
+ err = mlxsw_afa_block_append_allocated_counter(block, counter_index);
+ if (err)
+ goto err_append_allocated_counter;
+
+ if (p_counter_index)
+ *p_counter_index = counter_index;
+ return 0;
+
+err_append_allocated_counter:
+ mlxsw_afa_counter_destroy(block, counter);
+ return err;
+}
EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
/* Virtual Router and Forwarding Domain Action
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index a8d3314c3a24..43132293475c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -46,6 +46,12 @@ struct mlxsw_afa_ops {
void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
+ int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
+ void (*counter_index_put)(void *priv, unsigned int counter_index);
+ int (*mirror_add)(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress, int *p_span_id);
+ void (*mirror_del)(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress);
};
struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
@@ -63,12 +69,17 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
u16 trap_id);
+int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
+ u8 local_in_port, u8 local_out_port,
+ bool ingress);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
u16 vid, u8 pcp, u8 et);
+int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
+ u32 counter_index);
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
- u32 counter_index);
+ u32 *p_counter_index);
int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid);
int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
u16 expected_irif, u16 min_mtu,
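The flex-actions hunks above generalize the per-block `fwd_entry_ref_list` into a `resource_list` whose entries carry their own destructor, so counters and mirrors can reuse the same teardown path. A minimal sketch of that destructor-list pattern, using a hand-rolled singly linked list and illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct block;

struct resource {
	struct resource *next;
	void (*destructor)(struct block *block, struct resource *res);
};

struct block {
	struct resource *resources;	/* LIFO list of held resources */
};

static void resource_add(struct block *block, struct resource *res)
{
	res->next = block->resources;
	block->resources = res;
}

static void resources_destroy(struct block *block)
{
	struct resource *res, *next;

	for (res = block->resources; res; res = next) {
		next = res->next;
		res->destructor(block, res);	/* each entry frees itself */
	}
	block->resources = NULL;
}

/* one concrete resource kind, embedding the generic node first */
struct counter {
	struct resource resource;
	unsigned int index;
};

static void counter_destructor(struct block *block, struct resource *res)
{
	/* safe cast: resource is the first member (container_of in spirit) */
	struct counter *c = (struct counter *)res;

	printf("releasing counter %u\n", c->index);
	free(c);
}

static struct counter *counter_create(struct block *block, unsigned int index)
{
	struct counter *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->index = index;
	c->resource.destructor = counter_destructor;
	resource_add(block, &c->resource);
	return c;
}

int main(void)
{
	struct block block = { .resources = NULL };

	counter_create(&block, 1);
	counter_create(&block, 2);
	resources_destroy(&block);	/* releases 2, then 1 */
	return 0;
}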
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index c0dcfa05b077..25f9915ebd82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -539,7 +539,8 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
mlxsw_i2c->dev = &client->dev;
err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
- &mlxsw_i2c_bus, mlxsw_i2c);
+ &mlxsw_i2c_bus, mlxsw_i2c, false,
+ NULL);
if (err) {
dev_err(&client->dev, "Fail to register core bus\n");
return err;
@@ -557,7 +558,7 @@ static int mlxsw_i2c_remove(struct i2c_client *client)
{
struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
- mlxsw_core_bus_device_unregister(mlxsw_i2c->core);
+ mlxsw_core_bus_device_unregister(mlxsw_i2c->core, false);
mutex_destroy(&mlxsw_i2c->cmd.lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index 28427f0758c7..31c886edc791 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -42,7 +42,7 @@
struct mlxsw_item {
unsigned short offset; /* bytes in container */
- unsigned short step; /* step in bytes for indexed items */
+ short step; /* step in bytes for indexed items */
unsigned short in_step_offset; /* offset within one step */
unsigned char shift; /* shift in bits */
unsigned char element_size; /* size of element in bit array */
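Making `step` signed lets an indexed item walk backwards from the end of a register payload, which is what the new RDPM entries below do with their negative record length. A small standalone model of the offset computation, with illustrative names:

#include <stdio.h>

struct item {
	unsigned short offset;	/* base offset in the payload */
	short step;		/* now signed: may walk backwards */
};

static unsigned int item_offset(const struct item *item, unsigned int index)
{
	return item->offset + item->step * (int)index;
}

int main(void)
{
	/* 0x40-byte payload, 1-byte records growing down from 0x3f */
	struct item rdpm_entry = { .offset = 0x3f, .step = -1 };

	for (unsigned int i = 0; i < 4; i++)
		printf("entry %u at byte 0x%02x\n",
		       i, item_offset(&rdpm_entry, i));
	return 0;
}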
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 23f7d828cf67..85faa87bf42d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -154,6 +154,7 @@ struct mlxsw_pci {
} comp;
} cmd;
struct mlxsw_bus_info bus_info;
+ const struct pci_device_id *id;
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -1052,38 +1053,18 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
}
static int
-mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
+mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_config_profile *profile,
struct mlxsw_res *res)
{
- u32 single_size, double_size, linear_size;
-
- if (!MLXSW_RES_VALID(res, KVD_SINGLE_MIN_SIZE) ||
- !MLXSW_RES_VALID(res, KVD_DOUBLE_MIN_SIZE) ||
- !profile->used_kvd_split_data)
- return -EIO;
-
- linear_size = profile->kvd_linear_size;
+ u64 single_size, double_size, linear_size;
+ int err;
- /* The hash part is what left of the kvd without the
- * linear part. It is split to the single size and
- * double size by the parts ratio from the profile.
- * Both sizes must be a multiplications of the
- * granularity from the profile.
- */
- double_size = MLXSW_RES_GET(res, KVD_SIZE) - linear_size;
- double_size *= profile->kvd_hash_double_parts;
- double_size /= profile->kvd_hash_double_parts +
- profile->kvd_hash_single_parts;
- double_size /= profile->kvd_hash_granularity;
- double_size *= profile->kvd_hash_granularity;
- single_size = MLXSW_RES_GET(res, KVD_SIZE) - double_size -
- linear_size;
-
- /* Check results are legal. */
- if (single_size < MLXSW_RES_GET(res, KVD_SINGLE_MIN_SIZE) ||
- double_size < MLXSW_RES_GET(res, KVD_DOUBLE_MIN_SIZE) ||
- MLXSW_RES_GET(res, KVD_SIZE) < linear_size)
- return -EIO;
+ err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
+ &single_size, &double_size,
+ &linear_size);
+ if (err)
+ return err;
MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
@@ -1184,7 +1165,7 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mbox, profile->adaptive_routing_group_cap);
}
if (MLXSW_RES_VALID(res, KVD_SIZE)) {
- err = mlxsw_pci_profile_get_kvd_sizes(profile, res);
+ err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
return err;
@@ -1622,16 +1603,6 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
return err;
}
-static const struct mlxsw_bus mlxsw_pci_bus = {
- .kind = "pci",
- .init = mlxsw_pci_init,
- .fini = mlxsw_pci_fini,
- .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
- .skb_transmit = mlxsw_pci_skb_transmit,
- .cmd_exec = mlxsw_pci_cmd_exec,
- .features = MLXSW_BUS_F_TXRX,
-};
-
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
const struct pci_device_id *id)
{
@@ -1643,7 +1614,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
return 0;
}
- wmb(); /* reset needs to be written before we read control register */
+ /* Reset needs to be written before we read the control register, and
+ * we must wait for the HW to become responsive once again
+ */
+ wmb();
+ msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
+
end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
do {
u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
@@ -1655,6 +1631,41 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
return 0;
}
+static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+ pci_free_irq_vectors(mlxsw_pci->pdev);
+}
+
+static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+ int err;
+
+ err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
+ if (err < 0)
+ dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
+ return err;
+}
+
+static void mlxsw_pci_reset(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
+ mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
+ mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+ .kind = "pci",
+ .init = mlxsw_pci_init,
+ .fini = mlxsw_pci_fini,
+ .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
+ .skb_transmit = mlxsw_pci_skb_transmit,
+ .cmd_exec = mlxsw_pci_cmd_exec,
+ .features = MLXSW_BUS_F_TXRX,
+ .reset = mlxsw_pci_reset,
+};
+
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const char *driver_name = pdev->driver->name;
@@ -1716,7 +1727,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_sw_reset;
}
- err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
if (err < 0) {
dev_err(&pdev->dev, "MSI-X init failed\n");
goto err_msix_init;
@@ -1725,9 +1736,11 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
+ mlxsw_pci->id = id;
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
- &mlxsw_pci_bus, mlxsw_pci);
+ &mlxsw_pci_bus, mlxsw_pci, false,
+ NULL);
if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
goto err_bus_device_register;
@@ -1736,7 +1749,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_bus_device_register:
- pci_free_irq_vectors(mlxsw_pci->pdev);
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_msix_init:
err_sw_reset:
iounmap(mlxsw_pci->hw_addr);
@@ -1755,8 +1768,8 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
{
struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
- mlxsw_core_bus_device_unregister(mlxsw_pci->core);
- pci_free_irq_vectors(mlxsw_pci->pdev);
+ mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+ mlxsw_pci_free_irq_vectors(mlxsw_pci);
iounmap(mlxsw_pci->hw_addr);
pci_release_regions(mlxsw_pci->pdev);
pci_disable_device(mlxsw_pci->pdev);
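The pci.c hunks above split IRQ-vector setup/teardown into helpers for the new `reset` bus op and add a fixed grace period after writing the software reset, before polling the FW_READY register. A rough standalone model of that reset-then-poll flow; `read_fw_ready()` is a stand-in for the mapped register and the constants mirror the pci_hw.h values:

#include <stdio.h>
#include <time.h>

#define RESET_WAIT_MS		100
#define RESET_TIMEOUT_MS	5000
#define FW_READY_MAGIC		0x5E

static unsigned int read_fw_ready(void)
{
	return FW_READY_MAGIC;	/* pretend the device came back */
}

static void msleep_ms(long ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

	nanosleep(&ts, NULL);	/* POSIX sleep standing in for msleep() */
}

static int sw_reset(void)
{
	time_t end;

	/* the write of the reset bit would go here, then the grace period */
	msleep_ms(RESET_WAIT_MS);

	end = time(NULL) + RESET_TIMEOUT_MS / 1000;
	do {
		if ((read_fw_ready() & 0xFFFF) == FW_READY_MAGIC)
			return 0;
		msleep_ms(100);
	} while (time(NULL) < end);
	return -1;		/* device never became responsive */
}

int main(void)
{
	printf("reset %s\n", sw_reset() ? "timed out" : "done");
	return 0;
}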
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index a6441208e9d9..fb082ad21b00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -59,6 +59,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
+#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
#define MLXSW_PCI_FW_READY_MAGIC 0x5E
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6c4e08b8058a..0e08be41c8e0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4827,6 +4827,42 @@ static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index,
mlxsw_reg_ratr_counter_set_type_set(payload, set_type);
}
+/* RDPM - Router DSCP to Priority Mapping
+ * --------------------------------------
+ * Controls the mapping from DSCP field to switch priority on routed packets
+ */
+#define MLXSW_REG_RDPM_ID 0x8009
+#define MLXSW_REG_RDPM_BASE_LEN 0x00
+#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN 0x01
+#define MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT 64
+#define MLXSW_REG_RDPM_LEN 0x40
+#define MLXSW_REG_RDPM_LAST_ENTRY (MLXSW_REG_RDPM_BASE_LEN + \
+ MLXSW_REG_RDPM_LEN - \
+ MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN)
+
+MLXSW_REG_DEFINE(rdpm, MLXSW_REG_RDPM_ID, MLXSW_REG_RDPM_LEN);
+
+/* reg_rdpm_dscp_entry_e
+ * Enable update of the specific entry
+ * Access: Index
+ */
+MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_e, MLXSW_REG_RDPM_LAST_ENTRY, 7, 1,
+ -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+/* reg_rdpm_dscp_entry_prio
+ * Switch Priority
+ * Access: RW
+ */
+MLXSW_ITEM8_INDEXED(reg, rdpm, dscp_entry_prio, MLXSW_REG_RDPM_LAST_ENTRY, 0, 4,
+ -MLXSW_REG_RDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_rdpm_pack(char *payload, unsigned short index,
+ u8 prio)
+{
+ mlxsw_reg_rdpm_dscp_entry_e_set(payload, index, 1);
+ mlxsw_reg_rdpm_dscp_entry_prio_set(payload, index, prio);
+}
+
/* RICNT - Router Interface Counter Register
* -----------------------------------------
* The RICNT register retrieves per port performance counters
@@ -7640,6 +7676,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rtar),
MLXSW_REG(ratr),
MLXSW_REG(rtdp),
+ MLXSW_REG(rdpm),
MLXSW_REG(ricnt),
MLXSW_REG(rrcr),
MLXSW_REG(ralta),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 2d0897b7d860..3dcc58d61506 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -76,12 +76,7 @@
#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1530
#define MLXSW_FWREV_SUBMINOR 152
-
-static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
- .major = MLXSW_FWREV_MAJOR,
- .minor = MLXSW_FWREV_MINOR,
- .subminor = MLXSW_FWREV_SUBMINOR
-};
+#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP_FW_FILENAME \
"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
@@ -339,28 +334,25 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}
-static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
- const struct mlxsw_fw_rev *b)
-{
- if (a->major != b->major)
- return a->major > b->major;
- if (a->minor != b->minor)
- return a->minor > b->minor;
- return a->subminor >= b->subminor;
-}
-
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
const struct firmware *firmware;
int err;
- if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
+ /* Validate driver & FW are compatible */
+ if (rev->major != MLXSW_FWREV_MAJOR) {
+ WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
+ rev->major, MLXSW_FWREV_MAJOR);
+ return -EINVAL;
+ }
+ if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
+ MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
return 0;
- dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n",
+ dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
rev->major, rev->minor, rev->subminor);
- dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
+ dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
MLXSW_SP_FW_FILENAME);
err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
@@ -576,7 +568,7 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
span_entry->used = false;
}
-static struct mlxsw_sp_span_entry *
+struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
int i;
@@ -677,13 +669,28 @@ mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type)
+ enum mlxsw_sp_span_type type,
+ bool bind)
{
- struct mlxsw_sp_span_inspected_port *inspected_port;
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
char mpar_pl[MLXSW_REG_MPAR_LEN];
- char sbib_pl[MLXSW_REG_SBIB_LEN];
int pa_id = span_entry->id;
+
+ /* bind or unbind the port to/from the SPAN entry */
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e) type, bind, pa_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+}
+
+static int
+mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
+{
+ struct mlxsw_sp_span_inspected_port *inspected_port;
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
int err;
/* if it is an egress SPAN, bind a shared buffer to it */
@@ -699,12 +706,12 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
}
}
- /* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
- (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
- if (err)
- goto err_mpar_reg_write;
+ if (bind) {
+ err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ true);
+ if (err)
+ goto err_port_bind;
+ }
inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
if (!inspected_port) {
@@ -717,8 +724,11 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
return 0;
-err_mpar_reg_write:
err_inspected_port_alloc:
+ if (bind)
+ mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ false);
+err_port_bind:
if (type == MLXSW_SP_SPAN_EGRESS) {
mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
@@ -727,25 +737,22 @@ err_inspected_port_alloc:
}
static void
-mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type)
+mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
{
struct mlxsw_sp_span_inspected_port *inspected_port;
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char mpar_pl[MLXSW_REG_MPAR_LEN];
char sbib_pl[MLXSW_REG_SBIB_LEN];
- int pa_id = span_entry->id;
inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
if (!inspected_port)
return;
- /* remove the inspected port */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
- (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
-
+ if (bind)
+ mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ false);
/* remove the SBIB buffer if it was egress SPAN */
if (type == MLXSW_SP_SPAN_EGRESS) {
mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
@@ -758,9 +765,9 @@ mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
kfree(inspected_port);
}
-static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- struct mlxsw_sp_port *to,
- enum mlxsw_sp_span_type type)
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ struct mlxsw_sp_port *to,
+ enum mlxsw_sp_span_type type, bool bind)
{
struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
struct mlxsw_sp_span_entry *span_entry;
@@ -773,7 +780,7 @@ static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
span_entry->id);
- err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
+ err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
if (err)
goto err_port_bind;
@@ -784,9 +791,8 @@ err_port_bind:
return err;
}
-static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
- u8 destination_port,
- enum mlxsw_sp_span_type type)
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port,
+ enum mlxsw_sp_span_type type, bool bind)
{
struct mlxsw_sp_span_entry *span_entry;
@@ -799,7 +805,7 @@ static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
span_entry->id);
- mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
+ mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1571,14 +1577,11 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
const struct tc_action *a,
bool ingress)
{
- struct net *net = dev_net(mlxsw_sp_port->dev);
enum mlxsw_sp_span_type span_type;
struct mlxsw_sp_port *to_port;
struct net_device *to_dev;
- int ifindex;
- ifindex = tcf_mirred_ifindex(a);
- to_dev = __dev_get_by_index(net, ifindex);
+ to_dev = tcf_mirred_dev(a);
if (!to_dev) {
netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
return -EINVAL;
@@ -1593,7 +1596,8 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
mirror->to_local_port = to_port->local_port;
mirror->ingress = ingress;
span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
+ return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type,
+ true);
}
static void
@@ -1604,8 +1608,8 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
span_type = mirror->ingress ?
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
- span_type);
+ mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port,
+ span_type, true);
}
static int
@@ -1734,9 +1738,6 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_cls_matchall_offload *f,
bool ingress)
{
- if (f->common.chain_index)
- return -EOPNOTSUPP;
-
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
@@ -1750,72 +1751,187 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_flower_offload *f,
- bool ingress)
+mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
+ struct tc_cls_flower_offload *f)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
+
switch (f->command) {
case TC_CLSFLOWER_REPLACE:
- return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
+ return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
case TC_CLSFLOWER_DESTROY:
- mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
+ mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
return 0;
case TC_CLSFLOWER_STATS:
- return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
+ return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
default:
return -EOPNOTSUPP;
}
}
-static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv, bool ingress)
+static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv, bool ingress)
{
struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
- if (!tc_can_offload(mlxsw_sp_port->dev))
- return -EOPNOTSUPP;
-
switch (type) {
case TC_SETUP_CLSMATCHALL:
+ if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
+ type_data))
+ return -EOPNOTSUPP;
+
return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
ingress);
case TC_SETUP_CLSFLOWER:
- return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data,
- ingress);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+ cb_priv, true);
+}
+
+static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+ cb_priv, false);
+}
+
+static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct mlxsw_sp_acl_block *acl_block = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return 0;
+ case TC_SETUP_CLSFLOWER:
+ if (mlxsw_sp_acl_block_disabled(acl_block))
+ return -EOPNOTSUPP;
+
+ return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
default:
return -EOPNOTSUPP;
}
}
-static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static int
+mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tcf_block *block, bool ingress)
{
- return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_acl_block *acl_block;
+ struct tcf_block_cb *block_cb;
+ int err;
+
+ block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp);
+ if (!block_cb) {
+ acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
+ if (!acl_block)
+ return -ENOMEM;
+ block_cb = __tcf_block_cb_register(block,
+ mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp, acl_block);
+ if (IS_ERR(block_cb)) {
+ err = PTR_ERR(block_cb);
+ goto err_cb_register;
+ }
+ } else {
+ acl_block = tcf_block_cb_priv(block_cb);
+ }
+ tcf_block_cb_incref(block_cb);
+ err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
+ mlxsw_sp_port, ingress);
+ if (err)
+ goto err_block_bind;
+
+ if (ingress)
+ mlxsw_sp_port->ing_acl_block = acl_block;
+ else
+ mlxsw_sp_port->eg_acl_block = acl_block;
+
+ return 0;
+
+err_block_bind:
+ if (!tcf_block_cb_decref(block_cb)) {
+ __tcf_block_cb_unregister(block_cb);
+err_cb_register:
+ mlxsw_sp_acl_block_destroy(acl_block);
+ }
+ return err;
}
-static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static void
+mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tcf_block *block, bool ingress)
{
- return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_acl_block *acl_block;
+ struct tcf_block_cb *block_cb;
+ int err;
+
+ block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+ mlxsw_sp);
+ if (!block_cb)
+ return;
+
+ if (ingress)
+ mlxsw_sp_port->ing_acl_block = NULL;
+ else
+ mlxsw_sp_port->eg_acl_block = NULL;
+
+ acl_block = tcf_block_cb_priv(block_cb);
+ err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
+ mlxsw_sp_port, ingress);
+ if (!err && !tcf_block_cb_decref(block_cb)) {
+ __tcf_block_cb_unregister(block_cb);
+ mlxsw_sp_acl_block_destroy(acl_block);
+ }
}
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_block_offload *f)
{
tc_setup_cb_t *cb;
+ bool ingress;
+ int err;
- if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- cb = mlxsw_sp_setup_tc_block_cb_ig;
- else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
- cb = mlxsw_sp_setup_tc_block_cb_eg;
- else
+ if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
+ ingress = true;
+ } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
+ ingress = false;
+ } else {
return -EOPNOTSUPP;
+ }
switch (f->command) {
case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
- mlxsw_sp_port);
+ err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
+ mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
+ f->block, ingress);
+ if (err) {
+ tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
+ return err;
+ }
+ return 0;
case TC_BLOCK_UNBIND:
+ mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
+ f->block, ingress);
tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
return 0;
default:
@@ -1833,11 +1949,69 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
case TC_SETUP_QDISC_RED:
return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_PRIO:
+ return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
default:
return -EOPNOTSUPP;
}
}
+
+static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ if (!enable) {
+ if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
+ mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
+ !list_empty(&mlxsw_sp_port->mall_tc_list)) {
+ netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+ return -EINVAL;
+ }
+ mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
+ mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
+ } else {
+ mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
+ mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
+ }
+ return 0;
+}
+
+typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
+
+static int mlxsw_sp_handle_feature(struct net_device *dev,
+ netdev_features_t wanted_features,
+ netdev_features_t feature,
+ mlxsw_sp_feature_handler feature_handler)
+{
+ netdev_features_t changes = wanted_features ^ dev->features;
+ bool enable = !!(wanted_features & feature);
+ int err;
+
+ if (!(changes & feature))
+ return 0;
+
+ err = feature_handler(dev, enable);
+ if (err) {
+ netdev_err(dev, "%s feature %pNF failed, err %d\n",
+ enable ? "Enable" : "Disable", &feature, err);
+ return err;
+ }
+
+ if (enable)
+ dev->features |= feature;
+ else
+ dev->features &= ~feature;
+
+ return 0;
+}
+static int mlxsw_sp_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
+ mlxsw_sp_feature_hw_tc);
+}
+
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_open = mlxsw_sp_port_open,
.ndo_stop = mlxsw_sp_port_stop,
@@ -1852,6 +2026,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
+ .ndo_set_features = mlxsw_sp_set_features,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -3039,6 +3214,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_fids_init;
}
+ err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_qdiscs_init;
+ }
+
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
if (IS_ERR(mlxsw_sp_port_vlan)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
@@ -3067,6 +3249,8 @@ err_register_netdev:
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
+ mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
+err_port_qdiscs_init:
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
@@ -3102,6 +3286,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
@@ -3933,6 +4118,253 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
.resource_query_enable = 1,
};
+static bool
+mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack,
+ u64 size)
+{
+ const struct mlxsw_config_profile *profile;
+
+ profile = &mlxsw_sp_config_profile;
+ if (size % profile->kvd_hash_granularity) {
+ NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity");
+ return false;
+ }
+ return true;
+}
+
+static int
+mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed");
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) {
+ NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+ return -EINVAL;
+
+ if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) {
+ NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
+}
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_size_validate,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
+ .occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
+ .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
+};
+
+static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
+static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
+static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
+static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
+
+static void
+mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+{
+ u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+ KVD_SINGLE_MIN_SIZE);
+ u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+ KVD_DOUBLE_MIN_SIZE);
+ u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ u32 linear_size_min = 0;
+
+ /* KVD top resource */
+ mlxsw_sp_kvd_size_params.size_min = kvd_size;
+ mlxsw_sp_kvd_size_params.size_max = kvd_size;
+ mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Linear part init */
+ mlxsw_sp_linear_size_params.size_min = linear_size_min;
+ mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
+ double_size_min;
+ mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Hash double part init */
+ mlxsw_sp_hash_double_size_params.size_min = double_size_min;
+ mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
+ linear_size_min;
+ mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+ /* Hash single part init */
+ mlxsw_sp_hash_single_size_params.size_min = single_size_min;
+ mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
+ linear_size_min;
+ mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+ mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+}
+
+static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ u32 kvd_size, single_size, double_size, linear_size;
+ const struct mlxsw_config_profile *profile;
+ int err;
+
+ profile = &mlxsw_sp_config_profile;
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
+ return -EIO;
+
+ mlxsw_sp_resource_size_params_prepare(mlxsw_core);
+ kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ true, kvd_size,
+ MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &mlxsw_sp_kvd_size_params,
+ &mlxsw_sp_resource_kvd_ops);
+ if (err)
+ return err;
+
+ linear_size = profile->kvd_linear_size;
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
+ false, linear_size,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_linear_size_params,
+ &mlxsw_sp_resource_kvd_linear_ops);
+ if (err)
+ return err;
+
+ double_size = kvd_size - linear_size;
+ double_size *= profile->kvd_hash_double_parts;
+ double_size /= profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts;
+ double_size = rounddown(double_size, profile->kvd_hash_granularity);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
+ false, double_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_hash_double_size_params,
+ &mlxsw_sp_resource_kvd_hash_double_ops);
+ if (err)
+ return err;
+
+ single_size = kvd_size - double_size - linear_size;
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
+ false, single_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &mlxsw_sp_hash_single_size_params,
+ &mlxsw_sp_resource_kvd_hash_single_ops);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile,
+ u64 *p_single_size, u64 *p_double_size,
+ u64 *p_linear_size)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ u32 double_size;
+ int err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+ !profile->used_kvd_split_data)
+ return -EIO;
+
+ /* The hash part is what is left of the kvd after the
+ * linear part. It is split into the single size and
+ * double size by the parts ratio from the profile.
+ * Both sizes must be multiples of the
+ * granularity from the profile. In case the user
+ * provided the sizes, they are obtained via devlink.
+ */
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ p_linear_size);
+ if (err)
+ *p_linear_size = profile->kvd_linear_size;
+
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ p_double_size);
+ if (err) {
+ double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ *p_linear_size;
+ double_size *= profile->kvd_hash_double_parts;
+ double_size /= profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts;
+ *p_double_size = rounddown(double_size,
+ profile->kvd_hash_granularity);
+ }
+
+ err = devlink_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ p_single_size);
+ if (err)
+ *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ *p_double_size - *p_linear_size;
+
+ /* Check that the results are valid. */
+ if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+ *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
+ return -EIO;
+
+ return 0;
+}
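
For concreteness, the hash-split arithmetic in mlxsw_sp_kvd_sizes_get can be replayed in isolation. A minimal sketch with assumed numbers (the real ratio, granularity, and sizes come from mlxsw_sp_config_profile and the firmware resources):

#include <stdio.h>

/* round x down to a multiple of g, like the kernel's rounddown() */
static unsigned int rounddown_u(unsigned int x, unsigned int g)
{
	return x - (x % g);
}

int main(void)
{
	/* assumed numbers, standing in for firmware/profile values */
	unsigned int kvd_size = 245760, linear = 98304;
	unsigned int double_parts = 2, single_parts = 1, granularity = 128;

	/* hash area = total minus linear; split by the parts ratio */
	unsigned int hash = kvd_size - linear;
	unsigned int dbl = hash * double_parts / (double_parts + single_parts);

	dbl = rounddown_u(dbl, granularity);
	printf("double=%u single=%u\n", dbl, hash - dbl);	/* 98304, 49152 */
	return 0;
}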
+
static struct mlxsw_driver mlxsw_sp_driver = {
.kind = mlxsw_sp_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
@@ -3952,6 +4384,8 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp_resources_register,
+ .kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp_config_profile,
};
@@ -4300,6 +4734,7 @@ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ u16 vid = 1;
int err;
err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
@@ -4312,8 +4747,19 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
true, false);
if (err)
goto err_port_vlan_set;
+
+ for (; vid <= VLAN_N_VID - 1; vid++) {
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
+ vid, false);
+ if (err)
+ goto err_vid_learning_set;
+ }
+
return 0;
+err_vid_learning_set:
+ for (vid--; vid >= 1; vid--)
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
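
The error path above follows the usual unwind-in-reverse idiom: if learning cannot be disabled for some vid, it is re-enabled for every vid already handled, in reverse order. A standalone toy illustration of the same control flow (apply/undo are stand-ins, and the failure point is arbitrary):

#include <stdio.h>

static int apply(int vid) { return vid == 3 ? -1 : 0; }	/* fail at vid 3 */

int main(void)
{
	int vid;

	for (vid = 1; vid <= 5; vid++)
		if (apply(vid))
			goto unwind;
	return 0;

unwind:
	for (vid--; vid >= 1; vid--)
		printf("undo vid %d\n", vid);	/* prints 2, then 1 */
	return 1;
}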
@@ -4323,6 +4769,12 @@ err_port_stp_set:
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ u16 vid;
+
+ for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
+ vid, true);
+
mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
false, false);
mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
@@ -4358,7 +4810,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
}
if (!info->linking)
break;
- if (netdev_has_any_upper_dev(upper_dev)) {
+ if (netdev_has_any_upper_dev(upper_dev) &&
+ (!netif_is_bridge_master(upper_dev) ||
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
+ upper_dev))) {
NL_SET_ERR_MSG(extack,
"spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
@@ -4486,6 +4941,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
@@ -4502,7 +4958,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
}
if (!info->linking)
break;
- if (netdev_has_any_upper_dev(upper_dev)) {
+ if (netdev_has_any_upper_dev(upper_dev) &&
+ (!netif_is_bridge_master(upper_dev) ||
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
+ upper_dev))) {
NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 432ab9b12b7f..bdd8f94a452c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -66,6 +66,18 @@
#define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128
+#define MLXSW_SP_RESOURCE_NAME_KVD "kvd"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
+
+enum mlxsw_sp_resource_id {
+ MLXSW_SP_RESOURCE_KVD,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+};
+
struct mlxsw_sp_port;
struct mlxsw_sp_rif;
@@ -204,29 +216,6 @@ struct mlxsw_sp_port_vlan {
struct list_head bridge_vlan_node;
};
-enum mlxsw_sp_qdisc_type {
- MLXSW_SP_QDISC_NO_QDISC,
- MLXSW_SP_QDISC_RED,
-};
-
-struct mlxsw_sp_qdisc {
- u32 handle;
- enum mlxsw_sp_qdisc_type type;
- struct red_stats xstats_base;
- union {
- struct {
- u64 tail_drop_base;
- u64 ecn_base;
- u64 wred_drop_base;
- } red;
- } xstats;
-
- u64 tx_bytes;
- u64 tx_packets;
- u64 drops;
- u64 overlimits;
-};
-
/* No need for an internal lock; at worst we miss a single periodic iteration */
struct mlxsw_sp_port_xstats {
u64 ecn;
@@ -269,7 +258,10 @@ struct mlxsw_sp_port {
} periodic_hw_stats;
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
- struct mlxsw_sp_qdisc root_qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc;
+ unsigned int acl_rule_count;
+ struct mlxsw_sp_acl_block *ing_acl_block;
+ struct mlxsw_sp_acl_block *eg_acl_block;
};
static inline bool
@@ -365,6 +357,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev);
+bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev);
/* spectrum.c */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -402,6 +396,16 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ struct mlxsw_sp_port *to,
+ enum mlxsw_sp_span_type type,
+ bool bind);
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from,
+ u8 destination_port,
+ enum mlxsw_sp_span_type type,
+ bool bind);
+struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port);
/* spectrum_dcb.c */
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -456,13 +460,13 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
+u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
unsigned int counter_index;
- bool counter_valid;
};
enum mlxsw_sp_acl_profile {
@@ -475,8 +479,11 @@ struct mlxsw_sp_acl_profile_ops {
void *priv, void *ruleset_priv);
void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
- struct net_device *dev, bool ingress);
- void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+ void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
u16 (*ruleset_group_id)(void *ruleset_priv);
size_t rule_priv_size;
int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
@@ -496,17 +503,34 @@ struct mlxsw_sp_acl_ops {
enum mlxsw_sp_acl_profile profile);
};
+struct mlxsw_sp_acl_block;
struct mlxsw_sp_acl_ruleset;
/* spectrum_acl.c */
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block);
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net);
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress);
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset);
@@ -530,6 +554,10 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
u16 group_id);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_acl_block *block,
+ struct net_device *out_dev);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct net_device *out_dev);
@@ -573,16 +601,23 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
/* spectrum_flower.c */
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f);
/* spectrum_qdisc.c */
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p);
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p);
/* spectrum_fid.c */
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 93dcd315f7d6..0897a5435cc2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -39,6 +39,7 @@
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
#include "reg.h"
@@ -70,9 +71,23 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
return acl->afk;
}
-struct mlxsw_sp_acl_ruleset_ht_key {
- struct net_device *dev; /* dev this ruleset is bound to */
+struct mlxsw_sp_acl_block_binding {
+ struct list_head list;
+ struct net_device *dev;
+ struct mlxsw_sp_port *mlxsw_sp_port;
bool ingress;
+};
+
+struct mlxsw_sp_acl_block {
+ struct list_head binding_list;
+ struct mlxsw_sp_acl_ruleset *ruleset_zero;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned int rule_count;
+ unsigned int disable_count;
+};
+
+struct mlxsw_sp_acl_ruleset_ht_key {
+ struct mlxsw_sp_acl_block *block;
u32 chain_index;
const struct mlxsw_sp_acl_profile_ops *ops;
};
@@ -118,8 +133,185 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
return mlxsw_sp->acl->dummy_fid;
}
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
+{
+ return block->mlxsw_sp;
+}
+
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block)
+{
+ return block ? block->rule_count : 0;
+}
+
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
+{
+ if (block)
+ block->disable_count++;
+}
+
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
+{
+ if (block)
+ block->disable_count--;
+}
+
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
+{
+ return block->disable_count;
+}
+
+static int
+mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_acl_block_binding *binding)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
+ binding->mlxsw_sp_port, binding->ingress);
+}
+
+static void
+mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_acl_block_binding *binding)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+ ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
+ binding->mlxsw_sp_port, binding->ingress);
+}
+
+static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block)
+{
+ return block->ruleset_zero;
+}
+
+static int
+mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ struct mlxsw_sp_acl_block *block)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+ int err;
+
+ block->ruleset_zero = ruleset;
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+ block->ruleset_zero = NULL;
+
+ return err;
+}
+
+static void
+mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset,
+ struct mlxsw_sp_acl_block *block)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+ block->ruleset_zero = NULL;
+}
+
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+ struct net *net)
+{
+ struct mlxsw_sp_acl_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+ INIT_LIST_HEAD(&block->binding_list);
+ block->mlxsw_sp = mlxsw_sp;
+ return block;
+}
+
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
+{
+ WARN_ON(!list_empty(&block->binding_list));
+ kfree(block);
+}
+
+static struct mlxsw_sp_acl_block_binding *
+mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->mlxsw_sp_port == mlxsw_sp_port &&
+ binding->ingress == ingress)
+ return binding;
+ return NULL;
+}
+
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+ int err;
+
+ if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
+ return -EEXIST;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding)
+ return -ENOMEM;
+ binding->mlxsw_sp_port = mlxsw_sp_port;
+ binding->ingress = ingress;
+
+ if (mlxsw_sp_acl_ruleset_block_bound(block)) {
+ err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+ if (err)
+ goto err_ruleset_bind;
+ }
+
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_ruleset_bind:
+ kfree(binding);
+ return err;
+}
+
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+
+ binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ if (mlxsw_sp_acl_ruleset_block_bound(block))
+ mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+
+ kfree(binding);
+ return 0;
+}
+
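The block structure above decouples rulesets from individual ports: one block carries a list of (port, direction) bindings, and binding a ruleset to the block fans it out to every binding. A self-contained toy model of just the bookkeeping (a fixed-size array instead of a list_head, error codes simplified):

#include <stdbool.h>
#include <stdio.h>

/* toy model of the shared ACL block: one block, several (port, ingress)
 * bindings, duplicate bindings rejected as in mlxsw_sp_acl_block_bind()
 */
struct binding { int port; bool ingress; bool used; };
struct block { struct binding b[8]; };

static struct binding *block_lookup(struct block *blk, int port, bool ingress)
{
	for (int i = 0; i < 8; i++)
		if (blk->b[i].used && blk->b[i].port == port &&
		    blk->b[i].ingress == ingress)
			return &blk->b[i];
	return NULL;
}

static int block_bind(struct block *blk, int port, bool ingress)
{
	if (block_lookup(blk, port, ingress))
		return -1;	/* -EEXIST in the driver */
	for (int i = 0; i < 8; i++)
		if (!blk->b[i].used) {
			blk->b[i] = (struct binding){ port, ingress, true };
			return 0;
		}
	return -1;	/* table full; the driver allocates instead */
}

int main(void)
{
	struct block blk = { 0 };

	/* one block shared by two ingress ports, as with shared tc blocks */
	block_bind(&blk, 1, true);
	block_bind(&blk, 2, true);
	printf("port 1 bound: %d\n", block_lookup(&blk, 1, true) != NULL);
	return 0;
}
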
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops)
{
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
@@ -132,6 +324,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (!ruleset)
return ERR_PTR(-ENOMEM);
ruleset->ref_count = 1;
+ ruleset->ht_key.block = block;
+ ruleset->ht_key.chain_index = chain_index;
ruleset->ht_key.ops = ops;
err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
@@ -142,68 +336,50 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_ops_ruleset_add;
- return ruleset;
-
-err_ops_ruleset_add:
- rhashtable_destroy(&ruleset->rule_ht);
-err_rhashtable_init:
- kfree(ruleset);
- return ERR_PTR(err);
-}
-
-static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset)
-{
- const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
-
- ops->ruleset_del(mlxsw_sp, ruleset->priv);
- rhashtable_destroy(&ruleset->rule_ht);
- kfree(ruleset);
-}
-
-static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset,
- struct net_device *dev, bool ingress,
- u32 chain_index)
-{
- const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
- struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- int err;
-
- ruleset->ht_key.dev = dev;
- ruleset->ht_key.ingress = ingress;
- ruleset->ht_key.chain_index = chain_index;
err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
if (err)
- return err;
- if (!ruleset->ht_key.chain_index) {
+ goto err_ht_insert;
+
+ if (!chain_index) {
/* Only the ruleset with chain index 0, the implicit one, needs
 * to be directly bound to the device. The rest of the rulesets
 * are bound by the "goto" action set.
 */
- err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
+ err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
if (err)
- goto err_ops_ruleset_bind;
+ goto err_ruleset_bind;
}
- return 0;
-err_ops_ruleset_bind:
+ return ruleset;
+
+err_ruleset_bind:
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
- return err;
+err_ht_insert:
+ ops->ruleset_del(mlxsw_sp, ruleset->priv);
+err_ops_ruleset_add:
+ rhashtable_destroy(&ruleset->rule_ht);
+err_rhashtable_init:
+ kfree(ruleset);
+ return ERR_PTR(err);
}
-static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_ruleset *ruleset)
+static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ruleset *ruleset)
{
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+ u32 chain_index = ruleset->ht_key.chain_index;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- if (!ruleset->ht_key.chain_index)
- ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
+ if (!chain_index)
+ mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
+ ops->ruleset_del(mlxsw_sp, ruleset->priv);
+ rhashtable_destroy(&ruleset->rule_ht);
+ kfree(ruleset);
}
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
@@ -216,20 +392,18 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
{
if (--ruleset->ref_count)
return;
- mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
static struct mlxsw_sp_acl_ruleset *
-__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
- bool ingress, u32 chain_index,
+__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
const struct mlxsw_sp_acl_profile_ops *ops)
{
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
memset(&ht_key, 0, sizeof(ht_key));
- ht_key.dev = dev;
- ht_key.ingress = ingress;
+ ht_key.block = block;
ht_key.chain_index = chain_index;
ht_key.ops = ops;
return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
@@ -237,8 +411,8 @@ __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
}
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
@@ -248,45 +422,31 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
ops = acl->ops->profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
- ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
- chain_index, ops);
+ ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
if (!ruleset)
return ERR_PTR(-ENOENT);
return ruleset;
}
struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
- bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block, u32 chain_index,
enum mlxsw_sp_acl_profile profile)
{
const struct mlxsw_sp_acl_profile_ops *ops;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
struct mlxsw_sp_acl_ruleset *ruleset;
- int err;
ops = acl->ops->profile_ops(mlxsw_sp, profile);
if (!ops)
return ERR_PTR(-EINVAL);
- ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
- chain_index, ops);
+ ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
if (ruleset) {
mlxsw_sp_acl_ruleset_ref_inc(ruleset);
return ruleset;
}
- ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
- if (IS_ERR(ruleset))
- return ruleset;
- err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev,
- ingress, chain_index);
- if (err)
- goto err_ruleset_bind;
- return ruleset;
-
-err_ruleset_bind:
- mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
- return ERR_PTR(err);
+ return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops);
}
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
@@ -302,27 +462,6 @@ u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
return ops->ruleset_group_id(ruleset->priv);
}
-static int
-mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_rule_info *rulei)
-{
- int err;
-
- err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
- if (err)
- return err;
- rulei->counter_valid = true;
- return 0;
-}
-
-static void
-mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_rule_info *rulei)
-{
- rulei->counter_valid = false;
- mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
-}
-
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
@@ -427,6 +566,34 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
local_port, in_port);
}
+int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_acl_block *block,
+ struct net_device *out_dev)
+{
+ struct mlxsw_sp_acl_block_binding *binding;
+ struct mlxsw_sp_port *out_port;
+ struct mlxsw_sp_port *in_port;
+
+ if (!list_is_singular(&block->binding_list))
+ return -EOPNOTSUPP;
+
+ binding = list_first_entry(&block->binding_list,
+ struct mlxsw_sp_acl_block_binding, list);
+ in_port = binding->mlxsw_sp_port;
+ if (!mlxsw_sp_port_dev_check(out_dev))
+ return -EINVAL;
+
+ out_port = netdev_priv(out_dev);
+ if (out_port->mlxsw_sp != mlxsw_sp)
+ return -EINVAL;
+
+ return mlxsw_afa_block_append_mirror(rulei->act_block,
+ in_port->local_port,
+ out_port->local_port,
+ binding->ingress);
+}
+
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 action, u16 vid, u16 proto, u8 prio)
@@ -459,7 +626,7 @@ int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei)
{
return mlxsw_afa_block_append_counter(rulei->act_block,
- rulei->counter_index);
+ &rulei->counter_index);
}
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
@@ -493,13 +660,8 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
goto err_rulei_create;
}
- err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
- if (err)
- goto err_counter_alloc;
return rule;
-err_counter_alloc:
- mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
kfree(rule);
err_alloc:
@@ -512,7 +674,6 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
- mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
mlxsw_sp_acl_rulei_destroy(rule->rulei);
kfree(rule);
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
@@ -535,6 +696,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
goto err_rhashtable_insert;
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+ ruleset->ht_key.block->rule_count++;
return 0;
err_rhashtable_insert:
@@ -548,6 +710,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ ruleset->ht_key.block->rule_count--;
list_del(&rule->list);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 4d3340ed0291..6ca6894125f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -108,11 +108,77 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}
+static int
+mlxsw_sp_act_counter_index_get(void *priv, unsigned int *p_counter_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ return mlxsw_sp_flow_counter_alloc(mlxsw_sp, p_counter_index);
+}
+
+static void
+mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
+}
+
+static int
+mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress, int *p_span_id)
+{
+ struct mlxsw_sp_port *in_port, *out_port;
+ struct mlxsw_sp_span_entry *span_entry;
+ struct mlxsw_sp *mlxsw_sp = priv;
+ enum mlxsw_sp_span_type type;
+ int err;
+
+ type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+ out_port = mlxsw_sp->ports[local_out_port];
+ in_port = mlxsw_sp->ports[local_in_port];
+
+ err = mlxsw_sp_span_mirror_add(in_port, out_port, type, false);
+ if (err)
+ return err;
+
+ span_entry = mlxsw_sp_span_entry_find(mlxsw_sp, local_out_port);
+ if (!span_entry) {
+ err = -ENOENT;
+ goto err_span_entry_find;
+ }
+
+ *p_span_id = span_entry->id;
+ return 0;
+
+err_span_entry_find:
+ mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
+ return err;
+}
+
+static void
+mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port,
+ bool ingress)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *in_port;
+ enum mlxsw_sp_span_type type;
+
+ type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+ in_port = mlxsw_sp->ports[local_in_port];
+
+ mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
+}
+
static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
.kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
.kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
.kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
+ .counter_index_get = mlxsw_sp_act_counter_index_get,
+ .counter_index_put = mlxsw_sp_act_counter_index_put,
+ .mirror_add = mlxsw_sp_act_mirror_add,
+ .mirror_del = mlxsw_sp_act_mirror_del,
};
int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 7e8284b46968..c6e180c2be1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -154,10 +154,6 @@ struct mlxsw_sp_acl_tcam_group {
struct list_head region_list;
unsigned int region_count;
struct rhashtable chunk_ht;
- struct {
- u16 local_port;
- bool ingress;
- } bound;
struct mlxsw_sp_acl_tcam_group_ops *ops;
const struct mlxsw_sp_acl_tcam_pattern *patterns;
unsigned int patterns_count;
@@ -262,35 +258,29 @@ static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_group *group,
- struct net_device *dev, bool ingress)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
- struct mlxsw_sp_port *mlxsw_sp_port;
char ppbt_pl[MLXSW_REG_PPBT_LEN];
- if (!mlxsw_sp_port_dev_check(dev))
- return -EINVAL;
-
- mlxsw_sp_port = netdev_priv(dev);
- group->bound.local_port = mlxsw_sp_port->local_port;
- group->bound.ingress = ingress;
- mlxsw_reg_ppbt_pack(ppbt_pl,
- group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
- MLXSW_REG_PXBT_E_EACL,
- MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
+ mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+ MLXSW_REG_PXBT_E_EACL,
+ MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
group->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam_group *group)
+ struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
char ppbt_pl[MLXSW_REG_PPBT_LEN];
- mlxsw_reg_ppbt_pack(ppbt_pl,
- group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
- MLXSW_REG_PXBT_E_EACL,
- MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
+ mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+ MLXSW_REG_PXBT_E_EACL,
+ MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
group->id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
@@ -1056,21 +1046,25 @@ mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
void *ruleset_priv,
- struct net_device *dev, bool ingress)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
- dev, ingress);
+ mlxsw_sp_port, ingress);
}
static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
- void *ruleset_priv)
+ void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
{
struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
- mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
+ mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
+ mlxsw_sp_port, ingress);
}
static u16
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 96fdba78acab..f56fa18d6b26 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -771,14 +771,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
.size_get = mlxsw_sp_dpipe_table_host4_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4 1
+
static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
- &mlxsw_sp_host4_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ &mlxsw_sp_host4_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+ return err;
}
static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp)
@@ -829,14 +848,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
.size_get = mlxsw_sp_dpipe_table_host6_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6 2
+
static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
- &mlxsw_sp_host6_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ &mlxsw_sp_host6_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+ return err;
}
static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
@@ -1213,14 +1251,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
.size_get = mlxsw_sp_dpipe_table_adj_size_get,
};
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ 1
+
static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
- &mlxsw_sp_dpipe_table_adj_ops,
- mlxsw_sp, false);
+ err = devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ &mlxsw_sp_dpipe_table_adj_ops,
+ mlxsw_sp, false);
+ if (err)
+ return err;
+
+ err = devlink_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
+ if (err)
+ goto err_resource_set;
+
+ return 0;
+
+err_resource_set:
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+ return err;
}
static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 2f0e57857ea4..6ce00e28d4ea 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
+#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
@@ -45,7 +46,7 @@
#include "core_acl_flex_keys.h"
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
- struct net_device *dev, bool ingress,
+ struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct tcf_exts *exts)
{
@@ -80,8 +81,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset;
u16 group_id;
- ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev,
- ingress,
+ ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
@@ -92,7 +92,6 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
} else if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
struct mlxsw_sp_fid *fid;
u16 fid_index;
@@ -104,14 +103,18 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- out_dev = __dev_get_by_index(dev_net(dev), ifindex);
- if (out_dev == dev)
- out_dev = NULL;
-
+ out_dev = tcf_mirred_dev(a);
err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
out_dev);
if (err)
return err;
+ } else if (is_tcf_mirred_egress_mirror(a)) {
+ struct net_device *out_dev = tcf_mirred_dev(a);
+
+ err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
+ block, out_dev);
+ if (err)
+ return err;
} else if (is_tcf_vlan(a)) {
u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
u32 action = tcf_vlan_action(a);
@@ -266,7 +269,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
- struct net_device *dev, bool ingress,
+ struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
struct tc_cls_flower_offload *f)
{
@@ -384,21 +387,19 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress,
- rulei, f->exts);
+ return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts);
}
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_acl_rule_info *rulei;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
int err;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
@@ -411,7 +412,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
}
rulei = mlxsw_sp_acl_rule_rulei(rule);
- err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f);
+ err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
if (err)
goto err_flower_parse;
@@ -435,15 +436,15 @@ err_rule_create:
return err;
}
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
- ingress, f->common.chain_index,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (IS_ERR(ruleset))
return;
@@ -457,10 +458,10 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_block *block,
struct tc_cls_flower_offload *f)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
u64 packets;
@@ -468,8 +469,8 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
u64 bytes;
int err;
- ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
- ingress, f->common.chain_index,
+ ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+ f->common.chain_index,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (WARN_ON(IS_ERR(ruleset)))
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 310c38247b5c..55f9d2d70f9e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -286,6 +286,32 @@ static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
}
+static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
+{
+ unsigned int nr_entries;
+ int bit = -1;
+ u64 occ = 0;
+
+ nr_entries = (part->info->end_index -
+ part->info->start_index + 1) /
+ part->info->alloc_size;
+ while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
+ < nr_entries)
+ occ += part->info->alloc_size;
+ return occ;
+}
+
+u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_kvdl_part *part;
+ u64 occ = 0;
+
+ list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list)
+ occ += mlxsw_sp_kvdl_part_occ(part);
+
+ return occ;
+}
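
mlxsw_sp_kvdl_part_occ above walks the part's usage bitmap and charges alloc_size entries for every set bit. The same computation with a plain loop standing in for find_next_bit (a sketch; the bitmap here is a single word):

#include <stdio.h>

int main(void)
{
	unsigned long usage = 0xb;		/* bits 0, 1, 3 set */
	unsigned int alloc_size = 32, nr_entries = 8;
	unsigned long long occ = 0;

	/* each set bit accounts for one allocation of alloc_size entries */
	for (unsigned int bit = 0; bit < nr_entries; bit++)
		if (usage & (1UL << bit))
			occ += alloc_size;

	printf("occ = %llu entries\n", occ);	/* 96 */
	return 0;
}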
+
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 34a0b632e5dd..4c7f32d4288d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -243,7 +243,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
if (!afa_block)
return ERR_PTR(-ENOMEM);
- err = mlxsw_afa_block_append_counter(afa_block, counter_index);
+ err = mlxsw_afa_block_append_allocated_counter(afa_block,
+ counter_index);
if (err)
goto err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index c33beac5def0..0b7670459051 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -41,12 +41,157 @@
#include "spectrum.h"
#include "reg.h"
+#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1)
+
+enum mlxsw_sp_qdisc_type {
+ MLXSW_SP_QDISC_NO_QDISC,
+ MLXSW_SP_QDISC_RED,
+ MLXSW_SP_QDISC_PRIO,
+};
+
+struct mlxsw_sp_qdisc_ops {
+ enum mlxsw_sp_qdisc_type type;
+ int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params);
+ int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+ int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+ int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr);
+ int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr);
+ void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+ /* unoffload - to be used for a qdisc that stops being offloaded without
+ * being destroyed.
+ */
+ void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+};
+
+struct mlxsw_sp_qdisc {
+ u32 handle;
+ u8 tclass_num;
+ union {
+ struct red_stats red;
+ } xstats_base;
+ struct mlxsw_sp_qdisc_stats {
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 drops;
+ u64 overlimits;
+ u64 backlog;
+ } stats_base;
+
+ struct mlxsw_sp_qdisc_ops *ops;
+};
+
+static bool
+mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
+ enum mlxsw_sp_qdisc_type type)
+{
+ return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->type == type &&
+ mlxsw_sp_qdisc->handle == handle;
+}
+
+static int
+mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ int err = 0;
+
+ if (!mlxsw_sp_qdisc)
+ return 0;
+
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
+ err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
+ mlxsw_sp_qdisc->ops = NULL;
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_ops *ops, void *params)
+{
+ int err;
+
+ if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
+ /* In case this location contained a qdisc of the same type,
+ * we can override the old qdisc configuration in place.
+ * Otherwise, we need to remove the old qdisc before setting
+ * the new one.
+ */
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ if (err)
+ goto err_bad_param;
+
+ err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ if (err)
+ goto err_config;
+
+ if (mlxsw_sp_qdisc->handle != handle) {
+ mlxsw_sp_qdisc->ops = ops;
+ if (ops->clean_stats)
+ ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
+ }
+
+ mlxsw_sp_qdisc->handle = handle;
+ return 0;
+
+err_bad_param:
+err_config:
+ if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
+ ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ return err;
+}
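
The replace path above is the heart of the new ops dispatch: validate the parameters, apply them, and reset the statistics baseline only when the handle changed, i.e. when this is genuinely a new qdisc rather than a reconfiguration. A compact standalone model of that control flow, with toy check/apply callbacks:

#include <stdio.h>

struct qdisc;
struct ops {
	int (*check)(int param);
	int (*apply)(struct qdisc *q, int param);
};
struct qdisc { unsigned int handle; const struct ops *ops; };

static int replace(struct qdisc *q, unsigned int handle,
		   const struct ops *ops, int param)
{
	int err = ops->check(param);	/* validate before touching state */

	if (err)
		return err;
	err = ops->apply(q, param);
	if (err)
		return err;
	if (q->handle != handle)	/* new qdisc, not a reconfiguration */
		printf("new qdisc: reset stats baseline\n");
	q->handle = handle;
	q->ops = ops;
	return 0;
}

static int check_ok(int p) { return p < 0 ? -1 : 0; }
static int apply_ok(struct qdisc *q, int p) { (void)q; (void)p; return 0; }

int main(void)
{
	static const struct ops toy = { check_ok, apply_ok };
	struct qdisc q = { 0 };

	return replace(&q, 0x10000, &toy, 5);
}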
+
+static int
+mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->get_stats)
+ return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ stats_ptr);
+
+ return -EOPNOTSUPP;
+}
+
+static int
+mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *xstats_ptr)
+{
+ if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+ mlxsw_sp_qdisc->ops->get_xstats)
+ return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ xstats_ptr);
+
+ return -EOPNOTSUPP;
+}
+
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
u32 probability, bool is_ecn)
{
- char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)];
+ char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
+ char cwtp_cmd[MLXSW_REG_CWTP_LEN];
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
int err;
@@ -60,10 +205,10 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num,
+ mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
static int
@@ -79,80 +224,78 @@ mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
}
static void
-mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num)
+mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct rtnl_link_stats64 *stats;
+ struct red_stats *red_base;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+ red_base = &mlxsw_sp_qdisc->xstats_base.red;
- mlxsw_sp_qdisc->tx_packets = stats->tx_packets;
- mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes;
+ stats_base->tx_packets = stats->tx_packets;
+ stats_base->tx_bytes = stats->tx_bytes;
- switch (mlxsw_sp_qdisc->type) {
- case MLXSW_SP_QDISC_RED:
- xstats_base->prob_mark = xstats->ecn;
- xstats_base->prob_drop = xstats->wred_drop[tclass_num];
- xstats_base->pdrop = xstats->tail_drop[tclass_num];
+ red_base->prob_mark = xstats->ecn;
+ red_base->prob_drop = xstats->wred_drop[tclass_num];
+ red_base->pdrop = xstats->tail_drop[tclass_num];
- mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop +
- xstats_base->prob_mark;
- mlxsw_sp_qdisc->drops = xstats_base->prob_drop +
- xstats_base->pdrop;
- break;
- default:
- break;
- }
+ stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
+ stats_base->drops = red_base->prob_drop + red_base->pdrop;
+
+ stats_base->backlog = 0;
}
static int
-mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num)
+mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- int err;
-
- if (mlxsw_sp_qdisc->handle != handle)
- return 0;
-
- err = mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
- mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
- mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC;
-
- return err;
+ return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
+ mlxsw_sp_qdisc->tclass_num);
}
static int
-mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num,
- struct tc_red_qopt_offload_params *p)
+mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u32 min, max;
- u64 prob;
- int err = 0;
+ struct tc_red_qopt_offload_params *p = params;
if (p->min > p->max) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: min %u is bigger then max %u\n", p->min,
p->max);
- goto err_bad_param;
+ return -EINVAL;
}
if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: max value %u is too big\n", p->max);
- goto err_bad_param;
+ return -EINVAL;
}
if (p->min == 0 || p->max == 0) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: 0 value is illegal for min and max\n");
- goto err_bad_param;
+ return -EINVAL;
}
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct tc_red_qopt_offload_params *p = params;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ u32 min, max;
+ u64 prob;
/* calculate probability in percentage */
prob = p->probability;
@@ -161,116 +304,309 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
prob = DIV_ROUND_UP(prob, 1 << 16);
min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
- err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
- max, prob, p->is_ecn);
- if (err)
- goto err_config;
-
- mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED;
- if (mlxsw_sp_qdisc->handle != handle)
- mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port,
- mlxsw_sp_qdisc,
- tclass_num);
+ return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
+ max, prob, p->is_ecn);
+}
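
The probability conversion a few lines up multiplies by 100 and divides (rounding up) by 1 << 16, which implies the offload parameter encodes the drop probability as a fixed-point fraction with 1 << 16 standing for 1.0 (100%). A one-line check of that reading:

#include <stdio.h>

int main(void)
{
	/* assumed encoding: 1 << 16 represents a probability of 1.0 */
	unsigned long long prob = 1 << 15;	/* 0.5 */

	prob *= 100;
	prob = (prob + (1 << 16) - 1) >> 16;	/* DIV_ROUND_UP(prob, 1 << 16) */
	printf("%llu%%\n", prob);		/* prints "50%" */
	return 0;
}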
- mlxsw_sp_qdisc->handle = handle;
- return 0;
+static void
+mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_red_qopt_offload_params *p = params;
+ u64 backlog;
-err_bad_param:
- err = -EINVAL;
-err_config:
- mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle,
- mlxsw_sp_qdisc, tclass_num);
- return err;
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ p->qstats->backlog -= backlog;
}
static int
-mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num, struct red_stats *res)
+ void *xstats_ptr)
{
- struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
-
- if (mlxsw_sp_qdisc->handle != handle ||
- mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
- return -EOPNOTSUPP;
+ struct red_stats *res = xstats_ptr;
+ int early_drops, marks, pdrops;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- res->prob_drop = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
- res->prob_mark = xstats->ecn - xstats_base->prob_mark;
- res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+ early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+ marks = xstats->ecn - xstats_base->prob_mark;
+ pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+
+ res->pdrop += pdrops;
+ res->prob_drop += early_drops;
+ res->prob_mark += marks;
+
+ xstats_base->pdrop += pdrops;
+ xstats_base->prob_drop += early_drops;
+ xstats_base->prob_mark += marks;
return 0;
}
static int
-mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- int tclass_num,
- struct tc_red_qopt_offload_stats *res)
+ struct tc_qopt_offload_stats *stats_ptr)
{
- u64 tx_bytes, tx_packets, overlimits, drops;
+ u64 tx_bytes, tx_packets, overlimits, drops, backlog;
+ u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+ struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct rtnl_link_stats64 *stats;
- if (mlxsw_sp_qdisc->handle != handle ||
- mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
- return -EOPNOTSUPP;
-
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
- tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes;
- tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets;
+ tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
+ tx_packets = stats->tx_packets - stats_base->tx_packets;
overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
- mlxsw_sp_qdisc->overlimits;
+ stats_base->overlimits;
drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
- mlxsw_sp_qdisc->drops;
-
- _bstats_update(res->bstats, tx_bytes, tx_packets);
- res->qstats->overlimits += overlimits;
- res->qstats->drops += drops;
- res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
- xstats->backlog[tclass_num]);
-
- mlxsw_sp_qdisc->drops += drops;
- mlxsw_sp_qdisc->overlimits += overlimits;
- mlxsw_sp_qdisc->tx_bytes += tx_bytes;
- mlxsw_sp_qdisc->tx_packets += tx_packets;
+ stats_base->drops;
+ backlog = xstats->backlog[tclass_num];
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->overlimits += overlimits;
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog +=
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ backlog) -
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ stats_base->backlog);
+
+ stats_base->backlog = backlog;
+ stats_base->drops += drops;
+ stats_base->overlimits += overlimits;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
return 0;
}
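
Both stats getters above use the same baseline idiom: hardware counters are monotonic and survive qdisc churn, so the driver snapshots them into stats_base when a qdisc is created (clean_stats) and on each read reports only the increment since the previous read, advancing the baseline as it goes. A minimal sketch of the idiom:

#include <stdio.h>

struct baseline { unsigned long long tx_bytes; };

/* report the increment since the last read and advance the baseline;
 * the caller accumulates the increments into the qdisc's stats
 */
static unsigned long long report(struct baseline *base,
				 unsigned long long hw_tx_bytes)
{
	unsigned long long delta = hw_tx_bytes - base->tx_bytes;

	base->tx_bytes = hw_tx_bytes;
	return delta;
}

int main(void)
{
	struct baseline base = { 1000 };	/* snapshot at qdisc create */

	printf("%llu\n", report(&base, 1500));	/* 500 since creation */
	printf("%llu\n", report(&base, 1700));	/* 200 more */
	return 0;
}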
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
+ .type = MLXSW_SP_QDISC_RED,
+ .check_params = mlxsw_sp_qdisc_red_check_params,
+ .replace = mlxsw_sp_qdisc_red_replace,
+ .unoffload = mlxsw_sp_qdisc_red_unoffload,
+ .destroy = mlxsw_sp_qdisc_red_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_red_stats,
+ .get_xstats = mlxsw_sp_qdisc_get_red_xstats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
+};
+
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- int tclass_num;
if (p->parent != TC_H_ROOT)
return -EOPNOTSUPP;
- mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc;
- tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+ mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
+
+ if (p->command == TC_RED_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_red,
+ &p->set);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_RED))
+ return -EOPNOTSUPP;
switch (p->command) {
- case TC_RED_REPLACE:
- return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- &p->set);
case TC_RED_DESTROY:
- return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num);
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
case TC_RED_XSTATS:
- return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- p->xstats);
+ return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->xstats);
case TC_RED_STATS:
- return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle,
- mlxsw_sp_qdisc, tclass_num,
- &p->stats);
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
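
The rewrite above replaces per-type entry points with an ops table bound to the root qdisc: TC_RED_REPLACE goes through the generic mlxsw_sp_qdisc_replace(), and every other command is dispatched only after mlxsw_sp_qdisc_compare() confirms the handle and type. A standalone sketch of the same dispatch shape, with illustrative types (not the driver's):

#include <errno.h>
#include <stdio.h>

enum cmd { CMD_REPLACE, CMD_DESTROY, CMD_STATS };

struct qdisc;
struct qdisc_ops {
	int type;
	int (*destroy)(struct qdisc *q);
	int (*get_stats)(struct qdisc *q);
};

struct qdisc {
	unsigned int handle;
	const struct qdisc_ops *ops;
};

static int red_destroy(struct qdisc *q)   { (void)q; puts("destroy"); return 0; }
static int red_get_stats(struct qdisc *q) { (void)q; puts("stats");   return 0; }

static const struct qdisc_ops red_ops = {
	.type = 1, .destroy = red_destroy, .get_stats = red_get_stats,
};

static int setup(struct qdisc *root, enum cmd cmd, unsigned int handle)
{
	if (cmd == CMD_REPLACE) {
		root->handle = handle;	/* replace binds handle and ops */
		root->ops = &red_ops;
		return 0;
	}
	if (!root->ops || root->handle != handle)
		return -EOPNOTSUPP;	/* not offloaded under this handle */
	switch (cmd) {
	case CMD_DESTROY:
		return root->ops->destroy(root);
	case CMD_STATS:
		return root->ops->get_stats(root);
	default:
		return -EOPNOTSUPP;
	}
}

int main(void)
{
	struct qdisc root = { 0 };

	setup(&root, CMD_REPLACE, 0x8001);
	setup(&root, CMD_STATS, 0x8001);
	return setup(&root, CMD_DESTROY, 0x8001);
}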
+
+static int
+mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ int i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
+ MLXSW_SP_PORT_DEFAULT_TCLASS);
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+
+ if (p->bands > IEEE_8021QAZ_MAX_TCS)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+ int tclass, i;
+ int err;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->priomap[i]);
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, tclass);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ struct tc_prio_qopt_offload_params *p = params;
+ u64 backlog;
+
+ backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_qdisc->stats_base.backlog);
+ p->qstats->backlog -= backlog;
+}
+
+static int
+mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
+ struct mlxsw_sp_qdisc_stats *stats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+ int i;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+
+ tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
+ tx_packets = stats->tx_packets - stats_base->tx_packets;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ drops += xstats->tail_drop[i];
+ backlog += xstats->backlog[i];
+ }
+ drops = drops - stats_base->drops;
+
+ _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+ stats_ptr->qstats->drops += drops;
+ stats_ptr->qstats->backlog +=
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ backlog) -
+ mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ stats_base->backlog);
+ stats_base->backlog = backlog;
+ stats_base->drops += drops;
+ stats_base->tx_bytes += tx_bytes;
+ stats_base->tx_packets += tx_packets;
+ return 0;
+}
+
+static void
+mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc_stats *stats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+ int i;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+ stats_base = &mlxsw_sp_qdisc->stats_base;
+
+ stats_base->tx_packets = stats->tx_packets;
+ stats_base->tx_bytes = stats->tx_bytes;
+
+ stats_base->drops = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ stats_base->drops += xstats->tail_drop[i];
+
+ mlxsw_sp_qdisc->stats_base.backlog = 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
+ .type = MLXSW_SP_QDISC_PRIO,
+ .check_params = mlxsw_sp_qdisc_prio_check_params,
+ .replace = mlxsw_sp_qdisc_prio_replace,
+ .unoffload = mlxsw_sp_qdisc_prio_unoffload,
+ .destroy = mlxsw_sp_qdisc_prio_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_prio_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+};
+
+int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ if (p->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
+ if (p->command == TC_PRIO_REPLACE)
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_prio,
+ &p->replace_params);
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_PRIO))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_PRIO_DESTROY:
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+ case TC_PRIO_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
default:
return -EOPNOTSUPP;
}
}
+
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->root_qdisc)
+ return -ENOMEM;
+
+ mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+
+ return 0;
+}
+
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->root_qdisc);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 72ef4f8025f0..f0b25baba09a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -71,6 +71,7 @@
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
+struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;
@@ -84,6 +85,8 @@ struct mlxsw_sp_router {
struct rhashtable nexthop_ht;
struct list_head nexthop_list;
struct {
+ /* One tree for each protocol: IPv4 and IPv6 */
+ struct mlxsw_sp_lpm_tree *proto_trees[2];
struct mlxsw_sp_lpm_tree *trees;
unsigned int tree_count;
} lpm;
@@ -162,6 +165,15 @@ struct mlxsw_sp_rif_ops {
struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};
+static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
+static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree);
+static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fib *fib,
+ u8 tree_id);
+static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fib *fib);
+
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
@@ -349,14 +361,6 @@ mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}
-static bool
-mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
-{
- struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
-
- return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
-}
-
static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
struct mlxsw_sp_prefix_usage *prefix_usage2)
@@ -398,7 +402,6 @@ enum mlxsw_sp_fib_entry_type {
};
struct mlxsw_sp_nexthop_group;
-struct mlxsw_sp_fib;
struct mlxsw_sp_fib_node {
struct list_head entry_list;
@@ -445,6 +448,7 @@ struct mlxsw_sp_lpm_tree {
u8 id; /* tree ID */
unsigned int ref_count;
enum mlxsw_sp_l3proto proto;
+ unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
};
@@ -453,8 +457,6 @@ struct mlxsw_sp_fib {
struct list_head node_list;
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_lpm_tree *lpm_tree;
- unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
- struct mlxsw_sp_prefix_usage prefix_usage;
enum mlxsw_sp_l3proto proto;
};
@@ -469,12 +471,15 @@ struct mlxsw_sp_vr {
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
-static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
+static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_fib *fib;
int err;
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
fib = kzalloc(sizeof(*fib), GFP_KERNEL);
if (!fib)
return ERR_PTR(-ENOMEM);
@@ -484,17 +489,26 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
INIT_LIST_HEAD(&fib->node_list);
fib->proto = proto;
fib->vr = vr;
+ fib->lpm_tree = lpm_tree;
+ mlxsw_sp_lpm_tree_hold(lpm_tree);
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
+ if (err)
+ goto err_lpm_tree_bind;
return fib;
+err_lpm_tree_bind:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
kfree(fib);
return ERR_PTR(err);
}
-static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
+static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib *fib)
{
+ mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
WARN_ON(!list_empty(&fib->node_list));
- WARN_ON(fib->lpm_tree);
rhashtable_destroy(&fib->ht);
kfree(fib);
}
@@ -581,6 +595,9 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
goto err_left_struct_set;
memcpy(&lpm_tree->prefix_usage, prefix_usage,
sizeof(lpm_tree->prefix_usage));
+ memset(&lpm_tree->prefix_ref_count, 0,
+ sizeof(lpm_tree->prefix_ref_count));
+ lpm_tree->ref_count = 1;
return lpm_tree;
err_left_struct_set:
@@ -607,8 +624,10 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
if (lpm_tree->ref_count != 0 &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
- prefix_usage))
+ prefix_usage)) {
+ mlxsw_sp_lpm_tree_hold(lpm_tree);
return lpm_tree;
+ }
}
return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}
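
mlxsw_sp_lpm_tree_get() above now returns a held reference in both branches: a matching tree gets its refcount bumped, and a newly created one starts at refcount 1, so every caller pairs the get with a put. A user-space sketch of that discipline over a small fixed table (illustrative structures only, not the driver's):

#include <stdio.h>

struct tree {
	int key;
	unsigned int ref_count;
};

#define MAX_TREES 4
static struct tree trees[MAX_TREES];

static struct tree *tree_get(int key)
{
	struct tree *t;
	int i;

	for (i = 0; i < MAX_TREES; i++) {
		t = &trees[i];
		if (t->ref_count && t->key == key) {
			t->ref_count++;		/* hold an existing tree */
			return t;
		}
	}
	for (i = 0; i < MAX_TREES; i++) {
		t = &trees[i];
		if (!t->ref_count) {
			t->key = key;
			t->ref_count = 1;	/* created with one reference */
			return t;
		}
	}
	return NULL;				/* table exhausted */
}

static void tree_put(struct tree *t)
{
	if (--t->ref_count == 0)
		t->key = 0;			/* slot becomes reusable */
}

int main(void)
{
	struct tree *a = tree_get(7);
	struct tree *b = tree_get(7);		/* same tree, refcount 2 */

	printf("shared=%d refs=%u\n", a == b, a->ref_count);
	tree_put(b);
	tree_put(a);
	return 0;
}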
@@ -629,9 +648,10 @@ static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
struct mlxsw_sp_lpm_tree *lpm_tree;
u64 max_trees;
- int i;
+ int err, i;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
return -EIO;
@@ -649,11 +669,42 @@ static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
}
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(lpm_tree)) {
+ err = PTR_ERR(lpm_tree);
+ goto err_ipv4_tree_get;
+ }
+ mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
+
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(lpm_tree)) {
+ err = PTR_ERR(lpm_tree);
+ goto err_ipv6_tree_get;
+ }
+ mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
+
return 0;
+
+err_ipv6_tree_get:
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+err_ipv4_tree_get:
+ kfree(mlxsw_sp->router->lpm.trees);
+ return err;
}
static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+
kfree(mlxsw_sp->router->lpm.trees);
}
@@ -745,10 +796,10 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
return ERR_PTR(-EBUSY);
}
- vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
+ vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr->fib4))
return ERR_CAST(vr->fib4);
- vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
+ vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
if (IS_ERR(vr->fib6)) {
err = PTR_ERR(vr->fib6);
goto err_fib6_create;
@@ -763,21 +814,22 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
return vr;
err_mr_table_create:
- mlxsw_sp_fib_destroy(vr->fib6);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
vr->fib6 = NULL;
err_fib6_create:
- mlxsw_sp_fib_destroy(vr->fib4);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
vr->fib4 = NULL;
return ERR_PTR(err);
}
-static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr)
{
mlxsw_sp_mr_table_destroy(vr->mr4_table);
vr->mr4_table = NULL;
- mlxsw_sp_fib_destroy(vr->fib6);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
vr->fib6 = NULL;
- mlxsw_sp_fib_destroy(vr->fib4);
+ mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
vr->fib4 = NULL;
}
@@ -793,12 +845,12 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
return vr;
}
-static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
+static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
list_empty(&vr->fib6->node_list) &&
mlxsw_sp_mr_table_empty(vr->mr4_table))
- mlxsw_sp_vr_destroy(vr);
+ mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}
static bool
@@ -809,7 +861,7 @@ mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
if (!mlxsw_sp_vr_is_used(vr))
return false;
- if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
+ if (fib->lpm_tree->id == tree_id)
return true;
return false;
}
@@ -821,27 +873,31 @@ static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
int err;
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
- if (err)
- return err;
fib->lpm_tree = new_tree;
mlxsw_sp_lpm_tree_hold(new_tree);
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
+ if (err)
+ goto err_tree_bind;
mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
return 0;
+
+err_tree_bind:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
+ fib->lpm_tree = old_tree;
+ return err;
}
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib *fib,
struct mlxsw_sp_lpm_tree *new_tree)
{
- struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
enum mlxsw_sp_l3proto proto = fib->proto;
+ struct mlxsw_sp_lpm_tree *old_tree;
u8 old_id, new_id = new_tree->id;
struct mlxsw_sp_vr *vr;
int i, err;
- if (!old_tree)
- goto no_replace;
+ old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
old_id = old_tree->id;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
@@ -855,6 +911,11 @@ static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
goto err_tree_replace;
}
+ memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
+ sizeof(new_tree->prefix_ref_count));
+ mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
+
return 0;
err_tree_replace:
@@ -866,33 +927,6 @@ err_tree_replace:
old_tree);
}
return err;
-
-no_replace:
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
- if (err)
- return err;
- fib->lpm_tree = new_tree;
- mlxsw_sp_lpm_tree_hold(new_tree);
- return 0;
-}
-
-static void
-mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_l3proto proto,
- struct mlxsw_sp_prefix_usage *req_prefix_usage)
-{
- int i;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
- struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
- unsigned char prefix;
-
- if (!mlxsw_sp_vr_is_used(vr))
- continue;
- mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
- mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
- }
}
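
The reordering in mlxsw_sp_vr_lpm_tree_replace() above commits the new tree pointer and reference before the hardware bind, and unwinds both if the bind fails, so the software state never holds a tree it doesn't reference. A compact sketch of that rollback ordering (illustrative only; a test knob stands in for the register write):

#include <stdio.h>

struct tree { unsigned int refs; };

static void hold(struct tree *t) { t->refs++; }
static void put(struct tree *t)  { t->refs--; }

static int hw_bind_fails;	/* test knob for the failure path */
static int hw_bind(struct tree *t) { (void)t; return hw_bind_fails ? -5 : 0; }

static int replace(struct tree **cur, struct tree *new)
{
	struct tree *old = *cur;
	int err;

	*cur = new;
	hold(new);
	err = hw_bind(new);
	if (err)
		goto err_bind;
	put(old);
	return 0;

err_bind:
	put(new);
	*cur = old;		/* roll back to the old tree */
	return err;
}

int main(void)
{
	struct tree a = { .refs = 1 }, b = { .refs = 0 };
	struct tree *cur = &a;

	hw_bind_fails = 1;
	printf("err=%d cur==a:%d\n", replace(&cur, &b), cur == &a);
	hw_bind_fails = 0;
	printf("err=%d cur==b:%d\n", replace(&cur, &b), cur == &b);
	return 0;
}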
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
@@ -1934,11 +1968,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
dipn = htonl(dip);
dev = mlxsw_sp->router->rifs[rif]->dev;
n = neigh_lookup(&arp_tbl, &dipn, dev);
- if (!n) {
- netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
- &dip);
+ if (!n)
return;
- }
netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
neigh_event_send(n, NULL);
@@ -1965,11 +1996,8 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
dev = mlxsw_sp->router->rifs[rif]->dev;
n = neigh_lookup(&nd_tbl, &dip, dev);
- if (!n) {
- netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
- &dip);
+ if (!n)
return;
- }
netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
neigh_event_send(n, NULL);
@@ -2436,25 +2464,16 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}
-static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_rif *rif)
-{
- char rauht_pl[MLXSW_REG_RAUHT_LEN];
-
- mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
- rif->rif_index, rif->addr);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
-}
-
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
- mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
- rif_list_node)
+ rif_list_node) {
+ mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+ }
}
enum mlxsw_sp_nexthop_type {
@@ -2637,7 +2656,8 @@ struct mlxsw_sp_nexthop_group_cmp_arg {
static bool
mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
- const struct in6_addr *gw, int ifindex)
+ const struct in6_addr *gw, int ifindex,
+ int weight)
{
int i;
@@ -2645,7 +2665,7 @@ mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
const struct mlxsw_sp_nexthop *nh;
nh = &nh_grp->nexthops[i];
- if (nh->ifindex == ifindex &&
+ if (nh->ifindex == ifindex && nh->nh_weight == weight &&
ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
return true;
}
@@ -2664,11 +2684,13 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct in6_addr *gw;
- int ifindex;
+ int ifindex, weight;
ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
+ weight = mlxsw_sp_rt6->rt->rt6i_nh_weight;
gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
- if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
+ if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
+ weight))
return false;
}
@@ -3237,7 +3259,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
{
if (!removing)
nh->should_offload = 1;
- else if (nh->offloaded)
+ else
nh->should_offload = 0;
nh->update = 1;
}
@@ -4199,68 +4221,66 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
}
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib *fib,
struct mlxsw_sp_fib_node *fib_node)
{
- struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+ struct mlxsw_sp_fib *fib = fib_node->fib;
struct mlxsw_sp_lpm_tree *lpm_tree;
int err;
- /* Since the tree is shared between all virtual routers we must
- * make sure it contains all the required prefix lengths. This
- * can be computed by either adding the new prefix length to the
- * existing prefix usage of a bound tree, or by aggregating the
- * prefix lengths across all virtual routers and adding the new
- * one as well.
- */
- if (fib->lpm_tree)
- mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
- &fib->lpm_tree->prefix_usage);
- else
- mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
- mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
+ lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
+ if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
+ goto out;
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
+ mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
fib->proto);
if (IS_ERR(lpm_tree))
return PTR_ERR(lpm_tree);
- if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
- return 0;
-
err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
if (err)
- return err;
+ goto err_lpm_tree_replace;
+out:
+ lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
return 0;
-}
-static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib *fib)
-{
- if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
- return;
- mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
- mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
- fib->lpm_tree = NULL;
+err_lpm_tree_replace:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+ return err;
}
-static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
+static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_node *fib_node)
{
- unsigned char prefix_len = fib_node->key.prefix_len;
+ struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
struct mlxsw_sp_fib *fib = fib_node->fib;
+ int err;
- if (fib->prefix_ref_count[prefix_len]++ == 0)
- mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
-}
+ if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
+ return;
+	/* Try to construct a new LPM tree from the current prefix usage
+	 * minus the prefix length that is no longer used. If that fails,
+	 * keep using the old tree.
+	 */
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
+ mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
+ fib_node->key.prefix_len);
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ fib->proto);
+ if (IS_ERR(lpm_tree))
+ return;
-static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
-{
- unsigned char prefix_len = fib_node->key.prefix_len;
- struct mlxsw_sp_fib *fib = fib_node->fib;
+ err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
+ if (err)
+ goto err_lpm_tree_replace;
+
+ return;
- if (--fib->prefix_ref_count[prefix_len] == 0)
- mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
+err_lpm_tree_replace:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
}
static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
@@ -4274,12 +4294,10 @@ static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
return err;
fib_node->fib = fib;
- err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
+ err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
if (err)
goto err_fib_lpm_tree_link;
- mlxsw_sp_fib_node_prefix_inc(fib_node);
-
return 0;
err_fib_lpm_tree_link:
@@ -4293,8 +4311,7 @@ static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_fib *fib = fib_node->fib;
- mlxsw_sp_fib_node_prefix_dec(fib_node);
- mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
+ mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
fib_node->fib = NULL;
mlxsw_sp_fib_node_remove(fib, fib_node);
}
@@ -4333,7 +4350,7 @@ mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
err_fib_node_init:
mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
return ERR_PTR(err);
}
@@ -4346,7 +4363,7 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
mlxsw_sp_fib_node_destroy(fib_node);
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static struct mlxsw_sp_fib4_entry *
@@ -4771,7 +4788,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev = rt->dst.dev;
nh->nh_grp = nh_grp;
- nh->nh_weight = 1;
+ nh->nh_weight = rt->rt6i_nh_weight;
memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
@@ -5369,7 +5386,7 @@ static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static int
@@ -5406,7 +5423,7 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
@@ -6055,7 +6072,7 @@ err_fid_get:
err_rif_alloc:
err_rif_index_alloc:
vr->rif_count--;
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
return ERR_PTR(err);
}
@@ -6078,7 +6095,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_put(fid);
kfree(rif);
vr->rif_count--;
- mlxsw_sp_vr_put(vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static void
@@ -6868,7 +6885,7 @@ mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
return 0;
err_loopback_op:
- mlxsw_sp_vr_put(ul_vr);
+ mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
return err;
}
@@ -6882,7 +6899,7 @@ static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
--ul_vr->rif_count;
- mlxsw_sp_vr_put(ul_vr);
+ mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
@@ -7017,6 +7034,24 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
}
#endif
+static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char rdpm_pl[MLXSW_REG_RDPM_LEN];
+ unsigned int i;
+
+ MLXSW_REG_ZERO(rdpm, rdpm_pl);
+
+	/* The HW determines switch priority from the DSCP bits, but the
+	 * kernel still derives it from the full ToS byte. Since the bit
+	 * layouts differ, program each DSCP entry with the priority the
+	 * kernel would compute for the corresponding ToS value, skipping
+	 * the 2 least-significant ECN bits.
+	 */
+ for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
+ mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
+}
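
To make the ToS/DSCP translation above concrete: the IPv4 ToS byte carries the 6 DSCP bits in its upper part and the 2 ECN bits at the bottom, so each DSCP table index is shifted left by two before the ToS-based priority lookup. A toy sketch — the priority function here is a stand-in, not the kernel's ip_tos2prio[] table; only the shift is the point:

#include <stdint.h>
#include <stdio.h>

static unsigned int toy_tos2priority(uint8_t tos)
{
	/* Stand-in: the kernel uses ip_tos2prio[IPTOS_TOS(tos) >> 1]. */
	return (tos >> 5) & 0x7;	/* old precedence bits */
}

int main(void)
{
	unsigned int dscp;

	for (dscp = 0; dscp < 64; dscp++) {
		uint8_t tos = dscp << 2;	/* skip the 2 ECN bits */

		printf("dscp %2u -> tos 0x%02x -> prio %u\n",
		       dscp, tos, toy_tos2priority(tos));
	}
	return 0;
}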
+
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
@@ -7029,6 +7064,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
+ mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
return err;
@@ -7104,6 +7140,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_mp_hash_init;
+ err = mlxsw_sp_dscp_init(mlxsw_sp);
+ if (err)
+ goto err_dscp_init;
+
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
mlxsw_sp_router_fib_dump_flush);
@@ -7113,6 +7153,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_register_fib_notifier:
+err_dscp_init:
err_mp_hash_init:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7b8548e25ae7..593ad31be749 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -152,6 +152,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
return NULL;
}
+bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev)
+{
+ return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+}
+
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
struct net_device *br_dev)
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 2fe96f1f3fe5..bd6e9014bc74 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -28,6 +28,7 @@
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/of_net.h>
#include "ks8851.h"
@@ -407,15 +408,23 @@ static void ks8851_read_mac_addr(struct net_device *dev)
* @ks: The device structure
*
* Get or create the initial mac address for the device and then set that
- * into the station address register. If there is an EEPROM present, then
+ * into the station address register. A mac address supplied in the device
+ * tree takes precedence. Otherwise, if there is an EEPROM present, then
* we try that. If no valid mac address is found we use eth_random_addr()
* to create a new one.
*/
static void ks8851_init_mac(struct ks8851_net *ks)
{
struct net_device *dev = ks->netdev;
+ const u8 *mac_addr;
+
+ mac_addr = of_get_mac_address(ks->spidev->dev.of_node);
+ if (mac_addr) {
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ ks8851_write_mac_addr(dev);
+ return;
+ }
- /* first, try reading what we've got already */
if (ks->rc_ccr & CCR_EEPROM) {
ks8851_read_mac_addr(dev);
if (is_valid_ether_addr(dev->dev_addr))
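
The hunk above gives the ks8851 a three-stage address source: device tree first, then EEPROM, then a random locally-administered address. An illustrative-only sketch of that fallback chain — get_dt_mac() and get_eeprom_mac() are hypothetical stubs, not driver or kernel APIs:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

static bool get_dt_mac(unsigned char *mac)     { (void)mac; return false; }
static bool get_eeprom_mac(unsigned char *mac) { (void)mac; return false; }

static void random_mac(unsigned char *mac)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		mac[i] = rand() & 0xff;
	mac[0] &= 0xfe;	/* clear the multicast bit */
	mac[0] |= 0x02;	/* set the locally-administered bit */
}

int main(void)
{
	unsigned char mac[ETH_ALEN];

	if (!get_dt_mac(mac) && !get_eeprom_mac(mac))
		random_mac(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}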
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index a42433fb6949..b922ab5cedea 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -311,7 +311,7 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
{
struct sonic_local* lp = netdev_priv(dev);
int sr;
- int commslot = 0;
+ bool commslot = macintosh_config->expansion_type == MAC_EXP_PDS_COMM;
if (!MACH_IS_MAC)
return -ENODEV;
@@ -322,10 +322,7 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
Ethernet (BTW, the Ethernet *is* always at the same
address, and nothing else lives there, at least if Apple's
documentation is to be believed) */
- if (macintosh_config->ident == MAC_MODEL_Q630 ||
- macintosh_config->ident == MAC_MODEL_P588 ||
- macintosh_config->ident == MAC_MODEL_P575 ||
- macintosh_config->ident == MAC_MODEL_C610) {
+ if (commslot || macintosh_config->ident == MAC_MODEL_C610) {
int card_present;
card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS);
@@ -333,7 +330,6 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
printk("none.\n");
return -ENODEV;
}
- commslot = 1;
}
printk("yes\n");
@@ -428,26 +424,26 @@ static int mac_nubus_sonic_ethernet_addr(struct net_device *dev,
return 0;
}
-static int macsonic_ident(struct nubus_dev *ndev)
+static int macsonic_ident(struct nubus_rsrc *fres)
{
- if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC &&
- ndev->dr_sw == NUBUS_DRSW_SONIC_LC)
+ if (fres->dr_hw == NUBUS_DRHW_ASANTE_LC &&
+ fres->dr_sw == NUBUS_DRSW_SONIC_LC)
return MACSONIC_DAYNALINK;
- if (ndev->dr_hw == NUBUS_DRHW_SONIC &&
- ndev->dr_sw == NUBUS_DRSW_APPLE) {
+ if (fres->dr_hw == NUBUS_DRHW_SONIC &&
+ fres->dr_sw == NUBUS_DRSW_APPLE) {
/* There has to be a better way to do this... */
- if (strstr(ndev->board->name, "DuoDock"))
+ if (strstr(fres->board->name, "DuoDock"))
return MACSONIC_DUODOCK;
else
return MACSONIC_APPLE;
}
- if (ndev->dr_hw == NUBUS_DRHW_SMC9194 &&
- ndev->dr_sw == NUBUS_DRSW_DAYNA)
+ if (fres->dr_hw == NUBUS_DRHW_SMC9194 &&
+ fres->dr_sw == NUBUS_DRSW_DAYNA)
return MACSONIC_DAYNA;
- if (ndev->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC &&
- ndev->dr_sw == 0) { /* huh? */
+ if (fres->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC &&
+ fres->dr_sw == 0) { /* huh? */
return MACSONIC_APPLE16;
}
return -1;
@@ -456,7 +452,7 @@ static int macsonic_ident(struct nubus_dev *ndev)
static int mac_nubus_sonic_probe(struct net_device *dev)
{
static int slots;
- struct nubus_dev* ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
struct sonic_local* lp = netdev_priv(dev);
unsigned long base_addr, prom_addr;
u16 sonic_dcr;
@@ -464,9 +460,11 @@ static int mac_nubus_sonic_probe(struct net_device *dev)
int reg_offset, dma_bitmode;
/* Find the first SONIC that hasn't been initialized already */
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK,
- NUBUS_TYPE_ETHERNET, ndev)) != NULL)
- {
+ for_each_func_rsrc(ndev) {
+ if (ndev->category != NUBUS_CAT_NETWORK ||
+ ndev->type != NUBUS_TYPE_ETHERNET)
+ continue;
+
/* Have we seen it already? */
if (slots & (1<<ndev->board->slot))
continue;
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 24c4408b5734..d5866d708dfa 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -22,6 +22,8 @@ nfp-objs := \
nfp_hwmon.o \
nfp_main.o \
nfp_net_common.o \
+ nfp_net_ctrl.o \
+ nfp_net_debugdump.o \
nfp_net_ethtool.o \
nfp_net_main.o \
nfp_net_repr.o \
@@ -43,6 +45,7 @@ endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp-objs += \
+ bpf/cmsg.o \
bpf/main.o \
bpf/offload.o \
bpf/verifier.o \
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
new file mode 100644
index 000000000000..80d3aa0fc9d3
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bpf.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/jiffies.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "fw.h"
+#include "main.h"
+
+#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
+
+#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
+
+static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
+{
+ u16 used_tags;
+
+ used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
+
+ return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
+}
+
+static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
+{
+ /* All FW communication for BPF is request-reply. To make sure we
+	 * don't reuse the message ID too early after a timeout, limit the
+ * number of requests in flight.
+ */
+ if (nfp_bpf_all_tags_busy(bpf)) {
+ cmsg_warn(bpf, "all FW request contexts busy!\n");
+ return -EAGAIN;
+ }
+
+ WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
+ return bpf->tag_alloc_next++;
+}
+
+static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
+{
+ WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
+
+ while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
+ bpf->tag_alloc_last != bpf->tag_alloc_next)
+ bpf->tag_alloc_last++;
+}
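
The tag allocator above treats tag_alloc_next and tag_alloc_last as free-running u16 counters: their wrap-safe difference is the number of tags in flight, and last only advances once the oldest outstanding tag is released, which is what bounds ID reuse after a timeout. A user-space sketch of the same window (illustrative names):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPAN (UINT16_MAX / 4)

static uint16_t next_tag, last_tag;
static bool busy[UINT16_MAX + 1];

static int alloc_tag(void)
{
	if ((uint16_t)(next_tag - last_tag) > SPAN)
		return -1;		/* window full, caller retries */
	busy[next_tag] = true;
	return next_tag++;
}

static void free_tag(uint16_t tag)
{
	busy[tag] = false;
	/* Advance last past the released tags; stop at the oldest busy one. */
	while (!busy[last_tag] && last_tag != next_tag)
		last_tag++;
}

int main(void)
{
	int a = alloc_tag(), b = alloc_tag();

	free_tag(a);			/* out-of-order release is fine */
	printf("last=%u next=%u\n", last_tag, next_tag); /* last stops at b */
	free_tag(b);
	printf("last=%u next=%u\n", last_tag, next_tag);
	return 0;
}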
+
+static struct sk_buff *
+nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
+{
+ struct sk_buff *skb;
+
+ skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
+ skb_put(skb, size);
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
+{
+ unsigned int size;
+
+ size = sizeof(struct cmsg_req_map_op);
+ size += sizeof(struct cmsg_key_value_pair) * n;
+
+ return nfp_bpf_cmsg_alloc(bpf, size);
+}
+
+static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
+{
+ struct cmsg_hdr *hdr;
+
+ hdr = (struct cmsg_hdr *)skb->data;
+
+ return be16_to_cpu(hdr->tag);
+}
+
+static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
+{
+ unsigned int msg_tag;
+ struct sk_buff *skb;
+
+ skb_queue_walk(&bpf->cmsg_replies, skb) {
+ msg_tag = nfp_bpf_cmsg_get_tag(skb);
+ if (msg_tag == tag) {
+ nfp_bpf_free_tag(bpf, tag);
+ __skb_unlink(skb, &bpf->cmsg_replies);
+ return skb;
+ }
+ }
+
+ return NULL;
+}
+
+static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
+{
+ struct sk_buff *skb;
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+ skb = __nfp_bpf_reply(bpf, tag);
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ return skb;
+}
+
+static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
+{
+ struct sk_buff *skb;
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+ skb = __nfp_bpf_reply(bpf, tag);
+ if (!skb)
+ nfp_bpf_free_tag(bpf, tag);
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
+ int tag)
+{
+ struct sk_buff *skb;
+ int i, err;
+
+ for (i = 0; i < 50; i++) {
+ udelay(4);
+ skb = nfp_bpf_reply(bpf, tag);
+ if (skb)
+ return skb;
+ }
+
+ err = wait_event_interruptible_timeout(bpf->cmsg_wq,
+ skb = nfp_bpf_reply(bpf, tag),
+ msecs_to_jiffies(5000));
+	/* We didn't get a response - try one last time and atomically drop
+ * the tag even if no response is matched.
+ */
+ if (!skb)
+ skb = nfp_bpf_reply_drop_tag(bpf, tag);
+ if (err < 0) {
+ cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
+ err == ERESTARTSYS ? "interrupted" : "error",
+ type, err);
+ return ERR_PTR(err);
+ }
+ if (!skb) {
+ cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
+ type);
+ return ERR_PTR(-ETIMEDOUT);
+ }
+
+ return skb;
+}
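
nfp_bpf_cmsg_wait_reply() above busy-polls briefly so fast firmware replies don't pay a sleep/wakeup, then falls back to a blocking wait with a timeout. A rough user-space sketch of the poll-then-sleep shape — try_get_reply() is a hypothetical stand-in for the tag lookup, and plain polling stands in for wait_event_interruptible_timeout():

#include <stdio.h>
#include <time.h>

static int reply_ready_after;		/* iterations until reply "arrives" */

static int try_get_reply(void)
{
	return reply_ready_after-- <= 0;
}

static int wait_reply(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 4000 };	/* ~4us */
	int i;

	/* Fast path: busy-poll a few times for quick replies. */
	for (i = 0; i < 50; i++) {
		if (try_get_reply())
			return 0;
		nanosleep(&ts, NULL);
	}
	/* Slow path: coarse-grained waiting with a deadline. */
	ts.tv_nsec = 1000000;		/* 1ms */
	for (i = 0; i < 5000; i++) {
		if (try_get_reply())
			return 0;
		nanosleep(&ts, NULL);
	}
	return -1;			/* timeout */
}

int main(void)
{
	reply_ready_after = 10;
	printf("wait_reply: %d\n", wait_reply());
	return 0;
}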
+
+static struct sk_buff *
+nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
+ enum nfp_bpf_cmsg_type type, unsigned int reply_size)
+{
+ struct cmsg_hdr *hdr;
+ int tag;
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+ tag = nfp_bpf_alloc_tag(bpf);
+ if (tag < 0) {
+ nfp_ctrl_unlock(bpf->app->ctrl);
+ dev_kfree_skb_any(skb);
+ return ERR_PTR(tag);
+ }
+
+ hdr = (void *)skb->data;
+ hdr->ver = CMSG_MAP_ABI_VERSION;
+ hdr->type = type;
+ hdr->tag = cpu_to_be16(tag);
+
+ __nfp_app_ctrl_tx(bpf->app, skb);
+
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
+ if (IS_ERR(skb))
+ return skb;
+
+ hdr = (struct cmsg_hdr *)skb->data;
+ /* 0 reply_size means caller will do the validation */
+ if (reply_size && skb->len != reply_size) {
+ cmsg_warn(bpf, "cmsg drop - wrong size %d != %d!\n",
+ skb->len, reply_size);
+ goto err_free;
+ }
+ if (hdr->type != __CMSG_REPLY(type)) {
+ cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
+ hdr->type, __CMSG_REPLY(type));
+ goto err_free;
+ }
+
+ return skb;
+err_free:
+ dev_kfree_skb_any(skb);
+ return ERR_PTR(-EIO);
+}
+
+static int
+nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
+ struct cmsg_reply_map_simple *reply)
+{
+ static const int res_table[] = {
+ [CMSG_RC_SUCCESS] = 0,
+ [CMSG_RC_ERR_MAP_FD] = -EBADFD,
+ [CMSG_RC_ERR_MAP_NOENT] = -ENOENT,
+ [CMSG_RC_ERR_MAP_ERR] = -EINVAL,
+ [CMSG_RC_ERR_MAP_PARSE] = -EIO,
+ [CMSG_RC_ERR_MAP_EXIST] = -EEXIST,
+ [CMSG_RC_ERR_MAP_NOMEM] = -ENOMEM,
+ [CMSG_RC_ERR_MAP_E2BIG] = -E2BIG,
+ };
+ u32 rc;
+
+ rc = be32_to_cpu(reply->rc);
+ if (rc >= ARRAY_SIZE(res_table)) {
+ cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
+ return -EIO;
+ }
+
+ return res_table[rc];
+}
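
The designated-initializer table in nfp_bpf_ctrl_rc_to_errno() above is the usual way to translate a device status code into an errno; the bounds check guards against firmware returning a code the driver doesn't know. The same shape in isolation:

#include <errno.h>
#include <stdio.h>

enum fw_rc { RC_SUCCESS, RC_ERR_NOENT, RC_ERR_EXIST, __RC_MAX };

static int rc_to_errno(unsigned int rc)
{
	static const int table[__RC_MAX] = {
		[RC_SUCCESS]   = 0,
		[RC_ERR_NOENT] = -ENOENT,
		[RC_ERR_EXIST] = -EEXIST,
	};

	if (rc >= __RC_MAX)
		return -EIO;	/* unknown status from the device */
	return table[rc];
}

int main(void)
{
	printf("%d %d %d\n", rc_to_errno(0), rc_to_errno(1), rc_to_errno(99));
	return 0;
}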
+
+long long int
+nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
+{
+ struct cmsg_reply_map_alloc_tbl *reply;
+ struct cmsg_req_map_alloc_tbl *req;
+ struct sk_buff *skb;
+ u32 tid;
+ int err;
+
+ skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
+ if (!skb)
+ return -ENOMEM;
+
+ req = (void *)skb->data;
+ req->key_size = cpu_to_be32(map->key_size);
+ req->value_size = cpu_to_be32(map->value_size);
+ req->max_entries = cpu_to_be32(map->max_entries);
+ req->map_type = cpu_to_be32(map->map_type);
+ req->map_flags = 0;
+
+ skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
+ sizeof(*reply));
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ reply = (void *)skb->data;
+ err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+ if (err)
+ goto err_free;
+
+ tid = be32_to_cpu(reply->tid);
+ dev_consume_skb_any(skb);
+
+ return tid;
+err_free:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
+{
+ struct cmsg_reply_map_free_tbl *reply;
+ struct cmsg_req_map_free_tbl *req;
+ struct sk_buff *skb;
+ int err;
+
+ skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
+ if (!skb) {
+ cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
+ return;
+ }
+
+ req = (void *)skb->data;
+ req->tid = cpu_to_be32(nfp_map->tid);
+
+ skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
+ sizeof(*reply));
+ if (IS_ERR(skb)) {
+ cmsg_warn(bpf, "leaking map - I/O error\n");
+ return;
+ }
+
+ reply = (void *)skb->data;
+ err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+ if (err)
+ cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);
+
+ dev_consume_skb_any(skb);
+}
+
+static int
+nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
+ enum nfp_bpf_cmsg_type op,
+ u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
+{
+ struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+ struct nfp_app_bpf *bpf = nfp_map->bpf;
+ struct bpf_map *map = &offmap->map;
+ struct cmsg_reply_map_op *reply;
+ struct cmsg_req_map_op *req;
+ struct sk_buff *skb;
+ int err;
+
+ /* FW messages have no space for more than 32 bits of flags */
+ if (flags >> 32)
+ return -EOPNOTSUPP;
+
+ skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (void *)skb->data;
+ req->tid = cpu_to_be32(nfp_map->tid);
+ req->count = cpu_to_be32(1);
+ req->flags = cpu_to_be32(flags);
+
+ /* Copy inputs */
+ if (key)
+ memcpy(&req->elem[0].key, key, map->key_size);
+ if (value)
+ memcpy(&req->elem[0].value, value, map->value_size);
+
+ skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
+ sizeof(*reply) + sizeof(*reply->elem));
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ reply = (void *)skb->data;
+ err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+ if (err)
+ goto err_free;
+
+ /* Copy outputs */
+ if (out_key)
+ memcpy(out_key, &reply->elem[0].key, map->key_size);
+ if (out_value)
+ memcpy(out_value, &reply->elem[0].value, map->value_size);
+
+ dev_consume_skb_any(skb);
+
+ return 0;
+err_free:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value, u64 flags)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
+ key, value, flags, NULL, NULL);
+}
+
+int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
+ key, NULL, 0, NULL, NULL);
+}
+
+int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
+ key, NULL, 0, NULL, value);
+}
+
+int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
+ void *next_key)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
+ NULL, NULL, 0, next_key, NULL);
+}
+
+int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key)
+{
+ return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
+ key, NULL, 0, next_key, NULL);
+}
+
+void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+ unsigned int tag;
+
+ if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
+ cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
+ goto err_free;
+ }
+
+ nfp_ctrl_lock(bpf->app->ctrl);
+
+ tag = nfp_bpf_cmsg_get_tag(skb);
+ if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
+ cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
+ tag);
+ goto err_unlock;
+ }
+
+ __skb_queue_tail(&bpf->cmsg_replies, skb);
+ wake_up_interruptible_all(&bpf->cmsg_wq);
+
+ nfp_ctrl_unlock(bpf->app->ctrl);
+
+ return;
+err_unlock:
+ nfp_ctrl_unlock(bpf->app->ctrl);
+err_free:
+ dev_kfree_skb_any(skb);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
new file mode 100644
index 000000000000..cfcc7bcb2c67
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NFP_BPF_FW_H
+#define NFP_BPF_FW_H 1
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+enum bpf_cap_tlv_type {
+ NFP_BPF_CAP_TYPE_FUNC = 1,
+ NFP_BPF_CAP_TYPE_ADJUST_HEAD = 2,
+ NFP_BPF_CAP_TYPE_MAPS = 3,
+};
+
+struct nfp_bpf_cap_tlv_func {
+ __le32 func_id;
+ __le32 func_addr;
+};
+
+struct nfp_bpf_cap_tlv_adjust_head {
+ __le32 flags;
+ __le32 off_min;
+ __le32 off_max;
+ __le32 guaranteed_sub;
+ __le32 guaranteed_add;
+};
+
+#define NFP_BPF_ADJUST_HEAD_NO_META BIT(0)
+
+struct nfp_bpf_cap_tlv_maps {
+ __le32 types;
+ __le32 max_maps;
+ __le32 max_elems;
+ __le32 max_key_sz;
+ __le32 max_val_sz;
+ __le32 max_elem_sz;
+};
+
+/*
+ * Types defined for map-related control messages
+ */
+#define CMSG_MAP_ABI_VERSION 1
+
+enum nfp_bpf_cmsg_type {
+ CMSG_TYPE_MAP_ALLOC = 1,
+ CMSG_TYPE_MAP_FREE = 2,
+ CMSG_TYPE_MAP_LOOKUP = 3,
+ CMSG_TYPE_MAP_UPDATE = 4,
+ CMSG_TYPE_MAP_DELETE = 5,
+ CMSG_TYPE_MAP_GETNEXT = 6,
+ CMSG_TYPE_MAP_GETFIRST = 7,
+ __CMSG_TYPE_MAP_MAX,
+};
+
+#define CMSG_TYPE_MAP_REPLY_BIT 7
+#define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
+
+#define CMSG_MAP_KEY_LW 16
+#define CMSG_MAP_VALUE_LW 16
+
+enum nfp_bpf_cmsg_status {
+ CMSG_RC_SUCCESS = 0,
+ CMSG_RC_ERR_MAP_FD = 1,
+ CMSG_RC_ERR_MAP_NOENT = 2,
+ CMSG_RC_ERR_MAP_ERR = 3,
+ CMSG_RC_ERR_MAP_PARSE = 4,
+ CMSG_RC_ERR_MAP_EXIST = 5,
+ CMSG_RC_ERR_MAP_NOMEM = 6,
+ CMSG_RC_ERR_MAP_E2BIG = 7,
+};
+
+struct cmsg_hdr {
+ u8 type;
+ u8 ver;
+ __be16 tag;
+};
+
+struct cmsg_reply_map_simple {
+ struct cmsg_hdr hdr;
+ __be32 rc;
+};
+
+struct cmsg_req_map_alloc_tbl {
+ struct cmsg_hdr hdr;
+ __be32 key_size; /* in bytes */
+ __be32 value_size; /* in bytes */
+ __be32 max_entries;
+ __be32 map_type;
+ __be32 map_flags; /* reserved */
+};
+
+struct cmsg_reply_map_alloc_tbl {
+ struct cmsg_reply_map_simple reply_hdr;
+ __be32 tid;
+};
+
+struct cmsg_req_map_free_tbl {
+ struct cmsg_hdr hdr;
+ __be32 tid;
+};
+
+struct cmsg_reply_map_free_tbl {
+ struct cmsg_reply_map_simple reply_hdr;
+ __be32 count;
+};
+
+struct cmsg_key_value_pair {
+ __be32 key[CMSG_MAP_KEY_LW];
+ __be32 value[CMSG_MAP_VALUE_LW];
+};
+
+struct cmsg_req_map_op {
+ struct cmsg_hdr hdr;
+ __be32 tid;
+ __be32 count;
+ __be32 flags;
+ struct cmsg_key_value_pair elem[0];
+};
+
+struct cmsg_reply_map_op {
+ struct cmsg_reply_map_simple reply_hdr;
+ __be32 count;
+ __be32 resv;
+ struct cmsg_key_value_pair elem[0];
+};
+#endif
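
Two conventions from fw.h above, shown standalone: a reply type is its request type with the high reply bit set, and variable-length messages end in a trailing element array (elem[0] above in the older GNU style; a C99 flexible array member below). Sizes and names here are illustrative:

#include <stdint.h>
#include <stdio.h>

#define REPLY_BIT	7
#define MSG_REPLY(req)	((1u << REPLY_BIT) | (req))

enum { MSG_MAP_LOOKUP = 3 };

struct kv { uint32_t key[16]; uint32_t value[16]; };
struct map_op {
	uint32_t tid;
	uint32_t count;
	struct kv elem[];	/* trailing variable-length element array */
};

int main(void)
{
	printf("request 0x%02x -> reply 0x%02x\n",
	       MSG_MAP_LOOKUP, (unsigned int)MSG_REPLY(MSG_MAP_LOOKUP));
	printf("msg size for 1 elem: %zu\n",
	       sizeof(struct map_op) + sizeof(struct kv));
	return 0;
}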
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 995e95410b11..56451edf01c2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -33,6 +33,7 @@
#define pr_fmt(fmt) "NFP net bpf: " fmt
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
@@ -66,12 +67,6 @@
next2 = nfp_meta_next(next))
static bool
-nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
- return meta->l.next != &nfp_prog->insns;
-}
-
-static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return meta->l.prev != &nfp_prog->insns;
@@ -90,19 +85,25 @@ static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
- return nfp_prog->start_off + nfp_prog->prog_len;
+ return nfp_prog->prog_len;
}
-static unsigned int
-nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
+static bool
+nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
- return offset - nfp_prog->start_off;
+ /* If there is a recorded error we may have dropped instructions;
+	 * that doesn't have to be due to a translator bug, and the translation
+ * will fail anyway, so just return OK.
+ */
+ if (nfp_prog->error)
+ return true;
+ return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}
/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
- u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
+ u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync, bool indir)
{
enum cmd_ctx_swap ctx;
u64 insn;
@@ -120,14 +121,15 @@ __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
FIELD_PREP(OP_CMD_CNT, size) |
FIELD_PREP(OP_CMD_SIG, sync) |
FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
+ FIELD_PREP(OP_CMD_INDIR, indir) |
FIELD_PREP(OP_CMD_MODE, mode);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
- u8 mode, u8 xfer, swreg lreg, swreg rreg, u8 size, bool sync)
+emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+ swreg lreg, swreg rreg, u8 size, bool sync, bool indir)
{
struct nfp_insn_re_regs reg;
int err;
@@ -148,7 +150,22 @@ emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
return;
}
- __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
+ __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync,
+ indir);
+}
+
+static void
+emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+ swreg lreg, swreg rreg, u8 size, bool sync)
+{
+ emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, false);
+}
+
+static void
+emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+ swreg lreg, swreg rreg, u8 size, bool sync)
+{
+ emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, true);
}
static void
@@ -172,22 +189,28 @@ __emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
nfp_prog_push(nfp_prog, insn);
}
-static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
+static void
+emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
+ enum nfp_relo_type relo)
{
- if (defer > 2) {
+ if (mask == BR_UNC && defer > 2) {
pr_err("BUG: branch defer out of bounds %d\n", defer);
nfp_prog->error = -EFAULT;
return;
}
- __emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
+
+ __emit_br(nfp_prog, mask,
+ mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
+ BR_CSS_NONE, addr, defer);
+
+ nfp_prog->prog[nfp_prog->prog_len - 1] |=
+ FIELD_PREP(OP_RELO_TYPE, relo);
}
static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
- __emit_br(nfp_prog, mask,
- mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
- BR_CSS_NONE, addr, defer);
+ emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}
static void
@@ -230,9 +253,11 @@ emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
return;
}
- __emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
- invert, shift, reg.wr_both,
- reg.dst_lmextn, reg.src_lmextn);
+ /* Use reg.dst when destination is No-Dest. */
+ __emit_immed(nfp_prog,
+ swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
+ reg.breg, imm >> 8, width, invert, shift,
+ reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}
static void
@@ -458,6 +483,21 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
}
}
+static void
+wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
+ enum nfp_relo_type relo)
+{
+ if (imm > 0xffff) {
+ pr_err("relocation of a large immediate!\n");
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+ emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
+
+ nfp_prog->prog[nfp_prog->prog_len - 1] |=
+ FIELD_PREP(OP_RELO_TYPE, relo);
+}
+
/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
* If the @imm is small enough encode it directly in operand and return
* otherwise load @imm to a spare register and return its encoding.
@@ -490,24 +530,179 @@ static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
emit_nop(nfp_prog);
}
+static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
+{
+ emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
+}
+
+static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
+{
+ wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
+}
+
+/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
+ * result to @dst from the low end.
+ */
static void
-wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
- enum br_special special)
+wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
+ u8 offset)
{
- emit_br(nfp_prog, mask, 0, 0);
+ enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
+ u8 mask = (1 << field_len) - 1;
- nfp_prog->prog[nfp_prog->prog_len - 1] |=
- FIELD_PREP(OP_BR_SPECIAL, special);
+ emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}
-static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
+static void
+addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ swreg *rega, swreg *regb)
{
- emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
+ if (offset == reg_imm(0)) {
+ *rega = reg_a(src_gpr);
+ *regb = reg_b(src_gpr + 1);
+ return;
+ }
+
+ emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
+ emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
+ reg_imm(0));
+ *rega = imm_a(nfp_prog);
+ *regb = imm_b(nfp_prog);
}
-static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
+/* NFP has a Command Push Pull bus which supports bulk memory operations. */
+static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
+ bool descending_seq = meta->ldst_gather_len < 0;
+ s16 len = abs(meta->ldst_gather_len);
+ swreg src_base, off;
+ bool src_40bit_addr;
+ unsigned int i;
+ u8 xfer_num;
+
+ off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+ src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
+ src_base = reg_a(meta->insn.src_reg * 2);
+ xfer_num = round_up(len, 4) / 4;
+
+ if (src_40bit_addr)
+ addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
+ &off);
+
+ /* Setup PREV_ALU fields to override memory read length. */
+ if (len > 32)
+ wrp_immed(nfp_prog, reg_none(),
+ CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
+
+ /* Memory read from source addr into transfer-in registers. */
+ emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
+ src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
+ src_base, off, xfer_num - 1, true, len > 32);
+
+ /* Move from transfer-in to transfer-out. */
+ for (i = 0; i < xfer_num; i++)
+ wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));
+
+ off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));
+
+ if (len <= 8) {
+ /* Use single direct_ref write8. */
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
+ true);
+ } else if (len <= 32 && IS_ALIGNED(len, 4)) {
+ /* Use single direct_ref write32. */
+ emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
+ true);
+ } else if (len <= 32) {
+ /* Use single indirect_ref write8. */
+ wrp_immed(nfp_prog, reg_none(),
+ CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
+ emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off,
+ len - 1, true);
+ } else if (IS_ALIGNED(len, 4)) {
+ /* Use single indirect_ref write32. */
+ wrp_immed(nfp_prog, reg_none(),
+ CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
+ emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off,
+ xfer_num - 1, true);
+ } else if (len <= 40) {
+ /* Use one direct_ref write32 to write the first 32 bytes, then
+ * another direct_ref write8 to write the remaining bytes.
+ */
+ emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off, 7,
+ true);
+
+ off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
+ imm_b(nfp_prog));
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
+ reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
+ true);
+ } else {
+ /* Use one indirect_ref write32 to write the 4-byte-aligned part,
+ * then another direct_ref write8 to write the remaining bytes.
+ */
+ u8 new_off;
+
+ wrp_immed(nfp_prog, reg_none(),
+ CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
+ emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+ reg_a(meta->paired_st->dst_reg * 2), off,
+ xfer_num - 2, true);
+ new_off = meta->paired_st->off + (xfer_num - 1) * 4;
+ off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
+ xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
+ (len & 0x3) - 1, true);
+ }
+
+ /* TODO: The following extra load is to make sure the data flow is
+ * identical before and after we do the memory copy optimization.
+ *
+ * The load destination register is not guaranteed to be dead, so we
+ * need to make sure it is loaded with the same value as before
+ * this transformation.
+ *
+ * These extra loads could be removed once we have accurate register
+ * usage information.
+ */
+ if (descending_seq)
+ xfer_num = 0;
+ else if (BPF_SIZE(meta->insn.code) != BPF_DW)
+ xfer_num = xfer_num - 1;
+ else
+ xfer_num = xfer_num - 2;
+
+ switch (BPF_SIZE(meta->insn.code)) {
+ case BPF_B:
+ wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_xfer(xfer_num), 1,
+ IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
+ break;
+ case BPF_H:
+ wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_xfer(xfer_num), 2, (len & 3) ^ 2);
+ break;
+ case BPF_W:
+ wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_xfer(0));
+ break;
+ case BPF_DW:
+ wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_xfer(xfer_num));
+ wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
+ reg_xfer(xfer_num + 1));
+ break;
+ }
+
+ if (BPF_SIZE(meta->insn.code) != BPF_DW)
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+ return 0;
}
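The write-side strategy selection in nfp_cpp_memcpy() can be summarized in isolation. A standalone sketch (plain C; the thresholds mirror the ladder above, the helper name is made up for illustration):

#include <stdio.h>

static const char *strategy(int len)
{
	if (len <= 8)
		return "single direct_ref write8";
	if (len <= 32 && !(len % 4))
		return "single direct_ref write32";
	if (len <= 32)
		return "single indirect_ref write8";
	if (!(len % 4))
		return "single indirect_ref write32";
	if (len <= 40)
		return "direct_ref write32 + direct_ref write8 tail";
	return "indirect_ref write32 + direct_ref write8 tail";
}

int main(void)
{
	int lens[] = { 6, 24, 30, 36, 50, 64 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len=%2d xfer_num=%2d -> %s\n",
		       lens[i], (lens[i] + 3) / 4, strategy(lens[i]));
	return 0;
}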
static int
@@ -540,20 +735,20 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
}
static int
-data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
- u8 dst_gpr, int size)
+data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
+ swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
unsigned int i;
u8 mask, sz;
- /* We load the value from the address indicated in @offset and then
+ /* We load the value from the address indicated in rreg + lreg and then
* mask out the data we don't need. Note: this is little endian!
*/
sz = max(size, 4);
mask = size < 4 ? GENMASK(size - 1, 0) : 0;
- emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0,
- reg_a(src_gpr), offset, sz / 4 - 1, true);
+ emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
+ lreg, rreg, sz / 4 - 1, true);
i = 0;
if (mask)
@@ -570,6 +765,26 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
}
static int
+data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ u8 dst_gpr, u8 size)
+{
+ return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
+ size, CMD_MODE_32b);
+}
+
+static int
+data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ u8 dst_gpr, u8 size)
+{
+ swreg rega, regb;
+
+ addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);
+
+ return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
+ size, CMD_MODE_40b_BA);
+}
+
+static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
swreg tmp_reg;
@@ -583,7 +798,7 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
emit_alu(nfp_prog, reg_none(),
plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
- wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
/* Load data */
return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
@@ -596,7 +811,7 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
/* Check packet length */
tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
- wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
/* Load data */
tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
@@ -975,9 +1190,6 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
{
const struct bpf_insn *insn = &meta->insn;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
insn->src_reg * 2, br_mask, insn->off);
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
@@ -995,9 +1207,6 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u8 reg = insn->dst_reg * 2;
swreg tmp_reg;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
if (!swap)
emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
@@ -1027,9 +1236,6 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
areg = insn->dst_reg * 2;
breg = insn->src_reg * 2;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
if (swap) {
areg ^= breg;
breg ^= areg;
@@ -1052,6 +1258,136 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
SHF_SC_R_ROT, 16);
}
+static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
+ struct nfp_bpf_cap_adjust_head *adjust_head;
+ u32 ret_einval, end;
+
+ adjust_head = &nfp_prog->bpf->adjust_head;
+
+ /* Optimized version - 5 vs 14 cycles */
+ if (nfp_prog->adjust_head_location != UINT_MAX) {
+ if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
+ return -EINVAL;
+
+ emit_alu(nfp_prog, pptr_reg(nfp_prog),
+ reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
+ emit_alu(nfp_prog, plen_reg(nfp_prog),
+ plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+ emit_alu(nfp_prog, pv_len(nfp_prog),
+ pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+
+ wrp_immed(nfp_prog, reg_both(0), 0);
+ wrp_immed(nfp_prog, reg_both(1), 0);
+
+ /* TODO: when adjust head is guaranteed to succeed we can
+ * also eliminate the following if (r0 == 0) branch.
+ */
+
+ return 0;
+ }
+
+ ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
+ end = ret_einval + 2;
+
+ /* We need to use a temp because offset is just a part of the pkt ptr */
+ emit_alu(nfp_prog, tmp,
+ reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));
+
+ /* Validate result will fit within FW datapath constraints */
+ emit_alu(nfp_prog, reg_none(),
+ tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
+ emit_br(nfp_prog, BR_BLO, ret_einval, 0);
+ emit_alu(nfp_prog, reg_none(),
+ reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
+ emit_br(nfp_prog, BR_BLO, ret_einval, 0);
+
+ /* Validate the length is at least ETH_HLEN */
+ emit_alu(nfp_prog, tmp_len,
+ plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+ emit_alu(nfp_prog, reg_none(),
+ tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
+ emit_br(nfp_prog, BR_BMI, ret_einval, 0);
+
+ /* Load the ret code */
+ wrp_immed(nfp_prog, reg_both(0), 0);
+ wrp_immed(nfp_prog, reg_both(1), 0);
+
+ /* Modify the packet metadata */
+ emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);
+
+ /* Skip over the -EINVAL ret code (defer 2) */
+ emit_br(nfp_prog, BR_UNC, end, 2);
+
+ emit_alu(nfp_prog, plen_reg(nfp_prog),
+ plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+ emit_alu(nfp_prog, pv_len(nfp_prog),
+ pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+
+ /* return -EINVAL target */
+ if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
+ return -EINVAL;
+
+ wrp_immed(nfp_prog, reg_both(0), -22);
+ wrp_immed(nfp_prog, reg_both(1), ~0);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, end))
+ return -EINVAL;
+
+ return 0;
+}
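The two wrp_immed() calls on the -EINVAL path load the r0 register pair with -22 and ~0, which together encode the 64-bit value -EINVAL. A standalone check of that encoding (plain C, assuming the low/high 32-bit split of r0 used throughout this file):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = (uint32_t)-22;	/* wrp_immed(..., reg_both(0), -22) */
	uint32_t hi = ~0u;		/* wrp_immed(..., reg_both(1), ~0)  */
	int64_t r0 = (int64_t)(((uint64_t)hi << 32) | lo);

	printf("r0 = %lld, -EINVAL = %d\n", (long long)r0, -EINVAL);
	return 0;
}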
+
+static int
+map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ struct bpf_offloaded_map *offmap;
+ struct nfp_bpf_map *nfp_map;
+ bool load_lm_ptr;
+ u32 ret_tgt;
+ s64 lm_off;
+ swreg tid;
+
+ offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
+ nfp_map = offmap->dev_priv;
+
+ /* We only have to reload LM0 if the key is not at the start of the stack */
+ lm_off = nfp_prog->stack_depth;
+ lm_off += meta->arg2.var_off.value + meta->arg2.off;
+ load_lm_ptr = meta->arg2_var_off || lm_off;
+
+ /* Set LM0 to start of key */
+ if (load_lm_ptr)
+ emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
+
+ /* Load the map ID into a register; it should actually fit as an immediate,
+ * but in case it doesn't, deal with it here, not in the delay slots.
+ */
+ tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));
+
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + BPF_FUNC_map_lookup_elem,
+ 2, RELO_BR_HELPER);
+ ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
+
+ /* Load map ID into A0 */
+ wrp_mov(nfp_prog, reg_a(0), tid);
+
+ /* Load the return address into B0 */
+ wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
+ return -EINVAL;
+
+ /* Reset the LM0 pointer */
+ if (!load_lm_ptr)
+ return 0;
+
+ emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
+ wrp_nops(nfp_prog, 3);
+
+ return 0;
+}
+
/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
@@ -1486,14 +1822,29 @@ mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
- return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg,
- meta->insn.dst_reg * 2, size);
+ return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
+ tmp_reg, meta->insn.dst_reg * 2, size);
+}
+
+static int
+mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ swreg tmp_reg;
+
+ tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+
+ return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
+ tmp_reg, meta->insn.dst_reg * 2, size);
}
static int
mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int size)
{
+ if (meta->ldst_gather_len)
+ return nfp_cpp_memcpy(nfp_prog, meta);
+
if (meta->ptr.type == PTR_TO_CTX) {
if (nfp_prog->type == BPF_PROG_TYPE_XDP)
return mem_ldx_xdp(nfp_prog, meta, size);
@@ -1508,6 +1859,9 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return mem_ldx_stack(nfp_prog, meta, size,
meta->ptr.off + meta->ptr.var_off.value);
+ if (meta->ptr.type == PTR_TO_MAP_VALUE)
+ return mem_ldx_emem(nfp_prog, meta, size);
+
return -EOPNOTSUPP;
}
@@ -1630,8 +1984,6 @@ static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- if (meta->insn.off < 0) /* TODO */
- return -EOPNOTSUPP;
emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
return 0;
@@ -1646,9 +1998,6 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
or1 = reg_a(insn->dst_reg * 2);
or2 = reg_b(insn->dst_reg * 2 + 1);
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
if (imm & ~0U) {
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
emit_alu(nfp_prog, imm_a(nfp_prog),
@@ -1689,15 +2038,32 @@ static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}
+static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
+}
+
+static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
+}
+
+static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
+}
+
+static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
+}
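Since the hardware offers signed "less than" and "greater or equal" branch masks, the four signed jumps above are expressed by optionally swapping operands: a signed dst > imm holds exactly when imm < dst. A standalone sketch of that identity (plain C, not driver code):

#include <stdio.h>

/* BR_BLT: signed "a < b", simplified from the carry/overflow flags. */
static int br_blt(long long a, long long b)
{
	return a < b;
}

int main(void)
{
	long long dst = 5, imm = -3;

	/* JSGT dst, imm taken?  Same as imm < dst with operands swapped. */
	printf("dst > imm: %d, swap + BR_BLT: %d\n",
	       (int)(dst > imm), br_blt(imm, dst));
	return 0;
}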
+
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
swreg tmp_reg;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
if (!imm) {
meta->skip = true;
return 0;
@@ -1726,9 +2092,6 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u64 imm = insn->imm; /* sign extend */
swreg tmp_reg;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
if (!imm) {
emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
@@ -1753,9 +2116,6 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
- if (insn->off < 0) /* TODO */
- return -EOPNOTSUPP;
-
emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
ALU_OP_XOR, reg_b(insn->src_reg * 2));
emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
@@ -1787,6 +2147,26 @@ static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}
+static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
+}
+
+static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
+}
+
+static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
+}
+
+static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
+}
+
static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
@@ -1797,9 +2177,22 @@ static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
}
+static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ switch (meta->insn.imm) {
+ case BPF_FUNC_xdp_adjust_head:
+ return adjust_head(nfp_prog, meta);
+ case BPF_FUNC_map_lookup_elem:
+ return map_lookup_stack(nfp_prog, meta);
+ default:
+ WARN_ONCE(1, "verifier allowed unsupported function\n");
+ return -EOPNOTSUPP;
+ }
+}
+
static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
return 0;
}
@@ -1860,6 +2253,10 @@ static const instr_cb_t instr_cb[256] = {
[BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
[BPF_JMP | BPF_JLT | BPF_K] = jlt_imm,
[BPF_JMP | BPF_JLE | BPF_K] = jle_imm,
+ [BPF_JMP | BPF_JSGT | BPF_K] = jsgt_imm,
+ [BPF_JMP | BPF_JSGE | BPF_K] = jsge_imm,
+ [BPF_JMP | BPF_JSLT | BPF_K] = jslt_imm,
+ [BPF_JMP | BPF_JSLE | BPF_K] = jsle_imm,
[BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
[BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
[BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
@@ -1867,99 +2264,64 @@ static const instr_cb_t instr_cb[256] = {
[BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
[BPF_JMP | BPF_JLT | BPF_X] = jlt_reg,
[BPF_JMP | BPF_JLE | BPF_X] = jle_reg,
+ [BPF_JMP | BPF_JSGT | BPF_X] = jsgt_reg,
+ [BPF_JMP | BPF_JSGE | BPF_X] = jsge_reg,
+ [BPF_JMP | BPF_JSLT | BPF_X] = jslt_reg,
+ [BPF_JMP | BPF_JSLE | BPF_X] = jsle_reg,
[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
+ [BPF_JMP | BPF_CALL] = call,
[BPF_JMP | BPF_EXIT] = goto_out,
};
-/* --- Misc code --- */
-static void br_set_offset(u64 *instr, u16 offset)
-{
- u16 addr_lo, addr_hi;
-
- addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
- addr_hi = offset != addr_lo;
- *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
- *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
- *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
-}
-
/* --- Assembler logic --- */
static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{
- struct nfp_insn_meta *meta, *next;
- u32 off, br_idx;
- u32 idx;
+ struct nfp_insn_meta *meta, *jmp_dst;
+ u32 idx, br_idx;
- nfp_for_each_insn_walk2(nfp_prog, meta, next) {
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
if (meta->skip)
continue;
+ if (meta->insn.code == (BPF_JMP | BPF_CALL))
+ continue;
if (BPF_CLASS(meta->insn.code) != BPF_JMP)
continue;
- br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
+ if (list_is_last(&meta->l, &nfp_prog->insns))
+ br_idx = nfp_prog->last_bpf_off;
+ else
+ br_idx = list_next_entry(meta, l)->off - 1;
+
if (!nfp_is_br(nfp_prog->prog[br_idx])) {
pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
return -ELOOP;
}
/* Leave special branches for later */
- if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
+ if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
+ RELO_BR_REL)
continue;
- /* Find the target offset in assembler realm */
- off = meta->insn.off;
- if (!off) {
- pr_err("Fixup found zero offset!!\n");
+ if (!meta->jmp_dst) {
+ pr_err("Non-exit jump doesn't have destination info recorded!!\n");
return -ELOOP;
}
- while (off && nfp_meta_has_next(nfp_prog, next)) {
- next = nfp_meta_next(next);
- off--;
- }
- if (off) {
- pr_err("Fixup found too large jump!! %d\n", off);
- return -ELOOP;
- }
+ jmp_dst = meta->jmp_dst;
- if (next->skip) {
+ if (jmp_dst->skip) {
pr_err("Branch landing on removed instruction!!\n");
return -ELOOP;
}
- for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
- idx <= br_idx; idx++) {
+ for (idx = meta->off; idx <= br_idx; idx++) {
if (!nfp_is_br(nfp_prog->prog[idx]))
continue;
- br_set_offset(&nfp_prog->prog[idx], next->off);
+ br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
}
}
- /* Fixup 'goto out's separately, they can be scattered around */
- for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
- enum br_special special;
-
- if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
- continue;
-
- special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
- switch (special) {
- case OP_BR_NORMAL:
- break;
- case OP_BR_GO_OUT:
- br_set_offset(&nfp_prog->prog[br_idx],
- nfp_prog->tgt_out);
- break;
- case OP_BR_GO_ABORT:
- br_set_offset(&nfp_prog->prog[br_idx],
- nfp_prog->tgt_abort);
- break;
- }
-
- nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
- }
-
return 0;
}
@@ -1987,7 +2349,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
/* Target for aborts */
nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
@@ -2014,7 +2376,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
emit_shf(nfp_prog, reg_b(2),
reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
emit_shf(nfp_prog, reg_b(2),
reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
@@ -2033,7 +2395,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
/* Target for aborts */
nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
@@ -2054,7 +2416,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
emit_shf(nfp_prog, reg_b(2),
reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
- emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
@@ -2105,6 +2467,8 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
nfp_prog->n_translated++;
}
+ nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;
+
nfp_outro(nfp_prog);
if (nfp_prog->error)
return nfp_prog->error;
@@ -2173,6 +2537,9 @@ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
if (next.src_reg || next.dst_reg)
continue;
+ if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
+ continue;
+
meta2->skip = true;
}
}
@@ -2209,40 +2576,294 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
if (next1.imm != 0x20 || next2.imm != 0x20)
continue;
+ if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
+ meta3->flags & FLAG_INSN_IS_JUMP_DST)
+ continue;
+
meta2->skip = true;
meta3->skip = true;
}
}
+/* A load/store pair that forms a memory copy should look like the following:
+ *
+ * ld_width R, [addr_src + offset_src]
+ * st_width [addr_dest + offset_dest], R
+ *
+ * The destination register of the load and the source register of the store
+ * must be the same, and the load and store must use the same width.
+ * If either addr_src or addr_dest is the stack pointer, we don't do the
+ * CPP optimization, as the stack is modelled by registers on the NFP.
+ */
+static bool
+curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
+ struct nfp_insn_meta *st_meta)
+{
+ struct bpf_insn *ld = &ld_meta->insn;
+ struct bpf_insn *st = &st_meta->insn;
+
+ if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
+ return false;
+
+ if (ld_meta->ptr.type != PTR_TO_PACKET)
+ return false;
+
+ if (st_meta->ptr.type != PTR_TO_PACKET)
+ return false;
+
+ if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
+ return false;
+
+ if (ld->dst_reg != st->src_reg)
+ return false;
+
+ /* There is a jump to the store insn in this pair. */
+ if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
+ return false;
+
+ return true;
+}
+
+/* Currently, we only support chaining load/store pairs if:
+ *
+ * - Their address base registers are the same.
+ * - Their address offsets are in the same order.
+ * - They operate at the same memory width.
+ * - There is no jump into the middle of them.
+ */
+static bool
+curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
+ struct nfp_insn_meta *st_meta,
+ struct bpf_insn *prev_ld,
+ struct bpf_insn *prev_st)
+{
+ u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
+ struct bpf_insn *ld = &ld_meta->insn;
+ struct bpf_insn *st = &st_meta->insn;
+ s16 prev_ld_off, prev_st_off;
+
+ /* This pair is the start pair. */
+ if (!prev_ld)
+ return true;
+
+ prev_size = BPF_LDST_BYTES(prev_ld);
+ curr_size = BPF_LDST_BYTES(ld);
+ prev_ld_base = prev_ld->src_reg;
+ prev_st_base = prev_st->dst_reg;
+ prev_ld_dst = prev_ld->dst_reg;
+ prev_ld_off = prev_ld->off;
+ prev_st_off = prev_st->off;
+
+ if (ld->dst_reg != prev_ld_dst)
+ return false;
+
+ if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
+ return false;
+
+ if (curr_size != prev_size)
+ return false;
+
+ /* There is a jump to the head of this pair. */
+ if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
+ return false;
+
+ /* Both in ascending order. */
+ if (prev_ld_off + prev_size == ld->off &&
+ prev_st_off + prev_size == st->off)
+ return true;
+
+ /* Both in descending order. */
+ if (ld->off + curr_size == prev_ld_off &&
+ st->off + curr_size == prev_st_off)
+ return true;
+
+ return false;
+}
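A quick standalone restatement of the chaining rule (plain C): consecutive pairs must step by exactly the access size, in the same direction on both the load side and the store side.

#include <stdbool.h>
#include <stdio.h>

static bool chains(int prev_ld, int prev_st, int ld, int st, int size)
{
	return (prev_ld + size == ld && prev_st + size == st) ||
	       (ld + size == prev_ld && st + size == prev_st);
}

int main(void)
{
	printf("%d\n", chains(0, 64, 4, 68, 4));	/* ascending: 1 */
	printf("%d\n", chains(4, 68, 0, 64, 4));	/* descending: 1 */
	printf("%d\n", chains(0, 64, 8, 72, 4));	/* gap: 0 */
	return 0;
}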
+
+/* Return TRUE if a cross memory access happens. A cross memory access means
+ * the store area overlaps the load area, so a later load might read the
+ * value written by a previous store; in that case we can't treat the
+ * sequence as a memory copy.
+ */
+static bool
+cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
+ struct nfp_insn_meta *head_st_meta)
+{
+ s16 head_ld_off, head_st_off, ld_off;
+
+ /* Different pointer types do not overlap. */
+ if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
+ return false;
+
+ /* load and store are both PTR_TO_PACKET, check ID info. */
+ if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
+ return true;
+
+ /* Canonicalize the offsets: express all of them relative to the original
+ * base register.
+ */
+ head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
+ head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
+ ld_off = ld->off + head_ld_meta->ptr.off;
+
+ /* Ascending order cross. */
+ if (ld_off > head_ld_off &&
+ head_ld_off < head_st_off && ld_off >= head_st_off)
+ return true;
+
+ /* Descending order cross. */
+ if (ld_off < head_ld_off &&
+ head_ld_off > head_st_off && ld_off <= head_st_off)
+ return true;
+
+ return false;
+}
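A standalone sketch of the ascending-order overlap test (plain C): with the head load at offset 0 and the head store at offset 4, a later 4-byte load at offset 4 would read bytes the first store wrote, so the sequence must not be treated as a memcpy.

#include <stdbool.h>
#include <stdio.h>

static bool ascending_cross(int ld_off, int head_ld_off, int head_st_off)
{
	return ld_off > head_ld_off &&
	       head_ld_off < head_st_off && ld_off >= head_st_off;
}

int main(void)
{
	/* head load @0, head store @4, next load @4: reads stored bytes */
	printf("overlap: %d\n", ascending_cross(4, 0, 4));
	return 0;
}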
+
+/* This pass tries to identify the following instruction sequences:
+ *
+ * load R, [regA + offA]
+ * store [regB + offB], R
+ * load R, [regA + offA + const_imm_A]
+ * store [regB + offB + const_imm_A], R
+ * load R, [regA + offA + 2 * const_imm_A]
+ * store [regB + offB + 2 * const_imm_A], R
+ * ...
+ *
+ * The above sequence is typically generated by the compiler when lowering
+ * a memcpy. The NFP prefers using CPP instructions to accelerate it.
+ */
+static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *head_ld_meta = NULL;
+ struct nfp_insn_meta *head_st_meta = NULL;
+ struct nfp_insn_meta *meta1, *meta2;
+ struct bpf_insn *prev_ld = NULL;
+ struct bpf_insn *prev_st = NULL;
+ u8 count = 0;
+
+ nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
+ struct bpf_insn *ld = &meta1->insn;
+ struct bpf_insn *st = &meta2->insn;
+
+ /* Reset record status if any of the following is true:
+ * - The current insn pair is not a load/store pair.
+ * - The load/store pair doesn't chain with the previous one.
+ * - The chained load/store pair crosses the previous pair.
+ * - The chained load/store pair has a total memory copy size
+ * beyond 128 bytes, which is the maximum length a single
+ * NFP CPP command can transfer.
+ */
+ if (!curr_pair_is_memcpy(meta1, meta2) ||
+ !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
+ prev_st) ||
+ (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
+ head_st_meta) ||
+ head_ld_meta->ldst_gather_len >= 128))) {
+ if (!count)
+ continue;
+
+ if (count > 1) {
+ s16 prev_ld_off = prev_ld->off;
+ s16 prev_st_off = prev_st->off;
+ s16 head_ld_off = head_ld_meta->insn.off;
+
+ if (prev_ld_off < head_ld_off) {
+ head_ld_meta->insn.off = prev_ld_off;
+ head_st_meta->insn.off = prev_st_off;
+ head_ld_meta->ldst_gather_len =
+ -head_ld_meta->ldst_gather_len;
+ }
+
+ head_ld_meta->paired_st = &head_st_meta->insn;
+ head_st_meta->skip = true;
+ } else {
+ head_ld_meta->ldst_gather_len = 0;
+ }
+
+ /* If the chain is ended by a load/store pair then this
+ * could serve as the new head of the next chain.
+ */
+ if (curr_pair_is_memcpy(meta1, meta2)) {
+ head_ld_meta = meta1;
+ head_st_meta = meta2;
+ head_ld_meta->ldst_gather_len =
+ BPF_LDST_BYTES(ld);
+ meta1 = nfp_meta_next(meta1);
+ meta2 = nfp_meta_next(meta2);
+ prev_ld = ld;
+ prev_st = st;
+ count = 1;
+ } else {
+ head_ld_meta = NULL;
+ head_st_meta = NULL;
+ prev_ld = NULL;
+ prev_st = NULL;
+ count = 0;
+ }
+
+ continue;
+ }
+
+ if (!head_ld_meta) {
+ head_ld_meta = meta1;
+ head_st_meta = meta2;
+ } else {
+ meta1->skip = true;
+ meta2->skip = true;
+ }
+
+ head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
+ meta1 = nfp_meta_next(meta1);
+ meta2 = nfp_meta_next(meta2);
+ prev_ld = ld;
+ prev_st = st;
+ count++;
+ }
+}
+
static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
nfp_bpf_opt_reg_init(nfp_prog);
nfp_bpf_opt_ld_mask(nfp_prog);
nfp_bpf_opt_ld_shift(nfp_prog);
+ nfp_bpf_opt_ldst_gather(nfp_prog);
return 0;
}
-static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
+static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
+ __le64 *ustore = (__force __le64 *)prog;
int i;
- for (i = 0; i < nfp_prog->prog_len; i++) {
+ for (i = 0; i < len; i++) {
int err;
- err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]);
+ err = nfp_ustore_check_valid_no_ecc(prog[i]);
if (err)
return err;
- nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]);
-
- ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
+ ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
}
return 0;
}
+static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
+{
+ void *prog;
+
+ prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
+ if (!prog)
+ return;
+
+ nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
+ memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
+ kvfree(nfp_prog->prog);
+ nfp_prog->prog = prog;
+}
+
int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
int ret;
@@ -2258,5 +2879,102 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog)
return -EINVAL;
}
- return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog);
+ nfp_bpf_prog_trim(nfp_prog);
+
+ return ret;
+}
+
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
+{
+ struct nfp_insn_meta *meta;
+
+ /* Another pass to record jump information. */
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ u64 code = meta->insn.code;
+
+ if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
+ BPF_OP(code) != BPF_CALL) {
+ struct nfp_insn_meta *dst_meta;
+ unsigned short dst_indx;
+
+ dst_indx = meta->n + 1 + meta->insn.off;
+ dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
+ cnt);
+
+ meta->jmp_dst = dst_meta;
+ dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
+ }
+ }
+}
+
+bool nfp_bpf_supported_opcode(u8 code)
+{
+ return !!instr_cb[code];
+}
+
+void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
+{
+ unsigned int i;
+ u64 *prog;
+ int err;
+
+ prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
+ GFP_KERNEL);
+ if (!prog)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nfp_prog->prog_len; i++) {
+ enum nfp_relo_type special;
+ u32 val;
+
+ special = FIELD_GET(OP_RELO_TYPE, prog[i]);
+ switch (special) {
+ case RELO_NONE:
+ continue;
+ case RELO_BR_REL:
+ br_add_offset(&prog[i], bv->start_off);
+ break;
+ case RELO_BR_GO_OUT:
+ br_set_offset(&prog[i],
+ nfp_prog->tgt_out + bv->start_off);
+ break;
+ case RELO_BR_GO_ABORT:
+ br_set_offset(&prog[i],
+ nfp_prog->tgt_abort + bv->start_off);
+ break;
+ case RELO_BR_NEXT_PKT:
+ br_set_offset(&prog[i], bv->tgt_done);
+ break;
+ case RELO_BR_HELPER:
+ val = br_get_offset(prog[i]);
+ val -= BR_OFF_RELO;
+ switch (val) {
+ case BPF_FUNC_map_lookup_elem:
+ val = nfp_prog->bpf->helpers.map_lookup;
+ break;
+ default:
+ pr_err("relocation of unknown helper %d\n",
+ val);
+ err = -EINVAL;
+ goto err_free_prog;
+ }
+ br_set_offset(&prog[i], val);
+ break;
+ case RELO_IMMED_REL:
+ immed_add_value(&prog[i], bv->start_off);
+ break;
+ }
+
+ prog[i] &= ~OP_RELO_TYPE;
+ }
+
+ err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
+ if (err)
+ goto err_free_prog;
+
+ return prog;
+
+err_free_prog:
+ kfree(prog);
+ return ERR_PTR(err);
}
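For illustration, the difference between the RELO_BR_REL and the absolute RELO_BR_GO_*/RELO_BR_NEXT_PKT fixups above is add-a-delta versus set-a-target. A standalone sketch (plain C; the branch-address field is simplified to the low 16 bits of the word, whereas the real br_set_offset()/br_add_offset() handle the split HI/LO fields):

#include <stdint.h>
#include <stdio.h>

static void set_off(uint64_t *insn, uint16_t off)	/* ~br_set_offset() */
{
	*insn = (*insn & ~0xffffULL) | off;
}

static void add_off(uint64_t *insn, uint16_t delta)	/* ~br_add_offset() */
{
	set_off(insn, (uint16_t)((*insn & 0xffff) + delta));
}

int main(void)
{
	uint64_t insn = 100;	/* RELO_BR_REL target, program-relative */

	add_off(&insn, 0x200);	/* shift by the vNIC's start_off */
	printf("absolute target = %llu\n",
	       (unsigned long long)(insn & 0xffff));

	set_off(&insn, 0x123);	/* RELO_BR_NEXT_PKT-style absolute fixup */
	printf("absolute target = 0x%llx\n",
	       (unsigned long long)(insn & 0xffff));
	return 0;
}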
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index e379b78e86ef..322027792fe8 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -34,10 +34,12 @@
#include <net/pkt_cls.h>
#include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nffw.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
+#include "fw.h"
#include "main.h"
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
@@ -52,7 +54,7 @@ static bool nfp_net_ebpf_capable(struct nfp_net *nn)
static int
nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+ struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
bool running, xdp_running;
int ret;
@@ -68,10 +70,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
if (prog && running && !xdp_running)
return -EBUSY;
- ret = nfp_net_bpf_offload(nn, prog, running);
+ ret = nfp_net_bpf_offload(nn, prog, running, extack);
/* Stop offload if replace not possible */
if (ret && prog)
- nfp_bpf_xdp_offload(app, nn, NULL);
+ nfp_bpf_xdp_offload(app, nn, NULL, extack);
nn->dp.bpf_offload_xdp = prog && !ret;
return ret;
@@ -82,10 +84,36 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
return nfp_net_ebpf_capable(nn) ? "BPF" : "";
}
+static int
+nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
+{
+ struct nfp_bpf_vnic *bv;
+ int err;
+
+ bv = kzalloc(sizeof(*bv), GFP_KERNEL);
+ if (!bv)
+ return -ENOMEM;
+ nn->app_priv = bv;
+
+ err = nfp_app_nic_vnic_alloc(app, nn, id);
+ if (err)
+ goto err_free_priv;
+
+ bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
+ bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+
+ return 0;
+err_free_priv:
+ kfree(nn->app_priv);
+ return err;
+}
+
static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
- if (nn->dp.bpf_offload_xdp)
- nfp_bpf_xdp_offload(app, nn, NULL);
+ struct nfp_bpf_vnic *bv = nn->app_priv;
+
+ WARN_ON(bv->tc_prog);
+ kfree(bv);
}
static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
@@ -93,33 +121,56 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
{
struct tc_cls_bpf_offload *cls_bpf = type_data;
struct nfp_net *nn = cb_priv;
+ struct bpf_prog *oldprog;
+ struct nfp_bpf_vnic *bv;
+ int err;
- if (type != TC_SETUP_CLSBPF ||
- !tc_can_offload(nn->dp.netdev) ||
- !nfp_net_ebpf_capable(nn) ||
- cls_bpf->common.protocol != htons(ETH_P_ALL) ||
- cls_bpf->common.chain_index)
+ if (type != TC_SETUP_CLSBPF) {
+ NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+ "only offload of BPF classifiers supported");
return -EOPNOTSUPP;
- if (nn->dp.bpf_offload_xdp)
- return -EBUSY;
+ }
+ if (!tc_cls_can_offload_and_chain0(nn->dp.netdev, &cls_bpf->common))
+ return -EOPNOTSUPP;
+ if (!nfp_net_ebpf_capable(nn)) {
+ NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+ "NFP firmware does not support eBPF offload");
+ return -EOPNOTSUPP;
+ }
+ if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
+ NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+ "only ETH_P_ALL supported as filter protocol");
+ return -EOPNOTSUPP;
+ }
/* Only support TC direct action */
if (!cls_bpf->exts_integrated ||
tcf_exts_has_actions(cls_bpf->exts)) {
- nn_err(nn, "only direct action with no legacy actions supported\n");
+ NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+ "only direct action with no legacy actions supported");
return -EOPNOTSUPP;
}
- switch (cls_bpf->command) {
- case TC_CLSBPF_REPLACE:
- return nfp_net_bpf_offload(nn, cls_bpf->prog, true);
- case TC_CLSBPF_ADD:
- return nfp_net_bpf_offload(nn, cls_bpf->prog, false);
- case TC_CLSBPF_DESTROY:
- return nfp_net_bpf_offload(nn, NULL, true);
- default:
+ if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
return -EOPNOTSUPP;
+
+ bv = nn->app_priv;
+ oldprog = cls_bpf->oldprog;
+
+ /* Don't remove if oldprog doesn't match driver's state */
+ if (bv->tc_prog != oldprog) {
+ oldprog = NULL;
+ if (!cls_bpf->prog)
+ return 0;
}
+
+ err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog,
+ cls_bpf->common.extack);
+ if (err)
+ return err;
+
+ bv->tc_prog = cls_bpf->prog;
+ return 0;
}
static int nfp_bpf_setup_tc_block(struct net_device *netdev,
@@ -158,23 +209,215 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
{
- return nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
+ struct nfp_bpf_vnic *bv = nn->app_priv;
+
+ return !!bv->tc_prog;
+}
+
+static int
+nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ unsigned int max_mtu;
+
+ if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
+ return 0;
+
+ max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+ if (new_mtu > max_mtu) {
+ nn_info(nn, "BPF offload active, MTU over %u not supported\n",
+ max_mtu);
+ return -EBUSY;
+ }
+ return 0;
+}
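A standalone sketch of the MTU ceiling computed above (plain C; reading the formula as "64-byte units with 32 bytes reserved" is an interpretation, the code only guarantees reg * 64 - 32):

#include <stdio.h>

int main(void)
{
	unsigned int reg;

	for (reg = 32; reg <= 64; reg += 16)
		printf("NFP_NET_CFG_BPF_INL_MTU=%u -> max_mtu=%u\n",
		       reg, reg * 64 - 32);
	return 0;
}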
+
+static int
+nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
+ u32 length)
+{
+ struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value;
+ struct nfp_cpp *cpp = bpf->app->pf->cpp;
+
+ if (length < sizeof(*cap)) {
+ nfp_err(cpp, "truncated adjust_head TLV: %d\n", length);
+ return -EINVAL;
+ }
+
+ bpf->adjust_head.flags = readl(&cap->flags);
+ bpf->adjust_head.off_min = readl(&cap->off_min);
+ bpf->adjust_head.off_max = readl(&cap->off_max);
+ bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
+ bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);
+
+ if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
+ nfp_err(cpp, "invalid adjust_head TLV: min > max\n");
+ return -EINVAL;
+ }
+ if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
+ !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
+ nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit in as immediates\n");
+ memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
+ return 0;
+ }
+
+ return 0;
+}
+
+static int
+nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
+{
+ struct nfp_bpf_cap_tlv_func __iomem *cap = value;
+
+ if (length < sizeof(*cap)) {
+ nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
+ return -EINVAL;
+ }
+
+ switch (readl(&cap->func_id)) {
+ case BPF_FUNC_map_lookup_elem:
+ bpf->helpers.map_lookup = readl(&cap->func_addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
+{
+ struct nfp_bpf_cap_tlv_maps __iomem *cap = value;
+
+ if (length < sizeof(*cap)) {
+ nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
+ return -EINVAL;
+ }
+
+ bpf->maps.types = readl(&cap->types);
+ bpf->maps.max_maps = readl(&cap->max_maps);
+ bpf->maps.max_elems = readl(&cap->max_elems);
+ bpf->maps.max_key_sz = readl(&cap->max_key_sz);
+ bpf->maps.max_val_sz = readl(&cap->max_val_sz);
+ bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);
+
+ return 0;
+}
+
+static int nfp_bpf_parse_capabilities(struct nfp_app *app)
+{
+ struct nfp_cpp *cpp = app->pf->cpp;
+ struct nfp_cpp_area *area;
+ u8 __iomem *mem, *start;
+
+ mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap",
+ 8, &area);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);
+
+ start = mem;
+ while (mem - start + 8 <= nfp_cpp_area_size(area)) {
+ u8 __iomem *value;
+ u32 type, length;
+
+ type = readl(mem);
+ length = readl(mem + 4);
+ value = mem + 8;
+
+ mem += 8 + length;
+ if (mem - start > nfp_cpp_area_size(area))
+ goto err_release_free;
+
+ switch (type) {
+ case NFP_BPF_CAP_TYPE_FUNC:
+ if (nfp_bpf_parse_cap_func(app->priv, value, length))
+ goto err_release_free;
+ break;
+ case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
+ if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
+ length))
+ goto err_release_free;
+ break;
+ case NFP_BPF_CAP_TYPE_MAPS:
+ if (nfp_bpf_parse_cap_maps(app->priv, value, length))
+ goto err_release_free;
+ break;
+ default:
+ nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
+ break;
+ }
+ }
+ if (mem - start != nfp_cpp_area_size(area)) {
+ nfp_err(cpp, "BPF capabilities left after parsing, parsed:%zd total length:%zu\n",
+ mem - start, nfp_cpp_area_size(area));
+ goto err_release_free;
+ }
+
+ nfp_cpp_area_release_free(area);
+
+ return 0;
+
+err_release_free:
+ nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start);
+ nfp_cpp_area_release_free(area);
+ return -EINVAL;
+}
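The capability area is a simple TLV stream: a 4-byte type, a 4-byte length, then length bytes of value, repeated until the symbol is exhausted. A standalone sketch of walking one such record (plain C; the type id is hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
	uint32_t type;
	uint32_t length;
};

int main(void)
{
	uint8_t buf[12];
	struct tlv_hdr h = { .type = 2, .length = 4 };	/* hypothetical id */
	uint32_t val = 42;
	size_t off = 0;

	memcpy(buf, &h, sizeof(h));
	memcpy(buf + sizeof(h), &val, sizeof(val));

	while (off + sizeof(h) <= sizeof(buf)) {
		memcpy(&h, buf + off, sizeof(h));
		printf("type=%u length=%u\n", h.type, h.length);
		off += sizeof(h) + h.length;
	}
	return 0;
}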
+
+static int nfp_bpf_init(struct nfp_app *app)
+{
+ struct nfp_app_bpf *bpf;
+ int err;
+
+ bpf = kzalloc(sizeof(*bpf), GFP_KERNEL);
+ if (!bpf)
+ return -ENOMEM;
+ bpf->app = app;
+ app->priv = bpf;
+
+ skb_queue_head_init(&bpf->cmsg_replies);
+ init_waitqueue_head(&bpf->cmsg_wq);
+ INIT_LIST_HEAD(&bpf->map_list);
+
+ err = nfp_bpf_parse_capabilities(app);
+ if (err)
+ goto err_free_bpf;
+
+ return 0;
+
+err_free_bpf:
+ kfree(bpf);
+ return err;
+}
+
+static void nfp_bpf_clean(struct nfp_app *app)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+
+ WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
+ WARN_ON(!list_empty(&bpf->map_list));
+ WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
+ kfree(bpf);
}
const struct nfp_app_type app_bpf = {
.id = NFP_APP_BPF_NIC,
.name = "ebpf",
+ .ctrl_cap_mask = 0,
+
+ .init = nfp_bpf_init,
+ .clean = nfp_bpf_clean,
+
+ .change_mtu = nfp_bpf_change_mtu,
+
.extra_cap = nfp_bpf_extra_cap,
- .vnic_alloc = nfp_app_nic_vnic_alloc,
+ .vnic_alloc = nfp_bpf_vnic_alloc,
.vnic_free = nfp_bpf_vnic_free,
+ .ctrl_msg_rx = nfp_bpf_ctrl_msg_rx,
+
.setup_tc = nfp_bpf_setup_tc,
.tc_busy = nfp_bpf_tc_busy,
+ .bpf = nfp_ndo_bpf,
.xdp_offload = nfp_bpf_xdp_offload,
-
- .bpf_verifier_prep = nfp_bpf_verifier_prep,
- .bpf_translate = nfp_bpf_translate,
- .bpf_destroy = nfp_bpf_destroy,
};
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 082a15f6dfb5..424fe8338105 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -37,22 +37,40 @@
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
+#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/skbuff.h>
#include <linux/types.h>
+#include <linux/wait.h>
#include "../nfp_asm.h"
+#include "fw.h"
-/* For branch fixup logic use up-most byte of branch instruction as scratch
+/* For relocation logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW!
*/
-#define OP_BR_SPECIAL 0xff00000000000000ULL
+#define OP_RELO_TYPE 0xff00000000000000ULL
-enum br_special {
- OP_BR_NORMAL = 0,
- OP_BR_GO_OUT,
- OP_BR_GO_ABORT,
+enum nfp_relo_type {
+ RELO_NONE = 0,
+ /* standard internal jumps */
+ RELO_BR_REL,
+ /* internal jumps to parts of the outro */
+ RELO_BR_GO_OUT,
+ RELO_BR_GO_ABORT,
+ /* external jumps to fixed addresses */
+ RELO_BR_NEXT_PKT,
+ RELO_BR_HELPER,
+ /* immediate relocation against load address */
+ RELO_IMMED_REL,
};
+/* To make absolute relocated branches (branches other than RELO_BR_REL)
+ * distinguishable in user space dumps from normal jumps, add a large offset
+ * to them.
+ */
+#define BR_OFF_RELO 15000
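A standalone sketch of the scratch-byte encoding described above (plain C; FIELD_PREP/FIELD_GET are replaced by plain shifts for a fixed top-byte field):

#include <stdint.h>
#include <stdio.h>

#define OP_RELO_TYPE 0xff00000000000000ULL

int main(void)
{
	uint64_t insn = 0x0000123456789abcULL;
	unsigned int relo = 3;		/* e.g. RELO_BR_GO_ABORT */

	insn |= (uint64_t)relo << 56;	/* FIELD_PREP(OP_RELO_TYPE, relo) */
	printf("relo type = %u\n", (unsigned int)(insn >> 56));

	insn &= ~OP_RELO_TYPE;		/* clear before sending to HW */
	printf("clean insn = %016llx\n", (unsigned long long)insn);
	return 0;
}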
+
enum static_regs {
STATIC_REG_IMM = 21, /* Bank AB */
STATIC_REG_STACK = 22, /* Bank A */
@@ -78,6 +96,89 @@ enum pkt_vec {
#define NFP_BPF_ABI_FLAGS reg_imm(0)
#define NFP_BPF_ABI_FLAG_MARK 1
+/**
+ * struct nfp_app_bpf - bpf app priv structure
+ * @app: backpointer to the app
+ *
+ * @tag_allocator: bitmap of control message tags in use
+ * @tag_alloc_next: next tag bit to allocate
+ * @tag_alloc_last: next tag bit to be freed
+ *
+ * @cmsg_replies: received cmsg replies waiting to be consumed
+ * @cmsg_wq: work queue for waiting for cmsg replies
+ *
+ * @map_list: list of offloaded maps
+ * @maps_in_use: number of currently offloaded maps
+ * @map_elems_in_use: number of elements allocated to offloaded maps
+ *
+ * @adjust_head: adjust head capability
+ * @flags: extra flags for adjust head
+ * @off_min: minimal packet offset within buffer required
+ * @off_max: maximum packet offset within buffer required
+ * @guaranteed_sub: amount of negative adjustment guaranteed possible
+ * @guaranteed_add: amount of positive adjustment guaranteed possible
+ *
+ * @maps: map capability
+ * @types: supported map types
+ * @max_maps: max number of maps supported
+ * @max_elems: max number of entries in each map
+ * @max_key_sz: max size of map key
+ * @max_val_sz: max size of map value
+ * @max_elem_sz: max size of map entry (key + value)
+ *
+ * @helpers: helper addresses for various calls
+ * @map_lookup: map lookup helper address
+ */
+struct nfp_app_bpf {
+ struct nfp_app *app;
+
+ DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
+ u16 tag_alloc_next;
+ u16 tag_alloc_last;
+
+ struct sk_buff_head cmsg_replies;
+ struct wait_queue_head cmsg_wq;
+
+ struct list_head map_list;
+ unsigned int maps_in_use;
+ unsigned int map_elems_in_use;
+
+ struct nfp_bpf_cap_adjust_head {
+ u32 flags;
+ int off_min;
+ int off_max;
+ int guaranteed_sub;
+ int guaranteed_add;
+ } adjust_head;
+
+ struct {
+ u32 types;
+ u32 max_maps;
+ u32 max_elems;
+ u32 max_key_sz;
+ u32 max_val_sz;
+ u32 max_elem_sz;
+ } maps;
+
+ struct {
+ u32 map_lookup;
+ } helpers;
+};
+
+/**
+ * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
+ * @offmap: pointer to the offloaded BPF map
+ * @bpf: back pointer to bpf app private structure
+ * @tid: table id identifying map on datapath
+ * @l: link on the nfp_app_bpf->map_list list
+ */
+struct nfp_bpf_map {
+ struct bpf_offloaded_map *offmap;
+ struct nfp_app_bpf *bpf;
+ u32 tid;
+ struct list_head l;
+};
+
struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
@@ -89,23 +190,47 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
#define nfp_meta_next(meta) list_next_entry(meta, l)
#define nfp_meta_prev(meta) list_prev_entry(meta, l)
+#define FLAG_INSN_IS_JUMP_DST BIT(0)
+
/**
* struct nfp_insn_meta - BPF instruction wrapper
* @insn: BPF instruction
* @ptr: pointer type for memory operations
+ * @ldst_gather_len: memcpy length gathered from load/store sequence
+ * @paired_st: the paired store insn at the head of the sequence
* @ptr_not_const: pointer is not always constant
+ * @jmp_dst: destination info for jump instructions
+ * @func_id: function id for call instructions
+ * @arg1: arg1 for call instructions
+ * @arg2: arg2 for call instructions
+ * @arg2_var_off: arg2 changes stack offset on different paths
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
+ * @flags: eBPF instruction extra optimization flags
* @skip: skip this instruction (optimized out)
* @double_cb: callback for second part of the instruction
* @l: link on nfp_prog->insns list
*/
struct nfp_insn_meta {
struct bpf_insn insn;
- struct bpf_reg_state ptr;
- bool ptr_not_const;
+ union {
+ struct {
+ struct bpf_reg_state ptr;
+ struct bpf_insn *paired_st;
+ s16 ldst_gather_len;
+ bool ptr_not_const;
+ };
+ struct nfp_insn_meta *jmp_dst;
+ struct {
+ u32 func_id;
+ struct bpf_reg_state arg1;
+ struct bpf_reg_state arg2;
+ bool arg2_var_off;
+ };
+ };
unsigned int off;
unsigned short n;
+ unsigned short flags;
bool skip;
instr_cb_t double_cb;
@@ -134,23 +259,36 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
return BPF_MODE(meta->insn.code);
}
+static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
+{
+ return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
+}
+
+static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
+{
+ return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
+}
+
/**
* struct nfp_prog - nfp BPF program
+ * @bpf: backpointer to the bpf app priv structure
* @prog: machine code
* @prog_len: number of valid instructions in @prog array
* @__prog_alloc_len: alloc size of @prog array
* @verifier_meta: temporary storage for verifier's insn meta
* @type: BPF program type
- * @start_off: address of the first instruction in the memory
+ * @last_bpf_off: address of the last instruction translated from BPF
* @tgt_out: jump target for normal exit
* @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
- * @tgt_done: jump target to get the next packet
* @n_translated: number of successfully translated instructions (for errors)
* @error: error code if something went wrong
* @stack_depth: max stack depth from the verifier
+ * @adjust_head_location: if program has single adjust head call - the insn no.
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/
struct nfp_prog {
+ struct nfp_app_bpf *bpf;
+
u64 *prog;
unsigned int prog_len;
unsigned int __prog_alloc_len;
@@ -159,34 +297,65 @@ struct nfp_prog {
enum bpf_prog_type type;
- unsigned int start_off;
+ unsigned int last_bpf_off;
unsigned int tgt_out;
unsigned int tgt_abort;
- unsigned int tgt_done;
unsigned int n_translated;
int error;
unsigned int stack_depth;
+ unsigned int adjust_head_location;
struct list_head insns;
};
+/**
+ * struct nfp_bpf_vnic - per-vNIC BPF priv structure
+ * @tc_prog: currently loaded cls_bpf program
+ * @start_off: address of the first instruction in the memory
+ * @tgt_done: jump target to get the next packet
+ */
+struct nfp_bpf_vnic {
+ struct bpf_prog *tc_prog;
+ unsigned int start_off;
+ unsigned int tgt_done;
+};
+
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
+bool nfp_bpf_supported_opcode(u8 code);
-extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
+extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
struct netdev_bpf;
struct nfp_app;
struct nfp_net;
+int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
- bool old_prog);
-
-int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf);
-int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
-int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
+ bool old_prog, struct netlink_ext_ack *extack);
+
+struct nfp_insn_meta *
+nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int insn_idx, unsigned int n_insns);
+
+void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
+
+long long int
+nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
+void
+nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
+int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
+ void *next_key);
+int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value, u64 flags);
+int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
+int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *value);
+int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key);
+
+void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index bc879aeb62d4..0a7732385469 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -36,18 +36,23 @@
* Netronome network device driver: TC offload functions for PF and VF
*/
+#define pr_fmt(fmt) "NFP net bpf: " fmt
+
+#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
+#include <linux/mm.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include "main.h"
+#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
@@ -55,11 +60,10 @@ static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
unsigned int cnt)
{
+ struct nfp_insn_meta *meta;
unsigned int i;
for (i = 0; i < cnt; i++) {
- struct nfp_insn_meta *meta;
-
meta = kzalloc(sizeof(*meta), GFP_KERNEL);
if (!meta)
return -ENOMEM;
@@ -70,6 +74,8 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
list_add_tail(&meta->l, &nfp_prog->insns);
}
+ nfp_bpf_jit_prepare(nfp_prog, cnt);
+
return 0;
}
@@ -84,8 +90,9 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
kfree(nfp_prog);
}
-int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf)
+static int
+nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf)
{
struct bpf_prog *prog = bpf->verifier.prog;
struct nfp_prog *nfp_prog;
@@ -98,6 +105,7 @@ int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
INIT_LIST_HEAD(&nfp_prog->insns);
nfp_prog->type = prog->type;
+ nfp_prog->bpf = app->priv;
ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
if (ret)
@@ -114,12 +122,12 @@ err_free:
return ret;
}
-int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int stack_size;
unsigned int max_instr;
+ int err;
stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
if (prog->aux->stack_depth > stack_size) {
@@ -127,50 +135,179 @@ int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
prog->aux->stack_depth, stack_size);
return -EOPNOTSUPP;
}
-
- nfp_prog->stack_depth = prog->aux->stack_depth;
- nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
- nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+ nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
- nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
+ nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
if (!nfp_prog->prog)
return -ENOMEM;
- return nfp_bpf_jit(nfp_prog);
+ err = nfp_bpf_jit(nfp_prog);
+ if (err)
+ return err;
+
+ prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
+ prog->aux->offload->jited_image = nfp_prog->prog;
+
+ return 0;
}
-int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- kfree(nfp_prog->prog);
+ kvfree(nfp_prog->prog);
nfp_prog_free(nfp_prog);
return 0;
}
-static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
+static int
+nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key)
+{
+ if (!key)
+ return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
+ return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
+}
+
+static int
+nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
+{
+ if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
+ return -EINVAL;
+ return nfp_bpf_ctrl_del_entry(offmap, key);
+}
+
+static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
+ .map_get_next_key = nfp_bpf_map_get_next_key,
+ .map_lookup_elem = nfp_bpf_ctrl_lookup_entry,
+ .map_update_elem = nfp_bpf_ctrl_update_entry,
+ .map_delete_elem = nfp_bpf_map_delete_elem,
+};
+
+static int
+nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
+{
+ struct nfp_bpf_map *nfp_map;
+ long long int res;
+
+ if (!bpf->maps.types)
+ return -EOPNOTSUPP;
+
+ if (offmap->map.map_flags ||
+ offmap->map.numa_node != NUMA_NO_NODE) {
+ pr_info("map flags are not supported\n");
+ return -EINVAL;
+ }
+
+ if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
+ pr_info("map type not supported\n");
+ return -EOPNOTSUPP;
+ }
+ if (bpf->maps.max_maps == bpf->maps_in_use) {
+ pr_info("too many maps for a device\n");
+ return -ENOMEM;
+ }
+ if (bpf->maps.max_elems - bpf->map_elems_in_use <
+ offmap->map.max_entries) {
+ pr_info("map with too many elements: %u, left: %u\n",
+ offmap->map.max_entries,
+ bpf->maps.max_elems - bpf->map_elems_in_use);
+ return -ENOMEM;
+ }
+ if (offmap->map.key_size > bpf->maps.max_key_sz ||
+ offmap->map.value_size > bpf->maps.max_val_sz ||
+ round_up(offmap->map.key_size, 8) +
+ round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
+ pr_info("elements don't fit in device constraints\n");
+ return -ENOMEM;
+ }
+
+ nfp_map = kzalloc(sizeof(*nfp_map), GFP_USER);
+ if (!nfp_map)
+ return -ENOMEM;
+
+ offmap->dev_priv = nfp_map;
+ nfp_map->offmap = offmap;
+ nfp_map->bpf = bpf;
+
+ res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
+ if (res < 0) {
+ kfree(nfp_map);
+ return res;
+ }
+
+ nfp_map->tid = res;
+ offmap->dev_ops = &nfp_bpf_map_ops;
+ bpf->maps_in_use++;
+ bpf->map_elems_in_use += offmap->map.max_entries;
+ list_add_tail(&nfp_map->l, &bpf->map_list);
+
+ return 0;
+}
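
Illustrative sketch (not part of the patch): the firmware advertises supported map types as a bitmap indexed by enum bpf_map_type, so the "types & 1 << map_type" test above is a single-bit capability check.

#include <linux/bitops.h>

static bool example_map_type_supported(u64 fw_types, enum bpf_map_type type)
{
	/* e.g. fw_types = BIT_ULL(BPF_MAP_TYPE_HASH) for hash-map-only FW */
	return fw_types & BIT_ULL(type);
}
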
+
+static int
+nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
+{
+ struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+
+ nfp_bpf_ctrl_free_map(bpf, nfp_map);
+ list_del_init(&nfp_map->l);
+ bpf->map_elems_in_use -= offmap->map.max_entries;
+ bpf->maps_in_use--;
+ kfree(nfp_map);
+
+ return 0;
+}
+
+int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case BPF_OFFLOAD_VERIFIER_PREP:
+ return nfp_bpf_verifier_prep(app, nn, bpf);
+ case BPF_OFFLOAD_TRANSLATE:
+ return nfp_bpf_translate(nn, bpf->offload.prog);
+ case BPF_OFFLOAD_DESTROY:
+ return nfp_bpf_destroy(nn, bpf->offload.prog);
+ case BPF_OFFLOAD_MAP_ALLOC:
+ return nfp_bpf_map_alloc(app->priv, bpf->offmap);
+ case BPF_OFFLOAD_MAP_FREE:
+ return nfp_bpf_map_free(app->priv, bpf->offmap);
+ default:
+ return -EINVAL;
+ }
+}
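
A hedged sketch (not part of the patch) of how a vNIC's .ndo_bpf callback would funnel every offload command into nfp_ndo_bpf() via the app layer added later in this patch; the function name and the nn->app field access are assumptions for illustration.

static int example_ndo_bpf(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct nfp_net *nn = netdev_priv(netdev);

	return nfp_app_bpf(nn->app, nn, bpf);	/* dispatches to app->type->bpf() */
}
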
+
+static int
+nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int max_mtu;
dma_addr_t dma_addr;
+ void *img;
int err;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
if (max_mtu < nn->dp.netdev->mtu) {
- nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
+ NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
return -EOPNOTSUPP;
}
- dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
+ img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
+ if (IS_ERR(img))
+ return PTR_ERR(img);
+
+ dma_addr = dma_map_single(nn->dp.dev, img,
nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
- if (dma_mapping_error(nn->dp.dev, dma_addr))
+ if (dma_mapping_error(nn->dp.dev, dma_addr)) {
+ kfree(img);
return -ENOMEM;
+ }
nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
@@ -178,15 +315,18 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
/* Load up the JITed code */
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
if (err)
- nn_err(nn, "FW command error while loading BPF: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "FW command error while loading BPF");
dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
+ kfree(img);
return err;
}
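
A short sketch (not part of the patch; function name hypothetical) of the extack pattern adopted above: NL_SET_ERR_MSG_MOD() records a constant string in the netlink extended ack, prefixed with the module name, so the failure reason reaches the requesting userspace tool instead of only dmesg.

static int example_offload_check(struct net_device *dev,
				 struct netlink_ext_ack *extack)
{
	if (dev->mtu > 9000) {
		/* String must be a constant; it is not copied */
		NL_SET_ERR_MSG_MOD(extack, "MTU too large for offload");
		return -EOPNOTSUPP;
	}
	return 0;
}
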
-static void nfp_net_bpf_start(struct nfp_net *nn)
+static void
+nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
int err;
@@ -195,7 +335,8 @@ static void nfp_net_bpf_start(struct nfp_net *nn)
nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
if (err)
- nn_err(nn, "FW command error while enabling BPF: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "FW command error while enabling BPF");
}
static int nfp_net_bpf_stop(struct nfp_net *nn)
@@ -210,12 +351,12 @@ static int nfp_net_bpf_stop(struct nfp_net *nn)
}
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
- bool old_prog)
+ bool old_prog, struct netlink_ext_ack *extack)
{
int err;
if (prog) {
- struct bpf_dev_offload *offload = prog->aux->offload;
+ struct bpf_prog_offload *offload = prog->aux->offload;
if (!offload)
return -EINVAL;
@@ -228,7 +369,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
if (!(cap & NFP_NET_BPF_CAP_RELO)) {
- nn_err(nn, "FW does not support live reload\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "FW does not support live reload");
return -EBUSY;
}
}
@@ -240,12 +382,12 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
if (old_prog && !prog)
return nfp_net_bpf_stop(nn);
- err = nfp_net_bpf_load(nn, prog);
+ err = nfp_net_bpf_load(nn, prog, extack);
if (err)
return err;
if (!old_prog)
- nfp_net_bpf_start(nn);
+ nfp_net_bpf_start(nn, extack);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 8d43491ddd6b..479f602887e9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -31,16 +31,18 @@
* SOFTWARE.
*/
-#define pr_fmt(fmt) "NFP net bpf: " fmt
-
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
+#include "fw.h"
#include "main.h"
-static struct nfp_insn_meta *
+#define pr_vlog(env, fmt, ...) \
+ bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)
+
+struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns)
{
@@ -68,6 +70,114 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return meta;
}
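
Illustrative use (not part of the patch) of the pr_vlog() macro defined above: bpf_verifier_log_write() appends to the log buffer that userspace passed to bpf(2), so driver-side rejections show up next to the core verifier's own messages, tagged with the "[nfp]" prefix.

static int example_reject_insn(struct bpf_verifier_env *env, int insn_idx)
{
	pr_vlog(env, "instruction %d not supported\n", insn_idx);
	return -EOPNOTSUPP;
}
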
+static void
+nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
+ struct nfp_insn_meta *meta,
+ const struct bpf_reg_state *reg2)
+{
+ unsigned int location = UINT_MAX;
+ int imm;
+
+ /* The datapath can usually give us guarantees on how much adjust head
+ * can be done without the need for any checks. Optimize the simple
+ * case where there is only one adjust head by a constant.
+ */
+ if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
+ goto exit_set_location;
+ imm = reg2->var_off.value;
+ /* Translator will skip all checks; we need to guarantee min pkt len */
+ if (imm > ETH_ZLEN - ETH_HLEN)
+ goto exit_set_location;
+ if (imm > (int)bpf->adjust_head.guaranteed_add ||
+ imm < -bpf->adjust_head.guaranteed_sub)
+ goto exit_set_location;
+
+ if (nfp_prog->adjust_head_location) {
+ /* Only one call per program allowed */
+ if (nfp_prog->adjust_head_location != meta->n)
+ goto exit_set_location;
+
+ if (meta->arg2.var_off.value != imm)
+ goto exit_set_location;
+ }
+
+ location = meta->n;
+exit_set_location:
+ nfp_prog->adjust_head_location = location;
+}
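
A minimal sketch (not part of the patch) of the tnum test used above: a tracked number (tnum) whose unknown-bits mask is all zero is fully known, which is what tnum_is_const() checks; var_off.value is then the concrete value.

static bool example_const_scalar(const struct bpf_reg_state *reg, s64 *val)
{
	if (reg->type != SCALAR_VALUE || !tnum_is_const(reg->var_off))
		return false;	/* value not known at verification time */
	*val = reg->var_off.value;
	return true;
}
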
+
+static int
+nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
+ struct nfp_insn_meta *meta)
+{
+ const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
+ const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
+ struct nfp_app_bpf *bpf = nfp_prog->bpf;
+ u32 func_id = meta->insn.imm;
+ s64 off, old_off;
+
+ switch (func_id) {
+ case BPF_FUNC_xdp_adjust_head:
+ if (!bpf->adjust_head.off_max) {
+ pr_vlog(env, "adjust_head not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+ if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
+ pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
+ return -EOPNOTSUPP;
+ }
+
+ nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
+ break;
+
+ case BPF_FUNC_map_lookup_elem:
+ if (!bpf->helpers.map_lookup) {
+ pr_vlog(env, "map_lookup: not supported by FW\n");
+ return -EOPNOTSUPP;
+ }
+ if (reg2->type != PTR_TO_STACK) {
+ pr_vlog(env,
+ "map_lookup: unsupported key ptr type %d\n",
+ reg2->type);
+ return -EOPNOTSUPP;
+ }
+ if (!tnum_is_const(reg2->var_off)) {
+ pr_vlog(env, "map_lookup: variable key pointer\n");
+ return -EOPNOTSUPP;
+ }
+
+ off = reg2->var_off.value + reg2->off;
+ if (-off % 4) {
+ pr_vlog(env,
+ "map_lookup: unaligned stack pointer %lld\n",
+ -off);
+ return -EOPNOTSUPP;
+ }
+
+ /* The rest of the checks apply only if we re-parse the same insn */
+ if (!meta->func_id)
+ break;
+
+ old_off = meta->arg2.var_off.value + meta->arg2.off;
+ meta->arg2_var_off |= off != old_off;
+
+ if (meta->arg1.map_ptr != reg1->map_ptr) {
+ pr_vlog(env, "map_lookup: called for different map\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ pr_vlog(env, "unsupported function id: %d\n", func_id);
+ return -EOPNOTSUPP;
+ }
+
+ meta->func_id = func_id;
+ meta->arg1 = *reg1;
+ meta->arg2 = *reg2;
+
+ return 0;
+}
+
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
struct bpf_verifier_env *env)
@@ -82,7 +192,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
- pr_info("unsupported exit state: %d, var_off: %s\n",
+ pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
reg0->type, tn_buf);
return -EINVAL;
}
@@ -92,7 +202,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
imm <= TC_ACT_REDIRECT &&
imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
imm != TC_ACT_QUEUED) {
- pr_info("unsupported exit state: %d, imm: %llx\n",
+ pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
reg0->type, imm);
return -EINVAL;
}
@@ -103,12 +213,13 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
struct nfp_insn_meta *meta,
- const struct bpf_reg_state *reg)
+ const struct bpf_reg_state *reg,
+ struct bpf_verifier_env *env)
{
s32 old_off, new_off;
if (!tnum_is_const(reg->var_off)) {
- pr_info("variable ptr stack access\n");
+ pr_vlog(env, "variable ptr stack access\n");
return -EINVAL;
}
@@ -126,7 +237,7 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
if (old_off % 4 == new_off % 4)
return 0;
- pr_info("stack access changed location was:%d is:%d\n",
+ pr_vlog(env, "stack access changed location was:%d is:%d\n",
old_off, new_off);
return -EINVAL;
}
@@ -140,19 +251,27 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
if (reg->type != PTR_TO_CTX &&
reg->type != PTR_TO_STACK &&
+ reg->type != PTR_TO_MAP_VALUE &&
reg->type != PTR_TO_PACKET) {
- pr_info("unsupported ptr type: %d\n", reg->type);
+ pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
return -EINVAL;
}
if (reg->type == PTR_TO_STACK) {
- err = nfp_bpf_check_stack_access(nfp_prog, meta, reg);
+ err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
if (err)
return err;
}
+ if (reg->type == PTR_TO_MAP_VALUE) {
+ if (is_mbpf_store(meta)) {
+ pr_vlog(env, "map writes not supported\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
- pr_info("ptr type changed for instruction %d -> %d\n",
+ pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
meta->ptr.type, reg->type);
return -EINVAL;
}
@@ -171,25 +290,33 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
nfp_prog->verifier_meta = meta;
+ if (!nfp_bpf_supported_opcode(meta->insn.code)) {
+ pr_vlog(env, "instruction %#02x not supported\n",
+ meta->insn.code);
+ return -EINVAL;
+ }
+
if (meta->insn.src_reg >= MAX_BPF_REG ||
meta->insn.dst_reg >= MAX_BPF_REG) {
- pr_err("program uses extended registers - jit hardening?\n");
+ pr_vlog(env, "program uses extended registers - jit hardening?\n");
return -EINVAL;
}
+ if (meta->insn.code == (BPF_JMP | BPF_CALL))
+ return nfp_bpf_check_call(nfp_prog, env, meta);
if (meta->insn.code == (BPF_JMP | BPF_EXIT))
return nfp_bpf_check_exit(nfp_prog, env);
- if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
+ if (is_mbpf_load(meta))
return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.src_reg);
- if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
+ if (is_mbpf_store(meta))
return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.dst_reg);
return 0;
}
-const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
.insn_hook = nfp_verify_insn,
};
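
A hedged sketch (not part of the patch): the verifier invokes ->insn_hook once per instruction it visits, so the smallest possible offload ops table looks like the one below; the names here are illustrative only.

static int example_insn_hook(struct bpf_verifier_env *env, int insn_idx,
			     int prev_insn_idx)
{
	return 0;	/* accept every instruction */
}

static const struct bpf_prog_offload_ops example_offload_ops = {
	.insn_hook = example_insn_hook,
};
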
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index c1c595f8bb87..b3567a596fc1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -81,6 +81,9 @@ static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
return tun_type == NFP_FL_TUNNEL_VXLAN;
+ if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
+ return tun_type == NFP_FL_TUNNEL_GENEVE;
+
return false;
}
@@ -93,13 +96,11 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
size_t act_size = sizeof(struct nfp_fl_output);
struct net_device *out_dev;
u16 tmp_flags;
- int ifindex;
output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
- ifindex = tcf_mirred_ifindex(action);
- out_dev = __dev_get_by_index(dev_net(in_dev), ifindex);
+ out_dev = tcf_mirred_dev(action);
if (!out_dev)
return -EOPNOTSUPP;
@@ -138,11 +139,23 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
return 0;
}
-static bool nfp_fl_supported_tun_port(const struct tc_action *action)
+static enum nfp_flower_tun_type
+nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
+ const struct tc_action *action)
{
struct ip_tunnel_info *tun = tcf_tunnel_info(action);
-
- return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT);
+ struct nfp_flower_priv *priv = app->priv;
+
+ switch (tun->key.tp_dst) {
+ case htons(NFP_FL_VXLAN_PORT):
+ return NFP_FL_TUNNEL_VXLAN;
+ case htons(NFP_FL_GENEVE_PORT):
+ if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
+ return NFP_FL_TUNNEL_GENEVE;
+ /* FALLTHROUGH */
+ default:
+ return NFP_FL_TUNNEL_NONE;
+ }
}
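
Sketch (not part of the patch): htons() of a constant is itself a constant expression, so well-known big-endian ports can be used directly as case labels and the wire-order destination port is switched on without a byte-order conversion, exactly as done above.

static bool example_is_vxlan(__be16 dst_port)
{
	switch (dst_port) {
	case htons(4789):	/* IANA-assigned VXLAN port */
		return true;
	default:
		return false;
	}
}
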
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
@@ -167,38 +180,33 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
}
static int
-nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
- const struct tc_action *action,
- struct nfp_fl_pre_tunnel *pre_tun)
+nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
+ const struct tc_action *action,
+ struct nfp_fl_pre_tunnel *pre_tun,
+ enum nfp_flower_tun_type tun_type)
{
- struct ip_tunnel_info *vxlan = tcf_tunnel_info(action);
- size_t act_size = sizeof(struct nfp_fl_set_vxlan);
- u32 tmp_set_vxlan_type_index = 0;
+ size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
+ struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ u32 tmp_set_ip_tun_type_index = 0;
/* Currently only one pre-tunnel is supported, so the index is always 0. */
int pretun_idx = 0;
- if (vxlan->options_len) {
- /* Do not support options e.g. vxlan gpe. */
+ if (ip_tun->options_len)
return -EOPNOTSUPP;
- }
- set_vxlan->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
- set_vxlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+ set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+ set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
/* Set tunnel type and pre-tunnel index. */
- tmp_set_vxlan_type_index |=
- FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
+ tmp_set_ip_tun_type_index |=
+ FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
- set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index);
-
- set_vxlan->tun_id = vxlan->key.tun_id;
- set_vxlan->tun_flags = vxlan->key.tun_flags;
- set_vxlan->ipv4_ttl = vxlan->key.ttl;
- set_vxlan->ipv4_tos = vxlan->key.tos;
+ set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
+ set_tun->tun_id = ip_tun->key.tun_id;
/* Complete pre_tunnel action. */
- pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst;
+ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
return 0;
}
@@ -435,8 +443,8 @@ nfp_flower_loop_action(const struct tc_action *a,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
{
+ struct nfp_fl_set_ipv4_udp_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
- struct nfp_fl_set_vxlan *s_vxl;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_pop_vlan *pop_v;
struct nfp_fl_output *output;
@@ -484,26 +492,29 @@ nfp_flower_loop_action(const struct tc_action *a,
nfp_fl_push_vlan(psh_v, a);
*a_len += sizeof(struct nfp_fl_push_vlan);
- } else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
+ } else if (is_tcf_tunnel_set(a)) {
+ struct nfp_repr *repr = netdev_priv(netdev);
+ *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+ if (*tun_type == NFP_FL_TUNNEL_NONE)
+ return -EOPNOTSUPP;
+
/* Pre-tunnel action is required for tunnel encap.
* This checks for next hop entries on NFP.
+ * If none exist, the packet falls back to the kernel before other
+ * actions are applied.
*/
if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
- sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
+ sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
- *tun_type = NFP_FL_TUNNEL_VXLAN;
pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
*a_len += sizeof(struct nfp_fl_pre_tunnel);
- s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
+ set_tun = (void *)&nfp_fl->action_data[*a_len];
+ err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
if (err)
return err;
-
- *a_len += sizeof(struct nfp_fl_set_vxlan);
+ *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
} else if (is_tcf_tunnel_release(a)) {
/* Tunnel decap is handled by default so accept action. */
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index e98bb9cdb6a3..baaea6f1a9d8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -125,6 +125,27 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
return 0;
}
+int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
+{
+ struct nfp_flower_cmsg_portreify *msg;
+ struct sk_buff *skb;
+
+ skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
+ NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
+ GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
+ msg->reserved = 0;
+ msg->info = cpu_to_be16(exists);
+
+ nfp_ctrl_tx(repr->app->ctrl, skb);
+
+ return 0;
+}
+
static void
nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
{
@@ -161,6 +182,28 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
}
static void
+nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_flower_cmsg_portreify *msg;
+ bool exists;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+
+ rcu_read_lock();
+ exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+ rcu_read_unlock();
+ if (!exists) {
+ nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
+ be32_to_cpu(msg->portnum));
+ return;
+ }
+
+ atomic_inc(&priv->reify_replies);
+ wake_up_interruptible(&priv->reify_wait_queue);
+}
+
+static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_flower_cmsg_hdr *cmsg_hdr;
@@ -168,20 +211,14 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
- if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
- nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
- cmsg_hdr->version);
- goto out;
- }
-
type = cmsg_hdr->type;
switch (type) {
+ case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
+ nfp_flower_cmsg_portreify_rx(app, skb);
+ break;
case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
nfp_flower_cmsg_portmod_rx(app, skb);
break;
- case NFP_FLOWER_CMSG_TYPE_FLOW_STATS:
- nfp_flower_rx_flow_stats(app, skb);
- break;
case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
nfp_tunnel_request_route(app, skb);
break;
@@ -217,7 +254,23 @@ void nfp_flower_cmsg_process_rx(struct work_struct *work)
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_flower_cmsg_hdr *cmsg_hdr;
+
+ cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
- skb_queue_tail(&priv->cmsg_skbs, skb);
- schedule_work(&priv->cmsg_work);
+ if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
+ nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
+ cmsg_hdr->version);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) {
+ /* We need to deal with stats updates from HW asap */
+ nfp_flower_rx_flow_stats(app, skb);
+ dev_consume_skb_any(skb);
+ } else {
+ skb_queue_tail(&priv->cmsg_skbs, skb);
+ schedule_work(&priv->cmsg_work);
+ }
}
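
Sketch (not part of the patch) of the distinction relied on above: the two skb-freeing flavours differ only in drop accounting. Both are safe in any context; dev_consume_skb_any() records a successful consumption, while dev_kfree_skb_any() is reported as a drop by drop monitors.

static void example_free_cmsg(struct sk_buff *skb, bool handled)
{
	if (handled)
		dev_consume_skb_any(skb);
	else
		dev_kfree_skb_any(skb);
}
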
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 66070741d55f..adfe474c2cf0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -41,7 +41,7 @@
#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"
-#define NFP_FLOWER_LAYER_META BIT(0)
+#define NFP_FLOWER_LAYER_EXT_META BIT(0)
#define NFP_FLOWER_LAYER_PORT BIT(1)
#define NFP_FLOWER_LAYER_MAC BIT(2)
#define NFP_FLOWER_LAYER_TP BIT(3)
@@ -50,8 +50,7 @@
#define NFP_FLOWER_LAYER_CT BIT(6)
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
-#define NFP_FLOWER_LAYER_ETHER BIT(3)
-#define NFP_FLOWER_LAYER_ARP BIT(4)
+#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
@@ -108,6 +107,7 @@
enum nfp_flower_tun_type {
NFP_FL_TUNNEL_NONE = 0,
NFP_FL_TUNNEL_VXLAN = 2,
+ NFP_FL_TUNNEL_GENEVE = 4,
};
struct nfp_fl_act_head {
@@ -165,20 +165,6 @@ struct nfp_fl_pop_vlan {
__be16 reserved;
};
-/* Metadata without L2 (1W/4B)
- * ----------------------------------------------------------------
- * 3 2 1
- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | key_layers | mask_id | reserved |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- */
-struct nfp_flower_meta_one {
- u8 nfp_flow_key_layer;
- u8 mask_id;
- u16 reserved;
-};
-
struct nfp_fl_pre_tunnel {
struct nfp_fl_act_head head;
__be16 reserved;
@@ -187,16 +173,13 @@ struct nfp_fl_pre_tunnel {
__be32 extra[3];
};
-struct nfp_fl_set_vxlan {
+struct nfp_fl_set_ipv4_udp_tun {
struct nfp_fl_act_head head;
__be16 reserved;
- __be64 tun_id;
+ __be64 tun_id __packed;
__be32 tun_type_index;
- __be16 tun_flags;
- u8 ipv4_ttl;
- u8 ipv4_tos;
- __be32 extra[2];
-} __packed;
+ __be32 extra[3];
+};
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
@@ -209,12 +192,24 @@ struct nfp_fl_set_vxlan {
* NOTE: | TCI |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
-struct nfp_flower_meta_two {
+struct nfp_flower_meta_tci {
u8 nfp_flow_key_layer;
u8 mask_id;
__be16 tci;
};
+/* Extended metadata for additional key_layers (1W/4B)
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | nfp_flow_key_layer2 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ext_meta {
+ __be32 nfp_flow_key_layer2;
+};
+
/* Port details (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
@@ -313,7 +308,7 @@ struct nfp_flower_ipv6 {
struct in6_addr ipv6_dst;
};
-/* Flow Frame VXLAN --> Tunnel details (4W/16B)
+/* Flow Frame IPv4 UDP TUNNEL --> Tunnel details (4W/16B)
* -----------------------------------------------------------------
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
@@ -322,22 +317,17 @@ struct nfp_flower_ipv6 {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ipv4_addr_dst |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | tun_flags | tos | ttl |
+ * | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | gpe_flags | Reserved | Next Protocol |
+ * | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | VNI | Reserved |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
-struct nfp_flower_vxlan {
+struct nfp_flower_ipv4_udp_tun {
__be32 ip_src;
__be32 ip_dst;
- __be16 tun_flags;
- u8 tos;
- u8 ttl;
- u8 gpe_flags;
- u8 reserved[2];
- u8 nxt_proto;
+ __be32 reserved[2];
__be32 tun_id;
};
@@ -360,6 +350,7 @@ struct nfp_flower_cmsg_hdr {
enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
+ NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6,
NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
@@ -396,6 +387,15 @@ struct nfp_flower_cmsg_portmod {
#define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK BIT(0)
+/* NFP_FLOWER_CMSG_TYPE_PORT_REIFY */
+struct nfp_flower_cmsg_portreify {
+ __be32 portnum;
+ u16 reserved;
+ __be16 info;
+};
+
+#define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST BIT(0)
+
enum nfp_flower_cmsg_port_type {
NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
@@ -454,6 +454,7 @@ nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
unsigned int nbi, unsigned int nbi_port,
unsigned int phys_port);
int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok);
+int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists);
void nfp_flower_cmsg_process_rx(struct work_struct *work);
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
struct sk_buff *
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 8fcc90c0d2d3..742d6f1575b5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -32,6 +32,7 @@
*/
#include <linux/etherdevice.h>
+#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
@@ -98,7 +99,57 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
if (port >= reprs->num_reprs)
return NULL;
- return reprs->reprs[port];
+ return rcu_dereference(reprs->reprs[port]);
+}
+
+static int
+nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
+ bool exists)
+{
+ struct nfp_reprs *reprs;
+ int i, err, count = 0;
+
+ reprs = rcu_dereference_protected(app->reprs[type],
+ lockdep_is_held(&app->pf->lock));
+ if (!reprs)
+ return 0;
+
+ for (i = 0; i < reprs->num_reprs; i++) {
+ struct net_device *netdev;
+
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (netdev) {
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ err = nfp_flower_cmsg_portreify(repr, exists);
+ if (err)
+ return err;
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static int
+nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ int err;
+
+ if (!tot_repl)
+ return 0;
+
+ lockdep_assert_held(&app->pf->lock);
+ err = wait_event_interruptible_timeout(priv->reify_wait_queue,
+ atomic_read(replies) >= tot_repl,
+ msecs_to_jiffies(10));
+ if (err <= 0) {
+ nfp_warn(app->cpp, "Not all reprs responded to reify\n");
+ return -EIO;
+ }
+
+ return 0;
}
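
A compact sketch (not part of the patch) of the other half of this reply-counting handshake: the cmsg RX path (see cmsg.c above) increments the counter and wakes the waiter, while nfp_flower_wait_repr_reify() sleeps on the same queue. wait_event_interruptible_timeout() returns 0 on timeout and a negative value when interrupted, hence the "err <= 0" failure test above.

static void example_reify_reply_rx(atomic_t *replies, wait_queue_head_t *wq)
{
	atomic_inc(replies);		/* one more representor confirmed */
	wake_up_interruptible(wq);	/* let the waiter re-check the count */
}
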
static int
@@ -110,7 +161,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
if (err)
return err;
- netif_carrier_on(repr->netdev);
netif_tx_wake_all_queues(repr->netdev);
return 0;
@@ -119,7 +169,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
- netif_carrier_off(repr->netdev);
netif_tx_disable(repr->netdev);
return nfp_flower_cmsg_portmod(repr, false);
@@ -140,6 +189,24 @@ nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
netdev_priv(netdev));
}
+static void
+nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_flower_priv *priv = app->priv;
+ atomic_t *replies = &priv->reify_replies;
+ int err;
+
+ atomic_set(replies, 0);
+ err = nfp_flower_cmsg_portreify(repr, false);
+ if (err) {
+ nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
+ return;
+ }
+
+ nfp_flower_wait_repr_reify(app, replies, 1);
+}
+
static void nfp_flower_sriov_disable(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@@ -157,10 +224,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
{
u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
struct nfp_flower_priv *priv = app->priv;
+ atomic_t *replies = &priv->reify_replies;
enum nfp_port_type port_type;
struct nfp_reprs *reprs;
+ int i, err, reify_cnt;
const u8 queue = 0;
- int i, err;
port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
NFP_PORT_VF_PORT;
@@ -170,19 +238,21 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
return -ENOMEM;
for (i = 0; i < cnt; i++) {
+ struct net_device *repr;
struct nfp_port *port;
u32 port_id;
- reprs->reprs[i] = nfp_repr_alloc(app);
- if (!reprs->reprs[i]) {
+ repr = nfp_repr_alloc(app);
+ if (!repr) {
err = -ENOMEM;
goto err_reprs_clean;
}
+ RCU_INIT_POINTER(reprs->reprs[i], repr);
/* For now we only support 1 PF */
WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
- port = nfp_port_alloc(app, port_type, reprs->reprs[i]);
+ port = nfp_port_alloc(app, port_type, repr);
if (repr_type == NFP_REPR_TYPE_PF) {
port->pf_id = i;
port->vnic = priv->nn->dp.ctrl_bar;
@@ -193,11 +263,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
}
- eth_hw_addr_random(reprs->reprs[i]);
+ eth_hw_addr_random(repr);
port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
i, queue);
- err = nfp_repr_init(app, reprs->reprs[i],
+ err = nfp_repr_init(app, repr,
port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
@@ -206,14 +276,28 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
nfp_info(app->cpp, "%s%d Representor(%s) created\n",
repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
- reprs->reprs[i]->name);
+ repr->name);
}
nfp_app_reprs_set(app, repr_type, reprs);
+ atomic_set(replies, 0);
+ reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
+ if (reify_cnt < 0) {
+ err = reify_cnt;
+ nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
+ goto err_reprs_remove;
+ }
+
+ err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
+ if (err)
+ goto err_reprs_remove;
+
return 0;
+err_reprs_remove:
+ reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
- nfp_reprs_clean_and_free(reprs);
+ nfp_reprs_clean_and_free(app, reprs);
return err;
}
@@ -233,10 +317,11 @@ static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
+ atomic_t *replies = &priv->reify_replies;
struct sk_buff *ctrl_skb;
struct nfp_reprs *reprs;
+ int err, reify_cnt;
unsigned int i;
- int err;
ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
if (!ctrl_skb)
@@ -250,17 +335,18 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
for (i = 0; i < eth_tbl->count; i++) {
unsigned int phys_port = eth_tbl->ports[i].index;
+ struct net_device *repr;
struct nfp_port *port;
u32 cmsg_port_id;
- reprs->reprs[phys_port] = nfp_repr_alloc(app);
- if (!reprs->reprs[phys_port]) {
+ repr = nfp_repr_alloc(app);
+ if (!repr) {
err = -ENOMEM;
goto err_reprs_clean;
}
+ RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
- port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT,
- reprs->reprs[phys_port]);
+ port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
if (IS_ERR(port)) {
err = PTR_ERR(port);
goto err_reprs_clean;
@@ -271,11 +357,11 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
goto err_reprs_clean;
}
- SET_NETDEV_DEV(reprs->reprs[phys_port], &priv->nn->pdev->dev);
+ SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
nfp_net_get_mac_addr(app->pf, port);
cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
- err = nfp_repr_init(app, reprs->reprs[phys_port],
+ err = nfp_repr_init(app, repr,
cmsg_port_id, port, priv->nn->dp.netdev);
if (err) {
nfp_port_free(port);
@@ -288,23 +374,37 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
phys_port);
nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
- phys_port, reprs->reprs[phys_port]->name);
+ phys_port, repr->name);
}
nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
- /* The MAC_REPR control message should be sent after the MAC
+ /* The REIFY/MAC_REPR control messages should be sent after the MAC
* representors are registered using nfp_app_reprs_set(). This is
* because the firmware may respond with control messages for the
* MAC representors, e.g. to provide the driver with information
* about their state, and without registration the driver will drop
* any such messages.
*/
+ atomic_set(replies, 0);
+ reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
+ if (reify_cnt < 0) {
+ err = reify_cnt;
+ nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
+ goto err_reprs_remove;
+ }
+
+ err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
+ if (err)
+ goto err_reprs_remove;
+
nfp_ctrl_tx(app->ctrl, ctrl_skb);
return 0;
+err_reprs_remove:
+ reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
- nfp_reprs_clean_and_free(reprs);
+ nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
kfree_skb(ctrl_skb);
return err;
@@ -381,7 +481,7 @@ static int nfp_flower_init(struct nfp_app *app)
{
const struct nfp_pf *pf = app->pf;
struct nfp_flower_priv *app_priv;
- u64 version;
+ u64 version, features;
int err;
if (!pf->eth_tbl) {
@@ -419,11 +519,20 @@ static int nfp_flower_init(struct nfp_app *app)
app_priv->app = app;
skb_queue_head_init(&app_priv->cmsg_skbs);
INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
+ init_waitqueue_head(&app_priv->reify_wait_queue);
err = nfp_flower_metadata_init(app);
if (err)
goto err_free_app_priv;
+ /* Extract the extra features supported by the firmware. */
+ features = nfp_rtsym_read_le(app->pf->rtbl,
+ "_abi_flower_extra_features", &err);
+ if (err)
+ app_priv->flower_ext_feats = 0;
+ else
+ app_priv->flower_ext_feats = features;
+
return 0;
err_free_app_priv:
@@ -456,6 +565,8 @@ static void nfp_flower_stop(struct nfp_app *app)
const struct nfp_app_type app_flower = {
.id = NFP_APP_FLOWER_NIC,
.name = "flower",
+
+ .ctrl_cap_mask = ~0U,
.ctrl_has_meta = true,
.extra_cap = nfp_flower_extra_cap,
@@ -468,6 +579,7 @@ const struct nfp_app_type app_flower = {
.vnic_clean = nfp_flower_vnic_clean,
.repr_init = nfp_flower_repr_netdev_init,
+ .repr_preclean = nfp_flower_repr_netdev_preclean,
.repr_clean = nfp_flower_repr_netdev_clean,
.repr_open = nfp_flower_repr_netdev_open,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e6b26c5ae6e0..332ff0fdc038 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -34,6 +34,8 @@
#ifndef __NFP_FLOWER_H__
#define __NFP_FLOWER_H__ 1
+#include "cmsg.h"
+
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/time64.h>
@@ -58,6 +60,10 @@ struct nfp_app;
#define NFP_FL_MASK_ID_LOCATION 1
#define NFP_FL_VXLAN_PORT 4789
+#define NFP_FL_GENEVE_PORT 6081
+
+/* Extra features bitmap. */
+#define NFP_FL_FEATS_GENEVE BIT(0)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
@@ -77,6 +83,7 @@ struct nfp_fl_stats_id {
* @nn: Pointer to vNIC
* @mask_id_seed: Seed used for mask hash table
* @flower_version: HW version of flower
+ * @flower_ext_feats: Bitmap of extra features the HW supports
* @stats_ids: List of free stats ids
* @mask_ids: List of free mask ids
* @mask_table: Hash table used to store masks
@@ -95,12 +102,16 @@ struct nfp_fl_stats_id {
* @nfp_mac_off_count: Number of MACs in address list
* @nfp_tun_mac_nb: Notifier to monitor link state
* @nfp_tun_neigh_nb: Notifier to monitor neighbour state
+ * @reify_replies: atomically stores the number of replies received
+ * from firmware for repr reify
+ * @reify_wait_queue: wait queue for repr reify response counting
*/
struct nfp_flower_priv {
struct nfp_app *app;
struct nfp_net *nn;
u32 mask_id_seed;
u64 flower_version;
+ u64 flower_ext_feats;
struct nfp_fl_stats_id stats_ids;
struct nfp_fl_mask_id mask_ids;
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
@@ -119,6 +130,8 @@ struct nfp_flower_priv {
int nfp_mac_off_count;
struct notifier_block nfp_tun_mac_nb;
struct notifier_block nfp_tun_neigh_nb;
+ atomic_t reify_replies;
+ wait_queue_head_t reify_wait_queue;
};
struct nfp_fl_key_ls {
@@ -172,7 +185,8 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow);
+ struct nfp_fl_payload *nfp_flow,
+ enum nfp_flower_tun_type tun_type);
int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 60614d4f0e22..37c2ecae2a7a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -38,7 +38,7 @@
#include "main.h"
static void
-nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
+nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
struct tc_cls_flower_offload *flow, u8 key_type,
bool mask_version)
{
@@ -46,7 +46,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
struct flow_dissector_key_vlan *flow_vlan;
u16 tmp_tci;
- memset(frame, 0, sizeof(struct nfp_flower_meta_two));
+ memset(frame, 0, sizeof(struct nfp_flower_meta_tci));
/* Populate the metadata frame. */
frame->nfp_flow_key_layer = key_type;
frame->mask_id = ~0;
@@ -68,11 +68,9 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
}
static void
-nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type)
+nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
- frame->nfp_flow_key_layer = key_type;
- frame->mask_id = 0;
- frame->reserved = 0;
+ frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}
static int
@@ -224,16 +222,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
}
static void
-nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version, __be32 *tun_dst)
+nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv4_addrs *vxlan_ips;
+ struct flow_dissector_key_ipv4_addrs *tun_ips;
struct flow_dissector_key_keyid *vni;
- /* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */
- memset(frame, 0, sizeof(struct nfp_flower_vxlan));
+ memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_KEYID)) {
@@ -248,80 +245,68 @@ nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
- vxlan_ips =
+ tun_ips =
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
target);
- frame->ip_src = vxlan_ips->src;
- frame->ip_dst = vxlan_ips->dst;
- *tun_dst = vxlan_ips->dst;
+ frame->ip_src = tun_ips->src;
+ frame->ip_dst = tun_ips->dst;
}
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow)
+ struct nfp_fl_payload *nfp_flow,
+ enum nfp_flower_tun_type tun_type)
{
- enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
- __be32 tun_dst, tun_dst_mask = 0;
struct nfp_repr *netdev_repr;
int err;
u8 *ext;
u8 *msk;
- if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN)
- tun_type = NFP_FL_TUNNEL_VXLAN;
-
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
ext = nfp_flow->unmasked_data;
msk = nfp_flow->mask_data;
- if (NFP_FLOWER_LAYER_PORT & key_ls->key_layer) {
- /* Populate Exact Metadata. */
- nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)ext,
- flow, key_ls->key_layer, false);
- /* Populate Mask Metadata. */
- nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)msk,
- flow, key_ls->key_layer, true);
- ext += sizeof(struct nfp_flower_meta_two);
- msk += sizeof(struct nfp_flower_meta_two);
-
- /* Populate Exact Port data. */
- err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
- nfp_repr_get_port_id(netdev),
- false, tun_type);
- if (err)
- return err;
-
- /* Populate Mask Port Data. */
- err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
- nfp_repr_get_port_id(netdev),
- true, tun_type);
- if (err)
- return err;
-
- ext += sizeof(struct nfp_flower_in_port);
- msk += sizeof(struct nfp_flower_in_port);
- } else {
- /* Populate Exact Metadata. */
- nfp_flower_compile_meta((struct nfp_flower_meta_one *)ext,
- key_ls->key_layer);
- /* Populate Mask Metadata. */
- nfp_flower_compile_meta((struct nfp_flower_meta_one *)msk,
- key_ls->key_layer);
- ext += sizeof(struct nfp_flower_meta_one);
- msk += sizeof(struct nfp_flower_meta_one);
- }
- if (NFP_FLOWER_LAYER_META & key_ls->key_layer) {
- /* Additional Metadata Fields.
- * Currently unsupported.
- */
- return -EOPNOTSUPP;
+ /* Populate Exact Metadata. */
+ nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
+ flow, key_ls->key_layer, false);
+ /* Populate Mask Metadata. */
+ nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk,
+ flow, key_ls->key_layer, true);
+ ext += sizeof(struct nfp_flower_meta_tci);
+ msk += sizeof(struct nfp_flower_meta_tci);
+
+ /* Populate Extended Metadata if Required. */
+ if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
+ nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
+ key_ls->key_layer_two);
+ nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
+ key_ls->key_layer_two);
+ ext += sizeof(struct nfp_flower_ext_meta);
+ msk += sizeof(struct nfp_flower_ext_meta);
}
+ /* Populate Exact Port data. */
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
+ nfp_repr_get_port_id(netdev),
+ false, tun_type);
+ if (err)
+ return err;
+
+ /* Populate Mask Port Data. */
+ err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
+ nfp_repr_get_port_id(netdev),
+ true, tun_type);
+ if (err)
+ return err;
+
+ ext += sizeof(struct nfp_flower_in_port);
+ msk += sizeof(struct nfp_flower_in_port);
+
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
/* Populate Exact MAC Data. */
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
@@ -366,15 +351,17 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
msk += sizeof(struct nfp_flower_ipv6);
}
- if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
+ if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
+ key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
+ __be32 tun_dst;
+
/* Populate Exact VXLAN Data. */
- nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
- flow, false, &tun_dst);
+ nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
/* Populate Mask VXLAN Data. */
- nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
- flow, true, &tun_dst_mask);
- ext += sizeof(struct nfp_flower_vxlan);
- msk += sizeof(struct nfp_flower_vxlan);
+ nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
+ tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
+ ext += sizeof(struct nfp_flower_ipv4_udp_tun);
+ msk += sizeof(struct nfp_flower_ipv4_udp_tun);
/* Configure tunnel end point MAC. */
if (nfp_netdev_is_nfp_repr(netdev)) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 553f94f55dce..08c4c6dc5f7f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -130,12 +130,15 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
}
static int
-nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
+nfp_flower_calculate_key_layers(struct nfp_app *app,
+ struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow,
- bool egress)
+ bool egress,
+ enum nfp_flower_tun_type *tun_type)
{
struct flow_dissector_key_basic *mask_basic = NULL;
struct flow_dissector_key_basic *key_basic = NULL;
+ struct nfp_flower_priv *priv = app->priv;
u32 key_layer_two;
u8 key_layer;
int key_size;
@@ -150,10 +153,15 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
return -EOPNOTSUPP;
key_layer_two = 0;
- key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
- key_size = sizeof(struct nfp_flower_meta_one) +
- sizeof(struct nfp_flower_in_port) +
- sizeof(struct nfp_flower_mac_mpls);
+ key_layer = NFP_FLOWER_LAYER_PORT;
+ key_size = sizeof(struct nfp_flower_meta_tci) +
+ sizeof(struct nfp_flower_in_port);
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
+ dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+ key_layer |= NFP_FLOWER_LAYER_MAC;
+ key_size += sizeof(struct nfp_flower_mac_mpls);
+ }
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
@@ -192,12 +200,27 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
FLOW_DISSECTOR_KEY_ENC_PORTS,
flow->key);
- if (mask_enc_ports->dst != cpu_to_be16(~0) ||
- enc_ports->dst != htons(NFP_FL_VXLAN_PORT))
+ if (mask_enc_ports->dst != cpu_to_be16(~0))
return -EOPNOTSUPP;
- key_layer |= NFP_FLOWER_LAYER_VXLAN;
- key_size += sizeof(struct nfp_flower_vxlan);
+ switch (enc_ports->dst) {
+ case htons(NFP_FL_VXLAN_PORT):
+ *tun_type = NFP_FL_TUNNEL_VXLAN;
+ key_layer |= NFP_FLOWER_LAYER_VXLAN;
+ key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ break;
+ case htons(NFP_FL_GENEVE_PORT):
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
+ return -EOPNOTSUPP;
+ *tun_type = NFP_FL_TUNNEL_GENEVE;
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
+ key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
} else if (egress) {
/* Reject non tunnel matches offloaded to egress repr. */
return -EOPNOTSUPP;
@@ -325,6 +348,7 @@ static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress)
{
+ enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
@@ -334,7 +358,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (!key_layer)
return -ENOMEM;
- err = nfp_flower_calculate_key_layers(key_layer, flow, egress);
+ err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
+ &tun_type);
if (err)
goto err_free_key_ls;
@@ -344,7 +369,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_key_ls;
}
- err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay);
+ err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
+ tun_type);
if (err)
goto err_destroy_flow;
@@ -457,8 +483,7 @@ static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flower, bool egress)
{
- if (!eth_proto_is_802_3(flower->common.protocol) ||
- flower->common.chain_index)
+ if (!eth_proto_is_802_3(flower->common.protocol))
return -EOPNOTSUPP;
switch (flower->command) {
@@ -478,7 +503,7 @@ int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
{
struct nfp_repr *repr = cb_priv;
- if (!tc_can_offload(repr->netdev))
+ if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -495,7 +520,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
{
struct nfp_repr *repr = cb_priv;
- if (!tc_can_offload(repr->netdev))
+ if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
return -EOPNOTSUPP;
switch (type) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 955a9f44d244..6aedef0ad433 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -32,6 +32,8 @@
*/
#include <linux/bug.h>
+#include <linux/lockdep.h>
+#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
@@ -99,13 +101,19 @@ nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority)
}
struct nfp_reprs *
+nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type)
+{
+ return rcu_dereference_protected(app->reprs[type],
+ lockdep_is_held(&app->pf->lock));
+}
+
+struct nfp_reprs *
nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
struct nfp_reprs *reprs)
{
struct nfp_reprs *old;
- old = rcu_dereference_protected(app->reprs[type],
- lockdep_is_held(&app->pf->lock));
+ old = nfp_reprs_get_locked(app, type);
rcu_assign_pointer(app->reprs[type], reprs);
return old;
@@ -116,7 +124,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
struct nfp_app *app;
if (id >= ARRAY_SIZE(apps) || !apps[id]) {
- nfp_err(pf->cpp, "failed to find app with ID 0x%02hhx\n", id);
+ nfp_err(pf->cpp, "unknown FW app ID 0x%02hhx, driver too old or support for FW not built in\n", id);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 0e5e0305ad1c..437964afa8ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -43,6 +43,7 @@
struct bpf_prog;
struct net_device;
struct netdev_bpf;
+struct netlink_ext_ack;
struct pci_dev;
struct sk_buff;
struct sk_buff;
@@ -66,6 +67,9 @@ extern const struct nfp_app_type app_flower;
* struct nfp_app_type - application definition
* @id: application ID
* @name: application name
+ * @ctrl_cap_mask: ctrl vNIC capability mask, allows disabling features like
+ * IRQMOD, which are on by default but counter-productive for
+ * control messages, which are often latency-sensitive
* @ctrl_has_meta: control messages have prepend of type:5/port:CTRL
*
* Callbacks
@@ -77,18 +81,20 @@ extern const struct nfp_app_type app_flower;
* @vnic_init: vNIC netdev was registered
* @vnic_clean: vNIC netdev about to be unregistered
* @repr_init: representor about to be registered
+ * @repr_preclean: representor about to be unregistered, executed before
+ * the app's reference to it is removed
* @repr_clean: representor about to be unregistered
* @repr_open: representor netdev open callback
* @repr_stop: representor netdev stop callback
+ * @change_mtu: MTU change on a netdev has been requested (veto-only, change
+ * is not guaranteed to be committed)
* @start: start application logic
* @stop: stop application logic
* @ctrl_msg_rx: control message handler
* @setup_tc: setup TC ndo
* @tc_busy: TC HW offload busy (rules loaded)
+ * @bpf: BPF ndo offload-related calls
* @xdp_offload: offload an XDP program
- * @bpf_verifier_prep: verifier prep for dev-specific BPF programs
- * @bpf_translate: translate call for dev-specific BPF programs
- * @bpf_destroy: destroy for dev-specific BPF programs
* @eswitch_mode_get: get SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
@@ -98,6 +104,7 @@ struct nfp_app_type {
enum nfp_app_id id;
const char *name;
+ u32 ctrl_cap_mask;
bool ctrl_has_meta;
int (*init)(struct nfp_app *app);
@@ -112,11 +119,15 @@ struct nfp_app_type {
void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
int (*repr_init)(struct nfp_app *app, struct net_device *netdev);
+ void (*repr_preclean)(struct nfp_app *app, struct net_device *netdev);
void (*repr_clean)(struct nfp_app *app, struct net_device *netdev);
int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
+ int (*change_mtu)(struct nfp_app *app, struct net_device *netdev,
+ int new_mtu);
+
int (*start)(struct nfp_app *app);
void (*stop)(struct nfp_app *app);
@@ -125,14 +136,11 @@ struct nfp_app_type {
int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
+ int (*bpf)(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *xdp);
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
- int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf);
- int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
- int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog);
+ struct bpf_prog *prog,
+ struct netlink_ext_ack *extack);
int (*sriov_enable)(struct nfp_app *app, int num_vfs);
void (*sriov_disable)(struct nfp_app *app);
@@ -163,6 +171,7 @@ struct nfp_app {
void *priv;
};
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
static inline int nfp_app_init(struct nfp_app *app)
@@ -226,12 +235,27 @@ nfp_app_repr_init(struct nfp_app *app, struct net_device *netdev)
}
static inline void
+nfp_app_repr_preclean(struct nfp_app *app, struct net_device *netdev)
+{
+ if (app->type->repr_preclean)
+ app->type->repr_preclean(app, netdev);
+}
+
+static inline void
nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
{
if (app->type->repr_clean)
app->type->repr_clean(app, netdev);
}
+static inline int
+nfp_app_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+{
+ if (!app || !app->type->change_mtu)
+ return 0;
+ return app->type->change_mtu(app, netdev, new_mtu);
+}
+
static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
{
app->ctrl = ctrl;
@@ -293,39 +317,29 @@ static inline int nfp_app_setup_tc(struct nfp_app *app,
return app->type->setup_tc(app, netdev, type, type_data);
}
-static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+static inline int nfp_app_bpf(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf)
{
- if (!app || !app->type->xdp_offload)
- return -EOPNOTSUPP;
- return app->type->xdp_offload(app, nn, prog);
+ if (!app || !app->type->bpf)
+ return -EINVAL;
+ return app->type->bpf(app, nn, bpf);
}
-static inline int
-nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf)
+static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
- if (!app || !app->type->bpf_verifier_prep)
+ if (!app || !app->type->xdp_offload)
return -EOPNOTSUPP;
- return app->type->bpf_verifier_prep(app, nn, bpf);
+ return app->type->xdp_offload(app, nn, prog, extack);
}
-static inline int
-nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
+static inline bool __nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
- if (!app || !app->type->bpf_translate)
- return -EOPNOTSUPP;
- return app->type->bpf_translate(app, nn, prog);
-}
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
+ skb->data, skb->len);
-static inline int
-nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
- struct bpf_prog *prog)
-{
- if (!app || !app->type->bpf_destroy)
- return -EOPNOTSUPP;
- return app->type->bpf_destroy(app, nn, prog);
+ return __nfp_ctrl_tx(app->ctrl, skb);
}
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
@@ -378,6 +392,8 @@ static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev);
struct nfp_reprs *
+nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type);
+struct nfp_reprs *
nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
struct nfp_reprs *reprs);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index 830f6de25f47..3f6952b66a49 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -41,6 +41,7 @@
const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
[CMD_TGT_WRITE8_SWAP] = { 0x02, 0x42 },
+ [CMD_TGT_WRITE32_SWAP] = { 0x02, 0x5f },
[CMD_TGT_READ8] = { 0x01, 0x43 },
[CMD_TGT_READ32] = { 0x00, 0x5c },
[CMD_TGT_READ32_LE] = { 0x01, 0x5c },
@@ -49,6 +50,94 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
[CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
};
+static bool unreg_is_imm(u16 reg)
+{
+ return (reg & UR_REG_IMM) == UR_REG_IMM;
+}
+
+u16 br_get_offset(u64 instr)
+{
+ u16 addr_lo, addr_hi;
+
+ addr_lo = FIELD_GET(OP_BR_ADDR_LO, instr);
+ addr_hi = FIELD_GET(OP_BR_ADDR_HI, instr);
+
+ return (addr_hi * ((OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO)) + 1)) |
+ addr_lo;
+}
+
+void br_set_offset(u64 *instr, u16 offset)
+{
+ u16 addr_lo, addr_hi;
+
+ addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
+ addr_hi = offset != addr_lo;
+ *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
+ *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
+ *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
+}
+
+void br_add_offset(u64 *instr, u16 offset)
+{
+ u16 addr;
+
+ addr = br_get_offset(*instr);
+ br_set_offset(instr, addr + offset);
+}
+
+static bool immed_can_modify(u64 instr)
+{
+ if (FIELD_GET(OP_IMMED_INV, instr) ||
+ FIELD_GET(OP_IMMED_SHIFT, instr) ||
+ FIELD_GET(OP_IMMED_WIDTH, instr) != IMMED_WIDTH_ALL) {
+ pr_err("Can't decode/encode immed!\n");
+ return false;
+ }
+ return true;
+}
+
+u16 immed_get_value(u64 instr)
+{
+ u16 reg;
+
+ if (!immed_can_modify(instr))
+ return 0;
+
+ reg = FIELD_GET(OP_IMMED_A_SRC, instr);
+ if (!unreg_is_imm(reg))
+ reg = FIELD_GET(OP_IMMED_B_SRC, instr);
+
+ return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr) << 8;
+}
+
+void immed_set_value(u64 *instr, u16 immed)
+{
+ if (!immed_can_modify(*instr))
+ return;
+
+ if (unreg_is_imm(FIELD_GET(OP_IMMED_A_SRC, *instr))) {
+ *instr &= ~FIELD_PREP(OP_IMMED_A_SRC, 0xff);
+ *instr |= FIELD_PREP(OP_IMMED_A_SRC, immed & 0xff);
+ } else {
+ *instr &= ~FIELD_PREP(OP_IMMED_B_SRC, 0xff);
+ *instr |= FIELD_PREP(OP_IMMED_B_SRC, immed & 0xff);
+ }
+
+ *instr &= ~OP_IMMED_IMM;
+ *instr |= FIELD_PREP(OP_IMMED_IMM, immed >> 8);
+}
+
+void immed_add_value(u64 *instr, u16 offset)
+{
+ u16 val;
+
+ if (!immed_can_modify(*instr))
+ return;
+
+ val = immed_get_value(*instr);
+ immed_set_value(instr, val + offset);
+}
+
static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
{
bool lm_id, lm_dec = false;
@@ -120,7 +209,8 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
reg->dst = nfp_swreg_to_unreg(dst, true);
/* Decode source operands */
- if (swreg_type(lreg) == swreg_type(rreg))
+ if (swreg_type(lreg) == swreg_type(rreg) &&
+ swreg_type(lreg) != NN_REG_NONE)
return -EFAULT;
if (swreg_type(lreg) == NN_REG_GPR_B ||
@@ -200,7 +290,8 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
/* Decode source operands */
- if (swreg_type(lreg) == swreg_type(rreg))
+ if (swreg_type(lreg) == swreg_type(rreg) &&
+ swreg_type(lreg) != NN_REG_NONE)
return -EFAULT;
if (swreg_type(lreg) == NN_REG_GPR_B ||
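immed_set_value() stores the low byte of a 16-bit constant in the A or B source field and the high byte in OP_IMMED_IMM, so get and set must agree on the 8-bit split. A quick round-trip sketch (assumes @instr holds a plain immed instruction that immed_can_modify() accepts):

static void immed_roundtrip_check(u64 instr)
{
	immed_set_value(&instr, 0xabcd);
	WARN_ON(immed_get_value(instr) != 0xabcd);	/* low byte in src, high byte in IMM */

	immed_add_value(&instr, 1);
	WARN_ON(immed_get_value(instr) != 0xabce);
}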
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index 74d0c11ab2f9..5f9291db98e0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -77,9 +77,11 @@
enum br_mask {
BR_BEQ = 0x00,
BR_BNE = 0x01,
+ BR_BMI = 0x02,
BR_BHS = 0x04,
BR_BLO = 0x05,
BR_BGE = 0x08,
+ BR_BLT = 0x09,
BR_UNC = 0x18,
};
@@ -92,6 +94,10 @@ enum br_ctx_signal_state {
BR_CSS_NONE = 2,
};
+u16 br_get_offset(u64 instr);
+void br_set_offset(u64 *instr, u16 offset);
+void br_add_offset(u64 *instr, u16 offset);
+
#define OP_BBYTE_BASE 0x0c800000000ULL
#define OP_BB_A_SRC 0x000000000ffULL
#define OP_BB_BYTE 0x00000000300ULL
@@ -132,6 +138,10 @@ enum immed_shift {
IMMED_SHIFT_2B = 2,
};
+u16 immed_get_value(u64 instr);
+void immed_set_value(u64 *instr, u16 immed);
+void immed_add_value(u64 *instr, u16 offset);
+
#define OP_SHF_BASE 0x08000000000ULL
#define OP_SHF_A_SRC 0x000000000ffULL
#define OP_SHF_SC 0x00000000300ULL
@@ -175,6 +185,7 @@ enum alu_op {
ALU_OP_NONE = 0x00,
ALU_OP_ADD = 0x01,
ALU_OP_NOT = 0x04,
+ ALU_OP_ADD_2B = 0x05,
ALU_OP_AND = 0x08,
ALU_OP_SUB_C = 0x0d,
ALU_OP_ADD_C = 0x11,
@@ -209,6 +220,7 @@ enum alu_dst_ab {
#define OP_CMD_CNT 0x0000e000000ULL
#define OP_CMD_SIG 0x000f0000000ULL
#define OP_CMD_TGT_CMD 0x07f00000000ULL
+#define OP_CMD_INDIR 0x20000000000ULL
#define OP_CMD_MODE 0x1c0000000000ULL
struct cmd_tgt_act {
@@ -219,6 +231,7 @@ struct cmd_tgt_act {
enum cmd_tgt_map {
CMD_TGT_READ8,
CMD_TGT_WRITE8_SWAP,
+ CMD_TGT_WRITE32_SWAP,
CMD_TGT_READ32,
CMD_TGT_READ32_LE,
CMD_TGT_READ32_SWAP,
@@ -240,6 +253,9 @@ enum cmd_ctx_swap {
CMD_CTX_NO_SWAP = 3,
};
+#define CMD_OVE_LEN BIT(7)
+#define CMD_OV_LEN GENMASK(12, 8)
+
#define OP_LCSR_BASE 0x0fc00000000ULL
#define OP_LCSR_A_SRC 0x000000003ffULL
#define OP_LCSR_B_SRC 0x000000ffc00ULL
@@ -257,6 +273,7 @@ enum lcsr_wr_src {
#define OP_CARB_BASE 0x0e000000000ULL
#define OP_CARB_OR 0x00000010000ULL
+#define NFP_CSR_CTX_PTR 0x20
#define NFP_CSR_ACT_LM_ADDR0 0x64
#define NFP_CSR_ACT_LM_ADDR1 0x6c
#define NFP_CSR_ACT_LM_ADDR2 0x94
@@ -377,4 +394,13 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
int nfp_ustore_check_valid_no_ecc(u64 insn);
u64 nfp_ustore_calc_ecc_insn(u64 insn);
+#define NFP_IND_ME_REFL_WR_SIG_INIT 3
+#define NFP_IND_ME_CTX_PTR_BASE_MASK GENMASK(9, 0)
+#define NFP_IND_NUM_CONTEXTS 8
+
+static inline u32 nfp_get_ind_csr_ctx_ptr_offs(u32 read_offset)
+{
+ return (read_offset & ~NFP_IND_ME_CTX_PTR_BASE_MASK) | NFP_CSR_CTX_PTR;
+}
+
#endif
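nfp_get_ind_csr_ctx_ptr_offs() keeps the upper bits of the requested read offset (the island/ME selector) and swaps the low 10 bits for the CSRCtxPtr register. A worked example under the masks above:

static u32 ind_ctx_ptr_example(void)
{
	/* 0x12345 & ~NFP_IND_ME_CTX_PTR_BASE_MASK == 0x12000; OR in
	 * NFP_CSR_CTX_PTR (0x20) yields 0x12020.
	 */
	return nfp_get_ind_csr_ctx_ptr_offs(0x12345);
}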
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 6c9f29c2e975..eb0fc614673d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -152,18 +152,8 @@ out:
static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct nfp_pf *pf = devlink_priv(devlink);
- int ret;
-
- mutex_lock(&pf->lock);
- if (!pf->app) {
- ret = -EBUSY;
- goto out;
- }
- ret = nfp_app_eswitch_mode_get(pf->app, mode);
-out:
- mutex_unlock(&pf->lock);
- return ret;
+ return nfp_app_eswitch_mode_get(pf->app, mode);
}
const struct devlink_ops nfp_devlink_ops = {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 35eaccbece36..cc570bb6563c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -45,6 +45,7 @@
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/vermagic.h>
+#include <linux/vmalloc.h>
#include <net/devlink.h>
#include "nfpcore/nfp.h"
@@ -498,17 +499,16 @@ static int nfp_pci_probe(struct pci_dev *pdev,
if (err)
goto err_hwinfo_free;
- err = devlink_register(devlink, &pdev->dev);
- if (err)
- goto err_hwinfo_free;
-
err = nfp_nsp_init(pdev, pf);
if (err)
- goto err_devlink_unreg;
+ goto err_hwinfo_free;
pf->mip = nfp_mip_open(pf->cpp);
pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);
+ pf->dump_flag = NFP_DUMP_NSP_DIAG;
+ pf->dumpspec = nfp_net_dump_load_dumpspec(pf->cpp, pf->rtbl);
+
err = nfp_pcie_sriov_read_nfd_limit(pf);
if (err)
goto err_fw_unload;
@@ -518,6 +518,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
dev_err(&pdev->dev,
"Error: %d VFs already enabled, but loaded FW can only support %d\n",
pf->num_vfs, pf->limit_vfs);
+ err = -EINVAL;
goto err_fw_unload;
}
@@ -544,8 +545,7 @@ err_fw_unload:
nfp_fw_unload(pf);
kfree(pf->eth_tbl);
kfree(pf->nspi);
-err_devlink_unreg:
- devlink_unregister(devlink);
+ vfree(pf->dumpspec);
err_hwinfo_free:
kfree(pf->hwinfo);
nfp_cpp_free(pf->cpp);
@@ -566,19 +566,15 @@ err_pci_disable:
static void nfp_pci_remove(struct pci_dev *pdev)
{
struct nfp_pf *pf = pci_get_drvdata(pdev);
- struct devlink *devlink;
nfp_hwmon_unregister(pf);
- devlink = priv_to_devlink(pf);
-
- nfp_net_pci_remove(pf);
-
nfp_pcie_sriov_disable(pdev);
pci_sriov_set_totalvfs(pf->pdev, 0);
- devlink_unregister(devlink);
+ nfp_net_pci_remove(pf);
+ vfree(pf->dumpspec);
kfree(pf->rtbl);
nfp_mip_close(pf->mip);
if (pf->fw_loaded)
@@ -592,7 +588,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
kfree(pf->eth_tbl);
kfree(pf->nspi);
mutex_destroy(&pf->lock);
- devlink_free(devlink);
+ devlink_free(priv_to_devlink(pf));
pci_release_regions(pdev);
pci_disable_device(pdev);
}
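The err_* labels above follow the usual probe convention: each label undoes only the steps that completed before the failure, in reverse order, so moving a step (here the devlink registration) also moves its unwind. A generic sketch of the pattern, with hypothetical step helpers:

static int example_probe(struct pci_dev *pdev)
{
	int err;

	err = step_a(pdev);		/* hypothetical */
	if (err)
		return err;

	err = step_b(pdev);		/* hypothetical */
	if (err)
		goto err_undo_a;	/* unwind in reverse order */

	return 0;

err_undo_a:
	undo_a(pdev);			/* hypothetical */
	return err;
}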
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index be0ee59f2eb9..add46e28212b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -39,6 +39,7 @@
#ifndef NFP_MAIN_H
#define NFP_MAIN_H
+#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/msi.h>
@@ -62,6 +63,17 @@ struct nfp_port;
struct nfp_rtsym_table;
/**
+ * struct nfp_dumpspec - NFP FW dump specification structure
+ * @size: Size of the data
+ * @data: Sequence of TLVs, each being an instruction to dump some data
+ * from FW
+ */
+struct nfp_dumpspec {
+ u32 size;
+ u8 data[0];
+};
+
+/**
* struct nfp_pf - NFP PF-specific device structure
* @pdev: Backpointer to PCI device
* @cpp: Pointer to the CPP handle
@@ -83,6 +95,9 @@ struct nfp_rtsym_table;
* @mip: MIP handle
* @rtbl: RTsym table
* @hwinfo: HWInfo table
+ * @dumpspec: Debug dump specification
+ * @dump_flag: Store dump flag between set_dump and get_dump_flag
+ * @dump_len: Store dump length between set_dump and get_dump_flag
* @eth_tbl: NSP ETH table
* @nspi: NSP identification info
* @hwmon_dev: pointer to hwmon device
@@ -124,6 +139,9 @@ struct nfp_pf {
const struct nfp_mip *mip;
struct nfp_rtsym_table *rtbl;
struct nfp_hwinfo *hwinfo;
+ struct nfp_dumpspec *dumpspec;
+ u32 dump_flag;
+ u32 dump_len;
struct nfp_eth_table *eth_tbl;
struct nfp_nsp_identify *nspi;
@@ -157,4 +175,15 @@ void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
+enum nfp_dump_diag {
+ NFP_DUMP_NSP_DIAG = 0,
+};
+
+struct nfp_dumpspec *
+nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl);
+s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+ u32 flag);
+int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+ struct ethtool_dump *dump_param, void *dest);
+
#endif /* NFP_MAIN_H */
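struct nfp_dumpspec ends in a flexible array, so the header and the TLV payload share one allocation; because specs can run to tens of kilobytes, vmalloc() is the safer choice over kmalloc(). A sketch of the allocation pattern (hypothetical helper):

static struct nfp_dumpspec *dumpspec_dup(const void *payload, u32 payload_sz)
{
	struct nfp_dumpspec *spec;

	/* one allocation covers the header and the trailing TLV bytes */
	spec = vmalloc(sizeof(*spec) + payload_sz);
	if (!spec)
		return NULL;

	spec->size = payload_sz;
	memcpy(spec->data, payload, payload_sz);
	return spec;		/* caller releases with vfree() */
}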
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 7f9857c276b1..d88eda9707e6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -47,6 +47,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-hi-lo.h>
+#include <net/xdp.h>
#include "nfp_net_ctrl.h"
@@ -350,6 +351,7 @@ struct nfp_net_rx_buf {
* @rxds: Virtual address of FL/RX ring in host memory
* @dma: DMA address of the FL/RX ring
* @size: Size, in bytes, of the FL/RX ring (needed to free)
+ * @xdp_rxq: RX-ring info avail for XDP
*/
struct nfp_net_rx_ring {
struct nfp_net_r_vector *r_vec;
@@ -361,13 +363,14 @@ struct nfp_net_rx_ring {
u32 idx;
int fl_qcidx;
+ unsigned int size;
u8 __iomem *qcp_fl;
struct nfp_net_rx_buf *rxbufs;
struct nfp_net_rx_desc *rxds;
dma_addr_t dma;
- unsigned int size;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned;
/**
@@ -548,6 +551,8 @@ struct nfp_net_dp {
* @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
+ * @stride_rx: Queue controller RX queue spacing
+ * @stride_tx: Queue controller TX queue spacing
* @r_vecs: Pre-allocated array of ring vectors
* @irq_entries: Pre-allocated array of MSI-X entries
* @lsc_handler: Handler for Link State Change interrupt
@@ -573,6 +578,7 @@ struct nfp_net_dp {
* @qcp_cfg: Pointer to QCP queue used for configuration notification
* @tx_bar: Pointer to mapped TX queues
* @rx_bar: Pointer to mapped FL/RX queues
+ * @tlv_caps: Parsed TLV capabilities
* @debugfs_dir: Device directory in debugfs
* @vnic_list: Entry on device vNIC list
* @pdev: Backpointer to PCI device
@@ -639,6 +645,8 @@ struct nfp_net {
u8 __iomem *tx_bar;
u8 __iomem *rx_bar;
+ struct nfp_net_tlv_caps tlv_caps;
+
struct dentry *debugfs_dir;
struct list_head vnic_list;
@@ -834,6 +842,18 @@ static inline const char *nfp_net_name(struct nfp_net *nn)
return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
}
+static inline void nfp_ctrl_lock(struct nfp_net *nn)
+ __acquires(&nn->r_vecs[0].lock)
+{
+ spin_lock_bh(&nn->r_vecs[0].lock);
+}
+
+static inline void nfp_ctrl_unlock(struct nfp_net *nn)
+ __releases(&nn->r_vecs[0].lock)
+{
+ spin_unlock_bh(&nn->r_vecs[0].lock);
+}
+
/* Globals */
extern const char nfp_driver_version[];
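nfp_ctrl_lock()/nfp_ctrl_unlock() wrap the first ring vector's lock and carry __acquires/__releases so sparse can verify balance; __nfp_ctrl_tx() is the variant that expects the lock to already be held. A sketch of batching messages under one acquisition (hypothetical caller):

static void ctrl_msg_burst(struct nfp_net *nn, struct sk_buff **skbs, int n)
{
	int i;

	nfp_ctrl_lock(nn);
	for (i = 0; i < n; i++)
		__nfp_ctrl_tx(nn, skbs[i]);	/* lock already held */
	nfp_ctrl_unlock(nn);
}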
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1a603fdd9e80..c0fd351c86b1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -293,9 +293,15 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
*/
static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
{
+ u32 mbox = nn->tlv_caps.mbox_off;
int ret;
- nn_writeq(nn, NFP_NET_CFG_MBOX_CMD, mbox_cmd);
+ if (!nfp_net_has_mbox(&nn->tlv_caps)) {
+ nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
+ return -EIO;
+ }
+
+ nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
if (ret) {
@@ -303,7 +309,7 @@ static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
return ret;
}
- return -nn_readl(nn, NFP_NET_CFG_MBOX_RET);
+ return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}
/* Interrupt configuration and handling
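With the mailbox location now coming from the TLV caps, every simple-mailbox transaction follows the same shape: check presence, stage any value bytes, write the command, kick NFP_NET_CFG_UPDATE_MBOX and read back the return code. A sketch of a caller with one 32-bit argument (the command is hypothetical):

static int mbox_cmd_one_arg(struct nfp_net *nn, u32 cmd, u32 arg)
{
	u32 mbox = nn->tlv_caps.mbox_off;

	if (!nfp_net_has_mbox(&nn->tlv_caps))
		return -EOPNOTSUPP;

	nn_writel(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_VAL, arg);
	/* writes SIMPLE_CMD, triggers UPDATE_MBOX, returns -SIMPLE_RET */
	return nfp_net_reconfig_mbox(nn, cmd);
}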
@@ -568,6 +574,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
return err;
}
nn_writeb(nn, ctrl_offset, entry->entry);
+ nfp_net_irq_unmask(nn, entry->entry);
return 0;
}
@@ -582,6 +589,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
unsigned int vector_idx)
{
nn_writeb(nn, ctrl_offset, 0xff);
+ nn_pci_flush(nn);
free_irq(nn->irq_entries[vector_idx].vector, nn);
}
@@ -1608,11 +1616,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
unsigned int true_bufsz;
struct sk_buff *skb;
int pkts_polled = 0;
+ struct xdp_buff xdp;
int idx;
rcu_read_lock();
xdp_prog = READ_ONCE(dp->xdp_prog);
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+ xdp.rxq = &rx_ring->xdp_rxq;
tx_ring = r_vec->xdp_ring;
while (pkts_polled < budget) {
@@ -1703,7 +1713,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
dp->bpf_offload_xdp) && !meta.portid) {
void *orig_data = rxbuf->frag + pkt_off;
unsigned int dma_off;
- struct xdp_buff xdp;
int act;
xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
@@ -1917,6 +1926,13 @@ err_free:
return false;
}
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+
+ return nfp_ctrl_tx_one(nn, r_vec, skb, false);
+}
+
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
@@ -2252,6 +2268,8 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ if (dp->netdev)
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
kfree(rx_ring->rxbufs);
if (rx_ring->rxds)
@@ -2275,7 +2293,14 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
- int sz;
+ int sz, err;
+
+ if (dp->netdev) {
+ err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
+ rx_ring->idx);
+ if (err < 0)
+ return err;
+ }
rx_ring->cnt = dp->rxd_cnt;
rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
@@ -2439,7 +2464,7 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
* ME timestamp ticks. There are 16 ME clock cycles for each timestamp
* count.
*/
- factor = nn->me_freq_mhz / 16;
+ factor = nn->tlv_caps.me_freq_mhz / 16;
/* copy RX interrupt coalesce parameters */
value = (nn->rx_coalesce_max_frames << 16) |
@@ -2850,6 +2875,11 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
new_ctrl = nn->dp.ctrl;
+ if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
+ else
+ new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;
+
if (netdev->flags & IFF_PROMISC) {
if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
@@ -3034,6 +3064,11 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_dp *dp;
+ int err;
+
+ err = nfp_app_change_mtu(nn->app, netdev, new_mtu);
+ if (err)
+ return err;
dp = nfp_net_clone_dp(nn);
if (!dp)
@@ -3055,8 +3090,9 @@ nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
+ ETH_P_8021Q);
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
}
@@ -3072,8 +3108,9 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
- nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
+ nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
+ ETH_P_8021Q);
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
}
@@ -3366,7 +3403,7 @@ nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
if (err)
return err;
- err = nfp_app_xdp_offload(nn->app, nn, offload_prog);
+ err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack);
if (err && flags & XDP_FLAGS_HW_MODE)
return err;
@@ -3392,17 +3429,10 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
if (nn->dp.bpf_offload_xdp)
xdp->prog_attached = XDP_ATTACHED_HW;
xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
+ xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0;
return 0;
- case BPF_OFFLOAD_VERIFIER_PREP:
- return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
- case BPF_OFFLOAD_TRANSLATE:
- return nfp_app_bpf_translate(nn->app, nn,
- xdp->offload.prog);
- case BPF_OFFLOAD_DESTROY:
- return nfp_app_bpf_destroy(nn->app, nn,
- xdp->offload.prog);
default:
- return -EINVAL;
+ return nfp_app_bpf(nn->app, nn, xdp);
}
}
@@ -3561,9 +3591,6 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
*/
void nfp_net_free(struct nfp_net *nn)
{
- if (nn->xdp_prog)
- bpf_prog_put(nn->xdp_prog);
-
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
else
@@ -3729,18 +3756,8 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
nfp_net_set_ethtool_ops(netdev);
}
-/**
- * nfp_net_init() - Initialise/finalise the nfp_net structure
- * @nn: NFP Net device structure
- *
- * Return: 0 on success or negative errno on error.
- */
-int nfp_net_init(struct nfp_net *nn)
+static int nfp_net_read_caps(struct nfp_net *nn)
{
- int err;
-
- nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
-
/* Get some of the read-only fields from the BAR */
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
@@ -3773,6 +3790,29 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.rx_offset = NFP_NET_RX_OFFSET;
}
+ /* For control vNICs mask out the capabilities app doesn't want. */
+ if (!nn->dp.netdev)
+ nn->cap &= nn->app->type->ctrl_cap_mask;
+
+ return 0;
+}
+
+/**
+ * nfp_net_init() - Initialise/finalise the nfp_net structure
+ * @nn: NFP Net device structure
+ *
+ * Return: 0 on success or negative errno on error.
+ */
+int nfp_net_init(struct nfp_net *nn)
+{
+ int err;
+
+ nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
+
+ err = nfp_net_read_caps(nn);
+ if (err)
+ return err;
+
/* Set default MTU and Freelist buffer size */
if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
nn->dp.mtu = nn->max_mtu;
@@ -3789,8 +3829,6 @@ int nfp_net_init(struct nfp_net *nn)
/* Allow L2 Broadcast and Multicast through by default, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
- if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;
/* Allow IRQ moderation, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
@@ -3798,6 +3836,11 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
+ err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
+ &nn->tlv_caps);
+ if (err)
+ return err;
+
if (nn->dp.netdev)
nfp_net_netdev_init(nn);
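Each RX ring now owns an xdp_rxq_info registered against the netdev and queue index; the register in nfp_net_rx_ring_alloc() must always be paired with the unregister in nfp_net_rx_ring_free(), and both are skipped for control vNICs that have no netdev. The pairing, reduced to a sketch:

static int ring_xdp_init(struct nfp_net_dp *dp, struct nfp_net_rx_ring *ring)
{
	if (!dp->netdev)
		return 0;	/* control vNIC: no netdev, no XDP */
	return xdp_rxq_info_reg(&ring->xdp_rxq, dp->netdev, ring->idx);
}

static void ring_xdp_fini(struct nfp_net_dp *dp, struct nfp_net_rx_ring *ring)
{
	if (dp->netdev)
		xdp_rxq_info_unreg(&ring->xdp_rxq);
}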
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
new file mode 100644
index 000000000000..ffb402746ad4
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2018 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "nfp_net_ctrl.h"
+#include "nfp_net.h"
+
+static void nfp_net_tlv_caps_reset(struct nfp_net_tlv_caps *caps)
+{
+ memset(caps, 0, sizeof(*caps));
+ caps->me_freq_mhz = 1200;
+ caps->mbox_off = NFP_NET_CFG_MBOX_BASE;
+ caps->mbox_len = NFP_NET_CFG_MBOX_VAL_MAX_SZ;
+}
+
+int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
+ struct nfp_net_tlv_caps *caps)
+{
+ u8 __iomem *data = ctrl_mem + NFP_NET_CFG_TLV_BASE;
+ u8 __iomem *end = ctrl_mem + NFP_NET_CFG_BAR_SZ;
+ u32 hdr;
+
+ nfp_net_tlv_caps_reset(caps);
+
+ hdr = readl(data);
+ if (!hdr)
+ return 0;
+
+ while (true) {
+ unsigned int length, offset;
+ u32 hdr = readl(data);
+
+ length = FIELD_GET(NFP_NET_CFG_TLV_HEADER_LENGTH, hdr);
+ offset = data - ctrl_mem;
+
+ /* Advance past the header */
+ data += 4;
+
+ if (length % NFP_NET_CFG_TLV_LENGTH_INC) {
+ dev_err(dev, "TLV size not multiple of %u len:%u\n",
+ NFP_NET_CFG_TLV_LENGTH_INC, length);
+ return -EINVAL;
+ }
+ if (data + length > end) {
+ dev_err(dev, "oversized TLV offset:%u len:%u\n",
+ offset, length);
+ return -EINVAL;
+ }
+
+ switch (FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr)) {
+ case NFP_NET_CFG_TLV_TYPE_UNKNOWN:
+ dev_err(dev, "NULL TLV at offset:%u\n", offset);
+ return -EINVAL;
+ case NFP_NET_CFG_TLV_TYPE_RESERVED:
+ break;
+ case NFP_NET_CFG_TLV_TYPE_END:
+ if (!length)
+ return 0;
+
+ dev_err(dev, "END TLV should be empty, has len:%d\n",
+ length);
+ return -EINVAL;
+ case NFP_NET_CFG_TLV_TYPE_ME_FREQ:
+ if (length != 4) {
+ dev_err(dev,
+ "ME FREQ TLV should be 4B, is %dB\n",
+ length);
+ return -EINVAL;
+ }
+
+ caps->me_freq_mhz = readl(data);
+ break;
+ case NFP_NET_CFG_TLV_TYPE_MBOX:
+ if (!length) {
+ caps->mbox_off = 0;
+ caps->mbox_len = 0;
+ } else {
+ caps->mbox_off = data - ctrl_mem;
+ caps->mbox_len = length;
+ }
+ break;
+ default:
+ if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
+ break;
+
+ dev_err(dev, "unknown TLV type:%u offset:%u len:%u\n",
+ FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr),
+ offset, length);
+ return -EINVAL;
+ }
+
+ data += length;
+ if (data + 4 > end) {
+ dev_err(dev, "reached end of BAR without END TLV\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Not reached */
+ return -EINVAL;
+}
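For reference, the smallest TLV area the parser above accepts: one ME_FREQ TLV followed by an empty END TLV. The header word packs the 15-bit type into bits 30:16 and the 16-bit length into bits 15:0, and values are read with readl(), i.e. little-endian on the wire. Sketched as CPU-endian words:

static const u32 example_tlv_area[] = {
	(3 << 16) | 4,	/* NFP_NET_CFG_TLV_TYPE_ME_FREQ, length 4 */
	800,		/* ME frequency in MHz */
	(2 << 16) | 0,	/* NFP_NET_CFG_TLV_TYPE_END, length 0 */
};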
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 782d452e0fc2..eeecef2caac6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -43,9 +43,7 @@
#ifndef _NFP_NET_CTRL_H_
#define _NFP_NET_CTRL_H_
-/* IMPORTANT: This header file is shared with the FW,
- * no OS specific constructs, please!
- */
+#include <linux/types.h>
/**
* Configuration BAR size.
@@ -91,23 +89,24 @@
#define NFP_NET_RSS_IPV6_EX_UDP 9
/**
- * @NFP_NET_TXR_MAX: Maximum number of TX rings
- * @NFP_NET_RXR_MAX: Maximum number of RX rings
+ * Ring counts
+ * %NFP_NET_TXR_MAX: Maximum number of TX rings
+ * %NFP_NET_RXR_MAX: Maximum number of RX rings
*/
#define NFP_NET_TXR_MAX 64
#define NFP_NET_RXR_MAX 64
/**
* Read/Write config words (0x0000 - 0x002c)
- * @NFP_NET_CFG_CTRL: Global control
- * @NFP_NET_CFG_UPDATE: Indicate which fields are updated
- * @NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
- * @NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
- * @NFP_NET_CFG_MTU: Set MTU size
- * @NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU)
- * @NFP_NET_CFG_EXN: MSI-X table entry for exceptions
- * @NFP_NET_CFG_LSC: MSI-X table entry for link state changes
- * @NFP_NET_CFG_MACADDR: MAC address
+ * %NFP_NET_CFG_CTRL: Global control
+ * %NFP_NET_CFG_UPDATE: Indicate which fields are updated
+ * %NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
+ * %NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
+ * %NFP_NET_CFG_MTU: Set MTU size
+ * %NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU)
+ * %NFP_NET_CFG_EXN: MSI-X table entry for exceptions
+ * %NFP_NET_CFG_LSC: MSI-X table entry for link state changes
+ * %NFP_NET_CFG_MACADDR: MAC address
*
* TODO:
* - define Error details in UPDATE
@@ -176,14 +175,14 @@
/**
* Read-only words (0x0030 - 0x0050):
- * @NFP_NET_CFG_VERSION: Firmware version number
- * @NFP_NET_CFG_STS: Status
- * @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL)
- * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
- * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
- * @NFP_NET_CFG_MAX_MTU: Maximum support MTU
- * @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
- * @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
+ * %NFP_NET_CFG_VERSION: Firmware version number
+ * %NFP_NET_CFG_STS: Status
+ * %NFP_NET_CFG_CAP: Capabilities (same bits as %NFP_NET_CFG_CTRL)
+ * %NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
+ * %NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
+ * %NFP_NET_CFG_MAX_MTU: Maximum supported MTU
+ * %NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
+ * %NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
*
* TODO:
* - define more STS bits
@@ -228,31 +227,37 @@
/**
* RSS capabilities
- * @NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
- * @NFP_NET_CFG_RSS_HFUNC)
+ * %NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
+ * %NFP_NET_CFG_RSS_HFUNC)
*/
#define NFP_NET_CFG_RSS_CAP 0x0054
#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000
/**
+ * TLV area start
+ * %NFP_NET_CFG_TLV_BASE: start anchor of the TLV area
+ */
+#define NFP_NET_CFG_TLV_BASE 0x0058
+
+/**
* VXLAN/UDP encap configuration
- * @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
- * @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
+ * %NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
+ * %NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
*/
#define NFP_NET_CFG_VXLAN_PORT 0x0060
#define NFP_NET_CFG_VXLAN_SZ 0x0008
/**
* BPF section
- * @NFP_NET_CFG_BPF_ABI: BPF ABI version
- * @NFP_NET_CFG_BPF_CAP: BPF capabilities
- * @NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
- * @NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded
- * @NFP_NET_CFG_BPF_DONE: Offset to jump to on exit
- * @NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks
- * @NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks
- * @NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions
- * @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
+ * %NFP_NET_CFG_BPF_ABI: BPF ABI version
+ * %NFP_NET_CFG_BPF_CAP: BPF capabilities
+ * %NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
+ * %NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded
+ * %NFP_NET_CFG_BPF_DONE: Offset to jump to on exit
+ * %NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks
+ * %NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks
+ * %NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions
+ * %NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/
#define NFP_NET_CFG_BPF_ABI 0x0080
#define NFP_NET_BPF_ABI 2
@@ -278,9 +283,9 @@
/**
* RSS configuration (0x0100 - 0x01ac):
* Used only when NFP_NET_CFG_CTRL_RSS is enabled
- * @NFP_NET_CFG_RSS_CFG: RSS configuration word
- * @NFP_NET_CFG_RSS_KEY: RSS "secret" key
- * @NFP_NET_CFG_RSS_ITBL: RSS indirection table
+ * %NFP_NET_CFG_RSS_CFG: RSS configuration word
+ * %NFP_NET_CFG_RSS_KEY: RSS "secret" key
+ * %NFP_NET_CFG_RSS_ITBL: RSS indirection table
*/
#define NFP_NET_CFG_RSS_BASE 0x0100
#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE
@@ -305,13 +310,13 @@
/**
* TX ring configuration (0x200 - 0x800)
- * @NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
- * @NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
- * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
- * @NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries)
- * @NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries)
- * @NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries)
- * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
+ * %NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
+ * %NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
+ * %NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
+ * %NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries)
+ * %NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries)
+ * %NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries)
+ * %NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
*/
#define NFP_NET_CFG_TXR_BASE 0x0200
#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8))
@@ -325,12 +330,12 @@
/**
* RX ring configuration (0x0800 - 0x0c00)
- * @NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
- * @NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries)
- * @NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries)
- * @NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries)
- * @NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries)
- * @NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
+ * %NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
+ * %NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries)
+ * %NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries)
+ * %NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries)
+ * %NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries)
+ * %NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
*/
#define NFP_NET_CFG_RXR_BASE 0x0800
#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8))
@@ -343,7 +348,7 @@
/**
* Interrupt Control/Cause registers (0x0c00 - 0x0d00)
* These registers are only used when MSI-X auto-masking is not
- * enabled (@NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index
+ * enabled (%NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is indexed
* by MSI-X entry and are 1B in size. If an entry is zero, the
* corresponding entry is enabled. If the FW generates an interrupt,
* it writes a cause into the corresponding field. This also masks
@@ -393,8 +398,8 @@
/**
* Per ring stats (0x1000 - 0x1800)
* options, 64bit per entry
- * @NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
- * @NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
+ * %NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
+ * %NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
*/
#define NFP_NET_CFG_TXR_STATS_BASE 0x1000
#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \
@@ -408,24 +413,105 @@
* 4B used for update command and 4B return code
* followed by a max of 504B of variable length value
*/
-#define NFP_NET_CFG_MBOX_CMD 0x1800
-#define NFP_NET_CFG_MBOX_RET 0x1804
-#define NFP_NET_CFG_MBOX_VAL 0x1808
+#define NFP_NET_CFG_MBOX_BASE 0x1800
#define NFP_NET_CFG_MBOX_VAL_MAX_SZ 0x1F8
+#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
+#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
+#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
+#define NFP_NET_CFG_MBOX_SIMPLE_LEN 12
+
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
/**
* VLAN filtering using general use mailbox
- * @NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
- * @NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
- * @NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
- * @NFP_NET_CFG_VXLAN_SZ: Size of the VLAN filter mailbox in bytes
+ * %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
+ * %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
+ * %NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
+ * %NFP_NET_CFG_VLAN_FILTER_SZ: Size of the VLAN filter mailbox in bytes
*/
-#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_VAL
+#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_SIMPLE_VAL
#define NFP_NET_CFG_VLAN_FILTER_VID NFP_NET_CFG_VLAN_FILTER
#define NFP_NET_CFG_VLAN_FILTER_PROTO (NFP_NET_CFG_VLAN_FILTER + 2)
#define NFP_NET_CFG_VLAN_FILTER_SZ 0x0004
+/**
+ * TLV capabilities
+ * %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
+ * %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
+ * %NFP_NET_CFG_TLV_LENGTH: Offset of length within the TLV
+ * %NFP_NET_CFG_TLV_LENGTH_INC: TLV length increments
+ * %NFP_NET_CFG_TLV_VALUE: Offset of value within the TLV
+ *
+ * List of simple TLV structures, first one starts at %NFP_NET_CFG_TLV_BASE.
+ * Last structure must be of type %NFP_NET_CFG_TLV_TYPE_END. Presence of TLVs
+ * is indicated by %NFP_NET_CFG_TLV_BASE being non-zero. TLV structures may
+ * fill the entire remainder of the BAR or be shorter. FW must make sure TLVs
+ * don't conflict with other features which allocate space beyond
+ * %NFP_NET_CFG_TLV_BASE. %NFP_NET_CFG_TLV_TYPE_RESERVED should be used to wrap
+ * space used by such features.
+ * Note that the 4 byte TLV header is not counted in %NFP_NET_CFG_TLV_LENGTH.
+ */
+#define NFP_NET_CFG_TLV_TYPE 0x00
+#define NFP_NET_CFG_TLV_TYPE_REQUIRED 0x8000
+#define NFP_NET_CFG_TLV_LENGTH 0x02
+#define NFP_NET_CFG_TLV_LENGTH_INC 4
+#define NFP_NET_CFG_TLV_VALUE 0x04
+
+#define NFP_NET_CFG_TLV_HEADER_REQUIRED 0x80000000
+#define NFP_NET_CFG_TLV_HEADER_TYPE 0x7fff0000
+#define NFP_NET_CFG_TLV_HEADER_LENGTH 0x0000ffff
+
+/**
+ * Capability TLV types
+ *
+ * %NFP_NET_CFG_TLV_TYPE_UNKNOWN:
+ * Special TLV type to catch bugs, should never be encountered. Drivers should
+ * treat encountering this type as an error and refuse to probe.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_RESERVED:
+ * Reserved space, may contain legacy fixed-offset fields, or be used for
+ * padding. The use of this type should be otherwise avoided.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_END:
+ * Empty, end of TLV list. Must be the last TLV. Drivers will stop processing
+ * further TLVs when encountered.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_ME_FREQ:
+ * Single word, ME frequency in MHz as used in calculation for
+ * %NFP_NET_CFG_RXR_IRQ_MOD and %NFP_NET_CFG_TXR_IRQ_MOD.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_MBOX:
+ * Variable, mailbox area. Overwrites the default location which is
+ * %NFP_NET_CFG_MBOX_BASE and length %NFP_NET_CFG_MBOX_VAL_MAX_SZ.
+ */
+#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
+#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
+#define NFP_NET_CFG_TLV_TYPE_END 2
+#define NFP_NET_CFG_TLV_TYPE_ME_FREQ 3
+#define NFP_NET_CFG_TLV_TYPE_MBOX 4
+
+struct device;
+
+/**
+ * struct nfp_net_tlv_caps - parsed control BAR TLV capabilities
+ * @me_freq_mhz: ME clock_freq (MHz)
+ * @mbox_off: vNIC mailbox area offset
+ * @mbox_len: vNIC mailbox area length
+ */
+struct nfp_net_tlv_caps {
+ u32 me_freq_mhz;
+ unsigned int mbox_off;
+ unsigned int mbox_len;
+};
+
+int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
+ struct nfp_net_tlv_caps *caps);
+
+static inline bool nfp_net_has_mbox(struct nfp_net_tlv_caps *caps)
+{
+ return caps->mbox_len >= NFP_NET_CFG_MBOX_SIMPLE_LEN;
+}
+
#endif /* _NFP_NET_CTRL_H_ */
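The three NFP_NET_CFG_TLV_HEADER_* masks partition the 32-bit header word, and FIELD_GET() from <linux/bitfield.h> pulls each field out. A decoding sketch (assumes @data points into the mapped control BAR):

static void tlv_header_decode(u8 __iomem *data)
{
	u32 hdr = readl(data);
	bool required = FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr);
	unsigned int type = FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr);
	unsigned int length = FIELD_GET(NFP_NET_CFG_TLV_HEADER_LENGTH, hdr);

	pr_info("TLV type:%u len:%u required:%d\n", type, length, required);
}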
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
new file mode 100644
index 000000000000..bb8ed460086e
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -0,0 +1,811 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+
+#include "nfp_asm.h"
+#include "nfp_main.h"
+#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_nffw.h"
+#include "nfpcore/nfp6000/nfp6000.h"
+
+#define NFP_DUMP_SPEC_RTSYM "_abi_dump_spec"
+
+#define ALIGN8(x) ALIGN(x, 8)
+
+enum nfp_dumpspec_type {
+ NFP_DUMPSPEC_TYPE_CPP_CSR = 0,
+ NFP_DUMPSPEC_TYPE_XPB_CSR = 1,
+ NFP_DUMPSPEC_TYPE_ME_CSR = 2,
+ NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR = 3,
+ NFP_DUMPSPEC_TYPE_RTSYM = 4,
+ NFP_DUMPSPEC_TYPE_HWINFO = 5,
+ NFP_DUMPSPEC_TYPE_FWNAME = 6,
+ NFP_DUMPSPEC_TYPE_HWINFO_FIELD = 7,
+ NFP_DUMPSPEC_TYPE_PROLOG = 10000,
+ NFP_DUMPSPEC_TYPE_ERROR = 10001,
+};
+
+/* The following structs must be carefully aligned so that they can be used to
+ * interpret the binary dumpspec and populate the dump data in a deterministic
+ * way.
+ */
+
+/* generic type plus length */
+struct nfp_dump_tl {
+ __be32 type;
+ __be32 length; /* chunk length to follow, aligned to 8 bytes */
+ char data[0];
+};
+
+/* NFP CPP parameters */
+struct nfp_dumpspec_cpp_isl_id {
+ u8 target;
+ u8 action;
+ u8 token;
+ u8 island;
+};
+
+struct nfp_dump_common_cpp {
+ struct nfp_dumpspec_cpp_isl_id cpp_id;
+ __be32 offset; /* address to start dump */
+ __be32 dump_length; /* total bytes to dump, aligned to reg size */
+};
+
+/* CSR dumpables */
+struct nfp_dumpspec_csr {
+ struct nfp_dump_tl tl;
+ struct nfp_dump_common_cpp cpp;
+ __be32 register_width; /* in bits */
+};
+
+struct nfp_dumpspec_rtsym {
+ struct nfp_dump_tl tl;
+ char rtsym[0];
+};
+
+/* header for register dumpable */
+struct nfp_dump_csr {
+ struct nfp_dump_tl tl;
+ struct nfp_dump_common_cpp cpp;
+ __be32 register_width; /* in bits */
+ __be32 error; /* error code encountered while reading */
+ __be32 error_offset; /* offset being read when error occurred */
+};
+
+struct nfp_dump_rtsym {
+ struct nfp_dump_tl tl;
+ struct nfp_dump_common_cpp cpp;
+ __be32 error; /* error code encountered while reading */
+ u8 padded_name_length; /* pad so data starts at 8 byte boundary */
+ char rtsym[0];
+ /* after padded_name_length, there is dump_length data */
+};
+
+struct nfp_dump_prolog {
+ struct nfp_dump_tl tl;
+ __be32 dump_level;
+};
+
+struct nfp_dump_error {
+ struct nfp_dump_tl tl;
+ __be32 error;
+ char padding[4];
+ char spec[0];
+};
+
+/* to track state through debug size calculation TLV traversal */
+struct nfp_level_size {
+ __be32 requested_level; /* input */
+ u32 total_size; /* output */
+};
+
+/* to track state during debug dump creation TLV traversal */
+struct nfp_dump_state {
+ __be32 requested_level; /* input param */
+ u32 dumped_size; /* adds up to size of dumped data */
+ u32 buf_size; /* size of buffer pointed to by p */
+ void *p; /* current point in dump buffer */
+};
+
+typedef int (*nfp_tlv_visit)(struct nfp_pf *pf, struct nfp_dump_tl *tl,
+ void *param);
+
+static int
+nfp_traverse_tlvs(struct nfp_pf *pf, void *data, u32 data_length, void *param,
+ nfp_tlv_visit tlv_visit)
+{
+ long long remaining = data_length;
+ struct nfp_dump_tl *tl;
+ u32 total_tlv_size;
+ void *p = data;
+ int err;
+
+ while (remaining >= sizeof(*tl)) {
+ tl = p;
+ if (!tl->type && !tl->length)
+ break;
+
+ if (be32_to_cpu(tl->length) > remaining - sizeof(*tl))
+ return -EINVAL;
+
+ total_tlv_size = sizeof(*tl) + be32_to_cpu(tl->length);
+
+ /* Spec TLVs should be aligned to 4 bytes. */
+ if (total_tlv_size % 4 != 0)
+ return -EINVAL;
+
+ p += total_tlv_size;
+ remaining -= total_tlv_size;
+ err = tlv_visit(pf, tl, param);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static u32 nfp_get_numeric_cpp_id(struct nfp_dumpspec_cpp_isl_id *cpp_id)
+{
+ return NFP_CPP_ISLAND_ID(cpp_id->target, cpp_id->action, cpp_id->token,
+ cpp_id->island);
+}
+
+struct nfp_dumpspec *
+nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl)
+{
+ const struct nfp_rtsym *specsym;
+ struct nfp_dumpspec *dumpspec;
+ int bytes_read;
+ u32 cpp_id;
+
+ specsym = nfp_rtsym_lookup(rtbl, NFP_DUMP_SPEC_RTSYM);
+ if (!specsym)
+ return NULL;
+
+ /* expected size of this buffer is in the order of tens of kilobytes */
+ dumpspec = vmalloc(sizeof(*dumpspec) + specsym->size);
+ if (!dumpspec)
+ return NULL;
+
+ dumpspec->size = specsym->size;
+
+ cpp_id = NFP_CPP_ISLAND_ID(specsym->target, NFP_CPP_ACTION_RW, 0,
+ specsym->domain);
+
+ bytes_read = nfp_cpp_read(cpp, cpp_id, specsym->addr, dumpspec->data,
+ specsym->size);
+ if (bytes_read != specsym->size) {
+ vfree(dumpspec);
+ nfp_warn(cpp, "Debug dump specification read failed.\n");
+ return NULL;
+ }
+
+ return dumpspec;
+}
+
+static int nfp_dump_error_tlv_size(struct nfp_dump_tl *spec)
+{
+ return ALIGN8(sizeof(struct nfp_dump_error) + sizeof(*spec) +
+ be32_to_cpu(spec->length));
+}
+
+static int nfp_calc_fwname_tlv_size(struct nfp_pf *pf)
+{
+ u32 fwname_len = strlen(nfp_mip_name(pf->mip));
+
+ return sizeof(struct nfp_dump_tl) + ALIGN8(fwname_len + 1);
+}
+
+static int nfp_calc_hwinfo_field_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec)
+{
+ u32 tl_len, key_len;
+ const char *value;
+
+ tl_len = be32_to_cpu(spec->length);
+ key_len = strnlen(spec->data, tl_len);
+ if (key_len == tl_len)
+ return nfp_dump_error_tlv_size(spec);
+
+ value = nfp_hwinfo_lookup(pf->hwinfo, spec->data);
+ if (!value)
+ return nfp_dump_error_tlv_size(spec);
+
+ return sizeof(struct nfp_dump_tl) + ALIGN8(key_len + strlen(value) + 2);
+}
+
+static bool nfp_csr_spec_valid(struct nfp_dumpspec_csr *spec_csr)
+{
+ u32 required_read_sz = sizeof(*spec_csr) - sizeof(spec_csr->tl);
+ u32 available_sz = be32_to_cpu(spec_csr->tl.length);
+ u32 reg_width;
+
+ if (available_sz < required_read_sz)
+ return false;
+
+ reg_width = be32_to_cpu(spec_csr->register_width);
+
+ return reg_width == 32 || reg_width == 64;
+}
+
+static int
+nfp_calc_rtsym_dump_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec)
+{
+ struct nfp_rtsym_table *rtbl = pf->rtbl;
+ struct nfp_dumpspec_rtsym *spec_rtsym;
+ const struct nfp_rtsym *sym;
+ u32 tl_len, key_len;
+ u32 size;
+
+ spec_rtsym = (struct nfp_dumpspec_rtsym *)spec;
+ tl_len = be32_to_cpu(spec->length);
+ key_len = strnlen(spec_rtsym->rtsym, tl_len);
+ if (key_len == tl_len)
+ return nfp_dump_error_tlv_size(spec);
+
+ sym = nfp_rtsym_lookup(rtbl, spec_rtsym->rtsym);
+ if (!sym)
+ return nfp_dump_error_tlv_size(spec);
+
+ if (sym->type == NFP_RTSYM_TYPE_ABS)
+ size = sizeof(sym->addr);
+ else
+ size = sym->size;
+
+ return ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1) +
+ ALIGN8(size);
+}
+
+static int
+nfp_add_tlv_size(struct nfp_pf *pf, struct nfp_dump_tl *tl, void *param)
+{
+ struct nfp_dumpspec_csr *spec_csr;
+ u32 *size = param;
+ u32 hwinfo_size;
+
+ switch (be32_to_cpu(tl->type)) {
+ case NFP_DUMPSPEC_TYPE_FWNAME:
+ *size += nfp_calc_fwname_tlv_size(pf);
+ break;
+ case NFP_DUMPSPEC_TYPE_CPP_CSR:
+ case NFP_DUMPSPEC_TYPE_XPB_CSR:
+ case NFP_DUMPSPEC_TYPE_ME_CSR:
+ spec_csr = (struct nfp_dumpspec_csr *)tl;
+ if (!nfp_csr_spec_valid(spec_csr))
+ *size += nfp_dump_error_tlv_size(tl);
+ else
+ *size += ALIGN8(sizeof(struct nfp_dump_csr)) +
+ ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length));
+ break;
+ case NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR:
+ spec_csr = (struct nfp_dumpspec_csr *)tl;
+ if (!nfp_csr_spec_valid(spec_csr))
+ *size += nfp_dump_error_tlv_size(tl);
+ else
+ *size += ALIGN8(sizeof(struct nfp_dump_csr)) +
+ ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length) *
+ NFP_IND_NUM_CONTEXTS);
+ break;
+ case NFP_DUMPSPEC_TYPE_RTSYM:
+ *size += nfp_calc_rtsym_dump_sz(pf, tl);
+ break;
+ case NFP_DUMPSPEC_TYPE_HWINFO:
+ hwinfo_size = nfp_hwinfo_get_packed_str_size(pf->hwinfo);
+ *size += sizeof(struct nfp_dump_tl) + ALIGN8(hwinfo_size);
+ break;
+ case NFP_DUMPSPEC_TYPE_HWINFO_FIELD:
+ *size += nfp_calc_hwinfo_field_sz(pf, tl);
+ break;
+ default:
+ *size += nfp_dump_error_tlv_size(tl);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+nfp_calc_specific_level_size(struct nfp_pf *pf, struct nfp_dump_tl *dump_level,
+ void *param)
+{
+ struct nfp_level_size *lev_sz = param;
+
+ if (dump_level->type != lev_sz->requested_level)
+ return 0;
+
+ return nfp_traverse_tlvs(pf, dump_level->data,
+ be32_to_cpu(dump_level->length),
+ &lev_sz->total_size, nfp_add_tlv_size);
+}
+
+s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+ u32 flag)
+{
+ struct nfp_level_size lev_sz;
+ int err;
+
+ lev_sz.requested_level = cpu_to_be32(flag);
+ lev_sz.total_size = ALIGN8(sizeof(struct nfp_dump_prolog));
+
+ err = nfp_traverse_tlvs(pf, spec->data, spec->size, &lev_sz,
+ nfp_calc_specific_level_size);
+ if (err)
+ return err;
+
+ return lev_sz.total_size;
+}
+
+static int nfp_add_tlv(u32 type, u32 total_tlv_sz, struct nfp_dump_state *dump)
+{
+ struct nfp_dump_tl *tl = dump->p;
+
+ if (total_tlv_sz > dump->buf_size)
+ return -ENOSPC;
+
+ if (dump->buf_size - total_tlv_sz < dump->dumped_size)
+ return -ENOSPC;
+
+ tl->type = cpu_to_be32(type);
+ tl->length = cpu_to_be32(total_tlv_sz - sizeof(*tl));
+
+ dump->dumped_size += total_tlv_sz;
+ dump->p += total_tlv_sz;
+
+ return 0;
+}
+
+static int
+nfp_dump_error_tlv(struct nfp_dump_tl *spec, int error,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_error *dump_header = dump->p;
+ u32 total_spec_size, total_size;
+ int err;
+
+ total_spec_size = sizeof(*spec) + be32_to_cpu(spec->length);
+ total_size = ALIGN8(sizeof(*dump_header) + total_spec_size);
+
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_ERROR, total_size, dump);
+ if (err)
+ return err;
+
+ dump_header->error = cpu_to_be32(error);
+ memcpy(dump_header->spec, spec, total_spec_size);
+
+ return 0;
+}
+
+static int nfp_dump_fwname(struct nfp_pf *pf, struct nfp_dump_state *dump)
+{
+ struct nfp_dump_tl *dump_header = dump->p;
+ u32 fwname_len, total_size;
+ const char *fwname;
+ int err;
+
+ fwname = nfp_mip_name(pf->mip);
+ fwname_len = strlen(fwname);
+ total_size = sizeof(*dump_header) + ALIGN8(fwname_len + 1);
+
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_FWNAME, total_size, dump);
+ if (err)
+ return err;
+
+ memcpy(dump_header->data, fwname, fwname_len);
+
+ return 0;
+}
+
+static int
+nfp_dump_hwinfo(struct nfp_pf *pf, struct nfp_dump_tl *spec,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_tl *dump_header = dump->p;
+ u32 hwinfo_size, total_size;
+ char *hwinfo;
+ int err;
+
+ hwinfo = nfp_hwinfo_get_packed_strings(pf->hwinfo);
+ hwinfo_size = nfp_hwinfo_get_packed_str_size(pf->hwinfo);
+ total_size = sizeof(*dump_header) + ALIGN8(hwinfo_size);
+
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_HWINFO, total_size, dump);
+ if (err)
+ return err;
+
+ memcpy(dump_header->data, hwinfo, hwinfo_size);
+
+ return 0;
+}
+
+static int nfp_dump_hwinfo_field(struct nfp_pf *pf, struct nfp_dump_tl *spec,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_tl *dump_header = dump->p;
+ u32 tl_len, key_len, val_len;
+ const char *key, *value;
+ u32 total_size;
+ int err;
+
+ tl_len = be32_to_cpu(spec->length);
+ key_len = strnlen(spec->data, tl_len);
+ if (key_len == tl_len)
+ return nfp_dump_error_tlv(spec, -EINVAL, dump);
+
+ key = spec->data;
+ value = nfp_hwinfo_lookup(pf->hwinfo, key);
+ if (!value)
+ return nfp_dump_error_tlv(spec, -ENOENT, dump);
+
+ val_len = strlen(value);
+ total_size = sizeof(*dump_header) + ALIGN8(key_len + val_len + 2);
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_HWINFO_FIELD, total_size, dump);
+ if (err)
+ return err;
+
+ memcpy(dump_header->data, key, key_len + 1);
+ memcpy(dump_header->data + key_len + 1, value, val_len + 1);
+
+ return 0;
+}
+
+static bool is_xpb_read(struct nfp_dumpspec_cpp_isl_id *cpp_id)
+{
+ return cpp_id->target == NFP_CPP_TARGET_ISLAND_XPB &&
+ cpp_id->action == 0 && cpp_id->token == 0;
+}
+
+static int
+nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_csr *dump_header = dump->p;
+ u32 reg_sz, header_size, total_size;
+ u32 cpp_rd_addr, max_rd_addr;
+ int bytes_read;
+ void *dest;
+ u32 cpp_id;
+ int err;
+
+ if (!nfp_csr_spec_valid(spec_csr))
+ return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+
+ reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
+ header_size = ALIGN8(sizeof(*dump_header));
+ total_size = header_size +
+ ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length));
+ dest = dump->p + header_size;
+
+ err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+ if (err)
+ return err;
+
+ dump_header->cpp = spec_csr->cpp;
+ dump_header->register_width = spec_csr->register_width;
+
+ cpp_id = nfp_get_numeric_cpp_id(&spec_csr->cpp.cpp_id);
+ cpp_rd_addr = be32_to_cpu(spec_csr->cpp.offset);
+ max_rd_addr = cpp_rd_addr + be32_to_cpu(spec_csr->cpp.dump_length);
+
+ while (cpp_rd_addr < max_rd_addr) {
+ if (is_xpb_read(&spec_csr->cpp.cpp_id)) {
+ err = nfp_xpb_readl(pf->cpp, cpp_rd_addr, (u32 *)dest);
+ } else {
+ bytes_read = nfp_cpp_read(pf->cpp, cpp_id, cpp_rd_addr,
+ dest, reg_sz);
+ err = bytes_read == reg_sz ? 0 : -EIO;
+ }
+ if (err) {
+ dump_header->error = cpu_to_be32(err);
+ dump_header->error_offset = cpu_to_be32(cpp_rd_addr);
+ break;
+ }
+ cpp_rd_addr += reg_sz;
+ dest += reg_sz;
+ }
+
+ return 0;
+}
+
+/* Write context to CSRCtxPtr, then read from it. Then the value can be read
+ * from IndCtxStatus.
+ */
+static int
+nfp_read_indirect_csr(struct nfp_cpp *cpp,
+ struct nfp_dumpspec_cpp_isl_id cpp_params, u32 offset,
+ u32 reg_sz, u32 context, void *dest)
+{
+ u32 csr_ctx_ptr_offs;
+ u32 cpp_id;
+ int result;
+
+ csr_ctx_ptr_offs = nfp_get_ind_csr_ctx_ptr_offs(offset);
+ cpp_id = NFP_CPP_ISLAND_ID(cpp_params.target,
+ NFP_IND_ME_REFL_WR_SIG_INIT,
+ cpp_params.token, cpp_params.island);
+ result = nfp_cpp_writel(cpp, cpp_id, csr_ctx_ptr_offs, context);
+ if (result)
+ return result;
+
+ cpp_id = nfp_get_numeric_cpp_id(&cpp_params);
+ result = nfp_cpp_read(cpp, cpp_id, csr_ctx_ptr_offs, dest, reg_sz);
+ if (result != reg_sz)
+ return result < 0 ? result : -EIO;
+
+ result = nfp_cpp_read(cpp, cpp_id, offset, dest, reg_sz);
+ if (result != reg_sz)
+ return result < 0 ? result : -EIO;
+
+ return 0;
+}
+
+static int
+nfp_read_all_indirect_csr_ctx(struct nfp_cpp *cpp,
+ struct nfp_dumpspec_csr *spec_csr, u32 address,
+ u32 reg_sz, void *dest)
+{
+ u32 ctx;
+ int err;
+
+ for (ctx = 0; ctx < NFP_IND_NUM_CONTEXTS; ctx++) {
+ err = nfp_read_indirect_csr(cpp, spec_csr->cpp.cpp_id, address,
+ reg_sz, ctx, dest + ctx * reg_sz);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+nfp_dump_indirect_csr_range(struct nfp_pf *pf,
+ struct nfp_dumpspec_csr *spec_csr,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_csr *dump_header = dump->p;
+ u32 reg_sz, header_size, total_size;
+ u32 cpp_rd_addr, max_rd_addr;
+ u32 reg_data_length;
+ void *dest;
+ int err;
+
+ if (!nfp_csr_spec_valid(spec_csr))
+ return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+
+ reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
+ header_size = ALIGN8(sizeof(*dump_header));
+ reg_data_length = be32_to_cpu(spec_csr->cpp.dump_length) *
+ NFP_IND_NUM_CONTEXTS;
+ total_size = header_size + ALIGN8(reg_data_length);
+ dest = dump->p + header_size;
+
+ err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+ if (err)
+ return err;
+
+ dump_header->cpp = spec_csr->cpp;
+ dump_header->register_width = spec_csr->register_width;
+
+ cpp_rd_addr = be32_to_cpu(spec_csr->cpp.offset);
+ max_rd_addr = cpp_rd_addr + be32_to_cpu(spec_csr->cpp.dump_length);
+ while (cpp_rd_addr < max_rd_addr) {
+ err = nfp_read_all_indirect_csr_ctx(pf->cpp, spec_csr,
+ cpp_rd_addr, reg_sz, dest);
+ if (err) {
+ dump_header->error = cpu_to_be32(err);
+ dump_header->error_offset = cpu_to_be32(cpp_rd_addr);
+ break;
+ }
+ cpp_rd_addr += reg_sz;
+ dest += reg_sz * NFP_IND_NUM_CONTEXTS;
+ }
+
+ return 0;
+}
+
+static int
+nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
+ struct nfp_dump_state *dump)
+{
+ struct nfp_dump_rtsym *dump_header = dump->p;
+ struct nfp_dumpspec_cpp_isl_id cpp_params;
+ struct nfp_rtsym_table *rtbl = pf->rtbl;
+ u32 header_size, total_size, sym_size;
+ const struct nfp_rtsym *sym;
+ u32 tl_len, key_len;
+ int bytes_read;
+ u32 cpp_id;
+ void *dest;
+ int err;
+
+ tl_len = be32_to_cpu(spec->tl.length);
+ key_len = strnlen(spec->rtsym, tl_len);
+ if (key_len == tl_len)
+ return nfp_dump_error_tlv(&spec->tl, -EINVAL, dump);
+
+ sym = nfp_rtsym_lookup(rtbl, spec->rtsym);
+ if (!sym)
+ return nfp_dump_error_tlv(&spec->tl, -ENOENT, dump);
+
+ if (sym->type == NFP_RTSYM_TYPE_ABS)
+ sym_size = sizeof(sym->addr);
+ else
+ sym_size = sym->size;
+
+ header_size =
+ ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1);
+ total_size = header_size + ALIGN8(sym_size);
+ dest = dump->p + header_size;
+
+ err = nfp_add_tlv(be32_to_cpu(spec->tl.type), total_size, dump);
+ if (err)
+ return err;
+
+ dump_header->padded_name_length =
+ header_size - offsetof(struct nfp_dump_rtsym, rtsym);
+ memcpy(dump_header->rtsym, spec->rtsym, key_len + 1);
+ dump_header->cpp.dump_length = cpu_to_be32(sym_size);
+
+ if (sym->type == NFP_RTSYM_TYPE_ABS) {
+ *(u64 *)dest = sym->addr;
+ } else {
+ cpp_params.target = sym->target;
+ cpp_params.action = NFP_CPP_ACTION_RW;
+ cpp_params.token = 0;
+ cpp_params.island = sym->domain;
+ cpp_id = nfp_get_numeric_cpp_id(&cpp_params);
+ dump_header->cpp.cpp_id = cpp_params;
+ dump_header->cpp.offset = cpu_to_be32(sym->addr);
+ bytes_read = nfp_cpp_read(pf->cpp, cpp_id, sym->addr, dest,
+ sym_size);
+ if (bytes_read != sym_size) {
+ if (bytes_read >= 0)
+ bytes_read = -EIO;
+ dump_header->error = cpu_to_be32(bytes_read);
+ }
+ }
+
+ return 0;
+}
+
+static int
+nfp_dump_for_tlv(struct nfp_pf *pf, struct nfp_dump_tl *tl, void *param)
+{
+ struct nfp_dumpspec_rtsym *spec_rtsym;
+ struct nfp_dump_state *dump = param;
+ struct nfp_dumpspec_csr *spec_csr;
+ int err;
+
+ switch (be32_to_cpu(tl->type)) {
+ case NFP_DUMPSPEC_TYPE_FWNAME:
+ err = nfp_dump_fwname(pf, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_CPP_CSR:
+ case NFP_DUMPSPEC_TYPE_XPB_CSR:
+ case NFP_DUMPSPEC_TYPE_ME_CSR:
+ spec_csr = (struct nfp_dumpspec_csr *)tl;
+ err = nfp_dump_csr_range(pf, spec_csr, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR:
+ spec_csr = (struct nfp_dumpspec_csr *)tl;
+ err = nfp_dump_indirect_csr_range(pf, spec_csr, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_RTSYM:
+ spec_rtsym = (struct nfp_dumpspec_rtsym *)tl;
+ err = nfp_dump_single_rtsym(pf, spec_rtsym, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_HWINFO:
+ err = nfp_dump_hwinfo(pf, tl, dump);
+ if (err)
+ return err;
+ break;
+ case NFP_DUMPSPEC_TYPE_HWINFO_FIELD:
+ err = nfp_dump_hwinfo_field(pf, tl, dump);
+ if (err)
+ return err;
+ break;
+ default:
+ err = nfp_dump_error_tlv(tl, -EOPNOTSUPP, dump);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+nfp_dump_specific_level(struct nfp_pf *pf, struct nfp_dump_tl *dump_level,
+ void *param)
+{
+ struct nfp_dump_state *dump = param;
+
+ if (dump_level->type != dump->requested_level)
+ return 0;
+
+ return nfp_traverse_tlvs(pf, dump_level->data,
+ be32_to_cpu(dump_level->length), dump,
+ nfp_dump_for_tlv);
+}
+
+static int nfp_dump_populate_prolog(struct nfp_dump_state *dump)
+{
+ struct nfp_dump_prolog *prolog = dump->p;
+ u32 total_size;
+ int err;
+
+ total_size = ALIGN8(sizeof(*prolog));
+
+ err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_PROLOG, total_size, dump);
+ if (err)
+ return err;
+
+ prolog->dump_level = dump->requested_level;
+
+ return 0;
+}
+
+int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+ struct ethtool_dump *dump_param, void *dest)
+{
+ struct nfp_dump_state dump;
+ int err;
+
+ dump.requested_level = cpu_to_be32(dump_param->flag);
+ dump.dumped_size = 0;
+ dump.p = dest;
+ dump.buf_size = dump_param->len;
+
+ err = nfp_dump_populate_prolog(&dump);
+ if (err)
+ return err;
+
+ err = nfp_traverse_tlvs(pf, spec->data, spec->size, &dump,
+ nfp_dump_specific_level);
+ if (err)
+ return err;
+
+ /* Report the size actually dumped, so that a mismatch with the
+ * pre-calculated size triggers a warning.
+ */
+ dump_param->len = dump.dumped_size;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 2801ecd09eab..e1dae0616f52 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -47,18 +47,16 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
+#include <linux/firmware.h>
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
+#include "nfp_main.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_port.h"
-enum nfp_dump_diag {
- NFP_DUMP_NSP_DIAG = 0,
-};
-
struct nfp_et_stat {
char name[ETH_GSTRING_LEN];
int off;
@@ -333,7 +331,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
ls >= ARRAY_SIZE(ls_to_ethtool))
return 0;
- cmd->base.speed = ls_to_ethtool[sts];
+ cmd->base.speed = ls_to_ethtool[ls];
cmd->base.duplex = DUPLEX_FULL;
return 0;
@@ -1066,15 +1064,34 @@ exit_release:
return ret;
}
+/* Set the dump flag/level. Calculate the dump length only for flag > 0 (the
+ * new TLV-based dumps); flag 0 (the default) has its length calculated in
+ * nfp_app_get_dump_flag(), and triggering a level 0 dump without setting the
+ * flag first must keep working, for backward compatibility.
+ */
static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
struct nfp_app *app = nfp_app_from_netdev(netdev);
+ s64 len;
if (!app)
return -EOPNOTSUPP;
- if (val->flag != NFP_DUMP_NSP_DIAG)
- return -EINVAL;
+ if (val->flag == NFP_DUMP_NSP_DIAG) {
+ app->pf->dump_flag = val->flag;
+ return 0;
+ }
+
+ if (!app->pf->dumpspec)
+ return -EOPNOTSUPP;
+
+ len = nfp_net_dump_calculate_size(app->pf, app->pf->dumpspec,
+ val->flag);
+ if (len < 0)
+ return len;
+
+ app->pf->dump_flag = val->flag;
+ app->pf->dump_len = len;
return 0;
}
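In userspace terms (device name hypothetical): `ethtool -W ens4 N 2` selects a TLV dump level and pre-computes its length here, `ethtool -w ens4` then reports that flag and length via nfp_app_get_dump_flag(), and `ethtool -w ens4 data dump.bin` has nfp_app_get_dump_data() populate the buffer.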
@@ -1082,14 +1099,37 @@ static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
static int
nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
- return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, NULL);
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+ if (!app)
+ return -EOPNOTSUPP;
+
+ if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
+ return nfp_dump_nsp_diag(app, dump, NULL);
+
+ dump->flag = app->pf->dump_flag;
+ dump->len = app->pf->dump_len;
+
+ return 0;
}
static int
nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
void *buffer)
{
- return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, buffer);
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+ if (!app)
+ return -EOPNOTSUPP;
+
+ if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
+ return nfp_dump_nsp_diag(app, dump, buffer);
+
+ dump->flag = app->pf->dump_flag;
+ dump->len = app->pf->dump_len;
+
+ return nfp_net_dump_populate_buffer(app->pf, app->pf->dumpspec, dump,
+ buffer);
}
static int nfp_net_set_coalesce(struct net_device *netdev,
@@ -1230,6 +1270,57 @@ static int nfp_net_set_channels(struct net_device *netdev,
return nfp_net_set_num_rings(nn, total_rx, total_tx);
}
+static int
+nfp_net_flash_device(struct net_device *netdev, struct ethtool_flash *flash)
+{
+ const struct firmware *fw;
+ struct nfp_app *app;
+ struct nfp_nsp *nsp;
+ struct device *dev;
+ int err;
+
+ if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
+ return -EOPNOTSUPP;
+
+ app = nfp_app_from_netdev(netdev);
+ if (!app)
+ return -EOPNOTSUPP;
+
+ dev = &app->pdev->dev;
+
+ nsp = nfp_nsp_open(app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ dev_err(dev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ err = request_firmware_direct(&fw, flash->data, dev);
+ if (err)
+ goto exit_close_nsp;
+
+ dev_info(dev, "Please be patient while writing flash image: %s\n",
+ flash->data);
+ dev_hold(netdev);
+ rtnl_unlock();
+
+ err = nfp_nsp_write_flash(nsp, fw);
+ if (err < 0) {
+ dev_err(dev, "Flash write failed: %d\n", err);
+ goto exit_rtnl_lock;
+ }
+ dev_info(dev, "Finished writing flash image\n");
+
+exit_rtnl_lock:
+ rtnl_lock();
+ dev_put(netdev);
+ release_firmware(fw);
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
+
static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_drvinfo = nfp_net_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1240,6 +1331,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_sset_count = nfp_net_get_sset_count,
.get_rxnfc = nfp_net_get_rxnfc,
.set_rxnfc = nfp_net_set_rxnfc,
+ .flash_device = nfp_net_flash_device,
.get_rxfh_indir_size = nfp_net_get_rxfh_indir_size,
.get_rxfh_key_size = nfp_net_get_rxfh_key_size,
.get_rxfh = nfp_net_get_rxfh,
@@ -1265,6 +1357,7 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.get_strings = nfp_port_get_strings,
.get_ethtool_stats = nfp_port_get_stats,
.get_sset_count = nfp_port_get_sset_count,
+ .flash_device = nfp_net_flash_device,
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index c505014121c4..15fa47f622aa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -208,12 +208,6 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
int err;
- /* Get ME clock frequency from ctrl BAR
- * XXX for now frequency is hardcoded until we figure out how
- * to get the value from nfp-hwinfo into ctrl bar
- */
- nn->me_freq_mhz = 1200;
-
err = nfp_net_init(nn);
if (err)
return err;
@@ -373,7 +367,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
if (IS_ERR(pf->app))
return PTR_ERR(pf->app);
+ mutex_lock(&pf->lock);
err = nfp_app_init(pf->app);
+ mutex_unlock(&pf->lock);
if (err)
goto err_free;
@@ -401,7 +397,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
err_unmap:
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
+ mutex_lock(&pf->lock);
nfp_app_clean(pf->app);
+ mutex_unlock(&pf->lock);
err_free:
nfp_app_free(pf->app);
pf->app = NULL;
@@ -414,7 +412,11 @@ static void nfp_net_pf_app_clean(struct nfp_pf *pf)
nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
}
+
+ mutex_lock(&pf->lock);
nfp_app_clean(pf->app);
+ mutex_unlock(&pf->lock);
+
nfp_app_free(pf->app);
pf->app = NULL;
}
@@ -570,17 +572,6 @@ err_unmap_ctrl:
return err;
}
-static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
-{
- nfp_net_pf_app_stop(pf);
- /* stop app first, to avoid double free of ctrl vNIC's ddir */
- nfp_net_debugfs_dir_clean(&pf->ddir);
-
- nfp_net_pf_free_irqs(pf);
- nfp_net_pf_app_clean(pf);
- nfp_net_pci_unmap_mem(pf);
-}
-
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
struct nfp_eth_table *eth_table)
@@ -655,9 +646,6 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
nfp_net_pf_free_vnic(pf, nn);
}
- if (list_empty(&pf->vnics))
- nfp_net_pci_remove_finish(pf);
-
return 0;
}
@@ -707,6 +695,7 @@ int nfp_net_refresh_eth_port(struct nfp_port *port)
*/
int nfp_net_pci_probe(struct nfp_pf *pf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
struct nfp_net_fw_version fw_ver;
u8 __iomem *ctrl_bar, *qc_bar;
int stride;
@@ -720,16 +709,13 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
return -EINVAL;
}
- mutex_lock(&pf->lock);
pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
- if ((int)pf->max_data_vnics < 0) {
- err = pf->max_data_vnics;
- goto err_unlock;
- }
+ if ((int)pf->max_data_vnics < 0)
+ return pf->max_data_vnics;
err = nfp_net_pci_map_mem(pf);
if (err)
- goto err_unlock;
+ return err;
ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
qc_bar = nfp_cpp_area_iomem(pf->qc_area);
@@ -768,6 +754,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_unmap;
+ err = devlink_register(devlink, &pf->pdev->dev);
+ if (err)
+ goto err_app_clean;
+
+ mutex_lock(&pf->lock);
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
/* Allocate the vnics and do basic init */
@@ -799,32 +790,39 @@ err_free_vnics:
nfp_net_pf_free_vnics(pf);
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
+ mutex_unlock(&pf->lock);
+ cancel_work_sync(&pf->port_refresh_work);
+ devlink_unregister(devlink);
+err_app_clean:
nfp_net_pf_app_clean(pf);
err_unmap:
nfp_net_pci_unmap_mem(pf);
-err_unlock:
- mutex_unlock(&pf->lock);
- cancel_work_sync(&pf->port_refresh_work);
return err;
}
void nfp_net_pci_remove(struct nfp_pf *pf)
{
- struct nfp_net *nn;
+ struct nfp_net *nn, *next;
mutex_lock(&pf->lock);
- if (list_empty(&pf->vnics))
- goto out;
-
- list_for_each_entry(nn, &pf->vnics, vnic_list)
- if (nfp_net_is_data_vnic(nn))
- nfp_net_pf_clean_vnic(pf, nn);
+ list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
+ if (!nfp_net_is_data_vnic(nn))
+ continue;
+ nfp_net_pf_clean_vnic(pf, nn);
+ nfp_net_pf_free_vnic(pf, nn);
+ }
- nfp_net_pf_free_vnics(pf);
+ nfp_net_pf_app_stop(pf);
+ /* stop app first, to avoid double free of ctrl vNIC's ddir */
+ nfp_net_debugfs_dir_clean(&pf->ddir);
- nfp_net_pci_remove_finish(pf);
-out:
mutex_unlock(&pf->lock);
+ devlink_unregister(priv_to_devlink(pf));
+
+ nfp_net_pf_free_irqs(pf);
+ nfp_net_pf_app_clean(pf);
+ nfp_net_pci_unmap_mem(pf);
+
cancel_work_sync(&pf->port_refresh_work);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 78b36c67c232..f67da6bde9da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -46,6 +46,13 @@
#include "nfp_net_sriov.h"
#include "nfp_port.h"
+struct net_device *
+nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
+{
+ return rcu_dereference_protected(set->reprs[id],
+ lockdep_is_held(&app->pf->lock));
+}
+
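rcu_dereference_protected() marks this as update-side access: the caller must hold pf->lock, and lockdep_is_held() lets lockdep verify that claim at runtime. A datapath reader would instead use the read-side form, sketched here for contrast (illustrative, not part of this patch):

	struct net_device *netdev;

	rcu_read_lock();
	netdev = rcu_dereference(set->reprs[id]);
	/* netdev, if non-NULL, may only be used inside this section */
	rcu_read_unlock();
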
static void
nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len,
int tx_status)
@@ -186,6 +193,13 @@ nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
return -EINVAL;
}
+static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ return nfp_app_change_mtu(repr->app, netdev, new_mtu);
+}
+
static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);
@@ -240,6 +254,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_open = nfp_repr_open,
.ndo_stop = nfp_repr_stop,
.ndo_start_xmit = nfp_repr_xmit,
+ .ndo_change_mtu = nfp_repr_change_mtu,
.ndo_get_stats64 = nfp_repr_get_stats64,
.ndo_has_offload_stats = nfp_repr_has_offload_stats,
.ndo_get_offload_stats = nfp_repr_get_offload_stats,
@@ -336,6 +351,8 @@ struct net_device *nfp_repr_alloc(struct nfp_app *app)
if (!netdev)
return NULL;
+ netif_carrier_off(netdev);
+
repr = netdev_priv(netdev);
repr->netdev = netdev;
repr->app = app;
@@ -359,29 +376,45 @@ static void nfp_repr_clean_and_free(struct nfp_repr *repr)
nfp_repr_free(repr);
}
-void nfp_reprs_clean_and_free(struct nfp_reprs *reprs)
+void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs)
{
+ struct net_device *netdev;
unsigned int i;
- for (i = 0; i < reprs->num_reprs; i++)
- if (reprs->reprs[i])
- nfp_repr_clean_and_free(netdev_priv(reprs->reprs[i]));
+ for (i = 0; i < reprs->num_reprs; i++) {
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (netdev)
+ nfp_repr_clean_and_free(netdev_priv(netdev));
+ }
kfree(reprs);
}
void
-nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
- enum nfp_repr_type type)
+nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
{
+ struct net_device *netdev;
struct nfp_reprs *reprs;
+ int i;
- reprs = nfp_app_reprs_set(app, type, NULL);
+ reprs = rcu_dereference_protected(app->reprs[type],
+ lockdep_is_held(&app->pf->lock));
if (!reprs)
return;
+ /* Preclean must happen before we remove the reprs reference from the
+ * app below.
+ */
+ for (i = 0; i < reprs->num_reprs; i++) {
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (netdev)
+ nfp_app_repr_preclean(app, netdev);
+ }
+
+ reprs = nfp_app_reprs_set(app, type, NULL);
+
synchronize_rcu();
- nfp_reprs_clean_and_free(reprs);
+ nfp_reprs_clean_and_free(app, reprs);
}
struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
@@ -399,47 +432,29 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
int nfp_reprs_resync_phys_ports(struct nfp_app *app)
{
- struct nfp_reprs *reprs, *old_reprs;
+ struct net_device *netdev;
+ struct nfp_reprs *reprs;
struct nfp_repr *repr;
int i;
- old_reprs =
- rcu_dereference_protected(app->reprs[NFP_REPR_TYPE_PHYS_PORT],
- lockdep_is_held(&app->pf->lock));
- if (!old_reprs)
- return 0;
-
- reprs = nfp_reprs_alloc(old_reprs->num_reprs);
+ reprs = nfp_reprs_get_locked(app, NFP_REPR_TYPE_PHYS_PORT);
if (!reprs)
- return -ENOMEM;
-
- for (i = 0; i < old_reprs->num_reprs; i++) {
- if (!old_reprs->reprs[i])
- continue;
-
- repr = netdev_priv(old_reprs->reprs[i]);
- if (repr->port->type == NFP_PORT_INVALID)
- continue;
-
- reprs->reprs[i] = old_reprs->reprs[i];
- }
-
- old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
- synchronize_rcu();
+ return 0;
- /* Now we free up removed representors */
- for (i = 0; i < old_reprs->num_reprs; i++) {
- if (!old_reprs->reprs[i])
+ for (i = 0; i < reprs->num_reprs; i++) {
+ netdev = nfp_repr_get_locked(app, reprs, i);
+ if (!netdev)
continue;
- repr = netdev_priv(old_reprs->reprs[i]);
+ repr = netdev_priv(netdev);
if (repr->port->type != NFP_PORT_INVALID)
continue;
- nfp_app_repr_stop(app, repr);
+ nfp_app_repr_preclean(app, netdev);
+ rcu_assign_pointer(reprs->reprs[i], NULL);
+ synchronize_rcu();
nfp_repr_clean(repr);
}
- kfree(old_reprs);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index 5d4d897bc9c6..a621e8ff528e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -35,6 +35,7 @@
#define NFP_NET_REPR_H
struct metadata_dst;
+struct nfp_app;
struct nfp_net;
struct nfp_port;
@@ -47,7 +48,7 @@ struct nfp_port;
*/
struct nfp_reprs {
unsigned int num_reprs;
- struct net_device *reprs[0];
+ struct net_device __rcu *reprs[0];
};
/**
@@ -89,6 +90,7 @@ struct nfp_repr {
* @NFP_REPR_TYPE_PHYS_PORT: external NIC port
* @NFP_REPR_TYPE_PF: physical function
* @NFP_REPR_TYPE_VF: virtual function
+ * @__NFP_REPR_TYPE_MAX: number of representor types
*/
enum nfp_repr_type {
NFP_REPR_TYPE_PHYS_PORT,
@@ -113,16 +115,18 @@ static inline int nfp_repr_get_port_id(struct net_device *netdev)
return priv->dst->u.port_info.port_id;
}
+struct net_device *
+nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set,
+ unsigned int id);
+
void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev);
struct net_device *nfp_repr_alloc(struct nfp_app *app);
-void
-nfp_reprs_clean_and_free(struct nfp_reprs *reprs);
-void
-nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
- enum nfp_repr_type type);
+void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs);
+void nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
+ enum nfp_repr_type type);
struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs);
int nfp_reprs_resync_phys_ports(struct nfp_app *app);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index c879626e035b..b802a1d55449 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -277,12 +277,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs);
- /* Get ME clock frequency from ctrl BAR
- * XXX for now frequency is hardcoded until we figure out how
- * to get the value from nfp-hwinfo into ctrl bar
- */
- nn->me_freq_mhz = 1200;
-
err = nfp_net_init(nn);
if (err)
goto err_irqs_disable;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index 3ce51f03126f..ced62d112aa2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -49,6 +49,8 @@
struct nfp_hwinfo;
struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp);
const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup);
+char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo);
+u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo);
/* Implemented in nfp_nsp.c, low level functions */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index 5798adc57cbc..c8f2c064cce3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -242,6 +242,7 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
void *buffer, size_t length);
int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
const void *buffer, size_t length);
+size_t nfp_cpp_area_size(struct nfp_cpp_area *area);
const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area);
struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 04dd5758ecf5..ef30597aa319 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -372,8 +372,7 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
* that it can be accessed directly.
*
* NOTE: @address and @size must be 32-bit aligned values.
- *
- * NOTE: The area must also be 'released' when the structure is freed.
+ * The area must also be 'released' when the structure is freed.
*
* Return: NFP CPP Area handle, or NULL
*/
@@ -536,8 +535,7 @@ void nfp_cpp_area_release_free(struct nfp_cpp_area *area)
* Read data from indicated CPP region.
*
* NOTE: @offset and @length must be 32-bit aligned values.
- *
- * NOTE: Area must have been locked down with an 'acquire'.
+ * Area must have been locked down with an 'acquire'.
*
* Return: length of io, or -ERRNO
*/
@@ -558,8 +556,7 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area,
* Write data to indicated CPP region.
*
* NOTE: @offset and @length must be 32-bit aligned values.
- *
- * NOTE: Area must have been locked down with an 'acquire'.
+ * Area must have been locked down with an 'acquire'.
*
* Return: length of io, or -ERRNO
*/
@@ -571,6 +568,17 @@ int nfp_cpp_area_write(struct nfp_cpp_area *area,
}
/**
+ * nfp_cpp_area_size() - return size of a CPP area
+ * @cpp_area: CPP area handle
+ *
+ * Return: Size of the area
+ */
+size_t nfp_cpp_area_size(struct nfp_cpp_area *cpp_area)
+{
+ return cpp_area->size;
+}
+
+/**
* nfp_cpp_area_name() - return name of a CPP area
* @cpp_area: CPP area handle
*
@@ -666,18 +674,20 @@ void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area)
* @offset: Offset into area
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_readl(struct nfp_cpp_area *area,
unsigned long offset, u32 *value)
{
u8 tmp[4];
- int err;
+ int n;
- err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
- *value = get_unaligned_le32(tmp);
+ n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le32(tmp);
+ return 0;
}
/**
@@ -686,16 +696,18 @@ int nfp_cpp_area_readl(struct nfp_cpp_area *area,
* @offset: Offset into area
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_writel(struct nfp_cpp_area *area,
unsigned long offset, u32 value)
{
u8 tmp[4];
+ int n;
put_unaligned_le32(value, tmp);
+ n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
- return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/**
@@ -704,18 +716,20 @@ int nfp_cpp_area_writel(struct nfp_cpp_area *area,
* @offset: Offset into area
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_readq(struct nfp_cpp_area *area,
unsigned long offset, u64 *value)
{
u8 tmp[8];
- int err;
+ int n;
- err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
- *value = get_unaligned_le64(tmp);
+ n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le64(tmp);
+ return 0;
}
/**
@@ -724,16 +738,18 @@ int nfp_cpp_area_readq(struct nfp_cpp_area *area,
* @offset: Offset into area
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_area_writeq(struct nfp_cpp_area *area,
unsigned long offset, u64 value)
{
u8 tmp[8];
+ int n;
put_unaligned_le64(value, tmp);
+ n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
- return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/**
@@ -1072,7 +1088,7 @@ static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
* @xpb_addr: Address for operation
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
{
@@ -1087,7 +1103,7 @@ int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
* @xpb_addr: Address for operation
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
{
@@ -1105,7 +1121,7 @@ int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
*
* KERNEL: This operation is safe to call in interrupt or softirq context.
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt,
u32 mask, u32 value)
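All of the readl/writel/readq/writeq conversions in this file follow one return-value convention; a hypothetical helper (an assumption for illustration, not something the patch adds) capturing it:

	static int xfer_to_errno(int n, size_t want)
	{
		if (n < 0)
			return n;	/* propagate -ERRNO from the core */
		/* a short transfer is reported as an I/O error */
		return (size_t)n == want ? 0 : -EIO;
	}
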
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index ab86bceb93f2..20bad05e2e92 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -64,18 +64,20 @@
* @address: Address for operation
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u32 *value)
{
u8 tmp[4];
- int err;
+ int n;
- err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
- *value = get_unaligned_le32(tmp);
+ n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le32(tmp);
+ return 0;
}
/**
@@ -85,15 +87,18 @@ int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
* @address: Address for operation
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u32 value)
{
u8 tmp[4];
+ int n;
put_unaligned_le32(value, tmp);
- return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+ n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/**
@@ -103,18 +108,20 @@ int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
* @address: Address for operation
* @value: Pointer to read buffer
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u64 *value)
{
u8 tmp[8];
- int err;
+ int n;
- err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
- *value = get_unaligned_le64(tmp);
+ n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
+ if (n != sizeof(tmp))
+ return n < 0 ? n : -EIO;
- return err;
+ *value = get_unaligned_le64(tmp);
+ return 0;
}
/**
@@ -124,15 +131,18 @@ int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
* @address: Address for operation
* @value: Value to write
*
- * Return: length of the io, or -ERRNO
+ * Return: 0 on success, or -ERRNO
*/
int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u64 value)
{
u8 tmp[8];
+ int n;
put_unaligned_le64(value, tmp);
- return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+ n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+
+ return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
}
/* NOTE: This code should not use nfp_xpb_* functions,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
index 4f24aff1e772..063a9a6243d6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
@@ -302,3 +302,13 @@ const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup)
return NULL;
}
+
+char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo)
+{
+ return hwinfo->data;
+}
+
+u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo)
+{
+ return le32_to_cpu(hwinfo->size) - sizeof(u32);
+}
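The packed-strings blob exposed by these two accessors is the hwinfo database body: concatenated "key\0value\0" pairs (that layout is an assumption based on how nfp_hwinfo_lookup() consumes it). A sketch of walking it:

	const char *key = nfp_hwinfo_get_packed_strings(hwinfo);
	const char *end = key + nfp_hwinfo_get_packed_str_size(hwinfo);

	while (key < end && *key) {
		const char *val = key + strlen(key) + 1;

		pr_info("%s=%s\n", key, val);
		key = val + strlen(val) + 1;	/* step past the pair */
	}
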
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 14a6d1ba51a9..39abac678b71 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -51,6 +51,9 @@
#include "nfp_cpp.h"
#include "nfp_nsp.h"
+#define NFP_NSP_TIMEOUT_DEFAULT 30
+#define NFP_NSP_TIMEOUT_BOOT 30
+
/* Offsets relative to the CSR base */
#define NSP_STATUS 0x00
#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48)
@@ -93,6 +96,7 @@ enum nfp_nsp_cmd {
SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */
SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */
SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */
+ SPCODE_NSP_WRITE_FLASH = 11, /* Load and flash image from buffer */
SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */
SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */
};
@@ -260,10 +264,10 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state)
}
static int
-nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
- u32 nsp_cpp, u64 addr, u64 mask, u64 val)
+nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
+ u64 mask, u64 val, u32 timeout_sec)
{
- const unsigned long wait_until = jiffies + 30 * HZ;
+ const unsigned long wait_until = jiffies + timeout_sec * HZ;
int err;
for (;;) {
@@ -285,12 +289,13 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
}
/**
- * nfp_nsp_command() - Execute a command on the NFP Service Processor
+ * __nfp_nsp_command() - Execute a command on the NFP Service Processor
* @state: NFP SP state
* @code: NFP SP Command Code
* @option: NFP SP Command Argument
* @buff_cpp: NFP SP Buffer CPP Address info
* @buff_addr: NFP SP Buffer Host address
+ * @timeout_sec: Timeout value to wait for completion in seconds
*
* Return: 0 for success with no result
*
@@ -300,10 +305,11 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
* -ENODEV if the NSP is not a supported model
* -EBUSY if the NSP is stuck
* -EINTR if interrupted while waiting for completion
- * -ETIMEDOUT if the NSP took longer than 30 seconds to complete
+ * -ETIMEDOUT if the NSP took longer than @timeout_sec seconds to complete
*/
-static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
- u32 buff_cpp, u64 buff_addr)
+static int
+__nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
+ u64 buff_addr, u32 timeout_sec)
{
u64 reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command;
struct nfp_cpp *cpp = state->cpp;
@@ -341,8 +347,8 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
return err;
/* Wait for NSP_COMMAND_START to go to 0 */
- err = nfp_nsp_wait_reg(cpp, &reg,
- nsp_cpp, nsp_command, NSP_COMMAND_START, 0);
+ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_command,
+ NSP_COMMAND_START, 0, NFP_NSP_TIMEOUT_DEFAULT);
if (err) {
nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n",
err, code);
@@ -350,8 +356,8 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
}
/* Wait for NSP_STATUS_BUSY to go to 0 */
- err = nfp_nsp_wait_reg(cpp, &reg,
- nsp_cpp, nsp_status, NSP_STATUS_BUSY, 0);
+ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY,
+ 0, timeout_sec);
if (err) {
nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n",
err, code);
@@ -374,9 +380,18 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
return ret_val;
}
-static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
- const void *in_buf, unsigned int in_size,
- void *out_buf, unsigned int out_size)
+static int
+nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
+ u64 buff_addr)
+{
+ return __nfp_nsp_command(state, code, option, buff_cpp, buff_addr,
+ NFP_NSP_TIMEOUT_DEFAULT);
+}
+
+static int
+__nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
+ const void *in_buf, unsigned int in_size, void *out_buf,
+ unsigned int out_size, u32 timeout_sec)
{
struct nfp_cpp *cpp = nsp->cpp;
unsigned int max_size;
@@ -429,7 +444,8 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
return err;
}
- ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf);
+ ret = __nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf,
+ timeout_sec);
if (ret < 0)
return ret;
@@ -442,12 +458,23 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
return ret;
}
+static int
+nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
+ const void *in_buf, unsigned int in_size, void *out_buf,
+ unsigned int out_size)
+{
+ return __nfp_nsp_command_buf(nsp, code, option, in_buf, in_size,
+ out_buf, out_size,
+ NFP_NSP_TIMEOUT_DEFAULT);
+}
+
int nfp_nsp_wait(struct nfp_nsp *state)
{
- const unsigned long wait_until = jiffies + 30 * HZ;
+ const unsigned long wait_until = jiffies + NFP_NSP_TIMEOUT_BOOT * HZ;
int err;
- nfp_dbg(state->cpp, "Waiting for NSP to respond (30 sec max).\n");
+ nfp_dbg(state->cpp, "Waiting for NSP to respond (%u sec max).\n",
+ NFP_NSP_TIMEOUT_BOOT);
for (;;) {
const unsigned long start_time = jiffies;
@@ -488,6 +515,17 @@ int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
fw->size, NULL, 0);
}
+int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw)
+{
+ /* Flashing is specified to take at most 70 seconds, so scale that
+ * spec time by a generous safety factor.
+ */
+ u32 timeout_sec = 2.5 * 70;
+
+ return __nfp_nsp_command_buf(state, SPCODE_NSP_WRITE_FLASH, fw->size,
+ fw->data, fw->size, NULL, 0, timeout_sec);
+}
+
int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size)
{
return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0,
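For reference, nfp_nsp_write_flash()'s 2.5 * 70 is folded at compile time to 175, so flash commands get a 175-second window for NSP_STATUS_BUSY to clear instead of the 30-second NFP_NSP_TIMEOUT_DEFAULT.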
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 650ca1a5bd21..e983c9d7f86c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -48,6 +48,7 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
int nfp_nsp_wait(struct nfp_nsp *state);
int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw);
int nfp_nsp_mac_reinit(struct nfp_nsp *state);
static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index ecda474ac7c3..46107aefad1c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -277,10 +277,6 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
break;
}
- if (err == sym->size)
- err = 0;
- else if (err >= 0)
- err = -EIO;
exit:
if (error)
*error = err;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 481876b5424c..66c665d0b926 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -793,9 +793,9 @@ struct fe_priv {
/* rx specific fields.
* Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
*/
- union ring_type get_rx, put_rx, first_rx, last_rx;
+ union ring_type get_rx, put_rx, last_rx;
struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
- struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
+ struct nv_skb_map *last_rx_ctx;
struct nv_skb_map *rx_skb;
union ring_type rx_ring;
@@ -822,9 +822,9 @@ struct fe_priv {
/*
* tx specific fields.
*/
- union ring_type get_tx, put_tx, first_tx, last_tx;
+ union ring_type get_tx, put_tx, last_tx;
struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
- struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
+ struct nv_skb_map *last_tx_ctx;
struct nv_skb_map *tx_skb;
union ring_type tx_ring;
@@ -1812,12 +1812,12 @@ static int nv_alloc_rx(struct net_device *dev)
struct ring_desc *less_rx;
less_rx = np->get_rx.orig;
- if (less_rx-- == np->first_rx.orig)
+ if (less_rx-- == np->rx_ring.orig)
less_rx = np->last_rx.orig;
while (np->put_rx.orig != less_rx) {
struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
- if (skb) {
+ if (likely(skb)) {
np->put_rx_ctx->skb = skb;
np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data,
@@ -1833,9 +1833,9 @@ static int nv_alloc_rx(struct net_device *dev)
wmb();
np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
- np->put_rx.orig = np->first_rx.orig;
+ np->put_rx.orig = np->rx_ring.orig;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
- np->put_rx_ctx = np->first_rx_ctx;
+ np->put_rx_ctx = np->rx_skb;
} else {
packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
@@ -1853,12 +1853,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
struct ring_desc_ex *less_rx;
less_rx = np->get_rx.ex;
- if (less_rx-- == np->first_rx.ex)
+ if (less_rx-- == np->rx_ring.ex)
less_rx = np->last_rx.ex;
while (np->put_rx.ex != less_rx) {
struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
- if (skb) {
+ if (likely(skb)) {
np->put_rx_ctx->skb = skb;
np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data,
@@ -1875,9 +1875,9 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
wmb();
np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
- np->put_rx.ex = np->first_rx.ex;
+ np->put_rx.ex = np->rx_ring.ex;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
- np->put_rx_ctx = np->first_rx_ctx;
+ np->put_rx_ctx = np->rx_skb;
} else {
packet_dropped:
u64_stats_update_begin(&np->swstats_rx_syncp);
@@ -1903,13 +1903,15 @@ static void nv_init_rx(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
int i;
- np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
+ np->get_rx = np->rx_ring;
+ np->put_rx = np->rx_ring;
if (!nv_optimized(np))
np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
else
np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
- np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
+ np->get_rx_ctx = np->rx_skb;
+ np->put_rx_ctx = np->rx_skb;
np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
for (i = 0; i < np->rx_ring_size; i++) {
@@ -1932,13 +1934,15 @@ static void nv_init_tx(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
int i;
- np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
+ np->get_tx = np->tx_ring;
+ np->put_tx = np->tx_ring;
if (!nv_optimized(np))
np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
else
np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
- np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
+ np->get_tx_ctx = np->tx_skb;
+ np->put_tx_ctx = np->tx_skb;
np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
netdev_reset_queue(np->dev);
np->tx_pkts_in_progress = 0;
@@ -2248,9 +2252,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
offset += bcnt;
size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.orig))
- put_tx = np->first_tx.orig;
+ put_tx = np->tx_ring.orig;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
- np->put_tx_ctx = np->first_tx_ctx;
+ np->put_tx_ctx = np->tx_skb;
} while (size);
/* setup the fragments */
@@ -2276,7 +2280,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
do {
nv_unmap_txskb(np, start_tx_ctx);
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
- tmp_tx_ctx = np->first_tx_ctx;
+ tmp_tx_ctx = np->tx_skb;
} while (tmp_tx_ctx != np->put_tx_ctx);
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
@@ -2294,18 +2298,18 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
offset += bcnt;
frag_size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.orig))
- put_tx = np->first_tx.orig;
+ put_tx = np->tx_ring.orig;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
- np->put_tx_ctx = np->first_tx_ctx;
+ np->put_tx_ctx = np->tx_skb;
} while (frag_size);
}
- if (unlikely(put_tx == np->first_tx.orig))
+ if (unlikely(put_tx == np->tx_ring.orig))
prev_tx = np->last_tx.orig;
else
prev_tx = put_tx - 1;
- if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+ if (unlikely(np->put_tx_ctx == np->tx_skb))
prev_tx_ctx = np->last_tx_ctx;
else
prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2406,9 +2410,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
offset += bcnt;
size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.ex))
- put_tx = np->first_tx.ex;
+ put_tx = np->tx_ring.ex;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
- np->put_tx_ctx = np->first_tx_ctx;
+ np->put_tx_ctx = np->tx_skb;
} while (size);
/* setup the fragments */
@@ -2434,7 +2438,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
do {
nv_unmap_txskb(np, start_tx_ctx);
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
- tmp_tx_ctx = np->first_tx_ctx;
+ tmp_tx_ctx = np->tx_skb;
} while (tmp_tx_ctx != np->put_tx_ctx);
dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
@@ -2452,18 +2456,18 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
offset += bcnt;
frag_size -= bcnt;
if (unlikely(put_tx++ == np->last_tx.ex))
- put_tx = np->first_tx.ex;
+ put_tx = np->tx_ring.ex;
if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
- np->put_tx_ctx = np->first_tx_ctx;
+ np->put_tx_ctx = np->tx_skb;
} while (frag_size);
}
- if (unlikely(put_tx == np->first_tx.ex))
+ if (unlikely(put_tx == np->tx_ring.ex))
prev_tx = np->last_tx.ex;
else
prev_tx = put_tx - 1;
- if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+ if (unlikely(np->put_tx_ctx == np->tx_skb))
prev_tx_ctx = np->last_tx_ctx;
else
prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2563,7 +2567,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
if (np->desc_ver == DESC_VER_1) {
if (flags & NV_TX_LASTPACKET) {
- if (flags & NV_TX_ERROR) {
+ if (unlikely(flags & NV_TX_ERROR)) {
if ((flags & NV_TX_RETRYERROR)
&& !(flags & NV_TX_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
@@ -2580,7 +2584,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
}
} else {
if (flags & NV_TX2_LASTPACKET) {
- if (flags & NV_TX2_ERROR) {
+ if (unlikely(flags & NV_TX2_ERROR)) {
if ((flags & NV_TX2_RETRYERROR)
&& !(flags & NV_TX2_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
@@ -2597,9 +2601,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
}
}
if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
- np->get_tx.orig = np->first_tx.orig;
+ np->get_tx.orig = np->tx_ring.orig;
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
- np->get_tx_ctx = np->first_tx_ctx;
+ np->get_tx_ctx = np->tx_skb;
}
netdev_completed_queue(np->dev, tx_work, bytes_compl);
@@ -2626,7 +2630,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
nv_unmap_txskb(np, np->get_tx_ctx);
if (flags & NV_TX2_LASTPACKET) {
- if (flags & NV_TX2_ERROR) {
+ if (unlikely(flags & NV_TX2_ERROR)) {
if ((flags & NV_TX2_RETRYERROR)
&& !(flags & NV_TX2_RETRYCOUNT_MASK)) {
if (np->driver_data & DEV_HAS_GEAR_MODE)
@@ -2651,9 +2655,9 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
}
if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
- np->get_tx.ex = np->first_tx.ex;
+ np->get_tx.ex = np->tx_ring.ex;
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
- np->get_tx_ctx = np->first_tx_ctx;
+ np->get_tx_ctx = np->tx_skb;
}
netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
@@ -2909,9 +2913,9 @@ static int nv_rx_process(struct net_device *dev, int limit)
u64_stats_update_end(&np->swstats_rx_syncp);
next_pkt:
if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
- np->get_rx.orig = np->first_rx.orig;
+ np->get_rx.orig = np->rx_ring.orig;
if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
- np->get_rx_ctx = np->first_rx_ctx;
+ np->get_rx_ctx = np->rx_skb;
rx_work++;
}
@@ -2998,9 +3002,9 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
}
next_pkt:
if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
- np->get_rx.ex = np->first_rx.ex;
+ np->get_rx.ex = np->rx_ring.ex;
if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
- np->get_rx_ctx = np->first_rx_ctx;
+ np->get_rx_ctx = np->rx_skb;
rx_work++;
}
@@ -5507,11 +5511,9 @@ static int nv_open(struct net_device *dev)
/* One manual link speed update: Interrupts are enabled, future link
* speed changes cause interrupts and are handled by nv_link_irq().
*/
- {
- u32 miistat;
- miistat = readl(base + NvRegMIIStatus);
- writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
- }
+ readl(base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
+
/* set linkspeed to invalid value, thus force nv_update_linkspeed
* to init hw */
np->linkspeed = 0;
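The forcedeth changes drop the cached first_rx/first_tx copies because the ring base (np->rx_ring / np->tx_ring, and np->rx_skb / np->tx_skb for the contexts) already names the first element. The wrap idiom, as a hypothetical standalone helper rather than anything the patch adds:

	static inline struct ring_desc *ring_advance(struct ring_desc *cur,
						     struct ring_desc *base,
						     struct ring_desc *last)
	{
		/* wrap from the last descriptor back to the ring base */
		return cur == last ? base : cur + 1;
	}
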
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index c9a55b774935..07a2eb3781b1 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -212,9 +212,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
return -ENOENT;
}
- if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
- &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
- != ETH_ALEN) {
+ if (!mac_pton(maddr, addr)) {
dev_warn(&pdev->dev,
"can't parse mac address, not configuring\n");
return -EINVAL;
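mac_pton() (lib/net_utils.c) parses the canonical colon-separated form into a 6-byte array and returns true on success, which is what lets the six-field sscanf() collapse to a single call. Minimal usage sketch:

	u8 addr[ETH_ALEN];

	if (!mac_pton("00:11:22:33:44:55", addr))
		return -EINVAL;	/* malformed MAC string */
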
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 26ddf092e3ec..0ee2490db729 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -85,6 +85,7 @@ config QED
tristate "QLogic QED 25/40/100Gb core driver"
depends on PCI
select ZLIB_INFLATE
+ select CRC8
---help---
This enables the support for ...
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 0a66389c06c2..1cd39c9a0345 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -2502,12 +2502,10 @@ netxen_collect_minidump(struct netxen_adapter *adapter)
{
int ret = 0;
struct netxen_minidump_template_hdr *hdr;
- struct timespec val;
hdr = (struct netxen_minidump_template_hdr *)
adapter->mdump.md_template;
hdr->driver_capture_mask = adapter->mdump.md_capture_mask;
- jiffies_to_timespec(jiffies, &val);
- hdr->driver_timestamp = (u32) val.tv_sec;
+ hdr->driver_timestamp = ktime_get_seconds();
hdr->driver_info_word2 = adapter->fw_version;
hdr->driver_info_word3 = NXRD32(adapter, CRB_DRIVER_VERSION);
ret = netxen_parse_md_template(adapter);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 91003bc6f00b..69488554f4b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -52,10 +52,10 @@
extern const struct qed_common_ops qed_common_ops_pass;
-#define QED_MAJOR_VERSION 8
-#define QED_MINOR_VERSION 10
-#define QED_REVISION_VERSION 11
-#define QED_ENGINEERING_VERSION 21
+#define QED_MAJOR_VERSION 8
+#define QED_MINOR_VERSION 33
+#define QED_REVISION_VERSION 0
+#define QED_ENGINEERING_VERSION 20
#define QED_VERSION \
((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
@@ -778,8 +778,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
return sw_fid;
}
-#define PURE_LB_TC 8
-#define PKT_LB_TC 9
+#define PKT_LB_TC 9
+#define MAX_NUM_VOQS_E4 20
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index afd07ad91631..6f546e869d8d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -86,22 +86,22 @@
/* connection context union */
union conn_context {
- struct core_conn_context core_ctx;
- struct eth_conn_context eth_ctx;
- struct iscsi_conn_context iscsi_ctx;
- struct fcoe_conn_context fcoe_ctx;
- struct roce_conn_context roce_ctx;
+ struct e4_core_conn_context core_ctx;
+ struct e4_eth_conn_context eth_ctx;
+ struct e4_iscsi_conn_context iscsi_ctx;
+ struct e4_fcoe_conn_context fcoe_ctx;
+ struct e4_roce_conn_context roce_ctx;
};
/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
- struct iscsi_task_context iscsi_ctx;
- struct fcoe_task_context fcoe_ctx;
+ struct e4_iscsi_task_context iscsi_ctx;
+ struct e4_fcoe_task_context fcoe_ctx;
};
/* TYPE-1 task context - ROCE */
union type1_task_context {
- struct rdma_task_context roce_ctx;
+ struct e4_rdma_task_context roce_ctx;
};
struct src_ent {
@@ -109,8 +109,8 @@ struct src_ent {
u64 next;
};
-#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
-#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES BIT(CDUT_SEG_ALIGNMET + 12)
#define CONN_CXT_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
@@ -742,7 +742,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
qed_cxt_qm_iids(p_hwfn, &qm_iids);
- total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+ total = qed_qm_pf_mem_size(qm_iids.cids,
qm_iids.vf_cids, qm_iids.tids,
p_hwfn->qm_info.num_pqs,
p_hwfn->qm_info.num_vf_pqs);
@@ -1055,11 +1055,10 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
u32 size;
size = min_t(u32, sz_left, p_blk->real_size_in_page);
- p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- size, &p_phys, GFP_KERNEL);
+ p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+ &p_phys, GFP_KERNEL);
if (!p_virt)
return -ENOMEM;
- memset(p_virt, 0, size);
ilt_shadow[line].p_phys = p_phys;
ilt_shadow[line].p_virt = p_virt;
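dma_zalloc_coherent() is the zeroing variant of dma_alloc_coherent(), so the explicit memset() can go; the replaced sequence was equivalent to:

	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, &p_phys,
				    GFP_KERNEL);
	if (p_virt)
		memset(p_virt, 0, size);	/* now done by the allocator */
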
@@ -1496,20 +1495,24 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
}
}
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool is_pf_loading)
{
- struct qed_qm_pf_rt_init_params params;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct qed_qm_pf_rt_init_params params;
+ struct qed_mcp_link_state *p_link;
struct qed_qm_iids iids;
memset(&iids, 0, sizeof(iids));
qed_cxt_qm_iids(p_hwfn, &iids);
+ p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
memset(&params, 0, sizeof(params));
params.port_id = p_hwfn->port_id;
params.pf_id = p_hwfn->rel_pf_id;
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
- params.is_first_pf = p_hwfn->first_on_engine;
+ params.is_pf_loading = is_pf_loading;
params.num_pf_cids = iids.cids;
params.num_vf_cids = iids.vf_cids;
params.num_tids = iids.tids;
@@ -1520,6 +1523,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
params.pf_rl = qm_info->pf_rl;
+ params.link_speed = p_link->speed;
params.pq_params = qm_info->qm_pq_params;
params.vport_params = qm_info->qm_vport_params;
@@ -1883,7 +1887,7 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- qed_qm_init_pf(p_hwfn, p_ptt);
+ qed_qm_init_pf(p_hwfn, p_ptt, true);
qed_cm_init_pf(p_hwfn);
qed_dq_init_pf(p_hwfn);
qed_cdu_init_pf(p_hwfn);
@@ -2303,14 +2307,13 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
goto out0;
}
- p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- p_blk->real_size_in_page,
- &p_phys, GFP_KERNEL);
+ p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_blk->real_size_in_page, &p_phys,
+ GFP_KERNEL);
if (!p_virt) {
rc = -ENOMEM;
goto out1;
}
- memset(p_virt, 0, p_blk->real_size_in_page);
/* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
* to compensate for a HW bug, but it is configured even if DIF is not
@@ -2326,7 +2329,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
elem = (union type1_task_context *)elem_start;
SET_FIELD(elem->roce_ctx.tdif_context.flags1,
- TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
+ TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 17836349a274..a4e95869889f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -169,8 +169,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*
* @param p_hwfn
* @param p_ptt
+ * @param is_pf_loading
*/
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool is_pf_loading);
/**
* @brief Reconfigures QM pf on the fly
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index fe7c1f230028..449777f21237 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -954,9 +954,7 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest)
{
struct protocol_dcb_data *p_dcb_data;
- bool update_flag = false;
-
- p_dest->pf_id = p_src->pf_id;
+ u8 update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
p_dest->update_fcoe_dcb_data_mode = update_flag;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 03c3cf77aaff..f2633ec87a6a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -21,25 +21,26 @@ enum mem_groups {
MEM_GROUP_DMAE_MEM,
MEM_GROUP_CM_MEM,
MEM_GROUP_QM_MEM,
- MEM_GROUP_TM_MEM,
+ MEM_GROUP_DORQ_MEM,
MEM_GROUP_BRB_RAM,
MEM_GROUP_BRB_MEM,
MEM_GROUP_PRS_MEM,
- MEM_GROUP_SDM_MEM,
MEM_GROUP_IOR,
- MEM_GROUP_RAM,
MEM_GROUP_BTB_RAM,
- MEM_GROUP_RDIF_CTX,
- MEM_GROUP_TDIF_CTX,
- MEM_GROUP_CFC_MEM,
MEM_GROUP_CONN_CFC_MEM,
MEM_GROUP_TASK_CFC_MEM,
MEM_GROUP_CAU_PI,
MEM_GROUP_CAU_MEM,
MEM_GROUP_PXP_ILT,
+ MEM_GROUP_TM_MEM,
+ MEM_GROUP_SDM_MEM,
MEM_GROUP_PBUF,
+ MEM_GROUP_RAM,
MEM_GROUP_MULD_MEM,
MEM_GROUP_BTB_MEM,
+ MEM_GROUP_RDIF_CTX,
+ MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CFC_MEM,
MEM_GROUP_IGU_MEM,
MEM_GROUP_IGU_MSIX,
MEM_GROUP_CAU_SB,
@@ -54,25 +55,26 @@ static const char * const s_mem_group_names[] = {
"DMAE_MEM",
"CM_MEM",
"QM_MEM",
- "TM_MEM",
+ "DORQ_MEM",
"BRB_RAM",
"BRB_MEM",
"PRS_MEM",
- "SDM_MEM",
"IOR",
- "RAM",
"BTB_RAM",
- "RDIF_CTX",
- "TDIF_CTX",
- "CFC_MEM",
"CONN_CFC_MEM",
"TASK_CFC_MEM",
"CAU_PI",
"CAU_MEM",
"PXP_ILT",
+ "TM_MEM",
+ "SDM_MEM",
"PBUF",
+ "RAM",
"MULD_MEM",
"BTB_MEM",
+ "RDIF_CTX",
+ "TDIF_CTX",
+ "CFC_MEM",
"IGU_MEM",
"IGU_MSIX",
"CAU_SB",
@@ -92,11 +94,6 @@ static u32 cond7(const u32 *r, const u32 *imm)
return ((r[0] >> imm[0]) & imm[1]) != imm[2];
}
-static u32 cond14(const u32 *r, const u32 *imm)
-{
- return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]);
-}
-
static u32 cond6(const u32 *r, const u32 *imm)
{
return (r[0] & imm[0]) != imm[1];
@@ -174,7 +171,6 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
cond11,
cond12,
cond13,
- cond14,
};
/******************************* Data Types **********************************/
@@ -203,6 +199,8 @@ struct chip_defs {
struct platform_defs {
const char *name;
u32 delay_factor;
+ u32 dmae_thresh;
+ u32 log_thresh;
};
/* Storm constant definitions.
@@ -234,7 +232,7 @@ struct storm_defs {
/* Block constant definitions */
struct block_defs {
const char *name;
- bool has_dbg_bus[MAX_CHIP_IDS];
+ bool exists[MAX_CHIP_IDS];
bool associated_to_storm;
/* Valid only if associated_to_storm is true */
@@ -258,8 +256,8 @@ struct block_defs {
/* Reset register definitions */
struct reset_reg_defs {
u32 addr;
- u32 unreset_val;
bool exists[MAX_CHIP_IDS];
+ u32 unreset_val[MAX_CHIP_IDS];
};
struct grc_param_defs {
@@ -276,8 +274,8 @@ struct rss_mem_defs {
const char *mem_name;
const char *type_name;
u32 addr;
+ u32 entry_width;
u32 num_entries[MAX_CHIP_IDS];
- u32 entry_width[MAX_CHIP_IDS];
};
struct vfc_ram_defs {
@@ -294,7 +292,9 @@ struct big_ram_defs {
enum dbg_grc_params grc_param;
u32 addr_reg_addr;
u32 data_reg_addr;
- u32 num_of_blocks[MAX_CHIP_IDS];
+ u32 is_256b_reg_addr;
+ u32 is_256b_bit_offset[MAX_CHIP_IDS];
+ u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
};
struct phy_defs {
@@ -358,20 +358,14 @@ struct phy_defs {
(arr)[i] = qed_rd(dev, ptt, addr); \
} while (0)
-#ifndef DWORDS_TO_BYTES
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
-#endif
-#ifndef BYTES_TO_DWORDS
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
-#endif
-/* extra lines include a signature line + optional latency events line */
-#ifndef NUM_DBG_LINES
+/* Extra lines include a signature line + optional latency events line */
#define NUM_EXTRA_DBG_LINES(block_desc) \
(1 + ((block_desc)->has_latency_events ? 1 : 0))
#define NUM_DBG_LINES(block_desc) \
((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
-#endif
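With the #ifndef guards removed, these macros are now defined unconditionally. NUM_DBG_LINES totals a block's recorded lines plus one signature line and, when the block reports latency events, one extra line. A small sketch of the arithmetic with a made-up descriptor:

#include <stdio.h>

struct dbg_bus_block_desc {
	int num_of_lines;
	int has_latency_events;
};

#define NUM_EXTRA_DBG_LINES(desc) (1 + ((desc)->has_latency_events ? 1 : 0))
#define NUM_DBG_LINES(desc) ((desc)->num_of_lines + NUM_EXTRA_DBG_LINES(desc))

int main(void)
{
	struct dbg_bus_block_desc d = { 10, 1 };

	/* 10 recorded lines + signature + latency events = 12 */
	printf("%d lines total\n", NUM_DBG_LINES(&d));
	return 0;
}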
#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
@@ -424,9 +418,6 @@ struct phy_defs {
#define NUM_RSS_MEM_TYPES 5
#define NUM_BIG_RAM_TYPES 3
-#define BIG_RAM_BLOCK_SIZE_BYTES 128
-#define BIG_RAM_BLOCK_SIZE_DWORDS \
- BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
#define NUM_PHY_TBUS_ADDRESSES 2048
#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
@@ -441,23 +432,17 @@ struct phy_defs {
#define FW_IMG_MAIN 1
-#ifndef REG_FIFO_ELEMENT_DWORDS
#define REG_FIFO_ELEMENT_DWORDS 2
-#endif
#define REG_FIFO_DEPTH_ELEMENTS 32
#define REG_FIFO_DEPTH_DWORDS \
(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
-#ifndef IGU_FIFO_ELEMENT_DWORDS
#define IGU_FIFO_ELEMENT_DWORDS 4
-#endif
#define IGU_FIFO_DEPTH_ELEMENTS 64
#define IGU_FIFO_DEPTH_DWORDS \
(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
-#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
-#endif
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
@@ -491,6 +476,11 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
{{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
{0, 0, 0},
{0, 0, 0},
+ {0, 0, 0} } },
+ { "reserved",
+ {{0, 0, 0},
+ {0, 0, 0},
+ {0, 0, 0},
{0, 0, 0} } }
};
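The new trailing entry widens s_chip_defs to a third chip, and the same change repeats throughout this patch: every per-chip initializer ({true, true}, {x, y}, ...) gains a third element. Judging by the _E5 register suffixes introduced below, the three slots appear to correspond to BB, K2, and the new E5; a sketch of the indexing pattern under that assumption:

#include <stdbool.h>
#include <stdio.h>

enum chip_ids { CHIP_BB, CHIP_K2, CHIP_E5, MAX_CHIP_IDS }; /* assumed order */

struct block {
	const char *name;
	bool exists[MAX_CHIP_IDS];
};

int main(void)
{
	/* yuld is dropped on the third chip, per its {true, true, false} */
	struct block yuld = { "yuld", { true, true, false } };
	enum chip_ids chip = CHIP_E5;

	if (!yuld.exists[chip])
		printf("%s: skipped on this chip\n", yuld.name);
	return 0;
}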
@@ -498,7 +488,8 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
static struct storm_defs s_storm_defs[] = {
/* Tstorm */
{'T', BLOCK_TSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCT}, true,
TSEM_REG_FAST_MEMORY,
TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -511,7 +502,8 @@ static struct storm_defs s_storm_defs[] = {
/* Mstorm */
{'M', BLOCK_MSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCM}, false,
MSEM_REG_FAST_MEMORY,
MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -524,7 +516,8 @@ static struct storm_defs s_storm_defs[] = {
/* Ustorm */
{'U', BLOCK_USEM,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCU}, false,
USEM_REG_FAST_MEMORY,
USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -537,7 +530,8 @@ static struct storm_defs s_storm_defs[] = {
/* Xstorm */
{'X', BLOCK_XSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCX}, false,
XSEM_REG_FAST_MEMORY,
XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -550,7 +544,8 @@ static struct storm_defs s_storm_defs[] = {
/* Ystorm */
{'Y', BLOCK_YSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCY}, false,
YSEM_REG_FAST_MEMORY,
YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -563,7 +558,8 @@ static struct storm_defs s_storm_defs[] = {
/* Pstorm */
{'P', BLOCK_PSEM,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCS}, true,
PSEM_REG_FAST_MEMORY,
PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -579,8 +575,8 @@ static struct storm_defs s_storm_defs[] = {
static struct block_defs block_grc_defs = {
"grc",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
GRC_REG_DBG_FORCE_FRAME,
@@ -588,30 +584,30 @@ static struct block_defs block_grc_defs = {
};
static struct block_defs block_miscs_defs = {
- "miscs", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "miscs", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_defs = {
- "misc", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "misc", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_dbu_defs = {
- "dbu", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "dbu", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_pglue_b_defs = {
"pglue_b",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
PGLUE_B_REG_DBG_FORCE_FRAME,
@@ -620,25 +616,26 @@ static struct block_defs block_pglue_b_defs = {
static struct block_defs block_cnig_defs = {
"cnig",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
- CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
- CNIG_REG_DBG_FORCE_FRAME_K2,
+ {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCW},
+ CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
+ CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
+ CNIG_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 0
};
static struct block_defs block_cpmu_defs = {
- "cpmu", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "cpmu", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 8
};
static struct block_defs block_ncsi_defs = {
"ncsi",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
NCSI_REG_DBG_FORCE_FRAME,
@@ -646,16 +643,16 @@ static struct block_defs block_ncsi_defs = {
};
static struct block_defs block_opte_defs = {
- "opte", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "opte", {true, true, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 4
};
static struct block_defs block_bmb_defs = {
"bmb",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
BMB_REG_DBG_FORCE_FRAME,
@@ -664,27 +661,28 @@ static struct block_defs block_bmb_defs = {
static struct block_defs block_pcie_defs = {
"pcie",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT_K2,
- PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
- PCIE_REG_DBG_COMMON_SHIFT_K2,
- PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
- PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
+ {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCH},
+ PCIE_REG_DBG_COMMON_SELECT_K2_E5,
+ PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
+ PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
+ PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_mcp_defs = {
- "mcp", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "mcp", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_mcp2_defs = {
"mcp2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
MCP2_REG_DBG_FORCE_FRAME,
@@ -693,8 +691,8 @@ static struct block_defs block_mcp2_defs = {
static struct block_defs block_pswhst_defs = {
"pswhst",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
PSWHST_REG_DBG_FORCE_FRAME,
@@ -703,8 +701,8 @@ static struct block_defs block_pswhst_defs = {
static struct block_defs block_pswhst2_defs = {
"pswhst2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
PSWHST2_REG_DBG_FORCE_FRAME,
@@ -713,8 +711,8 @@ static struct block_defs block_pswhst2_defs = {
static struct block_defs block_pswrd_defs = {
"pswrd",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
PSWRD_REG_DBG_FORCE_FRAME,
@@ -723,8 +721,8 @@ static struct block_defs block_pswrd_defs = {
static struct block_defs block_pswrd2_defs = {
"pswrd2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
PSWRD2_REG_DBG_FORCE_FRAME,
@@ -733,8 +731,8 @@ static struct block_defs block_pswrd2_defs = {
static struct block_defs block_pswwr_defs = {
"pswwr",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
PSWWR_REG_DBG_FORCE_FRAME,
@@ -742,16 +740,16 @@ static struct block_defs block_pswwr_defs = {
};
static struct block_defs block_pswwr2_defs = {
- "pswwr2", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "pswwr2", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISC_PL_HV, 3
};
static struct block_defs block_pswrq_defs = {
"pswrq",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
PSWRQ_REG_DBG_FORCE_FRAME,
@@ -760,8 +758,8 @@ static struct block_defs block_pswrq_defs = {
static struct block_defs block_pswrq2_defs = {
"pswrq2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
PSWRQ2_REG_DBG_FORCE_FRAME,
@@ -770,18 +768,19 @@ static struct block_defs block_pswrq2_defs = {
static struct block_defs block_pglcs_defs = {
"pglcs",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2,
- PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2,
- PGLCS_REG_DBG_FORCE_FRAME_K2,
+ {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCH},
+ PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
+ PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
+ PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 2
};
static struct block_defs block_ptu_defs = {
"ptu",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
PTU_REG_DBG_FORCE_FRAME,
@@ -790,8 +789,8 @@ static struct block_defs block_ptu_defs = {
static struct block_defs block_dmae_defs = {
"dmae",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
DMAE_REG_DBG_FORCE_FRAME,
@@ -800,8 +799,8 @@ static struct block_defs block_dmae_defs = {
static struct block_defs block_tcm_defs = {
"tcm",
- {true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
TCM_REG_DBG_FORCE_FRAME,
@@ -810,8 +809,8 @@ static struct block_defs block_tcm_defs = {
static struct block_defs block_mcm_defs = {
"mcm",
- {true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
MCM_REG_DBG_FORCE_FRAME,
@@ -820,8 +819,8 @@ static struct block_defs block_mcm_defs = {
static struct block_defs block_ucm_defs = {
"ucm",
- {true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
UCM_REG_DBG_FORCE_FRAME,
@@ -830,8 +829,8 @@ static struct block_defs block_ucm_defs = {
static struct block_defs block_xcm_defs = {
"xcm",
- {true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
XCM_REG_DBG_FORCE_FRAME,
@@ -840,8 +839,8 @@ static struct block_defs block_xcm_defs = {
static struct block_defs block_ycm_defs = {
"ycm",
- {true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
YCM_REG_DBG_FORCE_FRAME,
@@ -850,8 +849,8 @@ static struct block_defs block_ycm_defs = {
static struct block_defs block_pcm_defs = {
"pcm",
- {true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
PCM_REG_DBG_FORCE_FRAME,
@@ -860,8 +859,8 @@ static struct block_defs block_pcm_defs = {
static struct block_defs block_qm_defs = {
"qm",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
QM_REG_DBG_FORCE_FRAME,
@@ -870,8 +869,8 @@ static struct block_defs block_qm_defs = {
static struct block_defs block_tm_defs = {
"tm",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
TM_REG_DBG_FORCE_FRAME,
@@ -880,8 +879,8 @@ static struct block_defs block_tm_defs = {
static struct block_defs block_dorq_defs = {
"dorq",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
DORQ_REG_DBG_FORCE_FRAME,
@@ -890,8 +889,8 @@ static struct block_defs block_dorq_defs = {
static struct block_defs block_brb_defs = {
"brb",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
BRB_REG_DBG_FORCE_FRAME,
@@ -900,8 +899,8 @@ static struct block_defs block_brb_defs = {
static struct block_defs block_src_defs = {
"src",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
SRC_REG_DBG_FORCE_FRAME,
@@ -910,8 +909,8 @@ static struct block_defs block_src_defs = {
static struct block_defs block_prs_defs = {
"prs",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
PRS_REG_DBG_FORCE_FRAME,
@@ -920,8 +919,8 @@ static struct block_defs block_prs_defs = {
static struct block_defs block_tsdm_defs = {
"tsdm",
- {true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
TSDM_REG_DBG_FORCE_FRAME,
@@ -930,8 +929,8 @@ static struct block_defs block_tsdm_defs = {
static struct block_defs block_msdm_defs = {
"msdm",
- {true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
MSDM_REG_DBG_FORCE_FRAME,
@@ -940,8 +939,8 @@ static struct block_defs block_msdm_defs = {
static struct block_defs block_usdm_defs = {
"usdm",
- {true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
USDM_REG_DBG_FORCE_FRAME,
@@ -950,8 +949,8 @@ static struct block_defs block_usdm_defs = {
static struct block_defs block_xsdm_defs = {
"xsdm",
- {true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
XSDM_REG_DBG_FORCE_FRAME,
@@ -960,8 +959,8 @@ static struct block_defs block_xsdm_defs = {
static struct block_defs block_ysdm_defs = {
"ysdm",
- {true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
YSDM_REG_DBG_FORCE_FRAME,
@@ -970,8 +969,8 @@ static struct block_defs block_ysdm_defs = {
static struct block_defs block_psdm_defs = {
"psdm",
- {true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
PSDM_REG_DBG_FORCE_FRAME,
@@ -980,8 +979,8 @@ static struct block_defs block_psdm_defs = {
static struct block_defs block_tsem_defs = {
"tsem",
- {true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
TSEM_REG_DBG_FORCE_FRAME,
@@ -990,8 +989,8 @@ static struct block_defs block_tsem_defs = {
static struct block_defs block_msem_defs = {
"msem",
- {true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
MSEM_REG_DBG_FORCE_FRAME,
@@ -1000,8 +999,8 @@ static struct block_defs block_msem_defs = {
static struct block_defs block_usem_defs = {
"usem",
- {true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
USEM_REG_DBG_FORCE_FRAME,
@@ -1010,8 +1009,8 @@ static struct block_defs block_usem_defs = {
static struct block_defs block_xsem_defs = {
"xsem",
- {true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
XSEM_REG_DBG_FORCE_FRAME,
@@ -1020,8 +1019,8 @@ static struct block_defs block_xsem_defs = {
static struct block_defs block_ysem_defs = {
"ysem",
- {true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
YSEM_REG_DBG_FORCE_FRAME,
@@ -1030,8 +1029,8 @@ static struct block_defs block_ysem_defs = {
static struct block_defs block_psem_defs = {
"psem",
- {true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
PSEM_REG_DBG_FORCE_FRAME,
@@ -1040,8 +1039,8 @@ static struct block_defs block_psem_defs = {
static struct block_defs block_rss_defs = {
"rss",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
RSS_REG_DBG_FORCE_FRAME,
@@ -1050,8 +1049,8 @@ static struct block_defs block_rss_defs = {
static struct block_defs block_tmld_defs = {
"tmld",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
TMLD_REG_DBG_FORCE_FRAME,
@@ -1060,8 +1059,8 @@ static struct block_defs block_tmld_defs = {
static struct block_defs block_muld_defs = {
"muld",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
MULD_REG_DBG_FORCE_FRAME,
@@ -1070,8 +1069,9 @@ static struct block_defs block_muld_defs = {
static struct block_defs block_yuld_defs = {
"yuld",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ {true, true, false}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+ MAX_DBG_BUS_CLIENTS},
YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
YULD_REG_DBG_FORCE_FRAME_BB_K2,
@@ -1081,18 +1081,40 @@ static struct block_defs block_yuld_defs = {
static struct block_defs block_xyld_defs = {
"xyld",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
XYLD_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
};
+static struct block_defs block_ptld_defs = {
+ "ptld",
+ {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
+ PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
+ PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
+ PTLD_REG_DBG_FORCE_FRAME_E5,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ 28
+};
+
+static struct block_defs block_ypld_defs = {
+ "ypld",
+ {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
+ YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
+ YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
+ YPLD_REG_DBG_FORCE_FRAME_E5,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ 27
+};
+
static struct block_defs block_prm_defs = {
"prm",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
PRM_REG_DBG_FORCE_FRAME,
@@ -1101,8 +1123,8 @@ static struct block_defs block_prm_defs = {
static struct block_defs block_pbf_pb1_defs = {
"pbf_pb1",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
PBF_PB1_REG_DBG_FORCE_FRAME,
@@ -1112,8 +1134,8 @@ static struct block_defs block_pbf_pb1_defs = {
static struct block_defs block_pbf_pb2_defs = {
"pbf_pb2",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
PBF_PB2_REG_DBG_FORCE_FRAME,
@@ -1123,8 +1145,8 @@ static struct block_defs block_pbf_pb2_defs = {
static struct block_defs block_rpb_defs = {
"rpb",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
RPB_REG_DBG_FORCE_FRAME,
@@ -1133,8 +1155,8 @@ static struct block_defs block_rpb_defs = {
static struct block_defs block_btb_defs = {
"btb",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
BTB_REG_DBG_FORCE_FRAME,
@@ -1143,8 +1165,8 @@ static struct block_defs block_btb_defs = {
static struct block_defs block_pbf_defs = {
"pbf",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
PBF_REG_DBG_FORCE_FRAME,
@@ -1153,8 +1175,8 @@ static struct block_defs block_pbf_defs = {
static struct block_defs block_rdif_defs = {
"rdif",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
RDIF_REG_DBG_FORCE_FRAME,
@@ -1163,8 +1185,8 @@ static struct block_defs block_rdif_defs = {
static struct block_defs block_tdif_defs = {
"tdif",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
TDIF_REG_DBG_FORCE_FRAME,
@@ -1173,8 +1195,8 @@ static struct block_defs block_tdif_defs = {
static struct block_defs block_cdu_defs = {
"cdu",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
CDU_REG_DBG_FORCE_FRAME,
@@ -1183,8 +1205,8 @@ static struct block_defs block_cdu_defs = {
static struct block_defs block_ccfc_defs = {
"ccfc",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
CCFC_REG_DBG_FORCE_FRAME,
@@ -1193,8 +1215,8 @@ static struct block_defs block_ccfc_defs = {
static struct block_defs block_tcfc_defs = {
"tcfc",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
TCFC_REG_DBG_FORCE_FRAME,
@@ -1203,8 +1225,8 @@ static struct block_defs block_tcfc_defs = {
static struct block_defs block_igu_defs = {
"igu",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
IGU_REG_DBG_FORCE_FRAME,
@@ -1213,42 +1235,79 @@ static struct block_defs block_igu_defs = {
static struct block_defs block_cau_defs = {
"cau",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
CAU_REG_DBG_FORCE_FRAME,
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
};
+static struct block_defs block_rgfs_defs = {
+ "rgfs", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
+};
+
+static struct block_defs block_rgsrc_defs = {
+ "rgsrc",
+ {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
+ RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
+ RGSRC_REG_DBG_FORCE_FRAME_E5,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ 30
+};
+
+static struct block_defs block_tgfs_defs = {
+ "tgfs", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
+};
+
+static struct block_defs block_tgsrc_defs = {
+ "tgsrc",
+ {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
+ TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
+ TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
+ TGSRC_REG_DBG_FORCE_FRAME_E5,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ 31
+};
+
static struct block_defs block_umac_defs = {
"umac",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2,
- UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2,
- UMAC_REG_DBG_FORCE_FRAME_K2,
+ {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_RBCZ},
+ UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
+ UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
+ UMAC_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 6
};
static struct block_defs block_xmac_defs = {
- "xmac", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "xmac", {true, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_dbg_defs = {
- "dbg", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "dbg", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
};
static struct block_defs block_nig_defs = {
"nig",
- {true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
NIG_REG_DBG_FORCE_FRAME,
@@ -1257,139 +1316,106 @@ static struct block_defs block_nig_defs = {
static struct block_defs block_wol_defs = {
"wol",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2,
- WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2,
- WOL_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
+ WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
+ WOL_REG_DBG_FORCE_FRAME_K2_E5,
true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
};
static struct block_defs block_bmbn_defs = {
"bmbn",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
- BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2,
- BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2,
- BMBN_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCB},
+ BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
+ BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
+ BMBN_REG_DBG_FORCE_FRAME_K2_E5,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_ipc_defs = {
- "ipc", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "ipc", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_UA, 8
};
static struct block_defs block_nwm_defs = {
"nwm",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2,
- NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2,
- NWM_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
+ NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
+ NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
+ NWM_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
};
static struct block_defs block_nws_defs = {
"nws",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2,
- NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2,
- NWS_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
+ NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
+ NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
+ NWS_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 12
};
static struct block_defs block_ms_defs = {
"ms",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2,
- MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2,
- MS_REG_DBG_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
+ MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
+ MS_REG_DBG_FORCE_FRAME_K2_E5,
true, false, DBG_RESET_REG_MISCS_PL_HV, 13
};
static struct block_defs block_phy_pcie_defs = {
"phy_pcie",
- {false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT_K2,
- PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
- PCIE_REG_DBG_COMMON_SHIFT_K2,
- PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
- PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
+ {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCH},
+ PCIE_REG_DBG_COMMON_SELECT_K2_E5,
+ PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
+ PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
+ PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_led_defs = {
- "led", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "led", {false, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 14
};
static struct block_defs block_avs_wrap_defs = {
- "avs_wrap", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "avs_wrap", {false, true, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_UA, 11
};
-static struct block_defs block_rgfs_defs = {
- "rgfs", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_rgsrc_defs = {
- "rgsrc", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_tgfs_defs = {
- "tgfs", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_tgsrc_defs = {
- "tgsrc", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ptld_defs = {
- "ptld", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
- false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ypld_defs = {
- "ypld", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+static struct block_defs block_pxpreqbus_defs = {
+ "pxpreqbus", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_aeu_defs = {
- "misc_aeu", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "misc_aeu", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_bar0_map_defs = {
- "bar0_map", {false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "bar0_map", {true, true, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -1480,164 +1506,160 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
&block_phy_pcie_defs,
&block_led_defs,
&block_avs_wrap_defs,
+ &block_pxpreqbus_defs,
&block_misc_aeu_defs,
&block_bar0_map_defs,
};
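s_block_defs is indexed by enum block_id, so the new &block_pxpreqbus_defs pointer must land at the slot matching its enum value or every later block resolves to the wrong definition. One way to guard that invariant, sketched with a reduced, hypothetical enum (the driver itself does not carry this assert):

struct block_defs {
	const char *name;
};

enum block_id {
	BLOCK_AVS_WRAP,
	BLOCK_PXPREQBUS,
	BLOCK_MISC_AEU,
	BLOCK_BAR0_MAP,
	MAX_BLOCK_ID
};

static struct block_defs avs = { "avs_wrap" };
static struct block_defs pxp = { "pxpreqbus" };
static struct block_defs aeu = { "misc_aeu" };
static struct block_defs bar0 = { "bar0_map" };

static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
	[BLOCK_AVS_WRAP] = &avs,
	[BLOCK_PXPREQBUS] = &pxp,	/* new entry at its enum slot */
	[BLOCK_MISC_AEU] = &aeu,
	[BLOCK_BAR0_MAP] = &bar0,
};

_Static_assert(sizeof(s_block_defs) / sizeof(s_block_defs[0]) == MAX_BLOCK_ID,
	       "table and enum out of sync");

int main(void)
{
	return 0;
}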
static struct platform_defs s_platform_defs[] = {
- {"asic", 1},
- {"reserved", 0},
- {"reserved2", 0},
- {"reserved3", 0}
+ {"asic", 1, 256, 32768},
+ {"reserved", 0, 0, 0},
+ {"reserved2", 0, 0, 0},
+ {"reserved3", 0, 0, 0}
};
static struct grc_param_defs s_grc_param_defs[] = {
/* DBG_GRC_PARAM_DUMP_TSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_MSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_USTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_XSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_YSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_PSTORM */
- {{1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_REGS */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_RAM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_PBUF */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_IOR */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_VFC */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_CM_CTX */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_ILT */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_RSS */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_CAU */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_QM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_MCP */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_RESERVED */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_CFC */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_IGU */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_BRB */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_BTB */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_BMB */
- {{0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_NIG */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_MULD */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_PRS */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_DMAE */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_TM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_SDM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_DIF */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_STATIC */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_UNSTALL */
- {{0, 0}, 0, 1, false, 0, 0},
+ {{0, 0, 0}, 0, 1, false, 0, 0},
/* DBG_GRC_PARAM_NUM_LCIDS */
- {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+ {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
MAX_LCIDS},
/* DBG_GRC_PARAM_NUM_LTIDS */
- {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+ {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
MAX_LTIDS},
/* DBG_GRC_PARAM_EXCLUDE_ALL */
- {{0, 0}, 0, 1, true, 0, 0},
+ {{0, 0, 0}, 0, 1, true, 0, 0},
/* DBG_GRC_PARAM_CRASH */
- {{0, 0}, 0, 1, true, 0, 0},
+ {{0, 0, 0}, 0, 1, true, 0, 0},
/* DBG_GRC_PARAM_PARITY_SAFE */
- {{0, 0}, 0, 1, false, 1, 0},
+ {{0, 0, 0}, 0, 1, false, 1, 0},
/* DBG_GRC_PARAM_DUMP_CM */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_PHY */
- {{1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, 0, 1},
/* DBG_GRC_PARAM_NO_MCP */
- {{0, 0}, 0, 1, false, 0, 0},
+ {{0, 0, 0}, 0, 1, false, 0, 0},
/* DBG_GRC_PARAM_NO_FW_VER */
- {{0, 0}, 0, 1, false, 0, 0}
+ {{0, 0, 0}, 0, 1, false, 0, 0}
};
static struct rss_mem_defs s_rss_mem_defs[] = {
- { "rss_mem_cid", "rss_cid", 0,
- {256, 320},
- {32, 32} },
+ { "rss_mem_cid", "rss_cid", 0, 32,
+ {256, 320, 512} },
- { "rss_mem_key_msb", "rss_key", 1024,
- {128, 208},
- {256, 256} },
+ { "rss_mem_key_msb", "rss_key", 1024, 256,
+ {128, 208, 257} },
- { "rss_mem_key_lsb", "rss_key", 2048,
- {128, 208},
- {64, 64} },
+ { "rss_mem_key_lsb", "rss_key", 2048, 64,
+ {128, 208, 257} },
- { "rss_mem_info", "rss_info", 3072,
- {128, 208},
- {16, 16} },
+ { "rss_mem_info", "rss_info", 3072, 16,
+ {128, 208, 256} },
- { "rss_mem_ind", "rss_ind", 4096,
- {16384, 26624},
- {16, 16} }
+ { "rss_mem_ind", "rss_ind", 4096, 16,
+ {16384, 26624, 32768} }
};
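entry_width moves out of the per-chip array because it is a property of the memory, not of the chip; only num_entries still varies per chip. The dump sizing that consumes these fields (see the qed_grc_dump_rss hunk below) reduces to the arithmetic here, shown with the rss_mem_ind row's K2 values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t num_entries = 26624;	/* rss_mem_ind, K2 column */
	uint32_t entry_width = 16;	/* bits per entry */
	uint32_t total_dwords = (num_entries * entry_width) / 32;
	int packed = (entry_width == 16);

	/* 13312 dwords, packed=1 */
	printf("%u dwords, packed=%d\n", (unsigned)total_dwords, packed);
	return 0;
}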
static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1650,72 +1672,75 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
static struct big_ram_defs s_big_ram_defs[] = {
{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
- {4800, 5632} },
+ MISC_REG_BLOCK_256B_EN, {0, 0, 0},
+ {153600, 180224, 282624} },
{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
- {2880, 3680} },
+ MISC_REG_BLOCK_256B_EN, {0, 1, 1},
+ {92160, 117760, 168960} },
{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
- {1152, 1152} }
+ MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
+ {36864, 36864, 36864} }
};
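Big RAM sizes are now carried directly in dwords per chip, and the fixed 128-byte block size gives way to a runtime probe: a chip-indexed bit in the listed ..._BLOCK_256B_EN register selects 128- or 256-byte blocks. A stand-alone sketch of the probe, with a fabricated register value standing in for the qed_rd() result:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

int main(void)
{
	uint32_t reg_val = 0x2;			/* pretend register read */
	uint32_t is_256b_bit_offset = 1;	/* BTB on K2, per the table */
	uint32_t block_size = (reg_val & BIT(is_256b_bit_offset)) ? 256 : 128;

	printf("block size: %u bytes\n", (unsigned)block_size);
	return 0;
}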
static struct reset_reg_defs s_reset_regs_defs[] = {
/* DBG_RESET_REG_MISCS_PL_UA */
- { MISCS_REG_RESET_PL_UA, 0x0,
- {true, true} },
+ { MISCS_REG_RESET_PL_UA,
+ {true, true, true}, {0x0, 0x0, 0x0} },
/* DBG_RESET_REG_MISCS_PL_HV */
- { MISCS_REG_RESET_PL_HV, 0x0,
- {true, true} },
+ { MISCS_REG_RESET_PL_HV,
+ {true, true, true}, {0x0, 0x400, 0x600} },
/* DBG_RESET_REG_MISCS_PL_HV_2 */
- { MISCS_REG_RESET_PL_HV_2_K2, 0x0,
- {false, true} },
+ { MISCS_REG_RESET_PL_HV_2_K2_E5,
+ {false, true, true}, {0x0, 0x0, 0x0} },
/* DBG_RESET_REG_MISC_PL_UA */
- { MISC_REG_RESET_PL_UA, 0x0,
- {true, true} },
+ { MISC_REG_RESET_PL_UA,
+ {true, true, true}, {0x0, 0x0, 0x0} },
/* DBG_RESET_REG_MISC_PL_HV */
- { MISC_REG_RESET_PL_HV, 0x0,
- {true, true} },
+ { MISC_REG_RESET_PL_HV,
+ {true, true, true}, {0x0, 0x0, 0x0} },
/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
- { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
- {true, true} },
+ { MISC_REG_RESET_PL_PDA_VMAIN_1,
+ {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
- { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
- {true, true} },
+ { MISC_REG_RESET_PL_PDA_VMAIN_2,
+ {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
- { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
- {true, true} },
+ { MISC_REG_RESET_PL_PDA_VAUX,
+ {true, true, true}, {0x2, 0x2, 0x2} },
};
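unreset_val becomes a per-chip array, mirroring the exists[] flags beside it. The unreset flow (see the qed_grc_unreset_blocks hunk below) ORs the chip's baseline value with the reset bit of each block that exists on that chip; a reduced sketch with the register write stubbed out:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))
#define MAX_CHIP_IDS 3

struct reset_reg_defs {
	int exists[MAX_CHIP_IDS];
	uint32_t unreset_val[MAX_CHIP_IDS];
};

int main(void)
{
	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 row from the table above */
	struct reset_reg_defs pda_vmain_2 = {
		{ 1, 1, 1 }, { 0x7, 0x7c00007, 0x5c08007 }
	};
	int chip_id = 1;	/* K2 */
	uint32_t reg_val = 0;

	if (pda_vmain_2.exists[chip_id]) {
		reg_val |= pda_vmain_2.unreset_val[chip_id];
		reg_val |= BIT(12);	/* e.g. xyld's reset bit offset */
		printf("would write 0x%x\n", (unsigned)reg_val);
	}
	return 0;
}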
static struct phy_defs s_phy_defs[] = {
{"nw_phy", NWS_REG_NWS_CMU_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
- {"sgmii_phy", MS_REG_MS_CMU_K2,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
- {"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
- {"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
+ {"sgmii_phy", MS_REG_MS_CMU_K2_E5,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
};
/**************************** Private Functions ******************************/
@@ -1774,7 +1799,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
/* Initializes the GRC parameters */
qed_dbg_grc_init_params(p_hwfn);
- dev_data->initialized = true;
+ dev_data->use_dmae = true;
+ dev_data->num_regs_read = 0;
+ dev_data->initialized = 1;
return DBG_STATUS_OK;
}
@@ -1807,7 +1834,7 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
* The address is located in the last line of the Storm RAM.
*/
addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
- DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+ DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
sizeof(fw_info_location);
dest = (u32 *)&fw_info_location;
@@ -2071,8 +2098,7 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
/* Writes the "last" section (including CRC) to the specified buffer at the
* given offset. Returns the dumped size in dwords.
*/
-static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn,
- u32 *dump_buf, u32 offset, bool dump)
+static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
{
u32 start_offset = offset;
@@ -2235,7 +2261,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
case MEM_GROUP_CFC_MEM:
case MEM_GROUP_CONN_CFC_MEM:
case MEM_GROUP_TASK_CFC_MEM:
- return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
+ qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
case MEM_GROUP_IGU_MEM:
case MEM_GROUP_IGU_MSIX:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
@@ -2293,7 +2320,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
struct block_defs *block = s_block_defs[block_id];
- if (block->has_reset_bit && block->unreset)
+ if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
+ block->unreset)
reg_val[block->reset_reg] |=
BIT(block->reset_bit_offset);
}
@@ -2303,7 +2331,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
continue;
- reg_val[i] |= s_reset_regs_defs[i].unreset_val;
+ reg_val[i] |=
+ s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
if (reg_val[i])
qed_wr(p_hwfn,
@@ -2413,6 +2442,18 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
return offset;
}
+/* Reads the specified registers into the specified buffer.
+ * The addr and len arguments are specified in dwords.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
+{
+ u32 i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
+}
+
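qed_read_regs() is the new slow path: a plain dword-granular loop over qed_rd(). A stand-alone model with the register read replaced by an array lookup so the sketch runs anywhere:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_grc[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

/* stand-in for qed_rd(): byte-addressed register read */
static uint32_t fake_rd(uint32_t byte_addr)
{
	return fake_grc[byte_addr / 4];
}

static void read_regs(uint32_t *buf, uint32_t addr, uint32_t len)
{
	for (uint32_t i = 0; i < len; i++)
		buf[i] = fake_rd((addr + i) * 4);	/* dwords -> bytes */
}

int main(void)
{
	uint32_t buf[4];

	read_regs(buf, 2, 4);
	/* prints 3 4 5 6 */
	printf("%u %u %u %u\n", (unsigned)buf[0], (unsigned)buf[1],
	       (unsigned)buf[2], (unsigned)buf[3]);
	return 0;
}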
/* Dumps the GRC registers in the specified address range.
* Returns the dumped size in dwords.
* The addr and len arguments are specified in dwords.
@@ -2422,15 +2463,39 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump, u32 addr, u32 len, bool wide_bus)
{
- u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
if (!dump)
return len;
- for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+ /* Print log if needed */
+ dev_data->num_regs_read += len;
+ if (dev_data->num_regs_read >=
+ s_platform_defs[dev_data->platform_id].log_thresh) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumping %d registers...\n",
+ dev_data->num_regs_read);
+ dev_data->num_regs_read = 0;
+ }
- return offset;
+ /* Try reading using DMAE */
+ if (dev_data->use_dmae &&
+ (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
+ wide_bus)) {
+ if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
+ (u64)(uintptr_t)(dump_buf), len, 0))
+ return len;
+ dev_data->use_dmae = 0;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Failed reading from chip using DMAE, using GRC instead\n");
+ }
+
+ /* Read registers */
+ qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
+
+ return len;
}
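The rewritten read path tries DMAE first whenever the range meets the platform's dmae_thresh (256 dwords on the "asic" row of s_platform_defs) or the source is a wide bus; on the first DMAE failure it clears use_dmae and falls back to dword-by-dword GRC reads for the rest of the dump. A decision sketch with a stand-in copy routine that is made to fail:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

static bool use_dmae = true;

/* stand-in for qed_dmae_grc2host(); returns nonzero on failure */
static int dmae_copy(uint32_t addr, uint32_t len)
{
	(void)addr;
	(void)len;
	return -1;
}

static void read_range(uint32_t addr, uint32_t len, bool wide_bus)
{
	const uint32_t dmae_thresh = 256;

	if (use_dmae && (len >= dmae_thresh || wide_bus)) {
		if (!dmae_copy(addr, len)) {
			printf("read %u dwords via DMAE\n", (unsigned)len);
			return;
		}
		use_dmae = false;	/* sticky fallback, as in the driver */
	}
	printf("read %u dwords via GRC\n", (unsigned)len);
}

int main(void)
{
	read_range(0x1000, 512, false);	/* DMAE attempt fails -> GRC */
	read_range(0x1000, 512, false);	/* DMAE no longer tried */
	return 0;
}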
/* Dumps GRC registers sequence header. Returns the dumped size in dwords.
@@ -2630,9 +2695,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
chip = &s_chip_defs[dev_data->chip_id];
chip_platform = &chip->per_platform[dev_data->platform_id];
- if (dump)
- DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
-
while (input_offset <
s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
const struct dbg_dump_split_hdr *split_hdr;
@@ -2966,22 +3028,12 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump, "name", buf);
- if (dump)
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "Dumping %d registers from %s...\n",
- len, buf);
} else {
/* Dump address */
u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
offset += qed_dump_num_param(dump_buf + offset,
dump, "addr", addr_in_bytes);
- if (dump && len > 64)
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "Dumping %d registers from address 0x%x...\n",
- len, addr_in_bytes);
}
/* Dump len */
@@ -3530,17 +3582,16 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
u8 rss_mem_id;
for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
- u32 rss_addr, num_entries, entry_width, total_dwords, i;
+ u32 rss_addr, num_entries, total_dwords;
struct rss_mem_defs *rss_defs;
- u32 addr, size;
+ u32 addr, num_dwords_to_read;
bool packed;
rss_defs = &s_rss_mem_defs[rss_mem_id];
rss_addr = rss_defs->addr;
num_entries = rss_defs->num_entries[dev_data->chip_id];
- entry_width = rss_defs->entry_width[dev_data->chip_id];
- total_dwords = (num_entries * entry_width) / 32;
- packed = (entry_width == 16);
+ total_dwords = (num_entries * rss_defs->entry_width) / 32;
+ packed = (rss_defs->entry_width == 16);
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3548,7 +3599,7 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
rss_defs->mem_name,
0,
total_dwords,
- entry_width,
+ rss_defs->entry_width,
packed,
rss_defs->type_name, false, 0);
@@ -3559,16 +3610,20 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
}
addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
- size = RSS_REG_RSS_RAM_DATA_SIZE;
- for (i = 0; i < total_dwords; i += size, rss_addr++) {
+ while (total_dwords) {
+ num_dwords_to_read = min_t(u32,
+ RSS_REG_RSS_RAM_DATA_SIZE,
+ total_dwords);
qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
addr,
- size,
+ num_dwords_to_read,
false);
+ total_dwords -= num_dwords_to_read;
+ rss_addr++;
}
}
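The old loop advanced by a fixed RSS_REG_RSS_RAM_DATA_SIZE stride, which over-reads whenever total_dwords is not a multiple of the window; the min_t()-clamped while loop reads an exact tail instead. The chunking pattern, with a made-up window size:

#include <stdio.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	uint32_t total_dwords = 100, window = 32, rss_addr = 0;

	while (total_dwords) {
		uint32_t n = MIN(window, total_dwords);

		/* chunks of 32, 32, 32, then an exact 4-dword tail */
		printf("addr %u: read %u dwords\n", (unsigned)rss_addr,
		       (unsigned)n);
		total_dwords -= n;
		rss_addr++;
	}
	return 0;
}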
@@ -3581,14 +3636,18 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump, u8 big_ram_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 total_blocks, ram_size, offset = 0, i;
+ u32 block_size, ram_size, offset = 0, reg_val, i;
char mem_name[12] = "???_BIG_RAM";
char type_name[8] = "???_RAM";
struct big_ram_defs *big_ram;
big_ram = &s_big_ram_defs[big_ram_id];
- total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
- ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
+ ram_size = big_ram->ram_size[dev_data->chip_id];
+
+ reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
+ block_size = reg_val &
+ BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
+ : 128;
strncpy(type_name, big_ram->instance_name,
strlen(big_ram->instance_name));
@@ -3602,7 +3661,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
mem_name,
0,
ram_size,
- BIG_RAM_BLOCK_SIZE_BYTES * 8,
+ block_size * 8,
false, type_name, false, 0);
/* Read and dump Big RAM data */
@@ -3610,12 +3669,13 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
return offset + ram_size;
/* Dump Big RAM */
- for (i = 0; i < total_blocks / 2; i++) {
+ for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
+ i++) {
u32 addr, len;
qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
- len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+ len = BRB_REG_BIG_RAM_DATA_SIZE;
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
@@ -3649,7 +3709,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump,
NULL,
BYTES_TO_DWORDS(MCP_REG_SCRATCH),
- MCP_REG_SCRATCH_SIZE,
+ MCP_REG_SCRATCH_SIZE_BB_K2,
false, 0, false, "MCP", false, 0);
/* Dump MCP cpu_reg_file */
@@ -3710,7 +3770,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
phy_defs->tbus_data_lo_addr;
data_hi_addr = phy_defs->base_addr +
phy_defs->tbus_data_hi_addr;
- bytes_buf = (u8 *)(dump_buf + offset);
if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
phy_defs->phy_name) < 0)
@@ -3730,6 +3789,7 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
continue;
}
+ bytes_buf = (u8 *)(dump_buf + offset);
for (tbus_hi_offset = 0;
tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
tbus_hi_offset++) {
@@ -3778,19 +3838,17 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 block_id, line_id, offset = 0;
- /* Skip static debug if a debug bus recording is in progress */
- if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
+ /* Don't dump static debug if a debug bus recording is in progress */
+ if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
return 0;
if (dump) {
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG, "Dumping static debug data...\n");
-
/* Disable all blocks debug output */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
struct block_defs *block = s_block_defs[block_id];
- if (block->has_dbg_bus[dev_data->chip_id])
+ if (block->dbg_client_id[dev_data->chip_id] !=
+ MAX_DBG_BUS_CLIENTS)
qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
0);
}
@@ -3811,12 +3869,12 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
u32 block_dwords, addr, len;
u8 dbg_client_id;
- if (!block->has_dbg_bus[dev_data->chip_id])
+ if (block->dbg_client_id[dev_data->chip_id] ==
+ MAX_DBG_BUS_CLIENTS)
continue;
- block_desc =
- get_dbg_bus_block_desc(p_hwfn,
- (enum block_id)block_id);
+ block_desc = get_dbg_bus_block_desc(p_hwfn,
+ (enum block_id)block_id);
block_dwords = NUM_DBG_LINES(block_desc) *
STATIC_DEBUG_LINE_DWORDS;
@@ -4044,7 +4102,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset, dump);
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
if (dump) {
/* Unstall storms */
@@ -4253,30 +4311,33 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
if (!check_rule && dump)
continue;
+ if (!dump) {
+ u32 entry_dump_size =
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ false,
+ rule->rule_id,
+ rule,
+ 0,
+ NULL);
+
+ offset += num_reg_entries * entry_dump_size;
+ (*num_failing_rules) += num_reg_entries;
+ continue;
+ }
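Throughout this file the dump flag doubles as a dry-run switch: with dump == false the helpers only return how many dwords they would emit, letting callers size the buffer in a first pass. The hunk above exploits the fact that every entry of a rule dumps the same number of dwords, so the dry run prices one entry and multiplies instead of looping. A generic sketch of this two-pass idiom; the names are illustrative, not the driver's API:

#include <stdint.h>
#include <stdlib.h>

/* Writes 'count' records; when dump is false, only reports the size. */
static uint32_t dump_records(uint32_t *buf, int dump, uint32_t count)
{
	const uint32_t dwords_per_record = 3;

	if (!dump)
		return count * dwords_per_record; /* one entry's cost x count */

	for (uint32_t i = 0; i < count * dwords_per_record; i++)
		buf[i] = i;
	return count * dwords_per_record;
}

int main(void)
{
	uint32_t need = dump_records(NULL, 0, 8);	/* pass 1: size only */
	uint32_t *buf = calloc(need, sizeof(*buf));

	if (!buf)
		return 1;
	dump_records(buf, 1, 8);			/* pass 2: real dump */
	free(buf);
	return 0;
}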
+
/* Go over all register entries (number of entries is the same
* for all condition registers).
*/
for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
u32 next_reg_offset = 0;
- if (!dump) {
- offset += qed_idle_chk_dump_failure(p_hwfn,
- p_ptt,
- dump_buf + offset,
- false,
- rule->rule_id,
- rule,
- entry_id,
- NULL);
- (*num_failing_rules)++;
- break;
- }
-
/* Read current entry of all condition registers */
for (reg_id = 0; reg_id < rule->num_cond_regs;
reg_id++) {
const struct dbg_idle_chk_cond_reg *reg =
- &cond_regs[reg_id];
+ &cond_regs[reg_id];
u32 padded_entry_size, addr;
bool wide_bus;
@@ -4291,9 +4352,9 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
if (reg->num_entries > 1 ||
reg->start_entry > 0) {
padded_entry_size =
- reg->entry_size > 1 ?
- roundup_pow_of_two(reg->entry_size)
- : 1;
+ reg->entry_size > 1 ?
+ roundup_pow_of_two(reg->entry_size) :
+ 1;
addr += (reg->start_entry + entry_id) *
padded_entry_size;
}
@@ -4329,7 +4390,6 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
entry_id,
cond_reg_values);
(*num_failing_rules)++;
- break;
}
}
}
@@ -4402,7 +4462,7 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
dump, "num_rules", num_failing_rules);
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
return offset;
}
@@ -4474,7 +4534,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
(nvram_offset_bytes +
read_offset) |
(bytes_to_copy <<
- DRV_MB_PARAM_NVM_LEN_SHIFT),
+ DRV_MB_PARAM_NVM_LEN_OFFSET),
&ret_mcp_resp, &ret_mcp_param,
&ret_read_size,
(u32 *)((u8 *)ret_buf + read_offset)))
@@ -4701,7 +4761,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
offset += trace_meta_size_dwords;
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
*num_dumped_dwords = offset;
@@ -4717,7 +4777,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
- u32 dwords_read, size_param_offset, offset = 0;
+ u32 dwords_read, size_param_offset, offset = 0, addr, len;
bool fifo_has_data;
*num_dumped_dwords = 0;
@@ -4753,14 +4813,18 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
* buffer size since more entries could be added to the buffer as we are
* emptying it.
*/
+ addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
+ len = REG_FIFO_ELEMENT_DWORDS;
for (dwords_read = 0;
fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
- dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
- REG_FIFO_ELEMENT_DWORDS) {
- if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
- (u64)(uintptr_t)(&dump_buf[offset]),
- REG_FIFO_ELEMENT_DWORDS, 0))
- return DBG_STATUS_DMAE_FAILED;
+ dwords_read += REG_FIFO_ELEMENT_DWORDS) {
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ len,
+ true);
fifo_has_data = qed_rd(p_hwfn, p_ptt,
GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
}
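Draining a hardware FIFO that can refill while being read needs both a data-valid check and a hard bound, otherwise the loop could spin forever; the code caps the read at REG_FIFO_DEPTH_DWORDS, the FIFO's nominal depth when the dump started. A standalone model of the bounded-drain shape, with fake FIFO state in place of the VALID_DATA register:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ELEM_DWORDS  2
#define DEPTH_DWORDS 32

static uint32_t fake_level = 6;	/* pretend three elements are pending */

static bool fifo_has_data(void) { return fake_level > 0; }

static void fifo_pop(uint32_t *dst)
{
	dst[0] = dst[1] = fake_level;
	fake_level -= ELEM_DWORDS;
}

int main(void)
{
	uint32_t buf[DEPTH_DWORDS], read;

	for (read = 0; fifo_has_data() && read < DEPTH_DWORDS;
	     read += ELEM_DWORDS)
		fifo_pop(buf + read);	/* bounded even if the FIFO refills */

	printf("drained %u dwords\n", read);
	return 0;
}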
@@ -4769,7 +4833,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
dwords_read);
out:
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
*num_dumped_dwords = offset;
@@ -4782,7 +4846,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
- u32 dwords_read, size_param_offset, offset = 0;
+ u32 dwords_read, size_param_offset, offset = 0, addr, len;
bool fifo_has_data;
*num_dumped_dwords = 0;
@@ -4818,16 +4882,19 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
* buffer size since more entries could be added to the buffer as we are
* emptying it.
*/
+ addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
+ len = IGU_FIFO_ELEMENT_DWORDS;
for (dwords_read = 0;
fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
- dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
- IGU_FIFO_ELEMENT_DWORDS) {
- if (qed_dmae_grc2host(p_hwfn, p_ptt,
- IGU_REG_ERROR_HANDLING_MEMORY,
- (u64)(uintptr_t)(&dump_buf[offset]),
- IGU_FIFO_ELEMENT_DWORDS, 0))
- return DBG_STATUS_DMAE_FAILED;
- fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ len,
+ true);
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
}
@@ -4835,7 +4902,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
dwords_read);
out:
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
*num_dumped_dwords = offset;
@@ -4849,7 +4916,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
bool dump,
u32 *num_dumped_dwords)
{
- u32 size_param_offset, override_window_dwords, offset = 0;
+ u32 size_param_offset, override_window_dwords, offset = 0, addr;
*num_dumped_dwords = 0;
@@ -4875,20 +4942,21 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
/* Add override window info to buffer */
override_window_dwords =
- qed_rd(p_hwfn, p_ptt,
- GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
- if (qed_dmae_grc2host(p_hwfn, p_ptt,
- GRC_REG_PROTECTION_OVERRIDE_WINDOW,
- (u64)(uintptr_t)(dump_buf + offset),
- override_window_dwords, 0))
- return DBG_STATUS_DMAE_FAILED;
- offset += override_window_dwords;
+ qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ override_window_dwords,
+ true);
qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
override_window_dwords);
out:
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
*num_dumped_dwords = offset;
@@ -4952,9 +5020,9 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
next_list_idx_addr = fw_asserts_section_addr +
DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
- last_list_idx = (next_list_idx > 0
- ? next_list_idx
- : asserts->list_num_elements) - 1;
+ last_list_idx = (next_list_idx > 0 ?
+ next_list_idx :
+ asserts->list_num_elements) - 1;
addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
asserts->list_dword_offset +
last_list_idx * asserts->list_element_dword_size;
@@ -4967,7 +5035,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
}
/* Dump last section */
- offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+ offset += qed_dump_last_section(dump_buf, offset, dump);
return offset;
}
@@ -5596,10 +5664,6 @@ struct igu_fifo_addr_data {
#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
-/********************************* Macros ************************************/
-
-#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
-
/***************************** Constant Arrays *******************************/
struct user_dbg_array {
@@ -5698,6 +5762,7 @@ static struct block_info s_block_info_arr[] = {
{"phy_pcie", BLOCK_PHY_PCIE},
{"led", BLOCK_LED},
{"avs_wrap", BLOCK_AVS_WRAP},
+ {"pxpreqbus", BLOCK_PXPREQBUS},
{"misc_aeu", BLOCK_MISC_AEU},
{"bar0_map", BLOCK_BAR0_MAP}
};
@@ -5830,8 +5895,8 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_MCP_COULD_NOT_RESUME */
"Failed to resume MCP after halt",
- /* DBG_STATUS_DMAE_FAILED */
- "DMAE transaction failed",
+ /* DBG_STATUS_RESERVED2 */
+ "Reserved debug status - shouldn't be returned",
/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
"Failed to empty SEMI sync FIFO",
@@ -6109,6 +6174,7 @@ static u32 qed_read_param(u32 *dump_buf,
if (*(char_buf + offset++)) {
/* String param */
*param_str_val = char_buf + offset;
+ *param_num_val = 0;
offset += strlen(*param_str_val) + 1;
if (offset & 0x3)
offset += (4 - (offset & 0x3));
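String parameters are NUL-terminated and the read cursor is then rounded up to the next dword boundary so the following parameter starts aligned; the added *param_num_val = 0 also keeps the numeric out-parameter deterministic for string params. The two-line rounding above is equivalent to the usual (offset + 3) & ~3 form, which a quick standalone check confirms:

#include <assert.h>
#include <stdint.h>

static uint32_t align4(uint32_t off)
{
	if (off & 0x3)
		off += 4 - (off & 0x3);
	return off;		/* same as (off + 3) & ~3u */
}

int main(void)
{
	for (uint32_t off = 0; off < 16; off++)
		assert(align4(off) == ((off + 3) & ~3u));
	return 0;
}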
@@ -6177,8 +6243,7 @@ static u32 qed_print_section_params(u32 *dump_buf,
/* Parses the idle check rules and returns the number of characters printed.
* In case of parsing error, returns 0.
*/
-static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
+static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
u32 *dump_buf_end,
u32 num_rules,
bool print_fw_idle_chk,
@@ -6322,8 +6387,7 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
+static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf,
u32 *parsed_results_bytes,
@@ -6375,13 +6439,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
results_offset),
"FW_IDLE_CHECK:\n");
rules_print_size =
- qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
- dump_buf_end, num_rules,
+ qed_parse_idle_chk_dump_rules(dump_buf,
+ dump_buf_end,
+ num_rules,
true,
results_buf ?
results_buf +
- results_offset : NULL,
- num_errors, num_warnings);
+ results_offset :
+ NULL,
+ num_errors,
+ num_warnings);
results_offset += rules_print_size;
if (!rules_print_size)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
@@ -6392,13 +6459,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
results_offset),
"\nLSI_IDLE_CHECK:\n");
rules_print_size =
- qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
- dump_buf_end, num_rules,
+ qed_parse_idle_chk_dump_rules(dump_buf,
+ dump_buf_end,
+ num_rules,
false,
results_buf ?
results_buf +
- results_offset : NULL,
- num_errors, num_warnings);
+ results_offset :
+ NULL,
+ num_errors,
+ num_warnings);
results_offset += rules_print_size;
if (!rules_print_size)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
@@ -6537,7 +6607,6 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
*/
static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
- u32 num_dumped_dwords,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -6725,9 +6794,7 @@ free_mem:
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
+static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -6834,8 +6901,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
*element, char
*results_buf,
- u32 *results_offset,
- u32 *parsed_results_bytes)
+ u32 *results_offset)
{
const struct igu_fifo_addr_data *found_addr = NULL;
u8 source, err_type, i, is_cleanup;
@@ -6933,9 +6999,9 @@ static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
prod_cons,
update_flag ? "update" : "nop",
- en_dis_int_for_sb
- ? (en_dis_int_for_sb == 1 ? "disable" : "nop")
- : "enable",
+ en_dis_int_for_sb ?
+ (en_dis_int_for_sb == 1 ? "disable" : "nop") :
+ "enable",
segment ? "attn" : "regular",
timer_mask);
}
@@ -6969,9 +7035,7 @@ out:
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
+static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -7011,8 +7075,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
for (i = 0; i < num_elements; i++) {
status = qed_parse_igu_fifo_element(&elements[i],
results_buf,
- &results_offset,
- parsed_results_bytes);
+ &results_offset);
if (status != DBG_STATUS_OK)
return status;
}
@@ -7028,9 +7091,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
}
static enum dbg_status
-qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
+qed_parse_protection_override_dump(u32 *dump_buf,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -7105,9 +7166,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
* parsed_results_bytes.
* The parsing status is returned.
*/
-static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
+static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
char *results_buf,
u32 *parsed_results_bytes)
{
@@ -7209,8 +7268,7 @@ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
{
u32 num_errors, num_warnings;
- return qed_parse_idle_chk_dump(p_hwfn,
- dump_buf,
+ return qed_parse_idle_chk_dump(dump_buf,
num_dumped_dwords,
NULL,
results_buf_size,
@@ -7221,12 +7279,12 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf,
- u32 *num_errors, u32 *num_warnings)
+ u32 *num_errors,
+ u32 *num_warnings)
{
u32 parsed_buf_size;
- return qed_parse_idle_chk_dump(p_hwfn,
- dump_buf,
+ return qed_parse_idle_chk_dump(dump_buf,
num_dumped_dwords,
results_buf,
&parsed_buf_size,
@@ -7245,9 +7303,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *results_buf_size)
{
return qed_parse_mcp_trace_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
+ dump_buf, NULL, results_buf_size);
}
enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
@@ -7259,7 +7315,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
return qed_parse_mcp_trace_dump(p_hwfn,
dump_buf,
- num_dumped_dwords,
results_buf, &parsed_buf_size);
}
@@ -7268,10 +7323,7 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
u32 num_dumped_dwords,
u32 *results_buf_size)
{
- return qed_parse_reg_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
+ return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
}
enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
@@ -7281,10 +7333,7 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_reg_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
+ return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
}
enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
@@ -7292,10 +7341,7 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
u32 num_dumped_dwords,
u32 *results_buf_size)
{
- return qed_parse_igu_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
+ return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
}
enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
@@ -7305,10 +7351,7 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_igu_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
+ return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
}
enum dbg_status
@@ -7317,9 +7360,7 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
u32 num_dumped_dwords,
u32 *results_buf_size)
{
- return qed_parse_protection_override_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
+ return qed_parse_protection_override_dump(dump_buf,
NULL, results_buf_size);
}
@@ -7330,9 +7371,7 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_protection_override_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
+ return qed_parse_protection_override_dump(dump_buf,
results_buf,
&parsed_buf_size);
}
@@ -7342,10 +7381,7 @@ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
u32 num_dumped_dwords,
u32 *results_buf_size)
{
- return qed_parse_fw_asserts_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
+ return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
}
enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
@@ -7355,9 +7391,7 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
- return qed_parse_fw_asserts_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
+ return qed_parse_fw_asserts_dump(dump_buf,
results_buf, &parsed_buf_size);
}
@@ -7386,30 +7420,30 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
/* Go over registers with a non-zero attention status */
for (i = 0; i < num_regs; i++) {
+ struct dbg_attn_bit_mapping *bit_mapping;
struct dbg_attn_reg_result *reg_result;
- struct dbg_attn_bit_mapping *mapping;
u8 num_reg_attn, bit_idx = 0;
reg_result = &results->reg_results[i];
num_reg_attn = GET_FIELD(reg_result->data,
DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
- mapping = &((struct dbg_attn_bit_mapping *)
- block_attn->ptr)[reg_result->block_attn_offset];
+ bit_mapping = &((struct dbg_attn_bit_mapping *)
+ block_attn->ptr)[reg_result->block_attn_offset];
pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
/* Go over attention status bits */
for (j = 0; j < num_reg_attn; j++) {
- u16 attn_idx_val = GET_FIELD(mapping[j].data,
+ u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
DBG_ATTN_BIT_MAPPING_VAL);
const char *attn_name, *attn_type_str, *masked_str;
- u32 name_offset, sts_addr;
+ u32 attn_name_offset, sts_addr;
/* Check if bit mask should be advanced (due to unused
* bits).
*/
- if (GET_FIELD(mapping[j].data,
+ if (GET_FIELD(bit_mapping[j].data,
DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
bit_idx += (u8)attn_idx_val;
continue;
@@ -7422,9 +7456,10 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
}
/* Find attention name */
- name_offset = block_attn_name_offsets[attn_idx_val];
+ attn_name_offset =
+ block_attn_name_offsets[attn_idx_val];
attn_name = &((const char *)
- pstrings->ptr)[name_offset];
+ pstrings->ptr)[attn_name_offset];
attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
"Interrupt" : "Parity";
masked_str = reg_result->mask_val & BIT(bit_idx) ?
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 58a689fb04db..553a6d17260e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -758,7 +758,7 @@ static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
/* This function reconfigures the QM pf on the fly.
* For this purpose we:
* 1. reconfigure the QM database
- * 2. set new values to runtime arrat
+ * 2. set new values to runtime array
* 3. send an sdm_qm_cmd through the rbc interface to stop the QM
* 4. activate init tool in QM_PF stage
* 5. send an sdm_qm_cmd through rbc interface to release the QM
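The five steps above amount to: rebuild the software state, quiesce the queue manager, program the new configuration while it is stopped, then release it. A toy sketch of that ordering with stub steps; the function names below are invented placeholders, not the qed API:

#include <stdio.h>

static void rebuild_qm_database(void)  { puts("1: rebuild QM database"); }
static void fill_runtime_array(void)   { puts("2: fill runtime array"); }
static int  send_qm_stop_cmd(void)     { puts("3: stop the QM"); return 0; }
static void run_qm_pf_init_stage(void) { puts("4: run QM_PF init stage"); }
static int  send_qm_release_cmd(void)  { puts("5: release the QM"); return 0; }

int main(void)
{
	rebuild_qm_database();
	fill_runtime_array();
	if (send_qm_stop_cmd())
		return 1;		/* never program a running QM */
	run_qm_pf_init_stage();
	return send_qm_release_cmd();
}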
@@ -784,7 +784,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
- qed_qm_init_pf(p_hwfn, p_ptt);
+ qed_qm_init_pf(p_hwfn, p_ptt, false);
/* activate init tool on runtime array */
rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -1515,7 +1515,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
}
- /* Protocl Configuration */
+ /* Protocol Configuration */
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
(p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
@@ -1527,6 +1527,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ /* Sanity check before the PF init sequence that uses DMAE */
+ rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
+ if (rc)
+ return rc;
+
/* PF Init sequence */
rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
if (rc)
@@ -2192,7 +2197,7 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* No need for a case for QED_CMDQS_CQS since
* CNQ/CMDQS are the same resource.
*/
- resc_max_val = NUM_OF_CMDQS_CQS;
+ resc_max_val = NUM_OF_GLOBAL_QUEUES;
break;
case QED_RDMA_STATS_QUEUE:
resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
@@ -2267,7 +2272,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
- *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+ *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
break;
case QED_RDMA_STATS_QUEUE:
*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index df195c02b711..2dc9b312a795 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -115,7 +115,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
struct fcoe_init_ramrod_params *p_ramrod = NULL;
struct fcoe_init_func_ramrod_data *p_data;
- struct fcoe_conn_context *p_cxt = NULL;
+ struct e4_fcoe_conn_context *p_cxt = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
struct qed_cxt_info cxt_info;
@@ -167,7 +167,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
}
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
- TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+ E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
fcoe_pf_params->dummy_icid = (u16)dummy_cid;
@@ -568,7 +568,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
- struct fcoe_task_context *p_task_ctx = NULL;
+ struct e4_fcoe_task_context *p_task_ctx = NULL;
int rc;
u32 i;
@@ -580,13 +580,13 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
if (rc)
continue;
- memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
SET_FIELD(p_task_ctx->timer_context.logical_client_0,
TIMERS_CONTEXT_VALIDLC0, 1);
SET_FIELD(p_task_ctx->timer_context.logical_client_1,
TIMERS_CONTEXT_VALIDLC1, 1);
SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
- TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+ E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 3427fe7049b5..de873d770575 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -54,7 +54,7 @@
struct qed_hwfn;
struct qed_ptt;
-/* opcodes for the event ring */
+/* Opcodes for the event ring */
enum common_event_opcode {
COMMON_EVENT_PF_START,
COMMON_EVENT_PF_STOP,
@@ -82,487 +82,7 @@ enum common_ramrod_cmd_id {
MAX_COMMON_RAMROD_CMD_ID
};
-/* The core storm context for the Ystorm */
-struct ystorm_core_conn_st_ctx {
- __le32 reserved[4];
-};
-
-/* The core storm context for the Pstorm */
-struct pstorm_core_conn_st_ctx {
- __le32 reserved[4];
-};
-
-/* Core Slowpath Connection storm context of Xstorm */
-struct xstorm_core_conn_st_ctx {
- __le32 spq_base_lo;
- __le32 spq_base_hi;
- struct regpair consolid_base_addr;
- __le16 spq_cons;
- __le16 consolid_cons;
- __le32 reserved0[55];
-};
-
-struct xstorm_core_conn_ag_ctx {
- u8 reserved0;
- u8 core_state;
- u8 flags0;
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
- u8 flags1;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
- u8 flags2;
-#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
- u8 flags3;
-#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
- u8 flags4;
-#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
- u8 flags5;
-#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
- u8 flags6;
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
- u8 flags7;
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
- u8 flags8;
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
- u8 flags9;
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
- u8 flags10;
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
- u8 flags11;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
- u8 flags12;
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
- u8 flags13;
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
- u8 flags14;
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
- u8 byte2;
- __le16 physical_q0;
- __le16 consolid_prod;
- __le16 reserved16;
- __le16 tx_bd_cons;
- __le16 tx_bd_or_spq_prod;
- __le16 word5;
- __le16 conn_dpi;
- u8 byte3;
- u8 byte4;
- u8 byte5;
- u8 byte6;
- __le32 reg0;
- __le32 reg1;
- __le32 reg2;
- __le32 reg3;
- __le32 reg4;
- __le32 reg5;
- __le32 reg6;
- __le16 word7;
- __le16 word8;
- __le16 word9;
- __le16 word10;
- __le32 reg7;
- __le32 reg8;
- __le32 reg9;
- u8 byte7;
- u8 byte8;
- u8 byte9;
- u8 byte10;
- u8 byte11;
- u8 byte12;
- u8 byte13;
- u8 byte14;
- u8 byte15;
- u8 e5_reserved;
- __le16 word11;
- __le32 reg10;
- __le32 reg11;
- __le32 reg12;
- __le32 reg13;
- __le32 reg14;
- __le32 reg15;
- __le32 reg16;
- __le32 reg17;
- __le32 reg18;
- __le32 reg19;
- __le16 word12;
- __le16 word13;
- __le16 word14;
- __le16 word15;
-};
-
-struct tstorm_core_conn_ag_ctx {
- u8 byte0;
- u8 byte1;
- u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
- u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
- u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
- u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
- u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0;
- __le32 reg1;
- __le32 reg2;
- __le32 reg3;
- __le32 reg4;
- __le32 reg5;
- __le32 reg6;
- __le32 reg7;
- __le32 reg8;
- u8 byte2;
- u8 byte3;
- __le16 word0;
- u8 byte4;
- u8 byte5;
- __le16 word1;
- __le16 word2;
- __le16 word3;
- __le32 reg9;
- __le32 reg10;
-};
-
-struct ustorm_core_conn_ag_ctx {
- u8 reserved;
- u8 byte1;
- u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
- u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- u8 byte2;
- u8 byte3;
- __le16 word0;
- __le16 word1;
- __le32 rx_producers;
- __le32 reg1;
- __le32 reg2;
- __le32 reg3;
- __le16 word2;
- __le16 word3;
-};
-
-/* The core storm context for the Mstorm */
-struct mstorm_core_conn_st_ctx {
- __le32 reserved[24];
-};
-
-/* The core storm context for the Ustorm */
-struct ustorm_core_conn_st_ctx {
- __le32 reserved[4];
-};
-
-/* core connection context */
-struct core_conn_context {
- struct ystorm_core_conn_st_ctx ystorm_st_context;
- struct regpair ystorm_st_padding[2];
- struct pstorm_core_conn_st_ctx pstorm_st_context;
- struct regpair pstorm_st_padding[2];
- struct xstorm_core_conn_st_ctx xstorm_st_context;
- struct xstorm_core_conn_ag_ctx xstorm_ag_context;
- struct tstorm_core_conn_ag_ctx tstorm_ag_context;
- struct ustorm_core_conn_ag_ctx ustorm_ag_context;
- struct mstorm_core_conn_st_ctx mstorm_st_context;
- struct ustorm_core_conn_st_ctx ustorm_st_context;
- struct regpair ustorm_st_padding[2];
-};
-
+/* How ll2 should deal with a packet upon errors */
enum core_error_handle {
LL2_DROP_PACKET,
LL2_DO_NOTHING,
@@ -570,21 +90,25 @@ enum core_error_handle {
MAX_CORE_ERROR_HANDLE
};
+/* Opcodes for the event ring */
enum core_event_opcode {
CORE_EVENT_TX_QUEUE_START,
CORE_EVENT_TX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_FLUSH,
+ CORE_EVENT_TX_QUEUE_UPDATE,
MAX_CORE_EVENT_OPCODE
};
+/* The L4 pseudo checksum mode for Core */
enum core_l4_pseudo_checksum_mode {
CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};
+/* Light-L2 per-port statistics */
struct core_ll2_port_stats {
struct regpair gsi_invalid_hdr;
struct regpair gsi_invalid_pkt_length;
@@ -592,6 +116,7 @@ struct core_ll2_port_stats {
struct regpair gsi_crcchksm_error;
};
+/* Ethernet TX Per Queue Stats */
struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_ucast_bytes;
struct regpair sent_mcast_bytes;
@@ -601,6 +126,7 @@ struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_bcast_pkts;
};
+/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_rx_prod {
__le16 bd_prod;
__le16 cqe_prod;
@@ -621,6 +147,7 @@ struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+/* Core Ramrod Command IDs (light L2) */
enum core_ramrod_cmd_id {
CORE_RAMROD_UNUSED,
CORE_RAMROD_RX_QUEUE_START,
@@ -628,53 +155,64 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_RX_QUEUE_STOP,
CORE_RAMROD_TX_QUEUE_STOP,
CORE_RAMROD_RX_QUEUE_FLUSH,
+ CORE_RAMROD_TX_QUEUE_UPDATE,
MAX_CORE_RAMROD_CMD_ID
};
+/* Core RoCE flavor type */
enum core_roce_flavor_type {
CORE_ROCE,
CORE_RROCE,
MAX_CORE_ROCE_FLAVOR_TYPE
};
+/* Specifies how ll2 should deal with packet errors: packet_too_big and
+ * no_buff.
+ */
struct core_rx_action_on_error {
u8 error_type;
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
-#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
-#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
-#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
-#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
-#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
};
+/* Core RX BD for Light L2 */
struct core_rx_bd {
struct regpair addr;
__le16 reserved[4];
};
+/* Core RX CM offload BD for Light L2 */
struct core_rx_bd_with_buff_len {
struct regpair addr;
__le16 buff_length;
__le16 reserved[3];
};
+/* Core RX BD union for Light L2 */
union core_rx_bd_union {
struct core_rx_bd rx_bd;
struct core_rx_bd_with_buff_len rx_bd_with_len;
};
+/* Opaque Data for Light L2 RX CQE */
struct core_rx_cqe_opaque_data {
__le32 data[2];
};
+/* Core RX CQE Type for Light L2 */
enum core_rx_cqe_type {
- CORE_RX_CQE_ILLIGAL_TYPE,
+ CORE_RX_CQE_ILLEGAL_TYPE,
CORE_RX_CQE_TYPE_REGULAR,
CORE_RX_CQE_TYPE_GSI_OFFLOAD,
CORE_RX_CQE_TYPE_SLOW_PATH,
MAX_CORE_RX_CQE_TYPE
};
+/* Core RX CQE for Light L2 */
struct core_rx_fast_path_cqe {
u8 type;
u8 placement_offset;
@@ -687,6 +225,7 @@ struct core_rx_fast_path_cqe {
__le32 reserved1[3];
};
+/* Core Rx CM offload CQE */
struct core_rx_gsi_offload_cqe {
u8 type;
u8 data_length_error;
@@ -696,9 +235,11 @@ struct core_rx_gsi_offload_cqe {
__le32 src_mac_addrhi;
__le16 src_mac_addrlo;
__le16 qp_id;
- __le32 gid_dst[4];
+ __le32 src_qp;
+ __le32 reserved[3];
};
+/* Core RX CQE for Light L2 */
struct core_rx_slow_path_cqe {
u8 type;
u8 ramrod_cmd_id;
@@ -707,12 +248,14 @@ struct core_rx_slow_path_cqe {
__le32 reserved1[5];
};
+/* Core RX CQE union for Light L2 */
union core_rx_cqe_union {
struct core_rx_fast_path_cqe rx_cqe_fp;
struct core_rx_gsi_offload_cqe rx_cqe_gsi;
struct core_rx_slow_path_cqe rx_cqe_sp;
};
+/* Ramrod data for rx queue start ramrod */
struct core_rx_start_ramrod_data {
struct regpair bd_base;
struct regpair cqe_pbl_addr;
@@ -723,16 +266,18 @@ struct core_rx_start_ramrod_data {
u8 complete_event_flg;
u8 drop_ttl0_flg;
__le16 num_of_pbl_pages;
- u8 inner_vlan_removal_en;
+ u8 inner_vlan_stripping_en;
+ u8 report_outer_vlan;
u8 queue_id;
u8 main_func_queue;
u8 mf_si_bcast_accept_all;
u8 mf_si_mcast_accept_all;
struct core_rx_action_on_error action_on_error;
u8 gsi_offload_flag;
- u8 reserved[7];
+ u8 reserved[6];
};
+/* Ramrod data for rx queue stop ramrod */
struct core_rx_stop_ramrod_data {
u8 complete_cqe_flg;
u8 complete_event_flg;
@@ -741,46 +286,51 @@ struct core_rx_stop_ramrod_data {
__le16 reserved2[2];
};
+/* Flags for Core TX BD */
struct core_tx_bd_data {
__le16 as_bitfield;
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
-#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
-#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
-#define CORE_TX_BD_DATA_START_BD_MASK 0x1
-#define CORE_TX_BD_DATA_START_BD_SHIFT 2
-#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
-#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
-#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
-#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
-#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
-#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
-#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
-#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
-#define CORE_TX_BD_DATA_NBDS_MASK 0xF
-#define CORE_TX_BD_DATA_NBDS_SHIFT 8
-#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
-#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
-#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
-#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
-#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
-};
-
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x1
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 15
+};
+
+/* Core TX BD for Light L2 */
struct core_tx_bd {
struct regpair addr;
__le16 nbytes;
__le16 nw_vlan_or_lb_echo;
struct core_tx_bd_data bd_data;
__le16 bitfield1;
-#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
-#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
-#define CORE_TX_BD_TX_DST_MASK 0x3
-#define CORE_TX_BD_TX_DST_SHIFT 14
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
+#define CORE_TX_BD_TX_DST_MASK 0x3
+#define CORE_TX_BD_TX_DST_SHIFT 14
};
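These MASK/SHIFT pairs are consumed by GET_FIELD()/SET_FIELD(), which other hunks in this patch already use: a field occupies the bits starting at SHIFT, and MASK holds the field width in ones before shifting. A standalone model of the packing with macros of the same shape as the kernel's, using a hypothetical 2-bit field at offset 14 (the same geometry as CORE_TX_BD_TX_DST):

#include <assert.h>
#include <stdint.h>

/* Same shape as the driver's GET_FIELD/SET_FIELD helpers. */
#define GET_FIELD(val, name) (((val) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(var, name, v) \
	((var) = ((var) & ~((uint16_t)(name##_MASK << name##_SHIFT))) | \
		 (((v) & name##_MASK) << name##_SHIFT))

#define TX_DST_MASK  0x3	/* 2-bit field */
#define TX_DST_SHIFT 14

int main(void)
{
	uint16_t bitfield1 = 0;

	SET_FIELD(bitfield1, TX_DST, 2);	/* hypothetical destination */
	assert(GET_FIELD(bitfield1, TX_DST) == 2);
	assert(bitfield1 == (2u << 14));
	return 0;
}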
+/* Light L2 TX Destination */
enum core_tx_dest {
CORE_TX_DEST_NW,
CORE_TX_DEST_LB,
@@ -789,6 +339,7 @@ enum core_tx_dest {
MAX_CORE_TX_DEST
};
+/* Ramrod data for tx queue start ramrod */
struct core_tx_start_ramrod_data {
struct regpair pbl_base_addr;
__le16 mtu;
@@ -803,10 +354,20 @@ struct core_tx_start_ramrod_data {
u8 resrved[3];
};
+/* Ramrod data for tx queue stop ramrod */
struct core_tx_stop_ramrod_data {
__le32 reserved0[2];
};
+/* Ramrod data for tx queue update ramrod */
+struct core_tx_update_ramrod_data {
+ u8 update_qm_pq_id_flg;
+ u8 reserved0;
+ __le16 qm_pq_id;
+ __le32 reserved1[1];
+};
+
+/* Enum flag for what type of dcb data to update */
enum dcb_dscp_update_mode {
DONT_UPDATE_DCB_DSCP,
UPDATE_DCB,
@@ -815,6 +376,487 @@ enum dcb_dscp_update_mode {
MAX_DCB_DSCP_UPDATE_MODE
};
+/* The core storm context for the Ystorm */
+struct ystorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The core storm context for the Pstorm */
+struct pstorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* Core Slowpath Connection storm context of Xstorm */
+struct xstorm_core_conn_st_ctx {
+ __le32 spq_base_lo;
+ __le32 spq_base_hi;
+ struct regpair consolid_base_addr;
+ __le16 spq_cons;
+ __le16 consolid_cons;
+ __le32 reserved0[55];
+};
+
+struct e4_xstorm_core_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 consolid_prod;
+ __le16 reserved16;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_or_spq_prod;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+ __le32 reg18;
+ __le32 reg19;
+ __le16 word12;
+ __le16 word13;
+ __le16 word14;
+ __le16 word15;
+};
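
/*
 * Illustrative sketch (editorial): the *_MASK/*_SHIFT pairs above are meant
 * to be used with generic bitfield accessors; SET_FIELD()/GET_FIELD() from
 * the driver's common_hsi.h are assumed here. E.g. enabling the terminate
 * CF, which the defines place in flags10:
 */
static void sketch_enable_terminate_cf(struct e4_xstorm_core_conn_ag_ctx *p_ag)
{
	SET_FIELD(p_ag->flags10,
		  E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN, 1);
}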
+
+struct e4_tstorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ u8 byte4;
+ u8 byte5;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
+};
+
+struct e4_ustorm_core_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 rx_producers;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+/* The core storm context for the Mstorm */
+struct mstorm_core_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/* The core storm context for the Ustorm */
+struct ustorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* core connection context */
+struct e4_core_conn_context {
+ struct ystorm_core_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_core_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_core_conn_st_ctx xstorm_st_context;
+ struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
+ struct mstorm_core_conn_st_ctx mstorm_st_context;
+ struct ustorm_core_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+};
+
struct eth_mstorm_per_pf_stat {
struct regpair gre_discard_pkts;
struct regpair vxlan_discard_pkts;
@@ -896,6 +938,50 @@ struct eth_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+/* Event Ring VF-PF Channel data */
+struct vf_pf_channel_eqe_data {
+ struct regpair msg_addr;
+};
+
+/* Event Ring malicious VF data */
+struct malicious_vf_eqe_data {
+ u8 vf_id;
+ u8 err_id;
+ __le16 reserved[3];
+};
+
+/* Event Ring initial cleanup data */
+struct initial_cleanup_eqe_data {
+ u8 vf_id;
+ u8 reserved[7];
+};
+
+/* Event Data Union */
+union event_ring_data {
+ u8 bytes[8];
+ struct vf_pf_channel_eqe_data vf_pf_channel;
+ struct iscsi_eqe_data iscsi_info;
+ struct iscsi_connect_done_results iscsi_conn_done_info;
+ union rdma_eqe_data rdma_data;
+ struct malicious_vf_eqe_data malicious_vf;
+ struct initial_cleanup_eqe_data vf_init_cleanup;
+};
+
+/* Event Ring Entry */
+struct event_ring_entry {
+ u8 protocol_id;
+ u8 opcode;
+ __le16 reserved0;
+ __le16 echo;
+ u8 fw_return_code;
+ u8 flags;
+#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
+#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+ union event_ring_data data;
+};
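
/*
 * Illustrative sketch (editorial): reading the async bit out of an event
 * ring entry's flags byte with the mask/shift defined above. GET_FIELD()
 * is assumed from common_hsi.h.
 */
static bool sketch_eqe_is_async(const struct event_ring_entry *p_eqe)
{
	return GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC);
}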
+
/* Event Ring Next Page Address */
struct event_ring_next_addr {
struct regpair addr;
@@ -908,12 +994,21 @@ union event_ring_element {
struct event_ring_next_addr next_addr;
};
+/* Ports mode */
enum fw_flow_ctrl_mode {
flow_ctrl_pause,
flow_ctrl_pfc,
MAX_FW_FLOW_CTRL_MODE
};
+/* GFT profile type */
+enum gft_profile_type {
+ GFT_PROFILE_TYPE_4_TUPLE,
+ GFT_PROFILE_TYPE_L4_DST_PORT,
+ GFT_PROFILE_TYPE_IP_DST_PORT,
+ MAX_GFT_PROFILE_TYPE
+};
+
/* Major and Minor hsi Versions */
struct hsi_fp_ver_struct {
u8 minor_ver_arr[2];
@@ -921,14 +1016,14 @@ struct hsi_fp_ver_struct {
};
enum iwarp_ll2_tx_queues {
- IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
+ IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
IWARP_LL2_ALIGNED_TX_QUEUE,
IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
IWARP_LL2_ERROR,
MAX_IWARP_LL2_TX_QUEUES
};
-/* Mstorm non-triggering VF zone */
+/* Malicious VF error ID */
enum malicious_vf_error_id {
MALICIOUS_VF_NO_ERROR,
VF_PF_CHANNEL_NOT_READY,
@@ -951,9 +1046,11 @@ enum malicious_vf_error_id {
ETH_TUNN_IPV6_EXT_NBD_ERR,
ETH_CONTROL_PACKET_VIOLATION,
ETH_ANTI_SPOOFING_ERR,
+ ETH_PACKET_SIZE_TOO_LARGE,
MAX_MALICIOUS_VF_ERROR_ID
};
+/* Mstorm non-triggering VF zone */
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
@@ -962,7 +1059,21 @@ struct mstorm_non_trigger_vf_zone {
/* Mstorm VF zone */
struct mstorm_vf_zone {
struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* vlan header including TPID and TCI fields */
+struct vlan_header {
+ __le16 tpid;
+ __le16 tci;
+};
+/* outer tag configurations */
+struct outer_tag_config_struct {
+ u8 enable_stag_pri_change;
+ u8 pri_map_valid;
+ u8 reserved[2];
+ struct vlan_header outer_tag;
+ u8 inner_to_outer_pri_map[8];
};
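
/*
 * Illustrative sketch (editorial): programming an outer S-tag through the
 * new outer_tag_config_struct carried by the PF start ramrod. The 0x88a8
 * TPID and the identity priority map are example values only.
 */
static void sketch_fill_outer_tag(struct outer_tag_config_struct *p_cfg,
				  u16 vlan_tci)
{
	int i;

	p_cfg->enable_stag_pri_change = 1;
	p_cfg->pri_map_valid = 1;
	p_cfg->outer_tag.tpid = cpu_to_le16(0x88a8);	/* S-tag ethertype */
	p_cfg->outer_tag.tci = cpu_to_le16(vlan_tci);
	for (i = 0; i < 8; i++)
		p_cfg->inner_to_outer_pri_map[i] = i;	/* identity map */
}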
/* personality per PF */
@@ -974,7 +1085,7 @@ enum personality_type {
PERSONALITY_RDMA,
PERSONALITY_CORE,
PERSONALITY_ETH,
- PERSONALITY_RESERVED4,
+ PERSONALITY_RESERVED,
MAX_PERSONALITY_TYPE
};
@@ -997,7 +1108,6 @@ struct pf_start_ramrod_data {
struct regpair event_ring_pbl_addr;
struct regpair consolid_q_pbl_addr;
struct pf_start_tunnel_config tunnel_config;
- __le32 reserved;
__le16 event_ring_sb_id;
u8 base_vf_id;
u8 num_vfs;
@@ -1011,21 +1121,22 @@ struct pf_start_ramrod_data {
u8 mf_mode;
u8 integ_phase;
u8 allow_npar_tx_switching;
- u8 inner_to_outer_pri_map[8];
- u8 pri_map_valid;
- __le32 outer_tag;
+ u8 reserved0;
struct hsi_fp_ver_struct hsi_fp_ver;
+ struct outer_tag_config_struct outer_tag_config;
};
+/* Data for port update ramrod */
struct protocol_dcb_data {
u8 dcb_enable_flag;
- u8 reserved_a;
+ u8 dscp_enable_flag;
u8 dcb_priority;
u8 dcb_tc;
- u8 reserved_b;
+ u8 dscp_val;
u8 reserved0;
};
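
/*
 * Illustrative sketch (editorial): with reserved_a/reserved_b renamed, a
 * PF update ramrod can now carry DSCP state alongside DCB state for each
 * protocol.
 */
static void sketch_fill_dcb_data(struct protocol_dcb_data *p_dcb,
				 bool dcb_en, u8 prio, u8 tc,
				 bool dscp_en, u8 dscp)
{
	p_dcb->dcb_enable_flag = dcb_en;
	p_dcb->dcb_priority = prio;
	p_dcb->dcb_tc = tc;
	p_dcb->dscp_enable_flag = dscp_en;
	p_dcb->dscp_val = dscp;
}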
+/* Update tunnel configuration */
struct pf_update_tunnel_config {
u8 update_rx_pf_clss;
u8 update_rx_def_ucast_clss;
@@ -1042,8 +1153,8 @@ struct pf_update_tunnel_config {
__le16 reserved;
};
+/* Data for port update ramrod */
struct pf_update_ramrod_data {
- u8 pf_id;
u8 update_eth_dcb_data_mode;
u8 update_fcoe_dcb_data_mode;
u8 update_iscsi_dcb_data_mode;
@@ -1051,6 +1162,7 @@ struct pf_update_ramrod_data {
u8 update_rroce_dcb_data_mode;
u8 update_iwarp_dcb_data_mode;
u8 update_mf_vlan_flag;
+ u8 update_enable_stag_pri_change;
struct protocol_dcb_data eth_dcb_data;
struct protocol_dcb_data fcoe_dcb_data;
struct protocol_dcb_data iscsi_dcb_data;
@@ -1058,7 +1170,8 @@ struct pf_update_ramrod_data {
struct protocol_dcb_data rroce_dcb_data;
struct protocol_dcb_data iwarp_dcb_data;
__le16 mf_vlan;
- __le16 reserved;
+ u8 enable_stag_pri_change;
+ u8 reserved;
struct pf_update_tunnel_config tunnel_config;
};
@@ -1079,11 +1192,13 @@ enum protocol_version_array_key {
MAX_PROTOCOL_VERSION_ARRAY_KEY
};
+/* RDMA TX Stats */
struct rdma_sent_stats {
struct regpair sent_bytes;
struct regpair sent_pkts;
};
+/* Pstorm non-triggering VF zone */
struct pstorm_non_trigger_vf_zone {
struct eth_pstorm_per_queue_stat eth_queue_stat;
struct rdma_sent_stats rdma_stats;
@@ -1103,11 +1218,34 @@ struct ramrod_header {
__le16 echo;
};
+/* RDMA RX Stats */
struct rdma_rcv_stats {
struct regpair rcv_bytes;
struct regpair rcv_pkts;
};
+/* Data for update QCN/DCQCN RL ramrod */
+struct rl_update_ramrod_data {
+ u8 qcn_update_param_flg;
+ u8 dcqcn_update_param_flg;
+ u8 rl_init_flg;
+ u8 rl_start_flg;
+ u8 rl_stop_flg;
+ u8 rl_id_first;
+ u8 rl_id_last;
+ u8 rl_dc_qcn_flg;
+ __le32 rl_bc_rate;
+ __le16 rl_max_rate;
+ __le16 rl_r_ai;
+ __le16 rl_r_hai;
+ __le16 dcqcn_g;
+ __le32 dcqcn_k_us;
+ __le32 dcqcn_timeuot_us;
+ __le32 qcn_timeuot_us;
+ __le32 reserved[2];
+};
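
/*
 * Illustrative sketch (editorial): arming DCQCN rate limiting for a range
 * of RLs via rl_update_ramrod_data. All numbers are example values, not
 * tuned recommendations.
 */
static void sketch_fill_rl_update(struct rl_update_ramrod_data *p_data,
				  u8 first_rl, u8 last_rl)
{
	memset(p_data, 0, sizeof(*p_data));
	p_data->dcqcn_update_param_flg = 1;
	p_data->rl_init_flg = 1;
	p_data->rl_start_flg = 1;
	p_data->rl_id_first = first_rl;
	p_data->rl_id_last = last_rl;
	p_data->rl_dc_qcn_flg = 1;
	p_data->rl_bc_rate = cpu_to_le32(50000);	/* byte counter rate */
	p_data->rl_max_rate = cpu_to_le16(25000);	/* Mb/s */
	p_data->dcqcn_g = cpu_to_le16(1019);		/* alpha gain */
	p_data->dcqcn_k_us = cpu_to_le32(55);
	p_data->dcqcn_timeuot_us = cpu_to_le32(55);	/* field name as in HSI */
}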
+
+/* Slowpath Element (SPQE) */
struct slow_path_element {
struct ramrod_header hdr;
struct regpair data_ptr;
@@ -1130,11 +1268,12 @@ struct tstorm_per_port_stat {
struct regpair roce_irregular_pkt;
struct regpair iwarp_irregular_pkt;
struct regpair eth_irregular_pkt;
- struct regpair reserved1;
+ struct regpair toe_irregular_pkt;
struct regpair preroce_irregular_pkt;
struct regpair eth_gre_tunn_filter_discard;
struct regpair eth_vxlan_tunn_filter_discard;
struct regpair eth_geneve_tunn_filter_discard;
+ struct regpair eth_gft_drop_pkt;
};
/* Tstorm VF zone */
@@ -1197,6 +1336,7 @@ struct vf_stop_ramrod_data {
__le32 reserved2;
};
+/* VF zone size mode */
enum vf_zone_size_mode {
VF_ZONE_SIZE_MODE_DEFAULT,
VF_ZONE_SIZE_MODE_DOUBLE,
@@ -1204,6 +1344,7 @@ enum vf_zone_size_mode {
MAX_VF_ZONE_SIZE_MODE
};
+/* Attentions status block */
struct atten_status_block {
__le32 atten_bits;
__le32 atten_ack;
@@ -1212,12 +1353,6 @@ struct atten_status_block {
__le32 reserved1;
};
-enum command_type_bit {
- IGU_COMMAND_TYPE_NOP = 0,
- IGU_COMMAND_TYPE_SET = 1,
- MAX_COMMAND_TYPE_BIT
-};
-
/* DMAE command */
struct dmae_cmd {
__le32 opcode;
@@ -1327,74 +1462,74 @@ enum dmae_cmd_src_enum {
MAX_DMAE_CMD_SRC_ENUM
};
-struct mstorm_core_conn_ag_ctx {
+struct e4_mstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct ystorm_core_conn_ag_ctx {
+struct e4_ystorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -1545,22 +1680,22 @@ struct qm_rf_opportunistic_mask {
};
/* QM hardware structure of QM map memory */
-struct qm_rf_pq_map {
+struct qm_rf_pq_map_e4 {
__le32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF
-#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
-#define QM_RF_PQ_MAP_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
-#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF
+#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F
+#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26
};
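
/*
 * Illustrative sketch (editorial): composing the renamed E4 PQ map register
 * value with SET_FIELD() before it is written to QM map memory.
 */
static u32 sketch_qm_pq_map_e4(u16 vp_pq_id, u8 rl_id, u8 voq, bool rl_valid)
{
	u32 map = 0;

	SET_FIELD(map, QM_RF_PQ_MAP_E4_PQ_VALID, 1);
	SET_FIELD(map, QM_RF_PQ_MAP_E4_RL_ID, rl_id);
	SET_FIELD(map, QM_RF_PQ_MAP_E4_VP_PQ_ID, vp_pq_id);
	SET_FIELD(map, QM_RF_PQ_MAP_E4_VOQ, voq);
	SET_FIELD(map, QM_RF_PQ_MAP_E4_RL_VALID, rl_valid ? 1 : 0);

	return map;
}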
/* Completion params for aggregated interrupt completion */
@@ -1643,8 +1778,8 @@ enum block_addr {
GRCBASE_MULD = 0x4e0000,
GRCBASE_YULD = 0x4c8000,
GRCBASE_XYLD = 0x4c0000,
- GRCBASE_PTLD = 0x590000,
- GRCBASE_YPLD = 0x5b0000,
+ GRCBASE_PTLD = 0x5a0000,
+ GRCBASE_YPLD = 0x5c0000,
GRCBASE_PRM = 0x230000,
GRCBASE_PBF_PB1 = 0xda0000,
GRCBASE_PBF_PB2 = 0xda4000,
@@ -1675,6 +1810,7 @@ enum block_addr {
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
GRCBASE_AVS_WRAP = 0x6b0000,
+ GRCBASE_PXPREQBUS = 0x56000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -1766,6 +1902,7 @@ enum block_id {
BLOCK_PHY_PCIE,
BLOCK_LED,
BLOCK_AVS_WRAP,
+ BLOCK_PXPREQBUS,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
@@ -1841,7 +1978,7 @@ struct dbg_attn_block_result {
struct dbg_attn_reg_result reg_results[15];
};
-/* mode header */
+/* Mode header */
struct dbg_mode_hdr {
__le16 data;
#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
@@ -1863,80 +2000,83 @@ struct dbg_attn_reg {
__le32 mask_address;
};
-/* attention types */
+/* Attention types */
enum dbg_attn_type {
ATTN_TYPE_INTERRUPT,
ATTN_TYPE_PARITY,
MAX_DBG_ATTN_TYPE
};
+/* Debug Bus block data */
struct dbg_bus_block {
u8 num_of_lines;
u8 has_latency_events;
__le16 lines_offset;
};
+/* Debug Bus block user data */
struct dbg_bus_block_user_data {
u8 num_of_lines;
u8 has_latency_events;
__le16 names_offset;
};
+/* Block Debug line data */
struct dbg_bus_line {
u8 data;
-#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
-#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
-#define DBG_BUS_LINE_IS_256B_MASK 0x1
-#define DBG_BUS_LINE_IS_256B_SHIFT 4
-#define DBG_BUS_LINE_RESERVED_MASK 0x7
-#define DBG_BUS_LINE_RESERVED_SHIFT 5
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
u8 group_sizes;
};
-/* condition header for registers dump */
+/* Condition header for registers dump */
struct dbg_dump_cond_hdr {
struct dbg_mode_hdr mode; /* Mode header */
u8 block_id; /* block ID */
u8 data_size; /* size in dwords of the data following this header */
};
-/* memory data for registers dump */
+/* Memory data for registers dump */
struct dbg_dump_mem {
__le32 dword0;
-#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
-#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
-#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
__le32 dword1;
-#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_LENGTH_SHIFT 0
-#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
-#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
-#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
-#define DBG_DUMP_MEM_RESERVED_SHIFT 25
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
};
-/* register data for registers dump */
+/* Register data for registers dump */
struct dbg_dump_reg {
__le32 data;
-#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
-#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates register is wide-bus */
-#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
-#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
};
-/* split header for registers dump */
+/* Split header for registers dump */
struct dbg_dump_split_hdr {
__le32 hdr;
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
};
-/* condition header for idle check */
+/* Condition header for idle check */
struct dbg_idle_chk_cond_hdr {
struct dbg_mode_hdr mode; /* Mode header */
__le16 data_size; /* size in dwords of the data following this header */
@@ -1945,12 +2085,12 @@ struct dbg_idle_chk_cond_hdr {
/* Idle Check condition register */
struct dbg_idle_chk_cond_reg {
__le32 data;
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
__le16 num_entries;
u8 entry_size;
u8 start_entry;
@@ -1959,12 +2099,12 @@ struct dbg_idle_chk_cond_reg {
/* Idle Check info register */
struct dbg_idle_chk_info_reg {
__le32 data;
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
__le16 size; /* register size in dwords */
struct dbg_mode_hdr mode; /* Mode header */
};
@@ -2016,13 +2156,13 @@ struct dbg_idle_chk_rule {
/* Idle Check rule parsing data */
struct dbg_idle_chk_rule_parsing_data {
__le32 data;
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
};
-/* idle check severity types */
+/* Idle check severity types */
enum dbg_idle_chk_severity_types {
/* idle check failure should cause an error */
IDLE_CHK_SEVERITY_ERROR,
@@ -2036,14 +2176,14 @@ enum dbg_idle_chk_severity_types {
/* Debug Bus block data */
struct dbg_bus_block_data {
__le16 data;
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
u8 line_num;
u8 hw_id;
};
@@ -2072,6 +2212,7 @@ enum dbg_bus_clients {
MAX_DBG_BUS_CLIENTS
};
+/* Debug Bus constraint operation types */
enum dbg_bus_constraint_ops {
DBG_BUS_CONSTRAINT_OP_EQ,
DBG_BUS_CONSTRAINT_OP_NE,
@@ -2086,12 +2227,13 @@ enum dbg_bus_constraint_ops {
MAX_DBG_BUS_CONSTRAINT_OPS
};
+/* Debug Bus trigger state data */
struct dbg_bus_trigger_state_data {
u8 data;
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
};
/* Debug Bus memory address */
@@ -2165,6 +2307,7 @@ struct dbg_bus_data {
struct dbg_bus_storm_data storms[6];
};
+/* Debug bus filter types */
enum dbg_bus_filter_types {
DBG_BUS_FILTER_TYPE_OFF,
DBG_BUS_FILTER_TYPE_PRE,
@@ -2181,6 +2324,7 @@ enum dbg_bus_frame_modes {
MAX_DBG_BUS_FRAME_MODES
};
+/* Debug bus other engine mode */
enum dbg_bus_other_engine_modes {
DBG_BUS_OTHER_ENGINE_MODE_NONE,
DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
@@ -2190,12 +2334,14 @@ enum dbg_bus_other_engine_modes {
MAX_DBG_BUS_OTHER_ENGINE_MODES
};
+/* Debug bus post-trigger recording types */
enum dbg_bus_post_trigger_types {
DBG_BUS_POST_TRIGGER_RECORD,
DBG_BUS_POST_TRIGGER_DROP,
MAX_DBG_BUS_POST_TRIGGER_TYPES
};
+/* Debug bus pre-trigger recording types */
enum dbg_bus_pre_trigger_types {
DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
@@ -2203,11 +2349,10 @@ enum dbg_bus_pre_trigger_types {
MAX_DBG_BUS_PRE_TRIGGER_TYPES
};
+/* Debug bus SEMI frame modes */
enum dbg_bus_semi_frame_modes {
- DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST =
- 0,
- DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST =
- 3,
+ DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
MAX_DBG_BUS_SEMI_FRAME_MODES
};
@@ -2220,6 +2365,7 @@ enum dbg_bus_states {
MAX_DBG_BUS_STATES
};
+/* Debug Bus Storm modes */
enum dbg_bus_storm_modes {
DBG_BUS_STORM_MODE_PRINTF,
DBG_BUS_STORM_MODE_PRAM_ADDR,
@@ -2352,7 +2498,7 @@ enum dbg_status {
DBG_STATUS_MCP_TRACE_NO_META,
DBG_STATUS_MCP_COULD_NOT_HALT,
DBG_STATUS_MCP_COULD_NOT_RESUME,
- DBG_STATUS_DMAE_FAILED,
+ DBG_STATUS_RESERVED2,
DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
DBG_STATUS_IGU_FIFO_BAD_DATA,
DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
@@ -2396,7 +2542,8 @@ struct dbg_tools_data {
u8 chip_id;
u8 platform_id;
u8 initialized;
- u8 reserved;
+ u8 use_dmae;
+ __le32 num_regs_read;
};
/********************************/
@@ -2406,6 +2553,7 @@ struct dbg_tools_data {
/* Number of VLAN priorities */
#define NUM_OF_VLAN_PRIORITIES 8
+/* BRB RAM init requirements */
struct init_brb_ram_req {
__le32 guranteed_per_tc;
__le32 headroom_per_tc;
@@ -2414,17 +2562,20 @@ struct init_brb_ram_req {
u8 num_active_tcs[MAX_NUM_PORTS];
};
+/* ETS per-TC init requirements */
struct init_ets_tc_req {
u8 use_sp;
u8 use_wfq;
__le16 weight;
};
+/* ETS init requirements */
struct init_ets_req {
__le32 mtu;
struct init_ets_tc_req tc_req[NUM_OF_TCS];
};
+/* NIG LB RL init requirements */
struct init_nig_lb_rl_req {
__le16 lb_mac_rate;
__le16 lb_rate;
@@ -2432,15 +2583,18 @@ struct init_nig_lb_rl_req {
__le16 tc_rate[NUM_OF_PHYS_TCS];
};
+/* NIG TC mapping for each priority */
struct init_nig_pri_tc_map_entry {
u8 tc_id;
u8 valid;
};
+/* NIG priority to TC map init requirements */
struct init_nig_pri_tc_map_req {
struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
};
+/* QM per-port init parameters */
struct init_qm_port_params {
u8 active;
u8 active_phys_tcs;
@@ -2563,7 +2717,7 @@ struct bin_buffer_hdr {
__le32 length;
};
-/* binary init buffer types */
+/* Binary init buffer types */
enum bin_init_buffer_type {
BIN_BUF_INIT_FW_VER_INFO,
BIN_BUF_INIT_CMD,
@@ -2793,6 +2947,7 @@ struct iro {
};
/***************************** Public Functions *******************************/
+
/**
* @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
* arrays.
@@ -2802,6 +2957,18 @@ struct iro {
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
/**
+ * @brief qed_read_regs - Reads registers into a buffer (using GRC).
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for reading the registers.
+ * @param buf - Destination buffer.
+ * @param addr - Source GRC address in dwords.
+ * @param len - Number of registers to read.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
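
/*
 * Illustrative usage sketch (editorial) for qed_read_regs(): dumping a
 * small block of GRC registers into a local buffer. The dword address is
 * a placeholder, not a real register offset.
 */
static void sketch_dump_grc_block(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 buf[8];

	/* addr is in dwords; read 8 consecutive registers */
	qed_read_regs(p_hwfn, p_ptt, buf, 0x1000 /* example addr */, 8);
}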
+
+/**
* @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
* default value.
*
@@ -3119,6 +3286,7 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
#define MAX_NAME_LEN 16
/***************************** Public Functions *******************************/
+
/**
* @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
* debug arrays.
@@ -3172,6 +3340,18 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
u32 *num_warnings);
/**
+ * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace
+ * meta data.
+ *
+ * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
+ * no NVRAM access).
+ *
+ * @param data - pointer to MCP Trace meta data
+ * @param size - size of MCP Trace meta data in dwords
+ */
+void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size);
+
+/**
* @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
* for MCP Trace results (in bytes).
*
@@ -3607,6 +3787,9 @@ static const u32 dbg_bus_blocks[] = {
0x00000000, /* bar0_map, bb, 0 lines */
0x00000000, /* bar0_map, k2, 0 lines */
0x00000000,
+ 0x00000000, /* bar0_map, bb, 0 lines */
+ 0x00000000, /* bar0_map, k2, 0 lines */
+ 0x00000000,
};
/* Win 2 */
@@ -3645,7 +3828,6 @@ static const u32 dbg_bus_blocks[] = {
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
- * @param pf_id - physical function ID
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
@@ -3654,8 +3836,7 @@ static const u32 dbg_bus_blocks[] = {
*
* @return The required host memory size in 4KB units.
*/
-u32 qed_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
@@ -3676,7 +3857,7 @@ struct qed_qm_pf_rt_init_params {
u8 port_id;
u8 pf_id;
u8 max_phys_tcs_per_port;
- bool is_first_pf;
+ bool is_pf_loading;
u32 num_pf_cids;
u32 num_vf_cids;
u32 num_tids;
@@ -3687,6 +3868,7 @@ struct qed_qm_pf_rt_init_params {
u8 num_vports;
u16 pf_wfq;
u32 pf_rl;
+ u32 link_speed;
struct init_qm_pq_params *pq_params;
struct init_qm_vport_params *vport_params;
};
@@ -3744,11 +3926,14 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
* @param p_ptt - ptt window used for writing the registers
* @param vport_id - VPORT ID
* @param vport_rl - rate limit in Mb/sec units
+ * @param link_speed - link speed in Mbps.
*
* @return 0 on success, -1 on error.
*/
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl);
+ struct qed_ptt *p_ptt,
+ u8 vport_id, u32 vport_rl, u32 link_speed);
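
/*
 * Illustrative sketch (editorial): the new link_speed argument lets a
 * caller clamp the requested VPORT rate limit to the current link speed
 * (both in Mb/s) in one place.
 */
static int sketch_set_vport_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			       u8 vport_id, u32 req_rl_mbps, u32 link_mbps)
{
	return qed_init_vport_rl(p_hwfn, p_ptt, vport_id,
				 min(req_rl_mbps, link_mbps), link_mbps);
}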
+
/**
* @brief qed_send_qm_stop_cmd Sends a stop command to the QM
*
@@ -3759,7 +3944,8 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
* @param start_pq - first PQ ID to stop
* @param num_pqs - Number of PQs to stop, starting from start_pq.
*
- * @return bool, true if successful, false if timeout occured while waiting for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting for
+ * QM command done.
*/
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3769,6 +3955,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
/**
* @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
*
+ * @param p_hwfn
* @param p_ptt - ptt window used for writing the registers.
* @param dest_port - vxlan destination udp port.
*/
@@ -3778,6 +3965,7 @@ void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
/**
* @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
*
+ * @param p_hwfn
* @param p_ptt - ptt window used for writing the registers.
* @param vxlan_enable - vxlan enable flag.
*/
@@ -3787,6 +3975,7 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
/**
* @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
*
+ * @param p_hwfn
* @param p_ptt - ptt window used for writing the registers.
 * @param eth_gre_enable - eth GRE enable flag.
 * @param ip_gre_enable - IP GRE enable flag.
@@ -3798,6 +3987,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
/**
* @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
*
+ * @param p_hwfn
* @param p_ptt - ptt window used for writing the registers.
* @param dest_port - geneve destination udp port.
*/
@@ -3814,612 +4004,921 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_geneve_enable, bool ip_geneve_enable);
-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 pf_id);
-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- u16 pf_id, bool tcp, bool udp,
- bool ipv4, bool ipv6);
-
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
-#define TSTORM_PORT_STAT_OFFSET(port_id) \
+
+/**
+ * @brief qed_gft_disable - Disable GFT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to disable GFT.
+ */
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
+
+/**
+ * @brief qed_gft_config - Enable and configure HW for GFT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to enable GFT.
+ * @param tcp - match TCP packets.
+ * @param udp - match UDP packets.
+ * @param ipv4 - match IPv4 packets.
+ * @param ipv6 - match IPv6 packets.
+ * @param profile_type - which packet fields the profile matches on. Use
+ * enum gft_profile_type.
+ */
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4, bool ipv6, enum gft_profile_type profile_type);
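
/*
 * Illustrative sketch (editorial): enabling GFT on a PF with the new
 * profile_type argument, here matching TCP/IPv4 flows on the full 4-tuple.
 */
static void sketch_enable_gft(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			      u16 pf_id)
{
	qed_gft_config(p_hwfn, p_ptt, pf_id,
		       true,	/* tcp */
		       false,	/* udp */
		       true,	/* ipv4 */
		       false,	/* ipv6 */
		       GFT_PROFILE_TYPE_4_TUPLE);
}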
+
+/**
+ * @brief qed_enable_context_validation - Enable and configure context
+ * validation.
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_calc_session_ctx_validation - Calculate validation byte for
+ * session context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
+ */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 cid);
+
+/**
+ * @brief qed_calc_task_ctx_validation - Calculate validation byte for task
+ * context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
+ */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 tid);
+
+/**
+ * @brief qed_memset_session_ctx - Memset session context to 0 while
+ * preserving validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
+/**
+ * @brief qed_memset_task_ctx - Memset task context to 0 while preserving
+ * validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
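
/*
 * Illustrative sketch (editorial) of the context-validation lifecycle the
 * four helpers above suggest: stamp validation bytes once per CID, then use
 * the preserving memset on reuse instead of a plain memset().
 */
static void sketch_ctx_validation(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt, void *p_ctx,
				  u16 ctx_size, u8 ctx_type, u32 cid)
{
	qed_enable_context_validation(p_hwfn, p_ptt);
	qed_calc_session_ctx_validation(p_ctx, ctx_size, ctx_type, cid);
	/* ... later, when the CID is recycled: */
	qed_memset_session_ctx(p_ctx, ctx_size, ctx_type);
}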
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
(IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+
+/* Tstorm ll2 port statistics */
#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
(IRO[2].base + ((port_id) * IRO[2].m1))
#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
(IRO[3].base + ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
- (IRO[4].base + (pf_id) * IRO[4].m1)
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
-#define USTORM_EQE_CONS_OFFSET(pf_id) \
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
+
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+ (IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
+
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
(IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
+#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+
+/* Ustorm eth queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
(IRO[6].base + ((queue_zone_id) * IRO[6].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
+
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
(IRO[7].base + ((queue_zone_id) * IRO[7].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
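
/*
 * Illustrative sketch (editorial): these IRO offsets are relative to a
 * storm RAM base. Updating the event-queue consumer for a PF might look
 * like this; the GTT_BAR0_MAP_REG_USDM_RAM base and the REG_WR16()
 * accessor are assumed from the driver's HW-access layer.
 */
static void sketch_eq_cons_update(struct qed_hwfn *p_hwfn, u16 cons)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, cons);
}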
+
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
+
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
+
+/* Tstorm producers */
#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
+ (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
+
+/* Tstorm LightL2 queue statistics */
#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
(IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+
+/* Ustorm LightL2 queue statistics */
#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
+ (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
+
+/* Pstorm LightL2 queue statistics */
#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17]. size)
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
+
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[18].base + ((stat_counter_id) * IRO[18].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
-#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
+
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
(IRO[19].base + ((queue_id) * IRO[19].m1))
-#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
+ * mode.
+ */
#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
- (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
+ (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
-#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
+
+/* Mstorm pf statistics */
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
(IRO[22].base + ((pf_id) * IRO[22].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
+
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[23].base + ((stat_counter_id) * IRO[23].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
-#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
+
+/* Ustorm pf statistics */
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id)\
(IRO[24].base + ((pf_id) * IRO[24].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
+
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[25].base + ((stat_counter_id) * IRO[25].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
-#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
+
+/* Pstorm pf statistics */
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
(IRO[26].base + ((pf_id) * IRO[26].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
-#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
- (IRO[27].base + ((ethtype) * IRO[27].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
+
+/* Control frame's EthType configuration for TX control frame security */
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
+ (IRO[27].base + ((eth_type_id) * IRO[27].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
+
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
+
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
(IRO[29].base + ((pf_id) * IRO[29].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
-#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
+
+/* Xstorm queue zone */
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
(IRO[30].base + ((queue_id) * IRO[30].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[31].base + ((rss_id) * IRO[31].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size)
+
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[32].base + ((rss_id) * IRO[32].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
+
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+ (IRO[33].base + ((pf_id) * IRO[33].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size)
+
+/* Tstorm cmdq-cons of given command queue-id */
#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+ (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+
+/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
+ * BDqueue-id.
+ */
#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
+ (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
+
+/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+ (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+
+/* Tstorm iSCSI RX stats */
#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[37].base + ((pf_id) * IRO[37].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
+ (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
+
+/* Mstorm iSCSI RX stats */
#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[38].base + ((pf_id) * IRO[38].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+ (IRO[38].base + ((pf_id) * IRO[38].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+
+/* Ustorm iSCSI RX stats */
#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[39].base + ((pf_id) * IRO[39].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+ (IRO[39].base + ((pf_id) * IRO[39].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+
+/* Xstorm iSCSI TX stats */
#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[40].base + ((pf_id) * IRO[40].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
+ (IRO[40].base + ((pf_id) * IRO[40].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
+
+/* Ystorm iSCSI TX stats */
#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+ (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+
+/* Pstorm iSCSI TX stats */
#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[42].base + ((pf_id) * IRO[42].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
-#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
-#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+ (IRO[42].base + ((pf_id) * IRO[42].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+
+/* Tstorm FCoE RX stats */
#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[43].base + ((pf_id) * IRO[43].m1))
+ (IRO[43].base + ((pf_id) * IRO[43].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size)
+
+/* Pstorm FCoE TX stats */
#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
(IRO[44].base + ((pf_id) * IRO[44].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size)
+
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
-static const struct iro iro_arr[49] = {
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
+ (IRO[47].base + ((pf_id) * IRO[47].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size)
+
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+ (IRO[48].base + ((roce_pf_id) * IRO[48].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
+
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+ (IRO[49].base + ((roce_pf_id) * IRO[49].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size)
+
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+ (IRO[50].base + ((roce_pf_id) * IRO[50].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size)
+
+static const struct iro iro_arr[51] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
- {0x4cb0, 0x80, 0x0, 0x0, 0x80},
- {0x6518, 0x20, 0x0, 0x0, 0x20},
+ {0x4cb8, 0x88, 0x0, 0x0, 0x88},
+ {0x6530, 0x20, 0x0, 0x0, 0x20},
{0xb00, 0x8, 0x0, 0x0, 0x4},
{0xa80, 0x8, 0x0, 0x0, 0x4},
{0x0, 0x8, 0x0, 0x0, 0x2},
{0x80, 0x8, 0x0, 0x0, 0x4},
{0x84, 0x8, 0x0, 0x0, 0x2},
+ {0x4c48, 0x0, 0x0, 0x0, 0x78},
+ {0x3e18, 0x0, 0x0, 0x0, 0x78},
+ {0x2b58, 0x0, 0x0, 0x0, 0x78},
{0x4c40, 0x0, 0x0, 0x0, 0x78},
- {0x3df0, 0x0, 0x0, 0x0, 0x78},
- {0x29b0, 0x0, 0x0, 0x0, 0x78},
- {0x4c38, 0x0, 0x0, 0x0, 0x78},
- {0x4990, 0x0, 0x0, 0x0, 0x78},
- {0x7f48, 0x0, 0x0, 0x0, 0x78},
+ {0x4998, 0x0, 0x0, 0x0, 0x78},
+ {0x7f50, 0x0, 0x0, 0x0, 0x78},
{0xa28, 0x8, 0x0, 0x0, 0x8},
- {0x61f8, 0x10, 0x0, 0x0, 0x10},
- {0xbd20, 0x30, 0x0, 0x0, 0x30},
- {0x95b8, 0x30, 0x0, 0x0, 0x30},
- {0x4b60, 0x80, 0x0, 0x0, 0x40},
+ {0x6210, 0x10, 0x0, 0x0, 0x10},
+ {0xb820, 0x30, 0x0, 0x0, 0x30},
+ {0x96c0, 0x30, 0x0, 0x0, 0x30},
+ {0x4b68, 0x80, 0x0, 0x0, 0x40},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0x53a0, 0x80, 0x4, 0x0, 0x4},
- {0xc7c8, 0x0, 0x0, 0x0, 0x4},
- {0x4ba0, 0x80, 0x0, 0x0, 0x20},
- {0x8150, 0x40, 0x0, 0x0, 0x30},
- {0xec70, 0x60, 0x0, 0x0, 0x60},
- {0x2b48, 0x80, 0x0, 0x0, 0x38},
- {0xf1b0, 0x78, 0x0, 0x0, 0x78},
+ {0x53a8, 0x80, 0x4, 0x0, 0x4},
+ {0xc7d0, 0x0, 0x0, 0x0, 0x4},
+ {0x4ba8, 0x80, 0x0, 0x0, 0x20},
+ {0x8158, 0x40, 0x0, 0x0, 0x30},
+ {0xe770, 0x60, 0x0, 0x0, 0x60},
+ {0x2cf0, 0x80, 0x0, 0x0, 0x38},
+ {0xf2b8, 0x78, 0x0, 0x0, 0x78},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0xaef8, 0x0, 0x0, 0x0, 0xf0},
- {0xafe8, 0x8, 0x0, 0x0, 0x8},
+ {0xaf20, 0x0, 0x0, 0x0, 0xf0},
+ {0xb010, 0x8, 0x0, 0x0, 0x8},
{0x1f8, 0x8, 0x0, 0x0, 0x8},
{0xac0, 0x8, 0x0, 0x0, 0x8},
{0x2578, 0x8, 0x0, 0x0, 0x8},
{0x24f8, 0x8, 0x0, 0x0, 0x8},
{0x0, 0x8, 0x0, 0x0, 0x8},
- {0x200, 0x10, 0x8, 0x0, 0x8},
- {0xb78, 0x10, 0x8, 0x0, 0x2},
- {0xd9a8, 0x38, 0x0, 0x0, 0x24},
- {0x12988, 0x10, 0x0, 0x0, 0x8},
- {0x11fa0, 0x38, 0x0, 0x0, 0x18},
- {0xa580, 0x38, 0x0, 0x0, 0x10},
- {0x86f8, 0x30, 0x0, 0x0, 0x18},
- {0x101f8, 0x10, 0x0, 0x0, 0x10},
- {0xde28, 0x48, 0x0, 0x0, 0x38},
- {0x10660, 0x20, 0x0, 0x0, 0x20},
- {0x2b80, 0x80, 0x0, 0x0, 0x10},
- {0x5020, 0x10, 0x0, 0x0, 0x10},
- {0xc9b0, 0x30, 0x0, 0x0, 0x10},
- {0xeec0, 0x10, 0x0, 0x0, 0x10},
+ {0x400, 0x18, 0x8, 0x0, 0x8},
+ {0xb78, 0x18, 0x8, 0x0, 0x2},
+ {0xd898, 0x50, 0x0, 0x0, 0x3c},
+ {0x12908, 0x18, 0x0, 0x0, 0x10},
+ {0x11aa8, 0x40, 0x0, 0x0, 0x18},
+ {0xa588, 0x50, 0x0, 0x0, 0x20},
+ {0x8700, 0x40, 0x0, 0x0, 0x28},
+ {0x10300, 0x18, 0x0, 0x0, 0x10},
+ {0xde48, 0x48, 0x0, 0x0, 0x38},
+ {0x10768, 0x20, 0x0, 0x0, 0x20},
+ {0x2d28, 0x80, 0x0, 0x0, 0x10},
+ {0x5048, 0x10, 0x0, 0x0, 0x10},
+ {0xc9b8, 0x30, 0x0, 0x0, 0x10},
+ {0xeee0, 0x10, 0x0, 0x0, 0x10},
+ {0xa3a0, 0x10, 0x0, 0x0, 0x10},
+ {0x13108, 0x8, 0x0, 0x0, 0x8},
};
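
Each *_OFFSET macro above is a lookup into this IRO (internal RAM offsets) table: the entry's base plus the per-index strides m1/m2, as the macro bodies show. The sketch below is a minimal standalone illustration of that pattern; the struct field layout (base, m1, m2, m3, size) is inferred from the five-element initializers, and demo_iro_arr / mstorm_eth_vf_prods_offset are illustrative stand-ins, not the driver's code.

#include <stdio.h>
#include <stdint.h>

struct iro {
	uint32_t base;	/* RAM offset of element 0 */
	uint16_t m1;	/* stride for the first index */
	uint16_t m2;	/* stride for the second index */
	uint16_t m3;	/* stride for a third index (unused here) */
	uint16_t size;	/* size of one element */
};

/* IRO[20] backs MSTORM_ETH_VF_PRODS_OFFSET in the table above */
static const struct iro demo_iro_arr[21] = {
	[20] = {0x53a8, 0x80, 0x4, 0x0, 0x4},
};

/* Two-dimensional form: base + vf_id * m1 + vf_queue_id * m2 */
static uint32_t mstorm_eth_vf_prods_offset(uint32_t vf_id,
					   uint32_t vf_queue_id)
{
	const struct iro *iro = &demo_iro_arr[20];

	return iro->base + vf_id * iro->m1 + vf_queue_id * iro->m2;
}

int main(void)
{
	/* Producer slot for VF 3, queue 1: 0x53a8 + 3 * 0x80 + 1 * 0x4 */
	printf("0x%x\n", mstorm_eth_vf_prods_offset(3, 1));
	return 0;
}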
/* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29967
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29968
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29969
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29970
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29971
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29972
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29973
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29974
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29975
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29976
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29977
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29978
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29979
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29980
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29981
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29982
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29983
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29984
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29985
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29986
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29987
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29988
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29989
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29990
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29991
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29992
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29993
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29994
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29995
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29996
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29997
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29998
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29999
-#define QM_REG_PQTX2PF_40_RT_OFFSET 30000
-#define QM_REG_PQTX2PF_41_RT_OFFSET 30001
-#define QM_REG_PQTX2PF_42_RT_OFFSET 30002
-#define QM_REG_PQTX2PF_43_RT_OFFSET 30003
-#define QM_REG_PQTX2PF_44_RT_OFFSET 30004
-#define QM_REG_PQTX2PF_45_RT_OFFSET 30005
-#define QM_REG_PQTX2PF_46_RT_OFFSET 30006
-#define QM_REG_PQTX2PF_47_RT_OFFSET 30007
-#define QM_REG_PQTX2PF_48_RT_OFFSET 30008
-#define QM_REG_PQTX2PF_49_RT_OFFSET 30009
-#define QM_REG_PQTX2PF_50_RT_OFFSET 30010
-#define QM_REG_PQTX2PF_51_RT_OFFSET 30011
-#define QM_REG_PQTX2PF_52_RT_OFFSET 30012
-#define QM_REG_PQTX2PF_53_RT_OFFSET 30013
-#define QM_REG_PQTX2PF_54_RT_OFFSET 30014
-#define QM_REG_PQTX2PF_55_RT_OFFSET 30015
-#define QM_REG_PQTX2PF_56_RT_OFFSET 30016
-#define QM_REG_PQTX2PF_57_RT_OFFSET 30017
-#define QM_REG_PQTX2PF_58_RT_OFFSET 30018
-#define QM_REG_PQTX2PF_59_RT_OFFSET 30019
-#define QM_REG_PQTX2PF_60_RT_OFFSET 30020
-#define QM_REG_PQTX2PF_61_RT_OFFSET 30021
-#define QM_REG_PQTX2PF_62_RT_OFFSET 30022
-#define QM_REG_PQTX2PF_63_RT_OFFSET 30023
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30564
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30821
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30823
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30855
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30871
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30905
-#define QM_REG_WFQPFCRD_RT_SIZE 256
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31161
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31162
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31675
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32699
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33211
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
-#define QM_REG_VOQCRDLINE_RT_OFFSET 34043
-#define QM_REG_VOQCRDLINE_RT_SIZE 36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308
-
-#define RUNTIME_ARRAY_SIZE 34309
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET 18
+#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET 19
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET 20
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET 21
+#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET 22
+#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET 23
+#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET 24
+#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET 25
+#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET 26
+#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET 27
+#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET 28
+#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET 29
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET 30
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET 31
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET 32
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET 33
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET 34
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET 35
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET 36
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET 37
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 38
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 39
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 40
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 41
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 42
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 43
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 44
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 45
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1069
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 1024
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2093
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6509
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6510
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6511
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6512
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6513
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6514
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6515
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6516
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6517
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6518
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6519
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6520
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6521
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6522
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6523
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6524
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6525
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6527
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6529
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6530
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6531
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6532
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6533
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6534
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6535
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6536
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6537
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6538
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6539
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6540
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6541
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6542
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6543
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6544
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6545
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6546
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6547
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6548
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6549
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6550
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6551
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6552
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6553
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6554
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6555
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6556
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6557
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6558
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6559
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6560
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6561
+#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET 6562
+#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET 6563
+#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET 6564
+#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET 6565
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6566
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 26414
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 32980
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 32981
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 32982
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 32983
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 32984
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 32985
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 32986
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 32987
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 32988
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 32989
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 32990
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 32991
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 32992
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 33408
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 34016
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 34017
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 34018
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 34019
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 34020
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 34021
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 34022
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 34023
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 34024
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 34025
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 34026
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 34027
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 34028
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 34029
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 34030
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 34031
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 34032
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 34033
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 34034
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 34035
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 34036
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 34037
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 34038
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 34039
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 34040
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 34041
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 34042
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 34043
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 34044
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 34045
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 34046
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 34047
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 34048
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 34049
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 34050
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 34051
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 34052
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 34053
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 34054
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 34055
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 34056
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 34057
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 34058
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 34059
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 34060
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 34061
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 34062
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 34063
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 34064
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 34065
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 34066
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 34067
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 34068
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 34069
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 34070
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 34071
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 34072
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 34073
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 34074
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 34075
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 34076
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 34077
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 34078
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 34079
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 34080
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 34081
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211
+#define QM_REG_PTRTBLOTHER_RT_SIZE 256
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493
+#define QM_REG_PQTX2PF_0_RT_OFFSET 34494
+#define QM_REG_PQTX2PF_1_RT_OFFSET 34495
+#define QM_REG_PQTX2PF_2_RT_OFFSET 34496
+#define QM_REG_PQTX2PF_3_RT_OFFSET 34497
+#define QM_REG_PQTX2PF_4_RT_OFFSET 34498
+#define QM_REG_PQTX2PF_5_RT_OFFSET 34499
+#define QM_REG_PQTX2PF_6_RT_OFFSET 34500
+#define QM_REG_PQTX2PF_7_RT_OFFSET 34501
+#define QM_REG_PQTX2PF_8_RT_OFFSET 34502
+#define QM_REG_PQTX2PF_9_RT_OFFSET 34503
+#define QM_REG_PQTX2PF_10_RT_OFFSET 34504
+#define QM_REG_PQTX2PF_11_RT_OFFSET 34505
+#define QM_REG_PQTX2PF_12_RT_OFFSET 34506
+#define QM_REG_PQTX2PF_13_RT_OFFSET 34507
+#define QM_REG_PQTX2PF_14_RT_OFFSET 34508
+#define QM_REG_PQTX2PF_15_RT_OFFSET 34509
+#define QM_REG_PQTX2PF_16_RT_OFFSET 34510
+#define QM_REG_PQTX2PF_17_RT_OFFSET 34511
+#define QM_REG_PQTX2PF_18_RT_OFFSET 34512
+#define QM_REG_PQTX2PF_19_RT_OFFSET 34513
+#define QM_REG_PQTX2PF_20_RT_OFFSET 34514
+#define QM_REG_PQTX2PF_21_RT_OFFSET 34515
+#define QM_REG_PQTX2PF_22_RT_OFFSET 34516
+#define QM_REG_PQTX2PF_23_RT_OFFSET 34517
+#define QM_REG_PQTX2PF_24_RT_OFFSET 34518
+#define QM_REG_PQTX2PF_25_RT_OFFSET 34519
+#define QM_REG_PQTX2PF_26_RT_OFFSET 34520
+#define QM_REG_PQTX2PF_27_RT_OFFSET 34521
+#define QM_REG_PQTX2PF_28_RT_OFFSET 34522
+#define QM_REG_PQTX2PF_29_RT_OFFSET 34523
+#define QM_REG_PQTX2PF_30_RT_OFFSET 34524
+#define QM_REG_PQTX2PF_31_RT_OFFSET 34525
+#define QM_REG_PQTX2PF_32_RT_OFFSET 34526
+#define QM_REG_PQTX2PF_33_RT_OFFSET 34527
+#define QM_REG_PQTX2PF_34_RT_OFFSET 34528
+#define QM_REG_PQTX2PF_35_RT_OFFSET 34529
+#define QM_REG_PQTX2PF_36_RT_OFFSET 34530
+#define QM_REG_PQTX2PF_37_RT_OFFSET 34531
+#define QM_REG_PQTX2PF_38_RT_OFFSET 34532
+#define QM_REG_PQTX2PF_39_RT_OFFSET 34533
+#define QM_REG_PQTX2PF_40_RT_OFFSET 34534
+#define QM_REG_PQTX2PF_41_RT_OFFSET 34535
+#define QM_REG_PQTX2PF_42_RT_OFFSET 34536
+#define QM_REG_PQTX2PF_43_RT_OFFSET 34537
+#define QM_REG_PQTX2PF_44_RT_OFFSET 34538
+#define QM_REG_PQTX2PF_45_RT_OFFSET 34539
+#define QM_REG_PQTX2PF_46_RT_OFFSET 34540
+#define QM_REG_PQTX2PF_47_RT_OFFSET 34541
+#define QM_REG_PQTX2PF_48_RT_OFFSET 34542
+#define QM_REG_PQTX2PF_49_RT_OFFSET 34543
+#define QM_REG_PQTX2PF_50_RT_OFFSET 34544
+#define QM_REG_PQTX2PF_51_RT_OFFSET 34545
+#define QM_REG_PQTX2PF_52_RT_OFFSET 34546
+#define QM_REG_PQTX2PF_53_RT_OFFSET 34547
+#define QM_REG_PQTX2PF_54_RT_OFFSET 34548
+#define QM_REG_PQTX2PF_55_RT_OFFSET 34549
+#define QM_REG_PQTX2PF_56_RT_OFFSET 34550
+#define QM_REG_PQTX2PF_57_RT_OFFSET 34551
+#define QM_REG_PQTX2PF_58_RT_OFFSET 34552
+#define QM_REG_PQTX2PF_59_RT_OFFSET 34553
+#define QM_REG_PQTX2PF_60_RT_OFFSET 34554
+#define QM_REG_PQTX2PF_61_RT_OFFSET 34555
+#define QM_REG_PQTX2PF_62_RT_OFFSET 34556
+#define QM_REG_PQTX2PF_63_RT_OFFSET 34557
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 35098
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354
+#define QM_REG_RLPFPERIOD_RT_OFFSET 35355
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356
+#define QM_REG_RLPFINCVAL_RT_OFFSET 35357
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 35389
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 35405
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 35439
+#define QM_REG_WFQPFCRD_RT_SIZE 256
+#define QM_REG_WFQPFENABLE_RT_OFFSET 35695
+#define QM_REG_WFQVPENABLE_RT_OFFSET 35696
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 36209
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 37233
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 37745
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_PTRTBLTX_RT_OFFSET 38257
+#define QM_REG_PTRTBLTX_RT_SIZE 1024
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
+#define QM_REG_VOQCRDLINE_RT_OFFSET 39601
+#define QM_REG_VOQCRDLINE_RT_SIZE 36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
+#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022
+
+#define RUNTIME_ARRAY_SIZE 43023
+
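The *_RT_OFFSET constants above are not register addresses; they index a host-side staging array of RUNTIME_ARRAY_SIZE u32 slots that init code fills and the driver later writes to the device, with _RT_SIZE giving the length of array regions. A minimal sketch of that usage, assuming the definitions above are in scope; rt_shadow, rt_valid and rt_store() are illustrative stand-ins, not the driver's actual helpers:

#include <stdbool.h>
#include <stdint.h>

/* Host-side staging area, one u32 slot per runtime offset */
static uint32_t rt_shadow[RUNTIME_ARRAY_SIZE];
static bool rt_valid[RUNTIME_ARRAY_SIZE];

static void rt_store(uint32_t rt_offset, uint32_t val)
{
	rt_shadow[rt_offset] = val;
	rt_valid[rt_offset] = true;	/* only valid slots get flushed */
}

static void rt_init_example(void)
{
	uint32_t i;

	/* Scalar register: a single slot at a fixed index */
	rt_store(DORQ_REG_PF_WAKE_ALL_RT_OFFSET, 1);

	/* Array region: _RT_SIZE consecutive slots from _RT_OFFSET */
	for (i = 0; i < QM_REG_RLGLBLINCVAL_RT_SIZE; i++)
		rt_store(QM_REG_RLGLBLINCVAL_RT_OFFSET + i, 0);
}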
+/* Init Callbacks */
+#define DMAE_READY_CB 0
/* The eth storm context for the Tstorm */
struct tstorm_eth_conn_st_ctx {
@@ -4436,219 +4935,219 @@ struct xstorm_eth_conn_st_ctx {
__le32 reserved[60];
};
-struct xstorm_eth_conn_ag_ctx {
+struct e4_xstorm_eth_conn_ag_ctx {
u8 reserved0;
- u8 eth_state;
+ u8 state;
u8 flags0;
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 ereserved1;
+ __le16 e5_reserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -4681,7 +5180,7 @@ struct xstorm_eth_conn_ag_ctx {
u8 byte13;
u8 byte14;
u8 byte15;
- u8 ereserved;
+ u8 e5_reserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -4704,37 +5203,37 @@ struct ystorm_eth_conn_st_ctx {
__le32 reserved[8];
};
-struct ystorm_eth_conn_ag_ctx {
+struct e4_ystorm_eth_conn_ag_ctx {
u8 byte0;
u8 state;
u8 flags0;
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 tx_q0_int_coallecing_timeset;
u8 byte3;
__le16 word0;
@@ -4748,89 +5247,89 @@ struct ystorm_eth_conn_ag_ctx {
__le32 reg3;
};
-struct tstorm_eth_conn_ag_ctx {
+struct e4_tstorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -4852,63 +5351,63 @@ struct tstorm_eth_conn_ag_ctx {
__le32 reg10;
};
-struct ustorm_eth_conn_ag_ctx {
+struct e4_ustorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
u8 flags2;
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -4932,20 +5431,21 @@ struct mstorm_eth_conn_st_ctx {
};
/* eth connection context */
-struct eth_conn_context {
+struct e4_eth_conn_context {
struct tstorm_eth_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct pstorm_eth_conn_st_ctx pstorm_st_context;
struct xstorm_eth_conn_st_ctx xstorm_st_context;
- struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
struct ystorm_eth_conn_st_ctx ystorm_st_context;
- struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
- struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
- struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
struct ustorm_eth_conn_st_ctx ustorm_st_context;
struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
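Every MASK/SHIFT pair in the aggregation contexts above is meant to be accessed through the driver's generic field macros rather than open-coded shifts. A minimal sketch, assuming the GET_FIELD()/SET_FIELD() macros from qed's common HSI header, which paste "_MASK"/"_SHIFT" onto the field name:

	/* Sketch: enable the DQ completion flag and read back the
	 * TPH_ENABLE field of an E4 xstorm aggregation context, using
	 * the defines from the struct above.
	 */
	static u8 qed_ag_ctx_sketch(struct e4_xstorm_eth_conn_ag_ctx *ctx)
	{
		SET_FIELD(ctx->flags10, E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN, 1);
		return GET_FIELD(ctx->flags14,
				 E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE);
	}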
+/* Eth error codes */
enum eth_error_code {
ETH_OK = 0x00,
ETH_FILTERS_MAC_ADD_FAIL_FULL,
@@ -4972,6 +5472,7 @@ enum eth_error_code {
MAX_ETH_ERROR_CODE
};
+/* Opcodes for the event ring */
enum eth_event_opcode {
ETH_EVENT_UNUSED,
ETH_EVENT_VPORT_START,
@@ -4983,13 +5484,14 @@ enum eth_event_opcode {
ETH_EVENT_RX_QUEUE_UPDATE,
ETH_EVENT_RX_QUEUE_STOP,
ETH_EVENT_FILTERS_UPDATE,
- ETH_EVENT_RESERVED,
- ETH_EVENT_RESERVED2,
- ETH_EVENT_RESERVED3,
+ ETH_EVENT_RX_ADD_OPENFLOW_FILTER,
+ ETH_EVENT_RX_DELETE_OPENFLOW_FILTER,
+ ETH_EVENT_RX_CREATE_OPENFLOW_ACTION,
ETH_EVENT_RX_ADD_UDP_FILTER,
ETH_EVENT_RX_DELETE_UDP_FILTER,
- ETH_EVENT_RESERVED4,
- ETH_EVENT_RESERVED5,
+ ETH_EVENT_RX_CREATE_GFT_ACTION,
+ ETH_EVENT_RX_GFT_UPDATE_FILTER,
+ ETH_EVENT_TX_QUEUE_UPDATE,
MAX_ETH_EVENT_OPCODE
};
@@ -5039,6 +5541,7 @@ enum eth_filter_type {
MAX_ETH_FILTER_TYPE
};
+/* Eth IPv4 Fragment Type */
enum eth_ipv4_frag_type {
ETH_IPV4_NOT_FRAG,
ETH_IPV4_FIRST_FRAG,
@@ -5046,12 +5549,14 @@ enum eth_ipv4_frag_type {
MAX_ETH_IPV4_FRAG_TYPE
};
+/* Eth IP type */
enum eth_ip_type {
ETH_IPV4,
ETH_IPV6,
MAX_ETH_IP_TYPE
};
+/* Ethernet Ramrod Command IDs */
enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED,
ETH_RAMROD_VPORT_START,
@@ -5070,10 +5575,11 @@ enum eth_ramrod_cmd_id {
ETH_RAMROD_RX_DELETE_UDP_FILTER,
ETH_RAMROD_RX_CREATE_GFT_ACTION,
ETH_RAMROD_GFT_UPDATE_FILTER,
+ ETH_RAMROD_TX_QUEUE_UPDATE,
MAX_ETH_RAMROD_CMD_ID
};
-/* return code from eth sp ramrods */
+/* Return code from eth sp ramrods */
struct eth_return_code {
u8 value;
#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
@@ -5209,18 +5715,14 @@ struct eth_vport_tx_mode {
__le16 reserved2[3];
};
+/* GFT filter update action type */
enum gft_filter_update_action {
GFT_ADD_FILTER,
GFT_DELETE_FILTER,
MAX_GFT_FILTER_UPDATE_ACTION
};
-enum gft_logic_filter_type {
- GFT_FILTER_TYPE,
- RFS_FILTER_TYPE,
- MAX_GFT_LOGIC_FILTER_TYPE
-};
-
+/* Ramrod data for rx add openflow filter */
struct rx_add_openflow_filter_data {
__le16 action_icid;
u8 priority;
@@ -5244,11 +5746,13 @@ struct rx_add_openflow_filter_data {
__le16 l4_src_port;
};
+/* Ramrod data for rx create gft action */
struct rx_create_gft_action_data {
u8 vport_id;
u8 reserved[7];
};
+/* Ramrod data for rx create openflow action */
struct rx_create_openflow_action_data {
u8 vport_id;
u8 reserved[7];
@@ -5286,7 +5790,7 @@ struct rx_queue_start_ramrod_data {
struct regpair reserved2;
};
-/* Ramrod data for rx queue start ramrod */
+/* Ramrod data for rx queue stop ramrod */
struct rx_queue_stop_ramrod_data {
__le16 rx_queue_id;
u8 complete_cqe_flg;
@@ -5324,14 +5828,22 @@ struct rx_udp_filter_data {
__le32 tenant_id;
};
+/* Add or delete a GFT filter - the filter is the packet header of the
+ * packet type that should be steered to a given FW flow.
+ */
struct rx_update_gft_filter_data {
struct regpair pkt_hdr_addr;
__le16 pkt_hdr_length;
- __le16 rx_qid_or_action_icid;
- u8 vport_id;
- u8 filter_type;
+ __le16 action_icid;
+ __le16 rx_qid;
+ __le16 flow_id;
+ __le16 vport_id;
+ u8 action_icid_valid;
+ u8 rx_qid_valid;
+ u8 flow_id_valid;
u8 filter_action;
u8 assert_on_error;
+ u8 reserved;
};
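Splitting the old rx_qid_or_action_icid field into separate rx_qid/action_icid/flow_id fields, each with its own *_valid byte, lets one ramrod carry any combination of targets. A sketch of a plain queue-steering add, with hypothetical values (the real call sites live in the qed L2 code; the packet-header buffer fields are omitted here):

	/* Sketch: fill the reworked GFT-update payload for an "add" that
	 * steers matching packets to an Rx queue; the other targets are
	 * left marked invalid. Field names match the struct above.
	 */
	static void qed_gft_add_sketch(struct rx_update_gft_filter_data *p,
				       u16 rx_qid, u16 vport_id)
	{
		p->rx_qid = cpu_to_le16(rx_qid);
		p->rx_qid_valid = 1;
		p->action_icid_valid = 0;
		p->flow_id_valid = 0;
		p->vport_id = cpu_to_le16(vport_id);
		p->filter_action = GFT_ADD_FILTER;
		p->assert_on_error = 1;
	}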
/* Ramrod data for rx queue start ramrod */
@@ -5377,6 +5889,14 @@ struct tx_queue_stop_ramrod_data {
__le16 reserved[4];
};
+/* Ramrod data for tx queue update ramrod */
+struct tx_queue_update_ramrod_data {
+ __le16 update_qm_pq_id_flg;
+ __le16 qm_pq_id;
+ __le32 reserved0;
+ struct regpair reserved1[5];
+};
+
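The new TX_QUEUE_UPDATE ramrod (see eth_ramrod_cmd_id above) currently carries only a physical-queue change. A sketch of filling its payload, assuming update_qm_pq_id_flg acts as a boolean:

	/* Sketch: request that an active Tx queue be moved to a new QM
	 * physical queue; reserved fields stay zeroed by the caller.
	 */
	static void qed_txq_pq_update_sketch(struct tx_queue_update_ramrod_data *p,
					     u16 new_pq_id)
	{
		p->update_qm_pq_id_flg = cpu_to_le16(1);
		p->qm_pq_id = cpu_to_le16(new_pq_id);
	}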
/* Ramrod data for vport update ramrod */
struct vport_filter_update_ramrod_data {
struct eth_filter_cmd_header filter_cmd_hdr;
@@ -5477,219 +5997,219 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
-struct xstorm_eth_conn_agctxdq_ext_ldpart {
+struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
u8 reserved0;
- u8 eth_state;
+ u8 state;
u8 flags0;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
u8 flags3;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
u8 flags4;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
u8 flags5;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
u8 flags6;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 ereserved1;
+ __le16 e5_reserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -5706,256 +6226,256 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart {
__le32 reg4;
};
-struct mstorm_eth_conn_ag_ctx {
+struct e4_mstorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
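[Editor's sketch, not part of the patch: the renamed E4_* MASK/SHIFT pairs are consumed through the qed GET_FIELD/SET_FIELD accessors defined in common_hsi.h; the wrapper function below is hypothetical and only illustrates the access pattern.]

static void e4_mstorm_ag_flags_sketch(struct e4_mstorm_eth_conn_ag_ctx *ctx)
{
	u8 cf0;

	/* Extract the 2-bit CF0 field from flags0 */
	cf0 = GET_FIELD(ctx->flags0, E4_MSTORM_ETH_CONN_AG_CTX_CF0);

	/* Enable CF0 handling via the 1-bit CF0EN field in flags1 */
	SET_FIELD(ctx->flags1, E4_MSTORM_ETH_CONN_AG_CTX_CF0EN, 1);

	(void)cf0;
}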
-struct xstorm_eth_hw_conn_ag_ctx {
+struct e4_xstorm_eth_hw_conn_ag_ctx {
u8 reserved0;
- u8 eth_state;
+ u8 state;
u8 flags0;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 ereserved1;
+ __le16 e5_reserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -5963,6 +6483,7 @@ struct xstorm_eth_hw_conn_ag_ctx {
__le16 conn_dpi;
};
+/* GFT CAM line struct */
struct gft_cam_line {
__le32 camline;
#define GFT_CAM_LINE_VALID_MASK 0x1
@@ -5975,6 +6496,7 @@ struct gft_cam_line {
#define GFT_CAM_LINE_RESERVED1_SHIFT 29
};
+/* GFT CAM line struct with fields broken out */
struct gft_cam_line_mapped {
__le32 camline;
#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
@@ -6008,28 +6530,31 @@ union gft_cam_line_union {
struct gft_cam_line_mapped cam_line_mapped;
};
+/* Used in gft_profile_key: Indication for ip version */
enum gft_profile_ip_version {
GFT_PROFILE_IPV4 = 0,
GFT_PROFILE_IPV6 = 1,
MAX_GFT_PROFILE_IP_VERSION
};
+/* Profile key struct for GFT logic in PRS */
struct gft_profile_key {
__le16 profile_key;
-#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
-#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
-#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
-#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
-#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
-#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
-#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
-#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
-#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
-#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
-#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
-#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
-};
-
+#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
+#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
+#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
+};
+
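[Editor's sketch, not part of the patch: a minimal example of composing a profile key from these fields, assuming the qed SET_FIELD helper; the function name is hypothetical and only enumerators visible in this hunk are used.]

static __le16 gft_profile_key_sketch(void)
{
	u16 key = 0;

	/* IPv4, VXLAN tunnel, RoCE upper protocol (enums defined below) */
	SET_FIELD(key, GFT_PROFILE_KEY_IP_VERSION, GFT_PROFILE_IPV4);
	SET_FIELD(key, GFT_PROFILE_KEY_TUNNEL_TYPE, GFT_PROFILE_VXLAN_TUNNEL);
	SET_FIELD(key, GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE,
		  GFT_PROFILE_ROCE_PROTOCOL);

	return cpu_to_le16(key);
}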
+/* Used in gft_profile_key: Indication for tunnel type */
enum gft_profile_tunnel_type {
GFT_PROFILE_NO_TUNNEL = 0,
GFT_PROFILE_VXLAN_TUNNEL = 1,
@@ -6040,6 +6565,7 @@ enum gft_profile_tunnel_type {
MAX_GFT_PROFILE_TUNNEL_TYPE
};
+/* Used in gft_profile_key: Indication for protocol type */
enum gft_profile_upper_protocol_type {
GFT_PROFILE_ROCE_PROTOCOL = 0,
GFT_PROFILE_RROCE_PROTOCOL = 1,
@@ -6060,6 +6586,7 @@ enum gft_profile_upper_protocol_type {
MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
};
+/* GFT RAM line struct */
struct gft_ram_line {
__le32 lo;
#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
@@ -6149,6 +6676,7 @@ struct gft_ram_line {
#define GFT_RAM_LINE_RESERVED1_SHIFT 10
};
+/* Used in the first 2 bits of gft_ram_line: Indication for vlan select */
enum gft_vlan_select {
INNER_PROVIDER_VLAN = 0,
INNER_VLAN = 1,
@@ -6157,10 +6685,205 @@ enum gft_vlan_select {
MAX_GFT_VLAN_SELECT
};
+/* The rdma task context of Ystorm */
+struct ystorm_rdma_task_st_ctx {
+ struct regpair temp[4];
+};
+
+struct e4_ystorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 msem_ctx_upd_seq;
+ u8 flags0;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+struct e4_mstorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+/* The rdma task context of Mstorm */
struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
+/* The rdma task context of Ustorm */
+struct ustorm_rdma_task_st_ctx {
+ struct regpair temp[2];
+};
+
+struct e4_ustorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
+ u8 flags1;
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
+ u8 flags3;
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 reg2;
+ __le32 dif_runt_value;
+ __le32 reg4;
+ __le32 reg5;
+};
+
+/* RDMA task context */
+struct e4_rdma_task_context {
+ struct ystorm_rdma_task_st_ctx ystorm_st_context;
+ struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context;
+ struct tdif_task_context tdif_context;
+ struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
+ struct mstorm_rdma_task_st_ctx mstorm_st_context;
+ struct rdif_task_context rdif_context;
+ struct ustorm_rdma_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
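[Editor's sketch, not part of the patch: the aggregated task context is touched through its per-storm members; memset and SET_FIELD are standard kernel/qed helpers, the function name is hypothetical.]

static void e4_rdma_task_ctx_sketch(struct e4_rdma_task_context *ctx)
{
	memset(ctx, 0, sizeof(*ctx));

	/* Mark the Ystorm aggregative task context valid */
	SET_FIELD(ctx->ystorm_ag_context.flags0,
		  E4_YSTORM_RDMA_TASK_AG_CTX_VALID, 1);
}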
+/* rdma function close ramrod data */
struct rdma_close_func_ramrod_data {
u8 cnq_start_offset;
u8 num_cnqs;
@@ -6169,6 +6892,7 @@ struct rdma_close_func_ramrod_data {
u8 reserved[4];
};
+/* rdma function init CNQ parameters */
struct rdma_cnq_params {
__le16 sb_num;
u8 sb_index;
@@ -6179,6 +6903,7 @@ struct rdma_cnq_params {
u8 reserved1[6];
};
+/* rdma create cq ramrod data */
struct rdma_create_cq_ramrod_data {
struct regpair cq_handle;
struct regpair pbl_addr;
@@ -6193,21 +6918,25 @@ struct rdma_create_cq_ramrod_data {
__le16 reserved1;
};
+/* rdma deregister tid ramrod data */
struct rdma_deregister_tid_ramrod_data {
__le32 itid;
__le32 reserved;
};
+/* rdma destroy cq output params */
struct rdma_destroy_cq_output_params {
__le16 cnq_num;
__le16 reserved0;
__le32 reserved1;
};
+/* rdma destroy cq ramrod data */
struct rdma_destroy_cq_ramrod_data {
struct regpair output_params_addr;
};
+/* RDMA slow path EQ cmd IDs */
enum rdma_event_opcode {
RDMA_EVENT_UNUSED,
RDMA_EVENT_FUNC_INIT,
@@ -6223,6 +6952,7 @@ enum rdma_event_opcode {
MAX_RDMA_EVENT_OPCODE
};
+/* RDMA FW return code for slow path ramrods */
enum rdma_fw_return_code {
RDMA_RETURN_OK = 0,
RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR,
@@ -6232,20 +6962,24 @@ enum rdma_fw_return_code {
MAX_RDMA_FW_RETURN_CODE
};
+/* rdma function init header */
struct rdma_init_func_hdr {
u8 cnq_start_offset;
u8 num_cnqs;
u8 cq_ring_mode;
u8 vf_id;
u8 vf_valid;
- u8 reserved[3];
+ u8 relaxed_ordering;
+ u8 reserved[2];
};
+/* rdma function init ramrod data */
struct rdma_init_func_ramrod_data {
struct rdma_init_func_hdr params_header;
struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
};
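[Editor's sketch, not part of the patch: filling the init header, including the relaxed_ordering byte this diff carves out of the old reserved[3]; the function name is hypothetical.]

static void rdma_init_func_hdr_sketch(struct rdma_init_func_hdr *hdr,
				      u8 num_cnqs, bool relaxed_ordering)
{
	hdr->cnq_start_offset = 0;
	hdr->num_cnqs = num_cnqs;
	hdr->relaxed_ordering = relaxed_ordering ? 1 : 0;
}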
+/* RDMA ramrod command IDs */
enum rdma_ramrod_cmd_id {
RDMA_RAMROD_UNUSED,
RDMA_RAMROD_FUNC_INIT,
@@ -6261,42 +6995,43 @@ enum rdma_ramrod_cmd_id {
MAX_RDMA_RAMROD_CMD_ID
};
+/* rdma register tid ramrod data */
struct rdma_register_tid_ramrod_data {
__le16 flags;
#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 0
#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 5
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 9
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 10
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12
#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 13
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14
u8 flags1;
#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
-#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
u8 flags2;
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
u8 key;
u8 length_hi;
u8 vf_id;
@@ -6313,19 +7048,21 @@ struct rdma_register_tid_ramrod_data {
__le32 reserved4[2];
};
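[Editor's sketch, not part of the patch: the re-aligned flags defines are consumed by building the bitfield in a host-order u16 and storing it little-endian; only defines visible in this hunk are used, and the function name is hypothetical.]

static void rdma_register_tid_flags_sketch(
		struct rdma_register_tid_ramrod_data *p)
{
	u16 flags = 0;

	/* 4 KB pages: log2(4096) = 12 */
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, 12);
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, 1);
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, 1);

	p->flags = cpu_to_le16(flags);
}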
+/* rdma resize cq output params */
struct rdma_resize_cq_output_params {
__le32 old_cq_cons;
__le32 old_cq_prod;
};
+/* rdma resize cq ramrod data */
struct rdma_resize_cq_ramrod_data {
u8 flags;
-#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
-#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
u8 pbl_log_page_size;
__le16 pbl_num_pages;
__le32 max_cqes;
@@ -6333,10 +7070,12 @@ struct rdma_resize_cq_ramrod_data {
struct regpair output_params_addr;
};
+/* rdma SRQ context */
struct rdma_srq_context {
struct regpair temp[8];
};
+/* rdma create srq ramrod data */
struct rdma_srq_create_ramrod_data {
struct regpair pbl_base_addr;
__le16 pages_in_srq_pbl;
@@ -6348,206 +7087,19 @@ struct rdma_srq_create_ramrod_data {
struct regpair producers_addr;
};
+/* rdma destroy srq ramrod data */
struct rdma_srq_destroy_ramrod_data {
struct rdma_srq_id srq_id;
__le32 reserved;
};
+/* rdma modify srq ramrod data */
struct rdma_srq_modify_ramrod_data {
struct rdma_srq_id srq_id;
__le32 wqe_limit;
};
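[Editor's sketch, not part of the patch, covering the SRQ modify ramrod above; names other than the structs and fields shown are hypothetical.]

static void rdma_srq_modify_sketch(struct rdma_srq_modify_ramrod_data *p,
				   struct rdma_srq_id srq_id, u32 wqe_limit)
{
	p->srq_id = srq_id;
	p->wqe_limit = cpu_to_le32(wqe_limit);
}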
-struct ystorm_rdma_task_st_ctx {
- struct regpair temp[4];
-};
-
-struct ystorm_rdma_task_ag_ctx {
- u8 reserved;
- u8 byte1;
- __le16 msem_ctx_upd_seq;
- u8 flags0;
-#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
-#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
- u8 flags1;
-#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
- u8 flags2;
-#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
- u8 key;
- __le32 mw_cnt;
- u8 ref_cnt_seq;
- u8 ctx_upd_seq;
- __le16 dif_flags;
- __le16 tx_ref_count;
- __le16 last_used_ltid;
- __le16 parent_mr_lo;
- __le16 parent_mr_hi;
- __le32 fbo_lo;
- __le32 fbo_hi;
-};
-
-struct mstorm_rdma_task_ag_ctx {
- u8 reserved;
- u8 byte1;
- __le16 icid;
- u8 flags0;
-#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
- u8 flags1;
-#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
-#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
- u8 flags2;
-#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
-#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
- u8 key;
- __le32 mw_cnt;
- u8 ref_cnt_seq;
- u8 ctx_upd_seq;
- __le16 dif_flags;
- __le16 tx_ref_count;
- __le16 last_used_ltid;
- __le16 parent_mr_lo;
- __le16 parent_mr_hi;
- __le32 fbo_lo;
- __le32 fbo_hi;
-};
-
-struct ustorm_rdma_task_st_ctx {
- struct regpair temp[2];
-};
-
-struct ustorm_rdma_task_ag_ctx {
- u8 reserved;
- u8 byte1;
- __le16 icid;
- u8 flags0;
-#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
- u8 flags1;
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
-#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
- u8 flags2;
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
-#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
- u8 flags3;
-#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
- __le32 dif_err_intervals;
- __le32 dif_error_1st_interval;
- __le32 reg2;
- __le32 dif_runt_value;
- __le32 reg4;
- __le32 reg5;
-};
-
-struct rdma_task_context {
- struct ystorm_rdma_task_st_ctx ystorm_st_context;
- struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
- struct tdif_task_context tdif_context;
- struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
- struct mstorm_rdma_task_st_ctx mstorm_st_context;
- struct rdif_task_context rdif_context;
- struct ustorm_rdma_task_st_ctx ustorm_st_context;
- struct regpair ustorm_st_padding[2];
- struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
-};
-
+/* RDMA Tid type enumeration (for register_tid ramrod) */
enum rdma_tid_type {
RDMA_TID_REGISTERED_MR,
RDMA_TID_FMR,
@@ -6556,214 +7108,214 @@ enum rdma_tid_type {
MAX_RDMA_TID_TYPE
};
-struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
u8 flags1;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
u8 flags3;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
u8 flags5;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
u8 flags6;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
u8 flags7;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
u8 flags11;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -6783,126 +7335,126 @@ struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
__le32 reg4;
};
-struct mstorm_rdma_conn_ag_ctx {
+struct e4_mstorm_rdma_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
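
/*
 * Editor's sketch (not part of the patch): the *_MASK/*_SHIFT pairs above
 * are consumed through the GET_FIELD()/SET_FIELD() helpers from
 * include/linux/qed/common_hsi.h, which token-paste the _MASK/_SHIFT
 * suffixes onto the field name. A minimal, hypothetical example of packing
 * and unpacking fields in e4_mstorm_rdma_conn_ag_ctx.flags0 -- the struct
 * and macro names mirror the patch, the function itself is illustrative:
 */
static void example_mstorm_flags(struct e4_mstorm_rdma_conn_ag_ctx *ctx)
{
	/* Set the single-bit BIT0 field (bit 0 of flags0). */
	SET_FIELD(ctx->flags0, E4_MSTORM_RDMA_CONN_AG_CTX_BIT0, 1);

	/* Set the two-bit CF0 completion-flag field (bits 2-3 of flags0). */
	SET_FIELD(ctx->flags0, E4_MSTORM_RDMA_CONN_AG_CTX_CF0, 2);

	/* Read a field back: GET_FIELD is ((value >> SHIFT) & MASK). */
	if (GET_FIELD(ctx->flags0, E4_MSTORM_RDMA_CONN_AG_CTX_CF0) == 2)
		;	/* field round-trips */
}
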
-struct tstorm_rdma_conn_ag_ctx {
+struct e4_tstorm_rdma_conn_ag_ctx {
u8 reserved0;
u8 byte1;
u8 flags0;
-#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
-#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -6924,73 +7476,73 @@ struct tstorm_rdma_conn_ag_ctx {
__le32 reg10;
};
-struct tstorm_rdma_task_ag_ctx {
+struct e4_tstorm_rdma_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
-#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
-#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
-#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
-#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
@@ -7003,63 +7555,63 @@ struct tstorm_rdma_task_ag_ctx {
__le32 reg2;
};
-struct ustorm_rdma_conn_ag_ctx {
+struct e4_ustorm_rdma_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
-#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
-#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
u8 flags3;
-#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
-#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 conn_dpi;
@@ -7072,214 +7624,214 @@ struct ustorm_rdma_conn_ag_ctx {
__le16 word3;
};
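
/*
 * Editor's sketch (not part of the patch): the paired <name>_CF and
 * <name>_CF_EN fields above appear to follow a common pattern -- the
 * two-bit CF field carries a completion-flag state while the one-bit
 * *_EN field gates whether the aggregation context acts on it. A
 * hypothetical illustration against e4_ustorm_rdma_conn_ag_ctx, again
 * assuming the GET_FIELD()/SET_FIELD() helpers from common_hsi.h:
 */
static void example_arm_cq(struct e4_ustorm_rdma_conn_ag_ctx *ctx)
{
	/* Enable the CQ-arm completion flag (bit 5 of flags2)... */
	SET_FIELD(ctx->flags2, E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN, 1);

	/* ...and set its two-bit state field (bits 4-5 of flags1). */
	SET_FIELD(ctx->flags1, E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF, 1);
}
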
-struct xstorm_rdma_conn_ag_ctx {
+struct e4_xstorm_rdma_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -7301,37 +7853,37 @@ struct xstorm_rdma_conn_ag_ctx {
__le32 reg6;
};
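The _MASK/_SHIFT pairs above are never used bare; the qed driver packs and unpacks these flag bytes through generic token-pasting accessors. A minimal sketch of that pattern, modeled on the GET_FIELD/SET_FIELD helpers in include/linux/qed/common_hsi.h (the example function and its field choices are illustrative, not taken from the driver):

/* Sketch: consuming a MASK/SHIFT pair. Modeled on common_hsi.h; the
 * exact kernel definitions may differ slightly.
 */
#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & name##_MASK)

#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((u64)(name##_MASK) << (name##_SHIFT));	\
		(value) |= ((u64)(flag) & (u64)(name##_MASK))		\
			   << (name##_SHIFT);				\
	} while (0)

static void e4_xstorm_flags_sketch(struct e4_xstorm_rdma_conn_ag_ctx *ctx)
{
	u8 port;

	/* Enable the RoCE EDPM path and read back the DPM port number;
	 * both fields live in flags14 per the definitions above.
	 */
	SET_FIELD(ctx->flags14, E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE, 1);
	port = GET_FIELD(ctx->flags14, E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM);
	(void)port;
}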
-struct ystorm_rdma_conn_ag_ctx {
+struct e4_ystorm_rdma_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -7345,62 +7897,70 @@ struct ystorm_rdma_conn_ag_ctx {
__le32 reg3;
};
-struct mstorm_roce_conn_st_ctx {
- struct regpair temp[6];
+/* The roce storm context of Ystorm */
+struct ystorm_roce_conn_st_ctx {
+ struct regpair temp[2];
};
+/* The roce storm context of Pstorm */
struct pstorm_roce_conn_st_ctx {
struct regpair temp[16];
};
-struct ystorm_roce_conn_st_ctx {
- struct regpair temp[2];
-};
-
+/* The roce storm context of Xstorm */
struct xstorm_roce_conn_st_ctx {
struct regpair temp[24];
};
+/* The roce storm context of Tstorm */
struct tstorm_roce_conn_st_ctx {
struct regpair temp[30];
};
+/* The roce storm context of Mstorm */
+struct mstorm_roce_conn_st_ctx {
+ struct regpair temp[6];
+};
+
+/* The roce storm context of Ustorm */
struct ustorm_roce_conn_st_ctx {
struct regpair temp[12];
};
-struct roce_conn_context {
+/* roce connection context */
+struct e4_roce_conn_context {
struct ystorm_roce_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_roce_conn_st_ctx pstorm_st_context;
struct xstorm_roce_conn_st_ctx xstorm_st_context;
struct regpair xstorm_st_padding[2];
- struct xstorm_rdma_conn_ag_ctx xstorm_ag_context;
- struct tstorm_rdma_conn_ag_ctx tstorm_ag_context;
+ struct e4_xstorm_rdma_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_rdma_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
- struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_roce_conn_st_ctx tstorm_st_context;
struct mstorm_roce_conn_st_ctx mstorm_st_context;
struct ustorm_roce_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
};
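Note how e4_roce_conn_context interleaves explicit regpair padding (ystorm_st_padding, xstorm_st_padding, ustorm_st_padding) so that each storm's slice of the context image keeps a fixed size. A hedged compile-time sketch of the arithmetic for the Ystorm slice, whose 32-byte slot is inferred from temp[2] plus the two padding regpairs rather than stated anywhere in the source:

#include <linux/build_bug.h>

/* Inferred layout check: 2 state regpairs + 2 padding regpairs
 * = 4 * 8 bytes. The 32-byte slot size is an assumption drawn from
 * the struct definitions above, not from firmware documentation.
 */
static inline void e4_roce_conn_ctx_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct ystorm_roce_conn_st_ctx) +
		     2 * sizeof(struct regpair) != 32);
}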
+/* roce create qp requester ramrod data */
struct roce_create_qp_req_ramrod_data {
__le16 flags;
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12
u8 max_ord;
u8 traffic_class;
u8 hop_limit;
@@ -7431,26 +7991,27 @@ struct roce_create_qp_req_ramrod_data {
__le16 dpi;
};
+/* roce create qp responder ramrod data */
struct roce_create_qp_resp_ramrod_data {
__le16 flags;
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
u8 max_ird;
u8 traffic_class;
u8 hop_limit;
@@ -7482,24 +8043,40 @@ struct roce_create_qp_resp_ramrod_data {
__le16 dpi;
};
+/* roce DCQCN received statistics */
+struct roce_dcqcn_received_stats {
+ struct regpair ecn_pkt_rcv;
+ struct regpair cnp_pkt_rcv;
+};
+
+/* roce DCQCN sent statistics */
+struct roce_dcqcn_sent_stats {
+ struct regpair cnp_pkt_sent;
+};
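The two new DCQCN statistics blocks carry 64-bit event counters as regpair values, i.e. split into little-endian 32-bit halves. A hedged helper for folding one back together (qed's own HILO-style regpair macros serve the same purpose; this standalone version is illustrative only):

#include <linux/types.h>

/* Assumes struct regpair { __le32 lo; __le32 hi; } as used throughout
 * this header; kernel context (le32_to_cpu) assumed.
 */
static inline u64 roce_regpair_to_u64(struct regpair r)
{
	return ((u64)le32_to_cpu(r.hi) << 32) | le32_to_cpu(r.lo);
}

/* e.g.: u64 cnp_rx = roce_regpair_to_u64(stats->cnp_pkt_rcv); */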
+
+/* RoCE destroy qp requester output params */
struct roce_destroy_qp_req_output_params {
__le32 num_bound_mw;
__le32 cq_prod;
};
+/* RoCE destroy qp requester ramrod data */
struct roce_destroy_qp_req_ramrod_data {
struct regpair output_params_addr;
};
+/* RoCE destroy qp responder output params */
struct roce_destroy_qp_resp_output_params {
__le32 num_invalidated_mw;
__le32 cq_prod;
};
+/* RoCE destroy qp responder ramrod data */
struct roce_destroy_qp_resp_ramrod_data {
struct regpair output_params_addr;
};
+/* roce special events statistics */
struct roce_events_stats {
__le16 silent_drops;
__le16 rnr_naks_sent;
@@ -7508,6 +8085,7 @@ struct roce_events_stats {
__le32 reserved;
};
+/* ROCE slow path EQ cmd IDs */
enum roce_event_opcode {
ROCE_EVENT_CREATE_QP = 11,
ROCE_EVENT_MODIFY_QP,
@@ -7518,6 +8096,7 @@ enum roce_event_opcode {
MAX_ROCE_EVENT_OPCODE
};
+/* roce func init parameters */
struct roce_init_func_params {
u8 ll2_queue_id;
u8 cnp_vlan_priority;
@@ -7526,42 +8105,46 @@ struct roce_init_func_params {
__le32 cnp_send_timeout;
};
+/* roce func init ramrod data */
struct roce_init_func_ramrod_data {
struct rdma_init_func_ramrod_data rdma;
struct roce_init_func_params roce;
};
+/* roce modify qp requester ramrod data */
struct roce_modify_qp_req_ramrod_data {
__le16 flags;
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14
u8 fields;
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4
u8 max_ord;
u8 traffic_class;
u8 hop_limit;
@@ -7570,66 +8153,76 @@ struct roce_modify_qp_req_ramrod_data {
__le32 ack_timeout_val;
__le16 mtu;
__le16 reserved2;
- __le32 reserved3[3];
+ __le32 reserved3[2];
+ __le16 low_latency_phy_queue;
+ __le16 regular_latency_phy_queue;
__le32 src_gid[4];
__le32 dst_gid[4];
};
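This hunk is more than the mechanical re-alignment it resembles: bit 13 of flags, formerly the low bit of a 3-bit RESERVED1, becomes PHYSICAL_QUEUES_FLG (RESERVED1 narrows to 2 bits at shift 14), and one dword of reserved3 is re-carved into the two __le16 physical-queue IDs. A hedged usage sketch, reusing the SET_FIELD sketch above (queue IDs are made-up values):

/* Illustrative only: flag a physical-queue update in the modify-QP
 * ramrod. Builds flags in host order, then stores little-endian.
 */
static void roce_modify_qp_req_pq_sketch(struct roce_modify_qp_req_ramrod_data *p)
{
	u16 flags = le16_to_cpu(p->flags);

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG, 1);
	p->flags = cpu_to_le16(flags);
	p->low_latency_phy_queue = cpu_to_le16(0);	/* hypothetical ID */
	p->regular_latency_phy_queue = cpu_to_le16(1);	/* hypothetical ID */
}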
+/* roce modify qp responder ramrod data */
struct roce_modify_qp_resp_ramrod_data {
__le16 flags;
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x3F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11
u8 fields;
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3
u8 max_ird;
u8 traffic_class;
u8 hop_limit;
__le16 p_key;
__le32 flow_label;
__le16 mtu;
- __le16 reserved2;
+ __le16 low_latency_phy_queue;
+ __le16 regular_latency_phy_queue;
+ u8 reserved2[6];
__le32 src_gid[4];
__le32 dst_gid[4];
};
+/* RoCE query qp requester output params */
struct roce_query_qp_req_output_params {
__le32 psn;
__le32 flags;
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2
};
+/* RoCE query qp requester ramrod data */
struct roce_query_qp_req_ramrod_data {
struct regpair output_params_addr;
};
+/* RoCE query qp responder output params */
struct roce_query_qp_resp_output_params {
__le32 psn;
__le32 err_flag;
@@ -7639,10 +8232,12 @@ struct roce_query_qp_resp_output_params {
#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
};
+/* RoCE query qp responder ramrod data */
struct roce_query_qp_resp_ramrod_data {
struct regpair output_params_addr;
};
+/* ROCE ramrod command IDs */
enum roce_ramrod_cmd_id {
ROCE_RAMROD_CREATE_QP = 11,
ROCE_RAMROD_MODIFY_QP,
@@ -7653,163 +8248,163 @@ enum roce_ramrod_cmd_id {
MAX_ROCE_RAMROD_CMD_ID
};
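Both roce_event_opcode (above) and roce_ramrod_cmd_id start at 11 and list the same operations in the same order, presumably because the ramrod command ID is echoed back as the EQ completion opcode. A hedged sketch of a handler relying on that value alignment (illustrative, not the driver's actual EQ path; kernel context assumed for EINVAL):

/* Illustrative EQ dispatch; assumes event opcodes mirror the ramrod
 * command IDs, as the value-aligned enums suggest.
 */
static int roce_eq_opcode_sketch(u8 opcode)
{
	switch (opcode) {
	case ROCE_EVENT_CREATE_QP:
	case ROCE_EVENT_MODIFY_QP:
		return 0;	/* matching ramrod completed */
	default:
		return -EINVAL;
	}
}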
-struct mstorm_roce_req_conn_ag_ctx {
+struct e4_mstorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct mstorm_roce_resp_conn_ag_ctx {
+struct e4_mstorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct tstorm_roce_req_conn_ag_ctx {
+struct e4_tstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_SHIFT 1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
u8 flags1;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
u8 flags3;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 snd_nxt_psn;
__le32 snd_max_psn;
@@ -7825,95 +8420,95 @@ struct tstorm_roce_req_conn_ag_ctx {
u8 byte4;
u8 byte5;
__le16 snd_sq_cons;
- __le16 word2;
+ __le16 conn_dpi;
__le16 word3;
__le32 reg9;
__le32 reg10;
};
-struct tstorm_roce_resp_conn_ag_ctx {
+struct e4_tstorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 state;
u8 flags0;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 psn_and_rxmit_id_echo;
__le32 reg1;
__le32 reg2;
@@ -7935,63 +8530,63 @@ struct tstorm_roce_resp_conn_ag_ctx {
__le32 reg10;
};
-struct ustorm_roce_req_conn_ag_ctx {
+struct e4_ustorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8004,63 +8599,63 @@ struct ustorm_roce_req_conn_ag_ctx {
__le16 word3;
};
-struct ustorm_roce_resp_conn_ag_ctx {
+struct e4_ustorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8073,214 +8668,214 @@ struct ustorm_roce_resp_conn_ag_ctx {
__le16 word3;
};
-struct xstorm_roce_req_conn_ag_ctx {
+struct e4_xstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
u8 flags13;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -8302,224 +8897,224 @@ struct xstorm_roce_req_conn_ag_ctx {
__le32 orq_cons;
};
-struct xstorm_roce_resp_conn_ag_ctx {
+struct e4_xstorm_roce_resp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
- __le16 word1;
- __le16 irq_prod;
- __le16 word3;
- __le16 word4;
- __le16 ereserved1;
+ __le16 irq_prod_shadow;
+ __le16 word2;
__le16 irq_cons;
+ __le16 irq_prod;
+ __le16 e5_reserved1;
+ __le16 conn_dpi;
u8 rxmit_opcode;
u8 byte4;
u8 byte5;
@@ -8533,37 +9128,37 @@ struct xstorm_roce_resp_conn_ag_ctx {
__le32 msn_and_syndrome;
};
-struct ystorm_roce_req_conn_ag_ctx {
+struct e4_ystorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8577,37 +9172,37 @@ struct ystorm_roce_req_conn_ag_ctx {
__le32 reg3;
};
-struct ystorm_roce_resp_conn_ag_ctx {
+struct e4_ystorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8621,6 +9216,7 @@ struct ystorm_roce_resp_conn_ag_ctx {
__le32 reg3;
};
+/* Roce doorbell data */
enum roce_flavor {
PLAIN_ROCE,
RROCE_IPV4,
@@ -8628,228 +9224,231 @@ enum roce_flavor {
MAX_ROCE_FLAVOR
};
+/* The iwarp storm context of Ystorm */
struct ystorm_iwarp_conn_st_ctx {
__le32 reserved[4];
};
+/* The iwarp storm context of Pstorm */
struct pstorm_iwarp_conn_st_ctx {
__le32 reserved[36];
};
+/* The iwarp storm context of Xstorm */
struct xstorm_iwarp_conn_st_ctx {
__le32 reserved[44];
};
-struct xstorm_iwarp_conn_ag_ctx {
+struct e4_xstorm_iwarp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
u8 flags2;
-#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
-#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
u8 flags11;
-#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -8897,89 +9496,89 @@ struct xstorm_iwarp_conn_ag_ctx {
__le32 reg17;
};
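
Note on the E4_* rename above: it is mechanical, each MASK/SHIFT pair keeps its width and bit position and only gains the E4_ prefix. A minimal sketch of how such pairs are driven, assuming GET_FIELD()/SET_FIELD() helpers in the usual common_hsi.h style (the helper definitions and the example function below are illustrative, not part of this patch):

    #define GET_FIELD(value, name) \
            (((value) >> (name##_SHIFT)) & name##_MASK)

    #define SET_FIELD(value, name, flag) \
            do { \
                    (value) &= ~((name##_MASK) << (name##_SHIFT)); \
                    (value) |= (((u64)(flag)) & (u64)(name##_MASK)) << (name##_SHIFT); \
            } while (0)

    /* Hypothetical helper: mark the connection as present in QM queue 0. */
    static void mark_exist_in_qm0(struct e4_xstorm_iwarp_conn_ag_ctx *ctx)
    {
            SET_FIELD(ctx->flags0, E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0, 1);
    }
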
-struct tstorm_iwarp_conn_ag_ctx {
+struct e4_tstorm_iwarp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
-#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
-#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
-#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
-#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
-#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 unaligned_nxt_seq;
@@ -9001,51 +9600,56 @@ struct tstorm_iwarp_conn_ag_ctx {
__le32 last_hq_sequence;
};
+/* The iwarp storm context of Tstorm */
struct tstorm_iwarp_conn_st_ctx {
__le32 reserved[60];
};
+/* The iwarp storm context of Mstorm */
struct mstorm_iwarp_conn_st_ctx {
__le32 reserved[32];
};
+/* The iwarp storm context of Ustorm */
struct ustorm_iwarp_conn_st_ctx {
__le32 reserved[24];
};
-struct iwarp_conn_context {
+/* iwarp connection context */
+struct e4_iwarp_conn_context {
struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
struct regpair xstorm_st_padding[2];
- struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
- struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
+ struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
- struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_iwarp_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
struct ustorm_iwarp_conn_st_ctx ustorm_st_context;
};
+/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */
struct iwarp_create_qp_ramrod_data {
u8 flags;
#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
-#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
-#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
-#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5
-#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3
-#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6
+#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6
u8 reserved1;
__le16 pd;
__le16 sq_num_pages;
@@ -9061,6 +9665,7 @@ struct iwarp_create_qp_ramrod_data {
u8 reserved2[6];
};
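
The flags byte of iwarp_create_qp_ramrod_data is composed with the same SET_FIELD() pattern. A hedged sketch of the driver side (the function and the chosen values are assumptions, not from this patch):

    /* Sketch: enable RDMA read/write on a create-QP ramrod, no SRQ. */
    static void init_create_qp_flags(struct iwarp_create_qp_ramrod_data *p)
    {
            SET_FIELD(p->flags, IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN, 1);
            SET_FIELD(p->flags, IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN, 1);
            SET_FIELD(p->flags, IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, 0);
    }
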
+/* iWARP async event (EQE) opcodes */
enum iwarp_eqe_async_opcode {
IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE,
IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED,
@@ -9083,6 +9688,7 @@ struct iwarp_eqe_data_tcp_async_completion {
u8 reserved[5];
};
+/* iWARP sync EQE opcodes (ramrod completions) */
enum iwarp_eqe_sync_opcode {
IWARP_EVENT_TYPE_TCP_OFFLOAD =
11,
@@ -9095,6 +9701,7 @@ enum iwarp_eqe_sync_opcode {
MAX_IWARP_EQE_SYNC_OPCODE
};
+/* iWARP EQE completion status */
enum iwarp_fw_return_code {
IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5,
IWARP_CONN_ERROR_TCP_CONNECTION_RST,
@@ -9125,54 +9732,60 @@ enum iwarp_fw_return_code {
MAX_IWARP_FW_RETURN_CODE
};
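
Illustrative only (not from this patch): a driver-side handler might coarsely map the FW return codes above onto errno values, e.g.:

    static int iwarp_fw_ret_to_errno(enum iwarp_fw_return_code code)
    {
            switch (code) {
            case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
            case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
                    return -ECONNRESET;     /* TCP-level failures */
            default:
                    return -EIO;            /* anything else */
            }
    }

The real qed/qedr handling is more fine-grained; this only sketches the pattern.
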
+/* iWARP function init params */
struct iwarp_init_func_params {
u8 ll2_ooo_q_index;
u8 reserved1[7];
};
+/* iwarp func init ramrod data */
struct iwarp_init_func_ramrod_data {
struct rdma_init_func_ramrod_data rdma;
struct tcp_init_params tcp;
struct iwarp_init_func_params iwarp;
};
+/* iWARP QP - possible states to transition to */
enum iwarp_modify_qp_new_state_type {
IWARP_MODIFY_QP_STATE_CLOSING = 1,
- IWARP_MODIFY_QP_STATE_ERROR =
- 2,
+ IWARP_MODIFY_QP_STATE_ERROR = 2,
MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE
};
+/* iWARP modify QP ramrod data */
struct iwarp_modify_qp_ramrod_data {
__le16 transition_to_state;
__le16 flags;
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1
-#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2
-#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2
+#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1
#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
-#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF
-#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5
__le32 reserved3[3];
__le32 reserved4[8];
};
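
A hedged sketch of filling this ramrod for an ERROR transition; the 16-bit fields are little-endian on the wire, hence cpu_to_le16() (the helper name is an assumption):

    static void request_error_transition(struct iwarp_modify_qp_ramrod_data *p)
    {
            u16 flags = 0;

            SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 1);
            p->flags = cpu_to_le16(flags);
            p->transition_to_state = cpu_to_le16(IWARP_MODIFY_QP_STATE_ERROR);
    }
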
+/* MPA params for Enhanced mode */
struct mpa_rq_params {
__le32 ird;
__le32 ord;
};
+/* MPA host Address-Len for private data */
struct mpa_ulp_buffer {
struct regpair addr;
__le16 len;
__le16 reserved[3];
};
+/* iWARP MPA offload params common to Basic and Enhanced modes */
struct mpa_outgoing_params {
u8 crc_needed;
u8 reject;
@@ -9181,6 +9794,9 @@ struct mpa_outgoing_params {
struct mpa_ulp_buffer outgoing_ulp_buffer;
};
+/* iWARP MPA offload params passed by driver to FW in MPA Offload Request
+ * Ramrod.
+ */
struct iwarp_mpa_offload_ramrod_data {
struct mpa_outgoing_params common;
__le32 tcp_cid;
@@ -9188,18 +9804,20 @@ struct iwarp_mpa_offload_ramrod_data {
u8 tcp_connect_side;
u8 rtr_pref;
#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK 0x7
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3
u8 reserved2;
struct mpa_ulp_buffer incoming_ulp_buffer;
struct regpair async_eqe_output_buf;
struct regpair handle_for_async;
struct regpair shared_queue_addr;
+ __le16 rcv_wnd;
u8 stats_counter_id;
- u8 reserved3[15];
+ u8 reserved3[13];
};
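
The new rcv_wnd field takes its 2 bytes from reserved3 (15 -> 13), so the total ramrod size is unchanged and the FW ABI stays stable. Usage sketch (helper name illustrative):

    static void set_mpa_rcv_wnd(struct iwarp_mpa_offload_ramrod_data *p,
                                u16 rcv_wnd)
    {
            p->rcv_wnd = cpu_to_le16(rcv_wnd);      /* __le16 on the wire */
    }
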
+/* iWARP TCP connection offload params passed by driver to FW */
struct iwarp_offload_params {
struct mpa_ulp_buffer incoming_ulp_buffer;
struct regpair async_eqe_output_buf;
@@ -9211,22 +9829,24 @@ struct iwarp_offload_params {
u8 reserved[10];
};
+/* iWARP query QP output params */
struct iwarp_query_qp_output_params {
__le32 flags;
#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
-#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
-#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
u8 reserved1[4];
};
+/* iWARP query QP ramrod data */
struct iwarp_query_qp_ramrod_data {
struct regpair output_params_addr;
};
+/* iWARP Ramrod Command IDs */
enum iwarp_ramrod_cmd_id {
- IWARP_RAMROD_CMD_ID_TCP_OFFLOAD =
- 11,
+ IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
IWARP_RAMROD_CMD_ID_CREATE_QP,
@@ -9236,22 +9856,28 @@ enum iwarp_ramrod_cmd_id {
MAX_IWARP_RAMROD_CMD_ID
};
+/* Per PF iWARP retransmit path statistics */
struct iwarp_rxmit_stats_drv {
struct regpair tx_go_to_slow_start_event_cnt;
struct regpair tx_fast_retransmit_event_cnt;
};
+/* iWARP and TCP connection offload params passed by driver to FW in iWARP
+ * offload ramrod.
+ */
struct iwarp_tcp_offload_ramrod_data {
struct iwarp_offload_params iwarp;
struct tcp_offload_params_opt2 tcp;
};
+/* iWARP MPA negotiation types */
enum mpa_negotiation_mode {
MPA_NEGOTIATION_TYPE_BASIC = 1,
MPA_NEGOTIATION_TYPE_ENHANCED = 2,
MAX_MPA_NEGOTIATION_MODE
};
+/* iWARP MPA Enhanced mode RTR types */
enum mpa_rtr_type {
MPA_RTR_TYPE_NONE = 0,
MPA_RTR_TYPE_ZERO_SEND = 1,
@@ -9264,113 +9890,114 @@ enum mpa_rtr_type {
MAX_MPA_RTR_TYPE
};
+/* unaligned opaque data received from LL2 */
struct unaligned_opaque_data {
__le16 first_mpa_offset;
u8 tcp_payload_offset;
u8 flags;
#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK 0x1
-#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
-#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1
-#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1
-#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F
-#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2
+#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1
+#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F
+#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2
__le32 cid;
};
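
Sketch of decoding the LL2 opaque data on the receive path (the function is illustrative; accessors follow the GET_FIELD() convention shown earlier):

    static void parse_unaligned_opaque(const struct unaligned_opaque_data *d)
    {
            u16 mpa_off = le16_to_cpu(d->first_mpa_offset);
            u32 cid = le32_to_cpu(d->cid);
            u8 closed = GET_FIELD(d->flags,
                                  UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED);

            pr_debug("cid %u, first MPA offset %u, closed %u\n",
                     cid, mpa_off, closed);
    }
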
-struct mstorm_iwarp_conn_ag_ctx {
+struct e4_mstorm_iwarp_conn_ag_ctx {
u8 reserved;
u8 state;
u8 flags0;
-#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
-#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
-#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
-#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 rcq_cons;
__le16 rcq_cons_th;
__le32 reg0;
__le32 reg1;
};
-struct ustorm_iwarp_conn_ag_ctx {
+struct e4_ustorm_iwarp_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
-#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
u8 flags3;
-#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
-#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9383,37 +10010,37 @@ struct ustorm_iwarp_conn_ag_ctx {
__le16 word3;
};
-struct ystorm_iwarp_conn_ag_ctx {
+struct e4_ystorm_iwarp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9427,6 +10054,7 @@ struct ystorm_iwarp_conn_ag_ctx {
__le32 reg3;
};
+/* The fcoe storm context of Ystorm */
struct ystorm_fcoe_conn_st_ctx {
u8 func_mode;
u8 cos;
@@ -9442,45 +10070,49 @@ struct ystorm_fcoe_conn_st_ctx {
struct regpair reserved;
__le16 min_frame_size;
u8 protection_info_flags;
-#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
-#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
-#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
-#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1
-#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F
-#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2
u8 dst_protection_per_mss;
u8 src_protection_per_mss;
u8 ptu_log_page_size;
u8 flags;
-#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
-#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0
-#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
-#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1
-#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F
-#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2
u8 fcp_xfer_size;
};
+/* FCoE 16-bits vlan structure */
struct fcoe_vlan_fields {
__le16 fields;
-#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF
-#define FCOE_VLAN_FIELDS_VID_SHIFT 0
-#define FCOE_VLAN_FIELDS_CLI_MASK 0x1
-#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
-#define FCOE_VLAN_FIELDS_PRI_MASK 0x7
-#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF
+#define FCOE_VLAN_FIELDS_VID_SHIFT 0
+#define FCOE_VLAN_FIELDS_CLI_MASK 0x1
+#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
+#define FCOE_VLAN_FIELDS_PRI_MASK 0x7
+#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
};
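
The 16-bit layout above mirrors a standard 802.1Q tag (12-bit VID, 1-bit CLI/CFI, 3-bit priority). Unpacking sketch (illustrative helper):

    static void parse_fcoe_vlan(__le16 raw)
    {
            u16 v = le16_to_cpu(raw);
            u16 vid = GET_FIELD(v, FCOE_VLAN_FIELDS_VID);   /* bits 0-11 */
            u8 pri = GET_FIELD(v, FCOE_VLAN_FIELDS_PRI);    /* bits 13-15 */

            pr_debug("fcoe vlan: vid %u pri %u\n", vid, pri);
    }
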
+/* FCoE 16-bits vlan union */
union fcoe_vlan_field_union {
struct fcoe_vlan_fields fields;
__le16 val;
};
+/* FCoE 16-bits vlan, vif union */
union fcoe_vlan_vif_field_union {
union fcoe_vlan_field_union vlan;
__le16 vif;
};
+/* Ethernet context section */
struct pstorm_fcoe_eth_context_section {
u8 remote_addr_3;
u8 remote_addr_2;
@@ -9500,6 +10132,7 @@ struct pstorm_fcoe_eth_context_section {
__le16 inner_eth_type;
};
+/* The fcoe storm context of Pstorm */
struct pstorm_fcoe_conn_st_ctx {
u8 func_mode;
u8 cos;
@@ -9513,16 +10146,18 @@ struct pstorm_fcoe_conn_st_ctx {
u8 sid_1;
u8 sid_0;
u8 flags;
-#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1
-#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0
-#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1
-#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1
-#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
-#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2
-#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
-#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3
-#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0xF
-#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 4
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_SHIFT 4
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x7
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 5
u8 did_2;
u8 did_1;
u8 did_0;
@@ -9532,6 +10167,7 @@ struct pstorm_fcoe_conn_st_ctx {
u8 reserved1;
};
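
The new SINGLE_VLAN_FLAG bit is carved out of the former RESERVED field (0xF at shift 4 becomes a 1-bit flag at shift 4 plus 0x7 reserved at shift 5), so the flags byte stays fully described. Setting it would follow the usual pattern (illustrative):

    SET_FIELD(ctx->flags, PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG, 1);
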
+/* The fcoe storm context of Xstorm */
struct xstorm_fcoe_conn_st_ctx {
u8 func_mode;
u8 src_mac_index;
@@ -9539,16 +10175,16 @@ struct xstorm_fcoe_conn_st_ctx {
u8 cached_wqes_avail;
__le16 stat_ram_addr;
u8 flags;
-#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2
-#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3
-#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3
-#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7
-#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5
u8 cached_wqes_offset;
u8 reserved2;
u8 eth_hdr_size;
@@ -9574,18 +10210,18 @@ struct xstorm_fcoe_conn_st_ctx {
u8 fcp_cmd_byte_credit;
u8 fcp_rsp_byte_credit;
__le16 protection_info;
-#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0
-#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1
-#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2
-#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1
-#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3
-#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF
-#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4
-#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF
-#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8
__le16 xferq_pbl_next_index;
__le16 page_size;
u8 mid_seq;
@@ -9594,216 +10230,216 @@ struct xstorm_fcoe_conn_st_ctx {
struct fcoe_wqe cached_wqes[16];
};
-struct xstorm_fcoe_conn_ag_ctx {
+struct e4_xstorm_fcoe_conn_ag_ctx {
u8 reserved0;
- u8 fcoe_state;
+ u8 state;
u8 flags0;
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
u8 flags2;
-#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
u8 flags7;
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
u8 flags12;
-#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
-#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -9831,6 +10467,7 @@ struct xstorm_fcoe_conn_ag_ctx {
__le32 reg8;
};
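/*
 * Editor's note -- illustration only, not part of the patch: the renamed
 * E4_*_MASK / E4_*_SHIFT pairs above are consumed through the qed driver's
 * generic bitfield accessors. A minimal sketch, assuming GET_FIELD()/
 * SET_FIELD() as provided by the qed common HSI headers; the macro bodies
 * below are a plausible reconstruction, not verbatim kernel code.
 */
#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & (name##_MASK))

#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((name##_MASK) << (name##_SHIFT));		\
		(value) |= ((flag) & (name##_MASK)) << (name##_SHIFT);	\
	} while (0)

/* Example: flag the connection as present in QM0, then read the bit back. */
static inline void example_mark_qm0(struct e4_xstorm_fcoe_conn_ag_ctx *ctx)
{
	SET_FIELD(ctx->flags0, E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0, 1);
	WARN_ON(GET_FIELD(ctx->flags0,
			  E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0) != 1);
}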
+/* The fcoe storm context of Ustorm */
struct ustorm_fcoe_conn_st_ctx {
struct regpair respq_pbl_addr;
__le16 num_pages_in_pbl;
@@ -9840,150 +10477,150 @@ struct ustorm_fcoe_conn_st_ctx {
u8 reserved[2];
};
-struct tstorm_fcoe_conn_ag_ctx {
+struct e4_tstorm_fcoe_conn_ag_ctx {
u8 reserved0;
- u8 fcoe_state;
+ u8 state;
u8 flags0;
-#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
u8 flags1;
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
};
-struct ustorm_fcoe_conn_ag_ctx {
+struct e4_ustorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9996,72 +10633,76 @@ struct ustorm_fcoe_conn_ag_ctx {
__le16 word3;
};
+/* The fcoe storm context of Tstorm */
struct tstorm_fcoe_conn_st_ctx {
__le16 stat_ram_addr;
__le16 rx_max_fc_payload_len;
__le16 e_d_tov_val;
u8 flags;
-#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1
-#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0
-#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1
-#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1
-#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F
-#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2
u8 timers_cleanup_invocation_cnt;
__le32 reserved1[2];
- __le32 dst_mac_address_bytes0to3;
- __le16 dst_mac_address_bytes4to5;
+ __le32 dst_mac_address_bytes_0_to_3;
+ __le16 dst_mac_address_bytes_4_to_5;
__le16 ramrod_echo;
u8 flags1;
-#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3
-#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0
-#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F
-#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2
- u8 q_relative_offset;
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2
+ u8 cq_relative_offset;
+ u8 cmdq_relative_offset;
u8 bdq_resource_id;
- u8 reserved0[5];
+ u8 reserved0[4];
};
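/*
 * Editor's note -- illustration only: as the renamed fields above show, the
 * Tstorm destination MAC is split across a __le32 (bytes 0..3) and a __le16
 * (bytes 4..5). A hypothetical packing helper; the helper name and the
 * low-byte-first layout are assumptions, not taken from this patch.
 */
static inline void example_pack_dst_mac(struct tstorm_fcoe_conn_st_ctx *ctx,
					const u8 mac[6])
{
	ctx->dst_mac_address_bytes_0_to_3 =
		cpu_to_le32(mac[0] | (mac[1] << 8) |
			    (mac[2] << 16) | ((u32)mac[3] << 24));
	ctx->dst_mac_address_bytes_4_to_5 =
		cpu_to_le16(mac[4] | (mac[5] << 8));
}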
-struct mstorm_fcoe_conn_ag_ctx {
+struct e4_mstorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
+/* Fast path part of the fcoe storm context of Mstorm */
struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
__le16 xfer_prod;
- __le16 reserved1;
+ u8 num_cqs;
+ u8 reserved1;
u8 protection_info;
#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK 0x1
#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0
@@ -10073,6 +10714,7 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
u8 reserved2[2];
};
+/* Non fast path part of the fcoe storm context of Mstorm */
struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
__le16 conn_id;
__le16 stat_ram_addr;
@@ -10088,37 +10730,46 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
struct regpair reserved2[3];
};
+/* The fcoe storm context of Mstorm */
struct mstorm_fcoe_conn_st_ctx {
struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp;
struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp;
};
-struct fcoe_conn_context {
+/* fcoe connection context */
+struct e4_fcoe_conn_context {
struct ystorm_fcoe_conn_st_ctx ystorm_st_context;
struct pstorm_fcoe_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_fcoe_conn_st_ctx xstorm_st_context;
- struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
+ struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
struct regpair xstorm_ag_padding[6];
struct ustorm_fcoe_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
- struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
+ struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
struct regpair tstorm_ag_padding[2];
struct timers_context timer_context;
- struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
struct tstorm_fcoe_conn_st_ctx tstorm_st_context;
- struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
+ struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
struct mstorm_fcoe_conn_st_ctx mstorm_st_context;
};
+/* FCoE connection offload params passed by driver to FW in FCoE offload
+ * ramrod.
+ */
struct fcoe_conn_offload_ramrod_params {
struct fcoe_conn_offload_ramrod_data offload_ramrod_data;
};
+/* FCoE connection terminate params passed by driver to FW in FCoE terminate
+ * conn ramrod.
+ */
struct fcoe_conn_terminate_ramrod_params {
struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data;
};
+/* FCoE event type */
enum fcoe_event_type {
FCOE_EVENT_INIT_FUNC,
FCOE_EVENT_DESTROY_FUNC,
@@ -10129,10 +10780,12 @@ enum fcoe_event_type {
MAX_FCOE_EVENT_TYPE
};
+/* FCoE init params passed by driver to FW in FCoE init ramrod */
struct fcoe_init_ramrod_params {
struct fcoe_init_func_ramrod_data init_ramrod_data;
};
+/* FCoE ramrod Command IDs */
enum fcoe_ramrod_cmd_id {
FCOE_RAMROD_CMD_ID_INIT_FUNC,
FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
@@ -10142,41 +10795,44 @@ enum fcoe_ramrod_cmd_id {
MAX_FCOE_RAMROD_CMD_ID
};
+/* FCoE statistics params buffer passed by driver to FW in FCoE statistics
+ * ramrod.
+ */
struct fcoe_stat_ramrod_params {
struct fcoe_stat_ramrod_data stat_ramrod_data;
};
-struct ystorm_fcoe_conn_ag_ctx {
+struct e4_ystorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10190,230 +10846,233 @@ struct ystorm_fcoe_conn_ag_ctx {
__le32 reg3;
};
+/* The iscsi storm connection context of Ystorm */
struct ystorm_iscsi_conn_st_ctx {
- __le32 reserved[4];
+ __le32 reserved[8];
};
+/* Combined iSCSI and TCP storm connection of Pstorm */
struct pstorm_iscsi_tcp_conn_st_ctx {
__le32 tcp[32];
__le32 iscsi[4];
};
+/* The combined tcp and iscsi storm context of Xstorm */
struct xstorm_iscsi_tcp_conn_st_ctx {
- __le32 reserved_iscsi[40];
__le32 reserved_tcp[4];
+ __le32 reserved_iscsi[44];
};
-struct xstorm_iscsi_conn_ag_ctx {
+struct e4_xstorm_iscsi_conn_ag_ctx {
u8 cdu_validation;
u8 state;
u8 flags0;
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
u8 flags2;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
u8 flags6;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
u8 flags11;
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -10449,7 +11108,7 @@ struct xstorm_iscsi_conn_ag_ctx {
u8 byte13;
u8 byte14;
u8 byte15;
- u8 ereserved;
+ u8 e5_reserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -10461,89 +11120,89 @@ struct xstorm_iscsi_conn_ag_ctx {
__le32 reg17;
};
-struct tstorm_iscsi_conn_ag_ctx {
+struct e4_tstorm_iscsi_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -10558,63 +11217,63 @@ struct tstorm_iscsi_conn_ag_ctx {
__le16 word0;
};
-struct ustorm_iscsi_conn_ag_ctx {
+struct e4_ustorm_iscsi_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10627,113 +11286,117 @@ struct ustorm_iscsi_conn_ag_ctx {
__le16 word3;
};
+/* The iscsi storm connection context of Tstorm */
struct tstorm_iscsi_conn_st_ctx {
- __le32 reserved[40];
+ __le32 reserved[44];
};
-struct mstorm_iscsi_conn_ag_ctx {
+struct e4_mstorm_iscsi_conn_ag_ctx {
u8 reserved;
u8 state;
u8 flags0;
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
+/* Combined iSCSI and TCP storm connection of Mstorm */
struct mstorm_iscsi_tcp_conn_st_ctx {
__le32 reserved_tcp[20];
- __le32 reserved_iscsi[8];
+ __le32 reserved_iscsi[12];
};
+/* The iscsi storm context of Ustorm */
struct ustorm_iscsi_conn_st_ctx {
__le32 reserved[52];
};
-struct iscsi_conn_context {
+/* iscsi connection context */
+struct e4_iscsi_conn_context {
struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
- struct regpair ystorm_st_padding[2];
struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct pb_context xpb2_context;
struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
struct regpair xstorm_st_padding[2];
- struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
- struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+ struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
struct regpair tstorm_ag_padding[2];
struct timers_context timer_context;
- struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
struct pb_context upb_context;
struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
- struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+ struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
};
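
The MASK/SHIFT pairs throughout these renamed context structures describe sub-byte fields packed into the u8 flags bytes. A minimal stand-alone sketch of how such pairs are typically consumed; the qed driver has its own SET_FIELD()/GET_FIELD() helpers, so the DEMO_* accessors below are illustrative stand-ins rather than the driver's definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field accessors; the driver's SET_FIELD()/GET_FIELD() are
 * equivalent in spirit but not reproduced here.
 */
#define DEMO_FIELD_SET(byte, name, val)					\
	((byte) = ((byte) & ~((name##_MASK) << (name##_SHIFT))) |	\
		  (((val) & (name##_MASK)) << (name##_SHIFT)))
#define DEMO_FIELD_GET(byte, name)					\
	(((byte) >> (name##_SHIFT)) & (name##_MASK))

/* Same shape as the E4_*_CF0 pairs above: a 2-bit field at bits 7:6 */
#define DEMO_CF0_MASK	0x3
#define DEMO_CF0_SHIFT	6

int main(void)
{
	uint8_t flags0 = 0;

	DEMO_FIELD_SET(flags0, DEMO_CF0, 2);
	printf("flags0=0x%02x cf0=%u\n", (unsigned)flags0,
	       (unsigned)DEMO_FIELD_GET(flags0, DEMO_CF0)); /* 0x80, 2 */
	return 0;
}
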
+/* iSCSI init params passed by driver to FW in iSCSI init ramrod */
struct iscsi_init_ramrod_params {
struct iscsi_spe_func_init iscsi_init_spe;
struct tcp_init_params tcp_init;
};
-struct ystorm_iscsi_conn_ag_ctx {
+struct e4_ystorm_iscsi_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -11613,7 +12276,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
-#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index a05feb38c6ee..fca2dbd93ad9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -807,3 +807,71 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
return rc;
}
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, const char *phase)
+{
+ u32 size = PAGE_SIZE / 2, val;
+ struct qed_dmae_params params;
+ int rc = 0;
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 *p_tmp;
+
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ 2 * size, &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(p_hwfn,
+ "DMAE sanity [%s]: failed to allocate memory\n",
+ phase);
+ return -ENOMEM;
+ }
+
+ /* Fill the bottom half of the allocated memory with a known pattern */
+ for (p_tmp = (u32 *)p_virt;
+ p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
+ /* Save the address itself as the value */
+ val = (u32)(uintptr_t)p_tmp;
+ *p_tmp = val;
+ }
+
+ /* Zero the top half of the allocated memory */
+ memset((u8 *)p_virt + size, 0, size);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
+ phase,
+ (u64)p_phys,
+ p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);
+
+ memset(&params, 0, sizeof(params));
+ rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
+ size / 4 /* size_in_dwords */, &params);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
+ phase, rc);
+ goto out;
+ }
+
+ /* Verify that the top half of the allocated memory has the pattern */
+ for (p_tmp = (u32 *)((u8 *)p_virt + size);
+ p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
+ /* The corresponding address in the bottom half */
+ val = (u32)(uintptr_t)p_tmp - size;
+
+ if (*p_tmp != val) {
+ DP_NOTICE(p_hwfn,
+ "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
+ phase,
+ (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
+ p_tmp, *p_tmp, val);
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);
+ return rc;
+}
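
The new qed_dmae_sanity() follows a classic copy-engine self-test: seed each source word with a value derived from its own address, copy the block, then verify the destination word by word. A host-only sketch of the same pattern, with memcpy() standing in for the qed_dmae_host2host() transfer that the driver actually exercises:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Self-test a copy path: seed src words from their own addresses, copy,
 * then verify dst word by word. memcpy() stands in for the DMA transfer.
 */
static int copy_sanity(size_t size)
{
	size_t i, words = size / sizeof(uint32_t);
	uint32_t *buf, *src, *dst;
	int rc = 0;

	buf = malloc(2 * size);
	if (!buf)
		return -1;
	src = buf;
	dst = buf + words;

	for (i = 0; i < words; i++)
		src[i] = (uint32_t)(uintptr_t)&src[i];	/* address as value */
	memset(dst, 0, size);

	memcpy(dst, src, size);	/* the driver does this via the DMA engine */

	for (i = 0; i < words; i++) {
		if (dst[i] != src[i]) {
			fprintf(stderr, "mismatch at word %zu\n", i);
			rc = -1;
			break;
		}
	}

	free(buf);
	return rc;
}

int main(void)
{
	return copy_sanity(2048) ? 1 : 0;
}
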
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index f2505c691c26..8db2839a8ec8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -299,4 +299,8 @@ union qed_qm_pq_params {
int qed_init_fw_data(struct qed_dev *cdev,
const u8 *fw_data);
+
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, const char *phase);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index b069ad088269..18fb5062a83d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -31,6 +31,7 @@
*/
#include <linux/types.h>
+#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -40,102 +41,197 @@
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
+#define CDU_VALIDATION_DEFAULT_CFG 61
+
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
+ {400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
+ {528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
+ {608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
+};
+
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
+ {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
+};
+
/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
0x100) - 1 : 0)
-#define QM_INVALID_PQ_ID 0xffff
+#define QM_INVALID_PQ_ID 0xffff
+
/* Feature enable */
-#define QM_BYPASS_EN 1
-#define QM_BYTE_CRD_EN 1
+#define QM_BYPASS_EN 1
+#define QM_BYTE_CRD_EN 1
+
/* Other PQ constants */
-#define QM_OTHER_PQS_PER_PF 4
+#define QM_OTHER_PQS_PER_PF 4
+
/* WFQ constants */
-#define QM_WFQ_UPPER_BOUND 62500000
-#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
-#define QM_WFQ_VP_PQ_PF_SHIFT 5
-#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
-#define QM_WFQ_MAX_INC_VAL 43750000
+
+/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_WFQ_UPPER_BOUND 62500000
+
+/* Bit of VOQ in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+
+/* Bit of PF in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
+
+/* 0x9000 = 4*9*1024 */
+#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+
+/* Max WFQ increment value is 0.7 * upper bound */
+#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
/* RL constants */
-#define QM_RL_UPPER_BOUND 62500000
-#define QM_RL_PERIOD 5 /* in us */
-#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
-#define QM_RL_MAX_INC_VAL 43750000
-#define QM_RL_INC_VAL(rate) max_t(u32, \
- (u32)(((rate ? rate : \
- 1000000) * \
- QM_RL_PERIOD * \
- 101) / (8 * 100)), 1)
+
+/* Period in us */
+#define QM_RL_PERIOD 5
+
+/* Period in 25MHz cycles */
+#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
+
+/* RL increment value - rate is specified in Mbps */
+#define QM_RL_INC_VAL(rate) ({ \
+ typeof(rate) __rate = (rate); \
+ max_t(u32, \
+ (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
+ (8 * 100)), \
+ 1); })
+
+/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_RL_UPPER_BOUND 62500000
+
+/* Max PF RL increment value is 0.7 * upper bound */
+#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
+
+/* Vport RL Upper bound, link speed is in Mbps */
+#define QM_VP_RL_UPPER_BOUND(speed) ((u32)max_t(u32, \
+ QM_RL_INC_VAL(speed), \
+ 9700 + 1000))
+
+/* Max Vport RL increment value is the Vport RL upper bound */
+#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)
+
+/* Vport RL credit threshold in case of QM bypass */
+#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
+
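
To make the QM_RL_INC_VAL() arithmetic concrete: the increment is the byte credit granted per QM_RL_PERIOD (5 us) at the given rate in Mbps, padded by 1% via the 101/100 factor. For 25000 Mbps that is 25000 * 5 * 101 / 800 = 15781 with integer division. A small stand-alone check of that computation (a mirror of the macro for illustration, not a replacement):

#include <stdint.h>
#include <stdio.h>

#define DEMO_QM_RL_PERIOD 5	/* us, as QM_RL_PERIOD above */

/* Mirrors the QM_RL_INC_VAL() arithmetic: rate in Mbps -> bytes credited
 * per period, padded by 1%; a zero rate falls back to 1000000 Mbps.
 */
static uint32_t demo_rl_inc_val(uint32_t rate_mbps)
{
	uint64_t rate = rate_mbps ? rate_mbps : 1000000;
	uint32_t inc = (uint32_t)((rate * DEMO_QM_RL_PERIOD * 101) / (8 * 100));

	return inc ? inc : 1;
}

int main(void)
{
	printf("%u\n", demo_rl_inc_val(25000));	/* prints 15781 */
	return 0;
}
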
/* AFullOprtnstcCrdMask constants */
-#define QM_OPPOR_LINE_VOQ_DEF 1
-#define QM_OPPOR_FW_STOP_DEF 0
-#define QM_OPPOR_PQ_EMPTY_DEF 1
+#define QM_OPPOR_LINE_VOQ_DEF 1
+#define QM_OPPOR_FW_STOP_DEF 0
+#define QM_OPPOR_PQ_EMPTY_DEF 1
+
/* Command Queue constants */
-#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
- (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
-#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) ( \
- PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
- (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
- PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
-#define QM_VOQ_LINE_CRD(pbf_cmd_lines) ((((pbf_cmd_lines) - \
- 4) * \
- 2) | QM_LINE_CRD_REG_SIGN_BIT)
+
+/* Pure LB CmdQ lines (+spare) */
+#define PBF_CMDQ_PURE_LB_LINES 150
+
+#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
+
+#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+ (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+
+#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
+ (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
+ (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
+ ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
+
/* BTB: blocks constants (block size = 256B) */
-#define BTB_JUMBO_PKT_BLOCKS 38
-#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
-#define BTB_PURE_LB_FACTOR 10
-#define BTB_PURE_LB_RATIO 7
+
+/* 256B blocks in 9700B packet */
+#define BTB_JUMBO_PKT_BLOCKS 38
+
+/* Headroom per-port */
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
+#define BTB_PURE_LB_FACTOR 10
+
+/* Factored (hence really 0.7) */
+#define BTB_PURE_LB_RATIO 7
+
/* QM stop command constants */
-#define QM_STOP_PQ_MASK_WIDTH 32
-#define QM_STOP_CMD_ADDR 2
-#define QM_STOP_CMD_STRUCT_SIZE 2
-#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
-#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
-#define QM_STOP_CMD_PAUSE_MASK_MASK -1
-#define QM_STOP_CMD_GROUP_ID_OFFSET 1
-#define QM_STOP_CMD_GROUP_ID_SHIFT 16
-#define QM_STOP_CMD_GROUP_ID_MASK 15
-#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
-#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
-#define QM_STOP_CMD_PQ_TYPE_MASK 1
-#define QM_STOP_CMD_MAX_POLL_COUNT 100
-#define QM_STOP_CMD_POLL_PERIOD_US 500
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 2
+#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
+#define QM_STOP_CMD_PAUSE_MASK_MASK -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
/* QM command macros */
-#define QM_CMD_STRUCT_SIZE(cmd) cmd ## \
- _STRUCT_SIZE
-#define QM_CMD_SET_FIELD(var, cmd, field, \
- value) SET_FIELD(var[cmd ## _ ## field ## \
- _OFFSET], \
- cmd ## _ ## field, \
- value)
-/* QM: VOQ macros */
-#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) * \
- (max_phys_tcs_per_port) + \
- (tc))
-#define LB_VOQ(port) ( \
- MAX_PHYS_VOQS + (port))
-#define VOQ(port, tc, max_phy_tcs_pr_port) \
- ((tc) < \
- LB_TC ? PHYS_VOQ(port, \
- tc, \
- max_phy_tcs_pr_port) \
- : LB_VOQ(port))
+#define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, value) \
+ SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
+ cmd ## _ ## field, \
+ value)
+
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \
+ ext_voq, wrr) \
+ do { \
+ typeof(map) __map; \
+ memset(&__map, 0, sizeof(__map)); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
+ rl_valid); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
+ vp_pq_id); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
+ SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VOQ, ext_voq); \
+ SET_FIELD(__map.reg, \
+ QM_RF_PQ_MAP_ ## chip ## _WRR_WEIGHT_GROUP, wrr); \
+ STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
+ *((u32 *)&__map)); \
+ (map) = __map; \
+ } while (0)
+
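
QM_INIT_TX_PQ_MAP() leans on two standard multi-statement-macro conventions: a do { } while (0) wrapper so a call behaves as one statement under if/else, and a typeof() temporary so the map is assembled locally before being published. A minimal sketch of that hygiene pattern (typeof is the GNU C extension the kernel builds with; the macro below is a demo, not driver code):

#include <stdio.h>

/* Assemble a value in a typeof() temporary, publish it once; the
 * do { } while (0) wrapper keeps the macro a single statement.
 */
#define DEMO_INIT_AND_STORE(dst, lo, hi)		\
	do {						\
		typeof(dst) __tmp = 0;			\
		__tmp |= (lo);				\
		__tmp |= (hi) << 8;			\
		(dst) = __tmp;				\
	} while (0)

int main(void)
{
	unsigned int reg = 0;

	if (1)
		DEMO_INIT_AND_STORE(reg, 0x34, 0x12); /* safe under if/else */
	else
		reg = 0;

	printf("0x%x\n", reg);	/* 0x1234 */
	return 0;
}
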
+#define WRITE_PQ_INFO_TO_RAM 1
+#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
+ (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
+ ((rl_valid) << 22) | ((rl) << 24))
+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
+
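
PQ_INFO_ELEMENT() packs the PQ attributes into a single 32-bit word: vp from bit 0, pf from bit 12, tc from bit 16, port from bit 20, rl_valid at bit 22 and rl from bit 24. The sketch below packs one value and unpacks it again; the field widths are inferred from the neighbouring shift amounts and should be treated as an assumption, since the macro itself does not mask its inputs:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl)	\
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) |		\
	 ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))

int main(void)
{
	uint32_t info = DEMO_PQ_INFO_ELEMENT(0x123, 5, 3, 1, 1, 7);

	/* Field widths are inferred from the neighbouring shift amounts */
	printf("vp=0x%x pf=%u tc=%u port=%u rl_valid=%u rl=%u\n",
	       info & 0xfff, (info >> 12) & 0xf, (info >> 16) & 0xf,
	       (info >> 20) & 0x3, (info >> 22) & 0x1, info >> 24);
	return 0;
}
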
/******************** INTERNAL IMPLEMENTATION *********************/
+
+/* Returns the external VOQ number */
+static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
+ u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
+{
+ if (tc == PURE_LB_TC)
+ return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
+ else
+ return port_id * max_phys_tcs_per_port + tc;
+}
+
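
qed_get_ext_voq() replaces the old PHYS_VOQ/LB_VOQ/VOQ macros: a physical TC maps to port_id * max_phys_tcs_per_port + tc, while each port's pure-LB TC lands in a dedicated range after all physical VOQs. A quick stand-alone check of that numbering (the constants are illustrative, not the driver's):

#include <stdio.h>

/* Illustrative constants; the driver's actual values live in its headers */
#define DEMO_NUM_OF_PHYS_TCS	8
#define DEMO_MAX_NUM_PORTS_BB	2
#define DEMO_PURE_LB_TC		DEMO_NUM_OF_PHYS_TCS

static unsigned int demo_get_ext_voq(unsigned int port_id, unsigned int tc,
				     unsigned int max_phys_tcs_per_port)
{
	if (tc == DEMO_PURE_LB_TC)
		return DEMO_NUM_OF_PHYS_TCS * DEMO_MAX_NUM_PORTS_BB + port_id;
	return port_id * max_phys_tcs_per_port + tc;
}

int main(void)
{
	/* Port 1, TC 3, 4 physical TCs per port: VOQ 1 * 4 + 3 = 7 */
	printf("phys voq = %u\n", demo_get_ext_voq(1, 3, 4));
	/* Pure LB of port 1: lands after the 8 * 2 = 16 physical slots */
	printf("lb voq   = %u\n", demo_get_ext_voq(1, DEMO_PURE_LB_TC, 4));
	return 0;
}
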
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
+ u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
+
/* Enable RLs for all VOQs */
- STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
- (1 << MAX_NUM_VOQS) - 1);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFVOQENABLE_RT_OFFSET,
+ (u32)voq_bit_mask);
+ if (num_ext_voqs >= 32)
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
+ (u32)(voq_bit_mask >> 32));
+
/* Write RL period */
STORE_RT_REG(p_hwfn,
QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
@@ -147,7 +243,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
- QM_RL_UPPER_BOUND);
+ QM_PF_RL_UPPER_BOUND);
}
}
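
Because the external VOQ count can exceed 32, the enable mask above is built as a u64 and written as two 32-bit runtime registers: the low word always, the high word only when num_ext_voqs >= 32. The same split in isolation, as a hedged sketch (the VOQ count of 36 is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Enable bits 0..num_voqs-1 across a low/high pair of 32-bit registers */
static void demo_write_voq_enable(unsigned int num_voqs,
				  uint32_t *reg_lo, uint32_t *reg_hi)
{
	uint64_t mask = num_voqs < 64 ? ((uint64_t)1 << num_voqs) - 1
				      : UINT64_MAX;

	*reg_lo = (uint32_t)mask;		/* bits 31:0, always written */
	if (num_voqs >= 32)
		*reg_hi = (uint32_t)(mask >> 32);	/* bits 63:32 */
}

int main(void)
{
	uint32_t lo = 0, hi = 0;

	demo_write_voq_enable(36, &lo, &hi);	/* 36 is an illustrative count */
	printf("lo=0x%08x hi=0x%08x\n", lo, hi); /* 0xffffffff, 0x0000000f */
	return 0;
}
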
@@ -181,7 +277,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
- QM_RL_UPPER_BOUND);
+ QM_VP_RL_BYPASS_THRESH_SPEED);
}
}
@@ -202,15 +298,15 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
* the specified VOQ.
*/
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
- u8 voq, u16 cmdq_lines)
+ u8 ext_voq, u16 cmdq_lines)
{
- u32 qm_line_crd;
+ u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
- qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
- OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
(u32)cmdq_lines);
- STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
- STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
+ qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
qm_line_crd);
}
@@ -221,43 +317,52 @@ static void qed_cmdq_lines_rt_init(
u8 max_phys_tcs_per_port,
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
- u8 tc, voq, port_id, num_tcs_in_port;
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
+ u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+
+ /* Clear PBF lines of all VOQs */
+ for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
- /* Clear PBF lines for all VOQs */
- for (voq = 0; voq < MAX_NUM_VOQS; voq++)
- STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
- if (port_params[port_id].active) {
- u16 phys_lines, phys_lines_per_tc;
-
- /* find #lines to divide between active phys TCs */
- phys_lines = port_params[port_id].num_pbf_cmd_lines -
- PBF_CMDQ_PURE_LB_LINES;
- /* find #lines per active physical TC */
- num_tcs_in_port = 0;
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1)
- num_tcs_in_port++;
- }
+ u16 phys_lines, phys_lines_per_tc;
- phys_lines_per_tc = phys_lines / num_tcs_in_port;
- /* init registers per active TC */
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) != 1)
- continue;
+ if (!port_params[port_id].active)
+ continue;
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
- phys_lines_per_tc);
- }
+ /* Find number of command queue lines to divide between the
+ * active physical TCs. In E5, 1/8 of the lines are reserved.
+		 * The lines for pure LB TC are subtracted.
+ */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines;
+ phys_lines -= PBF_CMDQ_PURE_LB_LINES;
+
+ /* Find #lines per active physical TC */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++)
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ num_tcs_in_port++;
+ phys_lines_per_tc = phys_lines / num_tcs_in_port;
- /* init registers for pure LB TC */
- qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
- PBF_CMDQ_PURE_LB_LINES);
+ /* Init registers per active TC */
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ port_id,
+ tc, max_phys_tcs_per_port);
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ qed_cmdq_lines_voq_rt_init(p_hwfn,
+ ext_voq,
+ phys_lines_per_tc);
}
+
+ /* Init registers for pure LB TC */
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ port_id,
+ PURE_LB_TC, max_phys_tcs_per_port);
+ qed_cmdq_lines_voq_rt_init(p_hwfn,
+ ext_voq, PBF_CMDQ_PURE_LB_LINES);
}
}
@@ -268,11 +373,9 @@ static void qed_btb_blocks_rt_init(
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
- u8 tc, voq, port_id, num_tcs_in_port;
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
- u32 temp;
-
if (!port_params[port_id].active)
continue;
@@ -280,13 +383,14 @@ static void qed_btb_blocks_rt_init(
usable_blocks = port_params[port_id].num_btb_blocks -
BTB_HEADROOM_BLOCKS;
- /* find blocks per physical TC */
+		/* Find blocks per physical TC. Use a factor to avoid
+		 * floating point arithmetic.
+ */
num_tcs_in_port = 0;
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
if (((port_params[port_id].active_phys_tcs >>
tc) & 0x1) == 1)
num_tcs_in_port++;
- }
pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
(num_tcs_in_port * BTB_PURE_LB_FACTOR +
@@ -299,47 +403,55 @@ static void qed_btb_blocks_rt_init(
/* Init physical TCs */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) != 1)
- continue;
-
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
- phys_blocks);
+ tc) & 0x1) == 1) {
+ ext_voq =
+ qed_get_ext_voq(p_hwfn,
+ port_id,
+ tc,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn,
+ PBF_BTB_GUARANTEED_RT_OFFSET
+ (ext_voq), phys_blocks);
+ }
}
/* Init pure LB TC */
- temp = LB_VOQ(port_id);
- STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ port_id,
+ PURE_LB_TC, max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
pure_lb_blocks);
}
}
/* Prepare Tx PQ mapping runtime init values for the specified PF */
-static void qed_tx_pq_map_rt_init(
- struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params,
- u32 base_mem_addr_4kb)
+static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params,
+ u32 base_mem_addr_4kb)
{
- struct init_qm_vport_params *vport_params = p_params->vport_params;
- u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
- u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
- u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
- QM_PF_QUEUE_GROUP_SIZE;
- u16 i, pq_id, pq_group;
-
- /* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+ struct init_qm_vport_params *vport_params = p_params->vport_params;
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
- u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
- u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
- u32 mem_addr_4kb = base_mem_addr_4kb;
+ u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
+ struct init_qm_pq_params *pq_params = p_params->pq_params;
+ u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
+
+ num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+
+ first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+ last_pq_group = (p_params->start_pq + num_pqs - 1) /
+ QM_PF_QUEUE_GROUP_SIZE;
+
+ pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+ vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+ mem_addr_4kb = base_mem_addr_4kb;
/* Set mapping from PQ group to PF */
for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
(u32)(p_params->pf_id));
+
/* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
QM_PQ_SIZE_256B(p_params->num_pf_cids));
@@ -348,58 +460,82 @@ static void qed_tx_pq_map_rt_init(
/* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
- u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
- p_params->max_phys_tcs_per_port);
- bool is_vf_pq = (i >= p_params->num_pf_pqs);
- struct qm_rf_pq_map tx_pq_map;
-
- bool rl_valid = p_params->pq_params[i].rl_valid &&
- (p_params->pq_params[i].vport_id <
- MAX_QM_GLOBAL_RLS);
+ u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id;
+ u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ struct qm_rf_pq_map_e4 tx_pq_map;
+ bool is_vf_pq, rl_valid;
+ u16 *p_first_tx_pq_id;
+
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ p_params->port_id,
+ tc_id,
+ p_params->max_phys_tcs_per_port);
+ is_vf_pq = (i >= p_params->num_pf_pqs);
+ rl_valid = pq_params[i].rl_valid &&
+ pq_params[i].vport_id < max_qm_global_rls;
/* Update first Tx PQ of VPORT/TC */
- u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
- p_params->start_vport;
- u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
- u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
+ vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
+ p_first_tx_pq_id =
+ &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
+ if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
+ u32 map_val =
+ (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+ (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);
- if (first_tx_pq_id == QM_INVALID_PQ_ID) {
/* Create new VP PQ */
- pq_ids[p_params->pq_params[i].tc_id] = pq_id;
- first_tx_pq_id = pq_id;
+ *p_first_tx_pq_id = pq_id;
/* Map VP PQ to VOQ and PF */
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPMAP_RT_OFFSET +
- first_tx_pq_id,
- (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
- (p_params->pf_id <<
- QM_WFQ_VP_PQ_PF_SHIFT));
+ *p_first_tx_pq_id,
+ map_val);
}
- if (p_params->pq_params[i].rl_valid && !rl_valid)
+ /* Check RL ID */
+ if (pq_params[i].rl_valid && pq_params[i].vport_id >=
+ max_qm_global_rls)
DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration");
- /* Fill PQ map entry */
- memset(&tx_pq_map, 0, sizeof(tx_pq_map));
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
- SET_FIELD(tx_pq_map.reg,
- QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
- rl_valid ?
- p_params->pq_params[i].vport_id : 0);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
- p_params->pq_params[i].wrr_group);
- /* Write PQ map entry to CAM */
- STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
- *((u32 *)&tx_pq_map));
- /* Set base address */
+ "Invalid VPORT ID for rate limiter configuration\n");
+
+ /* Prepare PQ map entry */
+ QM_INIT_TX_PQ_MAP(p_hwfn,
+ tx_pq_map,
+ E4,
+ pq_id,
+ rl_valid ? 1 : 0,
+ *p_first_tx_pq_id,
+ rl_valid ? pq_params[i].vport_id : 0,
+ ext_voq, pq_params[i].wrr_group);
+
+ /* Set PQ base address */
STORE_RT_REG(p_hwfn,
QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
+ /* Clear PQ pointer table entry (64 bit) */
+ if (p_params->is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_PTRTBLTX_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
+ /* Write PQ info to RAM */
+ if (WRITE_PQ_INFO_TO_RAM != 0) {
+ u32 pq_info = 0;
+
+ pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
+ p_params->pf_id,
+ tc_id,
+ p_params->port_id,
+ rl_valid ? 1 : 0,
+ rl_valid ?
+ pq_params[i].vport_id : 0);
+ qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
+ pq_info);
+ }
+
/* If VF PQ, add indication to PQ VF mask */
if (is_vf_pq) {
tx_pq_vf_mask[pq_id /
@@ -421,16 +557,16 @@ static void qed_tx_pq_map_rt_init(
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
- u8 port_id,
u8 pf_id,
+ bool is_pf_loading,
u32 num_pf_cids,
u32 num_tids, u32 base_mem_addr_4kb)
{
u32 pq_size, pq_mem_4kb, mem_addr_4kb;
- u16 i, pq_id, pq_group;
+ u16 i, j, pq_id, pq_group;
- /* a single other PQ group is used in each PF,
- * where PQ group i is used in PF i.
+ /* A single other PQ group is used in each PF, where PQ group i is used
+ * in PF i.
*/
pq_group = pf_id;
pq_size = num_pf_cids + num_tids;
@@ -440,16 +576,25 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Map PQ group to PF */
STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
+
/* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
QM_PQ_SIZE_256B(pq_size));
- /* Set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ /* Set PQ base address */
STORE_RT_REG(p_hwfn,
QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
+
+ /* Clear PQ pointer table entry */
+ if (is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_PTRTBLOTHER_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
mem_addr_4kb += pq_mem_4kb;
}
}
@@ -461,16 +606,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
struct qed_qm_pf_rt_init_params *p_params)
{
u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
- u32 crd_reg_offset;
- u32 inc_val;
+ struct init_qm_pq_params *pq_params = p_params->pq_params;
+ u32 inc_val, crd_reg_offset;
+ u8 ext_voq;
u16 i;
- if (p_params->pf_id < MAX_NUM_PFS_BB)
- crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
- else
- crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
- crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
-
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
@@ -478,19 +618,26 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
}
for (i = 0; i < num_tx_pqs; i++) {
- u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
- p_params->max_phys_tcs_per_port);
-
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ p_params->port_id,
+ pq_params[i].tc_id,
+ p_params->max_phys_tcs_per_port);
+ crd_reg_offset =
+ (p_params->pf_id < MAX_NUM_PFS_BB ?
+ QM_REG_WFQPFCRD_RT_OFFSET :
+ QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+ ext_voq * MAX_NUM_PFS_BB +
+ (p_params->pf_id % MAX_NUM_PFS_BB);
OVERWRITE_RT_REG(p_hwfn,
- crd_reg_offset + voq * MAX_NUM_PFS_BB,
- QM_WFQ_CRD_REG_SIGN_BIT);
+ crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
}
STORE_RT_REG(p_hwfn,
QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
- QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+ QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
inc_val);
+
return 0;
}
@@ -501,15 +648,19 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ if (inc_val > QM_PF_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
return -1;
}
- STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
- QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
- QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+
return 0;
}
@@ -520,12 +671,12 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
u8 num_vports,
struct init_qm_vport_params *vport_params)
{
+ u16 vport_pq_id;
u32 inc_val;
u8 tc, i;
/* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
-
if (!vport_params[i].vport_wfq)
continue;
@@ -536,17 +687,14 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
return -1;
}
- /* each VPORT can have several VPORT PQ IDs for
- * different TCs
- */
+ /* Each VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
- u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];
-
+ vport_pq_id = vport_params[i].first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID) {
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPCRD_RT_OFFSET +
vport_pq_id,
- QM_WFQ_CRD_REG_SIGN_BIT);
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPWEIGHT_RT_OFFSET +
vport_pq_id, inc_val);
@@ -557,12 +705,17 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
return 0;
}
+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
u8 start_vport,
u8 num_vports,
+ u32 link_speed,
struct init_qm_vport_params *vport_params)
{
u8 i, vport_id;
+ u32 inc_val;
if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn,
@@ -572,22 +725,22 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
/* Go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
- u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
-
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
+ vport_params[i].vport_rl :
+ link_speed);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
DP_NOTICE(p_hwfn,
"Invalid VPORT rate-limit configuration\n");
return -1;
}
- STORE_RT_REG(p_hwfn,
- QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
- QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
- QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+ QM_VP_RL_UPPER_BOUND(link_speed) |
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
inc_val);
}
@@ -599,7 +752,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
{
u32 reg_val, i;
- for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
i++) {
udelay(QM_STOP_CMD_POLL_PERIOD_US);
reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
@@ -632,8 +785,8 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
}
/******************** INTERFACE IMPLEMENTATION *********************/
-u32 qed_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
+
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
@@ -642,11 +795,10 @@ u32 qed_qm_pf_mem_size(u8 pf_id,
QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
-int qed_qm_common_rt_init(
- struct qed_hwfn *p_hwfn,
- struct qed_qm_common_rt_init_params *p_params)
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params)
{
- /* init AFullOprtnstcCrdMask */
+ /* Init AFullOprtnstcCrdMask */
u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
(QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
@@ -664,18 +816,31 @@ int qed_qm_common_rt_init(
QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+
+ /* Enable/disable PF RL */
qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+
+ /* Enable/disable PF WFQ */
qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+
+ /* Enable/disable VPORT RL */
qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+
+ /* Enable/disable VPORT WFQ */
qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+
+ /* Init PBF CMDQ line credit */
qed_cmdq_lines_rt_init(p_hwfn,
p_params->max_ports_per_engine,
p_params->max_phys_tcs_per_port,
p_params->port_params);
+
+ /* Init BTB blocks in PBF */
qed_btb_blocks_rt_init(p_hwfn,
p_params->max_ports_per_engine,
p_params->max_phys_tcs_per_port,
p_params->port_params);
+
return 0;
}
@@ -695,24 +860,31 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
/* Map Other PQs (if any) */
- qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
- p_params->num_pf_cids, p_params->num_tids, 0);
+ qed_other_pq_map_rt_init(p_hwfn,
+ p_params->pf_id,
+ p_params->is_pf_loading, p_params->num_pf_cids,
+ p_params->num_tids, 0);
/* Map Tx PQs */
qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
+ /* Init PF WFQ */
if (p_params->pf_wfq)
if (qed_pf_wfq_rt_init(p_hwfn, p_params))
return -1;
+ /* Init PF RL */
if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
return -1;
+ /* Set VPORT WFQ */
if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
return -1;
+ /* Set VPORT RL */
if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
- p_params->num_vports, vport_params))
+ p_params->num_vports, p_params->link_speed,
+ vport_params))
return -1;
return 0;
@@ -729,6 +901,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
}
qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+
return 0;
}
@@ -737,14 +910,13 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ if (inc_val > QM_PF_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
return -1;
}
- qed_wr(p_hwfn, p_ptt,
- QM_REG_RLPFCRD + pf_id * 4,
- QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn,
+ p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
return 0;
@@ -767,33 +939,35 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
for (tc = 0; tc < NUM_OF_TCS; tc++) {
vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID)
- qed_wr(p_hwfn, p_ptt,
- QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
- inc_val);
+ qed_wr(p_hwfn,
+ p_ptt,
+ QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
}
return 0;
}
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+ struct qed_ptt *p_ptt,
+ u8 vport_id, u32 vport_rl, u32 link_speed)
{
- u32 inc_val = QM_RL_INC_VAL(vport_rl);
+ u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
- if (vport_id >= MAX_QM_GLOBAL_RLS) {
+ if (vport_id >= max_qm_global_rls) {
DP_NOTICE(p_hwfn,
"Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
return -1;
}
- qed_wr(p_hwfn, p_ptt,
- QM_REG_RLGLBLCRD + vport_id * 4,
- QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
return 0;
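For clarity, the change above treats a vport_rl of zero as "no explicit limit" and rate-limits at the link speed instead; a minimal sketch of that selection logic (pick_rl_inc_val() is an illustrative name, not a qed function):

static u32 pick_rl_inc_val(u32 vport_rl, u32 link_speed)
{
        /* vport_rl == 0 means unconfigured: cap at the link speed
         * rather than programming a zero increment.
         */
        return QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
}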
@@ -805,23 +979,27 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
- u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+ u32 pq_mask = 0, last_pq, pq_id;
+
+ last_pq = start_pq + num_pqs - 1;
/* Set command's PQ type */
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+ /* Go over requested PQs */
for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
/* Set PQ bit in mask (stop command only) */
if (!is_release_cmd)
- pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+ pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH));
/* If last PQ or end of PQ mask, write command */
if ((pq_id == last_pq) ||
(pq_id % QM_STOP_PQ_MASK_WIDTH ==
(QM_STOP_PQ_MASK_WIDTH - 1))) {
- QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
- PAUSE_MASK, pq_mask);
- QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+ QM_CMD_SET_FIELD(cmd_arr,
+ QM_STOP_CMD, PAUSE_MASK, pq_mask);
+ QM_CMD_SET_FIELD(cmd_arr,
+ QM_STOP_CMD,
GROUP_ID,
pq_id / QM_STOP_PQ_MASK_WIDTH);
if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
@@ -834,87 +1012,103 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
return true;
}
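The loop above batches per-PQ bits into one command per QM_STOP_PQ_MASK_WIDTH-sized group, flushing at each group boundary and at the final PQ. A standalone sketch of the pattern (flush_group() is hypothetical):

static void batch_bits(u32 start, u32 count, u32 width)
{
        u32 last = start + count - 1, id, mask = 0;

        for (id = start; id <= last; id++) {
                mask |= BIT(id % width);

                /* Flush on a group boundary or at the final element */
                if (id == last || (id % width) == width - 1) {
                        flush_group(id / width, mask);
                        mask = 0;
                }
        }
}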
-static void
-qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
-{
- if (enable)
- set_bit(bit, var);
- else
- clear_bit(bit, var);
-}
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+ do { \
+ typeof(var) *__p_var = &(var); \
+ typeof(offset) __offset = offset; \
+ *__p_var = (*__p_var & ~BIT(__offset)) | \
+ ((enable) ? BIT(__offset) : 0); \
+ } while (0)
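The macro above replaces the atomic set_bit()/clear_bit() helper with a plain read-modify-write on a u32; a minimal open-coded equivalent for reference (set_bit_value() is an illustrative name, not a qed helper):

static inline u32 set_bit_value(u32 val, u32 offset, bool enable)
{
        /* Clear the target bit, then set it again only if enabled */
        return (val & ~BIT(offset)) | (enable ? BIT(offset) : 0);
}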
#define PRS_ETH_TUNN_FIC_FORMAT -188897008
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port)
{
+ /* Update PRS register */
qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+
+ /* Update NIG register */
qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
+
+ /* Update PBF register */
qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool vxlan_enable)
{
- unsigned long reg_val = 0;
+ u32 reg_val;
u8 shift;
+ /* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
-
if (reg_val)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
- PRS_ETH_TUNN_FIC_FORMAT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ /* Update NIG register */
reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
- vxlan_enable ? 1 : 0);
+ /* Update DORQ register */
+ qed_wr(p_hwfn,
+ p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
}
-void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
bool eth_gre_enable, bool ip_gre_enable)
{
- unsigned long reg_val = 0;
+ u32 reg_val;
u8 shift;
+ /* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
- PRS_ETH_TUNN_FIC_FORMAT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ /* Update NIG register */
reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
- eth_gre_enable ? 1 : 0);
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
- ip_gre_enable ? 1 : 0);
+ /* Update DORQ registers */
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
+ qed_wr(p_hwfn,
+ p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
}
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port)
{
+ /* Update PRS register */
qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+
+ /* Update NIG register */
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+
+ /* Update PBF register */
qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
@@ -922,32 +1116,39 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_geneve_enable, bool ip_geneve_enable)
{
- unsigned long reg_val = 0;
+ u32 reg_val;
u8 shift;
+ /* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
- qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
-
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
- PRS_ETH_TUNN_FIC_FORMAT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ /* Update NIG register */
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
eth_geneve_enable ? 1 : 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
- /* EDPM with geneve tunnel not supported in BB_B0 */
+ /* EDPM with geneve tunnel not supported in BB */
if (QED_IS_BB_B0(p_hwfn->cdev))
return;
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+ /* Update DORQ registers */
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
eth_geneve_enable ? 1 : 0);
- qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
ip_geneve_enable ? 1 : 0);
}
@@ -959,117 +1160,297 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 pf_id)
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
- u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM +
- pf_id * RAM_LINE_SIZE;
-
- /*stop using gft logic */
+ /* Disable gft search for PF */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
- qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+
+ /* Clean ram & cam for next gft session */
+
+ /* Zero camline */
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
- qed_wr(p_hwfn, p_ptt, hw_addr, 0);
- qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0);
+
+ /* Zero ramline */
+ qed_wr(p_hwfn,
+ p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+ 0);
}
-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- u16 pf_id, bool tcp, bool udp,
- bool ipv4, bool ipv6)
+void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- union gft_cam_line_union camline;
- struct gft_ram_line ramline;
u32 rfs_cm_hdr_event_id;
+ /* Set RFS event ID to be awakened in Tstorm by PRS */
rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
+ PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+}
+
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4, bool ipv6, enum gft_profile_type profile_type)
+{
+ u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn,
- "set_rfs_mode_enable: must accept at least on of - ipv4 or ipv6");
+ "gft_config: must accept at least on of - ipv4 or ipv6'\n");
if (!tcp && !udp)
DP_NOTICE(p_hwfn,
- "set_rfs_mode_enable: must accept at least on of - udp or tcp");
+ "gft_config: must accept at least on of - udp or tcp\n");
+ if (profile_type >= MAX_GFT_PROFILE_TYPE)
+ DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");
- rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
- PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
- rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
- PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
- qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+ /* Set RFS event ID to be awakened in Tstorm by PRS */
+ reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
- /* Configure Registers for RFS mode */
- qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+ /* On match, load only the cid in PRS, not the context */
qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
- camline.cam_line_mapped.camline = 0;
- /* Cam line is now valid!! */
- SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_VALID, 1);
+ /* Do not use tenant ID exist bit for gft search */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
- /* filters are per PF!! */
- SET_FIELD(camline.cam_line_mapped.camline,
+ /* Set Cam */
+ cam_line = 0;
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
+
+ /* Filters are per PF!! */
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_PF_ID_MASK,
GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
- SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+
if (!(tcp && udp)) {
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
if (tcp)
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_TCP_PROTOCOL);
else
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_UDP_PROTOCOL);
}
if (!(ipv4 && ipv6)) {
- SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
if (ipv4)
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV4);
else
- SET_FIELD(camline.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV6);
}
/* Write characteristics to cam */
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
- camline.cam_line_mapped.camline);
- camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
- PRS_REG_GFT_CAM +
- CAM_LINE_SIZE * pf_id);
+ cam_line);
+ cam_line =
+ qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
/* Write line to RAM - compare to filter 4 tuple */
- ramline.lo = 0;
- ramline.hi = 0;
- SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1);
-
- /* Each iteration write to reg */
- qed_wr(p_hwfn, p_ptt,
+ ram_line_lo = 0;
+ ram_line_hi = 0;
+
+ if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ }
+
+ qed_wr(p_hwfn,
+ p_ptt,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
- ramline.lo);
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4,
- ramline.hi);
+ ram_line_lo);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+ ram_line_hi);
/* Set default profile so that no filter match will happen */
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM +
- RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH,
- ramline.lo);
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM +
- RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4,
- ramline.hi);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
+
+ /* Enable gft search */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+}
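A hedged usage sketch of the new entry point: enabling 4-tuple GFT matching for TCP over IPv4 on the current PF (assumes p_hwfn/p_ptt are already acquired):

qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
               true,    /* tcp */
               false,   /* udp */
               true,    /* ipv4 */
               false,   /* ipv6 */
               GFT_PROFILE_TYPE_4_TUPLE);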
+
+DECLARE_CRC8_TABLE(cdu_crc8_table);
+
+/* Calculate and return CDU validation byte per connection type/region/cid */
+static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
+{
+ const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+ u8 crc, validation_byte = 0;
+ static u8 crc8_table_valid; /* automatically initialized to 0 */
+ u32 validation_string = 0;
+ u32 data_to_crc;
+
+ if (!crc8_table_valid) {
+ crc8_populate_msb(cdu_crc8_table, 0x07);
+ crc8_table_valid = 1;
+ }
+
+ /* The CRC is calculated on the String-to-compress:
+ * [31:8] = {CID[31:20],CID[11:0]}
+ * [7:4] = Region
+ * [3:0] = Type
+ */
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+ validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+ validation_string |= ((region & 0xF) << 4);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+ validation_string |= (conn_type & 0xF);
+
+ /* Convert to big-endian and calculate CRC8 */
+ data_to_crc = be32_to_cpu(validation_string);
+
+ crc = crc8(cdu_crc8_table,
+ (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);
+
+ /* The validation byte [7:0] is composed:
+ * for type A validation
+ * [7] = active configuration bit
+ * [6:0] = crc[6:0]
+ *
+ * for type B validation
+ * [7] = active configuration bit
+ * [6:3] = connection_type[3:0]
+ * [2:0] = crc[2:0]
+ */
+ validation_byte |=
+ ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
+
+ if ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+ validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+ else
+ validation_byte |= crc & 0x7F;
+
+ return validation_byte;
+}
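A worked example of the string-to-compress packing described in the comment above (all values invented): for cid = 0x12345678, region = 3, conn_type = 1, with the USE_CID/USE_REGION/USE_TYPE bits set:

u32 s = 0;

s |= (0x12345678 & 0xFFF00000) | ((0x12345678 & 0xFFF) << 8);  /* 0x12367800 */
s |= (3 & 0xF) << 4;                                           /* 0x12367830 */
s |= (1 & 0xF);                                                /* 0x12367831 */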
+
+/* Calculate and set validation bytes for session context */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 cid)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ memset(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
+ *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
+ *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
+}
+
+/* Calculate and set validation bytes for task context */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 tid)
+{
+ u8 *p_ctx, *region1_val_ptr;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ memset(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
+}
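A hedged usage sketch of the two helpers above (the context pointers, sizes, types and ids below are placeholders, not qed symbols):

qed_calc_session_ctx_validation(p_conn_ctx, conn_ctx_size, conn_type, cid);
qed_calc_task_ctx_validation(p_task_ctx, task_ctx_size, task_type, tid);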
+
+/* Memset session context to 0 while preserving validation bytes */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+ u8 x_val, t_val, u_val;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ x_val = *x_val_ptr;
+ t_val = *t_val_ptr;
+ u_val = *u_val_ptr;
+
+ memset(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = x_val;
+ *t_val_ptr = t_val;
+ *u_val_ptr = u_val;
+}
+
+/* Memset task context to 0 while preserving validation bytes */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+ u8 *p_ctx, *region1_val_ptr;
+ u8 region1_val;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ region1_val = *region1_val_ptr;
+
+ memset(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = region1_val;
+}
+
+/* Enable and configure context validation */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 ctx_validation;
+
+ /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+ qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
+
+ /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
+
+ /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index e3f368882f46..3bb76da6baa2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -414,11 +414,23 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
}
/* init_ops callbacks entry point */
-static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct init_callback_op *p_cmd)
+static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_callback_op *p_cmd)
{
- DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+ int rc;
+
+ switch (p_cmd->callback_id) {
+ case DMAE_READY_CB:
+ rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
+ p_cmd->callback_id);
+ return -EINVAL;
+ }
+
+ return rc;
}
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
@@ -519,7 +531,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
break;
case INIT_OP_CALLBACK:
- qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
break;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 719cdbfe1695..d3eabcf9c86c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -59,10 +59,10 @@ struct qed_pi_info {
};
struct qed_sb_sp_info {
- struct qed_sb_info sb_info;
+ struct qed_sb_info sb_info;
/* per protocol index data */
- struct qed_pi_info pi_info_arr[PIS_PER_SB];
+ struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
};
enum qed_attention_type {
@@ -82,7 +82,7 @@ struct aeu_invert_reg_bit {
#define ATTENTION_LENGTH_SHIFT (4)
#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
ATTENTION_LENGTH_SHIFT)
-#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
ATTENTION_PARITY)
@@ -1313,7 +1313,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
if (IS_VF(p_hwfn->cdev))
return;
- sb_offset = igu_sb_id * PIS_PER_SB;
+ sb_offset = igu_sb_id * PIS_PER_SB_E4;
memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 5199634ed630..54b4ee0acfd7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -197,7 +197,7 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
#define QED_SB_EVENT_MASK 0x0003
#define SB_ALIGNED_SIZE(p_hwfn) \
- ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+ ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
#define QED_SB_INVALID_IDX 0xffff
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 813c77cc857f..c0d4a54a5edb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -62,22 +62,6 @@
#include "qed_sriov.h"
#include "qed_reg_addr.h"
-static int
-qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
- u8 fw_event_code,
- u16 echo, union event_ring_data *data, u8 fw_return_code)
-{
- if (p_hwfn->p_iscsi_info->event_cb) {
- struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
-
- return p_iscsi->event_cb(p_iscsi->event_context,
- fw_event_code, data);
- } else {
- DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
- return -EINVAL;
- }
-}
-
struct qed_iscsi_conn {
struct list_head list_entry;
bool free_on_delete;
@@ -105,7 +89,7 @@ struct qed_iscsi_conn {
u8 local_mac[6];
u8 remote_mac[6];
u16 vlan_id;
- u8 tcp_flags;
+ u16 tcp_flags;
u8 ip_version;
u32 remote_ip[4];
u32 local_ip[4];
@@ -122,7 +106,6 @@ struct qed_iscsi_conn {
u32 ss_thresh;
u16 srtt;
u16 rtt_var;
- u32 ts_time;
u32 ts_recent;
u32 ts_recent_age;
u32 total_rt;
@@ -144,7 +127,6 @@ struct qed_iscsi_conn {
u16 mss;
u8 snd_wnd_scale;
u8 rcv_wnd_scale;
- u32 ts_ticks_per_second;
u16 da_timeout_value;
u8 ack_frequency;
@@ -162,6 +144,22 @@ struct qed_iscsi_conn {
};
static int
+qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code,
+ u16 echo, union event_ring_data *data, u8 fw_return_code)
+{
+ if (p_hwfn->p_iscsi_info->event_cb) {
+ struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+ return p_iscsi->event_cb(p_iscsi->event_context,
+ fw_event_code, data);
+ } else {
+ DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
+ return -EINVAL;
+ }
+}
+
+static int
qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr,
@@ -214,9 +212,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
- p_init->ooo_enable = p_params->ooo_enable;
p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
p_params->ll2_ooo_queue_id;
+
p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;
p_init->func_params.num_tasks = cpu_to_le16(val);
@@ -276,7 +274,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
val = p_params->tx_sws_timer;
p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
- p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt;
+ p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt;
p_hwfn->p_iscsi_info->event_context = event_context;
p_hwfn->p_iscsi_info->event_cb = async_event_cb;
@@ -304,8 +302,8 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
int rc = 0;
u32 dval;
u16 wval;
- u8 i;
u16 *p;
+ u8 i;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -371,7 +369,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
- p_tcp->flags = p_conn->tcp_flags;
+ p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
p_tcp->ip_version = p_conn->ip_version;
for (i = 0; i < 4; i++) {
dval = p_conn->remote_ip[i];
@@ -436,7 +434,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
- p_tcp2->flags = p_conn->tcp_flags;
+ p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags);
p_tcp2->ip_version = p_conn->ip_version;
for (i = 0; i < 4; i++) {
@@ -458,6 +456,11 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
+ p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd);
+ p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt;
+ p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+ p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+ p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval);
}
return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -692,8 +695,7 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
}
}
-static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_conn *p_conn)
+static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn)
{
if (!p_conn->queue_cnts_virt_addr)
goto nomem;
@@ -844,7 +846,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
if (!rc)
- rc = qed_iscsi_setup_connection(p_hwfn, p_conn);
+ rc = qed_iscsi_setup_connection(p_conn);
if (rc) {
spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
@@ -1294,7 +1296,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
con->ss_thresh = conn_info->ss_thresh;
con->srtt = conn_info->srtt;
con->rtt_var = conn_info->rtt_var;
- con->ts_time = conn_info->ts_time;
con->ts_recent = conn_info->ts_recent;
con->ts_recent_age = conn_info->ts_recent_age;
con->total_rt = conn_info->total_rt;
@@ -1316,7 +1317,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
con->mss = conn_info->mss;
con->snd_wnd_scale = conn_info->snd_wnd_scale;
con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
- con->ts_ticks_per_second = conn_info->ts_ticks_per_second;
con->da_timeout_value = conn_info->da_timeout_value;
con->ack_frequency = conn_info->ack_frequency;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 409041eab189..ca4a81dc1ace 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -64,14 +64,21 @@ struct mpa_v2_hdr {
#define QED_IWARP_INVALID_TCP_CID 0xffffffff
#define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024)
-#define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_MIN (0xffff)
#define TIMESTAMP_HEADER_SIZE (12)
+#define QED_IWARP_MAX_FIN_RT_DEFAULT (2)
#define QED_IWARP_TS_EN BIT(0)
#define QED_IWARP_DA_EN BIT(1)
#define QED_IWARP_PARAM_CRC_NEEDED (1)
#define QED_IWARP_PARAM_P2P (1)
+#define QED_IWARP_DEF_MAX_RT_TIME (0)
+#define QED_IWARP_DEF_CWND_FACTOR (4)
+#define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5)
+#define QED_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */
+#define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */
+
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code, u16 echo,
union event_ring_data *data,
@@ -120,11 +127,17 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
-void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
- struct iwarp_init_func_params *p_ramrod)
+void
+qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
+ struct iwarp_init_func_ramrod_data *p_ramrod)
{
- p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) +
- p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+ p_ramrod->iwarp.ll2_ooo_q_index =
+ RESC_START(p_hwfn, QED_LL2_QUEUE) +
+ p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+
+ p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
}
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
@@ -699,6 +712,12 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
tcp->ttl = 0x40;
tcp->tos_or_tc = 0;
+ tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
+ tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss;
+ tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
+ tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
+ tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
+
tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
tcp->connect_mode = ep->connect_mode;
@@ -807,6 +826,7 @@ static int
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
+ struct qed_iwarp_info *iwarp_info;
struct qed_sp_init_data init_data;
dma_addr_t async_output_phys;
struct qed_spq_entry *p_ent;
@@ -874,6 +894,8 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
p_mpa_ramrod->common.reject = 1;
}
+ iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
p_mpa_ramrod->mode = ep->mpa_rev;
SET_FIELD(p_mpa_ramrod->rtr_pref,
IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
@@ -2745,6 +2767,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
+ iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index c1ecd743305f..b8f612d00241 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -95,6 +95,7 @@ struct qed_iwarp_info {
spinlock_t iw_lock; /* for iwarp resources */
spinlock_t qp_lock; /* for teardown races */
u32 rcv_wnd_scale;
+ u16 rcv_wnd_size;
u16 max_mtu;
u8 mac_addr[ETH_ALEN];
u8 crc_needed;
@@ -187,7 +188,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params);
void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
- struct iwarp_init_func_params *p_ramrod);
+ struct iwarp_init_func_ramrod_data *p_ramrod);
int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 085338990f49..893ef08a4b39 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -223,10 +223,9 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid;
int rc;
- p_cid = vmalloc(sizeof(*p_cid));
+ p_cid = vzalloc(sizeof(*p_cid));
if (!p_cid)
return NULL;
- memset(p_cid, 0, sizeof(*p_cid));
p_cid->opaque_fid = opaque_fid;
p_cid->cid = cid;
@@ -1969,33 +1968,45 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
_qed_get_vport_stats(cdev, cdev->reset_stats);
}
-static void
-qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- struct qed_arfs_config_params *p_cfg_params)
+static enum gft_profile_type
+qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
{
- if (p_cfg_params->arfs_enable) {
- qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
- p_cfg_params->tcp, p_cfg_params->udp,
- p_cfg_params->ipv4, p_cfg_params->ipv6);
- DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+ if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
+ return GFT_PROFILE_TYPE_4_TUPLE;
+ if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
+ return GFT_PROFILE_TYPE_IP_DST_PORT;
+ return GFT_PROFILE_TYPE_L4_DST_PORT;
+}
+
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_arfs_config_params *p_cfg_params)
+{
+ if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
+ qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_cfg_params->tcp,
+ p_cfg_params->udp,
+ p_cfg_params->ipv4,
+ p_cfg_params->ipv6,
+ qed_arfs_mode_to_hsi(p_cfg_params->mode));
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
p_cfg_params->tcp ? "Enable" : "Disable",
p_cfg_params->udp ? "Enable" : "Disable",
p_cfg_params->ipv4 ? "Enable" : "Disable",
- p_cfg_params->ipv6 ? "Enable" : "Disable");
+ p_cfg_params->ipv6 ? "Enable" : "Disable",
+ (u32)p_cfg_params->mode);
} else {
- qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
+ qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
-
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
- p_cfg_params->arfs_enable ? "Enable" : "Disable");
}
-static int
-qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_cb,
- dma_addr_t p_addr, u16 length, u16 qid,
- u8 vport_id, bool b_is_add)
+ struct qed_ntuple_filter_params *p_params)
{
struct rx_update_gft_filter_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -2004,13 +2015,15 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u8 abs_vport_id = 0;
int rc = -EINVAL;
- rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc)
return rc;
- rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
- if (rc)
- return rc;
+ if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+ rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
+ if (rc)
+ return rc;
+ }
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -2032,17 +2045,27 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
p_ramrod = &p_ent->ramrod.rx_update_gft;
- DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
- p_ramrod->pkt_hdr_length = cpu_to_le16(length);
- p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
- p_ramrod->vport_id = abs_vport_id;
- p_ramrod->filter_type = RFS_FILTER_TYPE;
- p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
+
+ DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
+ p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
+
+ if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+ p_ramrod->rx_qid_valid = 1;
+ p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
+ }
+
+ p_ramrod->flow_id_valid = 0;
+ p_ramrod->flow_id = 0;
+
+ p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
+ p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
+ : GFT_DELETE_FILTER;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
abs_vport_id, abs_rx_q_id,
- b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
+ p_params->b_is_add ? "Adding" : "Removing",
+ (u64)p_params->addr, p_params->length);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -2743,7 +2766,8 @@ static int qed_configure_filter(struct qed_dev *cdev,
}
}
-static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
+static int qed_configure_arfs_searcher(struct qed_dev *cdev,
+ enum qed_filter_config_mode mode)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_arfs_config_params arfs_config_params;
@@ -2753,8 +2777,7 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
arfs_config_params.udp = true;
arfs_config_params.ipv4 = true;
arfs_config_params.ipv6 = true;
- arfs_config_params.arfs_enable = en_searcher;
-
+ arfs_config_params.mode = mode;
qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
&arfs_config_params);
return 0;
@@ -2762,8 +2785,8 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
static void
qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
- void *cookie, union event_ring_data *data,
- u8 fw_return_code)
+ void *cookie,
+ union event_ring_data *data, u8 fw_return_code)
{
struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
void *dev = p_hwfn->cdev->ops_cookie;
@@ -2771,10 +2794,10 @@ qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
op->arfs_filter_op(dev, cookie, fw_return_code);
}
-static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
- dma_addr_t mapping, u16 length,
- u16 vport_id, u16 rx_queue_id,
- bool add_filter)
+static int
+qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
+ void *cookie,
+ struct qed_ntuple_filter_params *params)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_spq_comp_cb cb;
@@ -2783,9 +2806,19 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
cb.function = qed_arfs_sp_response_handler;
cb.cookie = cookie;
- rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
- &cb, mapping, length, rx_queue_id,
- vport_id, add_filter);
+ if (params->b_is_vf) {
+ if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
+ false)) {
+ DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
+ params->vf_id);
+ return rc;
+ }
+
+ params->vport_id = params->vf_id + 1;
+ params->qid = QED_RFS_NTUPLE_QID_RSS;
+ }
+
+ rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
if (rc)
DP_NOTICE(p_hwfn,
"Failed to issue a-RFS filter configuration\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index cc1f248551c9..c4030e949cce 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -190,7 +190,7 @@ struct qed_arfs_config_params {
bool udp;
bool ipv4;
bool ipv6;
- bool arfs_enable;
+ enum qed_filter_config_mode mode;
};
struct qed_sp_vport_update_params {
@@ -277,6 +277,37 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev);
+/**
+ * @brief qed_arfs_mode_configure -
+ *
+ * Enable or disable RFS mode. At least one of tcp or udp, and at least
+ * one of ipv4 or ipv6, must be true for RFS mode to be enabled.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_cfg_params - arfs mode configuration parameters.
+ *
+ */
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_arfs_config_params *p_cfg_params);
+
+/**
+ * @brief - qed_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove an aRFS HW filter.
+ *
+ * @param p_hwfn
+ * @param p_cb - Used for QED_SPQ_MODE_CB, where the client initializes
+ * it with a cookie and callback function address; when not
+ * using this mode, the client must pass NULL.
+ * @param p_params
+ */
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
+ struct qed_spq_comp_cb *p_cb,
+ struct qed_ntuple_filter_params *p_params);
+
#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
#define QED_QUEUE_CID_SELF (0xff)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 047f556ca62e..c4f14fdc4e77 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -406,6 +406,9 @@ static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
+ data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
+
+ data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
}
static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
@@ -927,7 +930,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
qed_chain_get_pbl_phys(&p_rx->rcq_chain));
p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
- p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
+ p_ramrod->inner_vlan_stripping_en =
+ p_ll2_conn->input.rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
@@ -1299,8 +1303,20 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
- p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
- CORE_TX_DEST_NW : CORE_TX_DEST_LB;
+ switch (data->input.tx_dest) {
+ case QED_LL2_TX_DEST_NW:
+ p_ll2_info->tx_dest = CORE_TX_DEST_NW;
+ break;
+ case QED_LL2_TX_DEST_LB:
+ p_ll2_info->tx_dest = CORE_TX_DEST_LB;
+ break;
+ case QED_LL2_TX_DEST_DROP:
+ p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
if (data->input.conn_type == QED_LL2_TYPE_OOO ||
data->input.secondary_queue)
p_ll2_info->main_func_queue = false;
@@ -2281,8 +2297,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
goto release_terminate;
}
- if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
- cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
+ if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
rc = qed_ll2_start_ooo(cdev, params);
if (rc) {
@@ -2340,8 +2355,7 @@ static int qed_ll2_stop(struct qed_dev *cdev)
qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
eth_zero_addr(cdev->ll2_mac_address);
- if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
- cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
+ if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
qed_ll2_stop_ooo(cdev);
rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 8b99c7d26f34..6f46cb11f349 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -2234,7 +2234,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
DRV_MSG_CODE_NVM_READ_NVRAM,
addr + offset +
(bytes_to_copy <<
- DRV_MB_PARAM_NVM_LEN_SHIFT),
+ DRV_MB_PARAM_NVM_LEN_OFFSET),
&resp, &resp_param,
&read_len,
(u32 *)(p_buf + offset));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index c8c4b3940564..5d040b873137 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -358,10 +358,27 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
kfree(p_rdma_info);
}
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
+{
+ qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
+}
+
static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
+ qed_rdma_free_reserved_lkey(p_hwfn);
qed_rdma_resc_free(p_hwfn);
}
@@ -553,7 +570,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
qed_iwarp_init_fw_ramrod(p_hwfn,
- &p_ent->ramrod.iwarp_init_func.iwarp);
+ &p_ent->ramrod.iwarp_init_func);
p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
} else {
p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
@@ -615,9 +632,6 @@ static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
- /* The first DPI is reserved for the Kernel */
- __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
-
/* Tid 0 will be used as the key for "reserved MR".
* The driver should allocate memory for it so it can be loaded but no
* ramrod should be passed on it.
@@ -797,17 +811,6 @@ static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
return p_hwfn->p_rdma_info->dev;
}
-static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-}
-
static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
struct qed_hwfn *p_hwfn;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 0cdb4337b3a0..f7122059b6b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -124,6 +124,8 @@
0x1f0434UL
#define PRS_REG_SEARCH_TAG1 \
0x1f0444UL
+#define PRS_REG_SEARCH_TENANT_ID \
+ 0x1f044cUL
#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \
0x1f0a0cUL
#define PRS_REG_SEARCH_TCP_FIRST_FRAG \
@@ -200,7 +202,13 @@
0x2e8800UL
#define CCFC_REG_STRONG_ENABLE_VF \
0x2e070cUL
-#define CDU_REG_CID_ADDR_PARAMS \
+#define CDU_REG_CCFC_CTX_VALID0 \
+ 0x580400UL
+#define CDU_REG_CCFC_CTX_VALID1 \
+ 0x580404UL
+#define CDU_REG_TCFC_CTX_VALID0 \
+ 0x580408UL
+#define CDU_REG_CID_ADDR_PARAMS \
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
0x010004UL
@@ -564,7 +572,7 @@
#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
#define PRS_REG_VXLAN_PORT 0x1f0738UL
-#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0)
@@ -580,11 +588,11 @@
#define PRS_REG_NGE_PORT 0x1f086cUL
#define NIG_REG_NGE_PORT 0x508b38UL
-#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
-#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL
#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
@@ -595,15 +603,15 @@
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
-#define PGLCS_REG_DBG_SELECT_K2 \
+#define PGLCS_REG_DBG_SELECT_K2_E5 \
0x001d14UL
-#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \
+#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \
0x001d18UL
-#define PGLCS_REG_DBG_SHIFT_K2 \
+#define PGLCS_REG_DBG_SHIFT_K2_E5 \
0x001d1cUL
-#define PGLCS_REG_DBG_FORCE_VALID_K2 \
+#define PGLCS_REG_DBG_FORCE_VALID_K2_E5 \
0x001d20UL
-#define PGLCS_REG_DBG_FORCE_FRAME_K2 \
+#define PGLCS_REG_DBG_FORCE_FRAME_K2_E5 \
0x001d24UL
#define MISC_REG_RESET_PL_PDA_VMAIN_1 \
0x008070UL
@@ -615,7 +623,7 @@
0x009050UL
#define MISCS_REG_RESET_PL_HV \
0x009060UL
-#define MISCS_REG_RESET_PL_HV_2_K2 \
+#define MISCS_REG_RESET_PL_HV_2_K2_E5 \
0x009150UL
#define DMAE_REG_DBG_SELECT \
0x00c510UL
@@ -647,15 +655,15 @@
0x0500b0UL
#define GRC_REG_DBG_FORCE_FRAME \
0x0500b4UL
-#define UMAC_REG_DBG_SELECT_K2 \
+#define UMAC_REG_DBG_SELECT_K2_E5 \
0x051094UL
-#define UMAC_REG_DBG_DWORD_ENABLE_K2 \
+#define UMAC_REG_DBG_DWORD_ENABLE_K2_E5 \
0x051098UL
-#define UMAC_REG_DBG_SHIFT_K2 \
+#define UMAC_REG_DBG_SHIFT_K2_E5 \
0x05109cUL
-#define UMAC_REG_DBG_FORCE_VALID_K2 \
+#define UMAC_REG_DBG_FORCE_VALID_K2_E5 \
0x0510a0UL
-#define UMAC_REG_DBG_FORCE_FRAME_K2 \
+#define UMAC_REG_DBG_FORCE_FRAME_K2_E5 \
0x0510a4UL
#define MCP2_REG_DBG_SELECT \
0x052400UL
@@ -717,15 +725,15 @@
0x1f0ba0UL
#define PRS_REG_DBG_FORCE_FRAME \
0x1f0ba4UL
-#define CNIG_REG_DBG_SELECT_K2 \
+#define CNIG_REG_DBG_SELECT_K2_E5 \
0x218254UL
-#define CNIG_REG_DBG_DWORD_ENABLE_K2 \
+#define CNIG_REG_DBG_DWORD_ENABLE_K2_E5 \
0x218258UL
-#define CNIG_REG_DBG_SHIFT_K2 \
+#define CNIG_REG_DBG_SHIFT_K2_E5 \
0x21825cUL
-#define CNIG_REG_DBG_FORCE_VALID_K2 \
+#define CNIG_REG_DBG_FORCE_VALID_K2_E5 \
0x218260UL
-#define CNIG_REG_DBG_FORCE_FRAME_K2 \
+#define CNIG_REG_DBG_FORCE_FRAME_K2_E5 \
0x218264UL
#define PRM_REG_DBG_SELECT \
0x2306a8UL
@@ -997,35 +1005,35 @@
0x580710UL
#define CDU_REG_DBG_FORCE_FRAME \
0x580714UL
-#define WOL_REG_DBG_SELECT_K2 \
+#define WOL_REG_DBG_SELECT_K2_E5 \
0x600140UL
-#define WOL_REG_DBG_DWORD_ENABLE_K2 \
+#define WOL_REG_DBG_DWORD_ENABLE_K2_E5 \
0x600144UL
-#define WOL_REG_DBG_SHIFT_K2 \
+#define WOL_REG_DBG_SHIFT_K2_E5 \
0x600148UL
-#define WOL_REG_DBG_FORCE_VALID_K2 \
+#define WOL_REG_DBG_FORCE_VALID_K2_E5 \
0x60014cUL
-#define WOL_REG_DBG_FORCE_FRAME_K2 \
+#define WOL_REG_DBG_FORCE_FRAME_K2_E5 \
0x600150UL
-#define BMBN_REG_DBG_SELECT_K2 \
+#define BMBN_REG_DBG_SELECT_K2_E5 \
0x610140UL
-#define BMBN_REG_DBG_DWORD_ENABLE_K2 \
+#define BMBN_REG_DBG_DWORD_ENABLE_K2_E5 \
0x610144UL
-#define BMBN_REG_DBG_SHIFT_K2 \
+#define BMBN_REG_DBG_SHIFT_K2_E5 \
0x610148UL
-#define BMBN_REG_DBG_FORCE_VALID_K2 \
+#define BMBN_REG_DBG_FORCE_VALID_K2_E5 \
0x61014cUL
-#define BMBN_REG_DBG_FORCE_FRAME_K2 \
+#define BMBN_REG_DBG_FORCE_FRAME_K2_E5 \
0x610150UL
-#define NWM_REG_DBG_SELECT_K2 \
+#define NWM_REG_DBG_SELECT_K2_E5 \
0x8000ecUL
-#define NWM_REG_DBG_DWORD_ENABLE_K2 \
+#define NWM_REG_DBG_DWORD_ENABLE_K2_E5 \
0x8000f0UL
-#define NWM_REG_DBG_SHIFT_K2 \
+#define NWM_REG_DBG_SHIFT_K2_E5 \
0x8000f4UL
-#define NWM_REG_DBG_FORCE_VALID_K2 \
+#define NWM_REG_DBG_FORCE_VALID_K2_E5 \
0x8000f8UL
-#define NWM_REG_DBG_FORCE_FRAME_K2\
+#define NWM_REG_DBG_FORCE_FRAME_K2_E5 \
0x8000fcUL
#define PBF_REG_DBG_SELECT \
0xd80060UL
@@ -1247,36 +1255,76 @@
0x1901534UL
#define USEM_REG_DBG_FORCE_FRAME \
0x1901538UL
-#define NWS_REG_DBG_SELECT_K2 \
+#define NWS_REG_DBG_SELECT_K2_E5 \
0x700128UL
-#define NWS_REG_DBG_DWORD_ENABLE_K2 \
+#define NWS_REG_DBG_DWORD_ENABLE_K2_E5 \
0x70012cUL
-#define NWS_REG_DBG_SHIFT_K2 \
+#define NWS_REG_DBG_SHIFT_K2_E5 \
0x700130UL
-#define NWS_REG_DBG_FORCE_VALID_K2 \
+#define NWS_REG_DBG_FORCE_VALID_K2_E5 \
0x700134UL
-#define NWS_REG_DBG_FORCE_FRAME_K2 \
+#define NWS_REG_DBG_FORCE_FRAME_K2_E5 \
0x700138UL
-#define MS_REG_DBG_SELECT_K2 \
+#define MS_REG_DBG_SELECT_K2_E5 \
0x6a0228UL
-#define MS_REG_DBG_DWORD_ENABLE_K2 \
+#define MS_REG_DBG_DWORD_ENABLE_K2_E5 \
0x6a022cUL
-#define MS_REG_DBG_SHIFT_K2 \
+#define MS_REG_DBG_SHIFT_K2_E5 \
0x6a0230UL
-#define MS_REG_DBG_FORCE_VALID_K2 \
+#define MS_REG_DBG_FORCE_VALID_K2_E5 \
0x6a0234UL
-#define MS_REG_DBG_FORCE_FRAME_K2 \
+#define MS_REG_DBG_FORCE_FRAME_K2_E5 \
0x6a0238UL
-#define PCIE_REG_DBG_COMMON_SELECT_K2 \
+#define PCIE_REG_DBG_COMMON_SELECT_K2_E5 \
0x054398UL
-#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5 \
0x05439cUL
-#define PCIE_REG_DBG_COMMON_SHIFT_K2 \
+#define PCIE_REG_DBG_COMMON_SHIFT_K2_E5 \
0x0543a0UL
-#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \
+#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5 \
0x0543a4UL
-#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \
0x0543a8UL
+#define PTLD_REG_DBG_SELECT_E5 \
+ 0x5a1600UL
+#define PTLD_REG_DBG_DWORD_ENABLE_E5 \
+ 0x5a1604UL
+#define PTLD_REG_DBG_SHIFT_E5 \
+ 0x5a1608UL
+#define PTLD_REG_DBG_FORCE_VALID_E5 \
+ 0x5a160cUL
+#define PTLD_REG_DBG_FORCE_FRAME_E5 \
+ 0x5a1610UL
+#define YPLD_REG_DBG_SELECT_E5 \
+ 0x5c1600UL
+#define YPLD_REG_DBG_DWORD_ENABLE_E5 \
+ 0x5c1604UL
+#define YPLD_REG_DBG_SHIFT_E5 \
+ 0x5c1608UL
+#define YPLD_REG_DBG_FORCE_VALID_E5 \
+ 0x5c160cUL
+#define YPLD_REG_DBG_FORCE_FRAME_E5 \
+ 0x5c1610UL
+#define RGSRC_REG_DBG_SELECT_E5 \
+ 0x320040UL
+#define RGSRC_REG_DBG_DWORD_ENABLE_E5 \
+ 0x320044UL
+#define RGSRC_REG_DBG_SHIFT_E5 \
+ 0x320048UL
+#define RGSRC_REG_DBG_FORCE_VALID_E5 \
+ 0x32004cUL
+#define RGSRC_REG_DBG_FORCE_FRAME_E5 \
+ 0x320050UL
+#define TGSRC_REG_DBG_SELECT_E5 \
+ 0x322040UL
+#define TGSRC_REG_DBG_DWORD_ENABLE_E5 \
+ 0x322044UL
+#define TGSRC_REG_DBG_SHIFT_E5 \
+ 0x322048UL
+#define TGSRC_REG_DBG_FORCE_VALID_E5 \
+ 0x32204cUL
+#define TGSRC_REG_DBG_FORCE_FRAME_E5 \
+ 0x322050UL
#define MISC_REG_RESET_PL_UA \
0x008050UL
#define MISC_REG_RESET_PL_HV \
@@ -1415,7 +1463,7 @@
0x1940000UL
#define SEM_FAST_REG_INT_RAM \
0x020000UL
-#define SEM_FAST_REG_INT_RAM_SIZE \
+#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \
20480
#define GRC_REG_TRACE_FIFO_VALID_DATA \
0x050064UL
@@ -1433,6 +1481,8 @@
0x340800UL
#define BRB_REG_BIG_RAM_DATA \
0x341500UL
+#define BRB_REG_BIG_RAM_DATA_SIZE \
+ 64
#define SEM_FAST_REG_STALL_0_BB_K2 \
0x000488UL
#define SEM_FAST_REG_STALLED \
@@ -1451,7 +1501,7 @@
0x238c30UL
#define MISCS_REG_BLOCK_256B_EN \
0x009074UL
-#define MCP_REG_SCRATCH_SIZE \
+#define MCP_REG_SCRATCH_SIZE_BB_K2 \
57344
#define MCP_REG_CPU_REG_FILE \
0xe05200UL
@@ -1485,35 +1535,35 @@
0x008c14UL
#define NWS_REG_NWS_CMU_K2 \
0x720000UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \
0x000680UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \
0x000684UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \
0x0006c0UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \
0x0006c4UL
-#define MS_REG_MS_CMU_K2 \
+#define MS_REG_MS_CMU_K2_E5 \
0x6a4000UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
0x000208UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
0x00020cUL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
0x000210UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
0x000214UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
0x000208UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
0x00020cUL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
0x000210UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
0x000214UL
-#define PHY_PCIE_REG_PHY0_K2 \
+#define PHY_PCIE_REG_PHY0_K2_E5 \
0x620000UL
-#define PHY_PCIE_REG_PHY1_K2 \
+#define PHY_PCIE_REG_PHY1_K2_E5 \
0x624000UL
#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index a1d33f35aad3..5e927b6cac22 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -351,7 +351,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
p_ramrod->mf_mode = MF_NPAR;
}
- p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+ p_ramrod->outer_tag_config.outer_tag.tci =
+ cpu_to_le16(p_hwfn->hw_info.ovlan);
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@@ -396,8 +398,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
- sb, sb_index, p_ramrod->outer_tag);
+ "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
+ sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
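The hunk above swaps a raw assignment for an explicit cpu_to_le16() because outer_tag_config.outer_tag.tci is a fixed little-endian HSI field. A minimal, runnable userspace sketch of the guarantee that conversion provides (the demo_ helper is ours, not a kernel API):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the kernel's cpu_to_le16(): build the result
     * byte-by-byte so it is little-endian on any host, which is
     * what a fixed-endian wire/HSI field requires.
     */
    static uint16_t demo_cpu_to_le16(uint16_t v)
    {
        uint8_t b[2] = { v & 0xff, v >> 8 };
        uint16_t le;

        memcpy(&le, b, sizeof(le));
        return le;
    }

    int main(void)
    {
        uint16_t tci = demo_cpu_to_le16(0x1234);
        uint8_t *p = (uint8_t *)&tci;

        printf("bytes as the firmware sees them: %02x %02x\n",
               p[0], p[1]); /* 34 12, regardless of host endianness */
        return 0;
    }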
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index be48d9abd001..1673fc90027f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -97,9 +97,7 @@ static int __qed_spq_block(struct qed_hwfn *p_hwfn,
while (iter_cnt--) {
/* Validate we receive completion update */
- if (READ_ONCE(comp_done->done) == 1) {
- /* Read updated FW return value */
- smp_read_barrier_depends();
+ if (smp_load_acquire(&comp_done->done) == 1) { /* ^^^ */
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
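The smp_load_acquire() here pairs with the completer's store-release on comp_done->done: once the flag reads 1, the fw_return_code written before it is guaranteed visible, which is what made the separate smp_read_barrier_depends() redundant. A hedged C11-atomics analogue of the same pairing (names illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct demo_comp {
        int fw_return_code;
        atomic_int done;
    };

    /* Completer: publish the payload, then release-store the flag. */
    static void demo_complete(struct demo_comp *c, int code)
    {
        c->fw_return_code = code;
        atomic_store_explicit(&c->done, 1, memory_order_release);
    }

    /* Poller: acquire-load the flag; once it reads 1, the payload
     * stored before the release is guaranteed visible.
     */
    static bool demo_poll(struct demo_comp *c, int *code)
    {
        if (atomic_load_explicit(&c->done, memory_order_acquire) != 1)
            return false;
        *code = c->fw_return_code;
        return true;
    }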
@@ -215,7 +213,7 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq)
{
- struct core_conn_context *p_cxt;
+ struct e4_core_conn_context *p_cxt;
struct qed_cxt_info cxt_info;
u16 physical_q;
int rc;
@@ -233,11 +231,11 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->xstorm_ag_context.flags10,
- XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags1,
- XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags9,
- XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+ E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
/* QM physical queue */
physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
@@ -776,6 +774,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
int rc = 0;
struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
bool b_ret_ent = true;
+ bool eblock;
if (!p_hwfn)
return -EINVAL;
@@ -794,6 +793,11 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
if (rc)
goto spq_post_fail;
+ /* Check if entry is in block mode before qed_spq_add_entry,
+ * which might kfree p_ent.
+ */
+ eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
+
/* Add the request to the pending queue */
rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
if (rc)
@@ -811,7 +815,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_spq->lock);
- if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
+ if (eblock) {
/* For entries in QED BLOCK mode, the completion code cannot
* perform the necessary cleanup - if it did, we couldn't
* access p_ent here to see whether it's successful or not.
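The new eblock local is needed because qed_spq_add_entry() can kfree p_ent on some paths, so any field wanted afterwards must be snapshotted first. A minimal runnable sketch of that pattern, with hypothetical names:

    #include <stdbool.h>
    #include <stdlib.h>

    struct demo_ent {
        int comp_mode; /* 1 == blocking */
    };

    /* Stand-in for qed_spq_add_entry(): may consume (free) the
     * entry on some paths, e.g. when it is merged elsewhere.
     */
    static int demo_add_entry(struct demo_ent *e)
    {
        free(e);
        return 0;
    }

    static int demo_post(struct demo_ent *e)
    {
        /* Snapshot anything needed later *before* the call that
         * may free e; reading e->comp_mode afterwards would be a
         * use-after-free.
         */
        bool blocking = (e->comp_mode == 1);
        int rc = demo_add_entry(e);

        if (rc)
            return rc;
        return blocking ? 1 : 0;
    }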
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 3f40b1de7957..5acb91b3564c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -153,9 +153,9 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
- int rel_vf_id,
- bool b_enabled_only, bool b_non_malicious)
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id,
+ bool b_enabled_only, bool b_non_malicious)
{
if (!p_hwfn->pf_iov_info) {
DP_NOTICE(p_hwfn->cdev, "No iov info\n");
@@ -1621,7 +1621,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
/* fill in pfdev info */
pfdev_info->chip_num = p_hwfn->cdev->chip_num;
pfdev_info->db_size = 0;
- pfdev_info->indices_per_sb = PIS_PER_SB;
+ pfdev_info->indices_per_sb = PIS_PER_SB_E4;
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
@@ -3582,11 +3582,11 @@ static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
- u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+ u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
int i, cnt;
/* Read initial consumers & producers */
- for (i = 0; i < MAX_NUM_VOQS; i++) {
+ for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
u32 prod;
cons[i] = qed_rd(p_hwfn, p_ptt,
@@ -3601,7 +3601,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
/* Wait for consumers to pass the producers */
i = 0;
for (cnt = 0; cnt < 50; cnt++) {
- for (; i < MAX_NUM_VOQS; i++) {
+ for (; i < MAX_NUM_VOQS_E4; i++) {
u32 tmp;
tmp = qed_rd(p_hwfn, p_ptt,
@@ -3611,7 +3611,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
break;
}
- if (i == MAX_NUM_VOQS)
+ if (i == MAX_NUM_VOQS_E4)
break;
msleep(20);
@@ -4237,6 +4237,7 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int vfid, int val)
{
+ struct qed_mcp_link_state *p_link;
struct qed_vf_info *vf;
u8 abs_vp_id = 0;
int rc;
@@ -4249,7 +4250,10 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+ p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
+ return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
+ p_link->speed);
}
static int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 3955929ba892..9a8fd79611f2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -274,6 +274,23 @@ enum qed_iov_wq_flag {
#ifdef CONFIG_QED_SRIOV
/**
+ * @brief Check whether the given VF ID @vfid is valid
+ * w.r.t. the @b_enabled_only value:
+ * if b_enabled_only = true, only an enabled VF ID is valid;
+ * else any VF ID less than max_vfs is valid.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id - Relative VF ID
+ * @param b_enabled_only - consider only enabled VF
+ * @param b_non_malicious - true iff we want to validate vf isn't malicious.
+ *
+ * @return bool - true for valid VF ID
+ */
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id,
+ bool b_enabled_only, bool b_non_malicious);
+
+/**
* @brief - Given a VF index, return index of next [including that] active VF.
*
* @param p_hwfn
@@ -376,6 +393,13 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
+static inline bool
+qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
+{
+ return false;
+}
+
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
u16 rel_vf_id)
{
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index a3a70ade411f..9935978c5542 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -40,6 +40,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/bpf.h>
+#include <net/xdp.h>
#include <linux/qed/qede_rdma.h>
#include <linux/io.h>
#ifdef CONFIG_RFS_ACCEL
@@ -52,9 +53,9 @@
#include <linux/qed/qed_eth_if.h>
#define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 10
-#define QEDE_REVISION_VERSION 10
-#define QEDE_ENGINEERING_VERSION 21
+#define QEDE_MINOR_VERSION 33
+#define QEDE_REVISION_VERSION 0
+#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
__stringify(QEDE_REVISION_VERSION) "." \
@@ -345,6 +346,7 @@ struct qede_rx_queue {
u64 xdp_no_pass;
void *handle;
+ struct xdp_rxq_info xdp_rxq;
};
union db_prod {
@@ -494,6 +496,8 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
int qede_configure_vlan_filters(struct qede_dev *edev);
+netdev_features_t qede_fix_features(struct net_device *dev,
+ netdev_features_t features);
int qede_set_features(struct net_device *dev, netdev_features_t features);
void qede_set_rx_mode(struct net_device *ndev);
void qede_config_rx_mode(struct net_device *ndev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index dae741270022..4ca3847fffd4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -940,6 +940,9 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu)
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"Configuring MTU size of %d\n", new_mtu);
+ if (new_mtu > PAGE_SIZE)
+ ndev->features &= ~NETIF_F_GRO_HW;
+
/* Set the mtu field and re-start the interface if needed */
args.u.mtu = new_mtu;
args.func = &qede_update_mtu;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index c1a0708a7d7c..6687e04d1558 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -98,10 +98,18 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
u16 rxq_id, bool add_fltr)
{
const struct qed_eth_ops *op = edev->ops;
+ struct qed_ntuple_filter_params params;
if (n->used)
return;
+ memset(&params, 0, sizeof(params));
+
+ params.addr = n->mapping;
+ params.length = n->buf_len;
+ params.qid = rxq_id;
+ params.b_is_add = add_fltr;
+
DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
"%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
add_fltr ? "Adding" : "Deleting",
@@ -110,8 +118,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
n->used = true;
n->filter_op = add_fltr;
- op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
- rxq_id, add_fltr);
+ op->ntuple_filter_config(edev->cdev, n, &params);
}
static void
@@ -141,7 +148,10 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
edev->arfs->filter_count++;
if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
- edev->ops->configure_arfs_searcher(edev->cdev, true);
+ enum qed_filter_config_mode mode;
+
+ mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+ edev->ops->configure_arfs_searcher(edev->cdev, mode);
edev->arfs->enable = true;
}
@@ -160,8 +170,11 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
edev->arfs->filter_count--;
if (!edev->arfs->filter_count && edev->arfs->enable) {
+ enum qed_filter_config_mode mode;
+
+ mode = QED_FILTER_CONFIG_MODE_DISABLE;
edev->arfs->enable = false;
- edev->ops->configure_arfs_searcher(edev->cdev, false);
+ edev->ops->configure_arfs_searcher(edev->cdev, mode);
}
}
@@ -255,8 +268,11 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
if (!edev->arfs->filter_count) {
if (edev->arfs->enable) {
+ enum qed_filter_config_mode mode;
+
+ mode = QED_FILTER_CONFIG_MODE_DISABLE;
edev->arfs->enable = false;
- edev->ops->configure_arfs_searcher(edev->cdev, false);
+ edev->ops->configure_arfs_searcher(edev->cdev, mode);
}
#ifdef CONFIG_RFS_ACCEL
} else {
@@ -895,19 +911,26 @@ static void qede_set_features_reload(struct qede_dev *edev,
edev->ndev->features = args->u.features;
}
+netdev_features_t qede_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
+ !(features & NETIF_F_GRO))
+ features &= ~NETIF_F_GRO_HW;
+
+ return features;
+}
+
int qede_set_features(struct net_device *dev, netdev_features_t features)
{
struct qede_dev *edev = netdev_priv(dev);
netdev_features_t changes = features ^ dev->features;
bool need_reload = false;
- /* No action needed if hardware GRO is disabled during driver load */
- if (changes & NETIF_F_GRO) {
- if (dev->features & NETIF_F_GRO)
- need_reload = !edev->gro_disable;
- else
- need_reload = edev->gro_disable;
- }
+ if (changes & NETIF_F_GRO_HW)
+ need_reload = true;
if (need_reload) {
struct qede_reload_args args;
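qede_fix_features() relies on the ndo_fix_features contract: the core passes in the requested feature mask and applies whatever mask the driver returns, so dependent bits such as NETIF_F_GRO_HW are stripped in one place rather than policed at every toggle site. A minimal sketch of that shape (illustrative, not the qede logic itself):

    static netdev_features_t demo_fix_features(struct net_device *dev,
                                               netdev_features_t features)
    {
        /* hardware GRO is meaningless without software GRO enabled */
        if (!(features & NETIF_F_GRO))
            features &= ~NETIF_F_GRO_HW;

        return features;
    }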
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 48ec4c56cddf..dafc079ab6b9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1006,6 +1006,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
xdp.data = xdp.data_hard_start + *data_offset;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
+ xdp.rxq = &rxq->xdp_rxq;
/* Queues always have a full reset currently, so for the time
* being until there's atomic program replace just mark read
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 8f9b3eb82137..2db70eabddfe 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -545,6 +545,7 @@ static const struct net_device_ops qede_netdev_ops = {
#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
@@ -572,6 +573,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
.ndo_change_mtu = qede_change_mtu,
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
@@ -589,6 +591,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_change_mtu = qede_change_mtu,
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
@@ -676,7 +679,7 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->priv_flags |= IFF_UNICAST_FLT;
/* user-changeable features */
- hw_features = NETIF_F_GRO | NETIF_F_SG |
+ hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
@@ -762,6 +765,12 @@ static void qede_free_fp_array(struct qede_dev *edev)
fp = &edev->fp_array[i];
kfree(fp->sb_info);
+ /* Handle the mem-alloc failure case where qede_init_fp
+ * didn't register xdp_rxq_info yet.
+ * The fp->rxq check implicitly covers (fp->type & QEDE_FASTPATH_RX).
+ */
+ if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
kfree(fp->rxq);
kfree(fp->xdp_tx);
kfree(fp->txq);
@@ -1068,10 +1077,6 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
pci_set_drvdata(pdev, NULL);
- /* Release edev's reference to XDP's bpf if such exist */
- if (edev->xdp_prog)
- bpf_prog_put(edev->xdp_prog);
-
/* Use global ops since we've freed edev */
qed_ops->common->slowpath_stop(cdev);
if (system_state == SYSTEM_POWER_OFF)
@@ -1148,7 +1153,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
static int qede_alloc_mem_sb(struct qede_dev *edev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int rc;
@@ -1232,18 +1237,9 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
dma_addr_t mapping;
int i;
- /* Don't perform FW aggregations in case of XDP */
- if (edev->xdp_prog)
- edev->gro_disable = 1;
-
if (edev->gro_disable)
return 0;
- if (edev->ndev->mtu > PAGE_SIZE) {
- edev->gro_disable = 1;
- return 0;
- }
-
for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
struct sw_rx_data *replace_buf = &tpa_info->buffer;
@@ -1273,6 +1269,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
err:
qede_free_sge_mem(edev, rxq);
edev->gro_disable = 1;
+ edev->ndev->features &= ~NETIF_F_GRO_HW;
return -ENOMEM;
}
@@ -1502,6 +1499,10 @@ static void qede_init_fp(struct qede_dev *edev)
else
fp->rxq->data_direction = DMA_FROM_DEVICE;
fp->rxq->dev = &edev->pdev->dev;
+
+ /* Driver has no error path from here */
+ WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
+ fp->rxq->rxq_id) < 0);
}
if (fp->type & QEDE_FASTPATH_TX) {
@@ -1515,7 +1516,7 @@ static void qede_init_fp(struct qede_dev *edev)
edev->ndev->name, queue_id);
}
- edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
+ edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
}
static int qede_set_real_num_queues(struct qede_dev *edev)
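The xdp_rxq_info_reg() in qede_init_fp() and the guarded xdp_rxq_info_unreg() in qede_free_fp_array() follow the intended lifecycle: register each RX queue once before packets reference it, unregister on teardown, and tolerate teardown paths where registration never happened. A hedged sketch against the API as it stands in this series:

    static int demo_rxq_setup(struct net_device *dev,
                              struct xdp_rxq_info *xdp_rxq, u16 rxq_id)
    {
        /* must precede any xdp_buff that points at this queue */
        return xdp_rxq_info_reg(xdp_rxq, dev, rxq_id);
    }

    static void demo_rxq_teardown(struct xdp_rxq_info *xdp_rxq)
    {
        /* safe on queues whose setup never ran or failed early */
        if (xdp_rxq_info_is_reg(xdp_rxq))
            xdp_rxq_info_unreg(xdp_rxq);
    }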
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f7080d0ab874..46b0372dd032 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3891,7 +3891,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
struct list_head *head = &mbx->cmd_q;
struct qlcnic_cmd_args *cmd = NULL;
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
while (!list_empty(head)) {
cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
@@ -3902,7 +3902,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
qlcnic_83xx_notify_cmd_completion(adapter, cmd);
}
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
}
static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
@@ -3938,12 +3938,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
{
struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
list_del(&cmd->list);
mbx->num_cmds--;
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
qlcnic_83xx_notify_cmd_completion(adapter, cmd);
}
@@ -4008,7 +4008,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
init_completion(&cmd->completion);
cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
list_add_tail(&cmd->list, &mbx->cmd_q);
mbx->num_cmds++;
@@ -4016,7 +4016,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
*timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
queue_work(mbx->work_q, &mbx->work);
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
return 0;
}
@@ -4112,15 +4112,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
spin_unlock_irqrestore(&mbx->aen_lock, flags);
- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);
if (list_empty(head)) {
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
return;
}
cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
mbx_ops->encode_cmd(adapter, cmd);
mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
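The _bh conversion matters because the mailbox queue lock is also taken from bottom-half context; a process-context holder that leaves softirqs enabled can be preempted on the same CPU by that bottom half, which then spins on the lock forever. The shape of the fix, sketched generically:

    /* If a lock is taken both in process context and from a bottom
     * half (timer, NAPI, ...), the process-context side must use the
     * _bh variant: it disables local softirqs so the bottom half
     * cannot preempt the holder on the same CPU and deadlock
     * spinning on the lock.
     */
    static void demo_enqueue(spinlock_t *lock, struct list_head *q,
                             struct list_head *item)
    {
        spin_lock_bh(lock);
        list_add_tail(item, q);
        spin_unlock_bh(lock);
    }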
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
index 5028fb4bec2b..4beedb8faa1e 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
@@ -114,8 +114,9 @@ struct emac_tpd {
#define TPD_INSTC_SET(tpd, val) BITS_SET((tpd)->word[3], 17, 17, val)
/* High 14-bit buffer address; the 64-bit address is
* {DESC_CTRL_11_TX_DATA_HIADDR[17:0],(register) BUFFER_ADDR_H, BUFFER_ADDR_L}
+ * Extend TPD_BUFFER_ADDR_H to [31, 18], because we never enable timestamping.
*/
-#define TPD_BUFFER_ADDR_H_SET(tpd, val) BITS_SET((tpd)->word[3], 18, 30, val)
+#define TPD_BUFFER_ADDR_H_SET(tpd, val) BITS_SET((tpd)->word[3], 18, 31, val)
/* Format D. Word offset from the 1st byte of this packet to start to calculate
* the custom checksum.
*/
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 18461fcb9815..53dbf1e163a8 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -47,6 +47,7 @@
#define MDIO_CLK_25_28 7
#define MDIO_WAIT_TIMES 1000
+#define MDIO_STATUS_DELAY_TIME 1
static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
@@ -65,7 +66,7 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
!(reg & (MDIO_START | MDIO_BUSY)),
- 100, MDIO_WAIT_TIMES * 100))
+ MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
return -EIO;
return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
@@ -88,8 +89,8 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
writel(reg, adpt->base + EMAC_MDIO_CTRL);
if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
- !(reg & (MDIO_START | MDIO_BUSY)), 100,
- MDIO_WAIT_TIMES * 100))
+ !(reg & (MDIO_START | MDIO_BUSY)),
+ MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
return -EIO;
return 0;
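readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) re-reads the register into val until cond holds or timeout_us expires, delaying sleep_us between reads; dropping the delay from 100 us to MDIO_STATUS_DELAY_TIME (1 us) keeps the same overall budget while reacting far sooner once MDIO_START/MDIO_BUSY clear. The call above, restated as a sketch with the macro parameters named (assumes the driver's emac_adapter fields used in the hunk):

    static int demo_mdio_wait_idle(struct emac_adapter *adpt)
    {
        u32 reg;

        /* poll every 1 us; give up after 100,000 us total */
        return readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
                                  !(reg & (MDIO_START | MDIO_BUSY)),
                                  MDIO_STATUS_DELAY_TIME,
                                  MDIO_WAIT_TIMES * 100) ? -EIO : 0;
    }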
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 70c92b649b29..13235baf4766 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -253,18 +253,18 @@ static int emac_open(struct net_device *netdev)
return ret;
}
- ret = emac_mac_up(adpt);
+ ret = adpt->phy.open(adpt);
if (ret) {
emac_mac_rx_tx_rings_free_all(adpt);
free_irq(irq->irq, irq);
return ret;
}
- ret = adpt->phy.open(adpt);
+ ret = emac_mac_up(adpt);
if (ret) {
- emac_mac_down(adpt);
emac_mac_rx_tx_rings_free_all(adpt);
free_irq(irq->irq, irq);
+ adpt->phy.close(adpt);
return ret;
}
@@ -615,8 +615,11 @@ static int emac_probe(struct platform_device *pdev)
u32 reg;
int ret;
- /* The TPD buffer address is limited to 45 bits. */
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(45));
+ /* The TPD buffer address is limited to:
+ * 1. PTP: 45 bits (not yet supported by the driver).
+ * 2. Non-PTP: 46 bits.
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
if (ret) {
dev_err(&pdev->dev, "could not set DMA mask\n");
return ret;
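The reordering in emac_open() restores strict unwind symmetry: acquire the rings, then the PHY, then bring the MAC up, and on failure release exactly what was acquired, in reverse. The general shape, with hypothetical helper names:

    struct demo_adpt;

    /* hypothetical helpers standing in for the emac calls */
    int demo_alloc_rings(struct demo_adpt *a);
    void demo_free_rings(struct demo_adpt *a);
    int demo_phy_open(struct demo_adpt *a);
    void demo_phy_close(struct demo_adpt *a);
    int demo_mac_up(struct demo_adpt *a);

    static int demo_open(struct demo_adpt *a)
    {
        int ret = demo_alloc_rings(a);

        if (ret)
            return ret;

        ret = demo_phy_open(a);
        if (ret)
            goto err_rings;

        ret = demo_mac_up(a);
        if (ret)
            goto err_phy;

        return 0;

    err_phy:
        demo_phy_close(a);
    err_rings:
        demo_free_rings(a);
        return ret;
    }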
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index df21e900f874..7e7704daf5f1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -143,11 +143,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- int ingress_format = RMNET_INGRESS_FORMAT_DEMUXING |
- RMNET_INGRESS_FORMAT_DEAGGREGATION |
- RMNET_INGRESS_FORMAT_MAP;
- int egress_format = RMNET_EGRESS_FORMAT_MUXING |
- RMNET_EGRESS_FORMAT_MAP;
+ u32 data_format = RMNET_INGRESS_FORMAT_DEAGGREGATION;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
struct rmnet_endpoint *ep;
@@ -181,13 +177,20 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
if (err)
goto err2;
- netdev_dbg(dev, "data format [ingress 0x%08X] [egress 0x%08X]\n",
- ingress_format, egress_format);
- port->egress_data_format = egress_format;
- port->ingress_data_format = ingress_format;
port->rmnet_mode = mode;
hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+
+ if (data[IFLA_VLAN_FLAGS]) {
+ struct ifla_vlan_flags *flags;
+
+ flags = nla_data(data[IFLA_VLAN_FLAGS]);
+ data_format = flags->flags & flags->mask;
+ }
+
+ netdev_dbg(dev, "data format [0x%08X]\n", data_format);
+ port->data_format = data_format;
+
return 0;
err2:
@@ -317,9 +320,49 @@ static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
+static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct net_device *real_dev;
+ struct rmnet_endpoint *ep;
+ struct rmnet_port *port;
+ u16 mux_id;
+
+ real_dev = __dev_get_by_index(dev_net(dev),
+ nla_get_u32(tb[IFLA_LINK]));
+
+ if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
+ return -ENODEV;
+
+ port = rmnet_get_port_rtnl(real_dev);
+
+ if (data[IFLA_VLAN_ID]) {
+ mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
+ ep = rmnet_get_endpoint(port, priv->mux_id);
+
+ hlist_del_init_rcu(&ep->hlnode);
+ hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+
+ ep->mux_id = mux_id;
+ priv->mux_id = mux_id;
+ }
+
+ if (data[IFLA_VLAN_FLAGS]) {
+ struct ifla_vlan_flags *flags;
+
+ flags = nla_data(data[IFLA_VLAN_FLAGS]);
+ port->data_format = flags->flags & flags->mask;
+ }
+
+ return 0;
+}
+
static size_t rmnet_get_size(const struct net_device *dev)
{
- return nla_total_size(2); /* IFLA_VLAN_ID */
+ return nla_total_size(2) /* IFLA_VLAN_ID */ +
+ nla_total_size(sizeof(struct ifla_vlan_flags)); /* IFLA_VLAN_FLAGS */
}
struct rtnl_link_ops rmnet_link_ops __read_mostly = {
@@ -331,6 +374,7 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
.newlink = rmnet_newlink,
.dellink = rmnet_dellink,
.get_size = rmnet_get_size,
+ .changelink = rmnet_changelink,
};
/* Needs either rcu_read_lock() or rtnl lock */
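rmnet deliberately reuses the VLAN netlink attribute slots: the mux id travels in IFLA_VLAN_ID and the data-format bits in IFLA_VLAN_FLAGS, where only bits set in the caller's mask take effect. A hedged parsing sketch of the pattern used by both newlink and changelink above:

    static void demo_parse(struct nlattr *data[], u16 *mux_id, u32 *fmt)
    {
        if (data[IFLA_VLAN_ID])
            *mux_id = nla_get_u16(data[IFLA_VLAN_ID]);

        if (data[IFLA_VLAN_FLAGS]) {
            struct ifla_vlan_flags *f = nla_data(data[IFLA_VLAN_FLAGS]);

            /* only bits the caller marked in .mask take effect */
            *fmt = f->flags & f->mask;
        }
    }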
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index c19259eea99e..00e4634100d3 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -32,8 +32,7 @@ struct rmnet_endpoint {
*/
struct rmnet_port {
struct net_device *dev;
- u32 ingress_data_format;
- u32 egress_data_format;
+ u32 data_format;
u8 nr_rmnet_devs;
u8 rmnet_mode;
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 08e4afc0ab39..601edec28c5f 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -15,6 +15,8 @@
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
+#include <linux/if_arp.h>
+#include <net/sock.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
@@ -64,19 +66,19 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_endpoint *ep;
+ u16 len, pad;
u8 mux_id;
- u16 len;
if (RMNET_MAP_GET_CD_BIT(skb)) {
- if (port->ingress_data_format
- & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
return rmnet_map_command(skb, port);
goto free_skb;
}
mux_id = RMNET_MAP_GET_MUX_ID(skb);
- len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);
+ pad = RMNET_MAP_GET_PAD(skb);
+ len = RMNET_MAP_GET_LENGTH(skb) - pad;
if (mux_id >= RMNET_MAX_LOGICAL_EP)
goto free_skb;
@@ -89,8 +91,14 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
/* Subtract MAP header */
skb_pull(skb, sizeof(struct rmnet_map_header));
- skb_trim(skb, len);
rmnet_set_skb_proto(skb);
+
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
+ if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ skb_trim(skb, len);
rmnet_deliver_skb(skb);
return;
@@ -104,8 +112,17 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
{
struct sk_buff *skbn;
- if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
- while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
+ if (skb->dev->type == ARPHRD_ETHER) {
+ if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ skb_push(skb, ETH_HLEN);
+ }
+
+ if (port->data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
+ while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
__rmnet_map_ingress_handler(skbn, port);
consume_skb(skb);
@@ -124,29 +141,32 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
additional_header_len = 0;
required_headroom = sizeof(struct rmnet_map_header);
+ if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) {
+ additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
+ required_headroom += additional_header_len;
+ }
+
if (skb_headroom(skb) < required_headroom) {
if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
goto fail;
}
+ if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
+ rmnet_map_checksum_uplink_packet(skb, orig_dev);
+
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
if (!map_header)
goto fail;
- if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
- if (mux_id == 0xff)
- map_header->mux_id = 0;
- else
- map_header->mux_id = mux_id;
- }
+ map_header->mux_id = mux_id;
skb->protocol = htons(ETH_P_MAP);
- return RMNET_MAP_SUCCESS;
+ return 0;
fail:
kfree_skb(skb);
- return RMNET_MAP_CONSUMED;
+ return -ENOMEM;
}
static void
@@ -178,8 +198,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
- if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP)
- rmnet_map_ingress_handler(skb, port);
+ rmnet_map_ingress_handler(skb, port);
break;
case RMNET_EPMODE_BRIDGE:
rmnet_bridge_handler(skb, port->bridge_ep);
@@ -201,6 +220,8 @@ void rmnet_egress_handler(struct sk_buff *skb)
struct rmnet_priv *priv;
u8 mux_id;
+ sk_pacing_shift_update(skb->sk, 8);
+
orig_dev = skb->dev;
priv = netdev_priv(orig_dev);
skb->dev = priv->real_dev;
@@ -212,19 +233,8 @@ void rmnet_egress_handler(struct sk_buff *skb)
return;
}
- if (port->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
- switch (rmnet_map_egress_handler(skb, port, mux_id, orig_dev)) {
- case RMNET_MAP_CONSUMED:
- return;
-
- case RMNET_MAP_SUCCESS:
- break;
-
- default:
- kfree_skb(skb);
- return;
- }
- }
+ if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
+ return;
rmnet_vnd_tx_fixup(skb, orig_dev);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 3af3fe7b5457..6ce31e29136d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -30,15 +30,6 @@ struct rmnet_map_control_command {
};
} __aligned(1);
-enum rmnet_map_results {
- RMNET_MAP_SUCCESS,
- RMNET_MAP_CONSUMED,
- RMNET_MAP_GENERAL_FAILURE,
- RMNET_MAP_NOT_ENABLED,
- RMNET_MAP_FAILED_AGGREGATION,
- RMNET_MAP_FAILED_MUX
-};
-
enum rmnet_map_commands {
RMNET_MAP_COMMAND_NONE,
RMNET_MAP_COMMAND_FLOW_DISABLE,
@@ -56,6 +47,22 @@ struct rmnet_map_header {
u16 pkt_len;
} __aligned(1);
+struct rmnet_map_dl_csum_trailer {
+ u8 reserved1;
+ u8 valid:1;
+ u8 reserved2:7;
+ u16 csum_start_offset;
+ u16 csum_length;
+ __be16 csum_value;
+} __aligned(1);
+
+struct rmnet_map_ul_csum_header {
+ __be16 csum_start_offset;
+ u16 csum_insert_offset:14;
+ u16 udp_ip4_ind:1;
+ u16 csum_enabled:1;
+} __aligned(1);
+
#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
(Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
@@ -76,10 +83,13 @@ struct rmnet_map_header {
#define RMNET_MAP_NO_PAD_BYTES 0
#define RMNET_MAP_ADD_PAD_BYTES 1
-u8 rmnet_map_demultiplex(struct sk_buff *skb);
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb);
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+ struct rmnet_port *port);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev);
#endif /* _RMNET_MAP_H_ */
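The new structs pair with the GET macros above: a downlink MAP frame is laid out as [rmnet_map_header][payload + pad][rmnet_map_dl_csum_trailer], so the trailer begins pkt_len bytes past the header. A hedged locator sketch:

    static struct rmnet_map_dl_csum_trailer *
    demo_dl_trailer(struct sk_buff *skb)
    {
        struct rmnet_map_header *maph = (struct rmnet_map_header *)skb->data;

        /* pkt_len covers the payload plus its padding */
        return (struct rmnet_map_dl_csum_trailer *)
            (skb->data + sizeof(*maph) + ntohs(maph->pkt_len));
    }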
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 51e604923ac1..6bc328fb88e1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -58,11 +58,24 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
}
static void rmnet_map_send_ack(struct sk_buff *skb,
- unsigned char type)
+ unsigned char type,
+ struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
int xmit_status;
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
+ if (skb->len < sizeof(struct rmnet_map_header) +
+ RMNET_MAP_GET_LENGTH(skb) +
+ sizeof(struct rmnet_map_dl_csum_trailer)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ skb_trim(skb, skb->len -
+ sizeof(struct rmnet_map_dl_csum_trailer));
+ }
+
skb->protocol = htons(ETH_P_MAP);
cmd = RMNET_MAP_GET_CMD_START(skb);
@@ -100,5 +113,5 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
break;
}
if (rc == RMNET_MAP_COMMAND_ACK)
- rmnet_map_send_ack(skb, rc);
+ rmnet_map_send_ack(skb, rc, port);
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 86b8c758f94e..c74a6c56d315 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -14,6 +14,9 @@
*/
#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
@@ -21,6 +24,233 @@
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
+static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
+ const void *txporthdr)
+{
+ __sum16 *check = NULL;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ check = &(((struct tcphdr *)txporthdr)->check);
+ break;
+
+ case IPPROTO_UDP:
+ check = &(((struct udphdr *)txporthdr)->check);
+ break;
+
+ default:
+ check = NULL;
+ break;
+ }
+
+ return check;
+}
+
+static int
+rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
+ struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+ __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
+ u16 csum_value, csum_value_final;
+ struct iphdr *ip4h;
+ void *txporthdr;
+ __be16 addend;
+
+ ip4h = (struct iphdr *)(skb->data);
+ if ((ntohs(ip4h->frag_off) & IP_MF) ||
+ ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
+ return -EOPNOTSUPP;
+
+ txporthdr = skb->data + ip4h->ihl * 4;
+
+ csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
+
+ if (!csum_field)
+ return -EPROTONOSUPPORT;
+
+ /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
+ if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
+ return 0;
+
+ csum_value = ~ntohs(csum_trailer->csum_value);
+ hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
+ ip_payload_csum = csum16_sub((__force __sum16)csum_value,
+ (__force __be16)hdr_csum);
+
+ pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+ ntohs(ip4h->tot_len) - ip4h->ihl * 4,
+ ip4h->protocol, 0);
+ addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+ pseudo_csum = csum16_add(ip_payload_csum, addend);
+
+ addend = (__force __be16)ntohs((__force __be16)*csum_field);
+ csum_temp = ~csum16_sub(pseudo_csum, addend);
+ csum_value_final = (__force u16)csum_temp;
+
+ if (unlikely(csum_value_final == 0)) {
+ switch (ip4h->protocol) {
+ case IPPROTO_UDP:
+ /* RFC 768 - DL4 1's complement rule for UDP csum 0 */
+ csum_value_final = ~csum_value_final;
+ break;
+
+ case IPPROTO_TCP:
+ /* DL4 Non-RFC compliant TCP checksum found */
+ if (*csum_field == (__force __sum16)0xFFFF)
+ csum_value_final = ~csum_value_final;
+ break;
+ }
+ }
+
+ if (csum_value_final == ntohs((__force __be16)*csum_field))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+ struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+ __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
+ u16 csum_value, csum_value_final;
+ __be16 ip6_hdr_csum, addend;
+ struct ipv6hdr *ip6h;
+ void *txporthdr;
+ u32 length;
+
+ ip6h = (struct ipv6hdr *)(skb->data);
+
+ txporthdr = skb->data + sizeof(struct ipv6hdr);
+ csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
+
+ if (!csum_field)
+ return -EPROTONOSUPPORT;
+
+ csum_value = ~ntohs(csum_trailer->csum_value);
+ ip6_hdr_csum = (__force __be16)
+ ~ntohs((__force __be16)ip_compute_csum(ip6h,
+ (int)(txporthdr - (void *)(skb->data))));
+ ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
+ ip6_hdr_csum);
+
+ length = (ip6h->nexthdr == IPPROTO_UDP) ?
+ ntohs(((struct udphdr *)txporthdr)->len) :
+ ntohs(ip6h->payload_len);
+ pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ length, ip6h->nexthdr, 0));
+ addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+ pseudo_csum = csum16_add(ip6_payload_csum, addend);
+
+ addend = (__force __be16)ntohs((__force __be16)*csum_field);
+ csum_temp = ~csum16_sub(pseudo_csum, addend);
+ csum_value_final = (__force u16)csum_temp;
+
+ if (unlikely(csum_value_final == 0)) {
+ switch (ip6h->nexthdr) {
+ case IPPROTO_UDP:
+ /* RFC 2460 section 8.1
+ * DL6 One's complement rule for UDP checksum 0
+ */
+ csum_value_final = ~csum_value_final;
+ break;
+
+ case IPPROTO_TCP:
+ /* DL6 Non-RFC compliant TCP checksum found */
+ if (*csum_field == (__force __sum16)0xFFFF)
+ csum_value_final = ~csum_value_final;
+ break;
+ }
+ }
+
+ if (csum_value_final == ntohs((__force __be16)*csum_field))
+ return 0;
+ else
+ return -EINVAL;
+}
+#endif
+
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+{
+ struct iphdr *ip4h = (struct iphdr *)iphdr;
+ void *txphdr;
+ u16 *csum;
+
+ txphdr = iphdr + ip4h->ihl * 4;
+
+ if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
+ csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
+ *csum = ~(*csum);
+ }
+}
+
+static void
+rmnet_map_ipv4_ul_csum_header(void *iphdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+{
+ struct iphdr *ip4h = (struct iphdr *)iphdr;
+ __be16 *hdr = (__be16 *)ul_header, offset;
+
+ offset = htons((__force u16)(skb_transport_header(skb) -
+ (unsigned char *)iphdr));
+ ul_header->csum_start_offset = offset;
+ ul_header->csum_insert_offset = skb->csum_offset;
+ ul_header->csum_enabled = 1;
+ if (ip4h->protocol == IPPROTO_UDP)
+ ul_header->udp_ip4_ind = 1;
+ else
+ ul_header->udp_ip4_ind = 0;
+
+ /* Changing remaining fields to network order */
+ hdr++;
+ *hdr = htons((__force u16)*hdr);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+{
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+ void *txphdr;
+ u16 *csum;
+
+ txphdr = ip6hdr + sizeof(struct ipv6hdr);
+
+ if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
+ csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
+ *csum = ~(*csum);
+ }
+}
+
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+{
+ __be16 *hdr = (__be16 *)ul_header, offset;
+
+ offset = htons((__force u16)(skb_transport_header(skb) -
+ (unsigned char *)ip6hdr));
+ ul_header->csum_start_offset = offset;
+ ul_header->csum_insert_offset = skb->csum_offset;
+ ul_header->csum_enabled = 1;
+ ul_header->udp_ip4_ind = 0;
+
+ /* Changing remaining fields to network order */
+ hdr++;
+ *hdr = htons((__force u16)*hdr);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+}
+#endif
+
/* Adds MAP header to front of skb->data
* Padding is calculated and set appropriately in MAP header. Mux ID is
* initialized to 0.
@@ -32,9 +262,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
u32 padding, map_datalen;
u8 *padbytes;
- if (skb_headroom(skb) < sizeof(struct rmnet_map_header))
- return NULL;
-
map_datalen = skb->len - hdrlen;
map_header = (struct rmnet_map_header *)
skb_push(skb, sizeof(struct rmnet_map_header));
@@ -69,7 +296,8 @@ done:
* returned, indicating that there are no more packets to deaggregate. Caller
* is responsible for freeing the original skb.
*/
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+ struct rmnet_port *port)
{
struct rmnet_map_header *maph;
struct sk_buff *skbn;
@@ -81,6 +309,9 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
maph = (struct rmnet_map_header *)skb->data;
packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
+ if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)
+ packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+
if (((int)skb->len - (int)packet_len) < 0)
return NULL;
@@ -100,3 +331,73 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
return skbn;
}
+
+/* Validates packet checksums. Function takes a pointer to
+ * the beginning of a buffer which contains the IP payload +
+ * padding + checksum trailer.
+ * Only IPv4 and IPv6 are supported along with TCP & UDP.
+ * Fragmented or tunneled packets are not supported.
+ */
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
+{
+ struct rmnet_map_dl_csum_trailer *csum_trailer;
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ return -EOPNOTSUPP;
+
+ csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
+
+ if (!csum_trailer->valid)
+ return -EINVAL;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+#if IS_ENABLED(CONFIG_IPV6)
+ return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
+#else
+ return -EPROTONOSUPPORT;
+#endif
+
+ return 0;
+}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev)
+{
+ struct rmnet_map_ul_csum_header *ul_header;
+ void *iphdr;
+
+ ul_header = (struct rmnet_map_ul_csum_header *)
+ skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
+
+ if (unlikely(!(orig_dev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
+ goto sw_csum;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ iphdr = (char *)ul_header +
+ sizeof(struct rmnet_map_ul_csum_header);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+ return;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_IPV6)
+ rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+ return;
+#else
+ goto sw_csum;
+#endif
+ }
+ }
+
+sw_csum:
+ ul_header->csum_start_offset = 0;
+ ul_header->csum_insert_offset = 0;
+ ul_header->csum_enabled = 0;
+ ul_header->udp_ip4_ind = 0;
+}
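The trailer validation above is all 16-bit one's-complement arithmetic: csum16_add() folds the end-around carry back in, and subtracting a term is adding its complement. A runnable userspace restatement of the two primitives:

    #include <stdint.h>
    #include <stdio.h>

    /* One's-complement 16-bit add with end-around carry, the
     * primitive behind the kernel's csum16_add()/csum16_sub().
     */
    static uint16_t demo_csum16_add(uint16_t a, uint16_t b)
    {
        uint32_t sum = (uint32_t)a + b;

        return (uint16_t)((sum & 0xffff) + (sum >> 16));
    }

    static uint16_t demo_csum16_sub(uint16_t a, uint16_t b)
    {
        /* subtracting == adding the one's complement */
        return demo_csum16_add(a, (uint16_t)~b);
    }

    int main(void)
    {
        /* removing a term and re-adding it round-trips exactly */
        uint16_t total = demo_csum16_add(0x1abc, 0x9d01);
        uint16_t without = demo_csum16_sub(total, 0x9d01);

        printf("%04x -> %04x\n", total, demo_csum16_add(without, 0x9d01));
        return 0;
    }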
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
index 49102f922b31..de0143eaa05a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -19,14 +19,10 @@
#define RMNET_TX_QUEUE_LEN 1000
/* Constants */
-#define RMNET_EGRESS_FORMAT_MAP BIT(1)
-#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(2)
-#define RMNET_EGRESS_FORMAT_MUXING BIT(3)
-
-#define RMNET_INGRESS_FORMAT_MAP BIT(1)
-#define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(2)
-#define RMNET_INGRESS_FORMAT_DEMUXING BIT(3)
-#define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(4)
+#define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(0)
+#define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(1)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4 BIT(2)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4 BIT(3)
/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 9caa5e387450..570a227acdd8 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -185,6 +185,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
if (ep->egress_dev)
return -EINVAL;
+ if (rmnet_get_endpoint(port, id))
+ return -EBUSY;
+
+ rmnet_dev->hw_features = NETIF_F_RXCSUM;
+ rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ rmnet_dev->hw_features |= NETIF_F_SG;
+
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e7ab23e87de2..81045dfa1cd8 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -748,8 +748,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
mss = skb_shinfo(skb)->gso_size;
if (mss > MSSMask) {
- WARN_ONCE(1, "Net bug: GSO size %d too large for 8139CP\n",
- mss);
+ netdev_WARN_ONCE(dev, "Net bug: GSO size %d too large for 8139CP\n",
+ mss);
goto out_dma_error;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index fc0d5fa65ad4..0bf7d1759250 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1395,7 +1395,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
{
void __iomem *ioaddr = tp->mmio_addr;
- return RTL_R8(IBISR0) & 0x02;
+ return RTL_R8(IBISR0) & 0x20;
}
static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
@@ -1403,7 +1403,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;
RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
- rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000);
+ rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
}
@@ -1675,33 +1675,24 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
}
}
-static void __rtl8169_check_link_status(struct net_device *dev,
- struct rtl8169_private *tp,
- void __iomem *ioaddr, bool pm)
+static void rtl8169_check_link_status(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr)
{
if (tp->link_ok(ioaddr)) {
rtl_link_chg_patch(tp);
/* This is to cancel a scheduled suspend if there's one. */
- if (pm)
- pm_request_resume(&tp->pci_dev->dev);
+ pm_request_resume(&tp->pci_dev->dev);
netif_carrier_on(dev);
if (net_ratelimit())
netif_info(tp, ifup, dev, "link up\n");
} else {
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
- if (pm)
- pm_schedule_suspend(&tp->pci_dev->dev, 5000);
+ pm_runtime_idle(&tp->pci_dev->dev);
}
}
-static void rtl8169_check_link_status(struct net_device *dev,
- struct rtl8169_private *tp,
- void __iomem *ioaddr)
-{
- __rtl8169_check_link_status(dev, tp, ioaddr, false);
-}
-
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -2244,19 +2235,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
void __iomem *ioaddr = tp->mmio_addr;
dma_addr_t paddr = tp->counters_phys_addr;
u32 cmd;
- bool ret;
RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
+ RTL_R32(CounterAddrHigh);
cmd = (u64)paddr & DMA_BIT_MASK(32);
RTL_W32(CounterAddrLow, cmd);
RTL_W32(CounterAddrLow, cmd | counter_cmd);
- ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
-
- RTL_W32(CounterAddrLow, 0);
- RTL_W32(CounterAddrHigh, 0);
-
- return ret;
+ return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
}
static bool rtl8169_reset_counters(struct net_device *dev)
@@ -4643,16 +4629,6 @@ static void rtl8169_phy_timer(struct timer_list *t)
rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
}
-static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
- void __iomem *ioaddr)
-{
- iounmap(ioaddr);
- pci_release_regions(pdev);
- pci_clear_mwi(pdev);
- pci_disable_device(pdev);
- free_netdev(dev);
-}
-
DECLARE_RTL_COND(rtl_phy_reset_cond)
{
return tp->phy_reset_pending(tp);
@@ -4784,14 +4760,6 @@ static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data
return -EOPNOTSUPP;
}
-static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
-{
- if (tp->features & RTL_FEATURE_MSI) {
- pci_disable_msi(pdev);
- tp->features &= ~RTL_FEATURE_MSI;
- }
-}
-
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
{
struct mdio_ops *ops = &tp->mdio_ops;
@@ -7764,7 +7732,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
rtl8169_pcierr_interrupt(dev);
if (status & LinkChg)
- __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
+ rtl8169_check_link_status(dev, tp, tp->mmio_addr);
rtl_irq_enable_all(tp);
}
@@ -7977,7 +7945,7 @@ static int rtl_open(struct net_device *dev)
rtl_unlock_work(tp);
tp->saved_wolopts = 0;
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
rtl8169_check_link_status(dev, tp, ioaddr);
out:
@@ -8121,8 +8089,10 @@ static int rtl8169_runtime_suspend(struct device *device)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if (!tp->TxDescArray)
+ if (!tp->TxDescArray) {
+ rtl_pll_power_down(tp);
return 0;
+ }
rtl_lock_work(tp);
tp->saved_wolopts = __rtl8169_get_wol(tp);
@@ -8164,9 +8134,11 @@ static int rtl8169_runtime_idle(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
- return tp->TxDescArray ? -EBUSY : 0;
+ if (!netif_running(dev) || !netif_carrier_ok(dev))
+ pm_schedule_suspend(device, 10000);
+
+ return -EBUSY;
}
static const struct dev_pm_ops rtl8169_pm_ops = {
@@ -8213,9 +8185,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- struct device *d = &pdev->dev;
-
- pm_runtime_get_sync(d);
rtl8169_net_suspend(dev);
@@ -8233,8 +8202,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
pci_wake_from_d3(pdev, true);
pci_set_power_state(pdev, PCI_D3hot);
}
-
- pm_runtime_put_noidle(d);
}
static void rtl_remove_one(struct pci_dev *pdev)
@@ -8256,9 +8223,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
- dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters),
- tp->counters, tp->counters_phys_addr);
-
rtl_release_firmware(tp);
if (pci_dev_run_wake(pdev))
@@ -8266,9 +8230,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
/* restore original MAC address */
rtl_rar_set(tp, dev->perm_addr);
-
- rtl_disable_msi(pdev, tp);
- rtl8169_release_board(pdev, dev, tp->mmio_addr);
}
static const struct net_device_ops rtl_netdev_ops = {
@@ -8445,11 +8406,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
MODULENAME, RTL8169_VERSION);
}
- dev = alloc_etherdev(sizeof (*tp));
- if (!dev) {
- rc = -ENOMEM;
- goto out;
- }
+ dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
+ if (!dev)
+ return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
dev->netdev_ops = &rtl_netdev_ops;
@@ -8472,13 +8431,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
PCIE_LINK_STATE_CLKPM);
/* enable device (incl. PCI PM wakeup and hotplug setup) */
- rc = pci_enable_device(pdev);
+ rc = pcim_enable_device(pdev);
if (rc < 0) {
netif_err(tp, probe, dev, "enable failure\n");
- goto err_out_free_dev_1;
+ return rc;
}
- if (pci_set_mwi(pdev) < 0)
+ if (pcim_set_mwi(pdev) < 0)
netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
/* make sure PCI base addr 1 is MMIO */
@@ -8486,30 +8445,28 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_err(tp, probe, dev,
"region #%d not an MMIO resource, aborting\n",
region);
- rc = -ENODEV;
- goto err_out_mwi_2;
+ return -ENODEV;
}
/* check for weird/broken PCI region reporting */
if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
netif_err(tp, probe, dev,
"Invalid PCI region size(s), aborting\n");
- rc = -ENODEV;
- goto err_out_mwi_2;
+ return -ENODEV;
}
rc = pci_request_regions(pdev, MODULENAME);
if (rc < 0) {
netif_err(tp, probe, dev, "could not request regions\n");
- goto err_out_mwi_2;
+ return rc;
}
/* ioremap MMIO region */
- ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
+ ioaddr = devm_ioremap(&pdev->dev, pci_resource_start(pdev, region),
+ R8169_REGS_SIZE);
if (!ioaddr) {
netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
- rc = -EIO;
- goto err_out_free_res_3;
+ return -EIO;
}
tp->mmio_addr = ioaddr;
@@ -8535,7 +8492,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
netif_err(tp, probe, dev, "DMA configuration failed\n");
- goto err_out_unmap_4;
+ return rc;
}
}
@@ -8697,16 +8654,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
- tp->counters = dma_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
- &tp->counters_phys_addr, GFP_KERNEL);
- if (!tp->counters) {
- rc = -ENOMEM;
- goto err_out_msi_5;
- }
+ tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
+ &tp->counters_phys_addr,
+ GFP_KERNEL);
+ if (!tp->counters)
+ return -ENOMEM;
rc = register_netdev(dev);
if (rc < 0)
- goto err_out_cnt_6;
+ return rc;
pci_set_drvdata(pdev, dev);
@@ -8730,30 +8686,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl8168_driver_start(tp);
}
- if (pci_dev_run_wake(pdev))
- pm_runtime_put_noidle(&pdev->dev);
-
netif_carrier_off(dev);
-out:
- return rc;
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_sync(&pdev->dev);
-err_out_cnt_6:
- dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
- tp->counters_phys_addr);
-err_out_msi_5:
- netif_napi_del(&tp->napi);
- rtl_disable_msi(pdev, tp);
-err_out_unmap_4:
- iounmap(ioaddr);
-err_out_free_res_3:
- pci_release_regions(pdev);
-err_out_mwi_2:
- pci_clear_mwi(pdev);
- pci_disable_device(pdev);
-err_out_free_dev_1:
- free_netdev(dev);
- goto out;
+ return 0;
}
static struct pci_driver rtl8169_pci_driver = {
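The probe conversion swaps manual setup for managed helpers (devm_alloc_etherdev(), pcim_enable_device(), devm_ioremap(), dmam_alloc_coherent()); the device core releases these automatically on probe failure and on removal, which is what lets the err_out_* unwind ladder collapse into plain returns. A compressed sketch of the resulting shape, with hypothetical sizes and BAR numbers:

    struct demo_priv {
        void __iomem *regs;
    };

    #define DEMO_REGS_SIZE 256 /* hypothetical BAR window size */

    static int demo_probe(struct pci_dev *pdev)
    {
        struct net_device *dev;
        void __iomem *regs;
        int rc;

        dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct demo_priv));
        if (!dev)
            return -ENOMEM; /* nothing to unwind */

        rc = pcim_enable_device(pdev);
        if (rc < 0)
            return rc; /* etherdev auto-freed */

        regs = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 2),
                            DEMO_REGS_SIZE);
        if (!regs)
            return -EIO; /* both prior steps auto-undone */

        ((struct demo_priv *)netdev_priv(dev))->regs = regs;
        return 0;
    }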
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 2b962d349f5f..c87f57ca4437 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2205,8 +2205,7 @@ out_dma_free:
if (chip_id != RCAR_GEN2)
ravb_ptp_stop(ndev);
out_release:
- if (ndev)
- free_netdev(ndev);
+ free_netdev(ndev);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -2308,32 +2307,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
struct ravb_private *priv = netdev_priv(ndev);
int ret = 0;
- if (priv->wol_enabled) {
- /* Reduce the usecount of the clock to zero and then
- * restore it to its original value. This is done to force
- * the clock to be re-enabled which is a workaround
- * for renesas-cpg-mssr driver which do not enable clocks
- * when resuming from PSCI suspend/resume.
- *
- * Without this workaround the driver fails to communicate
- * with the hardware if WoL was enabled when the system
- * entered PSCI suspend. This is due to that if WoL is enabled
- * we explicitly keep the clock from being turned off when
- * suspending, but in PSCI sleep power is cut so the clock
- * is disabled anyhow, the clock driver is not aware of this
- * so the clock is not turned back on when resuming.
- *
- * TODO: once the renesas-cpg-mssr suspend/resume is working
- * this clock dance should be removed.
- */
- clk_disable(priv->clk);
- clk_disable(priv->clk);
- clk_enable(priv->clk);
- clk_enable(priv->clk);
-
- /* Set reset mode to rearm the WoL logic */
+ /* If WoL is enabled, set reset mode to rearm the WoL logic */
+ if (priv->wol_enabled)
ravb_write(ndev, CCC_OPC_RESET, CCC);
- }
/* All registers have been reset to default values.
* Restore all registers which were set up at probe time and
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index db72d13cebb9..a197e11f3a56 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWNLCR0] = 0x0090,
[FWALCR0] = 0x0094,
[TXNLCR1] = 0x00a0,
- [TXALCR1] = 0x00a0,
+ [TXALCR1] = 0x00a4,
[RXNLCR1] = 0x00a8,
[RXALCR1] = 0x00ac,
[FWNLCR1] = 0x00b0,
@@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWNLCR0] = 0x0090,
[FWALCR0] = 0x0094,
[TXNLCR1] = 0x00a0,
- [TXALCR1] = 0x00a0,
+ [TXALCR1] = 0x00a4,
[RXNLCR1] = 0x00a8,
[RXALCR1] = 0x00ac,
[FWNLCR1] = 0x00b0,
@@ -1892,6 +1892,16 @@ static int sh_eth_phy_init(struct net_device *ndev)
return PTR_ERR(phydev);
}
+ /* mask with MAC supported features */
+ if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
+ int err = phy_set_max_speed(phydev, SPEED_100);
+ if (err) {
+ netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
+ phy_disconnect(phydev);
+ return err;
+ }
+ }
+
phy_attached_info(phydev);
return 0;
@@ -2079,8 +2089,8 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
add_reg(CSMR);
if (cd->select_mii)
add_reg(RMII_MII);
- add_reg(ARSTR);
if (cd->tsu) {
+ add_tsu_reg(ARSTR);
add_tsu_reg(TSU_CTRST);
add_tsu_reg(TSU_FWEN0);
add_tsu_reg(TSU_FWEN1);
@@ -3115,7 +3125,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
const struct platform_device_id *id = platform_get_device_id(pdev);
struct sh_eth_private *mdp;
struct net_device *ndev;
- int ret, devno;
+ int ret;
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -3127,10 +3137,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- devno = pdev->id;
- if (devno < 0)
- devno = 0;
-
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto out_release;
@@ -3212,25 +3218,43 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
eth_hw_addr_random(ndev);
}
- /* ioremap the TSU registers */
if (mdp->cd->tsu) {
+ int port = pdev->id < 0 ? 0 : pdev->id % 2;
struct resource *rtsu;
+
rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
- if (IS_ERR(mdp->tsu_addr)) {
- ret = PTR_ERR(mdp->tsu_addr);
+ if (!rtsu) {
+ dev_err(&pdev->dev, "no TSU resource\n");
+ ret = -ENODEV;
goto out_release;
}
- mdp->port = devno % 2;
+ /* Only the first of the two ports sharing this TSU can request
+ * the memory region; requesting it again for the second port
+ * would fail.
+ */
+ if (port == 0 &&
+ !devm_request_mem_region(&pdev->dev, rtsu->start,
+ resource_size(rtsu),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "can't request TSU resource.\n");
+ ret = -EBUSY;
+ goto out_release;
+ }
+ /* ioremap the TSU registers */
+ mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
+ resource_size(rtsu));
+ if (!mdp->tsu_addr) {
+ dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
+ ret = -ENOMEM;
+ goto out_release;
+ }
+ mdp->port = port;
ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
- }
- /* initialize first or needed device */
- if (!devno || pd->needs_init) {
- if (mdp->cd->chip_reset)
- mdp->cd->chip_reset(ndev);
+ /* Need to init only the first port of the two sharing a TSU */
+ if (port == 0) {
+ if (mdp->cd->chip_reset)
+ mdp->cd->chip_reset(ndev);
- if (mdp->cd->tsu) {
/* TSU init (Init only)*/
sh_eth_tsu_init(mdp);
}
@@ -3272,8 +3296,7 @@ out_napi_del:
out_release:
/* net_dev free */
- if (ndev)
- free_netdev(ndev);
+ free_netdev(ndev);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
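Note: devm_ioremap_resource() both requests and maps a region, so it cannot serve two ports sharing one TSU block; the second port's request would fail. The hunk above therefore splits the steps, requesting once and mapping twice. A condensed sketch of that request-once/map-twice pattern (the helper name map_shared_tsu is illustrative, not from the patch):

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>

/* Illustrative only: the region may be reserved just once, so only the
 * even-numbered port requests it; both ports may then map it.
 */
static void __iomem *map_shared_tsu(struct platform_device *pdev,
				    struct resource *rtsu, int port)
{
	if (port == 0 &&
	    !devm_request_mem_region(&pdev->dev, rtsu->start,
				     resource_size(rtsu),
				     dev_name(&pdev->dev)))
		return NULL;

	return devm_ioremap(&pdev->dev, rtsu->start, resource_size(rtsu));
}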
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 6d6fb8cf3e7c..6473cc68c2d5 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -789,7 +789,6 @@ static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
ofdpa_flags_nowait(flags),
ofdpa_cmd_flow_tbl_add,
found, NULL, NULL);
- return 0;
}
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index e566dbb3343d..75fbf58e421c 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -160,11 +160,31 @@ static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}
+/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
+ * I/O space and BAR 2(&3) for memory. On SFC9250 (Medford2), there is no I/O
+ * BAR; PFs use BAR 0/1 for memory.
+ */
+static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
+{
+ switch (efx->pci_dev->device) {
+ case 0x0b03: /* SFC9250 PF */
+ return 0;
+ default:
+ return 2;
+ }
+}
+
+/* All VFs use BAR 0/1 for memory */
+static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
+{
+ return 0;
+}
+
static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
int bar;
- bar = efx->type->mem_bar;
+ bar = efx->type->mem_bar(efx);
return resource_size(&efx->pci_dev->resource[bar]);
}
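Note: mem_bar moves from a constant in struct efx_nic_type to a method because SFC9250 keeps its memory window in BAR 0 while earlier EF10 PFs use BAR 2. A hedged sketch of what the indirection enables (map_mem_bar is illustrative; efx->type->mem_bar(efx) is from the patch):

#include <linux/pci.h>
#include <linux/io.h>

/* Illustrative only: BAR selection is now a per-device runtime
 * decision instead of a compile-time constant.
 */
static void __iomem *map_mem_bar(struct efx_nic *efx)
{
	unsigned int bar = efx->type->mem_bar(efx);

	return ioremap(pci_resource_start(efx->pci_dev, bar),
		       resource_size(&efx->pci_dev->resource[bar]));
}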
@@ -213,7 +233,7 @@ static int efx_ef10_get_vf_index(struct efx_nic *efx)
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
@@ -257,9 +277,70 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
return -ENODEV;
}
+ if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
+ u8 vi_window_mode = MCDI_BYTE(outbuf,
+ GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
+
+ switch (vi_window_mode) {
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+ efx->vi_stride = 8192;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+ efx->vi_stride = 16384;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+ efx->vi_stride = 65536;
+ break;
+ default:
+ netif_err(efx, probe, efx->net_dev,
+ "Unrecognised VI window mode %d\n",
+ vi_window_mode);
+ return -EIO;
+ }
+ netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
+ efx->vi_stride);
+ } else {
+ /* keep default VI stride */
+ netif_dbg(efx, probe, efx->net_dev,
+ "firmware did not report VI window mode, assuming vi_stride = %u\n",
+ efx->vi_stride);
+ }
+
+ if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
+ efx->num_mac_stats = MCDI_WORD(outbuf,
+ GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
+ netif_dbg(efx, probe, efx->net_dev,
+ "firmware reports num_mac_stats = %u\n",
+ efx->num_mac_stats);
+ } else {
+ /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
+ netif_dbg(efx, probe, efx->net_dev,
+ "firmware did not report num_mac_stats, assuming %u\n",
+ efx->num_mac_stats);
+ }
+
return 0;
}
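Note: the VI window mode reported here fixes the spacing between per-VI register pages. The addressing consequence, reduced to a one-liner (vi_reg_offset is an illustrative stand-alone reduction; the patch's efx_paged_reg() in io.h does the same arithmetic):

/* Illustrative only: VI n's copy of a per-VI register sits at
 * n * vi_stride + reg from the start of the memory BAR; the 8K, 16K
 * and 64K window modes just change the multiplier.
 */
static unsigned int vi_reg_offset(unsigned int vi_stride,
				  unsigned int vi, unsigned int reg)
{
	return vi * vi_stride + reg;
}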
+static void efx_ef10_read_licensed_features(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
+ MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN))
+ return;
+
+ nic_data->licensed_features = MCDI_QWORD(outbuf,
+ LICENSING_V3_OUT_LICENSED_FEATURES);
+}
+
static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
@@ -589,17 +670,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data;
int i, rc;
- /* We can have one VI for each 8K region. However, until we
- * use TX option descriptors we need two TX queues per channel.
- */
- efx->max_channels = min_t(unsigned int,
- EFX_MAX_CHANNELS,
- efx_ef10_mem_map_size(efx) /
- (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
- efx->max_tx_channels = efx->max_channels;
- if (WARN_ON(efx->max_channels == 0))
- return -EIO;
-
nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
if (!nic_data)
return -ENOMEM;
@@ -671,6 +741,22 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc < 0)
goto fail5;
+ efx_ef10_read_licensed_features(efx);
+
+ /* We can have one VI for each vi_stride-byte region.
+ * However, until we use TX option descriptors we need two TX queues
+ * per channel.
+ */
+ efx->max_channels = min_t(unsigned int,
+ EFX_MAX_CHANNELS,
+ efx_ef10_mem_map_size(efx) /
+ (efx->vi_stride * EFX_TXQ_TYPES));
+ efx->max_tx_channels = efx->max_channels;
+ if (WARN_ON(efx->max_channels == 0)) {
+ rc = -EIO;
+ goto fail5;
+ }
+
efx->rx_packet_len_offset =
ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
@@ -695,7 +781,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc && rc != -EPERM)
goto fail5;
- efx_ptp_probe(efx, NULL);
+ efx_ptp_defer_probe_with_channel(efx);
#ifdef CONFIG_SFC_SRIOV
if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
@@ -865,6 +951,11 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
/* Link a buffer to each TX queue */
efx_for_each_channel(channel, efx) {
+ /* Extra channels, even those with TXQs (PTP), do not require
+ * PIO resources.
+ */
+ if (!channel->type->want_pio)
+ continue;
efx_for_each_channel_tx_queue(tx_queue, channel) {
/* We assign the PIO buffers to queues in
* reverse order to allow for the following
@@ -907,7 +998,7 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
} else {
tx_queue->piobuf =
nic_data->pio_write_base +
- index * EFX_VI_PAGE_SIZE + offset;
+ index * efx->vi_stride + offset;
tx_queue->piobuf_offset = offset;
netif_dbg(efx, probe, efx->net_dev,
"linked VI %u to PIO buffer %u offset %x addr %p\n",
@@ -1212,7 +1303,9 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
void __iomem *membase;
int rc;
- channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+ channel_vis = max(efx->n_channels,
+ (efx->n_tx_channels + efx->n_extra_tx_channels) *
+ EFX_TXQ_TYPES);
#ifdef EFX_USE_PIO
/* Try to allocate PIO buffers if wanted and if the full
@@ -1253,19 +1346,19 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
* for writing PIO buffers through.
*
* The UC mapping contains (channel_vis - 1) complete VIs and the
- * first half of the next VI. Then the WC mapping begins with
- * the second half of this last VI.
+ * first 4K of the next VI. Then the WC mapping begins with
+ * the remainder of this last VI.
*/
- uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE +
+ uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
ER_DZ_TX_PIOBUF);
if (nic_data->n_piobufs) {
/* pio_write_vi_base rounds down to give the number of complete
* VIs inside the UC mapping.
*/
- pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+ pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
nic_data->n_piobufs) *
- EFX_VI_PAGE_SIZE) -
+ efx->vi_stride) -
uc_mem_map_size);
max_vis = pio_write_vi_base + nic_data->n_piobufs;
} else {
@@ -1337,7 +1430,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
nic_data->pio_write_vi_base = pio_write_vi_base;
nic_data->pio_write_base =
nic_data->wc_membase +
- (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
+ (pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
uc_mem_map_size);
rc = efx_ef10_link_piobufs(efx);
@@ -1571,6 +1664,29 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
+ EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
+ EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
+ EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
+ EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
+ EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
+ EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
+ EF10_DMA_STAT(ctpio_dmabuf_start, CTPIO_DMABUF_START),
+ EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
+ EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
+ EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
+ EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
+ EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
+ EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
+ EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
+ EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
+ EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
+ EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
+ EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
+ EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
+ EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
+ EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
+ EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
+ EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
};
#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \
@@ -1646,6 +1762,43 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
(1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \
(1ULL << EF10_STAT_port_rx_dp_hlb_wait))
+/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
+ * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
+ * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
+ * These bits are in the second u64 of the raw mask.
+ */
+#define EF10_FEC_STAT_MASK ( \
+ (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \
+ (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
+
+/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
+ * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
+ * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
+ * These bits are in the second u64 of the raw mask.
+ */
+#define EF10_CTPIO_STAT_MASK ( \
+ (1ULL << (EF10_STAT_ctpio_dmabuf_start - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_success - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_fallback - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_poison - 64)) | \
+ (1ULL << (EF10_STAT_ctpio_erase - 64)))
+
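Note: both masks above hard-code the indexing into the second u64 of the raw mask, hence the "- 64" in every shift. The equivalent runtime lookup, as a hedged sketch (stat_enabled is illustrative, not part of the patch):

#include <linux/types.h>

/* Illustrative only: the raw mask is two u64s, raw_mask[0] covering
 * stats 0..63 and raw_mask[1] covering stats 64..127.
 */
static bool stat_enabled(const u64 raw_mask[2], unsigned int stat)
{
	return raw_mask[stat / 64] & (1ULL << (stat % 64));
}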
static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
u64 raw_mask = HUNT_COMMON_STAT_MASK;
@@ -1684,10 +1837,22 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
if (nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
- raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
+ raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
} else {
raw_mask[1] = 0;
}
+ /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
+ if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
+ raw_mask[1] |= EF10_FEC_STAT_MASK;
+
+ /* CTPIO stats appear in V3. Only show them on devices that actually
+ * support CTPIO. Although this driver doesn't use CTPIO, others
+ * might, and we may be reporting the stats for the underlying port.
+ */
+ if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
+ (nic_data->datapath_caps2 &
+ (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
+ raw_mask[1] |= EF10_CTPIO_STAT_MASK;
#if BITS_PER_LONG == 64
BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
@@ -1791,7 +1956,7 @@ static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
dma_stats = efx->stats_buffer.addr;
- generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ generation_end = dma_stats[efx->num_mac_stats - 1];
if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
@@ -1839,7 +2004,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
DECLARE_BITMAP(mask, EF10_STAT_COUNT);
__le64 generation_start, generation_end;
u64 *stats = nic_data->stats;
- u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+ u32 dma_len = efx->num_mac_stats * sizeof(u64);
struct efx_buffer stats_buf;
__le64 *dma_stats;
int rc;
@@ -1864,7 +2029,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
}
dma_stats = stats_buf.addr;
- dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
@@ -1883,7 +2048,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
goto out;
}
- generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ generation_end = dma_stats[efx->num_mac_stats - 1];
if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
WARN_ON_ONCE(1);
goto out;
@@ -1951,8 +2116,9 @@ static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
} else {
unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
- EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
- ERF_DZ_TC_TIMER_VAL, ticks);
+ EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
+ ERF_DZ_TC_TIMER_VAL, ticks,
+ ERF_FZ_TC_TMR_REL_VAL, ticks);
efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
channel->channel);
}
@@ -2263,12 +2429,25 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
int i;
BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
+ /* Only attempt to enable TX timestamping if we have the license for it,
+ * otherwise TXQ init will fail.
+ */
+ if (!(nic_data->licensed_features &
+ (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) {
+ tx_queue->timestamping = false;
+ /* Disable sync events on this channel. */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, false, false);
+ }
+
/* TSOv2 is a limited resource that can only be configured on a limited
* number of queues. TSO without checksum offload is not really a thing,
* so we only enable it for those queues.
+ * TSOv2 cannot be used with hardware timestamping.
*/
if (csum_offload && (nic_data->datapath_caps2 &
- (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))) {
+ (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
+ !tx_queue->timestamping) {
tso_v2 = true;
netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
channel->channel);
@@ -2294,14 +2473,16 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
do {
- MCDI_POPULATE_DWORD_3(inbuf, INIT_TXQ_IN_FLAGS,
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
/* This flag was removed from mcdi_pcol.h for
* the non-_EXT version of INIT_TXQ. However,
* firmware still honours it.
*/
INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
- INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
+ tx_queue->timestamping);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
NULL, 0, NULL);
@@ -2327,12 +2508,13 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
tx_queue->insert_count = 1;
txd = efx_tx_desc(tx_queue, 0);
- EFX_POPULATE_QWORD_4(*txd,
+ EFX_POPULATE_QWORD_5(*txd,
ESF_DZ_TX_DESC_IS_OPT, true,
ESF_DZ_TX_OPTION_TYPE,
ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
- ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
+ ESF_DZ_TX_OPTION_IP_CSUM, csum_offload,
+ ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping);
tx_queue->write_count = 1;
if (tso_v2) {
@@ -3233,8 +3415,8 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
- (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
- rx_l4_class != ESE_DZ_L4_CLASS_UDP))))
+ (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
+ rx_l4_class != ESE_FZ_L4_CLASS_UDP))))
netdev_WARN(efx->net_dev,
"invalid class for RX_TCPUDP_CKSUM_ERR: event="
EFX_QWORD_FMT "\n",
@@ -3271,8 +3453,8 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
EFX_QWORD_VAL(*event));
else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
- (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
- rx_l4_class != ESE_DZ_L4_CLASS_UDP)))
+ (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
+ rx_l4_class != ESE_FZ_L4_CLASS_UDP)))
netdev_WARN(efx->net_dev,
"invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
EFX_QWORD_FMT "\n",
@@ -3307,7 +3489,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
- rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
+ rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS);
rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
rx_encap_hdr =
nic_data->datapath_caps &
@@ -3385,8 +3567,8 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
rx_l3_class, rx_l4_class,
event);
} else {
- bool tcpudp = rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
- rx_l4_class == ESE_DZ_L4_CLASS_UDP;
+ bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP ||
+ rx_l4_class == ESE_FZ_L4_CLASS_UDP;
switch (rx_encap_hdr) {
case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
@@ -3407,7 +3589,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
}
}
- if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
+ if (rx_l4_class == ESE_FZ_L4_CLASS_TCP)
flags |= EFX_RX_PKT_TCP;
channel->irq_mod_score += 2 * n_packets;
@@ -3427,31 +3609,92 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
return n_packets;
}
-static int
+static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
+{
+ u32 tstamp;
+
+ tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI);
+ tstamp <<= 16;
+ tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO);
+
+ return tstamp;
+}
+
+static void
efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
struct efx_tx_queue *tx_queue;
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
- int tx_descs = 0;
+ unsigned int tx_ev_type;
+ u64 ts_part;
if (unlikely(READ_ONCE(efx->reset_pending)))
- return 0;
+ return;
if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
- return 0;
+ return;
- /* Transmit completion */
- tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
+ /* Get the transmit queue */
tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
tx_queue = efx_channel_get_tx_queue(channel,
tx_ev_q_label % EFX_TXQ_TYPES);
- tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
- tx_queue->ptr_mask);
- efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
- return tx_descs;
+ if (!tx_queue->timestamping) {
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
+ return;
+ }
+
+ /* Transmit timestamps are only available on the 8XXX series. They result
+ * in three events per packet. These occur in order, and are:
+ * - the normal completion event
+ * - the low part of the timestamp
+ * - the high part of the timestamp
+ *
+ * Each part of the timestamp is itself split across two 16-bit
+ * fields in the event.
+ */
+ tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
+
+ switch (tx_ev_type) {
+ case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
+ /* In case of queue flush or FLR, we might have received
+ * the previous TX completion event but not the timestamp
+ * events.
+ */
+ if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
+ efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
+
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
+ ESF_DZ_TX_DESCR_INDX);
+ tx_queue->completed_desc_ptr =
+ tx_ev_desc_ptr & tx_queue->ptr_mask;
+ break;
+
+ case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
+ ts_part = efx_ef10_extract_event_ts(event);
+ tx_queue->completed_timestamp_minor = ts_part;
+ break;
+
+ case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
+ ts_part = efx_ef10_extract_event_ts(event);
+ tx_queue->completed_timestamp_major = ts_part;
+
+ efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
+ tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+ break;
+
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown tx event type %d (data "
+ EFX_QWORD_FMT ")\n",
+ channel->channel, tx_ev_type,
+ EFX_QWORD_VAL(*event));
+ break;
+ }
}
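Note: each timestamp part is delivered as two 16-bit event fields, which efx_ef10_extract_event_ts() above stitches back together; the LO and HI events then supply the minor and major parts kept in the queue. The field reassembly in isolation (extract_ts_part is an illustrative stand-alone reduction):

#include <linux/types.h>

/* Illustrative only: shift the high 16-bit field up and OR in the low
 * one, mirroring efx_ef10_extract_event_ts().
 */
static u32 extract_ts_part(u16 data_hi, u16 data_lo)
{
	return ((u32)data_hi << 16) | data_lo;
}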
static void
@@ -3513,7 +3756,6 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
efx_qword_t event, *p_event;
unsigned int read_ptr;
int ev_code;
- int tx_descs = 0;
int spent = 0;
if (quota <= 0)
@@ -3553,13 +3795,7 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
}
break;
case ESE_DZ_EV_CODE_TX_EV:
- tx_descs += efx_ef10_handle_tx_event(channel, &event);
- if (tx_descs > efx->txq_entries) {
- spent = quota;
- goto out;
- } else if (++spent == quota) {
- goto out;
- }
+ efx_ef10_handle_tx_event(channel, &event);
break;
case ESE_DZ_EV_CODE_DRIVER_EV:
efx_ef10_handle_driver_event(channel, &event);
@@ -6034,7 +6270,8 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
efx_ef10_rx_enable_timestamping :
efx_ef10_rx_disable_timestamping;
- efx_for_each_channel(channel, efx) {
+ channel = efx_ptp_channel(efx);
+ if (channel) {
int rc = set(channel, temp);
if (en && rc != 0) {
efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
@@ -6392,7 +6629,7 @@ out_unlock:
const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.is_vf = true,
- .mem_bar = EFX_MEM_VF_BAR,
+ .mem_bar = efx_ef10_vf_mem_bar,
.mem_map_size = efx_ef10_mem_map_size,
.probe = efx_ef10_probe_vf,
.remove = efx_ef10_remove,
@@ -6500,7 +6737,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
const struct efx_nic_type efx_hunt_a0_nic_type = {
.is_vf = false,
- .mem_bar = EFX_MEM_BAR,
+ .mem_bar = efx_ef10_pf_mem_bar,
.mem_map_size = efx_ef10_mem_map_size,
.probe = efx_ef10_probe_pf,
.remove = efx_ef10_remove,
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index 2c4bf9476c37..6a56778cf06c 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
- * Copyright 2012-2015 Solarflare Communications Inc.
+ * Copyright 2012-2017 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -79,6 +79,8 @@
#define ER_DZ_EVQ_TMR 0x00000420
#define ER_DZ_EVQ_TMR_STEP 8192
#define ER_DZ_EVQ_TMR_ROWS 2048
+#define ERF_FZ_TC_TMR_REL_VAL_LBN 16
+#define ERF_FZ_TC_TMR_REL_VAL_WIDTH 14
#define ERF_DZ_TC_TIMER_MODE_LBN 14
#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
#define ERF_DZ_TC_TIMER_VAL_LBN 0
@@ -159,16 +161,24 @@
#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
-#define ESF_DZ_RX_L4_CLASS_LBN 45
-#define ESF_DZ_RX_L4_CLASS_WIDTH 3
-#define ESE_DZ_L4_CLASS_RSVD7 7
-#define ESE_DZ_L4_CLASS_RSVD6 6
-#define ESE_DZ_L4_CLASS_RSVD5 5
-#define ESE_DZ_L4_CLASS_RSVD4 4
-#define ESE_DZ_L4_CLASS_RSVD3 3
-#define ESE_DZ_L4_CLASS_UDP 2
-#define ESE_DZ_L4_CLASS_TCP 1
-#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DE_RX_L4_CLASS_LBN 45
+#define ESF_DE_RX_L4_CLASS_WIDTH 3
+#define ESE_DE_L4_CLASS_RSVD7 7
+#define ESE_DE_L4_CLASS_RSVD6 6
+#define ESE_DE_L4_CLASS_RSVD5 5
+#define ESE_DE_L4_CLASS_RSVD4 4
+#define ESE_DE_L4_CLASS_RSVD3 3
+#define ESE_DE_L4_CLASS_UDP 2
+#define ESE_DE_L4_CLASS_TCP 1
+#define ESE_DE_L4_CLASS_UNKNOWN 0
+#define ESF_FZ_RX_FASTPD_INDCTR_LBN 47
+#define ESF_FZ_RX_FASTPD_INDCTR_WIDTH 1
+#define ESF_FZ_RX_L4_CLASS_LBN 45
+#define ESF_FZ_RX_L4_CLASS_WIDTH 2
+#define ESE_FZ_L4_CLASS_RSVD3 3
+#define ESE_FZ_L4_CLASS_UDP 2
+#define ESE_FZ_L4_CLASS_TCP 1
+#define ESE_FZ_L4_CLASS_UNKNOWN 0
#define ESF_DZ_RX_L3_CLASS_LBN 42
#define ESF_DZ_RX_L3_CLASS_WIDTH 3
#define ESE_DZ_L3_CLASS_RSVD7 7
@@ -215,6 +225,8 @@
#define ESF_EZ_RX_ABORT_WIDTH 1
#define ESF_DZ_RX_ECC_ERR_LBN 29
#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_TRUNC_ERR_LBN 29
+#define ESF_DZ_RX_TRUNC_ERR_WIDTH 1
#define ESF_DZ_RX_CRC1_ERR_LBN 28
#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
#define ESF_DZ_RX_CRC0_ERR_LBN 27
@@ -332,6 +344,8 @@
#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
@@ -341,7 +355,7 @@
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-/* TX_TSO_FATSO2A_DESC */
+/* TX_TSO_V2_DESC_A */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
#define ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -360,8 +374,7 @@
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-
-/* TX_TSO_FATSO2B_DESC */
+/* TX_TSO_V2_DESC_B */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
#define ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -375,11 +388,10 @@
#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
-#define ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 0
-#define ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32
#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
-
+#define ESF_DZ_TX_TSO_OUTER_IPID_LBN 0
+#define ESF_DZ_TX_TSO_OUTER_IPID_WIDTH 16
/*************************************************************************/
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index e3c492fcaff0..16757cfc5b29 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -27,6 +27,7 @@
#include <net/udp_tunnel.h>
#include "efx.h"
#include "nic.h"
+#include "io.h"
#include "selftest.h"
#include "sriov.h"
@@ -895,12 +896,20 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}
+static bool efx_default_channel_want_txqs(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->tx_channel_offset <
+ channel->efx->n_tx_channels;
+}
+
static const struct efx_channel_type efx_default_channel_type = {
.pre_probe = efx_channel_dummy_op_int,
.post_remove = efx_channel_dummy_op_void,
.get_name = efx_get_channel_name,
.copy = efx_copy_channel,
+ .want_txqs = efx_default_channel_want_txqs,
.keep_eventq = false,
+ .want_pio = true,
};
int efx_channel_dummy_op_int(struct efx_channel *channel)
@@ -952,31 +961,42 @@ void efx_link_status_changed(struct efx_nic *efx)
netif_info(efx, link, efx->net_dev, "link down\n");
}
-void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising)
{
- efx->link_advertising = advertising;
- if (advertising) {
- if (advertising & ADVERTISED_Pause)
- efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
- else
- efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
- if (advertising & ADVERTISED_Asym_Pause)
- efx->wanted_fc ^= EFX_FC_TX;
- }
+ memcpy(efx->link_advertising, advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+ efx->link_advertising[0] |= ADVERTISED_Autoneg;
+ if (advertising[0] & ADVERTISED_Pause)
+ efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+ else
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+ if (advertising[0] & ADVERTISED_Asym_Pause)
+ efx->wanted_fc ^= EFX_FC_TX;
+}
+
+/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
+ * force the Autoneg bit on.
+ */
+void efx_link_clear_advertising(struct efx_nic *efx)
+{
+ bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
}
void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
efx->wanted_fc = wanted_fc;
- if (efx->link_advertising) {
+ if (efx->link_advertising[0]) {
if (wanted_fc & EFX_FC_RX)
- efx->link_advertising |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ efx->link_advertising[0] |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
else
- efx->link_advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
+ efx->link_advertising[0] &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
if (wanted_fc & EFX_FC_TX)
- efx->link_advertising ^= ADVERTISED_Asym_Pause;
+ efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
}
}
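Note: link_advertising is now an ethtool link-mode bitmap rather than a single u32, so callers build a mask with the ethtool helpers. A hedged usage sketch (example_advertise_10g and the advertised mode are arbitrary examples, not from the patch):

static void example_advertise_10g(struct efx_nic *efx)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0 };

	/* Autoneg is forced on by efx_link_set_advertising() itself in
	 * the hunk above, so only the link modes need setting here.
	 */
	__set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, advertising);
	efx_link_set_advertising(efx, advertising);
}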
@@ -1248,7 +1268,7 @@ static int efx_init_io(struct efx_nic *efx)
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
- bar = efx->type->mem_bar;
+ bar = efx->type->mem_bar(efx);
rc = pci_enable_device(pci_dev);
if (rc) {
@@ -1323,7 +1343,7 @@ static void efx_fini_io(struct efx_nic *efx)
}
if (efx->membase_phys) {
- bar = efx->type->mem_bar;
+ bar = efx->type->mem_bar(efx);
pci_release_region(efx->pci_dev, bar);
efx->membase_phys = 0;
}
@@ -1489,6 +1509,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
}
/* Assign extra channels if possible */
+ efx->n_extra_tx_channels = 0;
j = efx->n_channels;
for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
if (!efx->extra_channel_type[i])
@@ -1500,6 +1521,8 @@ static int efx_probe_interrupts(struct efx_nic *efx)
--j;
efx_get_channel(efx, j)->type =
efx->extra_channel_type[i];
+ if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
+ efx->n_extra_tx_channels++;
}
}
@@ -2909,6 +2932,10 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03), /* SFC9250 PF */
+ .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */
+ .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
{0} /* end of list */
};
@@ -2977,6 +3004,9 @@ static int efx_init_struct(struct efx_nic *efx,
efx->rx_packet_ts_offset =
efx->type->rx_ts_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
+ efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
+ efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+ BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 52c84b782901..0cddc5ad77b1 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,11 +14,6 @@
#include "net_driver.h"
#include "filter.h"
-/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
-/* All VFs use BAR 0/1 for memory */
-#define EFX_MEM_BAR 2
-#define EFX_MEM_VF_BAR 0
-
int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);
@@ -263,7 +258,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
}
void efx_link_status_changed(struct efx_nic *efx);
-void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_advertising(struct efx_nic *efx,
+ const unsigned long *advertising);
+void efx_link_clear_advertising(struct efx_nic *efx);
void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
static inline void efx_device_detach_sync(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 3747b5644110..4db2dc2bf52f 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -720,7 +720,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
goto out;
}
- if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
+ if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
netif_dbg(efx, drv, efx->net_dev,
"Autonegotiation is disabled\n");
rc = -EINVAL;
@@ -732,10 +732,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
(wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
efx->type->prepare_enable_fc_tx(efx);
- old_adv = efx->link_advertising;
+ old_adv = efx->link_advertising[0];
old_fc = efx->wanted_fc;
efx_link_set_wanted_fc(efx, wanted_fc);
- if (efx->link_advertising != old_adv ||
+ if (efx->link_advertising[0] != old_adv ||
(efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
rc = efx->phy_op->reconfigure(efx);
if (rc) {
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 5334dc83d926..266b9bee1f3a 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -818,17 +818,16 @@ static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
* The NIC batches TX completion events; the message we receive is of
* the form "complete all TX events up to this index".
*/
-static int
+static void
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
struct efx_tx_queue *tx_queue;
struct efx_nic *efx = channel->efx;
- int tx_packets = 0;
if (unlikely(READ_ONCE(efx->reset_pending)))
- return 0;
+ return;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
/* Transmit completion */
@@ -836,8 +835,6 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = efx_channel_get_tx_queue(
channel, tx_ev_q_label % EFX_TXQ_TYPES);
- tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
- tx_queue->ptr_mask);
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
@@ -856,8 +853,6 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
EFX_QWORD_FMT"\n", channel->channel,
EFX_QWORD_VAL(*event));
}
-
- return tx_packets;
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
@@ -1090,7 +1085,7 @@ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
int qid;
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
- if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
+ if (qid < EFX_TXQ_TYPES * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
qid % EFX_TXQ_TYPES);
if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
@@ -1270,7 +1265,6 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
unsigned int read_ptr;
efx_qword_t event, *p_event;
int ev_code;
- int tx_packets = 0;
int spent = 0;
if (budget <= 0)
@@ -1304,12 +1298,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
goto out;
break;
case FSE_AZ_EV_CODE_TX_EV:
- tx_packets += efx_farch_handle_tx_event(channel,
- &event);
- if (tx_packets > efx->txq_entries) {
- spent = budget;
- goto out;
- }
+ efx_farch_handle_tx_event(channel, &event);
break;
case FSE_AZ_EV_CODE_DRV_GEN_EV:
efx_farch_handle_generated_event(channel, &event);
@@ -1680,20 +1669,21 @@ void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
*/
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
- unsigned vi_count, buftbl_min;
+ unsigned vi_count, buftbl_min, total_tx_channels;
#ifdef CONFIG_SFC_SRIOV
struct siena_nic_data *nic_data = efx->nic_data;
#endif
+ total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
/* Account for the buffer table entries backing the datapath channels
* and the descriptor caches for those channels.
*/
buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
- efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
+ total_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
efx->n_channels * EFX_MAX_EVQ_SIZE)
* sizeof(efx_qword_t) / EFX_BUF_SIZE);
- vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+ vi_count = max(efx->n_channels, total_tx_channels * EFX_TXQ_TYPES);
#ifdef CONFIG_SFC_SRIOV
if (efx->type->sriov_wanted) {
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index afb94aa2c15e..89563170af52 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -222,18 +222,21 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* Page size used as step between per-VI registers */
-#define EFX_VI_PAGE_SIZE 0x2000
+/* default VI stride (step between per-VI registers) is 8K */
+#define EFX_DEFAULT_VI_STRIDE 0x2000
/* Calculate offset to page-mapped register */
-#define EFX_PAGED_REG(page, reg) \
- ((page) * EFX_VI_PAGE_SIZE + (reg))
+static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
+ unsigned int reg)
+{
+ return page * efx->vi_stride + reg;
+}
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg, unsigned int page)
{
- reg = EFX_PAGED_REG(page, reg);
+ reg = efx_paged_reg(efx, page, reg);
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with " EFX_OWORD_FMT "\n", reg,
@@ -262,7 +265,7 @@ static inline void
_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
unsigned int reg, unsigned int page)
{
- efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ efx_writed(efx, value, efx_paged_reg(efx, page, reg));
}
#define efx_writed_page(efx, value, reg, page) \
_efx_writed_page(efx, value, \
@@ -288,10 +291,10 @@ static inline void _efx_writed_page_locked(struct efx_nic *efx,
if (page == 0) {
spin_lock_irqsave(&efx->biu_lock, flags);
- efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ efx_writed(efx, value, efx_paged_reg(efx, page, reg));
spin_unlock_irqrestore(&efx->biu_lock, flags);
} else {
- efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ efx_writed(efx, value, efx_paged_reg(efx, page, reg));
}
}
#define efx_writed_page_locked(efx, value, reg, page) \
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 154ef41d1927..ebd95972ae7b 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -208,6 +208,9 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define _MCDI_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+#define MCDI_BYTE(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
+ *MCDI_PTR(_buf, _field))
#define MCDI_WORD(_buf, _field) \
((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
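Note: MCDI_BYTE completes the MCDI_WORD/MCDI_DWORD accessor family for one-byte fields, with the same compile-time length check. Its first user appears earlier in this patch, in efx_ef10_init_datapath_caps():

	u8 vi_window_mode = MCDI_BYTE(outbuf,
				      GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);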
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 91fb54fd03d9..869d76f8f589 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -114,6 +114,8 @@
#define MCDI_HEADER_XFLAGS_WIDTH 8
/* Request response using event */
#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
/* Maximum number of payload bytes */
#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
@@ -123,7 +125,7 @@
/* The MC can generate events for two reasons:
- * - To complete a shared memory request if XFLAGS_EVREQ was set
+ * - To advance a shared memory request if XFLAGS_EVREQ was set
* - As a notification (link state, i2c event), controlled
* via MC_CMD_LOG_CTRL
*
@@ -279,6 +281,17 @@
/* Returned by MC_CMD_TESTASSERT if the action that should
* have caused an assertion failed to do so. */
#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary. */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
#define MC_CMD_ERR_CODE_OFST 0
@@ -360,6 +373,7 @@
/* enum: Fatal. */
#define MCDI_EVENT_LEVEL_FATAL 0x3
#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_DATA_LEN 4
#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
@@ -370,6 +384,8 @@
#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: Link is down or link speed could not be determined */
+#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0
/* enum: 100Mbs */
#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
/* enum: 1Gbs */
@@ -378,6 +394,12 @@
#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
/* enum: 40Gbs */
#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+/* enum: 25Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5
+/* enum: 50Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6
+/* enum: 100Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7
#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
@@ -456,8 +478,63 @@
#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
/* enum: PTP status update */
#define MCDI_EVENT_AOE_PTP_STATUS 0xb
+/* enum: FPGA header incorrect */
+#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc
+/* enum: FPGA Powered Off due to error in powering up FPGA */
+#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd
+/* enum: AOE FPGA load failed due to MC to MUM communication failure */
+#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
+/* enum: Notify that an invalid flash type was detected */
+#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
+#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+/* enum: Failure to probe one or more FPGA boot flash chips */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
+/* enum: FPGA boot-flash contains an invalid image header */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12
+/* enum: Failed to program clocks required by the FPGA */
+#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
+/* enum: Notify that FPGA Controller is alive to serve MCDI requests */
+#define MCDI_EVENT_AOE_FC_RUNNING 0x14
#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
+/* enum: FC Assert happened, but the register information is not available */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
+/* enum: The register information for FC Assert is ready for reading by the
+ * driver
+ */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
+/* enum: Reading from NV failed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0
+/* enum: Invalid magic number in FPGA header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1
+/* enum: Invalid Silicon type detected in header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2
+/* enum: Unsupported VRatio */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3
+/* enum: Unsupported DDR Type */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4
+/* enum: DDR Voltage out of supported range */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5
+/* enum: Unsupported DDR speed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6
+/* enum: Unsupported DDR size */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
+/* enum: Unsupported DDR rank */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
+/* enum: Primary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
+/* enum: Secondary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
@@ -480,6 +557,22 @@
#define MCDI_EVENT_MUM_WATCHDOG 0x3
#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_DBRET_SEQ_LBN 0
+#define MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0
+#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
+/* enum: Corrupted or bad SUC application. */
+#define MCDI_EVENT_SUC_BAD_APP 0x1
+/* enum: SUC application reported an assert. */
+#define MCDI_EVENT_SUC_ASSERT 0x2
+/* enum: SUC application reported an exception. */
+#define MCDI_EVENT_SUC_EXCEPTION 0x3
+/* enum: SUC watchdog timer expired. */
+#define MCDI_EVENT_SUC_WATCHDOG 0x4
+#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
+#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define MCDI_EVENT_SUC_ERR_DATA_LBN 8
+#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
#define MCDI_EVENT_SRC_LBN 36
@@ -552,73 +645,99 @@
* been processed and it may now resend the command
*/
#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: MCDI command accepted. New commands can be issued but this command is
+ * not done yet.
+ */
+#define MCDI_EVENT_CODE_DBRET 0x1e
+/* enum: The MC has detected a fault on the SUC */
+#define MCDI_EVENT_CODE_SUC 0x1f
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
#define MCDI_EVENT_CODE_TESTGEN 0xfa
#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LEN 4
#define MCDI_EVENT_CMDDONE_DATA_LBN 0
#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4
#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LEN 4
#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LEN 4
#define MCDI_EVENT_TX_ERR_DATA_LBN 0
#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
* timestamp
*/
#define MCDI_EVENT_PTP_SECONDS_OFST 0
+#define MCDI_EVENT_PTP_SECONDS_LEN 4
#define MCDI_EVENT_PTP_SECONDS_LBN 0
#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
* timestamp
*/
#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LEN 4
#define MCDI_EVENT_PTP_MAJOR_LBN 0
#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
* of timestamp
*/
#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4
#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
* timestamp
*/
#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LEN 4
#define MCDI_EVENT_PTP_MINOR_LBN 0
#define MCDI_EVENT_PTP_MINOR_WIDTH 32
/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
*/
#define MCDI_EVENT_PTP_UUID_OFST 0
+#define MCDI_EVENT_PTP_UUID_LEN 4
#define MCDI_EVENT_PTP_UUID_LBN 0
#define MCDI_EVENT_PTP_UUID_WIDTH 32
#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LEN 4
#define MCDI_EVENT_RX_ERR_DATA_LBN 0
#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LEN 4
#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4
#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
/* For CODE_PTP_TIME events, the major value of the PTP clock */
#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4
#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8
/* For CODE_PTP_TIME events where report sync status is enabled, indicates
* whether the NIC clock has ever been set
*/
@@ -634,10 +753,17 @@
*/
#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
/* Zero means that the request has been completed or authorized, and the driver
@@ -646,6 +772,10 @@
*/
#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
+#define MCDI_EVENT_DBRET_DATA_OFST 0
+#define MCDI_EVENT_DBRET_DATA_LEN 4
+#define MCDI_EVENT_DBRET_DATA_LBN 0
+#define MCDI_EVENT_DBRET_DATA_WIDTH 32
/* FCDI_EVENT structuredef */
#define FCDI_EVENT_LEN 8
@@ -662,6 +792,7 @@
/* enum: Fatal. */
#define FCDI_EVENT_LEVEL_FATAL 0x3
#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_DATA_LEN 4
#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
@@ -701,6 +832,7 @@
#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
#define FCDI_EVENT_ASSERT_TYPE_LBN 36
@@ -708,12 +840,15 @@
#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_STATE_LEN 4
#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
@@ -722,6 +857,7 @@
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
/* Index of MC port being referred to */
@@ -729,9 +865,11 @@
#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
/* FC Port index that matches the MC port index in SRC */
#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
#define FCDI_EVENT_BOOT_RESULT_OFST 0
+#define FCDI_EVENT_BOOT_RESULT_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
#define FCDI_EVENT_BOOT_RESULT_LBN 0
@@ -748,14 +886,17 @@
#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
/* Number of timestamps following */
#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
/* Seconds field of a timestamp record */
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
/* Nanoseconds field of a timestamp record */
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
/* Timestamp records comprising the event */
@@ -783,6 +924,7 @@
/* enum: Fatal. */
#define MUM_EVENT_LEVEL_FATAL 0x3
#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_DATA_LEN 4
#define MUM_EVENT_SENSOR_ID_LBN 0
#define MUM_EVENT_SENSOR_ID_WIDTH 8
/* Enum values, see field(s): */
@@ -820,18 +962,23 @@
/* enum: Link fault has been asserted, or has cleared. */
#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LEN 4
#define MUM_EVENT_SENSOR_DATA_LBN 0
#define MUM_EVENT_SENSOR_DATA_WIDTH 32
#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_TECH_LEN 4
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
@@ -864,7 +1011,9 @@
/* MC_CMD_READ32_IN msgrequest */
#define MC_CMD_READ32_IN_LEN 8
#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_ADDR_LEN 4
#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define MC_CMD_READ32_IN_NUMWORDS_LEN 4
/* MC_CMD_READ32_OUT msgresponse */
#define MC_CMD_READ32_OUT_LENMIN 4
@@ -882,13 +1031,14 @@
*/
#define MC_CMD_WRITE32 0x2
-#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_WRITE32_IN msgrequest */
#define MC_CMD_WRITE32_IN_LENMIN 8
#define MC_CMD_WRITE32_IN_LENMAX 252
#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_ADDR_LEN 4
#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
@@ -915,6 +1065,7 @@
* is a bitfield, with each bit as documented below.
*/
#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
@@ -940,9 +1091,12 @@
#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
/* Destination address */
#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
/* Address of where to jump after copy. */
#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
/* enum: Control should return to the caller rather than jumping */
#define MC_CMD_COPYCODE_JUMP_NONE 0x1
@@ -956,12 +1110,13 @@
*/
#define MC_CMD_SET_FUNC 0x4
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_FUNC_IN msgrequest */
#define MC_CMD_SET_FUNC_IN_LEN 4
/* Set function */
#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
/* MC_CMD_SET_FUNC_OUT msgresponse */
#define MC_CMD_SET_FUNC_OUT_LEN 0
@@ -973,7 +1128,7 @@
*/
#define MC_CMD_GET_BOOT_STATUS 0x5
-#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
@@ -982,9 +1137,11 @@
#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
/* ?? */
#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4
/* enum: indicates that the MC wasn't flash booted */
#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
@@ -1007,11 +1164,13 @@
#define MC_CMD_GET_ASSERTS_IN_LEN 4
/* Set to clear assertion */
#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4
/* MC_CMD_GET_ASSERTS_OUT msgresponse */
#define MC_CMD_GET_ASSERTS_OUT_LEN 140
/* Assertion status flag. */
#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4
/* enum: No assertions have failed. */
#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
/* enum: A system-level assertion has failed. */
@@ -1024,6 +1183,7 @@
#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
/* Failing PC value */
#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4
/* Saved GP regs */
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
@@ -1034,7 +1194,9 @@
#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
/* Failing thread address */
#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4
#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4
/***********************************/
@@ -1050,12 +1212,14 @@
#define MC_CMD_LOG_CTRL_IN_LEN 8
/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
/* enum: UART. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
/* enum: Event queue. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
/* Legacy argument. Must be zero. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4
/* MC_CMD_LOG_CTRL_OUT msgresponse */
#define MC_CMD_LOG_CTRL_OUT_LEN 0
@@ -1076,23 +1240,29 @@
#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
/* placeholder, set to 0 */
#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4
/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4
/* enum: Reserved version number to indicate "any" version. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
/* enum: Bootrom version value for Siena. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
/* enum: Bootrom version value for Huntington. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+/* enum: Bootrom version value for Medford2. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4
/* 128bit mask of functions supported by the current firmware */
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
@@ -1104,9 +1274,11 @@
/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4
/* 128bit mask of functions supported by the current firmware */
#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
@@ -1136,41 +1308,54 @@
#define MC_CMD_PTP_OP_ENABLE 0x1
/* enum: Disable PTP packet timestamping operation. */
#define MC_CMD_PTP_OP_DISABLE 0x2
-/* enum: Send a PTP packet. */
+/* enum: Send a PTP packet. This operation is used on Siena and Huntington.
+ * From Medford onwards it is not supported: on those platforms PTP transmit
+ * timestamping is done using the fast path.
+ */
#define MC_CMD_PTP_OP_TRANSMIT 0x3
/* enum: Read the current NIC time. */
#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
-/* enum: Get the current PTP status. */
+/* enum: Get the current PTP status. Note that the clock frequency returned (in
+ * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666).
+ */
#define MC_CMD_PTP_OP_STATUS 0x5
/* enum: Adjust the PTP NIC's time. */
#define MC_CMD_PTP_OP_ADJUST 0x6
/* enum: Synchronize host and NIC time. */
#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
-/* enum: Basic manufacturing tests. */
+/* enum: Basic manufacturing tests. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
-/* enum: Packet based manufacturing tests. */
+/* enum: Packet based manufacturing tests. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
/* enum: Reset some of the PTP related statistics */
#define MC_CMD_PTP_OP_RESET_STATS 0xa
/* enum: Debug operations to MC. */
#define MC_CMD_PTP_OP_DEBUG 0xb
-/* enum: Read an FPGA register */
+/* enum: Read an FPGA register. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_FPGAREAD 0xc
-/* enum: Write an FPGA register */
+/* enum: Write an FPGA register. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_FPGAWRITE 0xd
/* enum: Apply an offset to the NIC clock */
#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
-/* enum: Change Apply an offset to the NIC clock */
+/* enum: Change the frequency correction applied to the NIC clock */
#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
-/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
-/* enum: Set the MC packet filter UUID for received PTP packets */
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
-/* enum: Set the MC packet filter Domain for received PTP packets */
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
-/* enum: Set the clock source */
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
-/* enum: Reset value of Timer Reg. */
+/* enum: Reset value of Timer Reg. Not implemented. */
#define MC_CMD_PTP_OP_RST_CLK 0x14
/* enum: Enable the forwarding of PPS events to the host */
#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
@@ -1191,7 +1376,7 @@
/* enum: Unsubscribe to stop receiving time events */
#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
/* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
- * input on the same NIC.
+ * input on the same NIC. Siena PTP adapters only.
*/
#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
/* enum: Set the PTP sync status. Status is used by firmware to report to event
@@ -1204,11 +1389,15 @@
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_CMD_LEN 4
#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
-/* Event queue for PTP events */
+#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function relative queue 0. */
#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
-/* PTP timestamping mode */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards. */
#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4
/* enum: PTP, version 1 */
#define MC_CMD_PTP_MODE_V1 0x0
/* enum: PTP, version 1, with VLAN headers - deprecated */
@@ -1225,16 +1414,21 @@
/* MC_CMD_PTP_IN_DISABLE msgrequest */
#define MC_CMD_PTP_IN_DISABLE_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Transmit packet length */
#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4
/* Transmit packet data */
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
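
Editor's note: the variable-length PACKET array makes the request length
MC_CMD_PTP_IN_TRANSMIT_LEN(num) = 12 + num; a 64-byte PTP packet, for example,
gives MC_CMD_PTP_IN_TRANSMIT_LEN(64) = 76 bytes, within the 252-byte LENMAX.
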
@@ -1244,17 +1438,30 @@
/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_STATUS msgrequest */
#define MC_CMD_PTP_IN_STATUS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_ADJUST msgrequest */
#define MC_CMD_PTP_IN_ADJUST_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Frequency adjustment 40 bit fixed point ns */
#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
@@ -1262,21 +1469,67 @@
#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
/* enum: Number of fractional bits in frequency adjustment */
#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4
/* Time adjustment major value */
#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4
/* Time adjustment minor value */
#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4
+
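Editor's sketch of the fixed-point encoding described above: a parts-per-
billion correction scaled by 2^40 (or 2^44 when MC_CMD_PTP_OUT_GET_ATTRIBUTES
reports FP44_FREQ_ADJ in CAPABILITIES). The helper is hypothetical, shows
exact rounding only for non-negative values, and is not the driver's actual
arithmetic.

    #include <stdint.h>

    /* Encode a ppb frequency correction with frac_bits fractional bits
     * (MC_CMD_PTP_IN_ADJUST_BITS = 0x28 = 40, or
     *  MC_CMD_PTP_IN_ADJUST_BITS_FP44 = 0x2c = 44). Fits in int64_t for
     * the small corrections typical of a PTP servo loop.
     */
    static inline int64_t ptp_encode_freq_adj(int64_t ppb,
                                              unsigned int frac_bits)
    {
            /* value = ppb * 2^frac_bits / 1e9, rounded to nearest */
            return (ppb * (INT64_C(1) << frac_bits) + 500000000) / 1000000000;
    }

    /* e.g. +1000 ppb (1 ppm) with 40 fractional bits -> 1099512 */
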
+/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4
/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Number of time readings to capture */
#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4
/* Host address in which to write "synchronization started" indication (64
* bits)
*/
@@ -1288,42 +1541,59 @@
/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Enable or disable packet testing */
#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4
/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* Reset PTP statistics */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_DEBUG msgrequest */
#define MC_CMD_PTP_IN_DEBUG_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Debug operations */
#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4
/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4
#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4
/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
@@ -1332,34 +1602,67 @@
/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4
/* Time adjustment major value */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4
/* Time adjustment minor value */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4
/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Frequency adjustment 40 bit fixed point ns */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
-/* enum: Number of fractional bits in frequency adjustment */
-/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* Enum values, see field(s): */
+/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Number of VLAN tags, 0 if not VLAN */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4
/* Set of VLAN tags to filter against */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
@@ -1368,9 +1671,12 @@
/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable UUID filtering, 0 to disable */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4
/* UUID to filter against */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
@@ -1380,18 +1686,25 @@
/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable Domain filtering, 0 to disable */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4
/* Domain number to filter against */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4
/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Set the clock source. */
#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4
/* enum: Internal. */
#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
/* enum: External. */
@@ -1400,42 +1713,56 @@
/* MC_CMD_PTP_IN_RST_CLK msgrequest */
#define MC_CMD_PTP_IN_RST_CLK_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* Reset value of Timer Reg. */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* Enable or disable */
#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4
/* enum: Enable */
#define MC_CMD_PTP_ENABLE_PPS 0x0
/* enum: Disable */
#define MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queue id to send events back */
+/* Not used. Events are always sent to function relative queue 0. */
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Original field containing queue ID. Now extended to include flags. */
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
@@ -1444,29 +1771,39 @@
/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Unsubscribe options */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4
/* enum: Unsubscribe a single queue */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
/* enum: Unsubscribe all queues */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
/* Event queue ID */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable PPS test mode, 0 to disable and return result. */
#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4
/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* NIC - Host System Clock Synchronization status */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4
/* enum: Host System clock and NIC clock are not in sync */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
/* enum: Host System clock and NIC clock are synchronized */
@@ -1475,8 +1812,11 @@
* no longer in sync.
*/
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -1485,12 +1825,16 @@
#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4
/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
@@ -1502,47 +1846,85 @@
#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4
+/* Upper 32bits of major timestamp value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4
/* MC_CMD_PTP_OUT_STATUS msgresponse */
#define MC_CMD_PTP_OUT_STATUS_LEN 64
/* Frequency of NIC's hardware clock */
#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4
/* Number of packets transmitted and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4
/* Number of packets received and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4
/* Number of packets timestamped by the FPGA */
#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4
/* Number of packets filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4
/* Number of packets not filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4
/* Number of PPS overflows (noise on input?) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4
/* Number of PPS bad periods */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4
/* Minimum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4
/* Maximum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4
/* Last period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4
/* Mean period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4
/* Minimum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4
/* Maximum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4
/* Last offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4
/* Mean offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4
/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
@@ -1555,23 +1937,31 @@
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
/* Host time immediately before NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4
/* Host time immediately after NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4
/* Number of nanoseconds waited after reading NIC's hardware clock */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4
/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4
/* enum: Successful test */
#define MC_CMD_PTP_MANF_SUCCESS 0x0
/* enum: FPGA load failed */
@@ -1604,15 +1994,19 @@
#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
/* Presence of external oscillator */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4
/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4
/* Number of packets received by FPGA */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
/* Number of packets received by Siena filters */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4
/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
@@ -1628,9 +2022,11 @@
/* Time format required/used by this NIC. Applies to all PTP MCDI
* operations that pass times between the host and firmware. If this operation
* is not supported (older firmware) a format of seconds and nanoseconds should
- * be assumed.
+ * be assumed. Note this enum is deprecated. Do not add to it; use the
+ * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
*/
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
/* enum: Times are in seconds and nanoseconds */
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
@@ -1646,12 +2042,16 @@
* be assumed.
*/
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
/* enum: Times are in seconds and nanoseconds */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
/* enum: Major register has units of seconds, minor 2^-27s per tick */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
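
Editor's sketch of converting a timestamp in the SECONDS_27FRACTION format
above (major = seconds, minor = 2^-27 s ticks) to nanoseconds; a hedged
illustration, not the driver's time-format conversion table.

    #include <stdint.h>

    /* SECONDS_27FRACTION: ns = major * 1e9 + minor * 1e9 / 2^27 */
    static inline uint64_t ptp_s27_to_ns(uint32_t major, uint32_t minor)
    {
            return (uint64_t)major * 1000000000u +
                   (((uint64_t)minor * 1000000000u) >> 27);
    }
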
/* Minimum acceptable value for a corrected synchronization timeset. When
* comparing host and NIC clock times, the MC returns a set of samples that
* contain the host start and end time, the MC time when the host start was
@@ -1660,46 +2060,66 @@
* end and start times minus the time that the MC waited for host end.
*/
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4
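
Editor's note, a worked example of the window computation described above
using the MC_CMD_PTP_OUT_SYNCHRONIZE fields: if HOSTSTART = 1000 ns,
HOSTEND = 1700 ns and the MC reports WAITNS = 200 ns, the corrected window is
(1700 - 1000) - 200 = 500 ns, and the sample is usable only if
500 >= SYNC_WINDOW_MIN.
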
/* Various PTP capabilities */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4
/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4
/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4
/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4
/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4
/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
@@ -1713,14 +2133,17 @@
*/
#define MC_CMD_CSR_READ32 0xc
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CSR_READ32_IN msgrequest */
#define MC_CMD_CSR_READ32_IN_LEN 12
/* Address */
#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
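
Editor's note (an assumption from the field names, not stated in the header):
the read presumably covers NUMWORDS 32-bit registers at ADDR, ADDR + STEP,
ADDR + 2*STEP, ...; e.g. ADDR = 0x1000, STEP = 4, NUMWORDS = 8 would read the
eight consecutive registers 0x1000 through 0x101c.
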
/* MC_CMD_CSR_READ32_OUT msgresponse */
#define MC_CMD_CSR_READ32_OUT_LENMIN 4
@@ -1739,7 +2162,7 @@
*/
#define MC_CMD_CSR_WRITE32 0xd
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CSR_WRITE32_IN msgrequest */
#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
@@ -1747,7 +2170,9 @@
#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
/* Address */
#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
@@ -1756,6 +2181,7 @@
/* MC_CMD_CSR_WRITE32_OUT msgresponse */
#define MC_CMD_CSR_WRITE32_OUT_LEN 4
#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
/***********************************/
@@ -1776,6 +2202,7 @@
* sensors.
*/
#define MC_CMD_HP_IN_SUBCMD_OFST 0
+#define MC_CMD_HP_IN_SUBCMD_LEN 4
/* enum: OCSD (Option Card Sensor Data) sub-command. */
#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
/* enum: Last known valid HP sub-command. */
@@ -1790,10 +2217,12 @@
* NULL.)
*/
#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
/* MC_CMD_HP_OUT msgresponse */
#define MC_CMD_HP_OUT_LEN 4
#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
/* enum: OCSD stopped for this card. */
#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
/* enum: OCSD was successfully started with the address provided. */
@@ -1838,29 +2267,35 @@
* external devices.
*/
#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
/* enum: Internal. */
#define MC_CMD_MDIO_BUS_INTERNAL 0x0
/* enum: External. */
#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
/* Port address */
#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
/* Device Address or clause 22. */
#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
/* enum: By default all the MCDI MDIO operations are performed in clause45
 * mode. If you want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
*/
#define MC_CMD_MDIO_CLAUSE22 0x20
/* Address */
#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
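
Editor's sketch of marshalling a clause22 MDIO read request with the offsets
above, assuming a little-endian host; the function is hypothetical, and the
sfc driver fills MCDI payloads with its MCDI_SET_DWORD() helpers rather than
raw memcpy().

    #include <stdint.h>
    #include <string.h>

    static void mdio_read_req(uint8_t *buf, uint32_t prtad, uint32_t reg)
    {
            uint32_t v;

            v = MC_CMD_MDIO_BUS_EXTERNAL;
            memcpy(buf + MC_CMD_MDIO_READ_IN_BUS_OFST, &v, 4);
            v = prtad;
            memcpy(buf + MC_CMD_MDIO_READ_IN_PRTAD_OFST, &v, 4);
            v = MC_CMD_MDIO_CLAUSE22;       /* DEVAD = 0x20 selects clause22 */
            memcpy(buf + MC_CMD_MDIO_READ_IN_DEVAD_OFST, &v, 4);
            v = reg;
            memcpy(buf + MC_CMD_MDIO_READ_IN_ADDR_OFST, &v, 4);
    }
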
/* MC_CMD_MDIO_READ_OUT msgresponse */
#define MC_CMD_MDIO_READ_OUT_LEN 8
/* Value */
#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
/* Status. The MDIO commands return the raw status bits from the MDIO block. A
* "good" transaction should have the DONE bit set and all other bits clear.
*/
#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
/* enum: Good. */
#define MC_CMD_MDIO_STATUS_GOOD 0x8
@@ -1879,22 +2314,27 @@
* external devices.
*/
#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
/* enum: Internal. */
/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
/* enum: External. */
/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
/* Port address */
#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
/* Device Address or clause 22. */
#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
/* enum: By default all the MCDI MDIO operations are performed in clause45
 * mode. If you want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
*/
/* MC_CMD_MDIO_CLAUSE22 0x20 */
/* Address */
#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
/* MC_CMD_MDIO_WRITE_OUT msgresponse */
#define MC_CMD_MDIO_WRITE_OUT_LEN 4
@@ -1902,6 +2342,7 @@
* "good" transaction should have the DONE bit set and all other bits clear.
*/
#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
/* enum: Good. */
/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
@@ -1912,7 +2353,7 @@
*/
#define MC_CMD_DBI_WRITE 0x12
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DBI_WRITE_IN msgrequest */
#define MC_CMD_DBI_WRITE_IN_LENMIN 12
@@ -1932,9 +2373,11 @@
/* MC_CMD_DBIWROP_TYPEDEF structuredef */
#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
@@ -1944,6 +2387,7 @@
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
@@ -1959,13 +2403,16 @@
#define MC_CMD_PORT_READ32_IN_LEN 4
/* Address */
#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4
/* MC_CMD_PORT_READ32_OUT msgresponse */
#define MC_CMD_PORT_READ32_OUT_LEN 8
/* Value */
#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
/* Status */
#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
/***********************************/
@@ -1979,13 +2426,16 @@
#define MC_CMD_PORT_WRITE32_IN_LEN 8
/* Address */
#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
/* MC_CMD_PORT_WRITE32_OUT msgresponse */
#define MC_CMD_PORT_WRITE32_OUT_LEN 4
/* Status */
#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
/***********************************/
@@ -1999,6 +2449,7 @@
#define MC_CMD_PORT_READ128_IN_LEN 4
/* Address */
#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
/* MC_CMD_PORT_READ128_OUT msgresponse */
#define MC_CMD_PORT_READ128_OUT_LEN 20
@@ -2007,6 +2458,7 @@
#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
/* Status */
#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
/***********************************/
@@ -2020,6 +2472,7 @@
#define MC_CMD_PORT_WRITE128_IN_LEN 20
/* Address */
#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
@@ -2028,6 +2481,7 @@
#define MC_CMD_PORT_WRITE128_OUT_LEN 4
/* Status */
#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
/* MC_CMD_CAPABILITIES structuredef */
#define MC_CMD_CAPABILITIES_LEN 4
@@ -2072,24 +2526,54 @@
#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
-/* See MC_CMD_CAPABILITIES */
+/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4
+/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4
+/* Base MAC address for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+/* Base MAC address for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4
+/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port0. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port1. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
-/* This field contains a 16-bit value for each of the types of NVRAM area. The
- * values are defined in the firmware/mc/platform/.c file for a specific board
- * type, but otherwise have no meaning to the MC; they are used by the driver
- * to manage selection of appropriate firmware updates.
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4
+/* Siena only. This field contains a 16-bit value for each of the types of
+ * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a
+ * specific board type, but otherwise have no meaning to the MC; they are used
+ * by the driver to manage selection of appropriate firmware updates. Unused on
+ * EF10 and later (use MC_CMD_NVRAM_METADATA).
*/
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
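
Editor's sketch of indexing the 16-bit subtype list described above; the
helper is hypothetical and assumes MCDI's little-endian byte order.

    #include <stdint.h>

    /* Read the firmware subtype for NVRAM area 'type' from the response. */
    static inline uint16_t fw_subtype(const uint8_t *resp, unsigned int type)
    {
            unsigned int ofst = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST +
                    type * MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN;

            return (uint16_t)resp[ofst] | ((uint16_t)resp[ofst + 1] << 8);
    }
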
@@ -2103,7 +2587,7 @@
*/
#define MC_CMD_DBI_READX 0x19
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DBI_READX_IN msgrequest */
#define MC_CMD_DBI_READX_IN_LENMIN 8
@@ -2130,9 +2614,11 @@
/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
@@ -2149,7 +2635,7 @@
*/
#define MC_CMD_SET_RAND_SEED 0x1a
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_RAND_SEED_IN msgrequest */
#define MC_CMD_SET_RAND_SEED_IN_LEN 16
@@ -2198,14 +2684,17 @@
#define MC_CMD_DRV_ATTACH_IN_LEN 12
/* new state to set if UPDATE=1 */
#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4
#define MC_CMD_DRV_ATTACH_LBN 0
#define MC_CMD_DRV_ATTACH_WIDTH 1
#define MC_CMD_DRV_PREBOOT_LBN 1
#define MC_CMD_DRV_PREBOOT_WIDTH 1
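
Editor's aside: a one-line sketch of composing NEW_STATE from the bit
positions above (attach requested, preboot bit clear); hypothetical usage,
not driver code.

    #include <stdint.h>

    /* Attach the driver with the PREBOOT flag clear */
    uint32_t new_state = (1u << MC_CMD_DRV_ATTACH_LBN) |
                         (0u << MC_CMD_DRV_PREBOOT_LBN);
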
/* 1 to set new state, or 0 to just report the existing state */
#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4
/* preferred datapath firmware (for Huntington; ignored for Siena) */
#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4
/* enum: Prefer to use full featured firmware */
#define MC_CMD_FW_FULL_FEATURED 0x0
/* enum: Prefer to use firmware with fewer features but lower latency */
@@ -2229,13 +2718,16 @@
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4
/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4
/* Flags associated with this function */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
/* enum: Labels the lowest-numbered function visible to the OS */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
/* enum: The function can control the link state of the physical port it is
@@ -2260,6 +2752,7 @@
#define MC_CMD_SHMUART_IN_LEN 4
/* ??? */
#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define MC_CMD_SHMUART_IN_FLAG_LEN 4
/* MC_CMD_SHMUART_OUT msgresponse */
#define MC_CMD_SHMUART_OUT_LEN 0
@@ -2297,6 +2790,7 @@
* (TBD).
*/
#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
@@ -2314,8 +2808,10 @@
#define MC_CMD_PCIE_CREDITS_IN_LEN 8
/* poll period. 0 is disabled */
#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
/* wipe statistics */
#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
@@ -2346,31 +2842,54 @@
/* MC_CMD_RXD_MONITOR_IN msgrequest */
#define MC_CMD_RXD_MONITOR_IN_LEN 12
#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4
#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
/* MC_CMD_RXD_MONITOR_OUT msgresponse */
#define MC_CMD_RXD_MONITOR_OUT_LEN 80
#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
/***********************************/
@@ -2379,13 +2898,14 @@
*/
#define MC_CMD_PUTS 0x23
-#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_PUTS_IN msgrequest */
#define MC_CMD_PUTS_IN_LENMIN 13
#define MC_CMD_PUTS_IN_LENMAX 252
#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_DEST_LEN 4
#define MC_CMD_PUTS_IN_UART_LBN 0
#define MC_CMD_PUTS_IN_UART_WIDTH 1
#define MC_CMD_PUTS_IN_PORT_LBN 1
@@ -2417,6 +2937,7 @@
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
/* flags */
#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
@@ -2433,8 +2954,10 @@
#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4
/* Bitmask of supported capabilities */
#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
#define MC_CMD_PHY_CAP_10HDX_LBN 1
#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
#define MC_CMD_PHY_CAP_10FDX_LBN 2
@@ -2459,17 +2982,39 @@
#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
#define MC_CMD_PHY_CAP_DDM_LBN 12
#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define MC_CMD_PHY_CAP_100000FDX_LBN 13
+#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_25000FDX_LBN 14
+#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_50000FDX_LBN 15
+#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16
+#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_LBN 18
+#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4
/* enum: Xaui. */
#define MC_CMD_MEDIA_XAUI 0x1
/* enum: CX4. */
@@ -2485,6 +3030,7 @@
/* enum: QSFP+. */
#define MC_CMD_MEDIA_QSFP_PLUS 0x7
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
@@ -2515,6 +3061,7 @@
#define MC_CMD_START_BIST_IN_LEN 4
/* Type of test. */
#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_IN_TYPE_LEN 4
/* enum: Run the PHY's short cable BIST. */
#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
/* enum: Run the PHY's long cable BIST. */
@@ -2556,6 +3103,7 @@
#define MC_CMD_POLL_BIST_OUT_LEN 8
/* result */
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
/* enum: Running. */
#define MC_CMD_POLL_BIST_RUNNING 0x1
/* enum: Passed. */
@@ -2565,19 +3113,26 @@
/* enum: Timed-out. */
#define MC_CMD_POLL_BIST_TIMEOUT 0x4
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4
/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4
/* Status of each channel A */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4
/* enum: Ok. */
#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
/* enum: Open. */
@@ -2590,14 +3145,17 @@
#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
/* Status of each channel B */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
/* Status of each channel C */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
/* Status of each channel D */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
@@ -2605,9 +3163,11 @@
#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4
/* enum: Complete. */
#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
/* enum: Bus switch off I2C write. */
@@ -2631,9 +3191,11 @@
#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4
/* enum: Test has completed. */
#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
/* enum: RAM test - walk ones. */
@@ -2650,8 +3212,10 @@
#define MC_CMD_POLL_BIST_MEM_ECC 0x6
/* Failure address, only valid if result is POLL_BIST_FAILED */
#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4
/* Bus or address space to which the failure address corresponds */
#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4
/* enum: MC MIPS bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
/* enum: CSR IREG bus. */
@@ -2672,14 +3236,19 @@
#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
/* Pattern written to RAM / register */
#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4
/* Actual value read from RAM / register */
#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4
/* ECC error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4
/* ECC parity error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4
/* ECC fatal error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4
/***********************************/
@@ -2831,6 +3400,143 @@
/* Enum values, see field(s): */
/* 100M */
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for
+ * newer NICs with 25G/50G/100G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside Near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+/* Enum values, see field(s): */
+/* 100M */
+
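/*
 * Editorial sketch (not part of the change above): 64-bit MCDI fields such
 * as these loopback masks are carried as two 32-bit words at consecutive
 * LO/HI offsets. Assuming the response payload has already been converted
 * to host byte order, a driver could test one 25G mode as below;
 * loopback_mode_supported() and its parameters are illustrative only.
 */
static bool loopback_mode_supported(const uint8_t *outbuf, unsigned int mode)
{
	uint32_t lo, hi;

	memcpy(&lo, outbuf + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST,
	       sizeof(lo));
	memcpy(&hi, outbuf + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST,
	       sizeof(hi));
	/* Reassemble the 64-bit mask and test the requested mode bit. */
	return (((uint64_t)hi << 32) | lo) & (1ULL << mode);
}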
/***********************************/
/* MC_CMD_GET_LINK
@@ -2848,17 +3554,22 @@
#define MC_CMD_GET_LINK_OUT_LEN 28
/* near-side advertised capabilities */
#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_CAP_LEN 4
/* link-partner advertised capabilities */
#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4
/* Autonegotiated speed in mbit/s. The link may still be down even if this
* reads non-zero.
*/
#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4
/* Current loopback setting. */
#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
@@ -2873,9 +3584,11 @@
#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
@@ -2899,8 +3612,10 @@
#define MC_CMD_SET_LINK_IN_LEN 16
/* ??? */
#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_CAP_LEN 4
/* Flags */
#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4
#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
@@ -2909,12 +3624,14 @@
#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
/* Loopback mode. */
#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
/* A loopback speed of "0" is supported, and means "choose any available
 * speed".
*/
#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
/* MC_CMD_SET_LINK_OUT msgresponse */
#define MC_CMD_SET_LINK_OUT_LEN 0
@@ -2932,6 +3649,7 @@
#define MC_CMD_SET_ID_LED_IN_LEN 4
/* Set LED state. */
#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4
#define MC_CMD_LED_OFF 0x0 /* enum */
#define MC_CMD_LED_ON 0x1 /* enum */
#define MC_CMD_LED_DEFAULT 0x2 /* enum */
@@ -2954,17 +3672,21 @@
* EtherII, VLAN, bug16011 padding).
*/
#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_MTU_LEN 4
#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4
#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
#define MC_CMD_FCNTL_OFF 0x0
/* enum: Respond to flow control. */
@@ -2978,6 +3700,7 @@
/* enum: Issue flow control. */
#define MC_CMD_FCNTL_GENERATE 0x5
#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
@@ -2987,17 +3710,21 @@
* EtherII, VLAN, bug16011 padding).
*/
#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
/* MC_CMD_FCNTL_OFF 0x0 */
/* enum: Respond to flow control. */
@@ -3011,6 +3738,7 @@
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
/* Select which parameters to configure. A parameter will only be modified if
@@ -3019,6 +3747,7 @@
* set).
*/
#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
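/*
 * Editorial sketch (not part of the change above): because each parameter
 * has a CFG_* control bit, a caller can update a single field and leave
 * the others untouched. This assumes the sfc driver's usual MCDI helpers
 * (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_POPULATE_DWORD_1, efx_mcdi_rpc);
 * the function name is illustrative.
 */
static int set_mtu_only(struct efx_nic *efx, unsigned int mtu)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN);

	MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_MTU, mtu);
	/* Only the MTU is modified: just CFG_MTU is set in CONTROL. */
	MCDI_POPULATE_DWORD_1(inbuf, SET_MAC_EXT_IN_CONTROL,
			      SET_MAC_EXT_IN_CFG_MTU, 1);
	return efx_mcdi_rpc(efx, MC_CMD_SET_MAC_EXT, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}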
@@ -3040,6 +3769,7 @@
* to 0.
*/
#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4
/***********************************/
@@ -3144,6 +3874,7 @@
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_CMD_LEN 4
#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
@@ -3158,9 +3889,16 @@
#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t)
+ */
#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4
/* port id so vadapter stats can be provided */
#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
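/*
 * Editorial sketch (not part of the change above): choosing DMA_LEN as the
 * comment on that field describes. num_mac_stats stands for the count a
 * driver would have read from MC_CMD_GET_CAPABILITIES_V4_OUT; both names
 * here are illustrative.
 */
static size_t mac_stats_dma_len(unsigned int num_mac_stats, bool have_caps_v4)
{
	if (have_caps_v4)
		return num_mac_stats * sizeof(uint64_t);
	/* Legacy firmware without GET_CAPABILITIES_V4: fixed layout. */
	return MC_CMD_MAC_NSTATS * sizeof(uint64_t);
}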
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -3305,9 +4043,126 @@
#define MC_CMD_GMAC_DMABUF_START 0x40
/* enum: End of GMAC stats buffer space, for Siena only. */
#define MC_CMD_GMAC_DMABUF_END 0x5f
-#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
+/* enum: GENERATION_END value, used together with GENERATION_START to verify
+ * consistency of DMAd data. For legacy firmware / drivers without extended
+ * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise,
+ * this value is invalid/reserved and GENERATION_END is written as the last
+ * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that
+ * this is consistent with the legacy behaviour, in the sense that entry 96 is
+ * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details.
+ */
+#define MC_CMD_MAC_GENERATION_END 0x60
#define MC_CMD_MAC_NSTATS 0x61 /* enum */
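/*
 * Editorial sketch (not part of the change above): consuming the
 * generation markers to validate a DMAd statistics snapshot.
 * GENERATION_START is assumed to be the first 64-bit entry of the buffer,
 * as elsewhere in this header; GENERATION_END is the last entry, which
 * degenerates to entry 0x60 in the legacy layout described above. Equal
 * values mean firmware was not rewriting the buffer while the host read
 * it, so a mismatch should trigger a re-read.
 */
static bool mac_stats_consistent(const uint64_t *stats, size_t dma_len)
{
	uint64_t gen_start = stats[0];
	uint64_t gen_end = stats[dma_len / sizeof(uint64_t) - 1];

	return gen_start == gen_end;
}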
+/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3)
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum: Start of FEC stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_FEC_DMABUF_START 0x61
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61
+/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66
+/* enum: This includes the space at offset 103, which is the final
+ * GENERATION_END in a MAC_STATS_V2 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V2 0x68
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3)
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum: Start of CTPIO stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
+/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
+ * target VI
+ */
+#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68
+/* enum: Number of times a CTPIO send wrote beyond frame end (informational
+ * only)
+ */
+#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69
+/* enum: Number of CTPIO failures because the TX doorbell was written before
+ * the end of the frame data
+ */
+#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a
+/* enum: Number of CTPIO failures because the internal FIFO overflowed */
+#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b
+/* enum: Number of CTPIO failures because the host did not deliver data fast
+ * enough to avoid MAC underflow
+ */
+#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c
+/* enum: Number of CTPIO failures because the host did not deliver all the
+ * frame data within the timeout
+ */
+#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d
+/* enum: Number of CTPIO failures because the frame data arrived out of order
+ * or with gaps
+ */
+#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e
+/* enum: Number of CTPIO failures because the host started a new frame before
+ * completing the previous one
+ */
+#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f
+/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits
+ * or not 32-bit aligned
+ */
+#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70
+/* enum: Number of CTPIO fallbacks because another VI on the same port was
+ * sending a CTPIO frame
+ */
+#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71
+/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled
+ */
+#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72
+/* enum: Number of CTPIO fallbacks because length in header was less than 29
+ * bytes
+ */
+#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73
+/* enum: Total number of successful CTPIO sends on this port */
+#define MC_CMD_MAC_CTPIO_SUCCESS 0x74
+/* enum: Total number of CTPIO fallbacks on this port */
+#define MC_CMD_MAC_CTPIO_FALLBACK 0x75
+/* enum: Total number of CTPIO poisoned frames on this port, whether erased or
+ * not
+ */
+#define MC_CMD_MAC_CTPIO_POISON 0x76
+/* enum: Total number of CTPIO erased frames on this port */
+#define MC_CMD_MAC_CTPIO_ERASE 0x77
+/* enum: This includes the space at offset 120, which is the final
+ * GENERATION_END in a MAC_STATS_V3 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V3 0x79
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */
+
/***********************************/
/* MC_CMD_SRIOV
@@ -3318,21 +4173,28 @@
/* MC_CMD_SRIOV_IN msgrequest */
#define MC_CMD_SRIOV_IN_LEN 12
#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
/* MC_CMD_SRIOV_OUT msgresponse */
#define MC_CMD_SRIOV_OUT_LEN 8
#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
/* this is only used for the first record */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
@@ -3342,6 +4204,7 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
@@ -3352,6 +4215,7 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
@@ -3403,10 +4267,12 @@
/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4
#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
/* A type value of 1 is unused. */
#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4
/* enum: Magic */
#define MC_CMD_WOL_TYPE_MAGIC 0x0
/* enum: MS Windows Magic */
@@ -3428,7 +4294,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
@@ -3437,9 +4305,13 @@
/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
@@ -3448,7 +4320,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
@@ -3461,7 +4335,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
@@ -3476,8 +4352,11 @@
/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
@@ -3486,6 +4365,7 @@
/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -3499,6 +4379,7 @@
/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4
/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
@@ -3516,6 +4397,7 @@
/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
@@ -3556,6 +4438,7 @@
#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
/* enum: Disabled callisto. */
#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
/* enum: MC firmware. */
@@ -3612,47 +4495,65 @@
/* MC_CMD_NVRAM_INFO_IN msgrequest */
#define MC_CMD_NVRAM_INFO_IN_LEN 4
#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
/* MC_CMD_NVRAM_INFO_OUT msgresponse */
#define MC_CMD_NVRAM_INFO_OUT_LEN 24
#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4
/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4
/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
*/
#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4
/***********************************/
@@ -3670,6 +4571,7 @@
*/
#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
@@ -3680,9 +4582,11 @@
*/
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
@@ -3703,20 +4607,26 @@
/* MC_CMD_NVRAM_READ_IN msgrequest */
#define MC_CMD_NVRAM_READ_IN_LEN 12
#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4
/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4
/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4
/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4
/* Optional control info. If a partition is stored with an A/B versioning
* scheme (i.e. in more than one physical partition in NVRAM) the host can set
* this to control which underlying physical partition is used to read data
@@ -3726,6 +4636,7 @@
* verifying by reading with MODE=TARGET_BACKUP.
*/
#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4
/* enum: Same as omitting MODE: caller sees data in current partition unless it
* holds the write lock in which case it sees data in the partition it is
* updating.
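/*
 * Editorial sketch (not part of the change above): the read-modify-write-
 * verify sequence described in the comment on MODE, expressed as ordered
 * calls. Every helper below is a hypothetical wrapper around the
 * corresponding MC_CMD_NVRAM_* request, shown only to make the ordering
 * and the scope of the write lock concrete.
 */
static int nvram_rmw_verify(unsigned int type, uint8_t *buf, size_t len)
{
	int rc;

	rc = nvram_update_start(type);		/* take the write lock */
	if (rc)
		return rc;
	/* Read the old contents of the partition being updated. */
	rc = nvram_read(type, buf, len, /* MODE= */ TARGET_CURRENT);
	if (!rc) {
		modify_contents(buf, len);	/* hypothetical edit step */
		rc = nvram_write(type, buf, len);
	}
	if (!rc)	/* verify while still holding the lock */
		rc = nvram_read(type, buf, len, /* MODE= */ TARGET_BACKUP);
	nvram_update_finish(type);		/* release the write lock */
	return rc;
}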
@@ -3765,10 +4676,13 @@
#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4
#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
@@ -3791,10 +4705,13 @@
/* MC_CMD_NVRAM_ERASE_IN msgrequest */
#define MC_CMD_NVRAM_ERASE_IN_LEN 12
#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4
#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4
/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
@@ -3815,9 +4732,11 @@
*/
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4
/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
* request with additional flags indicating version of NVRAM_UPDATE commands in
@@ -3826,10 +4745,13 @@
*/
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
@@ -3848,16 +4770,19 @@
 * This process takes a few seconds to complete, so it is likely to take more
 * than the MCDI timeout. Hence signature verification is initiated when
* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, however, the
- * MCDI command returns immediately with error code EAGAIN. Subsequent
- * NVRAM_UPDATE_FINISH_V2_IN requests also return EAGAIN if the verification is
- * in progress. Once the verification has completed, this response payload
- * includes the results of the signature verification. Note that the nvram lock
- * in firmware is only released after the verification has completed and the
- * host has read back the result code from firmware.
+ * MCDI command is run in a background MCDI processing thread. This response
+ * payload includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
*/
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
/* Result of nvram update completion processing */
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
/* enum: Verify succeeded without any errors. */
#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
/* enum: CMS format verification failed due to an internal error. */
@@ -3884,6 +4809,12 @@
* Trusted approver's list.
*/
#define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
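/*
 * Editorial sketch (not part of the change above): host-side handling of
 * RESULT_CODE. RC_UNKNOWN (0) is what legacy firmware that reports nothing
 * would leave here; treating it as success is an assumption made for this
 * illustration, as is the errno chosen for the failure case.
 */
static int nvram_verify_result(uint32_t result_code)
{
	switch (result_code) {
	case MC_CMD_NVRAM_VERIFY_RC_UNKNOWN:	/* legacy: nothing reported */
	case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
		return 0;
	default:				/* any RC_* failure code */
		return -EPERM;
	}
}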
/***********************************/
@@ -3911,6 +4842,7 @@
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_IN_FLAGS_LEN 4
#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
/* MC_CMD_REBOOT_OUT msgresponse */
@@ -3947,11 +4879,12 @@
*/
#define MC_CMD_REBOOT_MODE 0x3f
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_REBOOT_MODE_IN msgrequest */
#define MC_CMD_REBOOT_MODE_IN_LEN 4
#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
/* enum: Normal. */
#define MC_CMD_REBOOT_MODE_NORMAL 0x0
/* enum: Power-on Reset. */
@@ -3966,6 +4899,7 @@
/* MC_CMD_REBOOT_MODE_OUT msgresponse */
#define MC_CMD_REBOOT_MODE_OUT_LEN 4
#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
/***********************************/
@@ -4001,7 +4935,7 @@
*/
#define MC_CMD_SENSOR_INFO 0x41
-#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_SENSOR_INFO_IN msgrequest */
#define MC_CMD_SENSOR_INFO_IN_LEN 0
@@ -4015,12 +4949,14 @@
 * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), etc.
*/
#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
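/*
 * Editorial sketch (not part of the change above): walking the sensor
 * pages. Bit 31 of each page's MASK is the next-page flag rather than a
 * sensor, so iteration continues while it is set. read_sensor_mask() and
 * handle_sensor_page() are hypothetical host-side helpers around
 * MC_CMD_SENSOR_INFO_EXT.
 */
static void for_each_sensor_page(void)
{
	uint32_t page = 0, mask;

	do {
		mask = read_sensor_mask(page);
		/* Bits 0-30 describe sensors; bit 31 is the paging flag. */
		handle_sensor_page(page, mask & 0x7fffffffu);
		page++;
	} while (mask & (1u << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN));
}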
/* MC_CMD_SENSOR_INFO_OUT msgresponse */
#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
/* enum: Controller temperature: degC */
#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
/* enum: Phy common temperature: degC */
@@ -4183,6 +5119,20 @@
#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
/* enum: Board temperature (back): degC */
#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+/* enum: 1.8v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V8 0x51
+/* enum: 2.5v power current: mA */
+#define MC_CMD_SENSOR_IN_I2V5 0x52
+/* enum: 3.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I3V3 0x53
+/* enum: 12v power current: mA */
+#define MC_CMD_SENSOR_IN_I12V0 0x54
+/* enum: 1.3v power: mV */
+#define MC_CMD_SENSOR_IN_1V3 0x55
+/* enum: 1.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V3 0x56
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
@@ -4196,6 +5146,7 @@
#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO_OUT */
#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
@@ -4247,7 +5198,7 @@
*/
#define MC_CMD_READ_SENSORS 0x42
-#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_READ_SENSORS_IN msgrequest */
#define MC_CMD_READ_SENSORS_IN_LEN 8
@@ -4266,6 +5217,7 @@
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
/* Size in bytes of host buffer. */
#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
/* MC_CMD_READ_SENSORS_OUT msgresponse */
#define MC_CMD_READ_SENSORS_OUT_LEN 0
@@ -4319,6 +5271,7 @@
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
/* enum: Ok. */
#define MC_CMD_PHY_STATE_OK 0x1
/* enum: Faulty. */
@@ -4355,6 +5308,7 @@
/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -4371,6 +5325,7 @@
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
@@ -4381,13 +5336,16 @@
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
@@ -4398,6 +5356,7 @@
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -4412,7 +5371,9 @@
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
@@ -4451,6 +5412,7 @@
#define MC_CMD_TESTASSERT_V2_IN_LEN 4
/* How to provoke the assertion */
#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
* you're testing firmware, this is what you want.
*/
@@ -4486,6 +5448,7 @@
#define MC_CMD_WORKAROUND_IN_LEN 8
/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4
/* enum: Bug 17230 work around. */
#define MC_CMD_WORKAROUND_BUG17230 0x1
/* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -4514,6 +5477,7 @@
* the workaround
*/
#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4
/* MC_CMD_WORKAROUND_OUT msgresponse */
#define MC_CMD_WORKAROUND_OUT_LEN 0
@@ -4523,6 +5487,7 @@
*/
#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
@@ -4543,6 +5508,7 @@
/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
@@ -4550,6 +5516,7 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
/* in bytes */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
@@ -4568,12 +5535,14 @@
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
/* MC_CMD_NVRAM_TEST_OUT msgresponse */
#define MC_CMD_NVRAM_TEST_OUT_LEN 4
#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4
/* enum: Passed. */
#define MC_CMD_NVRAM_TEST_PASS 0x0
/* enum: Failed. */
@@ -4594,12 +5563,16 @@
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
/* 0-6 low->high de-emph. */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
/* 0-8 low->high boost */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
@@ -4608,10 +5581,13 @@
#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
/* input bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
/* output bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
/* direction */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
/* enum: Out. */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
/* enum: In. */
@@ -4626,21 +5602,26 @@
*/
#define MC_CMD_SENSOR_SET_LIMS 0x4e
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
@@ -4657,9 +5638,13 @@
/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
/***********************************/
@@ -4680,6 +5665,7 @@
#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
/* total number of partitions */
#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
/* type ID code for each of NUM_PARTITIONS partitions */
#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
@@ -4700,6 +5686,7 @@
#define MC_CMD_NVRAM_METADATA_IN_LEN 4
/* Partition type ID code */
#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4
/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
@@ -4707,7 +5694,9 @@
#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
/* Partition type ID code */
#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
@@ -4716,6 +5705,7 @@
#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
/* Subtype ID code for content of this partition */
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4
/* 1st component of W.X.Y.Z version number for content of this partition */
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
@@ -4756,8 +5746,10 @@
#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
/* Number of allocated MAC addresses */
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4
/* Spacing of allocated MAC addresses */
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
/***********************************/
@@ -4772,6 +5764,7 @@
#define MC_CMD_CLP_IN_LEN 4
/* Sub operation */
#define MC_CMD_CLP_IN_OP_OFST 0
+#define MC_CMD_CLP_IN_OP_LEN 4
/* enum: Return to factory default settings */
#define MC_CMD_CLP_OP_DEFAULT 0x1
/* enum: Set MAC address */
@@ -4789,6 +5782,7 @@
/* MC_CMD_CLP_IN_DEFAULT msgrequest */
#define MC_CMD_CLP_IN_DEFAULT_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
@@ -4796,6 +5790,7 @@
/* MC_CMD_CLP_IN_SET_MAC msgrequest */
#define MC_CMD_CLP_IN_SET_MAC_LEN 12
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MAC address assigned to port */
#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
@@ -4809,6 +5804,7 @@
/* MC_CMD_CLP_IN_GET_MAC msgrequest */
#define MC_CMD_CLP_IN_GET_MAC_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
@@ -4822,6 +5818,7 @@
/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* Boot flag */
#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
@@ -4832,6 +5829,7 @@
/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
@@ -4849,11 +5847,12 @@
*/
#define MC_CMD_MUM 0x57
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_MUM_IN msgrequest */
#define MC_CMD_MUM_IN_LEN 4
#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_HDR_LEN 4
#define MC_CMD_MUM_IN_OP_LBN 0
#define MC_CMD_MUM_IN_OP_WIDTH 8
/* enum: NULL MCDI command to MUM */
@@ -4893,26 +5892,32 @@
#define MC_CMD_MUM_IN_NULL_LEN 4
/* MUM cmd header */
#define MC_CMD_MUM_IN_CMD_OFST 0
+#define MC_CMD_MUM_IN_CMD_LEN 4
/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_READ msgrequest */
#define MC_CMD_MUM_IN_READ_LEN 16
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* ID of the device (connected to MUM) whose registers are to be read */
#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4
/* enum: Hittite HMC1035 clock generator on Sorrento board */
#define MC_CMD_MUM_DEV_HITTITE 0x1
/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
/* 32-bit address to read from */
#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+#define MC_CMD_MUM_IN_READ_ADDR_LEN 4
/* Number of words to read. */
#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
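/* Minimal sketch (not the driver's code) of issuing the read above through
 * the driver's MCDI helpers (MCDI_DECLARE_BUF/MCDI_SET_DWORD/efx_mcdi_rpc
 * from mcdi.h); read_op is the opcode value from the MC_CMD_MUM_IN_OP_ enum
 * list earlier, and response handling is elided.
 */
static int mum_read_sketch(struct efx_nic *efx, u32 read_op, u32 device,
			   u32 addr, u32 numwords)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MUM_IN_READ_LEN);

	MCDI_SET_DWORD(inbuf, MUM_IN_CMD, read_op);
	MCDI_SET_DWORD(inbuf, MUM_IN_READ_DEVICE, device);
	MCDI_SET_DWORD(inbuf, MUM_IN_READ_ADDR, addr);
	MCDI_SET_DWORD(inbuf, MUM_IN_READ_NUMWORDS, numwords);
	return efx_mcdi_rpc(efx, MC_CMD_MUM, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}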
/* MC_CMD_MUM_IN_WRITE msgrequest */
#define MC_CMD_MUM_IN_WRITE_LENMIN 16
@@ -4920,12 +5925,15 @@
#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* ID of the device (connected to MUM) whose registers are to be written */
#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
/* enum: Hittite HMC1035 clock generator on Sorrento board */
/* MC_CMD_MUM_DEV_HITTITE 0x1 */
/* 32-bit address to write to */
#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
/* Words to write */
#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
@@ -4938,12 +5946,16 @@
#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MUM I2C cmd code */
#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
/* Number of bytes to write */
#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
/* Number of bytes to read */
#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
/* Bytes to write */
#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
@@ -4954,21 +5966,28 @@
#define MC_CMD_MUM_IN_LOG_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define MC_CMD_MUM_IN_LOG_OP_LEN 4
#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */
/* Enable/disable debug output to UART */
#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
/* MC_CMD_MUM_IN_GPIO msgrequest */
#define MC_CMD_MUM_IN_GPIO_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
@@ -4981,40 +6000,56 @@
/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
/* The first 32-bit word to be written to the GPIO OUT register. */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
/* The second 32-bit word to be written to the GPIO OUT register. */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
@@ -5027,26 +6062,34 @@
/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
@@ -5054,7 +6097,9 @@
#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
@@ -5064,13 +6109,16 @@
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* Bit-mask of clocks to be programmed */
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
/* Control flags for clock programming */
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
@@ -5082,19 +6130,24 @@
#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* Enable/Disable FPGA config from flash */
#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_QSFP msgrequest */
#define MC_CMD_MUM_IN_QSFP_LEN 12
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
@@ -5104,52 +6157,77 @@
#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_OUT msgresponse */
#define MC_CMD_MUM_OUT_LEN 0
@@ -5160,6 +6238,7 @@
/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
@@ -5197,8 +6276,10 @@
#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
/* The first 32-bit word read from the GPIO IN register. */
#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
/* The second 32-bit word read from the GPIO IN register. */
#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
@@ -5207,8 +6288,10 @@
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
/* The first 32-bit word read from the GPIO OUT register. */
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
/* The second 32-bit word read from the GPIO OUT register. */
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
@@ -5216,11 +6299,14 @@
/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
@@ -5249,6 +6335,7 @@
/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
@@ -5256,6 +6343,7 @@
/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
@@ -5263,7 +6351,9 @@
/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
@@ -5272,6 +6362,7 @@
/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
@@ -5279,6 +6370,7 @@
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
/* in bytes */
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
@@ -5287,11 +6379,14 @@
/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
@@ -5299,12 +6394,14 @@
#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
/* Discrete (soldered) DDR resistor strap info */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
/* Number of SODIMM info records */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
/* Array of SODIMM info records */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
@@ -5365,6 +6462,7 @@
/* EVB_PORT_ID structuredef */
#define EVB_PORT_ID_LEN 4
#define EVB_PORT_ID_PORT_ID_OFST 0
+#define EVB_PORT_ID_PORT_ID_LEN 4
/* enum: An invalid port handle. */
#define EVB_PORT_ID_NULL 0x0
/* enum: The port assigned to this function. */
@@ -5460,6 +6558,10 @@
#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
/* enum: MUM firmware partition */
#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: SUC firmware partition (this is intentionally an alias of
+ * MUM_FIRMWARE)
+ */
+#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00
/* enum: MUM Non-volatile log output partition. */
#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
/* enum: MUM Application table partition. */
@@ -5474,8 +6576,8 @@
#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
/* enum: UEFI expansion ROM if separate from PXE */
#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
-/* enum: Spare partition 0 */
-#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000
+/* enum: Used by the expansion ROM for logging */
+#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000
/* enum: Used for XIP code of shmbooted images */
#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
/* enum: Spare partition 2 */
@@ -5488,6 +6590,27 @@
#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
/* enum: Spare partition 5 */
#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
+/* enum: Partition for reporting MC status. See mc_flash_layout.h
+ * medford_mc_status_hdr_t for layout on Medford.
+ */
+#define NVRAM_PARTITION_TYPE_STATUS 0x1600
+/* enum: Spare partition 13 */
+#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700
+/* enum: Spare partition 14 */
+#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800
+/* enum: Spare partition 15 */
+#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900
+/* enum: Spare partition 16 */
+#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00
+/* enum: Factory defaults for dynamic configuration */
+#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00
+/* enum: Factory defaults for expansion ROM configuration */
+#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00
+/* enum: Field Replaceable Unit inventory information for use on IPMI
+ * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a
+ * subset of the information stored in this partition.
+ */
+#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -5502,6 +6625,7 @@
/* LICENSED_APP_ID structuredef */
#define LICENSED_APP_ID_LEN 4
#define LICENSED_APP_ID_ID_OFST 0
+#define LICENSED_APP_ID_ID_LEN 4
/* enum: OpenOnload */
#define LICENSED_APP_ID_ONLOAD 0x1
/* enum: PTP timestamping */
@@ -5526,6 +6650,14 @@
#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
/* enum: Capture SolarSystem 40G */
#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
+/* enum: Capture SolarSystem 1G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000
+/* enum: ScaleOut Onload */
+#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000
+/* enum: SCS Network Analytics Dashboard */
+#define LICENSED_APP_ID_DSHBRD 0x4000
+/* enum: SolarCapture Trading Analytics */
+#define LICENSED_APP_ID_SCATRD 0x8000
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
@@ -5590,6 +6722,14 @@
#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_DSHBRD_LBN 14
+#define LICENSED_V3_APPS_DSHBRD_WIDTH 1
+#define LICENSED_V3_APPS_SCATRD_LBN 15
+#define LICENSED_V3_APPS_SCATRD_WIDTH 1
#define LICENSED_V3_APPS_MASK_LBN 0
#define LICENSED_V3_APPS_MASK_WIDTH 64
@@ -5636,6 +6776,18 @@
#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
/* enum: This is a TX completion event, not a timestamp */
#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is a TX completion event for a CTPIO transmit. The event format
+ * is the same as for TX_EV_COMPLETION.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11
+/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_LO.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12
+/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_HI.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13
/* enum: This is the low part of a TX timestamp event */
#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
/* enum: This is the high part of a TX timestamp event */
@@ -5669,6 +6821,19 @@
#define RSS_MODE_HASH_SELECTOR_LBN 0
#define RSS_MODE_HASH_SELECTOR_WIDTH 8
+/* CTPIO_STATS_MAP structuredef */
+#define CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define CTPIO_STATS_MAP_VI_OFST 0
+#define CTPIO_STATS_MAP_VI_LEN 2
+#define CTPIO_STATS_MAP_VI_LBN 0
+#define CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define CTPIO_STATS_MAP_BUCKET_OFST 2
+#define CTPIO_STATS_MAP_BUCKET_LEN 2
+#define CTPIO_STATS_MAP_BUCKET_LBN 16
+#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
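/* Sketch of the _LBN/_WIDTH convention used throughout this file: a field
 * occupies WIDTH bits starting at bit LBN of the little-endian structure
 * word, so it can be extracted with a shift and mask (valid for widths
 * below 32). The macro and helper names here are hypothetical; the driver's
 * real accessors live in bitfield.h and mcdi.h.
 */
#include <stdint.h>

#define MCDI_EXTRACT_FIELD(dword, name) \
	(((dword) >> name ## _LBN) & ((1u << name ## _WIDTH) - 1))

static inline uint16_t ctpio_stats_map_bucket(uint32_t map)
{
	return MCDI_EXTRACT_FIELD(map, CTPIO_STATS_MAP_BUCKET);
}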
/***********************************/
/* MC_CMD_READ_REGS
@@ -5676,7 +6841,7 @@
*/
#define MC_CMD_READ_REGS 0x50
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_REGS_IN msgrequest */
#define MC_CMD_READ_REGS_IN_LEN 0
@@ -5709,17 +6874,22 @@
#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
/* The initial timer value. The load value is ignored if the timer mode is DIS.
*/
#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
/* The reload value is ignored in one-shot modes */
#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
/* Event queue flags */
#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
@@ -5735,6 +6905,7 @@
#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
/* enum: Immediate */
@@ -5745,13 +6916,16 @@
#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
/* Target EVQ for wakeups if in wakeup mode. */
#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
/* Target interrupt if in interrupting mode (note union with target EVQ). Use
 * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
* purposes.
*/
#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
/* Event Counter Mode. */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
/* enum: Disabled */
@@ -5762,6 +6936,7 @@
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
/* Event queue packet count threshold. */
#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
@@ -5774,6 +6949,7 @@
#define MC_CMD_INIT_EVQ_OUT_LEN 4
/* Only valid if INTRFLAG was true */
#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4
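/* Sketch (error handling and most flags elided) of building a minimal
 * one-buffer INIT_EVQ request with the fields above, using the driver's
 * MCDI helpers from mcdi.h; the driver's event-queue init path does the
 * full version of this.
 */
static int init_evq_sketch(struct efx_nic *efx, unsigned int instance,
			   unsigned int entries, dma_addr_t dma_addr)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_EVQ_IN_LEN(1));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
	size_t outlen;

	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, entries);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_SET_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, dma_addr);
	return efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, sizeof(inbuf),
			    outbuf, sizeof(outbuf), &outlen);
}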
/* MC_CMD_INIT_EVQ_V2_IN msgrequest */
#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
@@ -5781,17 +6957,22 @@
#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
/* The initial timer value. The load value is ignored if the timer mode is DIS.
*/
#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4
/* The reload value is ignored in one-shot modes */
#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4
/* Event queue flags */
#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
@@ -5828,6 +7009,7 @@
*/
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
/* enum: Immediate */
@@ -5838,13 +7020,16 @@
#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
/* Target EVQ for wakeups if in wakeup mode. */
#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
/* Target interrupt if in interrupting mode (note union with target EVQ). Use
 * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
* purposes.
*/
#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
/* Event Counter Mode. */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
/* enum: Disabled */
@@ -5855,6 +7040,7 @@
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
/* Event queue packet count threshold. */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
@@ -5867,8 +7053,10 @@
#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8
/* Only valid if INTRFLAG was true */
#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4
/* Actual configuration applied on the card */
#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
@@ -5916,17 +7104,22 @@
#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
*/
#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
@@ -5945,8 +7138,10 @@
#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
@@ -5961,17 +7156,22 @@
#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
/* Size, in entries */
#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
*/
#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
@@ -6007,8 +7207,10 @@
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
@@ -6017,6 +7219,7 @@
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
/* MC_CMD_INIT_RXQ_OUT msgresponse */
#define MC_CMD_INIT_RXQ_OUT_LEN 0
@@ -6040,18 +7243,23 @@
#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to
* INIT_EVQ.
*/
#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
@@ -6072,8 +7280,10 @@
#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
@@ -6088,18 +7298,23 @@
#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
/* Size, in entries */
#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to
* INIT_EVQ.
*/
#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
@@ -6122,10 +7337,14 @@
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
@@ -6135,6 +7354,7 @@
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
/* Flags related to Qbb flow control mode. */
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
@@ -6161,6 +7381,7 @@
* passed to INIT_EVQ
*/
#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_EVQ_OUT msgresponse */
#define MC_CMD_FINI_EVQ_OUT_LEN 0
@@ -6178,6 +7399,7 @@
#define MC_CMD_FINI_RXQ_IN_LEN 4
/* Instance of RXQ to destroy */
#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_RXQ_OUT msgresponse */
#define MC_CMD_FINI_RXQ_OUT_LEN 0
@@ -6195,6 +7417,7 @@
#define MC_CMD_FINI_TXQ_IN_LEN 4
/* Instance of TXQ to destroy */
#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_TXQ_OUT msgresponse */
#define MC_CMD_FINI_TXQ_OUT_LEN 0
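/* Sketch of the teardown call shape shared by FINI_EVQ, FINI_RXQ and
 * FINI_TXQ: a single-dword request naming the instance to destroy. Helper
 * name is hypothetical; error handling is left to the caller.
 */
static int fini_txq_sketch(struct efx_nic *efx, unsigned int instance)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);

	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, instance);
	return efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}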
@@ -6212,6 +7435,7 @@
#define MC_CMD_DRIVER_EVENT_IN_LEN 12
/* Handle of target EVQ */
#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
/* Bits 0 - 63 of event */
#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
@@ -6237,6 +7461,7 @@
#define MC_CMD_PROXY_CMD_IN_LEN 4
/* The handle of the target function. */
#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_LEN 4
#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
@@ -6252,6 +7477,7 @@
#define MC_PROXY_STATUS_BUFFER_LEN 16
/* Handle allocated by the firmware for this proxy transaction */
#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
/* enum: An invalid handle. */
#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
@@ -6282,6 +7508,7 @@
* elevated privilege mask granted to the requesting function.
*/
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
@@ -6298,6 +7525,7 @@
/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
@@ -6309,6 +7537,7 @@
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 * of blocks, each of the size REQUEST_BLOCK_SIZE.
*/
@@ -6318,6 +7547,7 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
* host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
@@ -6328,8 +7558,10 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
/* Applies to all three buffers */
#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
/* A bit mask defining which MCDI operations may be proxied */
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
@@ -6337,6 +7569,7 @@
/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
@@ -6348,6 +7581,7 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 * of blocks, each of the size REQUEST_BLOCK_SIZE.
*/
@@ -6357,6 +7591,7 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
* host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
@@ -6367,12 +7602,15 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
/* Applies to all three buffers */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
/* A bit mask defining which MCDI operations may be proxied */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
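/* The BLOCK_SIZE fields above must be powers of two (REPLY_BLOCK_SIZE may
 * also be zero when no reply buffer is provided); a hypothetical caller
 * could validate them with the kernel's is_power_of_2() helper before
 * building the request.
 */
#include <linux/log2.h>

static bool proxy_block_sizes_valid(u32 status_sz, u32 request_sz,
				    u32 reply_sz)
{
	return is_power_of_2(status_sz) && is_power_of_2(request_sz) &&
	       (reply_sz == 0 || is_power_of_2(reply_sz));
}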
@@ -6392,7 +7630,9 @@
/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
* is stored in the REPLY_BUFF.
*/
@@ -6408,6 +7648,7 @@
*/
#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
@@ -6427,17 +7668,22 @@
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
/* Owner ID to use */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
/* Size of buffer table pages to use, in bytes (note that only a few values are
* legal on any specific hardware).
*/
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
/* Buffer table IDs for use in DMA descriptors. */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
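/* Sketch (error paths elided, helper name hypothetical) of the allocation
 * round trip: populate the two request dwords, then pull the chunk handle
 * out of the response with the MCDI_DWORD() accessor from mcdi.h.
 */
static int alloc_buftbl_sketch(struct efx_nic *efx, u32 owner_id,
			       u32 page_size, u32 *handle)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_BUFTBL_CHUNK_IN_OWNER, owner_id);
	MCDI_SET_DWORD(inbuf, ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE, page_size);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_BUFTBL_CHUNK, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	*handle = MCDI_DWORD(outbuf, ALLOC_BUFTBL_CHUNK_OUT_HANDLE);
	return 0;
}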
/***********************************/
@@ -6453,10 +7699,13 @@
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
/* ID */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
/* Num entries */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
/* Buffer table entry address */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
@@ -6479,48 +7728,11 @@
/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
-/* PORT_CONFIG_ENTRY structuredef */
-#define PORT_CONFIG_ENTRY_LEN 16
-/* External port number (label) */
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
-/* Port core location */
-#define PORT_CONFIG_ENTRY_CORE_OFST 1
-#define PORT_CONFIG_ENTRY_CORE_LEN 1
-#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */
-#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */
-#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */
-#define PORT_CONFIG_ENTRY_CORE_LBN 8
-#define PORT_CONFIG_ENTRY_CORE_WIDTH 8
-/* Internal number (HW resource) relative to the core */
-#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
-#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
-#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
-#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
-/* Reserved */
-#define PORT_CONFIG_ENTRY_RSVD_OFST 3
-#define PORT_CONFIG_ENTRY_RSVD_LEN 1
-#define PORT_CONFIG_ENTRY_RSVD_LBN 24
-#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8
-/* Bitmask of KR lanes used by the port */
-#define PORT_CONFIG_ENTRY_LANES_OFST 4
-#define PORT_CONFIG_ENTRY_LANES_LBN 32
-#define PORT_CONFIG_ENTRY_LANES_WIDTH 32
-/* Port capabilities (MC_CMD_PHY_CAP_*) */
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
-/* Reserved (align to 16 bytes) */
-#define PORT_CONFIG_ENTRY_RSVD2_OFST 12
-#define PORT_CONFIG_ENTRY_RSVD2_LBN 96
-#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
-
/***********************************/
/* MC_CMD_FILTER_OP
@@ -6534,6 +7746,7 @@
#define MC_CMD_FILTER_OP_IN_LEN 108
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_IN_OP_LEN 4
/* enum: single-recipient filter insert */
#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
/* enum: single-recipient filter remove */
@@ -6554,8 +7767,10 @@
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4
/* fields to include in match criteria */
#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
@@ -6586,6 +7801,7 @@
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
/* receive destination */
#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4
/* enum: drop packets */
#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
/* enum: receive to host */
@@ -6598,8 +7814,10 @@
#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
/* receive queue handle (for multiple queue modes, this is the base queue) */
#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
@@ -6614,13 +7832,16 @@
* MC_CMD_DOT1P_MAPPING_ALLOC.
*/
#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4
/* transmit domain (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4
/* transmit destination (either set the MAC and/or PM bits for explicit
* control, or set this field to TX_DEST_DEFAULT for sensible default
* behaviour)
*/
#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
/* enum: request default behaviour (based on filter type) */
#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
@@ -6653,8 +7874,10 @@
#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
/* Firmware defined register 0 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4
/* Firmware defined register 1 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4
/* source IP address to match (as bytes in network order; set last 12 bytes to
* 0 for IPv4 address)
*/
@@ -6673,6 +7896,7 @@
#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_IN/OP */
/* filter handle (for remove / unsubscribe operations) */
@@ -6683,8 +7907,10 @@
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4
/* fields to include in match criteria */
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
@@ -6743,6 +7969,7 @@
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
/* receive destination */
#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
/* enum: drop packets */
#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
/* enum: receive to host */
@@ -6755,8 +7982,10 @@
#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
/* receive queue handle (for multiple queue modes, this is the base queue) */
#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
@@ -6771,13 +8000,16 @@
* MC_CMD_DOT1P_MAPPING_ALLOC.
*/
#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
/* transmit domain (reserved; set to 0) */
#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
/* transmit destination (either set the MAC and/or PM bits for explicit
* control, or set this field to TX_DEST_DEFAULT for sensible default
* behaviour)
*/
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
/* enum: request default behaviour (based on filter type) */
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
@@ -6810,11 +8042,13 @@
#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
/* Firmware defined register 0 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
* protocol is GRE) to match (as bytes in network order; set last byte to 0 for
* VXLAN/NVGRE, or 1 for Geneve)
*/
#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
@@ -6880,10 +8114,12 @@
* to 0)
*/
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
* to 0)
*/
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
* order; set last 12 bytes to 0 for IPv4 address)
*/
@@ -6899,6 +8135,7 @@
#define MC_CMD_FILTER_OP_OUT_LEN 12
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_IN/OP */
/* Returned filter handle (for insert / subscribe operations). Note that these
@@ -6918,6 +8155,7 @@
#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_EXT_IN/OP */
/* Returned filter handle (for insert / subscribe operations). Note that these
@@ -6944,6 +8182,7 @@
#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4
/* enum: read the list of supported RX filter matches */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
/* enum: read flags indicating restrictions on filter insertion for the calling
@@ -6966,10 +8205,12 @@
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
/* number of supported match types */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4
/* array of supported match types (valid MATCH_FIELDS values for
* MC_CMD_FILTER_OP) sorted in decreasing priority order
*/
@@ -6982,10 +8223,12 @@
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
/* bitfield of filter insertion restrictions */
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
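/* Sketch: testing the DST_IP_MCAST_ONLY restriction bit in the
 * RESTRICTION_FLAGS word of the response (a single-bit field, so a plain
 * shift and mask suffices). Helper name is hypothetical.
 */
static inline bool dst_ip_mcast_only_sketch(u32 restriction_flags)
{
	return restriction_flags &
	       (1u << MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN);
}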
@@ -7005,11 +8248,16 @@
#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
/* identifies the target of the operation */
#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
/* enum: RX dispatcher CPU */
#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
/* enum: TX dispatcher CPU */
#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
-/* enum: Lookup engine (with original metadata format) */
+/* enum: Lookup engine (with original metadata format). Deprecated; used only
+ * by cmdclient as a fallback for very old Huntington firmware, and not
+ * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
+ * instead.
+ */
#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
/* enum: Lookup engine (with requested metadata format) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
@@ -7021,26 +8269,33 @@
#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
/* identifies the type of operation requested */
#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-/* enum: read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
+/* enum: Read a word of DICPU DMEM or a LUE entry. */
#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
-/* enum: write a word of DICPU DMEM or a LUE entry */
+/* enum: Write a word of DICPU DMEM or a LUE entry. */
#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
-/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). */
#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
/* data memory address (DICPU targets) or LUE index (LUE targets) */
#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
/* selector (for MISC_STATE target) */
#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
/* enum: Port to datapath mapping */
#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
/* value to write (for DMEM writes) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
/* value to write (for LUE writes) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
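/* Sketch of the read-modify-write rule quoted above, new = (old & mask) ^
 * value: with mask = 0xffffffff and value = 0 the word is left unchanged,
 * while with mask = 0 it degenerates to a plain write of value.
 */
#include <stdint.h>

static inline uint32_t dmem_rmw(uint32_t old, uint32_t mask, uint32_t value)
{
	return (old & mask) ^ value;
}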
@@ -7049,6 +8304,7 @@
#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
/* value read (for DMEM reads) */
#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
/* value read (for LUE reads) */
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
@@ -7093,6 +8349,7 @@
#define MC_CMD_SET_PF_COUNT_IN_LEN 4
/* New number of PFs on the device. */
#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
@@ -7113,6 +8370,7 @@
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
/* Identifies the port assignment for this function. */
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
/***********************************/
@@ -7127,6 +8385,7 @@
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
/* Identifies the port assignment for this function. */
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
@@ -7144,8 +8403,10 @@
#define MC_CMD_ALLOC_VIS_IN_LEN 8
/* The minimum number of VIs that is acceptable */
#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4
/* The maximum number of VIs that would be useful */
#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4
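
To show how the _OFST/_LEN pairs are meant to be consumed, here is a hedged, driver-independent sketch that marshals an ALLOC_VIS request into a little-endian buffer (the sfc driver itself would use its MCDI_SET_DWORD helpers for this):

    #include <stdint.h>
    #include <string.h>

    /* Store a 32-bit value little-endian at a byte offset, as MCDI
     * expects for 4-byte fields.
     */
    static void put_dword(uint8_t *buf, unsigned int ofst, uint32_t v)
    {
        buf[ofst + 0] = v & 0xff;
        buf[ofst + 1] = (v >> 8) & 0xff;
        buf[ofst + 2] = (v >> 16) & 0xff;
        buf[ofst + 3] = (v >> 24) & 0xff;
    }

    /* MC_CMD_ALLOC_VIS_IN: MIN_VI_COUNT at offset 0, MAX_VI_COUNT at 4. */
    void build_alloc_vis(uint8_t buf[8], uint32_t min_vis, uint32_t max_vis)
    {
        memset(buf, 0, 8);
        put_dword(buf, 0, min_vis);
        put_dword(buf, 4, max_vis);
    }
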
/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
* Use extended version in new code.
@@ -7153,21 +8414,26 @@
#define MC_CMD_ALLOC_VIS_OUT_LEN 8
/* The number of VIs allocated on this function */
#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4
/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
/* The number of VIs allocated on this function */
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4
/* Function's port vi_shift value (always 0 on Huntington) */
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4
/***********************************/
@@ -7201,15 +8467,20 @@
#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
/* Number of VFs currently enabled. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4
/* Max number of VFs before SR-IOV stride and offset may need to be changed. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
/* RID offset of first VF from PF. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4
/* RID offset of each subsequent VF from the previous. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4
/***********************************/
@@ -7224,19 +8495,24 @@
#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
/* Number of VFs currently enabled. */
#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
/* Max number of VFs before SR-IOV stride and offset may need to be changed. */
#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
/* RID offset of first VF from PF, or 0 for no change, or
* MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
*/
#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
/* RID offset of each subsequent VF from the previous, 0 for no change, or
* MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
*/
#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
@@ -7258,12 +8534,15 @@
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
/* The number of VIs allocated on this function */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
/* Function's port vi_shift value (always 0 on Huntington) */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
/***********************************/
@@ -7278,6 +8557,7 @@
#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
/* The VI number to query. */
#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
@@ -7311,6 +8591,7 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
@@ -7392,6 +8673,7 @@
#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
/* Handle for allocated push I/O buffer. */
#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4
/***********************************/
@@ -7406,6 +8688,7 @@
#define MC_CMD_FREE_PIOBUF_IN_LEN 4
/* Handle for allocated push I/O buffer. */
#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
@@ -7423,6 +8706,7 @@
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
/* VI number to get information for. */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
@@ -7445,6 +8729,7 @@
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
/***********************************/
@@ -7459,6 +8744,7 @@
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
/* VI number to set information for. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
/* Transaction processing steering hint 1 for use with the Rx Queue. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
@@ -7478,6 +8764,7 @@
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
@@ -7494,6 +8781,7 @@
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
/* enum: MISC. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
/* enum: IDO. */
@@ -7506,10 +8794,12 @@
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
/* Amalgamated TLP info word. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
@@ -7557,10 +8847,12 @@
/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
/* Amalgamated TLP info word. */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
@@ -7627,6 +8919,7 @@
* in a command from the host.)
*/
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
@@ -7636,6 +8929,7 @@
* mc_flash_layout.h.)
*/
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
@@ -7672,12 +8966,14 @@
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
/* enum: Last chunk, containing checksum rather than data */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
/* enum: Abort download of this item */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
/* Length of this chunk in bytes */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
/* Data for this chunk */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
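
Taken together, the PHASE/TARGET/CHUNK_ID/CHUNK_LEN/CHUNK_DATA fields describe a simple chunked-transfer protocol. A schematic host-side loop, under the assumptions that mcdi_rpc() stands in for whatever transport the caller has and that a 240-byte chunk size is acceptable:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define PHASE_IMEMS   0x2
    #define CHUNK_ID_LAST 0xffffffffu
    #define CHUNK_MAX     240u              /* illustrative chunk size */

    static void put_dword(uint8_t *b, unsigned int o, uint32_t v)
    {
        b[o] = v & 0xff;
        b[o + 1] = (v >> 8) & 0xff;
        b[o + 2] = (v >> 16) & 0xff;
        b[o + 3] = (v >> 24) & 0xff;
    }

    /* Hypothetical transport hook: a real driver would issue the MCDI
     * request here and check MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT.
     */
    extern int mcdi_rpc(const uint8_t *in, unsigned int inlen);

    int satellite_download(uint32_t target, const uint8_t *img,
                           size_t len, uint32_t checksum)
    {
        uint8_t buf[16 + CHUNK_MAX];
        uint32_t chunk_id = 0;
        size_t off = 0;

        while (off < len) {
            uint32_t n = len - off > CHUNK_MAX ? CHUNK_MAX
                                               : (uint32_t)(len - off);

            put_dword(buf, 0, PHASE_IMEMS); /* PHASE_OFST 0 */
            put_dword(buf, 4, target);      /* TARGET_OFST 4 */
            put_dword(buf, 8, chunk_id++);  /* CHUNK_ID_OFST 8 */
            put_dword(buf, 12, n);          /* CHUNK_LEN_OFST 12 */
            memcpy(buf + 16, img + off, n); /* CHUNK_DATA_OFST 16 */
            if (mcdi_rpc(buf, 16 + n))
                return -1;
            off += n;
        }
        /* Final chunk: CHUNK_ID_LAST carries the checksum, not data. */
        put_dword(buf, 0, PHASE_IMEMS);
        put_dword(buf, 4, target);
        put_dword(buf, 8, CHUNK_ID_LAST);
        put_dword(buf, 12, 4);
        put_dword(buf, 16, checksum);
        return mcdi_rpc(buf, 20);
    }
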
@@ -7688,8 +8984,10 @@
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
/* Same as MC_CMD_ERR field, but included as 0 in success cases */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
/* Extra status information */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
/* enum: Code download OK, completed. */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
/* enum: Code download aborted as requested. */
@@ -7726,6 +9024,7 @@
#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
@@ -7793,6 +9092,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5
/* enum: BIST RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
@@ -7813,6 +9114,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -7822,6 +9125,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5
/* enum: BIST TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
@@ -7848,7 +9153,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -7864,6 +9171,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -7888,7 +9197,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -7901,12 +9212,16 @@
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4
/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
@@ -7915,6 +9230,7 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
@@ -7982,6 +9298,8 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5
/* enum: BIST RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
@@ -8002,6 +9320,8 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -8011,6 +9331,8 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5
/* enum: BIST TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
@@ -8037,7 +9359,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -8053,6 +9377,8 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -8077,7 +9403,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -8090,14 +9418,19 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4
/* Second word of flags. Not present on older firmware (check the length). */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
@@ -8124,6 +9457,18 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
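
Several of the later fields carry the caveat "not present on older firmware (check the length)". The intended test is on the actual response length, not the compile-time _LEN; a hedged sketch using the FLAGS2 word as the example:

    #include <stddef.h>
    #include <stdint.h>

    /* Read a little-endian dword out of a response buffer. */
    static uint32_t get_dword(const uint8_t *b, unsigned int ofst)
    {
        return (uint32_t)b[ofst] | ((uint32_t)b[ofst + 1] << 8) |
               ((uint32_t)b[ofst + 2] << 16) |
               ((uint32_t)b[ofst + 3] << 24);
    }

    /* FLAGS2 lives at offset 20; firmware that predates the field simply
     * returns a shorter response, so probe the length first.
     */
    int read_flags2(const uint8_t *resp, size_t resp_len, uint32_t *out)
    {
        if (resp_len < 20 + 4)
            return 0;           /* field absent on this firmware */
        *out = get_dword(resp, 20);
        return 1;
    }
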
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
@@ -8181,9 +9526,10 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 73
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
@@ -8251,6 +9597,8 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5
/* enum: BIST RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
@@ -8271,6 +9619,8 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -8280,6 +9630,8 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5
/* enum: BIST TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
@@ -8306,7 +9658,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -8322,6 +9676,8 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -8346,7 +9702,9 @@
* (Huntington development only)
*/
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
@@ -8359,14 +9717,19 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED_9 0x9
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4
/* Second word of flags. Not present on older firmware (check the length). */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
@@ -8393,6 +9756,18 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
@@ -8463,6 +9838,348 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED_9 0x9
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: reserved value - do not use (bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED_9 0x9
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
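
One way a driver might walk this per-PF byte array, honouring the sentinels above (and, per the comment, treating INCOMPATIBLE_ASSIGNMENT the same as PF_NOT_ASSIGNED); names here are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define ACCESS_NOT_PERMITTED    0xff
    #define PF_NOT_PRESENT          0xfe
    #define PF_NOT_ASSIGNED         0xfd
    #define INCOMPATIBLE_ASSIGNMENT 0xfc
    #define NUM_PFS                 16      /* _NUM above */

    void dump_pf_ports(const uint8_t assignment[NUM_PFS])
    {
        for (int pf = 0; pf < NUM_PFS; pf++) {
            uint8_t v = assignment[pf];

            if (v == PF_NOT_PRESENT)
                continue;
            if (v == ACCESS_NOT_PERMITTED)
                printf("PF%d: access not permitted\n", pf);
            else if (v == PF_NOT_ASSIGNED || v == INCOMPATIBLE_ASSIGNMENT)
                printf("PF%d: not assigned\n", pf);
            else
                printf("PF%d: external port %u\n", pf, v);
        }
    }
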
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
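
Since each of these bytes holds a base-2 logarithm, recovering the real cache depth is a single shift:

    #include <stddef.h>
    #include <stdint.h>

    /* RX_DESC_CACHE_SIZE is at byte 66 of the response, TX at byte 67;
     * each holds log2 of the descriptor count.
     */
    static size_t desc_cache_entries(const uint8_t *resp, unsigned int ofst)
    {
        return (size_t)1 << resp[ofst];
    }

    /* e.g. desc_cache_entries(resp, 66) RX descriptors are cached */
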
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each
+ * VI is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2
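
The window mode fixes the per-VI stride (PIO always at offset 4k; CTPIO at 12k where it is mapped at all). A hypothetical lookup from the enum to a stride:

    #include <stddef.h>
    #include <stdint.h>

    /* Per-VI BAR stride for each VI_WINDOW_MODE enum value. CTPIO is
     * unavailable in 8k mode (not mapped).
     */
    static size_t vi_window_stride(uint8_t mode)
    {
        switch (mode) {
        case 0x0: return  8 * 1024;     /* VI_WINDOW_MODE_8K  */
        case 0x1: return 16 * 1024;     /* VI_WINDOW_MODE_16K */
        case 0x2: return 64 * 1024;     /* VI_WINDOW_MODE_64K */
        default:  return 0;             /* unknown mode: treat as error */
        }
    }

    /* Base of VI n within the BAR, given the queried mode. */
    static size_t vi_base(uint8_t mode, unsigned int n)
    {
        return vi_window_stride(mode) * n;
    }
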
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
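
The sizing rule in the comment reduces to MAC_STATS_NUM_STATS 64-bit slots; a minimal allocation sketch:

    #include <stdint.h>
    #include <stdlib.h>

    /* Buffer for MAC stats DMA: one u64 per stat, including the final
     * GENERATION_END entry; anything shorter is truncated by firmware.
     */
    uint64_t *alloc_mac_stats(uint16_t num_stats)
    {
        return calloc(num_stats, sizeof(uint64_t));
    }
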
/***********************************/
@@ -8502,6 +10219,7 @@
#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
/* the bucket id */
#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
/***********************************/
@@ -8516,6 +10234,7 @@
#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
/* the bucket id */
#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
@@ -8533,17 +10252,22 @@
#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
/* the bucket id */
#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
/* the bucket id */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
/* the desired maximum fill level */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
@@ -8561,10 +10285,13 @@
#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
/* the txq id */
#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
/* bitmask of the priority queues this txq joins when it is inserted. */
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
@@ -8573,25 +10300,32 @@
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
/* an already reserved bucket (typically set to bucket associated with outer
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
/* an already reserved bucket (typically set to bucket associated with inner
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
/* the txq id */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
/* bitmask of the priority queues this txq joins when it is inserted. */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
@@ -8600,18 +10334,23 @@
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
/* an already reserved bucket (typically set to bucket associated with outer
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
/* an already reserved bucket (typically set to bucket associated with inner
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
@@ -8629,8 +10368,10 @@
#define MC_CMD_LINK_PIOBUF_IN_LEN 8
/* Handle for allocated push I/O buffer. */
#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
/* Function Local Instance (VI) number. */
#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
@@ -8648,6 +10389,7 @@
#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
/* Function Local Instance (VI) number. */
#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
@@ -8665,8 +10407,10 @@
#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
/* The port to connect to the v-switch's upstream port. */
#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of v-switch to create. */
#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
/* enum: VLAN */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
/* enum: VEB */
@@ -8679,6 +10423,7 @@
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
/* Flags controlling v-port creation */
#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
@@ -8689,6 +10434,7 @@
* v-ports with this number of tags.
*/
#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
@@ -8706,6 +10452,7 @@
#define MC_CMD_VSWITCH_FREE_IN_LEN 4
/* The port to which the v-switch is connected. */
#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
@@ -8725,6 +10472,7 @@
#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
/* The port to which the v-switch is connected. */
#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
@@ -8742,8 +10490,10 @@
#define MC_CMD_VPORT_ALLOC_IN_LEN 20
/* The port to which the v-switch is connected. */
#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of the new v-port. */
#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
/* enum: VLAN (obsolete) */
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
/* enum: VEB (obsolete) */
@@ -8764,6 +10514,7 @@
#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
/* Flags controlling v-port creation */
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
@@ -8773,8 +10524,10 @@
* v-switch.
*/
#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
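
VLAN_TAG_0 and VLAN_TAG_1 occupy the low and high halves of the same dword (LBN 0 and 16, each 16 bits wide), so packing two tags is a shift and an or; a small sketch:

    #include <stdint.h>

    /* Pack two VLAN tags into the VLAN_TAGS dword: tag 0 in bits 0..15,
     * tag 1 in bits 16..31.
     */
    static inline uint32_t pack_vlan_tags(uint16_t tag0, uint16_t tag1)
    {
        return (uint32_t)tag0 | ((uint32_t)tag1 << 16);
    }
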
@@ -8784,6 +10537,7 @@
#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
/* The handle of the new v-port */
#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
/***********************************/
@@ -8798,6 +10552,7 @@
#define MC_CMD_VPORT_FREE_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
/* MC_CMD_VPORT_FREE_OUT msgresponse */
#define MC_CMD_VPORT_FREE_OUT_LEN 0
@@ -8815,18 +10570,23 @@
#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
/* The port to connect to the v-adaptor's port. */
#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* Flags controlling v-adaptor creation */
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
/* The number of VLAN tags to strip on receive */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4
/* The number of VLAN tags to transparently insert/remove. */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
@@ -8853,6 +10613,7 @@
#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
@@ -8870,6 +10631,7 @@
#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
/* The new MAC address to assign to this v-adaptor */
#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
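As a worked example of the _OFST/_LEN convention: MC_CMD_VADAPTOR_SET_MAC_IN is 10 bytes, a port-ID dword at offset 0 followed by the 6-byte MAC at offset 4. A userspace-style packing sketch, assuming the usual little-endian MCDI payload (the driver itself uses MCDI_DECLARE_BUF()/MCDI_SET_DWORD() rather than raw offsets):

#include <stdint.h>
#include <string.h>

#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6

/* MCDI payloads are little-endian; write a dword at a byte offset. */
static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = (uint8_t)(v & 0xff);
	buf[ofst + 1] = (uint8_t)((v >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((v >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((v >> 24) & 0xff);
}

static void pack_vadaptor_set_mac(uint8_t *buf, uint32_t port_id,
				  const uint8_t *mac)
{
	memset(buf, 0, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
	put_le32(buf, MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST,
		 port_id);
	memcpy(buf + MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST, mac,
	       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN);
}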
@@ -8890,6 +10652,7 @@
#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
@@ -8910,15 +10673,19 @@
#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4
/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4
/* The number of VLAN tags that may still be added */
#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
/***********************************/
@@ -8933,8 +10700,10 @@
#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
/* The port to assign. */
#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4
/* The target function to modify. */
#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
@@ -8955,9 +10724,13 @@
/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
/* Write enable bits 0-3, set to write, clear to read. */
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
@@ -8969,9 +10742,13 @@
*/
#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
/***********************************/
@@ -8986,11 +10763,13 @@
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
/* The handle of the owning upstream port */
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
/* The handle of the new Onload stack */
#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
/***********************************/
@@ -9005,6 +10784,7 @@
#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
/* The handle of the Onload stack */
#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
@@ -9022,8 +10802,10 @@
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
/* The handle of the owning upstream port */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of context to allocate */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
/* enum: Allocate a context for exclusive use. The key and indirection table
* must be explicitly configured.
*/
@@ -9037,6 +10819,7 @@
* in the indirection table will be in the range 0 to NUM_QUEUES-1.
*/
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
@@ -9045,6 +10828,7 @@
* handle.
*/
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4
/* enum: guaranteed invalid RSS context handle value */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
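Because the firmware guarantees 0xffffffff is never a valid handle, callers can use it as an "unallocated" sentinel instead of carrying a separate flag. A small sketch of that idiom (the sfc driver does much the same with its own invalid-context constant):

#include <stdint.h>

#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff

struct rss_ctx {
	uint32_t id;	/* handle from MC_CMD_RSS_CONTEXT_ALLOC */
};

static void rss_ctx_init(struct rss_ctx *ctx)
{
	ctx->id = MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID;
}

static int rss_ctx_is_allocated(const struct rss_ctx *ctx)
{
	return ctx->id != MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID;
}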
@@ -9061,6 +10845,7 @@
#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
@@ -9078,6 +10863,7 @@
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4
/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
@@ -9098,6 +10884,7 @@
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
@@ -9118,6 +10905,7 @@
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
/* The 128-byte indirection table (1 byte per entry) */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
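The indirection table is 128 single-byte entries, each naming a queue in the range 0 to NUM_QUEUES-1 (see MC_CMD_RSS_CONTEXT_ALLOC above). A sketch of a round-robin fill; the even spread is a conventional default, not something the protocol mandates:

#include <stdint.h>

#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128

/* Spread the 128 table slots round-robin over num_queues (1-64). */
static void fill_indir_table(uint8_t *table, unsigned int num_queues)
{
	unsigned int i;

	if (num_queues == 0)
		return;
	for (i = 0;
	     i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++)
		table[i] = (uint8_t)(i % num_queues);
}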
@@ -9138,6 +10926,7 @@
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
@@ -9158,6 +10947,7 @@
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
/* Hash control flags. The _EN bits are always supported, but new modes are
* available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
* in this case, the MODE fields may be set to non-zero values, and will take
@@ -9171,6 +10961,7 @@
* particular packet type.)
*/
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
@@ -9210,6 +11001,7 @@
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
@@ -9227,6 +11019,7 @@
* always be used for a SET regardless of old/new driver vs. old/new firmware.
*/
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
@@ -9263,11 +11056,13 @@
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
/* The handle of the owning upstream port */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
* offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
* referenced RSS contexts must span no more than this number.
*/
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
@@ -9276,6 +11071,7 @@
* handle.
*/
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
/* enum: guaranteed invalid .1p mapping handle value */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
@@ -9292,6 +11088,7 @@
#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
@@ -9309,6 +11106,7 @@
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
* handle)
*/
@@ -9331,6 +11129,7 @@
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
@@ -9356,10 +11155,13 @@
#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
/* Base absolute interrupt vector number. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
/* Number of interrupt vectors allocated to this PF. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
/* Number of interrupt vectors to allocate per VF. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
/***********************************/
@@ -9376,10 +11178,13 @@
* let the system find a suitable base.
*/
#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
/* Number of interrupt vectors allocated to this PF. */
#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
/* Number of interrupt vectors to allocate per VF. */
#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
@@ -9397,6 +11202,7 @@
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
/* MAC address to add */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
@@ -9417,6 +11223,7 @@
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
/* MAC address to delete */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
@@ -9437,6 +11244,7 @@
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
@@ -9444,6 +11252,7 @@
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
/* The number of MAC addresses returned */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
/* Array of MAC addresses */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
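The _LENMIN/_LEN(num) pair describes a variable-length response: a 4-byte count followed by 6 bytes per address. A sketch of safely indexing that array from a raw response buffer (helper names are illustrative; bounds-checking against the actual response size is the caller's job):

#include <stddef.h>
#include <stdint.h>

#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6

/* MCDI payloads are little-endian; read a dword at a byte offset. */
static uint32_t get_le32(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

/* Return MAC address i from the response, or NULL if out of range. */
static const uint8_t *get_macaddr(const uint8_t *resp, size_t resp_len,
				  uint32_t i)
{
	uint32_t count = get_le32(resp,
		MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST);
	size_t ofst = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST +
		(size_t)i * MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN;

	if (i >= count ||
	    ofst + MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN > resp_len)
		return NULL;
	return resp + ofst;
}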
@@ -9465,8 +11274,10 @@
#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
/* The handle of the v-port */
#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
/* Flags requesting what should be changed. */
#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
@@ -9476,14 +11287,17 @@
* v-switch.
*/
#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
/* The number of MAC addresses to add */
#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
/* MAC addresses to add */
#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
@@ -9492,6 +11306,7 @@
/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
@@ -9508,15 +11323,18 @@
#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
/* The number of VLAN tags that may be used on a v-adaptor connected to this
* EVB port.
*/
#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
/***********************************/
@@ -9528,14 +11346,16 @@
*/
#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
/* Index of the first buffer table entry. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
/* Number of buffer table entries to dump. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
@@ -9559,6 +11379,7 @@
/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
@@ -9588,6 +11409,7 @@
/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
@@ -9611,8 +11433,10 @@
#define MC_CMD_GET_CLOCK_OUT_LEN 8
/* System frequency, MHz */
#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
/* DPCPU frequency, MHz */
#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
/***********************************/
@@ -9621,36 +11445,43 @@
*/
#define MC_CMD_SET_CLOCK 0xad
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_CLOCK_IN msgrequest */
#define MC_CMD_SET_CLOCK_IN_LEN 28
/* Requested frequency in MHz for system clock domain */
#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
/* enum: Leave the system clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for inter-core clock domain */
#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
/* enum: Leave the inter-core clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for DPCPU clock domain */
#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
/* enum: Leave the DPCPU clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for PCS clock domain */
#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
/* enum: Leave the PCS clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for MC clock domain */
#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
/* enum: Leave the MC clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for rmon clock domain */
#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
/* enum: Leave the rmon clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for vswitch clock domain */
#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
/* enum: Leave the vswitch clock domain frequency unchanged */
#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
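Every domain's frequency field accepts 0 (the *_DOMAIN_DONT_CHANGE sentinel) to leave that domain alone, so a caller adjusting one clock simply zero-fills the rest of the request. A sketch, assuming the usual little-endian MCDI packing:

#include <stdint.h>
#include <string.h>

#define MC_CMD_SET_CLOCK_IN_LEN 28
#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = (uint8_t)(v & 0xff);
	buf[ofst + 1] = (uint8_t)((v >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((v >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((v >> 24) & 0xff);
}

/* Request a new DPCPU frequency only: the zero-filled fields are the
 * 0x0 DONT_CHANGE sentinel, so all other domains are left untouched. */
static void pack_set_clock_dpcpu_only(uint8_t *buf, uint32_t mhz)
{
	memset(buf, 0, MC_CMD_SET_CLOCK_IN_LEN);
	put_le32(buf, MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST, mhz);
}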
@@ -9658,30 +11489,37 @@
#define MC_CMD_SET_CLOCK_OUT_LEN 28
/* Resulting system frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
/* enum: The system clock domain doesn't exist */
#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
/* Resulting inter-core frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
/* enum: The inter-core clock domain doesn't exist / isn't used */
#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
/* Resulting DPCPU frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
/* enum: The dpcpu clock domain doesn't exist */
#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
/* Resulting PCS frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
/* enum: The PCS clock domain doesn't exist / isn't controlled */
#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
/* Resulting MC frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
/* enum: The MC clock domain doesn't exist / isn't controlled */
#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
/* Resulting rmon frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
/* enum: The rmon clock domain doesn't exist / isn't controlled */
#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
/* Resulting vswitch frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
/* enum: The vswitch clock domain doesn't exist / isn't controlled */
#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
@@ -9692,11 +11530,12 @@
*/
#define MC_CMD_DPCPU_RPC 0xae
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DPCPU_RPC_IN msgrequest */
#define MC_CMD_DPCPU_RPC_IN_LEN 36
#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
/* enum: RxDPCPU0 */
#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
/* enum: TxDPCPU0 */
@@ -9761,12 +11600,15 @@
#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
/* Register data to write. Only valid in write/write-read. */
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
/* Register address. */
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
/* MC_CMD_DPCPU_RPC_OUT msgresponse */
#define MC_CMD_DPCPU_RPC_OUT_LEN 36
#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
/* DATA */
#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
@@ -9777,9 +11619,13 @@
#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
/***********************************/
@@ -9794,6 +11640,7 @@
#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
/* Interrupt level relative to base for function. */
#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4
/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
@@ -9811,6 +11658,7 @@
#define MC_CMD_SHMBOOT_OP_IN_LEN 4
/* Identifies the operation to perform */
#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
/* enum: Copy slave_data section to the slave core. (Greenport only) */
#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
@@ -9824,13 +11672,16 @@
*/
#define MC_CMD_CAP_BLK_READ 0xe7
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CAP_BLK_READ_IN msgrequest */
#define MC_CMD_CAP_BLK_READ_IN_LEN 12
#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
@@ -9850,53 +11701,77 @@
*/
#define MC_CMD_DUMP_DO 0xe8
-#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_DO_IN msgrequest */
#define MC_CMD_DUMP_DO_IN_LEN 52
#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_PADDING_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
/* enum: The uart port this command was received over (if using a uart
* transport)
*/
#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
/* MC_CMD_DUMP_DO_OUT msgresponse */
#define MC_CMD_DUMP_DO_OUT_LEN 4
#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4
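Note that offsets 12-24 of MC_CMD_DUMP_DO_IN are overlaid: the same bytes carry an NVRAM partition/offset, a host memory address pair, an MLI root/depth, or a UART port, and CUSTOM_TYPE at offset 8 selects which encoding the MC decodes. A C union purely to visualise the overlay; the driver writes dwords at the raw offsets, so this struct is illustrative, not a wire format:

#include <stdint.h>

/* Illustrative only: the alternatives sharing request offsets 12/16. */
struct dump_custom_location {
	uint32_t custom_type;	/* DUMP_LOCATION_*, request offset 8 */
	union {			/* request offsets 12 and 16 */
		struct { uint32_t partition_type, nvram_offset; } nvram;
		struct { uint32_t addr_lo, addr_hi; } host_memory;
		/* MLI additionally has MLI_DEPTH at offset 20 */
		struct { uint32_t root_lo, root_hi; } host_memory_mli;
		struct { uint32_t port; } uart;
	} u;
};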
/***********************************/
@@ -9905,41 +11780,64 @@
*/
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
-#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
/***********************************/
@@ -9950,17 +11848,20 @@
*/
#define MC_CMD_SET_PSU 0xea
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_PSU_IN msgrequest */
#define MC_CMD_SET_PSU_IN_LEN 12
#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
/* desired value, e.g. voltage in mV */
#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
/* MC_CMD_SET_PSU_OUT msgresponse */
#define MC_CMD_SET_PSU_OUT_LEN 0
@@ -9980,7 +11881,9 @@
/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
/***********************************/
@@ -10016,12 +11919,16 @@
#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
/* Offset at which to write the data */
#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
/* Length of data */
#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
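The CHECKSUM dword covers everything after itself (OFFSET, LENGTH, RESERVED and DATA, i.e. bytes 4 onward). The header does not say which CRC32 variant the MC expects; the sketch below assumes the common reflected 0xedb88320 polynomial (zlib's crc32() convention), and that choice is an assumption:

#include <stddef.h>
#include <stdint.h>

/* Plain bitwise CRC32, reflected polynomial 0xedb88320 (zlib crc32()
 * convention) - whether the MC uses this exact variant is an assumption. */
static uint32_t crc32_ieee(uint32_t crc, const uint8_t *p, size_t len)
{
	size_t i;
	int j;

	crc = ~crc;
	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ (0xedb88320u & (0u - (crc & 1u)));
	}
	return ~crc;
}

/* CHECKSUM (offset 0) covers every message byte after the checksum
 * dword itself: OFFSET, LENGTH, RESERVED and DATA. */
static uint32_t uart_send_data_checksum(const uint8_t *msg, size_t msg_len)
{
	return crc32_ieee(0, msg + 4, msg_len - 4);
}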
@@ -10044,12 +11951,16 @@
#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
/* CRC32 over OFFSET, LENGTH, RESERVED */
#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
/* Offset from which to read the data */
#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
/* Length of data */
#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
/* MC_CMD_UART_RECV_DATA_IN msgresponse */
#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
@@ -10057,12 +11968,16 @@
#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
@@ -10075,14 +11990,16 @@
*/
#define MC_CMD_READ_FUSES 0xf0
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_FUSES_IN msgrequest */
#define MC_CMD_READ_FUSES_IN_LEN 8
/* Offset in OTP to read */
#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
/* Length of data to read in bytes */
#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
/* MC_CMD_READ_FUSES_OUT msgresponse */
#define MC_CMD_READ_FUSES_OUT_LENMIN 4
@@ -10090,6 +12007,7 @@
#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
/* Length of returned OTP data in bytes */
#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
/* Returned data */
#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
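Another variable-length response: LENGTH at offset 0 reports the OTP byte count and the bytes follow at offset 4. A sketch of validating the reported length against the actual response size before trusting it (get_le32 as in the earlier sketches):

#include <stddef.h>
#include <stdint.h>

#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4

static uint32_t get_le32(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

/* Returns 0 and sets *data_len if the reported OTP byte count fits in
 * the response, -1 if the response is short or inconsistent. */
static int read_fuses_data_len(const uint8_t *resp, size_t resp_len,
			       size_t *data_len)
{
	uint32_t n;

	if (resp_len < MC_CMD_READ_FUSES_OUT_DATA_OFST)
		return -1;
	n = get_le32(resp, MC_CMD_READ_FUSES_OUT_LENGTH_OFST);
	if ((size_t)n > resp_len - MC_CMD_READ_FUSES_OUT_DATA_OFST)
		return -1;
	*data_len = n;
	return 0;
}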
@@ -10197,6 +12115,60 @@
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
/* enum: CTLE EQ Resistor (0-7, Medford) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+/* enum: CTLE gain (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN 0xb
+/* enum: CTLE pole (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE 0xc
+/* enum: CTLE peaking (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK 0xd
+/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN 0xe
+/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD 0xf
+/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x10
+/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x11
+/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x12
+/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x13
+/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6 0x14
+/* enum: DFE Tap7 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7 0x15
+/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8 0x16
+/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9 0x17
+/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10 0x18
+/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11 0x19
+/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12 0x1a
+/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF 0x1b
+/* enum: Negative h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN 0x1c
+/* enum: Negative h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD 0x1d
+/* enum: Positive h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN 0x1e
+/* enum: Positive h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD 0x1f
+/* enum: CDR calibration loop code (Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20
+/* enum: CDR integral loop code (Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -10268,7 +12240,7 @@
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TX Amplitude (Huntington, Medford) */
+/* enum: TX Amplitude (Huntington, Medford, Medford2) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
@@ -10290,9 +12262,9 @@
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
/* enum: TX Amplitude Fine control (Medford) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
-/* enum: Pre-shoot Tap (Medford) */
+/* enum: Pre-shoot Tap (Medford, Medford2) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
-/* enum: De-emphasis Tap (Medford) */
+/* enum: De-emphasis Tap (Medford, Medford2) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
@@ -10361,7 +12333,24 @@
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4
+/* Scan duration / cycle count */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4
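The V2 request extends the original eye-plot request with a duration/cycle-count dword at offset 8; the sub-operation byte still leads, and the three reserved bytes keep the later dwords 32-bit aligned. A hedged userspace-style packing sketch (little-endian, not the driver's MCDI helpers):

#include <stdint.h>
#include <string.h>

#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12
#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0
#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4
#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = (uint8_t)(v & 0xff);
	buf[ofst + 1] = (uint8_t)((v >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((v >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((v >> 24) & 0xff);
}

/* op is the KR_TUNE sub-operation byte; the RSVD bytes at offsets 1-3
 * stay zero from the memset, keeping LANE and BER 32-bit aligned. */
static void pack_eye_plot_v2(uint8_t *buf, uint8_t op, uint32_t lane,
			     uint32_t ber)
{
	memset(buf, 0, MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN);
	buf[MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST] = op;
	put_le32(buf, MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST, lane);
	put_le32(buf, MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST, ber);
}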
/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
@@ -10393,10 +12382,12 @@
#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4
/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4
/***********************************/
@@ -10594,6 +12585,7 @@
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
@@ -10636,6 +12628,7 @@
#define MC_CMD_LICENSING_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_LICENSING_IN_OP_OFST 0
+#define MC_CMD_LICENSING_IN_OP_LEN 4
/* enum: re-read and apply licenses after a license key partition update; note
* that this operation returns a zero-length response
*/
@@ -10647,23 +12640,30 @@
#define MC_CMD_LICENSING_OUT_LEN 28
/* count of application keys which are valid */
#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
* MC_CMD_FC_OP_LICENSE)
*/
#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being blacklisted */
#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being unverifiable */
#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being for the wrong node
*/
#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
/* licensing state (for diagnostics; the exact meaning of the bits in this
 * field is private to the firmware)
*/
#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
/* licensing subsystem self-test report (for manftest) */
#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
/* enum: licensing subsystem self-test failed */
#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
/* enum: licensing subsystem self-test passed */
@@ -10683,6 +12683,7 @@
#define MC_CMD_LICENSING_V3_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_IN_OP_LEN 4
/* enum: re-read and apply licenses after a license key partition update; note
* that this operation returns a zero-length response
*/
@@ -10696,20 +12697,26 @@
#define MC_CMD_LICENSING_V3_OUT_LEN 88
/* count of keys which are valid */
#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
* MC_CMD_FC_OP_LICENSE)
*/
#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
/* count of keys which are invalid due to being unverifiable */
#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
/* count of keys which are invalid due to being for the wrong node */
#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
/* licensing state (for diagnostics; the exact meaning of the bits in this
 * field is private to the firmware)
*/
#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
/* licensing subsystem self-test report (for manftest) */
#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
/* enum: licensing subsystem self-test failed */
#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
/* enum: licensing subsystem self-test passed */
@@ -10750,8 +12757,10 @@
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
/* type of license (e.g. 3) */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
/* length of the license ID (in bytes) */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
/* the unique license ID of the adapter */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
@@ -10789,11 +12798,13 @@
#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
/* application ID to query (LICENSED_APP_ID_xxx) */
#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
/* state of this application */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
/* enum: no (or invalid) license is present for the application */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
/* enum: a valid license is present for the application */
@@ -10824,6 +12835,7 @@
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
/* state of this application */
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
/* enum: no (or invalid) license is present for the application */
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
/* enum: a valid license is present for the application */
@@ -10874,8 +12886,10 @@
#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
/* application ID */
#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
/* enum: validate application */
#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
/* enum: mask application */
@@ -10900,8 +12914,10 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
/* application ID */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
/* validation challenge */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
@@ -10910,6 +12926,7 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
/* feature expiry (time_t) */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
/* validation response */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
@@ -10918,10 +12935,13 @@
#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
/* application ID */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
/* flag */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
@@ -10959,8 +12979,10 @@
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
/* application expiry time */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
/* application expiry units */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
/* enum: expiry units are accounting units */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
/* enum: expiry units are calendar days */
@@ -10984,7 +13006,7 @@
*/
#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
@@ -10995,6 +13017,7 @@
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
/* whether to turn on or turn off the masked features */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
/* enum: turn the features off */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
/* enum: turn the features back on */
@@ -11014,12 +13037,13 @@
*/
#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
/* operation code */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
/* enum: install a new license, overwriting any existing temporary license.
* This is an asynchronous operation owing to the time taken to validate an
* ECDSA license
@@ -11037,6 +13061,7 @@
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
/* ECDSA license and signature */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
@@ -11044,15 +13069,18 @@
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
/* status code */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
/* enum: finished validating and installing license */
#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
/* enum: license validation and installation in progress */
@@ -11084,14 +13112,17 @@
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
/* receive queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
@@ -11101,6 +13132,7 @@
* of 0xFFFFFFFF is guaranteed never to be a valid handle.
*/
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
@@ -11123,20 +13155,24 @@
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
/* configuration flags */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
/* receiving queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
/* enum: receiving to just the specified queue */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
/* enum: receiving to multiple queues using RSS context */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
/***********************************/
@@ -11153,6 +13189,7 @@
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
/* the type of configuration setting to change */
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
* internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
*/
@@ -11166,6 +13203,7 @@
* on the type of configuration setting being changed
*/
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
/* new value: the details depend on the type of configuration setting being
* changed
*/
@@ -11190,12 +13228,14 @@
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
/* the type of configuration setting to read */
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
* the type of configuration setting being read
*/
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
@@ -11228,12 +13268,15 @@
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
/* receive queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
@@ -11243,6 +13286,7 @@
* of 0xFFFFFFFF is guaranteed never to be a valid handle.
*/
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
@@ -11265,18 +13309,22 @@
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
/* configuration flags */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
/* receiving queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
/* enum: receiving to just the specified queue */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
/* enum: receiving to multiple queues using RSS context */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
/***********************************/
@@ -11291,16 +13339,22 @@
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
/* The rx queue to get stats for. */
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
/***********************************/
@@ -11309,6 +13363,8 @@
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
@@ -11316,20 +13372,27 @@
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
/* The maximum number of PFs the device can expose */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
/* The maximum number of VFs the device can expose in total */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
/* The maximum number of MSI-X vectors the device can provide in total */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
/* the number of MSI-X vectors the device will allocate by default to each PF
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
/* the number of MSI-X vectors the device will allocate by default to each VF
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
/* the maximum number of MSI-X vectors the device can allocate to any one PF */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
/* the maximum number of MSI-X vectors the device can allocate to any one VF */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
/***********************************/
@@ -11347,10 +13410,13 @@
#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
/* Default (canonical) board mode */
#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
/* Current board mode */
#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
/***********************************/
@@ -11359,21 +13425,26 @@
*/
#define MC_CMD_READ_ATB 0x100
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_ATB_IN msgrequest */
#define MC_CMD_READ_ATB_IN_LEN 16
#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
/* MC_CMD_READ_ATB_OUT msgresponse */
#define MC_CMD_READ_ATB_OUT_LEN 4
#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
/***********************************/
@@ -11390,7 +13461,9 @@
/* Each workaround is represented by a single bit according to the enums below.
*/
#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
/* enum: Bug 17230 work around. */
#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
/* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -11425,6 +13498,7 @@
* 1,3 = 0x00030001
*/
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
@@ -11434,6 +13508,7 @@
* set to 1.
*/
#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
@@ -11460,6 +13535,10 @@
* only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
*/
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000
/* enum: Set this bit to indicate that a new privilege mask is to be set,
* otherwise the command will only read the existing mask.
*/
@@ -11469,6 +13548,7 @@
#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
/* For an admin function, always all the privileges are reported. */
#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
/***********************************/
@@ -11485,12 +13565,14 @@
* e.g. VF 1,3 = 0x00030001
*/
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
/* New link state mode to be set */
#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
@@ -11501,11 +13583,12 @@
/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
/***********************************/
/* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the curent range of allowable values for the SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
* parameter to MC_CMD_INIT_RXQ.
*/
#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
@@ -11519,8 +13602,10 @@
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
/* Minimum acceptable snapshot length. */
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
/* Maximum acceptable snapshot length. */
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
/***********************************/
@@ -11529,7 +13614,7 @@
*/
#define MC_CMD_FUSE_DIAGS 0x102
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_FUSE_DIAGS_IN msgrequest */
#define MC_CMD_FUSE_DIAGS_IN_LEN 0
@@ -11538,28 +13623,40 @@
#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
/* Total number of mismatched bits between pairs in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
/* Total number of mismatched bits between pairs in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
/* Total number of mismatched bits between pairs in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
/***********************************/
@@ -11576,6 +13673,7 @@
#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
/* The groups of functions to have their privilege masks modified. */
#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
@@ -11584,6 +13682,7 @@
#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
/* For VFS_OF_PF specify the PF, for ONE specify the target function */
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
@@ -11592,10 +13691,12 @@
* refer to the command MC_CMD_PRIVILEGE_MASK
*/
#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
/* Privileges to be removed from the target functions. For privilege
* definitions refer to the command MC_CMD_PRIVILEGE_MASK
*/
#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
@@ -11613,8 +13714,10 @@
#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
@@ -11633,7 +13736,7 @@
*/
#define MC_CMD_XPM_WRITE_BYTES 0x104
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
@@ -11641,8 +13744,10 @@
#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
/* Start address (byte) */
#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
/* Data */
#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
@@ -11659,14 +13764,16 @@
*/
#define MC_CMD_XPM_READ_SECTOR 0x105
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
/* Sector index */
#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
/* Sector size */
#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
@@ -11674,9 +13781,11 @@
#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
/* Sector type */
#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
/* Sector data */
#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
@@ -11691,7 +13800,7 @@
*/
#define MC_CMD_XPM_WRITE_SECTOR 0x106
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
@@ -11708,10 +13817,12 @@
#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
/* Sector type */
#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
/* Sector size */
#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
/* Sector data */
#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
@@ -11722,6 +13833,7 @@
#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
/* New sector index */
#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
/***********************************/
@@ -11730,12 +13842,13 @@
*/
#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
/* Sector index */
#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
@@ -11747,14 +13860,16 @@
*/
#define MC_CMD_XPM_BLANK_CHECK 0x108
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
@@ -11762,6 +13877,7 @@
#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
/* Total number of bad (non-blank) locations */
#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
/* Addresses of bad locations (may be fewer than BAD_COUNT, if all cannot fit
* into MCDI response)
*/
@@ -11777,14 +13893,16 @@
*/
#define MC_CMD_XPM_REPAIR 0x109
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_REPAIR_IN msgrequest */
#define MC_CMD_XPM_REPAIR_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
/* MC_CMD_XPM_REPAIR_OUT msgresponse */
#define MC_CMD_XPM_REPAIR_OUT_LEN 0
@@ -11797,7 +13915,7 @@
*/
#define MC_CMD_XPM_DECODER_TEST 0x10a
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
@@ -11816,7 +13934,7 @@
*/
#define MC_CMD_XPM_WRITE_TEST 0x10b
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
@@ -11842,10 +13960,13 @@
#define MC_CMD_EXEC_SIGNED_IN_LEN 28
/* the length of code to include in the CMAC */
#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
/* the length of data to include in the CMAC */
#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
/* the XPM sector containing the key to use */
#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
/* the expected CMAC value */
#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
@@ -11868,11 +13989,34 @@
#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
/* the length of data area to clear */
#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
+
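Each TUNNEL_ENCAP_UDP_PORT_ENTRY is a single 32-bit word: the UDP port occupies bits 0-15 and the protocol selector bits 16-31. A minimal packing sketch, with a hypothetical helper name (the driver itself populates these fields through the MCDI structure macros):

static inline u32 tunnel_encap_entry_pack(u16 udp_port, u16 protocol)
{
	/* UDP_PORT: LBN 0, WIDTH 16; PROTOCOL: LBN 16, WIDTH 16 */
	return (u32)udp_port | ((u32)protocol << 16);
}

/* e.g. tunnel_encap_entry_pack(0x12b5, TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN)
 * builds an entry for the IANA-allocated VXLAN port.
 */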
/***********************************/
/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
* Configure UDP ports for tunnel encapsulation hardware acceleration. The
@@ -11913,27 +14057,6 @@
#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
-/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
-/* UDP port (the standard ports are named below but any port may be used) */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
-/* enum: the IANA allocated UDP port for VXLAN */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
-/* enum: the IANA allocated UDP port for Geneve */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
-/* tunnel encapsulation protocol (only those named below are supported) */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
-/* enum: VXLAN */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
-/* enum: Geneve */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
-
/***********************************/
/* MC_CMD_RX_BALANCING
@@ -11950,12 +14073,16 @@
#define MC_CMD_RX_BALANCING_IN_LEN 16
/* The RX port whose upconverter table will be modified */
#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4
/* The VLAN priority associated to the table index and vFIFO */
#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
/* The resulting bit of SRC^DST for indexing the table */
#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
/* The RX engine to which the vFIFO in the table entry will point */
#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
+#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4
/* MC_CMD_RX_BALANCING_OUT msgresponse */
#define MC_CMD_RX_BALANCING_OUT_LEN 0
@@ -11976,8 +14103,10 @@
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
/* The tag to be appended */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
/* The length of the data */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
/* The data to be contained in the TLV structure */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
@@ -12002,6 +14131,7 @@
#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
/* Data type to be checked */
#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
@@ -12009,10 +14139,13 @@
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
/* Number of sectors found (test builds only) */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
/* Number of bytes found (test builds only) */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
/* Length of signature */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
/* Signature */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
@@ -12037,12 +14170,16 @@
#define MC_CMD_SET_EVQ_TMR_IN_LEN 16
/* Function-relative queue instance */
#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4
/* Requested value for timer load (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4
/* Requested value for timer reload (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4
/* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4
#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */
#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */
#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */
@@ -12052,8 +14189,10 @@
#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8
/* Actual value for timer load (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4
/* Actual value for timer reload (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4
/***********************************/
@@ -12071,29 +14210,35 @@
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
/* Reserved for future use. */
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
* nanoseconds) for each increment of the timer load/reload count. The
* requested duration of a timer is this value multiplied by the timer
* load/reload count.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
* allowed for timer load/reload counts.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
* multiple of this step size will be rounded in an implementation defined
* manner.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4
/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
* meaningful if MC_CMD_SET_EVQ_TMR is implemented.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4
/* Timer durations requested via MCDI that are not a multiple of this step size
* will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4
/* For timers updated using the bug35388 workaround, this is the time interval
* (in nanoseconds) for each increment of the timer load/reload count. The
* requested duration of a timer is this value multiplied by the timer
@@ -12101,17 +14246,20 @@
* is enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4
/* For timers updated using the bug35388 workaround, this is the maximum value
* allowed for timer load/reload counts. This field is only meaningful if the
* bug35388 workaround is enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4
/* For timers updated using the bug35388 workaround, timer load/reload counts
* not a multiple of this step size will be rounded in an implementation
* defined manner. This field is only meaningful if the bug35388 workaround is
* enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
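For timers driven through EVQ_TMR_REG, the requested duration is the load/reload count multiplied by TMR_REG_NS_PER_COUNT, capped at TMR_REG_MAX_COUNT and rounded to TMR_REG_STEP. A minimal sketch of deriving a count from a requested timeout, assuming the three properties have already been read from the response; rounding up is an arbitrary choice here, since the spec leaves the direction implementation defined:

/* ns_per_count, max_count and step come from
 * MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT above.
 */
static unsigned int evq_tmr_count_for_ns(u64 ns, u32 ns_per_count,
					 u32 max_count, u32 step)
{
	u64 count = DIV_ROUND_UP(ns, ns_per_count);

	count = roundup(count, (u64)step);
	return min_t(u64, count, max_count);
}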
/***********************************/
@@ -12129,19 +14277,24 @@
* local queue index.
*/
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
/* Will the common pool be used as TX_vFIFO_ULL (1) */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
/* Number of buffers to reserve for the common pool */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
/* TX datapath to which the Common Pool is connected. */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
/* enum: Extracts information from function */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
/* Network port or RX Engine to which the common pool connects. */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
/* enum: Extracts information from function */
/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
@@ -12157,6 +14310,7 @@
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
/* ID of the common pool allocated */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
/***********************************/
@@ -12173,8 +14327,10 @@
/* Common pool previously allocated to which the new vFIFO will be associated
*/
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
/* Port or RX engine to associate the vFIFO egress */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
/* enum: Extracts information from common pool */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
@@ -12187,12 +14343,15 @@
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
/* Minimum number of buffers that the pool must have */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
/* enum: Do not check the space available */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
/* Will the vFIFO be used as TX_vFIFO_ULL */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
/* Network priority of the vFIFO, if applicable */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
/* enum: Search for the lowest unused priority */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
@@ -12200,8 +14359,10 @@
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
/* Short vFIFO ID */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
/* Network priority of the vFIFO */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
/***********************************/
@@ -12217,6 +14378,7 @@
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
/* Short vFIFO ID */
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
@@ -12235,6 +14397,7 @@
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
/* Common pool ID given when pool allocated */
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
@@ -12256,8 +14419,10 @@
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
/* Available buffers for the ENG to NET vFIFOs. */
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 6e1f282b2976..ce8aabf9091e 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -171,89 +171,108 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
return 0;
}
-static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
+static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
{
- u32 result = 0;
+ #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+ bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
switch (media) {
case MC_CMD_MEDIA_KX4:
- result |= SUPPORTED_Backplane;
+ SET_BIT(Backplane);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseKX_Full;
+ SET_BIT(1000baseKX_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseKX4_Full;
+ SET_BIT(10000baseKX4_Full);
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- result |= SUPPORTED_40000baseKR4_Full;
+ SET_BIT(40000baseKR4_Full);
break;
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
case MC_CMD_MEDIA_QSFP_PLUS:
- result |= SUPPORTED_FIBRE;
+ SET_BIT(FIBRE);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseT_Full;
+ SET_BIT(1000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseT_Full;
+ SET_BIT(10000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- result |= SUPPORTED_40000baseCR4_Full;
+ SET_BIT(40000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+ SET_BIT(100000baseCR4_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+ SET_BIT(25000baseCR_Full);
+ if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+ SET_BIT(50000baseCR2_Full);
break;
case MC_CMD_MEDIA_BASE_T:
- result |= SUPPORTED_TP;
+ SET_BIT(TP);
if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
- result |= SUPPORTED_10baseT_Half;
+ SET_BIT(10baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
- result |= SUPPORTED_10baseT_Full;
+ SET_BIT(10baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
- result |= SUPPORTED_100baseT_Half;
+ SET_BIT(100baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
- result |= SUPPORTED_100baseT_Full;
+ SET_BIT(100baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
- result |= SUPPORTED_1000baseT_Half;
+ SET_BIT(1000baseT_Half);
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- result |= SUPPORTED_1000baseT_Full;
+ SET_BIT(1000baseT_Full);
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- result |= SUPPORTED_10000baseT_Full;
+ SET_BIT(10000baseT_Full);
break;
}
if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
- result |= SUPPORTED_Pause;
+ SET_BIT(Pause);
if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
- result |= SUPPORTED_Asym_Pause;
+ SET_BIT(Asym_Pause);
if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- result |= SUPPORTED_Autoneg;
+ SET_BIT(Autoneg);
- return result;
+ #undef SET_BIT
}
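The SET_BIT() helper above token-pastes the link-mode name into the corresponding ethtool bit index, so, for example:

	SET_BIT(1000baseT_Full);
	/* expands to */
	__set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, linkset);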
-static u32 ethtool_to_mcdi_cap(u32 cap)
+static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
{
u32 result = 0;
- if (cap & SUPPORTED_10baseT_Half)
+ #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+ linkset)
+
+ if (TEST_BIT(10baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
- if (cap & SUPPORTED_10baseT_Full)
+ if (TEST_BIT(10baseT_Full))
result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
- if (cap & SUPPORTED_100baseT_Half)
+ if (TEST_BIT(100baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
- if (cap & SUPPORTED_100baseT_Full)
+ if (TEST_BIT(100baseT_Full))
result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
- if (cap & SUPPORTED_1000baseT_Half)
+ if (TEST_BIT(1000baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
- if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full))
+ if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
- if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full))
+ if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
- if (cap & (SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseKR4_Full))
+ if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
- if (cap & SUPPORTED_Pause)
+ if (TEST_BIT(100000baseCR4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
+ if (TEST_BIT(25000baseCR_Full))
+ result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
+ if (TEST_BIT(50000baseCR2_Full))
+ result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
+ if (TEST_BIT(Pause))
result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
- if (cap & SUPPORTED_Asym_Pause)
+ if (TEST_BIT(Asym_Pause))
result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
- if (cap & SUPPORTED_Autoneg)
+ if (TEST_BIT(Autoneg))
result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
+ #undef TEST_BIT
+
return result;
}
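A short usage sketch of the two converters defined in this patch, assuming phy_caps holds an MCDI PHY capability word (variable names are illustrative):

	__ETHTOOL_DECLARE_LINK_MODE_MASK(linkset);
	u32 caps;

	mcdi_to_ethtool_linkset(MC_CMD_MEDIA_BASE_T, phy_caps, linkset);
	caps = ethtool_linkset_to_mcdi_cap(linkset);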
@@ -285,7 +304,7 @@ static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
return flags;
}
-static u32 mcdi_to_ethtool_media(u32 media)
+static u8 mcdi_to_ethtool_media(u32 media)
{
switch (media) {
case MC_CMD_MEDIA_XAUI:
@@ -371,8 +390,8 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
- efx->link_advertising =
- mcdi_to_ethtool_cap(phy_data->media, caps);
+ mcdi_to_ethtool_linkset(phy_data->media, caps,
+ efx->link_advertising);
else
phy_data->forced_cap = caps;
@@ -435,8 +454,8 @@ fail:
int efx_mcdi_port_reconfigure(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u32 caps = (efx->link_advertising ?
- ethtool_to_mcdi_cap(efx->link_advertising) :
+ u32 caps = (efx->link_advertising[0] ?
+ ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
phy_cfg->forced_cap);
return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
@@ -509,34 +528,28 @@ static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
- u32 supported, advertising, lp_advertising;
- supported = mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
- advertising = efx->link_advertising;
cmd->base.speed = efx->link_state.speed;
cmd->base.duplex = efx->link_state.fd;
cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
cmd->base.phy_address = phy_cfg->port;
- cmd->base.autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
+ cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
cmd->base.mdio_support = (efx->mdio.mode_support &
(MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
+ mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
+ cmd->link_modes.supported);
+ memcpy(cmd->link_modes.advertising, efx->link_advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
return;
- lp_advertising =
- mcdi_to_ethtool_cap(phy_cfg->media,
- MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
- lp_advertising);
+ mcdi_to_ethtool_linkset(phy_cfg->media,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP),
+ cmd->link_modes.lp_advertising);
}
static int
@@ -546,29 +559,28 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps;
int rc;
- u32 advertising;
-
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
if (cmd->base.autoneg) {
- caps = (ethtool_to_mcdi_cap(advertising) |
- 1 << MC_CMD_PHY_CAP_AN_LBN);
+ caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) |
+ 1 << MC_CMD_PHY_CAP_AN_LBN);
} else if (cmd->base.duplex) {
switch (cmd->base.speed) {
- case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
- case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
- case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
- case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
- case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
- default: return -EINVAL;
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
+ case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+ case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
+ case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break;
+ case 25000: caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN; break;
+ case 50000: caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN; break;
+ default: return -EINVAL;
}
} else {
switch (cmd->base.speed) {
- case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
- case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
- case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
- default: return -EINVAL;
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
+ default: return -EINVAL;
}
}
@@ -578,11 +590,10 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
return rc;
if (cmd->base.autoneg) {
- efx_link_set_advertising(
- efx, advertising | ADVERTISED_Autoneg);
+ efx_link_set_advertising(efx, cmd->link_modes.advertising);
phy_cfg->forced_cap = 0;
} else {
- efx_link_set_advertising(efx, 0);
+ efx_link_clear_advertising(efx);
phy_cfg->forced_cap = caps;
}
return 0;
@@ -985,6 +996,9 @@ static unsigned int efx_mcdi_event_link_speed[] = {
[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
[MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
};
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
@@ -1087,7 +1101,7 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
int period = action == EFX_STATS_ENABLE ? 1000 : 0;
dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
u32 dma_len = action != EFX_STATS_DISABLE ?
- MC_CMD_MAC_NSTATS * sizeof(u64) : 0;
+ efx->num_mac_stats * sizeof(u64) : 0;
BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
@@ -1121,7 +1135,7 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx)
{
__le64 *dma_stats = efx->stats_buffer.addr;
- dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
}
@@ -1139,10 +1153,10 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
__le64 *dma_stats = efx->stats_buffer.addr;
int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
- dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
- while (dma_stats[MC_CMD_MAC_GENERATION_END] ==
+ while (dma_stats[efx->num_mac_stats - 1] ==
EFX_MC_STATS_GENERATION_INVALID &&
attempts-- != 0)
udelay(EFX_MAC_STATS_WAIT_US);
@@ -1167,7 +1181,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
- MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
+ efx->num_mac_stats * sizeof(u64), GFP_KERNEL);
if (rc)
return rc;
netif_dbg(efx, probe, efx->net_dev,
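With the buffer sized from efx->num_mac_stats, the generation-end marker becomes the last u64 in the buffer instead of a fixed MC_CMD_MAC_GENERATION_END index. A sketch of the generation-count consistency loop this supports, assuming the usual MC_CMD_MAC_GENERATION_START marker at the front of the buffer (function name illustrative):

static void efx_read_stats_sketch(struct efx_nic *efx)
{
	__le64 *dma_stats = efx->stats_buffer.addr;
	__le64 gen_start, gen_end;

	do {
		gen_end = READ_ONCE(dma_stats[efx->num_mac_stats - 1]);
		if (gen_end == EFX_MC_STATS_GENERATION_INVALID)
			return;	/* firmware has not written stats yet */
		rmb();		/* read stats strictly after generation end */
		/* ... copy statistics out of dma_stats here ... */
		rmb();		/* re-read the start marker last */
		gen_start = READ_ONCE(dma_stats[MC_CMD_MAC_GENERATION_START]);
	} while (gen_start != gen_end);
}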
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c0537ea06c9a..d20a8660ee48 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -191,6 +191,7 @@ struct efx_tx_buffer {
* Size of the region is efx_piobuf_size.
* @piobuf_offset: Buffer offset to be specified in PIO descriptors
* @initialised: Has hardware queue been initialised?
+ * @timestamping: Is timestamping enabled for this channel?
* @handle_tso: TSO xmit preparation handler. Sets up the TSO metadata and
* may also map tx data, depending on the nature of the TSO implementation.
* @read_count: Current read pointer.
@@ -202,6 +203,10 @@ struct efx_tx_buffer {
* avoid cache-line ping-pong between the xmit path and the
* completion path.
* @merge_events: Number of TX merged completion events
+ * @completed_desc_ptr: Most recent completed pointer - only used with
+ * timestamping.
+ * @completed_timestamp_major: Top part of the most recent tx timestamp.
+ * @completed_timestamp_minor: Low part of the most recent tx timestamp.
* @insert_count: Current insert pointer
* This is the number of buffers that have been added to the
* software ring.
@@ -247,6 +252,7 @@ struct efx_tx_queue {
void __iomem *piobuf;
unsigned int piobuf_offset;
bool initialised;
+ bool timestamping;
/* Function pointers used in the fast path. */
int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *);
@@ -257,6 +263,9 @@ struct efx_tx_queue {
unsigned int merge_events;
unsigned int bytes_compl;
unsigned int pkts_compl;
+ unsigned int completed_desc_ptr;
+ u32 completed_timestamp_major;
+ u32 completed_timestamp_minor;
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -522,8 +531,12 @@ struct efx_msi_context {
* @copy: Copy the channel state prior to reallocation. May be %NULL if
* reallocation is not supported.
* @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
+ * @want_txqs: Determine whether this channel should have TX queues
+ * created. If %NULL, TX queues are not created.
* @keep_eventq: Flag for whether event queue should be kept initialised
* while the device is stopped
+ * @want_pio: Flag for whether PIO buffers should be linked to this
+ * channel's TX queues.
*/
struct efx_channel_type {
void (*handle_no_channel)(struct efx_nic *);
@@ -532,7 +545,9 @@ struct efx_channel_type {
void (*get_name)(struct efx_channel *, char *buf, size_t len);
struct efx_channel *(*copy)(const struct efx_channel *);
bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
+ bool (*want_txqs)(struct efx_channel *);
bool keep_eventq;
+ bool want_pio;
};
enum efx_led_mode {
@@ -708,6 +723,7 @@ struct vfdi_status;
* @reset_work: Scheduled reset workitem
* @membase_phys: Memory BAR value as physical address
* @membase: Memory BAR value
+ * @vi_stride: step between per-VI registers / memory regions
* @interrupt_mode: Interrupt mode
* @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
* @timer_max_ns: Interrupt timer maximum value, in nanoseconds
@@ -734,6 +750,7 @@ struct vfdi_status;
* @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
+ * @n_extra_tx_channels: Number of extra channels with TX queues
* @rx_ip_align: RX DMA address offset to have IP header aligned
* in accordance with NET_IP_ALIGN
* @rx_dma_len: Current maximum RX DMA length
@@ -773,6 +790,8 @@ struct vfdi_status;
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
* @fixed_features: Features which cannot be turned off
+ * @num_mac_stats: Number of MAC stats reported by firmware (MAC_STATS_NUM_STATS
+ * field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS)
* @stats_buffer: DMA buffer for statistics
* @phy_type: PHY type
* @phy_op: PHY interface
@@ -812,6 +831,7 @@ struct vfdi_status;
* @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF.
* @ptp_data: PTP state data
+ * @ptp_warned: has this NIC seen and warned about unexpected PTP events?
* @vpd_sn: Serial number read from VPD
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
@@ -842,6 +862,8 @@ struct efx_nic {
resource_size_t membase_phys;
void __iomem *membase;
+ unsigned int vi_stride;
+
enum efx_int_mode interrupt_mode;
unsigned int timer_quantum_ns;
unsigned int timer_max_ns;
@@ -875,6 +897,7 @@ struct efx_nic {
unsigned rss_spread;
unsigned tx_channel_offset;
unsigned n_tx_channels;
+ unsigned n_extra_tx_channels;
unsigned int rx_ip_align;
unsigned int rx_dma_len;
unsigned int rx_buffer_order;
@@ -918,6 +941,7 @@ struct efx_nic {
netdev_features_t fixed_features;
+ u16 num_mac_stats;
struct efx_buffer stats_buffer;
u64 rx_nodesc_drops_total;
u64 rx_nodesc_drops_while_down;
@@ -930,7 +954,7 @@ struct efx_nic {
unsigned int mdio_bus;
enum efx_phy_mode phy_mode;
- u32 link_advertising;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
struct efx_link_state link_state;
unsigned int n_link_state_changes;
@@ -965,6 +989,7 @@ struct efx_nic {
#endif
struct efx_ptp_data *ptp_data;
+ bool ptp_warned;
char *vpd_sn;
@@ -1154,7 +1179,7 @@ struct efx_udp_tunnel {
*/
struct efx_nic_type {
bool is_vf;
- unsigned int mem_bar;
+ unsigned int (*mem_bar)(struct efx_nic *efx);
unsigned int (*mem_map_size)(struct efx_nic *efx);
int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx);
@@ -1355,8 +1380,8 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
- return channel->channel - channel->efx->tx_channel_offset <
- channel->efx->n_tx_channels;
+ return channel->type && channel->type->want_txqs &&
+ channel->type->want_txqs(channel);
}
static inline struct efx_tx_queue *
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 7b51b6371724..6549fc685a48 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -325,6 +325,30 @@ enum {
EF10_STAT_tx_bad,
EF10_STAT_tx_bad_bytes,
EF10_STAT_tx_overflow,
+ EF10_STAT_V1_COUNT,
+ EF10_STAT_fec_uncorrected_errors = EF10_STAT_V1_COUNT,
+ EF10_STAT_fec_corrected_errors,
+ EF10_STAT_fec_corrected_symbols_lane0,
+ EF10_STAT_fec_corrected_symbols_lane1,
+ EF10_STAT_fec_corrected_symbols_lane2,
+ EF10_STAT_fec_corrected_symbols_lane3,
+ EF10_STAT_ctpio_dmabuf_start,
+ EF10_STAT_ctpio_vi_busy_fallback,
+ EF10_STAT_ctpio_long_write_success,
+ EF10_STAT_ctpio_missing_dbell_fail,
+ EF10_STAT_ctpio_overflow_fail,
+ EF10_STAT_ctpio_underflow_fail,
+ EF10_STAT_ctpio_timeout_fail,
+ EF10_STAT_ctpio_noncontig_wr_fail,
+ EF10_STAT_ctpio_frm_clobber_fail,
+ EF10_STAT_ctpio_invalid_wr_fail,
+ EF10_STAT_ctpio_vi_clobber_fallback,
+ EF10_STAT_ctpio_unqualified_fallback,
+ EF10_STAT_ctpio_runt_fallback,
+ EF10_STAT_ctpio_success,
+ EF10_STAT_ctpio_fallback,
+ EF10_STAT_ctpio_poison,
+ EF10_STAT_ctpio_erase,
EF10_STAT_COUNT
};
@@ -416,6 +440,7 @@ struct efx_ef10_nic_data {
struct efx_udp_tunnel udp_tunnels[16];
bool udp_tunnels_dirty;
struct mutex udp_tunnels_lock;
+ u64 licensed_features;
};
int efx_init_sriov(void);
@@ -424,6 +449,7 @@ void efx_fini_sriov(void);
struct ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
void efx_ptp_remove(struct efx_nic *efx);
int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
@@ -447,6 +473,8 @@ static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
}
void efx_ptp_start_datapath(struct efx_nic *efx);
void efx_ptp_stop_datapath(struct efx_nic *efx);
+bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx);
+ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue);
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index caa89bf7603e..f21661532ed3 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -149,18 +149,14 @@ enum ptp_packet_state {
/* Maximum parts-per-billion adjustment that is acceptable */
#define MAX_PPB 1000000
-/* Number of bits required to hold the above */
-#define MAX_PPB_BITS 20
-
-/* Number of extra bits allowed when calculating fractional ns.
- * EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should
- * be less than 63.
- */
-#define PPB_EXTRA_BITS 2
-
/* Precalculate scale word to avoid long long division at runtime */
-#define PPB_SCALE_WORD ((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\
- MAX_PPB_BITS)) / 1000000000LL)
+/* This is equivalent to 2^66 / 10^9. */
+#define PPB_SCALE_WORD ((1LL << (57)) / 1953125LL)
+
+/* How much to shift down after scaling to convert to FP40 */
+#define PPB_SHIFT_FP40 26
+/* ... and FP44. */
+#define PPB_SHIFT_FP44 22
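The rewritten constant is unchanged in value: 10^9 = 2^9 * 5^9 and 5^9 = 1,953,125, so 2^66 / 10^9 = 2^57 / 1,953,125, which is exactly (1LL << 57) / 1953125LL.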
#define PTP_SYNC_ATTEMPTS 4
@@ -218,8 +214,8 @@ struct efx_ptp_timeset {
* @channel: The PTP channel (Siena only)
* @rx_ts_inline: Flag for whether RX timestamps are inline (else they are
* separate events)
- * @rxq: Receive queue (awaiting timestamps)
- * @txq: Transmit queue
+ * @rxq: Receive SKB queue (awaiting timestamps)
+ * @txq: Transmit SKB queue
* @evt_list: List of MC receive events awaiting packets
* @evt_free_list: List of free events
* @evt_lock: Lock for manipulating evt_list and evt_free_list
@@ -233,19 +229,36 @@ struct efx_ptp_timeset {
* @config: Current timestamp configuration
* @enabled: PTP operation enabled
* @mode: Mode in which PTP operating (PTP version)
- * @time_format: Time format supported by this NIC
* @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
* @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @nic_time.minor_max: Wrap point for NIC minor times
+ * @nic_time.sync_event_diff_min: Minimum acceptable difference between time
+ * in packet prefix and last MCDI time sync event i.e. how much earlier than
+ * the last sync event time a packet timestamp can be.
+ * @nic_time.sync_event_diff_max: Maximum acceptable difference between time
+ * in packet prefix and last MCDI time sync event i.e. how much later than
+ * the last sync event time a packet timestamp can be.
+ * @nic_time.sync_event_minor_shift: Shift required to make minor time from
+ * field in MCDI time sync event.
* @min_synchronisation_ns: Minimum acceptable corrected sync window
- * @ts_corrections.tx: Required driver correction of transmit timestamps
- * @ts_corrections.rx: Required driver correction of receive timestamps
+ * @capabilities: Capabilities flags from the NIC
+ * @ts_corrections.ptp_tx: Required driver correction of PTP packet transmit
+ * timestamps
+ * @ts_corrections.ptp_rx: Required driver correction of PTP packet receive
+ * timestamps
* @ts_corrections.pps_out: PPS output error (information only)
* @ts_corrections.pps_in: Required driver correction of PPS input timestamps
+ * @ts_corrections.general_tx: Required driver correction of general packet
+ * transmit timestamps
+ * @ts_corrections.general_rx: Required driver correction of general packet
+ * receive timestamps
* @evt_frags: Partly assembled PTP events
* @evt_frag_idx: Current fragment number
* @evt_code: Last event code
* @start: Address at which MC indicates ready for synchronisation
* @host_time_pps: Host time at last PPS
+ * @adjfreq_ppb_shift: Shift required to convert scaled parts-per-billion
+ * frequency adjustment into a fixed point fractional nanosecond format.
* @current_adjfreq: Current ppb adjustment.
* @phc_clock: Pointer to registered phc device (if primary function)
* @phc_clock_info: Registration structure for phc device
@@ -264,6 +277,7 @@ struct efx_ptp_timeset {
* @oversize_sync_windows: Number of corrected sync windows that are too large
* @rx_no_timestamp: Number of packets received without a timestamp.
* @timeset: Last set of synchronisation statistics.
+ * @xmit_skb: Transmit SKB function.
*/
struct efx_ptp_data {
struct efx_nic *efx;
@@ -284,22 +298,31 @@ struct efx_ptp_data {
struct hwtstamp_config config;
bool enabled;
unsigned int mode;
- unsigned int time_format;
void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor,
s32 correction);
+ struct {
+ u32 minor_max;
+ u32 sync_event_diff_min;
+ u32 sync_event_diff_max;
+ unsigned int sync_event_minor_shift;
+ } nic_time;
unsigned int min_synchronisation_ns;
+ unsigned int capabilities;
struct {
- s32 tx;
- s32 rx;
+ s32 ptp_tx;
+ s32 ptp_rx;
s32 pps_out;
s32 pps_in;
+ s32 general_tx;
+ s32 general_rx;
} ts_corrections;
efx_qword_t evt_frags[MAX_EVENT_FRAGS];
int evt_frag_idx;
int evt_code;
struct efx_buffer start;
struct pps_event_time host_time_pps;
+ unsigned int adjfreq_ppb_shift;
s64 current_adjfreq;
struct ptp_clock *phc_clock;
struct ptp_clock_info phc_clock_info;
@@ -319,6 +342,7 @@ struct efx_ptp_data {
unsigned int rx_no_timestamp;
struct efx_ptp_timeset
timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
+ void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb);
};
static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
@@ -329,6 +353,24 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
static int efx_phc_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
+bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ return ((efx_nic_rev(efx) >= EFX_REV_HUNT_A0) &&
+ (nic_data->datapath_caps2 &
+ (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN)
+ ));
+}
+
+/* PTP 'extra' channel is still a traffic channel, but we only create TX queues
+ * if PTP uses MAC TX timestamps, not if PTP uses the MC directly to transmit.
+ */
+static bool efx_ptp_want_txqs(struct efx_channel *channel)
+{
+ return efx_ptp_use_mac_tx_timestamps(channel->efx);
+}
+
#define PTP_SW_STAT(ext_name, field_name) \
{ #ext_name, 0, offsetof(struct efx_ptp_data, field_name) }
#define PTP_MC_STAT(ext_name, mcdi_name) \
@@ -471,6 +513,89 @@ static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor,
return efx_ptp_s27_to_ktime(nic_major, nic_minor);
}
+/* For Medford2 platforms the time is in seconds and quarter nanoseconds. */
+static void efx_ptp_ns_to_s_qns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec64 ts = ns_to_timespec64(ns);
+
+ *nic_major = (u32)ts.tv_sec;
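+ /* tv_nsec is below 10^9, so the quarter-nanosecond value peaks at
+ * 3999999996 and still fits in a u32.
+ */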
+ *nic_minor = ts.tv_nsec * 4;
+}
+
+static ktime_t efx_ptp_s_qns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ ktime_t kt;
+
+ nic_minor = DIV_ROUND_CLOSEST(nic_minor, 4);
+ correction = DIV_ROUND_CLOSEST(correction, 4);
+
+ kt = ktime_set(nic_major, nic_minor);
+
+ if (correction >= 0)
+ kt = ktime_add_ns(kt, (u64)correction);
+ else
+ kt = ktime_sub_ns(kt, (u64)-correction);
+ return kt;
+}
+
+struct efx_channel *efx_ptp_channel(struct efx_nic *efx)
+{
+ return efx->ptp_data ? efx->ptp_data->channel : NULL;
+}
+
+static u32 last_sync_timestamp_major(struct efx_nic *efx)
+{
+ struct efx_channel *channel = efx_ptp_channel(efx);
+ u32 major = 0;
+
+ if (channel)
+ major = channel->sync_timestamp_major;
+ return major;
+}
+
+/* The 8000 series and later can provide the time from the MAC, which is only
+ * 48 bits long and provides meta-information in the top 2 bits.
+ */
+static ktime_t
+efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx,
+ struct efx_ptp_data *ptp,
+ u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ ktime_t kt = { 0 };
+
+ if (!(nic_major & 0x80000000)) {
+ WARN_ON_ONCE(nic_major >> 16);
+ /* Use the top bits from the latest sync event. */
+ nic_major &= 0xffff;
+ nic_major |= (last_sync_timestamp_major(efx) & 0xffff0000);
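+ /* e.g. a packet major of 0x2345 combined with a last sync major of
+ * 0x00012340 reconstructs to 0x00012345 (values purely illustrative).
+ */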
+
+ kt = ptp->nic_to_kernel_time(nic_major, nic_minor,
+ correction);
+ }
+ return kt;
+}
+
+ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ ktime_t kt;
+
+ if (efx_ptp_use_mac_tx_timestamps(efx))
+ kt = efx_ptp_mac_nic_to_ktime_correction(efx, ptp,
+ tx_queue->completed_timestamp_major,
+ tx_queue->completed_timestamp_minor,
+ ptp->ts_corrections.general_tx);
+ else
+ kt = ptp->nic_to_kernel_time(
+ tx_queue->completed_timestamp_major,
+ tx_queue->completed_timestamp_minor,
+ ptp->ts_corrections.general_tx);
+ return kt;
+}
+
/* Get PTP attributes and set up time conversions */
static int efx_ptp_get_attributes(struct efx_nic *efx)
{
@@ -502,31 +627,71 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
return rc;
}
- if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
+ switch (fmt) {
+ case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION:
ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
- } else if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS) {
+ ptp->nic_time.minor_max = 1 << 27;
+ ptp->nic_time.sync_event_minor_shift = 19;
+ break;
+ case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS:
ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
- } else {
+ ptp->nic_time.minor_max = 1000000000;
+ ptp->nic_time.sync_event_minor_shift = 22;
+ break;
+ case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS:
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s_qns;
+ ptp->nic_to_kernel_time = efx_ptp_s_qns_to_ktime_correction;
+ ptp->nic_time.minor_max = 4000000000UL;
+ ptp->nic_time.sync_event_minor_shift = 24;
+ break;
+ default:
return -ERANGE;
}
- ptp->time_format = fmt;
-
- /* MC_CMD_PTP_OP_GET_ATTRIBUTES is an extended version of an older
- * operation MC_CMD_PTP_OP_GET_TIME_FORMAT that also returns a value
- * to use for the minimum acceptable corrected synchronization window.
+ /* Precalculate acceptable difference between the minor time in the
+ * packet prefix and the last MCDI time sync event. We expect the
+ * packet prefix timestamp to be after the sync event by up to one
+ * sync event interval (0.25s), but we allow it to exceed this by a
+ * fuzz factor of (0.1s).
+ */
+ ptp->nic_time.sync_event_diff_min = ptp->nic_time.minor_max
+ - (ptp->nic_time.minor_max / 10);
+ ptp->nic_time.sync_event_diff_max = (ptp->nic_time.minor_max / 4)
+ + (ptp->nic_time.minor_max / 10);
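+ /* e.g. for the 27-bit minor format (minor_max = 2^27 ticks per
+ * second) this accepts packets from 0.1s behind the sync event
+ * (diff >= 0.9 * minor_max) to 0.35s ahead of it
+ * (diff <= 0.35 * minor_max).
+ */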
+
+ /* MC_CMD_PTP_OP_GET_ATTRIBUTES has been extended twice from an older
+ * operation MC_CMD_PTP_OP_GET_TIME_FORMAT. The function may now return
+ * a value to use for the minimum acceptable corrected synchronization
+ * window and may return further capabilities.
* If we have the extra information, store it. For older firmware that
* does not implement the extended command, use the default value.
*/
- if (rc == 0 && out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+ if (rc == 0 &&
+ out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST)
ptp->min_synchronisation_ns =
MCDI_DWORD(outbuf,
PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN);
else
ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS;
+ if (rc == 0 &&
+ out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+ ptp->capabilities = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_ATTRIBUTES_CAPABILITIES);
+ else
+ ptp->capabilities = 0;
+
+ /* Set up the shift for conversion between frequency
+ * adjustments in parts-per-billion and the fixed-point
+ * fractional ns format that the adapter uses.
+ */
+ if (ptp->capabilities & (1 << MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN))
+ ptp->adjfreq_ppb_shift = PPB_SHIFT_FP44;
+ else
+ ptp->adjfreq_ppb_shift = PPB_SHIFT_FP40;
+
return 0;
}
@@ -534,8 +699,9 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN);
int rc;
+ size_t out_len;
/* Get the timestamp corrections from the NIC. If this operation is
* not supported (older NICs) then no correction is required.
@@ -545,21 +711,37 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), NULL);
+ outbuf, sizeof(outbuf), &out_len);
if (rc == 0) {
- efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
+ efx->ptp_data->ts_corrections.ptp_tx = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
- efx->ptp_data->ts_corrections.rx = MCDI_DWORD(outbuf,
+ efx->ptp_data->ts_corrections.ptp_rx = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE);
efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT);
efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN);
+
+ if (out_len >= MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN) {
+ efx->ptp_data->ts_corrections.general_tx = MCDI_DWORD(
+ outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX);
+ efx->ptp_data->ts_corrections.general_rx = MCDI_DWORD(
+ outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX);
+ } else {
+ efx->ptp_data->ts_corrections.general_tx =
+ efx->ptp_data->ts_corrections.ptp_tx;
+ efx->ptp_data->ts_corrections.general_rx =
+ efx->ptp_data->ts_corrections.ptp_rx;
+ }
} else if (rc == -EINVAL) {
- efx->ptp_data->ts_corrections.tx = 0;
- efx->ptp_data->ts_corrections.rx = 0;
+ efx->ptp_data->ts_corrections.ptp_tx = 0;
+ efx->ptp_data->ts_corrections.ptp_rx = 0;
efx->ptp_data->ts_corrections.pps_out = 0;
efx->ptp_data->ts_corrections.pps_in = 0;
+ efx->ptp_data->ts_corrections.general_tx = 0;
+ efx->ptp_data->ts_corrections.general_rx = 0;
} else {
efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf,
sizeof(outbuf), rc);
@@ -873,8 +1055,24 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
return rc;
}
+/* Transmit a PTP packet via the dedicated hardware timestamped queue. */
+static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
+{
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
+ struct efx_tx_queue *tx_queue;
+ u8 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+
+ tx_queue = &ptp_data->channel->tx_queue[type];
+ if (tx_queue && tx_queue->timestamping) {
+ efx_enqueue_skb(tx_queue, skb);
+ } else {
+ WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
+ dev_kfree_skb_any(skb);
+ }
+}
+
/* Transmit a PTP packet, via the MCDI interface, to the wire. */
-static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
+static void efx_ptp_xmit_skb_mc(struct efx_nic *efx, struct sk_buff *skb)
{
struct efx_ptp_data *ptp_data = efx->ptp_data;
struct skb_shared_hwtstamps timestamps;
@@ -910,16 +1108,16 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
- ptp_data->ts_corrections.tx);
+ ptp_data->ts_corrections.ptp_tx);
skb_tstamp_tx(skb, &timestamps);
rc = 0;
fail:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
- return rc;
+ return;
}
static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
@@ -1189,7 +1387,7 @@ static void efx_ptp_worker(struct work_struct *work)
efx_ptp_process_events(efx, &tempq);
while ((skb = skb_dequeue(&ptp_data->txq)))
- efx_ptp_xmit_skb(efx, skb);
+ ptp_data->xmit_skb(efx, skb);
while ((skb = __skb_dequeue(&tempq)))
efx_ptp_process_rx(efx, skb);
@@ -1239,6 +1437,14 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
goto fail2;
}
+ if (efx_ptp_use_mac_tx_timestamps(efx)) {
+ ptp->xmit_skb = efx_ptp_xmit_skb_queue;
+ /* Request sync events on this channel. */
+ channel->sync_events_state = SYNC_EVENTS_QUIESCENT;
+ } else {
+ ptp->xmit_skb = efx_ptp_xmit_skb_mc;
+ }
+
INIT_WORK(&ptp->work, efx_ptp_worker);
ptp->config.flags = 0;
ptp->config.tx_type = HWTSTAMP_TX_OFF;
@@ -1303,11 +1509,21 @@ fail1:
static int efx_ptp_probe_channel(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
+ int rc;
channel->irq_moderation_us = 0;
channel->rx_queue.core_index = 0;
- return efx_ptp_probe(efx, channel);
+ rc = efx_ptp_probe(efx, channel);
+ /* Failure to probe PTP is not fatal; this channel will just not be
+ * used for anything.
+ * In the case of EPERM, efx_ptp_probe will print its own message (in
+ * efx_ptp_get_attributes()), so we don't need to.
+ */
+ if (rc && rc != -EPERM)
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to probe PTP, rc=%d\n", rc);
+ return 0;
}
void efx_ptp_remove(struct efx_nic *efx)
@@ -1332,6 +1548,7 @@ void efx_ptp_remove(struct efx_nic *efx)
efx_nic_free_buffer(efx, &efx->ptp_data->start);
kfree(efx->ptp_data);
+ efx->ptp_data = NULL;
}
static void efx_ptp_remove_channel(struct efx_channel *channel)
@@ -1548,6 +1765,17 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE);
+ /* Check licensed features. If we don't have the license for TX
+ * timestamps, the NIC will not support them.
+ */
+ if (efx_ptp_use_mac_tx_timestamps(efx)) {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (!(nic_data->licensed_features &
+ (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN)))
+ ts_info->so_timestamping &=
+ ~SOF_TIMESTAMPING_TX_HARDWARE;
+ }
if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
ts_info->phc_index =
ptp_clock_index(primary->ptp_data->phc_clock);
@@ -1627,7 +1855,7 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time(
EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA),
- ptp->ts_corrections.rx);
+ ptp->ts_corrections.ptp_rx);
evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
list_add_tail(&evt->link, &ptp->evt_list);
@@ -1662,9 +1890,11 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
if (!ptp) {
- if (net_ratelimit())
+ if (!efx->ptp_warned) {
netif_warn(efx, drv, efx->net_dev,
"Received PTP event but PTP not set up\n");
+ efx->ptp_warned = true;
+ }
return;
}
@@ -1707,9 +1937,20 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
{
+ struct efx_nic *efx = channel->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ /* When extracting the sync timestamp minor value, we should discard
+ * the least significant two bits. These are not required in order
+ * to reconstruct full-range timestamps and they are optionally used
+ * to report status depending on the options supplied when subscribing
+ * for sync events.
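+ * The eight bits delivered in PTP_TIME_MINOR_MS_8BITS are the most
+ * significant bits of the minor time, so sync_event_minor_shift is the
+ * minor width minus 8: 19 for the 27-bit format, 22 for nanoseconds
+ * (30 bits) and 24 for quarter nanoseconds (32 bits).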
+ */
channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR);
channel->sync_timestamp_minor =
- MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_26_19) << 19;
+ (MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_MS_8BITS) & 0xFC)
+ << ptp->nic_time.sync_event_minor_shift;
+
/* if sync events have been disabled then we want to silently ignore
* this event, so throw away result.
*/
@@ -1717,15 +1958,6 @@ void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
SYNC_EVENTS_VALID);
}
-/* make some assumptions about the time representation rather than abstract it,
- * since we currently only support one type of inline timestamping and only on
- * EF10.
- */
-#define MINOR_TICKS_PER_SECOND 0x8000000
-/* Fuzz factor for sync events to be out of order with RX events */
-#define FUZZ (MINOR_TICKS_PER_SECOND / 10)
-#define EXPECTED_SYNC_EVENTS_PER_SECOND 4
-
static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
@@ -1743,31 +1975,33 @@ void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
struct sk_buff *skb)
{
struct efx_nic *efx = channel->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
u32 pkt_timestamp_major, pkt_timestamp_minor;
u32 diff, carry;
struct skb_shared_hwtstamps *timestamps;
- pkt_timestamp_minor = (efx_rx_buf_timestamp_minor(efx,
- skb_mac_header(skb)) +
- (u32) efx->ptp_data->ts_corrections.rx) &
- (MINOR_TICKS_PER_SECOND - 1);
+ if (channel->sync_events_state != SYNC_EVENTS_VALID)
+ return;
+
+ pkt_timestamp_minor = efx_rx_buf_timestamp_minor(efx, skb_mac_header(skb));
/* get the difference between the packet and sync timestamps,
* modulo one second
*/
- diff = (pkt_timestamp_minor - channel->sync_timestamp_minor) &
- (MINOR_TICKS_PER_SECOND - 1);
+ diff = pkt_timestamp_minor - channel->sync_timestamp_minor;
+ if (pkt_timestamp_minor < channel->sync_timestamp_minor)
+ diff += ptp->nic_time.minor_max;
+
/* do we roll over a second boundary and need to carry the one? */
- carry = channel->sync_timestamp_minor + diff > MINOR_TICKS_PER_SECOND ?
+ carry = (channel->sync_timestamp_minor >= ptp->nic_time.minor_max - diff) ?
1 : 0;
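+ /* e.g. with minor_max = 10^9, a sync minor of 0.9e9 and a packet
+ * minor of 0.1e9 give diff = 0.2e9 after wrapping, and carry = 1
+ * since 0.9e9 >= 1e9 - 0.2e9: the packet lies in the second after
+ * the sync event.
+ */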
- if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND +
- FUZZ) {
+ if (diff <= ptp->nic_time.sync_event_diff_max) {
/* packet is ahead of the sync event by a quarter of a second or
* less (allowing for fuzz)
*/
pkt_timestamp_major = channel->sync_timestamp_major + carry;
- } else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) {
+ } else if (diff >= ptp->nic_time.sync_event_diff_min) {
/* packet is behind the sync event but within the fuzz factor.
* This means the RX packet and sync event crossed as they were
* placed on the event queue, which can sometimes happen.
@@ -1789,7 +2023,9 @@ void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
/* attach the timestamps to the skb */
timestamps = skb_hwtstamps(skb);
timestamps->hwtstamp =
- efx_ptp_s27_to_ktime(pkt_timestamp_major, pkt_timestamp_minor);
+ ptp->nic_to_kernel_time(pkt_timestamp_major,
+ pkt_timestamp_minor,
+ ptp->ts_corrections.general_rx);
}
static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
@@ -1807,9 +2043,10 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
else if (delta < -MAX_PPB)
delta = -MAX_PPB;
- /* Convert ppb to fixed point ns. */
- adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >>
- (PPB_EXTRA_BITS + MAX_PPB_BITS));
+ /* Convert ppb to fixed point ns taking care to round correctly. */
+ adjustment_ns = ((s64)delta * PPB_SCALE_WORD +
+ (1 << (ptp_data->adjfreq_ppb_shift - 1))) >>
+ ptp_data->adjfreq_ppb_shift;
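+ /* Sanity check: delta = 1 ppb with the FP40 shift yields roughly
+ * 2^40 / 10^9 ~= 1100, i.e. ~1e-9 ns of adjustment per ns, as
+ * expected. With |delta| <= 10^6 < 2^20 and PPB_SCALE_WORD < 2^37,
+ * the product stays comfortably below 2^63.
+ */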
MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0);
@@ -1916,6 +2153,7 @@ static const struct efx_channel_type efx_ptp_channel_type = {
.get_name = efx_ptp_get_channel_name,
/* no copy operation; there is no need to reallocate this channel */
.receive_skb = efx_ptp_rx,
+ .want_txqs = efx_ptp_want_txqs,
.keep_eventq = false,
};
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index a617f657eae3..ae8645ae4492 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -242,6 +242,14 @@ static int siena_dimension_resources(struct efx_nic *efx)
return 0;
}
+/* On all Falcon-architecture NICs, PFs use BAR 0 for I/O space and BAR 2(&3)
+ * for memory.
+ */
+static unsigned int siena_mem_bar(struct efx_nic *efx)
+{
+ return 2;
+}
+
static unsigned int siena_mem_map_size(struct efx_nic *efx)
{
return FR_CZ_MC_TREG_SMEM +
@@ -547,7 +555,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
dma_stats = efx->stats_buffer.addr;
- generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ generation_end = dma_stats[efx->num_mac_stats - 1];
if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
@@ -950,7 +958,7 @@ fail:
const struct efx_nic_type siena_a0_nic_type = {
.is_vf = false,
- .mem_bar = EFX_MEM_BAR,
+ .mem_bar = siena_mem_bar,
.mem_map_size = siena_mem_map_size,
.probe = siena_probe_nic,
.remove = siena_remove_nic,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 9937a2450e57..cece961f2e82 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -77,9 +77,23 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
}
if (buffer->flags & EFX_TX_BUF_SKB) {
+ struct sk_buff *skb = (struct sk_buff *)buffer->skb;
+
EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
(*pkts_compl)++;
- (*bytes_compl) += buffer->skb->len;
+ (*bytes_compl) += skb->len;
+ if (tx_queue->timestamping &&
+ (tx_queue->completed_timestamp_major ||
+ tx_queue->completed_timestamp_minor)) {
+ struct skb_shared_hwtstamps hwtstamp;
+
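+ /* Report the hardware timestamp for this completion, then
+ * clear it so a later completion without a fresh timestamp
+ * cannot be reported with stale time.
+ */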
+ hwtstamp.hwtstamp =
+ efx_ptp_nic_to_kernel_time(tx_queue);
+ skb_tstamp_tx(skb, &hwtstamp);
+
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
+ }
dev_consume_skb_any((struct sk_buff *)buffer->skb);
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
"TX queue %d transmission id %x complete\n",
@@ -828,6 +842,11 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->old_read_count = 0;
tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
tx_queue->xmit_more_available = false;
+ tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
+ tx_queue->channel == efx_ptp_channel(efx));
+ tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+ tx_queue->completed_timestamp_major = 0;
+ tx_queue->completed_timestamp_minor = 0;
/* Set up default function pointers. These may get replaced by
* efx_nic_init_tx() based off NIC/queue capabilities.
diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig
new file mode 100644
index 000000000000..6bcfe27fc560
--- /dev/null
+++ b/drivers/net/ethernet/socionext/Kconfig
@@ -0,0 +1,34 @@
+config NET_VENDOR_SOCIONEXT
+ bool "Socionext ethernet drivers"
+ default y
+ ---help---
+ Option to select ethernet drivers for Socionext platforms.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Socionext devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_SOCIONEXT
+
+config SNI_AVE
+ tristate "Socionext AVE ethernet support"
+ depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+ select PHYLIB
+ ---help---
+ Driver for gigabit ethernet MACs, called AVE, in the
+ Socionext UniPhier family.
+
+config SNI_NETSEC
+ tristate "Socionext NETSEC ethernet support"
+ depends on (ARCH_SYNQUACER || COMPILE_TEST) && OF
+ select PHYLIB
+ select MII
+ ---help---
+ Enable to add support for the SocioNext NetSec Gigabit Ethernet
+ controller + PHY, as found on the Synquacer SC2A11 SoC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called netsec. If unsure, say N.
+
+endif # NET_VENDOR_SOCIONEXT
diff --git a/drivers/net/ethernet/socionext/Makefile b/drivers/net/ethernet/socionext/Makefile
new file mode 100644
index 000000000000..7fd837a999fd
--- /dev/null
+++ b/drivers/net/ethernet/socionext/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for all ethernet ip drivers on Socionext platforms
+#
+obj-$(CONFIG_SNI_AVE) += sni_ave.o
+obj-$(CONFIG_SNI_NETSEC) += netsec.o
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
new file mode 100644
index 000000000000..f4c0b02ddad8
--- /dev/null
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
+#include <linux/of_mdio.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <net/tcp.h>
+#include <net/ip6_checksum.h>
+
+#define NETSEC_REG_SOFT_RST 0x104
+#define NETSEC_REG_COM_INIT 0x120
+
+#define NETSEC_REG_TOP_STATUS 0x200
+#define NETSEC_IRQ_RX BIT(1)
+#define NETSEC_IRQ_TX BIT(0)
+
+#define NETSEC_REG_TOP_INTEN 0x204
+#define NETSEC_REG_INTEN_SET 0x234
+#define NETSEC_REG_INTEN_CLR 0x238
+
+#define NETSEC_REG_NRM_TX_STATUS 0x400
+#define NETSEC_REG_NRM_TX_INTEN 0x404
+#define NETSEC_REG_NRM_TX_INTEN_SET 0x428
+#define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c
+#define NRM_TX_ST_NTOWNR BIT(17)
+#define NRM_TX_ST_TR_ERR BIT(16)
+#define NRM_TX_ST_TXDONE BIT(15)
+#define NRM_TX_ST_TMREXP BIT(14)
+
+#define NETSEC_REG_NRM_RX_STATUS 0x440
+#define NETSEC_REG_NRM_RX_INTEN 0x444
+#define NETSEC_REG_NRM_RX_INTEN_SET 0x468
+#define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c
+#define NRM_RX_ST_RC_ERR BIT(16)
+#define NRM_RX_ST_PKTCNT BIT(15)
+#define NRM_RX_ST_TMREXP BIT(14)
+
+#define NETSEC_REG_PKT_CMD_BUF 0xd0
+
+#define NETSEC_REG_CLK_EN 0x100
+
+#define NETSEC_REG_PKT_CTRL 0x140
+
+#define NETSEC_REG_DMA_TMR_CTRL 0x20c
+#define NETSEC_REG_F_TAIKI_MC_VER 0x22c
+#define NETSEC_REG_F_TAIKI_VER 0x230
+#define NETSEC_REG_DMA_HM_CTRL 0x214
+#define NETSEC_REG_DMA_MH_CTRL 0x220
+#define NETSEC_REG_ADDR_DIS_CORE 0x218
+#define NETSEC_REG_DMAC_HM_CMD_BUF 0x210
+#define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c
+
+#define NETSEC_REG_NRM_TX_PKTCNT 0x410
+
+#define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414
+#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418
+
+#define NETSEC_REG_NRM_TX_TMR 0x41c
+
+#define NETSEC_REG_NRM_RX_PKTCNT 0x454
+#define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458
+#define NETSEC_REG_NRM_TX_TXINT_TMR 0x420
+#define NETSEC_REG_NRM_RX_RXINT_TMR 0x460
+
+#define NETSEC_REG_NRM_RX_TMR 0x45c
+
+#define NETSEC_REG_NRM_TX_DESC_START_UP 0x434
+#define NETSEC_REG_NRM_TX_DESC_START_LW 0x408
+#define NETSEC_REG_NRM_RX_DESC_START_UP 0x474
+#define NETSEC_REG_NRM_RX_DESC_START_LW 0x448
+
+#define NETSEC_REG_NRM_TX_CONFIG 0x430
+#define NETSEC_REG_NRM_RX_CONFIG 0x470
+
+#define MAC_REG_STATUS 0x1024
+#define MAC_REG_DATA 0x11c0
+#define MAC_REG_CMD 0x11c4
+#define MAC_REG_FLOW_TH 0x11cc
+#define MAC_REG_INTF_SEL 0x11d4
+#define MAC_REG_DESC_INIT 0x11fc
+#define MAC_REG_DESC_SOFT_RST 0x1204
+#define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500
+
+#define GMAC_REG_MCR 0x0000
+#define GMAC_REG_MFFR 0x0004
+#define GMAC_REG_GAR 0x0010
+#define GMAC_REG_GDR 0x0014
+#define GMAC_REG_FCR 0x0018
+#define GMAC_REG_BMR 0x1000
+#define GMAC_REG_RDLAR 0x100c
+#define GMAC_REG_TDLAR 0x1010
+#define GMAC_REG_OMR 0x1018
+
+#define MHZ(n) ((n) * 1000 * 1000)
+
+#define NETSEC_TX_SHIFT_OWN_FIELD 31
+#define NETSEC_TX_SHIFT_LD_FIELD 30
+#define NETSEC_TX_SHIFT_DRID_FIELD 24
+#define NETSEC_TX_SHIFT_PT_FIELD 21
+#define NETSEC_TX_SHIFT_TDRID_FIELD 16
+#define NETSEC_TX_SHIFT_CC_FIELD 15
+#define NETSEC_TX_SHIFT_FS_FIELD 9
+#define NETSEC_TX_LAST 8
+#define NETSEC_TX_SHIFT_CO 7
+#define NETSEC_TX_SHIFT_SO 6
+#define NETSEC_TX_SHIFT_TRS_FIELD 4
+
+#define NETSEC_RX_PKT_OWN_FIELD 31
+#define NETSEC_RX_PKT_LD_FIELD 30
+#define NETSEC_RX_PKT_SDRID_FIELD 24
+#define NETSEC_RX_PKT_FR_FIELD 23
+#define NETSEC_RX_PKT_ER_FIELD 21
+#define NETSEC_RX_PKT_ERR_FIELD 16
+#define NETSEC_RX_PKT_TDRID_FIELD 12
+#define NETSEC_RX_PKT_FS_FIELD 9
+#define NETSEC_RX_PKT_LS_FIELD 8
+#define NETSEC_RX_PKT_CO_FIELD 6
+
+#define NETSEC_RX_PKT_ERR_MASK 3
+
+#define NETSEC_MAX_TX_PKT_LEN 1518
+#define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018
+
+#define NETSEC_RING_GMAC 15
+#define NETSEC_RING_MAX 2
+
+#define NETSEC_TCP_SEG_LEN_MAX 1460
+#define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960
+
+#define NETSEC_RX_CKSUM_NOTAVAIL 0
+#define NETSEC_RX_CKSUM_OK 1
+#define NETSEC_RX_CKSUM_NG 2
+
+#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20)
+#define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4)
+
+#define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20)
+#define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19)
+
+#define NETSEC_INT_PKTCNT_MAX 2047
+
+#define NETSEC_FLOW_START_TH_MAX 95
+#define NETSEC_FLOW_STOP_TH_MAX 95
+#define NETSEC_FLOW_PAUSE_TIME_MIN 5
+
+#define NETSEC_CLK_EN_REG_DOM_ALL 0x3f
+
+#define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28)
+#define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27)
+#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3)
+#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2)
+#define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1)
+#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0)
+
+#define NETSEC_CLK_EN_REG_DOM_G BIT(5)
+#define NETSEC_CLK_EN_REG_DOM_C BIT(1)
+#define NETSEC_CLK_EN_REG_DOM_D BIT(0)
+
+#define NETSEC_COM_INIT_REG_DB BIT(2)
+#define NETSEC_COM_INIT_REG_CLS BIT(1)
+#define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \
+ NETSEC_COM_INIT_REG_DB)
+
+#define NETSEC_SOFT_RST_REG_RESET 0
+#define NETSEC_SOFT_RST_REG_RUN BIT(31)
+
+#define NETSEC_DMA_CTRL_REG_STOP 1
+#define MH_CTRL__MODE_TRANS BIT(20)
+
+#define NETSEC_GMAC_CMD_ST_READ 0
+#define NETSEC_GMAC_CMD_ST_WRITE BIT(28)
+#define NETSEC_GMAC_CMD_ST_BUSY BIT(31)
+
+#define NETSEC_GMAC_BMR_REG_COMMON 0x00412080
+#define NETSEC_GMAC_BMR_REG_RESET 0x00020181
+#define NETSEC_GMAC_BMR_REG_SWR 0x00000001
+
+#define NETSEC_GMAC_OMR_REG_ST BIT(13)
+#define NETSEC_GMAC_OMR_REG_SR BIT(1)
+
+#define NETSEC_GMAC_MCR_REG_IBN BIT(30)
+#define NETSEC_GMAC_MCR_REG_CST BIT(25)
+#define NETSEC_GMAC_MCR_REG_JE BIT(20)
+#define NETSEC_MCR_PS BIT(15)
+#define NETSEC_GMAC_MCR_REG_FES BIT(14)
+#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c
+#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c
+
+#define NETSEC_FCR_RFE BIT(2)
+#define NETSEC_FCR_TFE BIT(1)
+
+#define NETSEC_GMAC_GAR_REG_GW BIT(1)
+#define NETSEC_GMAC_GAR_REG_GB BIT(0)
+
+#define NETSEC_GMAC_GAR_REG_SHIFT_PA 11
+#define NETSEC_GMAC_GAR_REG_SHIFT_GR 6
+#define GMAC_REG_SHIFT_CR_GAR 2
+
+#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2
+#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3
+#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0
+#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1
+#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4
+#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5
+
+#define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000
+#define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000
+
+#define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000
+
+#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31)
+#define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30)
+#define NETSEC_REG_DESC_TMR_MODE 4
+#define NETSEC_REG_DESC_ENDIAN 0
+
+#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1
+#define NETSEC_MAC_DESC_INIT_REG_INIT 1
+
+#define NETSEC_EEPROM_MAC_ADDRESS 0x00
+#define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08
+#define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C
+#define NETSEC_EEPROM_HM_ME_SIZE 0x10
+#define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14
+#define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18
+#define NETSEC_EEPROM_MH_ME_SIZE 0x1C
+#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
+#define NETSEC_EEPROM_PKT_ME_SIZE 0x24
+
+#define DESC_NUM 128
+#define NAPI_BUDGET (DESC_NUM / 2)
+
+#define DESC_SZ sizeof(struct netsec_de)
+
+#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
+
+enum ring_id {
+ NETSEC_RING_TX = 0,
+ NETSEC_RING_RX
+};
+
+struct netsec_desc {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ void *addr;
+ u16 len;
+};
+
+struct netsec_desc_ring {
+ dma_addr_t desc_dma;
+ struct netsec_desc *desc;
+ void *vaddr;
+ u16 pkt_cnt;
+ u16 head, tail;
+};
+
+struct netsec_priv {
+ struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
+ struct ethtool_coalesce et_coalesce;
+ spinlock_t reglock; /* protect reg access */
+ struct napi_struct napi;
+ phy_interface_t phy_interface;
+ struct net_device *ndev;
+ struct device_node *phy_np;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ void __iomem *ioaddr;
+ void __iomem *eeprom_base;
+ struct device *dev;
+ struct clk *clk;
+ u32 msg_enable;
+ u32 freq;
+ bool rx_cksum_offload_flag;
+};
+
+struct netsec_de { /* Netsec Descriptor layout */
+ u32 attr;
+ u32 data_buf_addr_up;
+ u32 data_buf_addr_lw;
+ u32 buf_len_info;
+};
+
+struct netsec_tx_pkt_ctrl {
+ u16 tcp_seg_len;
+ bool tcp_seg_offload_flag;
+ bool cksum_offload_flag;
+};
+
+struct netsec_rx_pkt_info {
+ int rx_cksum_result;
+ int err_code;
+ bool err_flag;
+};
+
+static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
+{
+ writel(val, priv->ioaddr + reg_addr);
+}
+
+static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
+{
+ return readl(priv->ioaddr + reg_addr);
+}
+
+/************* MDIO BUS OPS FOLLOW *************/
+
+#define TIMEOUT_SPINS_MAC 1000
+#define TIMEOUT_SECONDARY_MS_MAC 100
+
+static u32 netsec_clk_type(u32 freq)
+{
+ if (freq < MHZ(35))
+ return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
+ if (freq < MHZ(60))
+ return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
+ if (freq < MHZ(100))
+ return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
+ if (freq < MHZ(150))
+ return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
+ if (freq < MHZ(250))
+ return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;
+
+ return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
+}
+
+static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
+{
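+ /* Poll in two phases: busy-wait for up to TIMEOUT_SPINS_MAC register
+ * reads, then back off to 1-2ms sleeps for up to
+ * TIMEOUT_SECONDARY_MS_MAC iterations (roughly 100-200ms in total)
+ * before declaring a timeout.
+ */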
+ u32 timeout = TIMEOUT_SPINS_MAC;
+
+ while (--timeout && netsec_read(priv, addr) & mask)
+ cpu_relax();
+ if (timeout)
+ return 0;
+
+ timeout = TIMEOUT_SECONDARY_MS_MAC;
+ while (--timeout && netsec_read(priv, addr) & mask)
+ usleep_range(1000, 2000);
+
+ if (timeout)
+ return 0;
+
+ netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
+{
+ netsec_write(priv, MAC_REG_DATA, value);
+ netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
+ return netsec_wait_while_busy(priv,
+ MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
+}
+
+static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
+{
+ int ret;
+
+ netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
+ ret = netsec_wait_while_busy(priv,
+ MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
+ if (ret)
+ return ret;
+
+ *read = netsec_read(priv, MAC_REG_DATA);
+
+ return 0;
+}
+
+static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
+ u32 addr, u32 mask)
+{
+ u32 timeout = TIMEOUT_SPINS_MAC;
+ u32 data;
+ int ret;
+
+ do {
+ ret = netsec_mac_read(priv, addr, &data);
+ if (ret)
+ break;
+ cpu_relax();
+ } while (--timeout && (data & mask));
+
+ if (timeout)
+ return 0;
+
+ timeout = TIMEOUT_SECONDARY_MS_MAC;
+ do {
+ usleep_range(1000, 2000);
+
+ ret = netsec_mac_read(priv, addr, &data);
+ if (ret)
+ break;
+ cpu_relax();
+ } while (--timeout && (data & mask));
+
+ if (timeout && !ret)
+ return 0;
+
+ netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->ndev->phydev;
+ u32 value = 0;
+
+ value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
+ NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;
+
+ if (phydev->speed != SPEED_1000)
+ value |= NETSEC_MCR_PS;
+
+ if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
+ phydev->speed == SPEED_100)
+ value |= NETSEC_GMAC_MCR_REG_FES;
+
+ value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;
+
+ if (phy_interface_mode_is_rgmii(priv->phy_interface))
+ value |= NETSEC_GMAC_MCR_REG_IBN;
+
+ if (netsec_mac_write(priv, GMAC_REG_MCR, value))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int netsec_phy_write(struct mii_bus *bus,
+ int phy_addr, int reg, u16 val)
+{
+ struct netsec_priv *priv = bus->priv;
+
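+ /* MDIO write: stage the value in GDR, then compose GAR from the PHY
+ * address, register number and MDC clock range, setting GW (write)
+ * and GB (busy). The transfer has finished once hardware clears GB.
+ */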
+ if (netsec_mac_write(priv, GMAC_REG_GDR, val))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_GAR,
+ phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
+ reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
+ NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
+ (netsec_clk_type(priv->freq) <<
+ GMAC_REG_SHIFT_CR_GAR)))
+ return -ETIMEDOUT;
+
+ return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+ NETSEC_GMAC_GAR_REG_GB);
+}
+
+static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
+{
+ struct netsec_priv *priv = bus->priv;
+ u32 data;
+ int ret;
+
+ if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
+ phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
+ reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
+ (netsec_clk_type(priv->freq) <<
+ GMAC_REG_SHIFT_CR_GAR)))
+ return -ETIMEDOUT;
+
+ ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+ NETSEC_GMAC_GAR_REG_GB);
+ if (ret)
+ return ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
+ if (ret)
+ return ret;
+
+ return data;
+}
+
+/************* ETHTOOL_OPS FOLLOW *************/
+
+static void netsec_et_get_drvinfo(struct net_device *net_device,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "netsec", sizeof(info->driver));
+ strlcpy(info->bus_info, dev_name(net_device->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static int netsec_et_get_coalesce(struct net_device *net_device,
+ struct ethtool_coalesce *et_coalesce)
+{
+ struct netsec_priv *priv = netdev_priv(net_device);
+
+ *et_coalesce = priv->et_coalesce;
+
+ return 0;
+}
+
+static int netsec_et_set_coalesce(struct net_device *net_device,
+ struct ethtool_coalesce *et_coalesce)
+{
+ struct netsec_priv *priv = netdev_priv(net_device);
+
+ priv->et_coalesce = *et_coalesce;
+
+ if (priv->et_coalesce.tx_coalesce_usecs < 50)
+ priv->et_coalesce.tx_coalesce_usecs = 50;
+ if (priv->et_coalesce.tx_max_coalesced_frames < 1)
+ priv->et_coalesce.tx_max_coalesced_frames = 1;
+
+ netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
+ priv->et_coalesce.tx_max_coalesced_frames);
+ netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
+ priv->et_coalesce.tx_coalesce_usecs);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);
+
+ if (priv->et_coalesce.rx_coalesce_usecs < 50)
+ priv->et_coalesce.rx_coalesce_usecs = 50;
+ if (priv->et_coalesce.rx_max_coalesced_frames < 1)
+ priv->et_coalesce.rx_max_coalesced_frames = 1;
+
+ netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
+ priv->et_coalesce.rx_max_coalesced_frames);
+ netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
+ priv->et_coalesce.rx_coalesce_usecs);
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);
+
+ return 0;
+}
+
+static u32 netsec_et_get_msglevel(struct net_device *dev)
+{
+ struct netsec_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
+{
+ struct netsec_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = datum;
+}
+
+static const struct ethtool_ops netsec_ethtool_ops = {
+ .get_drvinfo = netsec_et_get_drvinfo,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = netsec_et_get_coalesce,
+ .set_coalesce = netsec_et_set_coalesce,
+ .get_msglevel = netsec_et_get_msglevel,
+ .set_msglevel = netsec_et_set_msglevel,
+};
+
+/************* NETDEV_OPS FOLLOW *************/
+
+static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
+ struct netsec_desc *desc)
+{
+ struct sk_buff *skb;
+
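+ /* With coherent DMA the usual 2-byte IP alignment is safe; on
+ * non-coherent platforms keep the buffer cache-line aligned
+ * instead, so that cache maintenance on the mapping cannot touch
+ * adjacent data.
+ */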
+ if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
+ skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
+ } else {
+ desc->len = L1_CACHE_ALIGN(desc->len);
+ skb = netdev_alloc_skb(priv->ndev, desc->len);
+ }
+ if (!skb)
+ return NULL;
+
+ desc->addr = skb->data;
+ desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, desc->dma_addr)) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+ return skb;
+}
+
+static void netsec_set_rx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring, u16 idx,
+ const struct netsec_desc *desc,
+ struct sk_buff *skb)
+{
+ struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
+ u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
+ (1 << NETSEC_RX_PKT_FS_FIELD) |
+ (1 << NETSEC_RX_PKT_LS_FIELD);
+
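+ /* OWN hands the descriptor to hardware; FS/LS (presumably
+ * first/last segment) mark each buffer as a whole frame; LD on the
+ * final entry makes the engine wrap back to the ring start.
+ */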
+ if (idx == DESC_NUM - 1)
+ attr |= (1 << NETSEC_RX_PKT_LD_FIELD);
+
+ de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+ de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+ de->buf_len_info = desc->len;
+ de->attr = attr;
+ dma_wmb();
+
+ dring->desc[idx].dma_addr = desc->dma_addr;
+ dring->desc[idx].addr = desc->addr;
+ dring->desc[idx].len = desc->len;
+ dring->desc[idx].skb = skb;
+}
+
+static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring,
+ u16 idx,
+ struct netsec_rx_pkt_info *rxpi,
+ struct netsec_desc *desc, u16 *len)
+{
+ struct netsec_de de = {};
+
+ memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
+
+ *len = de.buf_len_info >> 16;
+
+ rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
+ rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
+ rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
+ NETSEC_RX_PKT_ERR_MASK;
+ *desc = dring->desc[idx];
+ return desc->skb;
+}
+
+static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
+ struct netsec_rx_pkt_info *rxpi,
+ struct netsec_desc *desc,
+ u16 *len)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct sk_buff *tmp_skb, *skb = NULL;
+ struct netsec_desc td;
+ int tail;
+
+ *rxpi = (struct netsec_rx_pkt_info){};
+
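+ /* Buffer length is MTU plus, presumably, the 14-byte Ethernet
+ * header, a 4-byte VLAN tag and the 4-byte FCS (22 bytes in all).
+ */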
+ td.len = priv->ndev->mtu + 22;
+
+ tmp_skb = netsec_alloc_skb(priv, &td);
+
+ dma_rmb();
+
+ tail = dring->tail;
+
+ if (!tmp_skb) {
+ netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
+ dring->desc[tail].skb);
+ } else {
+ skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
+ netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
+ }
+
+ /* move tail ahead */
+ dring->tail = (dring->tail + 1) % DESC_NUM;
+
+ dring->pkt_cnt--;
+
+ return skb;
+}
+
+static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+ unsigned int pkts, bytes;
+
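+ /* NRM_TX_DONE_PKTCNT reports the packets completed since the last
+ * read (read-to-clear, by the look of the accumulation here); fold
+ * them into the running count before reclaiming up to 'budget'.
+ */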
+ dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
+
+ if (dring->pkt_cnt < budget)
+ budget = dring->pkt_cnt;
+
+ pkts = 0;
+ bytes = 0;
+
+ while (pkts < budget) {
+ struct netsec_desc *desc;
+ struct netsec_de *entry;
+ int tail, eop;
+
+ tail = dring->tail;
+
+ /* move tail ahead */
+ dring->tail = (tail + 1) % DESC_NUM;
+
+ desc = &dring->desc[tail];
+ entry = dring->vaddr + DESC_SZ * tail;
+
+ eop = (entry->attr >> NETSEC_TX_LAST) & 1;
+
+ dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+ DMA_TO_DEVICE);
+ if (eop) {
+ pkts++;
+ bytes += desc->skb->len;
+ dev_kfree_skb(desc->skb);
+ }
+ *desc = (struct netsec_desc){};
+ }
+ dring->pkt_cnt -= budget;
+
+ priv->ndev->stats.tx_packets += budget;
+ priv->ndev->stats.tx_bytes += bytes;
+
+ netdev_completed_queue(priv->ndev, budget, bytes);
+
+ return budget;
+}
+
+static int netsec_process_tx(struct netsec_priv *priv, int budget)
+{
+ struct net_device *ndev = priv->ndev;
+ int new, done = 0;
+
+ do {
+ new = netsec_clean_tx_dring(priv, budget);
+ done += new;
+ budget -= new;
+ } while (new);
+
+ if (done && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+
+ return done;
+}
+
+static int netsec_process_rx(struct netsec_priv *priv, int budget)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct net_device *ndev = priv->ndev;
+ struct netsec_rx_pkt_info rx_info;
+ int done = 0, rx_num = 0;
+ struct netsec_desc desc;
+ struct sk_buff *skb;
+ u16 len;
+
+ while (done < budget) {
+ if (!rx_num) {
+ rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
+ dring->pkt_cnt += rx_num;
+
+ /* move head ahead by 'rx_num' descriptors */
+ dring->head = (dring->head + rx_num) % DESC_NUM;
+
+ rx_num = dring->pkt_cnt;
+ if (!rx_num)
+ break;
+ }
+ done++;
+ rx_num--;
+ skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
+ if (unlikely(!skb) || rx_info.err_flag) {
+ netif_err(priv, drv, priv->ndev,
+ "%s: rx fail err(%d)\n",
+ __func__, rx_info.err_code);
+ ndev->stats.rx_dropped++;
+ continue;
+ }
+
+ dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
+ DMA_FROM_DEVICE);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, priv->ndev);
+
+ if (priv->rx_cksum_offload_flag &&
+ rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ }
+ }
+
+ return done;
+}
+
+static int netsec_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct netsec_priv *priv;
+ struct net_device *ndev;
+ int tx, rx, done, todo;
+
+ priv = container_of(napi, struct netsec_priv, napi);
+ ndev = priv->ndev;
+
+ todo = budget;
+ do {
+ if (!todo)
+ break;
+
+ tx = netsec_process_tx(priv, todo);
+ todo -= tx;
+
+ if (!todo)
+ break;
+
+ rx = netsec_process_rx(priv, todo);
+ todo -= rx;
+ } while (rx || tx);
+
+ done = budget - todo;
+
+ if (done < budget && napi_complete_done(napi, done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->reglock, flags);
+ netsec_write(priv, NETSEC_REG_INTEN_SET,
+ NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+ spin_unlock_irqrestore(&priv->reglock, flags);
+ }
+
+ return done;
+}
+
+static void netsec_set_tx_de(struct netsec_priv *priv,
+ struct netsec_desc_ring *dring,
+ const struct netsec_tx_pkt_ctrl *tx_ctrl,
+ const struct netsec_desc *desc,
+ struct sk_buff *skb)
+{
+ int idx = dring->head;
+ struct netsec_de *de;
+ u32 attr;
+
+ de = dring->vaddr + (DESC_SZ * idx);
+
+ attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
+ (1 << NETSEC_TX_SHIFT_PT_FIELD) |
+ (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
+ (1 << NETSEC_TX_SHIFT_FS_FIELD) |
+ (1 << NETSEC_TX_LAST) |
+ (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
+ (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
+ (1 << NETSEC_TX_SHIFT_TRS_FIELD);
+ if (idx == DESC_NUM - 1)
+ attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
+
+ de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+ de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+ de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
+ de->attr = attr;
+ dma_wmb();
+
+ dring->desc[idx] = *desc;
+ dring->desc[idx].skb = skb;
+
+ /* move head ahead */
+ dring->head = (dring->head + 1) % DESC_NUM;
+}
+
+static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+ struct netsec_tx_pkt_ctrl tx_ctrl = {};
+ struct netsec_desc tx_desc;
+ u16 tso_seg_len = 0;
+ int filled;
+
+ /* differentiate between a full and an empty ring */
+ if (dring->head >= dring->tail)
+ filled = dring->head - dring->tail;
+ else
+ filled = dring->head + DESC_NUM - dring->tail;
+
+ if (DESC_NUM - filled < 2) { /* if less than 2 available */
+ netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__);
+ netif_stop_queue(priv->ndev);
+ dma_wmb();
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_ctrl.cksum_offload_flag = true;
+
+ if (skb_is_gso(skb))
+ tso_seg_len = skb_shinfo(skb)->gso_size;
+
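+ /* For TSO, seed the TCP checksum with the pseudo-header sum
+ * (computed over a zero length) and clear the IP total/payload
+ * length; as is usual for TSO engines, the hardware fills in both
+ * for each segment it generates.
+ */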
+ if (tso_seg_len > 0) {
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ip_hdr(skb)->tot_len = 0;
+ tcp_hdr(skb)->check =
+ ~tcp_v4_check(0, ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0);
+ } else {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ tx_ctrl.tcp_seg_offload_flag = true;
+ tx_ctrl.tcp_seg_len = tso_seg_len;
+ }
+
+ tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
+ netif_err(priv, drv, priv->ndev,
+ "%s: DMA mapping failed\n", __func__);
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ tx_desc.addr = skb->data;
+ tx_desc.len = skb_headlen(skb);
+
+ skb_tx_timestamp(skb);
+ netdev_sent_queue(priv->ndev, skb->len);
+
+ netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
+ netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
+
+ return NETDEV_TX_OK;
+}
+
+static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+ struct netsec_desc *desc;
+ u16 idx;
+
+ if (!dring->vaddr || !dring->desc)
+ return;
+
+ for (idx = 0; idx < DESC_NUM; idx++) {
+ desc = &dring->desc[idx];
+ if (!desc->addr)
+ continue;
+
+ dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+ id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE);
+ dev_kfree_skb(desc->skb);
+ }
+
+ memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
+ memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);
+
+ dring->head = 0;
+ dring->tail = 0;
+ dring->pkt_cnt = 0;
+}
+
+static void netsec_free_dring(struct netsec_priv *priv, int id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+
+ if (dring->vaddr) {
+ dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
+ dring->vaddr, dring->desc_dma);
+ dring->vaddr = NULL;
+ }
+
+ kfree(dring->desc);
+ dring->desc = NULL;
+}
+
+static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+ int ret = 0;
+
+ dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
+ &dring->desc_dma, GFP_KERNEL);
+ if (!dring->vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dring->desc = kzalloc(DESC_NUM * sizeof(*dring->desc), GFP_KERNEL);
+ if (!dring->desc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+err:
+ netsec_free_dring(priv, id);
+
+ return ret;
+}
+
+static int netsec_setup_rx_dring(struct netsec_priv *priv)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct netsec_desc desc;
+ struct sk_buff *skb;
+ int n;
+
+ desc.len = priv->ndev->mtu + 22;
+
+ for (n = 0; n < DESC_NUM; n++) {
+ skb = netsec_alloc_skb(priv, &desc);
+ if (!skb) {
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+ return -ENOMEM;
+ }
+ netsec_set_rx_de(priv, dring, n, &desc, skb);
+ }
+
+ return 0;
+}
+
+static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
+ u32 addr_h, u32 addr_l, u32 size)
+{
+ u64 base = (u64)addr_h << 32 | addr_l;
+ void __iomem *ucode;
+ u32 i;
+
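+ /* The microcode image sits at a physical address advertised by the
+ * on-board EEPROM region; map it temporarily and stream it one
+ * 32-bit word at a time into the engine's command buffer register.
+ */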
+ ucode = ioremap(base, size * sizeof(u32));
+ if (!ucode)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++)
+ netsec_write(priv, reg, readl(ucode + i * 4));
+
+ iounmap(ucode);
+ return 0;
+}
+
+static int netsec_netdev_load_microcode(struct netsec_priv *priv)
+{
+ u32 addr_h, addr_l, size;
+ int err;
+
+ addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ addr_h = 0;
+ addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
+ size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
+ err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
+ addr_h, addr_l, size);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int netsec_reset_hardware(struct netsec_priv *priv)
+{
+ u32 value;
+ int err;
+
+ /* stop DMA engines */
+ if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
+ netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
+ NETSEC_DMA_CTRL_REG_STOP);
+ netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
+ NETSEC_DMA_CTRL_REG_STOP);
+
+ while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
+ NETSEC_DMA_CTRL_REG_STOP)
+ cpu_relax();
+
+ while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
+ NETSEC_DMA_CTRL_REG_STOP)
+ cpu_relax();
+ }
+
+ netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
+ netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
+ netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);
+
+ while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
+ cpu_relax();
+
+ /* set desc_start addr */
+ netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
+ upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
+ netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
+ lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
+
+ netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
+ upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
+ netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
+ lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
+
+ /* set normal tx dring ring config */
+ netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
+ 1 << NETSEC_REG_DESC_ENDIAN);
+ netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
+ 1 << NETSEC_REG_DESC_ENDIAN);
+
+ err = netsec_netdev_load_microcode(priv);
+ if (err) {
+ netif_err(priv, probe, priv->ndev,
+ "%s: failed to load microcode (%d)\n", __func__, err);
+ return err;
+ }
+
+ /* start DMA engines */
+ netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
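+ /* (freq / 10^6 - 1 presumably makes one DMA timer tick equal one
+ * microsecond of the reference clock.)
+ */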
+ netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);
+
+ usleep_range(1000, 2000);
+
+ if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
+ NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
+ netif_err(priv, probe, priv->ndev,
+ "microengine start failed\n");
+ return -ENXIO;
+ }
+ netsec_write(priv, NETSEC_REG_TOP_STATUS,
+ NETSEC_TOP_IRQ_REG_CODE_LOAD_END);
+
+ value = NETSEC_PKT_CTRL_REG_MODE_NRM;
+ if (priv->ndev->mtu > ETH_DATA_LEN)
+ value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;
+
+ /* change to normal mode */
+ netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
+ netsec_write(priv, NETSEC_REG_PKT_CTRL, value);
+
+ while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
+ NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
+ cpu_relax();
+
+ /* clear any pending EMPTY/ERR irq status */
+ netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);
+
+ /* Disable TX & RX intr */
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
+
+ return 0;
+}
+
+static int netsec_start_gmac(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->ndev->phydev;
+ u32 value = 0;
+ int ret;
+
+ if (phydev->speed != SPEED_1000)
+ value = (NETSEC_GMAC_MCR_REG_CST |
+ NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);
+
+ if (netsec_mac_write(priv, GMAC_REG_MCR, value))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_BMR,
+ NETSEC_GMAC_BMR_REG_RESET))
+ return -ETIMEDOUT;
+
+ /* Wait soft reset */
+ usleep_range(1000, 5000);
+
+ ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
+ if (ret)
+ return ret;
+ if (value & NETSEC_GMAC_BMR_REG_SWR)
+ return -EAGAIN;
+
+ netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
+ if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
+ return -ETIMEDOUT;
+
+ netsec_write(priv, MAC_REG_DESC_INIT, 1);
+ if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
+ return -ETIMEDOUT;
+
+ if (netsec_mac_write(priv, GMAC_REG_BMR,
+ NETSEC_GMAC_BMR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_RDLAR,
+ NETSEC_GMAC_RDLAR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_TDLAR,
+ NETSEC_GMAC_TDLAR_REG_COMMON))
+ return -ETIMEDOUT;
+ if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
+ return -ETIMEDOUT;
+
+ ret = netsec_mac_update_to_phy_state(priv);
+ if (ret)
+ return ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
+ if (ret)
+ return ret;
+
+ value |= NETSEC_GMAC_OMR_REG_SR;
+ value |= NETSEC_GMAC_OMR_REG_ST;
+
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
+
+ netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);
+
+ if (netsec_mac_write(priv, GMAC_REG_OMR, value))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int netsec_stop_gmac(struct netsec_priv *priv)
+{
+ u32 value;
+ int ret;
+
+ ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
+ if (ret)
+ return ret;
+ value &= ~NETSEC_GMAC_OMR_REG_SR;
+ value &= ~NETSEC_GMAC_OMR_REG_ST;
+
+ /* disable all interrupts */
+ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
+ netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
+
+ return netsec_mac_write(priv, GMAC_REG_OMR, value);
+}
+
+static void netsec_phy_adjust_link(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ if (ndev->phydev->link)
+ netsec_start_gmac(priv);
+ else
+ netsec_stop_gmac(priv);
+
+ phy_print_status(ndev->phydev);
+}
+
+static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
+{
+ struct netsec_priv *priv = dev_id;
+ u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
+ unsigned long flags;
+
+	/* ack pending TX/RX interrupt status */
+ if (status & NETSEC_IRQ_TX) {
+ val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
+ netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
+ }
+ if (status & NETSEC_IRQ_RX) {
+ val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
+ netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
+ }
+
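+	/* mask TX/RX interrupts while the NAPI poll runs */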
+ spin_lock_irqsave(&priv->reglock, flags);
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+ spin_unlock_irqrestore(&priv->reglock, flags);
+
+ napi_schedule(&priv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int netsec_netdev_open(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ pm_runtime_get_sync(priv->dev);
+
+ ret = netsec_setup_rx_dring(priv);
+ if (ret) {
+		netif_err(priv, probe, priv->ndev,
+			  "%s: failed to set up Rx ring\n", __func__);
+ goto err1;
+ }
+
+ ret = request_irq(priv->ndev->irq, netsec_irq_handler,
+ IRQF_SHARED, "netsec", priv);
+ if (ret) {
+ netif_err(priv, drv, priv->ndev, "request_irq failed\n");
+ goto err2;
+ }
+
+ if (dev_of_node(priv->dev)) {
+ if (!of_phy_connect(priv->ndev, priv->phy_np,
+ netsec_phy_adjust_link, 0,
+ priv->phy_interface)) {
+ netif_err(priv, link, priv->ndev, "missing PHY\n");
+ ret = -ENODEV;
+ goto err3;
+ }
+ } else {
+ ret = phy_connect_direct(priv->ndev, priv->phydev,
+ netsec_phy_adjust_link,
+ priv->phy_interface);
+ if (ret) {
+ netif_err(priv, link, priv->ndev,
+ "phy_connect_direct() failed (%d)\n", ret);
+ goto err3;
+ }
+ }
+
+ phy_start(ndev->phydev);
+
+ netsec_start_gmac(priv);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ /* Enable RX intr. */
+ netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX);
+
+ return 0;
+err3:
+ free_irq(priv->ndev->irq, priv);
+err2:
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+err1:
+ pm_runtime_put_sync(priv->dev);
+ return ret;
+}
+
+static int netsec_netdev_stop(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(priv->ndev);
+ dma_wmb();
+
+ napi_disable(&priv->napi);
+
+ netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
+ netsec_stop_gmac(priv);
+
+ free_irq(priv->ndev->irq, priv);
+
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+
+ pm_runtime_put_sync(priv->dev);
+
+ return 0;
+}
+
+static int netsec_netdev_init(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
+ if (ret)
+ return ret;
+
+ ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
+ if (ret)
+ goto err1;
+
+ ret = netsec_reset_hardware(priv);
+ if (ret)
+ goto err2;
+
+ return 0;
+err2:
+ netsec_free_dring(priv, NETSEC_RING_RX);
+err1:
+ netsec_free_dring(priv, NETSEC_RING_TX);
+ return ret;
+}
+
+static void netsec_netdev_uninit(struct net_device *ndev)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ netsec_free_dring(priv, NETSEC_RING_RX);
+ netsec_free_dring(priv, NETSEC_RING_TX);
+}
+
+static int netsec_netdev_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);
+
+ return 0;
+}
+
+static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
+ int cmd)
+{
+ return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static const struct net_device_ops netsec_netdev_ops = {
+ .ndo_init = netsec_netdev_init,
+ .ndo_uninit = netsec_netdev_uninit,
+ .ndo_open = netsec_netdev_open,
+ .ndo_stop = netsec_netdev_stop,
+ .ndo_start_xmit = netsec_netdev_start_xmit,
+ .ndo_set_features = netsec_netdev_set_features,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = netsec_netdev_ioctl,
+};
+
+static int netsec_of_probe(struct platform_device *pdev,
+ struct netsec_priv *priv)
+{
+ priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!priv->phy_np) {
+ dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
+ return -EINVAL;
+ }
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL); /* gets 'phy_ref_clk' */
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "phy_ref_clk not found\n");
+ return PTR_ERR(priv->clk);
+ }
+ priv->freq = clk_get_rate(priv->clk);
+
+ return 0;
+}
+
+static int netsec_acpi_probe(struct platform_device *pdev,
+ struct netsec_priv *priv, u32 *phy_addr)
+{
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_ACPI))
+ return -ENODEV;
+
+ ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "missing required property 'phy-channel'\n");
+ return ret;
+ }
+
+ ret = device_property_read_u32(&pdev->dev,
+ "socionext,phy-clock-frequency",
+ &priv->freq);
+ if (ret)
+ dev_err(&pdev->dev,
+ "missing required property 'socionext,phy-clock-frequency'\n");
+ return ret;
+}
+
+static void netsec_unregister_mdio(struct netsec_priv *priv)
+{
+ struct phy_device *phydev = priv->phydev;
+
+ if (!dev_of_node(priv->dev) && phydev) {
+ phy_device_remove(phydev);
+ phy_device_free(phydev);
+ }
+
+ mdiobus_unregister(priv->mii_bus);
+}
+
+static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
+{
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc(priv->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
+ bus->priv = priv;
+ bus->name = "SNI NETSEC MDIO";
+ bus->read = netsec_phy_read;
+ bus->write = netsec_phy_write;
+ bus->parent = priv->dev;
+ priv->mii_bus = bus;
+
+ if (dev_of_node(priv->dev)) {
+ struct device_node *mdio_node, *parent = dev_of_node(priv->dev);
+
+ mdio_node = of_get_child_by_name(parent, "mdio");
+ if (mdio_node) {
+ parent = mdio_node;
+ } else {
+			/* older firmware doesn't populate the mdio subnode;
+			 * register on the parent node instead so that such
+			 * firmware can be upgraded in due time.
+			 */
+ dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
+ }
+
+ ret = of_mdiobus_register(bus, parent);
+ of_node_put(mdio_node);
+
+ if (ret) {
+ dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
+ return ret;
+ }
+ } else {
+ /* Mask out all PHYs from auto probing. */
+ bus->phy_mask = ~0;
+ ret = mdiobus_register(bus);
+ if (ret) {
+ dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
+ return ret;
+ }
+
+ priv->phydev = get_phy_device(bus, phy_addr, false);
+ if (IS_ERR(priv->phydev)) {
+ ret = PTR_ERR(priv->phydev);
+ dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
+ priv->phydev = NULL;
+ return -ENODEV;
+ }
+
+ ret = phy_device_register(priv->phydev);
+ if (ret) {
+ mdiobus_unregister(bus);
+ dev_err(priv->dev,
+ "phy_device_register err(%d)\n", ret);
+ }
+ }
+
+ return ret;
+}
+
+static int netsec_probe(struct platform_device *pdev)
+{
+ struct resource *mmio_res, *eeprom_res, *irq_res;
+ u8 *mac, macbuf[ETH_ALEN];
+ struct netsec_priv *priv;
+ u32 hw_ver, phy_addr = 0;
+ struct net_device *ndev;
+ int ret;
+
+ mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mmio_res) {
+ dev_err(&pdev->dev, "No MMIO resource found.\n");
+ return -ENODEV;
+ }
+
+ eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!eeprom_res) {
+		dev_err(&pdev->dev, "No EEPROM resource found.\n");
+ return -ENODEV;
+ }
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res) {
+ dev_err(&pdev->dev, "No IRQ resource found.\n");
+ return -ENODEV;
+ }
+
+ ndev = alloc_etherdev(sizeof(*priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+
+ spin_lock_init(&priv->reglock);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ platform_set_drvdata(pdev, priv);
+ ndev->irq = irq_res->start;
+ priv->dev = &pdev->dev;
+ priv->ndev = ndev;
+
+ priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
+ NETIF_MSG_LINK | NETIF_MSG_PROBE;
+
+ priv->phy_interface = device_get_phy_mode(&pdev->dev);
+ if (priv->phy_interface < 0) {
+ dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
+ ret = -ENODEV;
+ goto free_ndev;
+ }
+
+ priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
+ resource_size(mmio_res));
+ if (!priv->ioaddr) {
+ dev_err(&pdev->dev, "devm_ioremap() failed\n");
+ ret = -ENXIO;
+ goto free_ndev;
+ }
+
+ priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
+ resource_size(eeprom_res));
+ if (!priv->eeprom_base) {
+ dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
+ ret = -ENXIO;
+ goto free_ndev;
+ }
+
+ mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
+ if (mac)
+ ether_addr_copy(ndev->dev_addr, mac);
+
+ if (priv->eeprom_base &&
+ (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
+ void __iomem *macp = priv->eeprom_base +
+ NETSEC_EEPROM_MAC_ADDRESS;
+
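+		/* each 32-bit EEPROM word stores the MAC bytes in reverse
+		 * order, hence the swizzled readout below
+		 */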
+ ndev->dev_addr[0] = readb(macp + 3);
+ ndev->dev_addr[1] = readb(macp + 2);
+ ndev->dev_addr[2] = readb(macp + 1);
+ ndev->dev_addr[3] = readb(macp + 0);
+ ndev->dev_addr[4] = readb(macp + 7);
+ ndev->dev_addr[5] = readb(macp + 6);
+ }
+
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ dev_warn(&pdev->dev, "No MAC address found, using random\n");
+ eth_hw_addr_random(ndev);
+ }
+
+ if (dev_of_node(&pdev->dev))
+ ret = netsec_of_probe(pdev, priv);
+ else
+ ret = netsec_acpi_probe(pdev, priv, &phy_addr);
+ if (ret)
+ goto free_ndev;
+
+ if (!priv->freq) {
+ dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
+ ret = -ENODEV;
+ goto free_ndev;
+ }
+
+	/* defaults tuned for throughput */
+ priv->et_coalesce.rx_coalesce_usecs = 500;
+ priv->et_coalesce.rx_max_coalesced_frames = 8;
+ priv->et_coalesce.tx_coalesce_usecs = 500;
+ priv->et_coalesce.tx_max_coalesced_frames = 8;
+
+ ret = device_property_read_u32(&pdev->dev, "max-frame-size",
+ &ndev->max_mtu);
+ if (ret < 0)
+ ndev->max_mtu = ETH_DATA_LEN;
+
+	/* hold a runtime PM reference just for probe; open/close take their own */
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
+ /* this driver only supports F_TAIKI style NETSEC */
+ if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
+ NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
+ ret = -ENODEV;
+ goto pm_disable;
+ }
+
+ dev_info(&pdev->dev, "hardware revision %d.%d\n",
+ hw_ver >> 16, hw_ver & 0xffff);
+
+ netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET);
+
+ ndev->netdev_ops = &netsec_netdev_ops;
+ ndev->ethtool_ops = &netsec_ethtool_ops;
+
+ ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ ndev->hw_features = ndev->features;
+
+ priv->rx_cksum_offload_flag = true;
+
+ ret = netsec_register_mdio(priv, phy_addr);
+ if (ret)
+ goto unreg_napi;
+
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+ dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ netif_err(priv, probe, ndev, "register_netdev() failed\n");
+ goto unreg_mii;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+ return 0;
+
+unreg_mii:
+ netsec_unregister_mdio(priv);
+unreg_napi:
+ netif_napi_del(&priv->napi);
+pm_disable:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+free_ndev:
+ free_netdev(ndev);
+ dev_err(&pdev->dev, "init failed\n");
+
+ return ret;
+}
+
+static int netsec_remove(struct platform_device *pdev)
+{
+ struct netsec_priv *priv = platform_get_drvdata(pdev);
+
+ unregister_netdev(priv->ndev);
+
+ netsec_unregister_mdio(priv);
+
+ netif_napi_del(&priv->napi);
+
+ pm_runtime_disable(&pdev->dev);
+ free_netdev(priv->ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int netsec_runtime_suspend(struct device *dev)
+{
+ struct netsec_priv *priv = dev_get_drvdata(dev);
+
+ netsec_write(priv, NETSEC_REG_CLK_EN, 0);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int netsec_runtime_resume(struct device *dev)
+{
+ struct netsec_priv *priv = dev_get_drvdata(dev);
+
+ clk_prepare_enable(priv->clk);
+
+ netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
+ NETSEC_CLK_EN_REG_DOM_C |
+ NETSEC_CLK_EN_REG_DOM_G);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops netsec_pm_ops = {
+ SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
+};
+
+static const struct of_device_id netsec_dt_ids[] = {
+ { .compatible = "socionext,synquacer-netsec" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, netsec_dt_ids);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id netsec_acpi_ids[] = {
+ { "SCX0001" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
+#endif
+
+static struct platform_driver netsec_driver = {
+ .probe = netsec_probe,
+ .remove = netsec_remove,
+ .driver = {
+ .name = "netsec",
+ .pm = &netsec_pm_ops,
+ .of_match_table = netsec_dt_ids,
+ .acpi_match_table = ACPI_PTR(netsec_acpi_ids),
+ },
+};
+module_platform_driver(netsec_driver);
+
+MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_DESCRIPTION("NETSEC Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
new file mode 100644
index 000000000000..111e7ca9df56
--- /dev/null
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -0,0 +1,1736 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sni_ave.c - Socionext UniPhier AVE ethernet driver
+ * Copyright 2014 Panasonic Corporation
+ * Copyright 2015-2017 Socionext Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
+
+/* General Register Group */
+#define AVE_IDR 0x000 /* ID */
+#define AVE_VR 0x004 /* Version */
+#define AVE_GRR 0x008 /* Global Reset */
+#define AVE_CFGR 0x00c /* Configuration */
+
+/* Interrupt Register Group */
+#define AVE_GIMR 0x100 /* Global Interrupt Mask */
+#define AVE_GISR 0x104 /* Global Interrupt Status */
+
+/* MAC Register Group */
+#define AVE_TXCR 0x200 /* TX Setup */
+#define AVE_RXCR 0x204 /* RX Setup */
+#define AVE_RXMAC1R 0x208 /* MAC address (lower) */
+#define AVE_RXMAC2R 0x20c /* MAC address (upper) */
+#define AVE_MDIOCTR 0x214 /* MDIO Control */
+#define AVE_MDIOAR 0x218 /* MDIO Address */
+#define AVE_MDIOWDR 0x21c /* MDIO Data */
+#define AVE_MDIOSR 0x220 /* MDIO Status */
+#define AVE_MDIORDR 0x224 /* MDIO Rd Data */
+
+/* Descriptor Control Register Group */
+#define AVE_DESCC 0x300 /* Descriptor Control */
+#define AVE_TXDC 0x304 /* TX Descriptor Configuration */
+#define AVE_RXDC0 0x308 /* RX Descriptor Ring0 Configuration */
+#define AVE_IIRQC 0x34c /* Interval IRQ Control */
+
+/* Packet Filter Register Group */
+#define AVE_PKTF_BASE 0x800 /* PF Base Address */
+#define AVE_PFMBYTE_BASE 0xd00 /* PF Mask Byte Base Address */
+#define AVE_PFMBIT_BASE 0xe00 /* PF Mask Bit Base Address */
+#define AVE_PFSEL_BASE 0xf00 /* PF Selector Base Address */
+#define AVE_PFEN 0xffc /* Packet Filter Enable */
+#define AVE_PKTF(ent) (AVE_PKTF_BASE + (ent) * 0x40)
+#define AVE_PFMBYTE(ent) (AVE_PFMBYTE_BASE + (ent) * 8)
+#define AVE_PFMBIT(ent) (AVE_PFMBIT_BASE + (ent) * 4)
+#define AVE_PFSEL(ent) (AVE_PFSEL_BASE + (ent) * 4)
+
+/* 64bit descriptor memory */
+#define AVE_DESC_SIZE_64 12 /* Descriptor Size */
+
+#define AVE_TXDM_64 0x1000 /* Tx Descriptor Memory */
+#define AVE_RXDM_64 0x1c00 /* Rx Descriptor Memory */
+
+#define AVE_TXDM_SIZE_64 0x0ba0 /* Tx Descriptor Memory Size 3KB */
+#define AVE_RXDM_SIZE_64 0x6000 /* Rx Descriptor Memory Size 24KB */
+
+/* 32bit descriptor memory */
+#define AVE_DESC_SIZE_32 8 /* Descriptor Size */
+
+#define AVE_TXDM_32 0x1000 /* Tx Descriptor Memory */
+#define AVE_RXDM_32 0x1800 /* Rx Descriptor Memory */
+
+#define AVE_TXDM_SIZE_32 0x07c0 /* Tx Descriptor Memory Size 2KB */
+#define AVE_RXDM_SIZE_32 0x4000 /* Rx Descriptor Memory Size 16KB */
+
+/* RMII Bridge Register Group */
+#define AVE_RSTCTRL 0x8028 /* Reset control */
+#define AVE_RSTCTRL_RMIIRST BIT(16)
+#define AVE_LINKSEL 0x8034 /* Link speed setting */
+#define AVE_LINKSEL_100M BIT(0)
+
+/* AVE_GRR */
+#define AVE_GRR_RXFFR BIT(5) /* Reset RxFIFO */
+#define AVE_GRR_PHYRST BIT(4) /* Reset external PHY */
+#define AVE_GRR_GRST BIT(0) /* Reset all MAC */
+
+/* AVE_CFGR */
+#define AVE_CFGR_FLE BIT(31) /* Filter Function */
+#define AVE_CFGR_CHE BIT(30) /* Checksum Function */
+#define AVE_CFGR_MII BIT(27) /* Func mode (1:MII/RMII, 0:RGMII) */
+#define AVE_CFGR_IPFCEN BIT(24) /* IP fragment sum Enable */
+
+/* AVE_GISR (common with GIMR) */
+#define AVE_GI_PHY BIT(24) /* PHY interrupt */
+#define AVE_GI_TX BIT(16) /* Tx complete */
+#define AVE_GI_RXERR BIT(8) /* Receive frame more than max size */
+#define AVE_GI_RXOVF BIT(7) /* Overflow at the RxFIFO */
+#define AVE_GI_RXDROP BIT(6) /* Drop packet */
+#define AVE_GI_RXIINT BIT(5) /* Interval interrupt */
+
+/* AVE_TXCR */
+#define AVE_TXCR_FLOCTR BIT(18) /* Flow control */
+#define AVE_TXCR_TXSPD_1G BIT(17)
+#define AVE_TXCR_TXSPD_100 BIT(16)
+
+/* AVE_RXCR */
+#define AVE_RXCR_RXEN BIT(30) /* Rx enable */
+#define AVE_RXCR_FDUPEN BIT(22) /* Interface mode */
+#define AVE_RXCR_FLOCTR BIT(21) /* Flow control */
+#define AVE_RXCR_AFEN BIT(19) /* MAC address filter */
+#define AVE_RXCR_DRPEN BIT(18) /* Drop pause frame */
+#define AVE_RXCR_MPSIZ_MASK GENMASK(10, 0)
+
+/* AVE_MDIOCTR */
+#define AVE_MDIOCTR_RREQ BIT(3) /* Read request */
+#define AVE_MDIOCTR_WREQ BIT(2) /* Write request */
+
+/* AVE_MDIOSR */
+#define AVE_MDIOSR_STS BIT(0) /* access status */
+
+/* AVE_DESCC */
+#define AVE_DESCC_STATUS_MASK GENMASK(31, 16)
+#define AVE_DESCC_RD0 BIT(8) /* Enable Rx descriptor Ring0 */
+#define AVE_DESCC_RDSTP BIT(4) /* Pause Rx descriptor */
+#define AVE_DESCC_TD BIT(0) /* Enable Tx descriptor */
+
+/* AVE_TXDC */
+#define AVE_TXDC_SIZE GENMASK(27, 16) /* Size of Tx descriptor */
+#define AVE_TXDC_ADDR GENMASK(11, 0) /* Start address */
+#define AVE_TXDC_ADDR_START 0
+
+/* AVE_RXDC0 */
+#define AVE_RXDC0_SIZE GENMASK(30, 16) /* Size of Rx descriptor */
+#define AVE_RXDC0_ADDR GENMASK(14, 0) /* Start address */
+#define AVE_RXDC0_ADDR_START 0
+
+/* AVE_IIRQC */
+#define AVE_IIRQC_EN0 BIT(27) /* Enable interval interrupt Ring0 */
+#define AVE_IIRQC_BSCK GENMASK(15, 0) /* Interval count unit */
+
+/* Command status for descriptor */
+#define AVE_STS_OWN BIT(31) /* Descriptor ownership */
+#define AVE_STS_INTR BIT(29) /* Request for interrupt */
+#define AVE_STS_OK BIT(27) /* Normal transmit */
+/* TX */
+#define AVE_STS_NOCSUM BIT(28) /* No use HW checksum */
+#define AVE_STS_1ST BIT(26) /* Head of buffer chain */
+#define AVE_STS_LAST BIT(25) /* Tail of buffer chain */
+#define AVE_STS_OWC	BIT(21)	/* Out of window, late collision */
+#define AVE_STS_EC BIT(20) /* Excess collision occurred */
+#define AVE_STS_PKTLEN_TX_MASK GENMASK(15, 0)
+/* RX */
+#define AVE_STS_CSSV BIT(21) /* Checksum check performed */
+#define AVE_STS_CSER BIT(20) /* Checksum error detected */
+#define AVE_STS_PKTLEN_RX_MASK GENMASK(10, 0)
+
+/* Packet filter */
+#define AVE_PFMBYTE_MASK0 (GENMASK(31, 8) | GENMASK(5, 0))
+#define AVE_PFMBYTE_MASK1 GENMASK(25, 0)
+#define AVE_PFMBIT_MASK GENMASK(15, 0)
+
+#define AVE_PF_SIZE	17	/* Total number of packet filters */
+#define AVE_PF_MULTICAST_SIZE	7	/* Number of multicast filter entries */
+
+#define AVE_PFNUM_FILTER 0 /* No.0 */
+#define AVE_PFNUM_UNICAST 1 /* No.1 */
+#define AVE_PFNUM_BROADCAST 2 /* No.2 */
+#define AVE_PFNUM_MULTICAST 11 /* No.11-17 */
+
+/* NETIF Message control */
+#define AVE_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+/* Parameter for descriptor */
+#define AVE_NR_TXDESC 32 /* Tx descriptor */
+#define AVE_NR_RXDESC 64 /* Rx descriptor */
+
+#define AVE_DESC_OFS_CMDSTS 0
+#define AVE_DESC_OFS_ADDRL 4
+#define AVE_DESC_OFS_ADDRU 8
+
+/* Parameter for ethernet frame */
+#define AVE_MAX_ETHFRAME 1518
+
+/* Parameter for interrupt */
+#define AVE_INTM_COUNT 20
+#define AVE_FORCE_TXINTCNT 1
+
+#define IS_DESC_64BIT(p) ((p)->data->is_desc_64bit)
+
+enum desc_id {
+ AVE_DESCID_RX,
+ AVE_DESCID_TX,
+};
+
+enum desc_state {
+ AVE_DESC_RX_PERMIT,
+ AVE_DESC_RX_SUSPEND,
+ AVE_DESC_START,
+ AVE_DESC_STOP,
+};
+
+struct ave_desc {
+ struct sk_buff *skbs;
+ dma_addr_t skbs_dma;
+ size_t skbs_dmalen;
+};
+
+struct ave_desc_info {
+ u32 ndesc; /* number of descriptor */
+ u32 daddr; /* start address of descriptor */
+ u32 proc_idx; /* index of processing packet */
+ u32 done_idx; /* index of processed packet */
+ struct ave_desc *desc; /* skb info related descriptor */
+};
+
+struct ave_soc_data {
+ bool is_desc_64bit;
+};
+
+struct ave_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+ u64 errors;
+ u64 dropped;
+ u64 collisions;
+ u64 fifo_errors;
+};
+
+struct ave_private {
+ void __iomem *base;
+ int irq;
+ int phy_id;
+ unsigned int desc_size;
+ u32 msg_enable;
+ struct clk *clk;
+ struct reset_control *rst;
+ phy_interface_t phy_mode;
+ struct phy_device *phydev;
+ struct mii_bus *mdio;
+
+ /* stats */
+ struct ave_stats stats_rx;
+ struct ave_stats stats_tx;
+
+ /* NAPI support */
+ struct net_device *ndev;
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
+
+ /* descriptor */
+ struct ave_desc_info rx;
+ struct ave_desc_info tx;
+
+ /* flow control */
+ int pause_auto;
+ int pause_rx;
+ int pause_tx;
+
+ const struct ave_soc_data *data;
+};
+
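+/* AVE descriptors live in on-chip memory behind the register window, so they
+ * are accessed with readl()/writel() rather than DMA-coherent allocations.
+ */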
+static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
+ int offset)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 addr;
+
+ addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
+ + entry * priv->desc_size + offset;
+
+ return readl(priv->base + addr);
+}
+
+static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id,
+ int entry)
+{
+ return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS);
+}
+
+static void ave_desc_write(struct net_device *ndev, enum desc_id id,
+ int entry, int offset, u32 val)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 addr;
+
+ addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
+ + entry * priv->desc_size + offset;
+
+ writel(val, priv->base + addr);
+}
+
+static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id,
+ int entry, u32 val)
+{
+ ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val);
+}
+
+static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id,
+ int entry, dma_addr_t paddr)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL,
+ lower_32_bits(paddr));
+ if (IS_DESC_64BIT(priv))
+ ave_desc_write(ndev, id,
+ entry, AVE_DESC_OFS_ADDRU,
+ upper_32_bits(paddr));
+}
+
+static u32 ave_irq_disable_all(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 ret;
+
+ ret = readl(priv->base + AVE_GIMR);
+ writel(0, priv->base + AVE_GIMR);
+
+ return ret;
+}
+
+static void ave_irq_restore(struct net_device *ndev, u32 val)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ writel(val, priv->base + AVE_GIMR);
+}
+
+static void ave_irq_enable(struct net_device *ndev, u32 bitflag)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
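+	/* unmask the interrupt, then clear any stale status bit for it */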
+ writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR);
+ writel(bitflag, priv->base + AVE_GISR);
+}
+
+static void ave_hw_write_macaddr(struct net_device *ndev,
+ const unsigned char *mac_addr,
+ int reg1, int reg2)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
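+	/* pack bytes 0-3 into reg1 and bytes 4-5 into reg2, LSB first */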
+ writel(mac_addr[0] | mac_addr[1] << 8 |
+ mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1);
+ writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2);
+}
+
+static void ave_hw_read_version(struct net_device *ndev, char *buf, int len)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 major, minor, vr;
+
+ vr = readl(priv->base + AVE_VR);
+ major = (vr & GENMASK(15, 8)) >> 8;
+ minor = (vr & GENMASK(7, 0));
+ snprintf(buf, len, "v%u.%u", major, minor);
+}
+
+static void ave_ethtool_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct device *dev = ndev->dev.parent;
+
+ strlcpy(info->driver, dev->driver->name, sizeof(info->driver));
+ strlcpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
+ ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
+}
+
+static u32 ave_ethtool_get_msglevel(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ priv->msg_enable = val;
+}
+
+static void ave_ethtool_get_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (ndev->phydev)
+ phy_ethtool_get_wol(ndev->phydev, wol);
+}
+
+static int ave_ethtool_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ if (!ndev->phydev ||
+ (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
+ return -EOPNOTSUPP;
+
+ ret = phy_ethtool_set_wol(ndev->phydev, wol);
+ if (!ret)
+ device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
+
+ return ret;
+}
+
+static void ave_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ pause->autoneg = priv->pause_auto;
+ pause->rx_pause = priv->pause_rx;
+ pause->tx_pause = priv->pause_tx;
+}
+
+static int ave_ethtool_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+
+ if (!phydev)
+ return -EINVAL;
+
+ priv->pause_auto = pause->autoneg;
+ priv->pause_rx = pause->rx_pause;
+ priv->pause_tx = pause->tx_pause;
+
+ phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ if (pause->rx_pause)
+ phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ if (pause->tx_pause)
+ phydev->advertising ^= ADVERTISED_Asym_Pause;
+
+ if (pause->autoneg) {
+ if (netif_running(ndev))
+ phy_start_aneg(phydev);
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops ave_ethtool_ops = {
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_drvinfo = ave_ethtool_get_drvinfo,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = ave_ethtool_get_msglevel,
+ .set_msglevel = ave_ethtool_set_msglevel,
+ .get_wol = ave_ethtool_get_wol,
+ .set_wol = ave_ethtool_set_wol,
+ .get_pauseparam = ave_ethtool_get_pauseparam,
+ .set_pauseparam = ave_ethtool_set_pauseparam,
+};
+
+static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum)
+{
+ struct net_device *ndev = bus->priv;
+ struct ave_private *priv;
+ u32 mdioctl, mdiosr;
+ int ret;
+
+ priv = netdev_priv(ndev);
+
+ /* write address */
+ writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
+
+ /* read request */
+ mdioctl = readl(priv->base + AVE_MDIOCTR);
+ writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ,
+ priv->base + AVE_MDIOCTR);
+
+ ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
+ !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
+ if (ret) {
+ netdev_err(ndev, "failed to read (phy:%d reg:%x)\n",
+ phyid, regnum);
+ return ret;
+ }
+
+ return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0);
+}
+
+static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum,
+ u16 val)
+{
+ struct net_device *ndev = bus->priv;
+ struct ave_private *priv;
+ u32 mdioctl, mdiosr;
+ int ret;
+
+ priv = netdev_priv(ndev);
+
+ /* write address */
+ writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
+
+ /* write data */
+ writel(val, priv->base + AVE_MDIOWDR);
+
+ /* write request */
+ mdioctl = readl(priv->base + AVE_MDIOCTR);
+ writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ,
+ priv->base + AVE_MDIOCTR);
+
+ ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
+ !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
+ if (ret)
+ netdev_err(ndev, "failed to write (phy:%d reg:%x)\n",
+ phyid, regnum);
+
+ return ret;
+}
+
+static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc,
+ void *ptr, size_t len, enum dma_data_direction dir,
+ dma_addr_t *paddr)
+{
+ dma_addr_t map_addr;
+
+ map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr)))
+ return -ENOMEM;
+
+ desc->skbs_dma = map_addr;
+ desc->skbs_dmalen = len;
+ *paddr = map_addr;
+
+ return 0;
+}
+
+static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc,
+ enum dma_data_direction dir)
+{
+ if (!desc->skbs_dma)
+ return;
+
+ dma_unmap_single(ndev->dev.parent,
+ desc->skbs_dma, desc->skbs_dmalen, dir);
+ desc->skbs_dma = 0;
+}
+
+/* Prepare Rx descriptor and memory */
+static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret;
+
+ skb = priv->rx.desc[entry].skbs;
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(ndev,
+ AVE_MAX_ETHFRAME);
+ if (!skb) {
+ netdev_err(ndev, "can't allocate skb for Rx\n");
+ return -ENOMEM;
+ }
+ }
+
+	/* mark the entry CPU-owned (OWN=1 on Rx) while it is being set up */
+ ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
+ AVE_STS_INTR | AVE_STS_OWN);
+
+	/* map Rx buffer
+	 * The buffer set in an Rx descriptor has two restrictions:
+	 * - its address must be 4-byte aligned.
+	 * - it must begin with 2 bytes of headroom; data is written from
+	 *   (buffer + 2).
+	 * To satisfy both, rewind the pointer that
+	 * netdev_alloc_skb_ip_align() advanced by NET_IP_ALIGN, and grow
+	 * the mapping size by NET_IP_ALIGN.
+	 */
+ ret = ave_dma_map(ndev, &priv->rx.desc[entry],
+ skb->data - NET_IP_ALIGN,
+ AVE_MAX_ETHFRAME + NET_IP_ALIGN,
+ DMA_FROM_DEVICE, &paddr);
+ if (ret) {
+ netdev_err(ndev, "can't map skb for Rx\n");
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ priv->rx.desc[entry].skbs = skb;
+
+ /* set buffer pointer */
+ ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr);
+
+	/* hand the entry back to HW (OWN=0) along with the buffer size */
+ ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
+ AVE_STS_INTR | AVE_MAX_ETHFRAME);
+
+ return ret;
+}
+
+/* Switch state of descriptor */
+static int ave_desc_switch(struct net_device *ndev, enum desc_state state)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ int ret = 0;
+ u32 val;
+
+ switch (state) {
+ case AVE_DESC_START:
+ writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC);
+ break;
+
+ case AVE_DESC_STOP:
+ writel(0, priv->base + AVE_DESCC);
+ if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val,
+ 150, 15000)) {
+ netdev_err(ndev, "can't stop descriptor\n");
+ ret = -EBUSY;
+ }
+ break;
+
+ case AVE_DESC_RX_SUSPEND:
+ val = readl(priv->base + AVE_DESCC);
+ val |= AVE_DESCC_RDSTP;
+ val &= ~AVE_DESCC_STATUS_MASK;
+ writel(val, priv->base + AVE_DESCC);
+ if (readl_poll_timeout(priv->base + AVE_DESCC, val,
+ val & (AVE_DESCC_RDSTP << 16),
+ 150, 150000)) {
+ netdev_err(ndev, "can't suspend descriptor\n");
+ ret = -EBUSY;
+ }
+ break;
+
+ case AVE_DESC_RX_PERMIT:
+ val = readl(priv->base + AVE_DESCC);
+ val &= ~AVE_DESCC_RDSTP;
+ val &= ~AVE_DESCC_STATUS_MASK;
+ writel(val, priv->base + AVE_DESCC);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ave_tx_complete(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 proc_idx, done_idx, ndesc, cmdsts;
+ unsigned int nr_freebuf = 0;
+ unsigned int tx_packets = 0;
+ unsigned int tx_bytes = 0;
+
+ proc_idx = priv->tx.proc_idx;
+ done_idx = priv->tx.done_idx;
+ ndesc = priv->tx.ndesc;
+
+ /* free pre-stored skb from done_idx to proc_idx */
+ while (proc_idx != done_idx) {
+ cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx);
+
+		/* stop if the descriptor is still owned by HW (OWN==1 on Tx) */
+ if (cmdsts & AVE_STS_OWN)
+ break;
+
+		/* check Tx status and update statistics */
+ if (cmdsts & AVE_STS_OK) {
+ tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK;
+ /* success */
+ if (cmdsts & AVE_STS_LAST)
+ tx_packets++;
+ } else {
+ /* error */
+ if (cmdsts & AVE_STS_LAST) {
+ priv->stats_tx.errors++;
+ if (cmdsts & (AVE_STS_OWC | AVE_STS_EC))
+ priv->stats_tx.collisions++;
+ }
+ }
+
+ /* release skb */
+ if (priv->tx.desc[done_idx].skbs) {
+ ave_dma_unmap(ndev, &priv->tx.desc[done_idx],
+ DMA_TO_DEVICE);
+ dev_consume_skb_any(priv->tx.desc[done_idx].skbs);
+ priv->tx.desc[done_idx].skbs = NULL;
+ nr_freebuf++;
+ }
+ done_idx = (done_idx + 1) % ndesc;
+ }
+
+ priv->tx.done_idx = done_idx;
+
+ /* update stats */
+ u64_stats_update_begin(&priv->stats_tx.syncp);
+ priv->stats_tx.packets += tx_packets;
+ priv->stats_tx.bytes += tx_bytes;
+ u64_stats_update_end(&priv->stats_tx.syncp);
+
+	/* wake the queue now that buffers have been freed */
+ if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf)
+ netif_wake_queue(ndev);
+
+ return nr_freebuf;
+}
+
+static int ave_rx_receive(struct net_device *ndev, int num)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ unsigned int rx_packets = 0;
+ unsigned int rx_bytes = 0;
+ u32 proc_idx, done_idx;
+ struct sk_buff *skb;
+ unsigned int pktlen;
+ int restpkt, npkts;
+ u32 ndesc, cmdsts;
+
+ proc_idx = priv->rx.proc_idx;
+ done_idx = priv->rx.done_idx;
+ ndesc = priv->rx.ndesc;
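+	/* at most (armed descriptors - 1) packets may be processed, so that
+	 * proc_idx never laps done_idx
+	 */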
+ restpkt = ((proc_idx + ndesc - 1) - done_idx) % ndesc;
+
+ for (npkts = 0; npkts < num; npkts++) {
+		/* no armed descriptors left; stop so they can be refilled quickly */
+ if (--restpkt < 0)
+ break;
+
+ cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx);
+
+		/* stop if the descriptor is still owned by HW (OWN==0 on Rx) */
+ if (!(cmdsts & AVE_STS_OWN))
+ break;
+
+ if (!(cmdsts & AVE_STS_OK)) {
+ priv->stats_rx.errors++;
+ proc_idx = (proc_idx + 1) % ndesc;
+ continue;
+ }
+
+ pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK;
+
+ /* get skbuff for rx */
+ skb = priv->rx.desc[proc_idx].skbs;
+ priv->rx.desc[proc_idx].skbs = NULL;
+
+ ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE);
+
+ skb->dev = ndev;
+ skb_put(skb, pktlen);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ rx_packets++;
+ rx_bytes += pktlen;
+
+ netif_receive_skb(skb);
+
+ proc_idx = (proc_idx + 1) % ndesc;
+ }
+
+ priv->rx.proc_idx = proc_idx;
+
+ /* update stats */
+ u64_stats_update_begin(&priv->stats_rx.syncp);
+ priv->stats_rx.packets += rx_packets;
+ priv->stats_rx.bytes += rx_bytes;
+ u64_stats_update_end(&priv->stats_rx.syncp);
+
+ /* refill the Rx buffers */
+ while (proc_idx != done_idx) {
+ if (ave_rxdesc_prepare(ndev, done_idx))
+ break;
+ done_idx = (done_idx + 1) % ndesc;
+ }
+
+ priv->rx.done_idx = done_idx;
+
+ return npkts;
+}
+
+static int ave_napi_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct ave_private *priv;
+ struct net_device *ndev;
+ int num;
+
+ priv = container_of(napi, struct ave_private, napi_rx);
+ ndev = priv->ndev;
+
+ num = ave_rx_receive(ndev, budget);
+ if (num < budget) {
+ napi_complete_done(napi, num);
+
+ /* enable Rx interrupt when NAPI finishes */
+ ave_irq_enable(ndev, AVE_GI_RXIINT);
+ }
+
+ return num;
+}
+
+static int ave_napi_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct ave_private *priv;
+ struct net_device *ndev;
+ int num;
+
+ priv = container_of(napi, struct ave_private, napi_tx);
+ ndev = priv->ndev;
+
+ num = ave_tx_complete(ndev);
+ napi_complete(napi);
+
+ /* enable Tx interrupt when NAPI finishes */
+ ave_irq_enable(ndev, AVE_GI_TX);
+
+ return num;
+}
+
+static void ave_global_reset(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 val;
+
+ /* set config register */
+ val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE;
+ if (!phy_interface_mode_is_rgmii(priv->phy_mode))
+ val |= AVE_CFGR_MII;
+ writel(val, priv->base + AVE_CFGR);
+
+ /* reset RMII register */
+ val = readl(priv->base + AVE_RSTCTRL);
+ val &= ~AVE_RSTCTRL_RMIIRST;
+ writel(val, priv->base + AVE_RSTCTRL);
+
+ /* assert reset */
+ writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR);
+ msleep(20);
+
+ /* 1st, negate PHY reset only */
+ writel(AVE_GRR_GRST, priv->base + AVE_GRR);
+ msleep(40);
+
+ /* negate reset */
+ writel(0, priv->base + AVE_GRR);
+ msleep(40);
+
+ /* negate RMII register */
+ val = readl(priv->base + AVE_RSTCTRL);
+ val |= AVE_RSTCTRL_RMIIRST;
+ writel(val, priv->base + AVE_RSTCTRL);
+
+ ave_irq_disable_all(ndev);
+}
+
+static void ave_rxfifo_reset(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 rxcr_org;
+
+ /* save and disable MAC receive op */
+ rxcr_org = readl(priv->base + AVE_RXCR);
+ writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR);
+
+ /* suspend Rx descriptor */
+ ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND);
+
+	/* drain packets received before the descriptors were suspended */
+ ave_rx_receive(ndev, priv->rx.ndesc);
+
+ /* assert reset */
+ writel(AVE_GRR_RXFFR, priv->base + AVE_GRR);
+ usleep_range(40, 50);
+
+ /* negate reset */
+ writel(0, priv->base + AVE_GRR);
+ usleep_range(10, 20);
+
+ /* negate interrupt status */
+ writel(AVE_GI_RXOVF, priv->base + AVE_GISR);
+
+ /* permit descriptor */
+ ave_desc_switch(ndev, AVE_DESC_RX_PERMIT);
+
+	/* restore MAC receive op */
+ writel(rxcr_org, priv->base + AVE_RXCR);
+}
+
+static irqreturn_t ave_irq_handler(int irq, void *netdev)
+{
+ struct net_device *ndev = (struct net_device *)netdev;
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 gimr_val, gisr_val;
+
+ gimr_val = ave_irq_disable_all(ndev);
+
+ /* get interrupt status */
+ gisr_val = readl(priv->base + AVE_GISR);
+
+ /* PHY */
+ if (gisr_val & AVE_GI_PHY)
+ writel(AVE_GI_PHY, priv->base + AVE_GISR);
+
+ /* check exceeding packet */
+ if (gisr_val & AVE_GI_RXERR) {
+ writel(AVE_GI_RXERR, priv->base + AVE_GISR);
+ netdev_err(ndev, "receive a packet exceeding frame buffer\n");
+ }
+
+ gisr_val &= gimr_val;
+ if (!gisr_val)
+ goto exit_isr;
+
+ /* RxFIFO overflow */
+ if (gisr_val & AVE_GI_RXOVF) {
+ priv->stats_rx.fifo_errors++;
+ ave_rxfifo_reset(ndev);
+ goto exit_isr;
+ }
+
+ /* Rx drop */
+ if (gisr_val & AVE_GI_RXDROP) {
+ priv->stats_rx.dropped++;
+ writel(AVE_GI_RXDROP, priv->base + AVE_GISR);
+ }
+
+ /* Rx interval */
+ if (gisr_val & AVE_GI_RXIINT) {
+ napi_schedule(&priv->napi_rx);
+		/* keep the Rx interrupt masked until NAPI completes */
+ gimr_val &= ~AVE_GI_RXIINT;
+ }
+
+ /* Tx completed */
+ if (gisr_val & AVE_GI_TX) {
+ napi_schedule(&priv->napi_tx);
+		/* keep the Tx interrupt masked until NAPI completes */
+ gimr_val &= ~AVE_GI_TX;
+ }
+
+exit_isr:
+ ave_irq_restore(ndev, gimr_val);
+
+ return IRQ_HANDLED;
+}
+
+static int ave_pfsel_start(struct net_device *ndev, unsigned int entry)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 val;
+
+ if (WARN_ON(entry > AVE_PF_SIZE))
+ return -EINVAL;
+
+ val = readl(priv->base + AVE_PFEN);
+ writel(val | BIT(entry), priv->base + AVE_PFEN);
+
+ return 0;
+}
+
+static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 val;
+
+ if (WARN_ON(entry > AVE_PF_SIZE))
+ return -EINVAL;
+
+ val = readl(priv->base + AVE_PFEN);
+ writel(val & ~BIT(entry), priv->base + AVE_PFEN);
+
+ return 0;
+}
+
+static int ave_pfsel_set_macaddr(struct net_device *ndev,
+ unsigned int entry,
+ const unsigned char *mac_addr,
+ unsigned int set_size)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ if (WARN_ON(entry > AVE_PF_SIZE))
+ return -EINVAL;
+ if (WARN_ON(set_size > 6))
+ return -EINVAL;
+
+ ave_pfsel_stop(ndev, entry);
+
+ /* set MAC address for the filter */
+ ave_hw_write_macaddr(ndev, mac_addr,
+ AVE_PKTF(entry), AVE_PKTF(entry) + 4);
+
+	/* set byte mask: a set mask bit means the byte is ignored by the
+	 * comparator, so mask off everything beyond set_size
+	 */
+ writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0,
+ priv->base + AVE_PFMBYTE(entry));
+ writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
+
+ /* set bit mask filter */
+ writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
+
+ /* set selector to ring 0 */
+ writel(0, priv->base + AVE_PFSEL(entry));
+
+ /* restart filter */
+ ave_pfsel_start(ndev, entry);
+
+ return 0;
+}
+
+static void ave_pfsel_set_promisc(struct net_device *ndev,
+ unsigned int entry, u32 rxring)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ if (WARN_ON(entry > AVE_PF_SIZE))
+ return;
+
+ ave_pfsel_stop(ndev, entry);
+
+ /* set byte mask */
+ writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry));
+ writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
+
+ /* set bit mask filter */
+ writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
+
+ /* set selector to rxring */
+ writel(rxring, priv->base + AVE_PFSEL(entry));
+
+ ave_pfsel_start(ndev, entry);
+}
+
+static void ave_pfsel_init(struct net_device *ndev)
+{
+ unsigned char bcast_mac[ETH_ALEN];
+ int i;
+
+ eth_broadcast_addr(bcast_mac);
+
+ for (i = 0; i < AVE_PF_SIZE; i++)
+ ave_pfsel_stop(ndev, i);
+
+	/* promiscuous entry, select ring 0 */
+ ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0);
+
+ /* unicast entry */
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
+
+ /* broadcast entry */
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6);
+}
+
+static void ave_phy_adjust_link(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ u32 val, txcr, rxcr, rxcr_org;
+ u16 rmt_adv = 0, lcl_adv = 0;
+ u8 cap;
+
+ /* set RGMII speed */
+ val = readl(priv->base + AVE_TXCR);
+ val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G);
+
+ if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000)
+ val |= AVE_TXCR_TXSPD_1G;
+ else if (phydev->speed == SPEED_100)
+ val |= AVE_TXCR_TXSPD_100;
+
+ writel(val, priv->base + AVE_TXCR);
+
+ /* set RMII speed (100M/10M only) */
+ if (!phy_interface_is_rgmii(phydev)) {
+ val = readl(priv->base + AVE_LINKSEL);
+ if (phydev->speed == SPEED_10)
+ val &= ~AVE_LINKSEL_100M;
+ else
+ val |= AVE_LINKSEL_100M;
+ writel(val, priv->base + AVE_LINKSEL);
+ }
+
+ /* check current RXCR/TXCR */
+ rxcr = readl(priv->base + AVE_RXCR);
+ txcr = readl(priv->base + AVE_TXCR);
+ rxcr_org = rxcr;
+
+ if (phydev->duplex) {
+ rxcr |= AVE_RXCR_FDUPEN;
+
+ if (phydev->pause)
+ rmt_adv |= LPA_PAUSE_CAP;
+ if (phydev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+ if (phydev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (phydev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (cap & FLOW_CTRL_TX)
+ txcr |= AVE_TXCR_FLOCTR;
+ else
+ txcr &= ~AVE_TXCR_FLOCTR;
+ if (cap & FLOW_CTRL_RX)
+ rxcr |= AVE_RXCR_FLOCTR;
+ else
+ rxcr &= ~AVE_RXCR_FLOCTR;
+ } else {
+ rxcr &= ~AVE_RXCR_FDUPEN;
+ rxcr &= ~AVE_RXCR_FLOCTR;
+ txcr &= ~AVE_TXCR_FLOCTR;
+ }
+
+ if (rxcr_org != rxcr) {
+ /* disable Rx mac */
+ writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR);
+ /* change and enable TX/Rx mac */
+ writel(txcr, priv->base + AVE_TXCR);
+ writel(rxcr, priv->base + AVE_RXCR);
+ }
+
+ phy_print_status(phydev);
+}
+
+static void ave_macaddr_init(struct net_device *ndev)
+{
+ ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R);
+
+ /* pfsel unicast entry */
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
+}
+
+static int ave_init(struct net_device *ndev)
+{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ struct ave_private *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct device_node *mdio_np;
+ struct phy_device *phydev;
+ int ret;
+
+	/* enable the clock here; the HW is accessed before ndo_open */
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "can't enable clock\n");
+ return ret;
+ }
+ ret = reset_control_deassert(priv->rst);
+ if (ret) {
+ dev_err(dev, "can't deassert reset\n");
+ goto out_clk_disable;
+ }
+
+ ave_global_reset(ndev);
+
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (!mdio_np) {
+ dev_err(dev, "mdio node not found\n");
+ ret = -EINVAL;
+ goto out_reset_assert;
+ }
+ ret = of_mdiobus_register(priv->mdio, mdio_np);
+ of_node_put(mdio_np);
+ if (ret) {
+ dev_err(dev, "failed to register mdiobus\n");
+ goto out_reset_assert;
+ }
+
+ phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link);
+ if (!phydev) {
+ dev_err(dev, "could not attach to PHY\n");
+ ret = -ENODEV;
+ goto out_mdio_unregister;
+ }
+
+ priv->phydev = phydev;
+
+ phy_ethtool_get_wol(phydev, &wol);
+ device_set_wakeup_capable(&ndev->dev, !!wol.supported);
+
+ if (!phy_interface_is_rgmii(phydev)) {
+ phydev->supported &= ~PHY_GBIT_FEATURES;
+ phydev->supported |= PHY_BASIC_FEATURES;
+ }
+ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+ phy_attached_info(phydev);
+
+ return 0;
+
+out_mdio_unregister:
+ mdiobus_unregister(priv->mdio);
+out_reset_assert:
+ reset_control_assert(priv->rst);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static void ave_uninit(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+
+ phy_disconnect(priv->phydev);
+ mdiobus_unregister(priv->mdio);
+
+	/* the HW must not be accessed after ndo_stop, so assert reset and disable the clock */
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+}
+
+static int ave_open(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ int entry;
+ int ret;
+ u32 val;
+
+ ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name,
+ ndev);
+ if (ret)
+ return ret;
+
+ priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc),
+ GFP_KERNEL);
+ if (!priv->tx.desc) {
+ ret = -ENOMEM;
+ goto out_free_irq;
+ }
+
+ priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc),
+ GFP_KERNEL);
+ if (!priv->rx.desc) {
+ kfree(priv->tx.desc);
+ ret = -ENOMEM;
+ goto out_free_irq;
+ }
+
+ /* initialize Tx work and descriptor */
+ priv->tx.proc_idx = 0;
+ priv->tx.done_idx = 0;
+ for (entry = 0; entry < priv->tx.ndesc; entry++) {
+ ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0);
+ ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0);
+ }
+ writel(AVE_TXDC_ADDR_START |
+ (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE),
+ priv->base + AVE_TXDC);
+
+ /* initialize Rx work and descriptor */
+ priv->rx.proc_idx = 0;
+ priv->rx.done_idx = 0;
+ for (entry = 0; entry < priv->rx.ndesc; entry++) {
+ if (ave_rxdesc_prepare(ndev, entry))
+ break;
+ }
+ writel(AVE_RXDC0_ADDR_START |
+ (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE),
+ priv->base + AVE_RXDC0);
+
+ ave_desc_switch(ndev, AVE_DESC_START);
+
+ ave_pfsel_init(ndev);
+ ave_macaddr_init(ndev);
+
+	/* set Rx configuration:
+	 * full duplex, enable pause drop, enable flow control
+	 */
+ val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN |
+ AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK);
+ writel(val, priv->base + AVE_RXCR);
+
+	/* set Tx configuration:
+	 * enable flow control, disable loopback
+	 */
+ writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR);
+
+	/* keep the interval-count unit (BSCK), enable the ring0 interval IRQ, and set the moderation count */
+ val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK;
+ val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
+ writel(val, priv->base + AVE_IIRQC);
+
+ val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX;
+ ave_irq_restore(ndev, val);
+
+ napi_enable(&priv->napi_rx);
+ napi_enable(&priv->napi_tx);
+
+ phy_start(ndev->phydev);
+ phy_start_aneg(ndev->phydev);
+ netif_start_queue(ndev);
+
+ return 0;
+
+out_free_irq:
+ disable_irq(priv->irq);
+ free_irq(priv->irq, ndev);
+
+ return ret;
+}
+
+static int ave_stop(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ int entry;
+
+ ave_irq_disable_all(ndev);
+ disable_irq(priv->irq);
+ free_irq(priv->irq, ndev);
+
+ netif_tx_disable(ndev);
+ phy_stop(ndev->phydev);
+ napi_disable(&priv->napi_tx);
+ napi_disable(&priv->napi_rx);
+
+ ave_desc_switch(ndev, AVE_DESC_STOP);
+
+ /* free Tx buffer */
+ for (entry = 0; entry < priv->tx.ndesc; entry++) {
+ if (!priv->tx.desc[entry].skbs)
+ continue;
+
+ ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx.desc[entry].skbs);
+ priv->tx.desc[entry].skbs = NULL;
+ }
+ priv->tx.proc_idx = 0;
+ priv->tx.done_idx = 0;
+
+ /* free Rx buffer */
+ for (entry = 0; entry < priv->rx.ndesc; entry++) {
+ if (!priv->rx.desc[entry].skbs)
+ continue;
+
+ ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx.desc[entry].skbs);
+ priv->rx.desc[entry].skbs = NULL;
+ }
+ priv->rx.proc_idx = 0;
+ priv->rx.done_idx = 0;
+
+ kfree(priv->tx.desc);
+ kfree(priv->rx.desc);
+
+ return 0;
+}
+
+static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ u32 proc_idx, done_idx, ndesc, cmdsts;
+ int ret, freepkt;
+ dma_addr_t paddr;
+
+ proc_idx = priv->tx.proc_idx;
+ done_idx = priv->tx.done_idx;
+ ndesc = priv->tx.ndesc;
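+	/* count free descriptors, keeping one slot reserved so that
+	 * proc_idx never laps done_idx
+	 */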
+ freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc;
+
+ /* stop queue when not enough entry */
+ if (unlikely(freepkt < 1)) {
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* add padding for short packet */
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+ priv->stats_tx.dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /* map Tx buffer
+ * Tx buffer set to the Tx descriptor doesn't have any restriction.
+ */
+ ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
+ skb->data, skb->len, DMA_TO_DEVICE, &paddr);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ priv->stats_tx.dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ priv->tx.desc[proc_idx].skbs = skb;
+
+ ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);
+
+ cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
+ (skb->len & AVE_STS_PKTLEN_TX_MASK);
+
+ /* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */
+ if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
+ cmdsts |= AVE_STS_INTR;
+
+	/* disable HW checksum calculation when the skb doesn't need it */
+ if (skb->ip_summed == CHECKSUM_NONE ||
+ skb->ip_summed == CHECKSUM_UNNECESSARY)
+ cmdsts |= AVE_STS_NOCSUM;
+
+ ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);
+
+ priv->tx.proc_idx = (proc_idx + 1) % ndesc;
+
+ return NETDEV_TX_OK;
+}
+
+static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+static void ave_set_rx_mode(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ struct netdev_hw_addr *hw_adr;
+ int count, mc_cnt;
+ u32 val;
+
+	/* disable the MAC address filter in promiscuous mode or when no multicast addresses are set */
+ mc_cnt = netdev_mc_count(ndev);
+ val = readl(priv->base + AVE_RXCR);
+ if (ndev->flags & IFF_PROMISC || !mc_cnt)
+ val &= ~AVE_RXCR_AFEN;
+ else
+ val |= AVE_RXCR_AFEN;
+ writel(val, priv->base + AVE_RXCR);
+
+	/* catch all multicast addresses via the v4/v6 prefix filters */
+ if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
+ v4multi_macadr, 1);
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
+ v6multi_macadr, 1);
+ } else {
+ /* stop all multicast filter */
+ for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
+ ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);
+
+ /* set multicast addresses */
+ count = 0;
+ netdev_for_each_mc_addr(hw_adr, ndev) {
+ if (count == mc_cnt)
+ break;
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
+ hw_adr->addr, 6);
+ count++;
+ }
+ }
+}
+
+static void ave_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ unsigned int start;
+
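+	/* snapshot the counters; retry if a writer updated them concurrently */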
+ do {
+ start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp);
+ stats->rx_packets = priv->stats_rx.packets;
+ stats->rx_bytes = priv->stats_rx.bytes;
+ } while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp);
+ stats->tx_packets = priv->stats_tx.packets;
+ stats->tx_bytes = priv->stats_tx.bytes;
+ } while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start));
+
+ stats->rx_errors = priv->stats_rx.errors;
+ stats->tx_errors = priv->stats_tx.errors;
+ stats->rx_dropped = priv->stats_rx.dropped;
+ stats->tx_dropped = priv->stats_tx.dropped;
+ stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
+ stats->collisions = priv->stats_tx.collisions;
+}
+
+static int ave_set_mac_address(struct net_device *ndev, void *p)
+{
+ int ret = eth_mac_addr(ndev, p);
+
+ if (ret)
+ return ret;
+
+ ave_macaddr_init(ndev);
+
+ return 0;
+}
+
+static const struct net_device_ops ave_netdev_ops = {
+ .ndo_init = ave_init,
+ .ndo_uninit = ave_uninit,
+ .ndo_open = ave_open,
+ .ndo_stop = ave_stop,
+ .ndo_start_xmit = ave_start_xmit,
+ .ndo_do_ioctl = ave_ioctl,
+ .ndo_set_rx_mode = ave_set_rx_mode,
+ .ndo_get_stats64 = ave_get_stats64,
+ .ndo_set_mac_address = ave_set_mac_address,
+};
+
+static int ave_probe(struct platform_device *pdev)
+{
+ const struct ave_soc_data *data;
+ struct device *dev = &pdev->dev;
+ char buf[ETHTOOL_FWVERS_LEN];
+ phy_interface_t phy_mode;
+ struct ave_private *priv;
+ struct net_device *ndev;
+ struct device_node *np;
+ struct resource *res;
+ const void *mac_addr;
+ void __iomem *base;
+ u64 dma_mask;
+ int irq, ret;
+ u32 ave_id;
+
+ data = of_device_get_match_data(dev);
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+ np = dev->of_node;
+ phy_mode = of_get_phy_mode(np);
+ if (phy_mode < 0) {
+ dev_err(dev, "phy-mode not found\n");
+ return -EINVAL;
+ }
+ if ((!phy_interface_mode_is_rgmii(phy_mode)) &&
+ phy_mode != PHY_INTERFACE_MODE_RMII &&
+ phy_mode != PHY_INTERFACE_MODE_MII) {
+ dev_err(dev, "phy-mode is invalid\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "IRQ not found\n");
+ return irq;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ndev = alloc_etherdev(sizeof(struct ave_private));
+ if (!ndev) {
+ dev_err(dev, "can't allocate ethernet device\n");
+ return -ENOMEM;
+ }
+
+ ndev->netdev_ops = &ave_netdev_ops;
+ ndev->ethtool_ops = &ave_ethtool_ops;
+ SET_NETDEV_DEV(ndev, dev);
+
+ ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
+ ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
+
+ ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+
+	/* if the MAC address is invalid, fall back to a random one */
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(dev, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
+ }
+
+ priv = netdev_priv(ndev);
+ priv->base = base;
+ priv->irq = irq;
+ priv->ndev = ndev;
+ priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE);
+ priv->phy_mode = phy_mode;
+ priv->data = data;
+
+ if (IS_DESC_64BIT(priv)) {
+ priv->desc_size = AVE_DESC_SIZE_64;
+ priv->tx.daddr = AVE_TXDM_64;
+ priv->rx.daddr = AVE_RXDM_64;
+ dma_mask = DMA_BIT_MASK(64);
+ } else {
+ priv->desc_size = AVE_DESC_SIZE_32;
+ priv->tx.daddr = AVE_TXDM_32;
+ priv->rx.daddr = AVE_RXDM_32;
+ dma_mask = DMA_BIT_MASK(32);
+ }
+ ret = dma_set_mask(dev, dma_mask);
+ if (ret)
+ goto out_free_netdev;
+
+ priv->tx.ndesc = AVE_NR_TXDESC;
+ priv->rx.ndesc = AVE_NR_RXDESC;
+
+ u64_stats_init(&priv->stats_tx.syncp);
+ u64_stats_init(&priv->stats_rx.syncp);
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto out_free_netdev;
+ }
+
+ priv->rst = devm_reset_control_get_optional_shared(dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ ret = PTR_ERR(priv->rst);
+ goto out_free_netdev;
+ }
+
+ priv->mdio = devm_mdiobus_alloc(dev);
+ if (!priv->mdio) {
+ ret = -ENOMEM;
+ goto out_free_netdev;
+ }
+ priv->mdio->priv = ndev;
+ priv->mdio->parent = dev;
+ priv->mdio->read = ave_mdiobus_read;
+ priv->mdio->write = ave_mdiobus_write;
+ priv->mdio->name = "uniphier-mdio";
+ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+
+ /* Register as a NAPI supported driver */
+ netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc);
+ netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx,
+ priv->tx.ndesc);
+
+ platform_set_drvdata(pdev, ndev);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(dev, "failed to register netdevice\n");
+ goto out_del_napi;
+ }
+
+ /* get ID and version */
+ ave_id = readl(priv->base + AVE_IDR);
+ ave_hw_read_version(ndev, buf, sizeof(buf));
+
+ dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n",
+ (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff,
+ (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff,
+ buf, priv->irq, phy_modes(phy_mode));
+
+ return 0;
+
+out_del_napi:
+ netif_napi_del(&priv->napi_rx);
+ netif_napi_del(&priv->napi_tx);
+out_free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int ave_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ave_private *priv = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+ netif_napi_del(&priv->napi_rx);
+ netif_napi_del(&priv->napi_tx);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct ave_soc_data ave_pro4_data = {
+ .is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_pxs2_data = {
+ .is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_ld11_data = {
+ .is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_ld20_data = {
+ .is_desc_64bit = true,
+};
+
+static const struct of_device_id of_ave_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro4-ave4",
+ .data = &ave_pro4_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-ave4",
+ .data = &ave_pxs2_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-ave4",
+ .data = &ave_ld11_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-ave4",
+ .data = &ave_ld20_data,
+ },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_ave_match);
+
+static struct platform_driver ave_driver = {
+ .probe = ave_probe,
+ .remove = ave_remove,
+ .driver = {
+ .name = "ave",
+ .of_match_table = of_ave_match,
+ },
+};
+module_platform_driver(ave_driver);
+
+MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
+MODULE_LICENSE("GPL v2");
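
The probe path above is a textbook use of of_device_get_match_data() to pull per-SoC capabilities out of the OF match table. A minimal, hedged sketch of the same idiom, with hypothetical foo_* names standing in for the driver specifics:

	#include <linux/module.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct foo_soc_data {
		bool has_64bit_desc;		/* per-SoC capability flag */
	};

	static const struct foo_soc_data foo_v1_data = { .has_64bit_desc = false };
	static const struct foo_soc_data foo_v2_data = { .has_64bit_desc = true };

	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo-v1", .data = &foo_v1_data },
		{ .compatible = "vendor,foo-v2", .data = &foo_v2_data },
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, foo_of_match);

	static int foo_probe(struct platform_device *pdev)
	{
		/* Returns the .data pointer of the matched of_device_id entry */
		const struct foo_soc_data *data = of_device_get_match_data(&pdev->dev);

		if (!data)
			return -EINVAL;

		dev_info(&pdev->dev, "64-bit descriptors: %d\n", data->has_64bit_desc);
		return 0;
	}
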
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e1e5ac053760..2ffe76c0ff74 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -409,7 +409,7 @@ struct stmmac_desc_ops {
/* get timestamp value */
u64(*get_timestamp) (void *desc, u32 ats);
/* get rx timestamp status */
- int (*get_rx_timestamp_status) (void *desc, u32 ats);
+ int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
/* Display ring */
void (*display_ring)(void *head, unsigned int size, bool rx);
/* set MSS via context descriptor */
@@ -474,7 +474,7 @@ struct mac_device_info;
/* Helpers to program the MAC core */
struct stmmac_ops {
/* MAC core initialization */
- void (*core_init)(struct mac_device_info *hw, int mtu);
+ void (*core_init)(struct mac_device_info *hw, struct net_device *dev);
/* Enable the MAC RX/TX */
void (*set_mac)(void __iomem *ioaddr, bool enable);
/* Enable and verify that the IPC module is supported */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 4404650b32c5..5270d26f0bc6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -40,9 +40,7 @@
#define PRG_ETH0_CLK_M250_DIV_SHIFT 7
#define PRG_ETH0_CLK_M250_DIV_WIDTH 3
-/* divides the result of m25_sel by either 5 (bit unset) or 10 (bit set) */
-#define PRG_ETH0_CLK_M25_DIV_SHIFT 10
-#define PRG_ETH0_CLK_M25_DIV_WIDTH 1
+#define PRG_ETH0_RGMII_TX_CLK_EN 10
#define PRG_ETH0_INVERTED_RMII_CLK BIT(11)
#define PRG_ETH0_TX_AND_PHY_REF_CLK BIT(12)
@@ -63,8 +61,11 @@ struct meson8b_dwmac {
struct clk_divider m250_div;
struct clk *m250_div_clk;
- struct clk_divider m25_div;
- struct clk *m25_div_clk;
+ struct clk_fixed_factor fixed_div2;
+ struct clk *fixed_div2_clk;
+
+ struct clk_gate rgmii_tx_en;
+ struct clk *rgmii_tx_en_clk;
u32 tx_delay_ns;
};
@@ -81,7 +82,7 @@ static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
writel(data, dwmac->regs + reg);
}
-static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
+static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
{
struct clk_init_data init;
int i, ret;
@@ -89,11 +90,6 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
char clk_name[32];
const char *clk_div_parents[1];
const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
- static const struct clk_div_table clk_25m_div_table[] = {
- { .val = 0, .div = 5 },
- { .val = 1, .div = 10 },
- { /* sentinel */ },
- };
/* get the mux parents from DT */
for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
@@ -116,7 +112,7 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev));
init.name = clk_name;
init.ops = &clk_mux_ops;
- init.flags = 0;
+ init.flags = CLK_SET_RATE_PARENT;
init.parent_names = mux_parent_names;
init.num_parents = MUX_CLK_NUM_PARENTS;
@@ -144,31 +140,48 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
dwmac->m250_div.hw.init = &init;
- dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
+ dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO |
+ CLK_DIVIDER_ROUND_CLOSEST;
dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw);
if (WARN_ON(IS_ERR(dwmac->m250_div_clk)))
return PTR_ERR(dwmac->m250_div_clk);
- /* create the m25_div */
- snprintf(clk_name, sizeof(clk_name), "%s#m25_div", dev_name(dev));
+ /* create the fixed_div2 */
+ snprintf(clk_name, sizeof(clk_name), "%s#fixed_div2", dev_name(dev));
init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL);
- init.ops = &clk_divider_ops;
- init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = CLK_SET_RATE_PARENT;
clk_div_parents[0] = __clk_get_name(dwmac->m250_div_clk);
init.parent_names = clk_div_parents;
init.num_parents = ARRAY_SIZE(clk_div_parents);
- dwmac->m25_div.reg = dwmac->regs + PRG_ETH0;
- dwmac->m25_div.shift = PRG_ETH0_CLK_M25_DIV_SHIFT;
- dwmac->m25_div.width = PRG_ETH0_CLK_M25_DIV_WIDTH;
- dwmac->m25_div.table = clk_25m_div_table;
- dwmac->m25_div.hw.init = &init;
- dwmac->m25_div.flags = CLK_DIVIDER_ALLOW_ZERO;
+ dwmac->fixed_div2.mult = 1;
+ dwmac->fixed_div2.div = 2;
+ dwmac->fixed_div2.hw.init = &init;
- dwmac->m25_div_clk = devm_clk_register(dev, &dwmac->m25_div.hw);
- if (WARN_ON(IS_ERR(dwmac->m25_div_clk)))
- return PTR_ERR(dwmac->m25_div_clk);
+ dwmac->fixed_div2_clk = devm_clk_register(dev, &dwmac->fixed_div2.hw);
+ if (WARN_ON(IS_ERR(dwmac->fixed_div2_clk)))
+ return PTR_ERR(dwmac->fixed_div2_clk);
+
+ /* create the rgmii_tx_en */
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "%s#rgmii_tx_en",
+ dev_name(dev));
+ init.ops = &clk_gate_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ clk_div_parents[0] = __clk_get_name(dwmac->fixed_div2_clk);
+ init.parent_names = clk_div_parents;
+ init.num_parents = ARRAY_SIZE(clk_div_parents);
+
+ dwmac->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0;
+ dwmac->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN;
+ dwmac->rgmii_tx_en.hw.init = &init;
+
+ dwmac->rgmii_tx_en_clk = devm_clk_register(dev,
+ &dwmac->rgmii_tx_en.hw);
+ if (WARN_ON(IS_ERR(dwmac->rgmii_tx_en_clk)))
+ return PTR_ERR(dwmac->rgmii_tx_en_clk);
return 0;
}
@@ -176,7 +189,6 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
{
int ret;
- unsigned long clk_rate;
u8 tx_dly_val = 0;
switch (dwmac->phy_mode) {
@@ -191,9 +203,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- /* Generate a 25MHz clock for the PHY */
- clk_rate = 25 * 1000 * 1000;
-
/* enable RGMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
PRG_ETH0_RGMII_MODE);
@@ -204,12 +213,28 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
tx_dly_val << PRG_ETH0_TXDLY_SHIFT);
+
+ /* Configure the 125MHz RGMII TX clock; the IP block adjusts the
+ * output automatically (that is, without any register writes from
+ * us) based on the line speed: 125MHz for Gbit speeds, 25MHz for
+ * 100Mbit/s and 2.5MHz for 10Mbit/s.
+ */
+ ret = clk_set_rate(dwmac->rgmii_tx_en_clk, 125 * 1000 * 1000);
+ if (ret) {
+ dev_err(&dwmac->pdev->dev,
+ "failed to set RGMII TX clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dwmac->rgmii_tx_en_clk);
+ if (ret) {
+ dev_err(&dwmac->pdev->dev,
+ "failed to enable the RGMII TX clock\n");
+ return ret;
+ }
break;
case PHY_INTERFACE_MODE_RMII:
- /* Use the rate of the mux clock for the internal RMII PHY */
- clk_rate = clk_get_rate(dwmac->m250_mux_clk);
-
/* disable RGMII mode -> enables RMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
0);
@@ -231,20 +256,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
return -EINVAL;
}
- ret = clk_prepare_enable(dwmac->m25_div_clk);
- if (ret) {
- dev_err(&dwmac->pdev->dev, "failed to enable the PHY clock\n");
- return ret;
- }
-
- ret = clk_set_rate(dwmac->m25_div_clk, clk_rate);
- if (ret) {
- clk_disable_unprepare(dwmac->m25_div_clk);
-
- dev_err(&dwmac->pdev->dev, "failed to set PHY clock\n");
- return ret;
- }
-
/* enable TX_CLK and PHY_REF_CLK generator */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TX_AND_PHY_REF_CLK,
PRG_ETH0_TX_AND_PHY_REF_CLK);
@@ -294,7 +305,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
&dwmac->tx_delay_ns))
dwmac->tx_delay_ns = 2;
- ret = meson8b_init_clk(dwmac);
+ ret = meson8b_init_rgmii_tx_clk(dwmac);
if (ret)
goto err_remove_config_dt;
@@ -311,7 +322,8 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
return 0;
err_clk_disable:
- clk_disable_unprepare(dwmac->m25_div_clk);
+ if (phy_interface_mode_is_rgmii(dwmac->phy_mode))
+ clk_disable_unprepare(dwmac->rgmii_tx_en_clk);
err_remove_config_dt:
stmmac_remove_config_dt(pdev, plat_dat);
@@ -322,7 +334,8 @@ static int meson8b_dwmac_remove(struct platform_device *pdev)
{
struct meson8b_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
- clk_disable_unprepare(dwmac->m25_div_clk);
+ if (phy_interface_mode_is_rgmii(dwmac->phy_mode))
+ clk_disable_unprepare(dwmac->rgmii_tx_en_clk);
return stmmac_pltfr_remove(pdev);
}
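
The replacement clock tree above (mux -> divider -> fixed-factor /2 -> gate) is assembled from common-clock-framework building blocks. As a hedged sketch of the final stage, here is how an MMIO gate bit is registered as a clk; all foo_* names are illustrative, not from this patch:

	#include <linux/clk-provider.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	static struct clk *foo_register_gate(struct device *dev, void __iomem *reg,
					     u8 bit_idx, const char *parent_name)
	{
		struct clk_gate *gate;
		struct clk_init_data init = { };

		gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
		if (!gate)
			return ERR_PTR(-ENOMEM);

		init.name = "foo_gate";
		init.ops = &clk_gate_ops;
		init.flags = CLK_SET_RATE_PARENT;	/* forward rate requests upstream */
		init.parent_names = &parent_name;
		init.num_parents = 1;

		gate->reg = reg;		/* register holding the enable bit */
		gate->bit_idx = bit_idx;	/* which bit gates the clock */
		gate->hw.init = &init;

		return devm_clk_register(dev, &gate->hw);
	}
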
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 9eb7f65d8000..a3fa65b1ca8e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -483,7 +483,8 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
return 0;
}
-static void sun8i_dwmac_core_init(struct mac_device_info *hw, int mtu)
+static void sun8i_dwmac_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
u32 v;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 8a86340ff2d3..540d21786a43 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -25,18 +25,28 @@
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
+#include <net/dsa.h>
#include <asm/io.h>
#include "stmmac_pcs.h"
#include "dwmac1000.h"
-static void dwmac1000_core_init(struct mac_device_info *hw, int mtu)
+static void dwmac1000_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
+ int mtu = dev->mtu;
/* Configure GMAC core */
value |= GMAC_CORE_INIT;
+ /* Clear ACS bit because Ethernet switch tagging formats such as
+ * Broadcom tags can look like invalid LLC/SNAP packets and cause the
+ * hardware to truncate packets on reception.
+ */
+ if (netdev_uses_dsa(dev))
+ value &= ~GMAC_CONTROL_ACS;
+
if (mtu > 1500)
value |= GMAC_CONTROL_2K;
if (mtu > 2000)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 8ef517356313..91b23f9db31a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -25,15 +25,26 @@
*******************************************************************************/
#include <linux/crc32.h>
+#include <net/dsa.h>
#include <asm/io.h>
#include "dwmac100.h"
-static void dwmac100_core_init(struct mac_device_info *hw, int mtu)
+static void dwmac100_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + MAC_CONTROL);
- writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+ value |= MAC_CORE_INIT;
+
+ /* Clear ASTP bit because Ethernet switch tagging formats such as
+ * Broadcom tags can look like invalid LLC/SNAP packets and cause the
+ * hardware to truncate packets on reception.
+ */
+ if (netdev_uses_dsa(dev))
+ value &= ~MAC_CONTROL_ASTP;
+
+ writel(value, ioaddr + MAC_CONTROL);
#ifdef STMMAC_VLAN_TAG_USED
writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index f3ed8f7853eb..ed222b20fcf1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -17,16 +17,26 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <net/dsa.h>
#include "stmmac_pcs.h"
#include "dwmac4.h"
-static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
+static void dwmac4_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
+ int mtu = dev->mtu;
value |= GMAC_CORE_INIT;
+ /* Clear ACS bit because Ethernet switch tagging formats such as
+ * Broadcom tags can look like invalid LLC/SNAP packets and cause the
+ * hardware to truncate packets on reception.
+ */
+ if (netdev_uses_dsa(dev))
+ value &= ~GMAC_CONFIG_ACS;
+
if (mtu > 1500)
value |= GMAC_CONFIG_2K;
if (mtu > 2000)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 4b286e27c4ca..c728ffa095de 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -258,7 +258,8 @@ static int dwmac4_rx_check_timestamp(void *desc)
return ret;
}
-static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
+static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
+ u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;
int ret = -EINVAL;
@@ -270,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
/* Check if timestamp is OK from context descriptor */
do {
- ret = dwmac4_rx_check_timestamp(desc);
+ ret = dwmac4_rx_check_timestamp(next_desc);
if (ret < 0)
goto exit;
i++;
@@ -333,7 +334,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
if (tx_own)
tdes3 |= TDES3_OWN;
- if (is_fs & tx_own)
+ if (is_fs && tx_own)
 /* When the own bit has to be set for the first frame, all
 * descriptors for the same frame have to be set beforehand,
 * to avoid a race condition.
@@ -376,7 +377,7 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
if (tx_own)
tdes3 |= TDES3_OWN;
- if (is_fs & tx_own)
+ if (is_fs && tx_own)
 /* When the own bit has to be set for the first frame, all
 * descriptors for the same frame have to be set beforehand,
 * to avoid a race condition.
@@ -405,7 +406,7 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
for (i = 0; i < size; i++) {
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(p),
le32_to_cpu(p->des0), le32_to_cpu(p->des1),
le32_to_cpu(p->des2), le32_to_cpu(p->des3));
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 7546b3664113..6768a25b6aa0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -341,7 +341,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
if (tx_own)
tdes0 |= ETDES0_OWN;
- if (is_fs & tx_own)
+ if (is_fs && tx_own)
 /* When the own bit has to be set for the first frame, all
 * descriptors for the same frame have to be set beforehand,
 * to avoid a race condition.
@@ -400,7 +400,8 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats)
return ns;
}
-static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
+static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
+ u32 ats)
{
if (ats) {
struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
@@ -427,7 +428,7 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
u64 x;
x = *(u64 *)ep;
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
(unsigned int)x, (unsigned int)(x >> 32),
ep->basic.des2, ep->basic.des3);
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index f817f8f36569..ebd9e5e00f16 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -265,7 +265,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats)
return ns;
}
-static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
+static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;
@@ -288,7 +288,7 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx)
u64 x;
x = *(u64 *)p;
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
+ pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
i, (unsigned int)virt_to_phys(p),
(unsigned int)x, (unsigned int)(x >> 32),
p->des2, p->des3);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 721b61655261..08c19ebd5306 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -34,6 +34,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
{
u32 value = readl(ioaddr + PTP_TCR);
unsigned long data;
+ u32 reg_value;
/* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
* formula = (1/ptp_clock) * 1000000000
@@ -50,10 +51,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
data &= PTP_SSIR_SSINC_MASK;
+ reg_value = data;
if (gmac4)
- data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
+ reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT;
- writel(data, ioaddr + PTP_SSIR);
+ writel(reg_value, ioaddr + PTP_SSIR);
return data;
}
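
The hwtstamp fix above is subtle: the GMAC4 variant shifts the sub-second increment into its register field, but callers of the helper need the unshifted value back. Keeping the two in separate variables avoids returning the shifted quantity. A condensed, hedged restatement, with placeholder register offset and shift:

	#include <linux/io.h>

	#define FOO_PTP_SSIR	0x04	/* hypothetical register offset */
	#define FOO_SSINC_SHIFT	16	/* hypothetical field position */

	static u32 foo_config_ssinc(void __iomem *ioaddr, u32 data, bool shifted_field)
	{
		u32 reg_value = data;

		if (shifted_field)
			reg_value <<= FOO_SSINC_SHIFT;

		writel(reg_value, ioaddr + FOO_PTP_SSIR);

		return data;	/* raw increment for the caller, never the shifted form */
	}
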
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d7250539d0bd..7ad841434ec8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
bool stmmac_eee_init(struct stmmac_priv *priv)
{
struct net_device *ndev = priv->dev;
+ int interface = priv->plat->interface;
unsigned long flags;
bool ret = false;
+ if ((interface != PHY_INTERFACE_MODE_MII) &&
+ (interface != PHY_INTERFACE_MODE_GMII) &&
+ !phy_interface_mode_is_rgmii(interface))
+ goto out;
+
/* Using PCS we cannot dial with the phy registers at this stage
* so we do not support extra feature like EEE.
*/
@@ -482,7 +488,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
desc = np;
/* Check if timestamp is available */
- if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
+ if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
shhwtstamp = skb_hwtstamps(skb);
@@ -1997,22 +2003,60 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
u32 tx_channel_count = priv->plat->tx_queues_to_use;
- int status;
+ u32 rx_channel_count = priv->plat->rx_queues_to_use;
+ u32 channels_to_check = tx_channel_count > rx_channel_count ?
+ tx_channel_count : rx_channel_count;
u32 chan;
+ bool poll_scheduled = false;
+ int status[channels_to_check];
+
+ /* Each DMA channel can be used for rx and tx simultaneously, yet
+ * napi_struct is embedded in struct stmmac_rx_queue rather than in a
+ * stmmac_channel struct.
+ * Because of this, stmmac_poll currently checks (and possibly wakes)
+ * all tx queues rather than just a single tx queue.
+ */
+ for (chan = 0; chan < channels_to_check; chan++)
+ status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr,
+ &priv->xstats,
+ chan);
- for (chan = 0; chan < tx_channel_count; chan++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ for (chan = 0; chan < rx_channel_count; chan++) {
+ if (likely(status[chan] & handle_rx)) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
- status = priv->hw->dma->dma_interrupt(priv->ioaddr,
- &priv->xstats, chan);
- if (likely((status & handle_rx)) || (status & handle_tx)) {
if (likely(napi_schedule_prep(&rx_q->napi))) {
stmmac_disable_dma_irq(priv, chan);
__napi_schedule(&rx_q->napi);
+ poll_scheduled = true;
}
}
+ }
- if (unlikely(status & tx_hard_error_bump_tc)) {
+ /* If we scheduled poll, we already know the tx queues will be checked.
+ * If we didn't, see whether any DMA channel (used by tx) has a
+ * completed transmission; if so, call stmmac_poll (once).
+ */
+ if (!poll_scheduled) {
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ if (status[chan] & handle_tx) {
+ /* It doesn't matter what rx queue we choose
+ * here. We use 0 since it always exists.
+ */
+ struct stmmac_rx_queue *rx_q =
+ &priv->rx_queue[0];
+
+ if (likely(napi_schedule_prep(&rx_q->napi))) {
+ stmmac_disable_dma_irq(priv, chan);
+ __napi_schedule(&rx_q->napi);
+ }
+ break;
+ }
+ }
+ }
+
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
(tc <= 256)) {
@@ -2029,7 +2073,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
chan);
priv->xstats.threshold = tc;
}
- } else if (unlikely(status == tx_hard_error)) {
+ } else if (unlikely(status[chan] == tx_hard_error)) {
stmmac_tx_err(priv, chan);
}
}
@@ -2483,7 +2527,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}
/* Initialize the MAC Core */
- priv->hw->mac->core_init(priv->hw, dev->mtu);
+ priv->hw->mac->core_init(priv->hw, dev);
/* Initialize MTL*/
if (priv->synopsys_id >= DWMAC_CORE_4_00)
@@ -3398,9 +3442,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
if (netif_msg_rx_status(priv)) {
netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
p, entry, des);
- if (frame_len > ETH_FRAME_LEN)
- netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
- frame_len, status);
+ netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
+ frame_len, status);
}
/* The zero-copy is always used for all the sizes
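
The reworked stmmac_dma_interrupt() above leans on the napi_schedule_prep()/__napi_schedule() pair: the prep call fails if a poll is already scheduled, so the IRQ-disable plus schedule sequence runs at most once per NAPI context. A hedged sketch of the idiom in a generic handler, with hypothetical foo_* names:

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>

	struct foo_priv {
		struct napi_struct napi;
	};

	static void foo_disable_rx_irq(struct foo_priv *priv) { /* hypothetical */ }

	static irqreturn_t foo_isr(int irq, void *dev_id)
	{
		struct foo_priv *priv = dev_id;

		/* Atomically claims the right to schedule; fails if already pending */
		if (napi_schedule_prep(&priv->napi)) {
			foo_disable_rx_irq(priv);	/* quiesce until poll re-enables */
			__napi_schedule(&priv->napi);
		}

		return IRQ_HANDLED;
	}
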
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index d655a4261e98..eb1c6b03c329 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -333,9 +333,8 @@ void xlgmac_print_pkt(struct net_device *netdev,
struct sk_buff *skb, bool tx_rx)
{
struct ethhdr *eth = (struct ethhdr *)skb->data;
- unsigned char *buf = skb->data;
unsigned char buffer[128];
- unsigned int i, j;
+ unsigned int i;
netdev_dbg(netdev, "\n************** SKB dump ****************\n");
@@ -346,22 +345,13 @@ void xlgmac_print_pkt(struct net_device *netdev,
netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
- for (i = 0, j = 0; i < skb->len;) {
- j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
- buf[i++]);
-
- if ((i % 32) == 0) {
- netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
- j = 0;
- } else if ((i % 16) == 0) {
- buffer[j++] = ' ';
- buffer[j++] = ' ';
- } else if ((i % 4) == 0) {
- buffer[j++] = ' ';
- }
+ for (i = 0; i < skb->len; i += 32) {
+ unsigned int len = min(skb->len - i, 32U);
+
+ hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+ buffer, sizeof(buffer), false);
+ netdev_dbg(netdev, " %#06x: %s\n", i, buffer);
}
- if (i % 32)
- netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}
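
The xlgmac cleanup above replaces a hand-rolled formatter with hex_dump_to_buffer(); when no custom netdev_dbg() prefix is needed, print_hex_dump() can do the whole job in one call. A hedged sketch:

	#include <linux/printk.h>

	/* Dump a buffer in 32-byte rows, mirroring the rewritten loop above */
	static void foo_dump(const void *buf, size_t len)
	{
		print_hex_dump(KERN_DEBUG, "foo: ", DUMP_PREFIX_OFFSET,
			       32, 1, buf, len, false);
	}
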
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a73600dceb8b..3c85a0885f9b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -88,6 +88,7 @@ do { \
#define CPSW_VERSION_4 0x190112
#define HOST_PORT_NUM 0
+#define CPSW_ALE_PORTS_NUM 3
#define SLIVER_SIZE 0x40
#define CPSW1_HOST_PORT_OFFSET 0x028
@@ -352,6 +353,27 @@ struct cpsw_hw_stats {
u32 rxdmaoverruns;
};
+struct cpsw_slave_data {
+ struct device_node *phy_node;
+ char phy_id[MII_BUS_ID_SIZE];
+ int phy_if;
+ u8 mac_addr[ETH_ALEN];
+ u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
+};
+
+struct cpsw_platform_data {
+ struct cpsw_slave_data *slave_data;
+ u32 ss_reg_ofs; /* Subsystem control register offset */
+ u32 channels; /* number of cpdma channels (symmetric) */
+ u32 slaves; /* number of slave cpgmac ports */
+ u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
+ u32 ale_entries; /* ale table size */
+ u32 bd_ram_size; /* buffer descriptor ram size */
+ u32 mac_control; /* Mac control register */
+ u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode */
+ bool dual_emac; /* Enable Dual EMAC mode */
+};
+
struct cpsw_slave {
void __iomem *regs;
struct cpsw_sliver_regs __iomem *sliver;
@@ -365,12 +387,12 @@ struct cpsw_slave {
static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
{
- return __raw_readl(slave->regs + offset);
+ return readl_relaxed(slave->regs + offset);
}
static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
{
- __raw_writel(val, slave->regs + offset);
+ writel_relaxed(val, slave->regs + offset);
}
struct cpsw_vector {
@@ -660,8 +682,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
static void cpsw_intr_enable(struct cpsw_common *cpsw)
{
- __raw_writel(0xFF, &cpsw->wr_regs->tx_en);
- __raw_writel(0xFF, &cpsw->wr_regs->rx_en);
+ writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
+ writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
cpdma_ctlr_int_ctrl(cpsw->dma, true);
return;
@@ -669,8 +691,8 @@ static void cpsw_intr_enable(struct cpsw_common *cpsw)
static void cpsw_intr_disable(struct cpsw_common *cpsw)
{
- __raw_writel(0, &cpsw->wr_regs->tx_en);
- __raw_writel(0, &cpsw->wr_regs->rx_en);
+ writel_relaxed(0, &cpsw->wr_regs->tx_en);
+ writel_relaxed(0, &cpsw->wr_regs->rx_en);
cpdma_ctlr_int_ctrl(cpsw->dma, false);
return;
@@ -949,18 +971,14 @@ static inline void soft_reset(const char *module, void __iomem *reg)
{
unsigned long timeout = jiffies + HZ;
- __raw_writel(1, reg);
+ writel_relaxed(1, reg);
do {
cpu_relax();
- } while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));
+ } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
- WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
+ WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}
-#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
- ((mac)[2] << 16) | ((mac)[3] << 24))
-#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
-
static void cpsw_set_slave_mac(struct cpsw_slave *slave,
struct cpsw_priv *priv)
{
@@ -1015,7 +1033,7 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
if (mac_control != slave->mac_control) {
phy_print_status(phy);
- __raw_writel(mac_control, &slave->sliver->mac_control);
+ writel_relaxed(mac_control, &slave->sliver->mac_control);
}
slave->mac_control = mac_control;
@@ -1278,7 +1296,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
soft_reset_slave(slave);
/* setup priority mapping */
- __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
+ writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
switch (cpsw->version) {
case CPSW_VERSION_1:
@@ -1304,7 +1322,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
/* setup max packet size, and mac address */
- __raw_writel(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
+ writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
cpsw_set_slave_mac(slave, priv);
slave->mac_control = 0; /* no link yet */
@@ -1395,9 +1413,9 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
/* setup host port priority mapping */
- __raw_writel(CPDMA_TX_PRIORITY_MAP,
- &cpsw->host_port_regs->cpdma_tx_pri_map);
- __raw_writel(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
+ writel_relaxed(CPDMA_TX_PRIORITY_MAP,
+ &cpsw->host_port_regs->cpdma_tx_pri_map);
+ writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
@@ -1514,10 +1532,10 @@ static int cpsw_ndo_open(struct net_device *ndev)
/* initialize shared resources for every ndev */
if (!cpsw->usage_count) {
/* disable priority elevation */
- __raw_writel(0, &cpsw->regs->ptype);
+ writel_relaxed(0, &cpsw->regs->ptype);
 /* enable statistics collection on all ports */
- __raw_writel(0x7, &cpsw->regs->stat_port_en);
+ writel_relaxed(0x7, &cpsw->regs->stat_port_en);
/* Enable internal fifo flow control */
writel(0x7, &cpsw->regs->flow_control);
@@ -1701,7 +1719,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
slave_write(slave, ctrl, CPSW2_CONTROL);
- __raw_writel(ETH_P_1588, &cpsw->regs->ts_ltype);
+ writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
}
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -2298,7 +2316,6 @@ static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
{
- int (*poll)(struct napi_struct *, int);
struct cpsw_common *cpsw = priv->cpsw;
void (*handler)(void *, int, int);
struct netdev_queue *queue;
@@ -2309,12 +2326,10 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
ch = &cpsw->rx_ch_num;
vec = cpsw->rxv;
handler = cpsw_rx_handler;
- poll = cpsw_rx_poll;
} else {
ch = &cpsw->tx_ch_num;
vec = cpsw->txv;
handler = cpsw_tx_handler;
- poll = cpsw_tx_poll;
}
while (*ch < ch_num) {
@@ -3050,17 +3065,23 @@ static int cpsw_probe(struct platform_device *pdev)
}
cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
+ if (IS_ERR(cpsw->txv[0].ch)) {
+ dev_err(priv->dev, "error initializing tx dma channel\n");
+ ret = PTR_ERR(cpsw->txv[0].ch);
+ goto clean_dma_ret;
+ }
+
cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
- if (WARN_ON(!cpsw->rxv[0].ch || !cpsw->txv[0].ch)) {
- dev_err(priv->dev, "error initializing dma channels\n");
- ret = -ENOMEM;
+ if (IS_ERR(cpsw->rxv[0].ch)) {
+ dev_err(priv->dev, "error initializing rx dma channel\n");
+ ret = PTR_ERR(cpsw->rxv[0].ch);
goto clean_dma_ret;
}
ale_params.dev = &pdev->dev;
ale_params.ale_ageout = ale_ageout;
ale_params.ale_entries = data->ale_entries;
- ale_params.ale_ports = data->slaves;
+ ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
cpsw->ale = cpsw_ale_create(&ale_params);
if (!cpsw->ale) {
@@ -3072,14 +3093,14 @@ static int cpsw_probe(struct platform_device *pdev)
cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
if (IS_ERR(cpsw->cpts)) {
ret = PTR_ERR(cpsw->cpts);
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
ndev->irq = platform_get_irq(pdev, 1);
if (ndev->irq < 0) {
dev_err(priv->dev, "error getting irq resource\n");
ret = ndev->irq;
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
of_id = of_match_device(cpsw_of_mtable, &pdev->dev);
@@ -3103,7 +3124,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (ret) {
dev_err(priv->dev, "error registering net device\n");
ret = -ENODEV;
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
if (cpsw->data.dual_emac) {
@@ -3126,7 +3147,7 @@ static int cpsw_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 1);
if (irq < 0) {
ret = irq;
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
cpsw->irqs_table[0] = irq;
@@ -3134,14 +3155,14 @@ static int cpsw_probe(struct platform_device *pdev)
0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
dev_err(priv->dev, "error attaching irq (%d)\n", ret);
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
/* TX IRQ */
irq = platform_get_irq(pdev, 2);
if (irq < 0) {
ret = irq;
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
cpsw->irqs_table[1] = irq;
@@ -3149,7 +3170,7 @@ static int cpsw_probe(struct platform_device *pdev)
0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
dev_err(priv->dev, "error attaching irq (%d)\n", ret);
- goto clean_ale_ret;
+ goto clean_dma_ret;
}
cpsw_notice(priv, probe,
@@ -3162,8 +3183,6 @@ static int cpsw_probe(struct platform_device *pdev)
clean_unregister_netdev_ret:
unregister_netdev(ndev);
-clean_ale_ret:
- cpsw_ale_destroy(cpsw->ale);
clean_dma_ret:
cpdma_ctlr_destroy(cpsw->dma);
clean_dt_ret:
@@ -3193,7 +3212,6 @@ static int cpsw_remove(struct platform_device *pdev)
unregister_netdev(ndev);
cpts_release(cpsw->cpts);
- cpsw_ale_destroy(cpsw->ale);
cpdma_ctlr_destroy(cpsw->dma);
cpsw_remove_dt(pdev);
pm_runtime_put_sync(&pdev->dev);
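
The bulk of the cpsw churn above swaps __raw_writel()/__raw_readl() for writel_relaxed()/readl_relaxed(): the relaxed accessors still perform the little-endian byte swapping MMIO expects, unlike the __raw_* forms, but skip the memory barrier of writel()/readl(). A hedged sketch of when each is appropriate, with hypothetical register offsets:

	#include <linux/io.h>

	/* Hypothetical device: a config register and a DMA kick register */
	static void foo_program(void __iomem *regs, u32 cfg)
	{
		/* No ordering against DMA needed: a relaxed write suffices
		 * (still endian-correct, unlike __raw_writel())
		 */
		writel_relaxed(cfg, regs + 0x0);

		/* Kicking DMA must be ordered after prior memory writes:
		 * use the fully-barriered accessor
		 */
		writel(1, regs + 0x4);
	}
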
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 6c3037aa2cd3..cf111db3dc27 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -17,26 +17,9 @@
#include <linux/if_ether.h>
#include <linux/phy.h>
-struct cpsw_slave_data {
- struct device_node *phy_node;
- char phy_id[MII_BUS_ID_SIZE];
- int phy_if;
- u8 mac_addr[ETH_ALEN];
- u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
-};
-
-struct cpsw_platform_data {
- struct cpsw_slave_data *slave_data;
- u32 ss_reg_ofs; /* Subsystem control register offset */
- u32 channels; /* number of cpdma channels (symmetric) */
- u32 slaves; /* number of slave cpgmac ports */
- u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
- u32 ale_entries; /* ale table size */
- u32 bd_ram_size; /*buffer descriptor ram size */
- u32 mac_control; /* Mac control register */
- u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
- bool dual_emac; /* Enable Dual EMAC mode */
-};
+#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
+ ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index b432a75fb874..93dc05c194d3 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -150,11 +150,11 @@ static int cpsw_ale_read(struct cpsw_ale *ale, int idx, u32 *ale_entry)
WARN_ON(idx > ale->params.ale_entries);
- __raw_writel(idx, ale->params.ale_regs + ALE_TABLE_CONTROL);
+ writel_relaxed(idx, ale->params.ale_regs + ALE_TABLE_CONTROL);
for (i = 0; i < ALE_ENTRY_WORDS; i++)
- ale_entry[i] = __raw_readl(ale->params.ale_regs +
- ALE_TABLE + 4 * i);
+ ale_entry[i] = readl_relaxed(ale->params.ale_regs +
+ ALE_TABLE + 4 * i);
return idx;
}
@@ -166,11 +166,11 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
WARN_ON(idx > ale->params.ale_entries);
for (i = 0; i < ALE_ENTRY_WORDS; i++)
- __raw_writel(ale_entry[i], ale->params.ale_regs +
- ALE_TABLE + 4 * i);
+ writel_relaxed(ale_entry[i], ale->params.ale_regs +
+ ALE_TABLE + 4 * i);
- __raw_writel(idx | ALE_TABLE_WRITE, ale->params.ale_regs +
- ALE_TABLE_CONTROL);
+ writel_relaxed(idx | ALE_TABLE_WRITE, ale->params.ale_regs +
+ ALE_TABLE_CONTROL);
return idx;
}
@@ -723,7 +723,7 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
if (info->port_offset == 0 && info->port_shift == 0)
 port = 0; /* global, port is a don't care */
- if (port < 0 || port > ale->params.ale_ports)
+ if (port < 0 || port >= ale->params.ale_ports)
return -EINVAL;
mask = BITMASK(info->bits);
@@ -733,9 +733,9 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
offset = info->offset + (port * info->port_offset);
shift = info->shift + (port * info->port_shift);
- tmp = __raw_readl(ale->params.ale_regs + offset);
+ tmp = readl_relaxed(ale->params.ale_regs + offset);
tmp = (tmp & ~(mask << shift)) | (value << shift);
- __raw_writel(tmp, ale->params.ale_regs + offset);
+ writel_relaxed(tmp, ale->params.ale_regs + offset);
return 0;
}
@@ -754,13 +754,13 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
if (info->port_offset == 0 && info->port_shift == 0)
 port = 0; /* global, port is a don't care */
- if (port < 0 || port > ale->params.ale_ports)
+ if (port < 0 || port >= ale->params.ale_ports)
return -EINVAL;
offset = info->offset + (port * info->port_offset);
shift = info->shift + (port * info->port_shift);
- tmp = __raw_readl(ale->params.ale_regs + offset) >> shift;
+ tmp = readl_relaxed(ale->params.ale_regs + offset) >> shift;
return tmp & BITMASK(info->bits);
}
EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
@@ -779,9 +779,37 @@ static void cpsw_ale_timer(struct timer_list *t)
void cpsw_ale_start(struct cpsw_ale *ale)
{
+ cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
+ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
+
+ timer_setup(&ale->timer, cpsw_ale_timer, 0);
+ if (ale->ageout) {
+ ale->timer.expires = jiffies + ale->ageout;
+ add_timer(&ale->timer);
+ }
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_start);
+
+void cpsw_ale_stop(struct cpsw_ale *ale)
+{
+ del_timer_sync(&ale->timer);
+ cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_stop);
+
+struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
+{
+ struct cpsw_ale *ale;
u32 rev, ale_entries;
- rev = __raw_readl(ale->params.ale_regs + ALE_IDVER);
+ ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
+ if (!ale)
+ return NULL;
+
+ ale->params = *params;
+ ale->ageout = ale->params.ale_ageout * HZ;
+
+ rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER);
if (!ale->params.major_ver_mask)
ale->params.major_ver_mask = 0xff;
ale->version =
@@ -793,8 +821,8 @@ void cpsw_ale_start(struct cpsw_ale *ale)
if (!ale->params.ale_entries) {
ale_entries =
- __raw_readl(ale->params.ale_regs + ALE_STATUS) &
- ALE_STATUS_SIZE_MASK;
+ readl_relaxed(ale->params.ale_regs + ALE_STATUS) &
+ ALE_STATUS_SIZE_MASK;
 /* The ALE on newer NetCP switches has introduced a register,
 * ALE_STATUS, to indicate the size of the ALE table, which it
 * reports as a multiple of 1024 entries.
@@ -816,9 +844,9 @@ void cpsw_ale_start(struct cpsw_ale *ale)
"ALE Table size %ld\n", ale->params.ale_entries);
/* set default bits for existing h/w */
- ale->port_mask_bits = 3;
- ale->port_num_bits = 2;
- ale->vlan_field_bits = 3;
+ ale->port_mask_bits = ale->params.ale_ports;
+ ale->port_num_bits = order_base_2(ale->params.ale_ports);
+ ale->vlan_field_bits = ale->params.ale_ports;
/* Set defaults override for ALE on NetCP NU switch and for version
* 1R3
@@ -847,57 +875,12 @@ void cpsw_ale_start(struct cpsw_ale *ale)
ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0;
ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset =
ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
- ale->port_mask_bits = ale->params.ale_ports;
- ale->port_num_bits = ale->params.ale_ports - 1;
- ale->vlan_field_bits = ale->params.ale_ports;
- } else if (ale->version == ALE_VERSION_1R3) {
- ale->port_mask_bits = ale->params.ale_ports;
- ale->port_num_bits = 3;
- ale->vlan_field_bits = ale->params.ale_ports;
}
- cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
- cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
-
- timer_setup(&ale->timer, cpsw_ale_timer, 0);
- if (ale->ageout) {
- ale->timer.expires = jiffies + ale->ageout;
- add_timer(&ale->timer);
- }
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_start);
-
-void cpsw_ale_stop(struct cpsw_ale *ale)
-{
- del_timer_sync(&ale->timer);
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_stop);
-
-struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
-{
- struct cpsw_ale *ale;
-
- ale = kzalloc(sizeof(*ale), GFP_KERNEL);
- if (!ale)
- return NULL;
-
- ale->params = *params;
- ale->ageout = ale->params.ale_ageout * HZ;
-
return ale;
}
EXPORT_SYMBOL_GPL(cpsw_ale_create);
-int cpsw_ale_destroy(struct cpsw_ale *ale)
-{
- if (!ale)
- return -EINVAL;
- cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
- kfree(ale);
- return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_destroy);
-
void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
{
int i;
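
Moving cpsw_ale_create() onto devm_kzalloc(), as above, is what lets cpsw_ale_destroy() disappear: device-managed allocations are released automatically when the device detaches. A minimal hedged illustration, with hypothetical names:

	#include <linux/device.h>
	#include <linux/slab.h>

	struct foo_table {
		int entries;
	};

	/* Freed automatically on driver detach; no foo_table_destroy() needed */
	static struct foo_table *foo_table_create(struct device *dev, int entries)
	{
		struct foo_table *t = devm_kzalloc(dev, sizeof(*t), GFP_KERNEL);

		if (t)
			t->entries = entries;
		return t;
	}
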
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 25d24e8d0904..d4fe9016429b 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -100,7 +100,6 @@ enum cpsw_ale_port_state {
#define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params);
-int cpsw_ale_destroy(struct cpsw_ale *ale);
void cpsw_ale_start(struct cpsw_ale *ale);
void cpsw_ale_stop(struct cpsw_ale *ale);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index e4d6edf387b3..6f9173ff9414 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -893,7 +893,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);
if (__chan_linear(chan_num) >= ctlr->num_chan)
- return NULL;
+ return ERR_PTR(-EINVAL);
chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
if (!chan)
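
Returning ERR_PTR(-EINVAL) instead of NULL, as cpdma_chan_create() now does, lets callers distinguish failure causes and propagate the exact errno, which the cpsw and davinci_emac probe changes above exploit. A hedged sketch of the convention, with hypothetical foo_* names:

	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo_chan {
		int num;
	};

	static struct foo_chan *foo_chan_create(int num)
	{
		struct foo_chan *chan;

		if (num < 0)
			return ERR_PTR(-EINVAL);	/* errno encoded in the pointer */

		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
		if (!chan)
			return ERR_PTR(-ENOMEM);

		chan->num = num;
		return chan;
	}

	/* Caller side: IS_ERR()/PTR_ERR() recover the errno */
	static int foo_probe_chan(struct foo_chan **out)
	{
		struct foo_chan *chan = foo_chan_create(0);

		if (IS_ERR(chan))
			return PTR_ERR(chan);

		*out = chan;
		return 0;
	}
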
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4bb561856af5..abceea802ea1 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1385,11 +1385,6 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
return -EOPNOTSUPP;
}
-static int match_first_device(struct device *dev, void *data)
-{
- return !strncmp(dev_name(dev), "davinci_mdio", 12);
-}
-
/**
* emac_dev_open - EMAC device open
* @ndev: The DaVinci EMAC network adapter
@@ -1489,8 +1484,8 @@ static int emac_dev_open(struct net_device *ndev)
/* use the first phy on the bus if pdata did not give us a phy id */
if (!phydev && !priv->phy_id) {
- phy = bus_find_device(&mdio_bus_type, NULL, NULL,
- match_first_device);
+ phy = bus_find_device_by_name(&mdio_bus_type, NULL,
+ "davinci_mdio");
if (phy) {
priv->phy_id = dev_name(phy);
if (!priv->phy_id || !*priv->phy_id)
@@ -1875,10 +1870,17 @@ static int davinci_emac_probe(struct platform_device *pdev)
priv->txchan = cpdma_chan_create(priv->dma, EMAC_DEF_TX_CH,
emac_tx_handler, 0);
+ if (IS_ERR(priv->txchan)) {
+ dev_err(&pdev->dev, "error initializing tx dma channel\n");
+ rc = PTR_ERR(priv->txchan);
+ goto no_cpdma_chan;
+ }
+
priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
emac_rx_handler, 1);
- if (WARN_ON(!priv->txchan || !priv->rxchan)) {
- rc = -ENOMEM;
+ if (IS_ERR(priv->rxchan)) {
+ dev_err(&pdev->dev, "error initializing rx dma channel\n");
+ rc = PTR_ERR(priv->rxchan);
goto no_cpdma_chan;
}
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index ed58c746e4af..f5a7eb22d0f5 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -715,7 +715,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 /* Warning: we retrieve the virtual pointer from the sw_data
 * field as a 32-bit value; this will not work on 64-bit machines.
 */
- page = (struct page *)GET_SW_DATA0(desc);
+ page = (struct page *)GET_SW_DATA0(ndesc);
if (likely(dma_buff && buf_len && page)) {
dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index e831c49713ee..56dbc0b9fedc 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -27,6 +27,7 @@
#include <linux/net_tstamp.h>
#include <linux/ethtool.h>
+#include "cpsw.h"
#include "cpsw_ale.h"
#include "netcp.h"
#include "cpts.h"
@@ -2047,10 +2048,6 @@ static const struct ethtool_ops keystone_ethtool_ops = {
.get_ts_info = keystone_get_ts_info,
};
-#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
- ((mac)[2] << 16) | ((mac)[3] << 24))
-#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
-
static void gbe_set_slave_mac(struct gbe_slave *slave,
struct gbe_intf *gbe_intf)
{
@@ -3692,7 +3689,6 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
del_timer_sync(&gbe_dev->timer);
cpts_release(gbe_dev->cpts);
cpsw_ale_stop(gbe_dev->ale);
- cpsw_ale_destroy(gbe_dev->ale);
netcp_txpipe_close(&gbe_dev->tx_pipe);
free_secondary_ports(gbe_dev);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index b718a02a6bb6..b919e89a9b93 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(rt))
return PTR_ERR(rt);
+ if (skb_dst(skb)) {
+ int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
+ GENEVE_BASE_HLEN - info->options_len - 14;
+
+ skb_dst_update_pmtu(skb, mtu);
+ }
+
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(dst))
return PTR_ERR(dst);
+ if (skb_dst(skb)) {
+ int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
+ GENEVE_BASE_HLEN - info->options_len - 14;
+
+ skb_dst_update_pmtu(skb, mtu);
+ }
+
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@@ -1638,19 +1652,16 @@ static __net_init int geneve_init_net(struct net *net)
return 0;
}
-static void __net_exit geneve_exit_net(struct net *net)
+static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *geneve, *next;
struct net_device *dev, *aux;
- LIST_HEAD(list);
-
- rtnl_lock();
/* gather any geneve devices that were moved into this ns */
for_each_netdev_safe(net, dev, aux)
if (dev->rtnl_link_ops == &geneve_link_ops)
- unregister_netdevice_queue(dev, &list);
+ unregister_netdevice_queue(dev, head);
/* now gather any other geneve devices that were created in this ns */
list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
@@ -1658,18 +1669,29 @@ static void __net_exit geneve_exit_net(struct net *net)
* to the list by the previous loop.
*/
if (!net_eq(dev_net(geneve->dev), net))
- unregister_netdevice_queue(geneve->dev, &list);
+ unregister_netdevice_queue(geneve->dev, head);
}
+ WARN_ON_ONCE(!list_empty(&gn->sock_list));
+}
+
+static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
+{
+ struct net *net;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ list_for_each_entry(net, net_list, exit_list)
+ geneve_destroy_tunnels(net, &list);
+
/* unregister the devices gathered above */
unregister_netdevice_many(&list);
rtnl_unlock();
- WARN_ON_ONCE(!list_empty(&gn->sock_list));
}
static struct pernet_operations geneve_net_ops = {
.init = geneve_init_net,
- .exit = geneve_exit_net,
+ .exit_batch = geneve_exit_batch_net,
.id = &geneve_net_id,
.size = sizeof(struct geneve_net),
};
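
Switching geneve from .exit to .exit_batch means rtnl_lock() and the final unregister_netdevice_many() run once for a whole batch of dying namespaces instead of once per namespace. A hedged outline of the shape, with the per-namespace teardown left abstract and foo_* names hypothetical:

	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>
	#include <net/net_namespace.h>

	static void foo_destroy_per_ns(struct net *net, struct list_head *kill_list)
	{
		/* hypothetical: queue this namespace's devices onto kill_list
		 * with unregister_netdevice_queue()
		 */
	}

	static void __net_exit foo_exit_batch_net(struct list_head *net_list)
	{
		struct net *net;
		LIST_HEAD(list);

		rtnl_lock();			/* taken once for the whole batch */
		list_for_each_entry(net, net_list, exit_list)
			foo_destroy_per_ns(net, &list);

		unregister_netdevice_many(&list);	/* one pass for all namespaces */
		rtnl_unlock();
	}

	static struct pernet_operations foo_net_ops = {
		.exit_batch = foo_exit_batch_net,
	};
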
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 8483f03d5a41..1ab97d99b9ba 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1379,8 +1379,8 @@ static int rr_close(struct net_device *dev)
rrpriv->info_dma);
rrpriv->info = NULL;
- free_irq(pdev->irq, dev);
spin_unlock_irqrestore(&rrpriv->lock, flags);
+ free_irq(pdev->irq, dev);
return 0;
}
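
The rrunner reordering matters because free_irq() can sleep: it waits for any in-flight handler to complete, so calling it under spin_lock_irqsave() risks deadlock and sleeps in atomic context. A hedged sketch of the safe shape, with hypothetical names:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	struct foo_priv {
		spinlock_t lock;
		bool running;
	};

	static void foo_close(struct foo_priv *priv, unsigned int irq, void *dev_id)
	{
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		priv->running = false;		/* state the IRQ handler checks */
		spin_unlock_irqrestore(&priv->lock, flags);

		/* May sleep while a handler finishes: only after the lock drops */
		free_irq(irq, dev_id);
	}
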
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 88ddfb92122b..0db3bd1ea06f 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -146,7 +146,6 @@ struct hv_netvsc_packet {
struct netvsc_device_info {
unsigned char mac_adr[ETH_ALEN];
- int ring_size;
u32 num_chn;
u32 send_sections;
u32 recv_sections;
@@ -188,18 +187,22 @@ struct rndis_message;
struct netvsc_device;
struct net_device_context;
+extern u32 netvsc_ring_bytes;
+extern struct reciprocal_value netvsc_ring_reciprocal;
+
struct netvsc_device *netvsc_device_add(struct hv_device *device,
const struct netvsc_device_info *info);
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
void netvsc_device_remove(struct hv_device *device);
-int netvsc_send(struct net_device_context *ndc,
+int netvsc_send(struct net_device *net,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer *page_buffer,
struct sk_buff *skb);
-void netvsc_linkstatus_callback(struct hv_device *device_obj,
+void netvsc_linkstatus_callback(struct net_device *net,
struct rndis_message *resp);
int netvsc_recv_callback(struct net_device *net,
+ struct netvsc_device *nvdev,
struct vmbus_channel *channel,
void *data, u32 len,
const struct ndis_tcp_ip_checksum_info *csum_info,
@@ -220,7 +223,6 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
const u8 *key);
int rndis_filter_receive(struct net_device *ndev,
struct netvsc_device *net_dev,
- struct hv_device *dev,
struct vmbus_channel *channel,
void *data, u32 buflen);
@@ -635,14 +637,27 @@ struct nvsp_message {
#define NETVSC_MTU 65535
#define NETVSC_MTU_MIN ETH_MIN_MTU
-#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
-#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
-#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */
+/* Max buffer sizes allowed by a host */
+#define NETVSC_RECEIVE_BUFFER_SIZE (1024 * 1024 * 31) /* 31MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024 * 1024 * 15) /* 15MB */
+#define NETVSC_RECEIVE_BUFFER_DEFAULT (1024 * 1024 * 16)
+
+#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */
+#define NETVSC_SEND_BUFFER_DEFAULT (1024 * 1024)
+
#define NETVSC_INVALID_INDEX -1
#define NETVSC_SEND_SECTION_SIZE 6144
#define NETVSC_RECV_SECTION_SIZE 1728
+/* Default size of TX buf: 1MB, RX buf: 16MB */
+#define NETVSC_MIN_TX_SECTIONS 10
+#define NETVSC_DEFAULT_TX (NETVSC_SEND_BUFFER_DEFAULT \
+ / NETVSC_SEND_SECTION_SIZE)
+#define NETVSC_MIN_RX_SECTIONS 10
+#define NETVSC_DEFAULT_RX (NETVSC_RECEIVE_BUFFER_DEFAULT \
+ / NETVSC_RECV_SECTION_SIZE)
+
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
#define NETVSC_SEND_BUFFER_ID 0
@@ -690,6 +705,7 @@ struct netvsc_ethtool_stats {
unsigned long tx_busy;
unsigned long tx_send_full;
unsigned long rx_comp_busy;
+ unsigned long rx_no_memory;
unsigned long stop_queue;
unsigned long wake_queue;
};
@@ -804,13 +820,9 @@ struct netvsc_device {
struct rndis_device *extension;
- int ring_size;
-
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
- atomic_t open_cnt;
-
struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
struct rcu_head rcu;
@@ -1425,32 +1437,6 @@ struct rndis_message {
(sizeof(msg) + (sizeof(struct rndis_message) - \
sizeof(union rndis_message_container)))
-/* get pointer to info buffer with message pointer */
-#define MESSAGE_TO_INFO_BUFFER(msg) \
- (((unsigned char *)(msg)) + msg->info_buf_offset)
-
-/* get pointer to status buffer with message pointer */
-#define MESSAGE_TO_STATUS_BUFFER(msg) \
- (((unsigned char *)(msg)) + msg->status_buf_offset)
-
-/* get pointer to OOBD buffer with message pointer */
-#define MESSAGE_TO_OOBD_BUFFER(msg) \
- (((unsigned char *)(msg)) + msg->oob_data_offset)
-
-/* get pointer to data buffer with message pointer */
-#define MESSAGE_TO_DATA_BUFFER(msg) \
- (((unsigned char *)(msg)) + msg->per_pkt_info_offset)
-
-/* get pointer to contained message from NDIS_MESSAGE pointer */
-#define RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(rndis_msg) \
- ((void *) &rndis_msg->msg)
-
-/* get pointer to contained message from NDIS_MESSAGE pointer */
-#define RNDIS_MESSAGE_RAW_PTR_TO_MESSAGE_PTR(rndis_msg) \
- ((void *) rndis_msg)
-
-
-
#define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \
sizeof(union rndis_message_container))
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index bfc79698b8f4..17e529af79dc 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -31,6 +31,7 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
+#include <linux/reciprocal_div.h>
#include <asm/sync_bitops.h>
@@ -72,7 +73,7 @@ static struct netvsc_device *alloc_net_device(void)
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
- atomic_set(&net_device->open_cnt, 0);
+
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -267,6 +268,11 @@ static int netvsc_init_buf(struct hv_device *device,
buf_size = device_info->recv_sections * device_info->recv_section_size;
buf_size = roundup(buf_size, PAGE_SIZE);
+ /* Legacy hosts only allow smaller receive buffer */
+ if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
+ buf_size = min_t(unsigned int, buf_size,
+ NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);
+
net_device->recv_buf = vzalloc(buf_size);
if (!net_device->recv_buf) {
netdev_err(ndev,
@@ -588,14 +594,11 @@ void netvsc_device_remove(struct hv_device *device)
* Get the percentage of available bytes to write in the ring.
 * The return value is in the range 0 to 100.
*/
-static inline u32 hv_ringbuf_avail_percent(
- struct hv_ring_buffer_info *ring_info)
+static u32 hv_ringbuf_avail_percent(const struct hv_ring_buffer_info *ring_info)
{
- u32 avail_read, avail_write;
-
- hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
+ u32 avail_write = hv_get_bytes_to_write(ring_info);
- return avail_write * 100 / ring_info->ring_datasize;
+ return reciprocal_divide(avail_write * 100, netvsc_ring_reciprocal);
}
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
@@ -698,26 +701,26 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
return NETVSC_INVALID_INDEX;
}
-static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
- unsigned int section_index,
- u32 pend_size,
- struct hv_netvsc_packet *packet,
- struct rndis_message *rndis_msg,
- struct hv_page_buffer *pb,
- struct sk_buff *skb)
+static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ unsigned int section_index,
+ u32 pend_size,
+ struct hv_netvsc_packet *packet,
+ struct rndis_message *rndis_msg,
+ struct hv_page_buffer *pb,
+ bool xmit_more)
{
char *start = net_device->send_buf;
char *dest = start + (section_index * net_device->send_section_size)
+ pend_size;
int i;
- u32 msg_size = 0;
u32 padding = 0;
- u32 remain = packet->total_data_buflen % net_device->pkt_align;
u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
packet->page_buf_cnt;
+ u32 remain;
/* Add padding */
- if (skb->xmit_more && remain && !packet->cp_partial) {
+ remain = packet->total_data_buflen & (net_device->pkt_align - 1);
+ if (xmit_more && remain) {
padding = net_device->pkt_align - remain;
rndis_msg->msg_len += padding;
packet->total_data_buflen += padding;
@@ -729,16 +732,11 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
u32 len = pb[i].len;
memcpy(dest, (src + offset), len);
- msg_size += len;
dest += len;
}
- if (padding) {
+ if (padding)
memset(dest, 0, padding);
- msg_size += padding;
- }
-
- return msg_size;
}
static inline int netvsc_send_pkt(
@@ -831,12 +829,13 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
}
/* RCU already held by caller */
-int netvsc_send(struct net_device_context *ndev_ctx,
+int netvsc_send(struct net_device *ndev,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer *pb,
struct sk_buff *skb)
{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct netvsc_device *net_device
= rcu_dereference_bh(ndev_ctx->nvdev);
struct hv_device *device = ndev_ctx->device_ctx;
@@ -847,8 +846,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
struct multi_send_data *msdp;
struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
struct sk_buff *msd_skb = NULL;
- bool try_batch;
- bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
+ bool try_batch, xmit_more;
/* If device is rescinded, return error and packet will get dropped. */
if (unlikely(!net_device || net_device->destroy))
@@ -899,10 +897,17 @@ int netvsc_send(struct net_device_context *ndev_ctx,
}
}
+ /* Keep aggregating only if the stack says more data is coming,
+ * we are not doing a mixed-mode (cp_partial) send, and the queue
+ * is not flow blocked.
+ */
+ xmit_more = skb->xmit_more &&
+ !packet->cp_partial &&
+ !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
+
if (section_index != NETVSC_INVALID_INDEX) {
netvsc_copy_to_send_buf(net_device,
section_index, msd_len,
- packet, rndis_msg, pb, skb);
+ packet, rndis_msg, pb, xmit_more);
packet->send_buf_index = section_index;
@@ -922,7 +927,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
if (msdp->skb)
dev_consume_skb_any(msdp->skb);
- if (xmit_more && !packet->cp_partial) {
+ if (xmit_more) {
msdp->skb = skb;
msdp->pkt = packet;
msdp->count++;
@@ -1085,7 +1090,7 @@ static int netvsc_receive(struct net_device *ndev,
u32 buflen = vmxferpage_packet->ranges[i].byte_count;
/* Pass it to the upper layer */
- status = rndis_filter_receive(ndev, net_device, device,
+ status = rndis_filter_receive(ndev, net_device,
channel, data, buflen);
}
@@ -1249,7 +1254,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
const struct netvsc_device_info *device_info)
{
int i, ret = 0;
- int ring_size = device_info->ring_size;
struct netvsc_device *net_device;
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -1261,8 +1265,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
net_device_ctx->tx_table[i] = 0;
- net_device->ring_size = ring_size;
-
/* Because the device uses NAPI, all the interrupt batching and
* control is done via Net softirq, not the channel handling
*/
@@ -1289,10 +1291,9 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
netvsc_poll, NAPI_POLL_WEIGHT);
/* Open the channel */
- ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
- ring_size * PAGE_SIZE, NULL, 0,
- netvsc_channel_cb,
- net_device->chan_table);
+ ret = vmbus_open(device->channel, netvsc_ring_bytes,
+ netvsc_ring_bytes, NULL, 0,
+ netvsc_channel_cb, net_device->chan_table);
if (ret != 0) {
netif_napi_del(&net_device->chan_table[0].napi);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5129647d420c..c5584c2d440e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
+#include <linux/reciprocal_div.h>
#include <net/arp.h>
#include <net/route.h>
@@ -46,17 +47,15 @@
#include "hyperv_net.h"
#define RING_SIZE_MIN 64
-#define NETVSC_MIN_TX_SECTIONS 10
-#define NETVSC_DEFAULT_TX 192 /* ~1M */
-#define NETVSC_MIN_RX_SECTIONS 10 /* ~64K */
-#define NETVSC_DEFAULT_RX 10485 /* Max ~16M */
#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)
-static int ring_size = 128;
-module_param(ring_size, int, S_IRUGO);
+static unsigned int ring_size __ro_after_init = 128;
+module_param(ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+unsigned int netvsc_ring_bytes __ro_after_init;
+struct reciprocal_value netvsc_ring_reciprocal __ro_after_init;
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
@@ -174,17 +173,15 @@ out:
return ret;
}
-static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
- int pkt_type)
+static inline void *init_ppi_data(struct rndis_message *msg,
+ u32 ppi_size, u32 pkt_type)
{
- struct rndis_packet *rndis_pkt;
+ struct rndis_packet *rndis_pkt = &msg->msg.pkt;
struct rndis_per_packet_info *ppi;
- rndis_pkt = &msg->msg.pkt;
rndis_pkt->data_offset += ppi_size;
-
- ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
- rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);
+ ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
+ + rndis_pkt->per_pkt_info_len;
ppi->size = ppi_size;
ppi->type = pkt_type;
@@ -192,7 +189,7 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
rndis_pkt->per_pkt_info_len += ppi_size;
- return ppi;
+ return ppi + 1;
}
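
init_ppi_data() now returns 'ppi + 1', i.e. the payload area immediately after
the per-packet-info header, which lets every caller drop its own ppi_offset
arithmetic. A minimal sketch of the 'header + 1' idiom, with hypothetical
struct and function names:

    #include <stdio.h>

    struct hdr {
            unsigned int size;
            unsigned int type;
            /* payload follows immediately */
    };

    /* Fill in the header, return the payload area just past it. */
    static void *init_hdr(struct hdr *h, unsigned int size, unsigned int type)
    {
            h->size = size;
            h->type = type;
            return h + 1;           /* pointer arithmetic in units of *h */
    }

    int main(void)
    {
            unsigned char buf[64];
            unsigned int *payload = init_hdr((struct hdr *)buf, sizeof(buf), 1);

            *payload = 0xdeadbeef;  /* lands at buf + sizeof(struct hdr) */
            printf("%p %p\n", (void *)buf, (void *)payload);
            return 0;
    }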
/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
@@ -469,10 +466,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
int ret;
unsigned int num_data_pgs;
struct rndis_message *rndis_msg;
- struct rndis_packet *rndis_pkt;
struct net_device *vf_netdev;
u32 rndis_msg_size;
- struct rndis_per_packet_info *ppi;
u32 hash;
struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
@@ -527,34 +522,36 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
rndis_msg = (struct rndis_message *)skb->head;
- memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);
-
/* Add the rndis header */
rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
rndis_msg->msg_len = packet->total_data_buflen;
- rndis_pkt = &rndis_msg->msg.pkt;
- rndis_pkt->data_offset = sizeof(struct rndis_packet);
- rndis_pkt->data_len = packet->total_data_buflen;
- rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
+
+ rndis_msg->msg.pkt = (struct rndis_packet) {
+ .data_offset = sizeof(struct rndis_packet),
+ .data_len = packet->total_data_buflen,
+ .per_pkt_info_offset = sizeof(struct rndis_packet),
+ };
rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
hash = skb_get_hash_raw(skb);
if (hash != 0 && net->real_num_tx_queues > 1) {
+ u32 *hash_info;
+
rndis_msg_size += NDIS_HASH_PPI_SIZE;
- ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
- NBL_HASH_VALUE);
- *(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
+ hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
+ NBL_HASH_VALUE);
+ *hash_info = hash;
}
if (skb_vlan_tag_present(skb)) {
struct ndis_pkt_8021q_info *vlan;
rndis_msg_size += NDIS_VLAN_PPI_SIZE;
- ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
- IEEE_8021Q_INFO);
+ vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
+ IEEE_8021Q_INFO);
- vlan = (void *)ppi + ppi->ppi_offset;
+ vlan->value = 0;
vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
VLAN_PRIO_SHIFT;
@@ -564,11 +561,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
- ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
- TCP_LARGESEND_PKTINFO);
-
- lso_info = (void *)ppi + ppi->ppi_offset;
+ lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
+ TCP_LARGESEND_PKTINFO);
+ lso_info->value = 0;
lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
if (skb->protocol == htons(ETH_P_IP)) {
lso_info->lso_v2_transmit.ip_version =
@@ -593,12 +589,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct ndis_tcp_ip_checksum_info *csum_info;
rndis_msg_size += NDIS_CSUM_PPI_SIZE;
- ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
- TCPIP_CHKSUM_PKTINFO);
-
- csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
- ppi->ppi_offset);
+ csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
+ TCPIP_CHKSUM_PKTINFO);
+ csum_info->value = 0;
csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
if (skb->protocol == htons(ETH_P_IP)) {
@@ -632,7 +626,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
/* timestamp packet in software */
skb_tx_timestamp(skb);
- ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
+ ret = netvsc_send(net, packet, rndis_msg, pb, skb);
if (likely(ret == 0))
return NETDEV_TX_OK;
@@ -658,22 +652,14 @@ no_memory:
/*
* netvsc_linkstatus_callback - Link up/down notification
*/
-void netvsc_linkstatus_callback(struct hv_device *device_obj,
+void netvsc_linkstatus_callback(struct net_device *net,
struct rndis_message *resp)
{
struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
- struct net_device *net;
- struct net_device_context *ndev_ctx;
+ struct net_device_context *ndev_ctx = netdev_priv(net);
struct netvsc_reconfig *event;
unsigned long flags;
- net = hv_get_drvdata(device_obj);
-
- if (!net)
- return;
-
- ndev_ctx = netdev_priv(net);
-
/* Update the physical link speed when changing to another vSwitch */
if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
u32 speed;
@@ -753,34 +739,26 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
* "wire" on the specified device.
*/
int netvsc_recv_callback(struct net_device *net,
+ struct netvsc_device *net_device,
struct vmbus_channel *channel,
void *data, u32 len,
const struct ndis_tcp_ip_checksum_info *csum_info,
const struct ndis_pkt_8021q_info *vlan)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
- struct netvsc_device *net_device;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
- struct netvsc_channel *nvchan;
+ struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
struct sk_buff *skb;
struct netvsc_stats *rx_stats;
if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
- rcu_read_lock();
- net_device = rcu_dereference(net_device_ctx->nvdev);
- if (unlikely(!net_device))
- goto drop;
-
- nvchan = &net_device->chan_table[q_idx];
-
/* Allocate a skb - TODO direct I/O to pages? */
skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
csum_info, vlan, data, len);
if (unlikely(!skb)) {
-drop:
- ++net->stats.rx_dropped;
+ ++net_device_ctx->eth_stats.rx_no_memory;
rcu_read_unlock();
return NVSP_STAT_FAIL;
}
@@ -804,8 +782,6 @@ drop:
u64_stats_update_end(&rx_stats->syncp);
napi_gro_receive(&nvchan->napi, skb);
- rcu_read_unlock();
-
return 0;
}
@@ -860,7 +836,6 @@ static int netvsc_set_channels(struct net_device *net,
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = count;
- device_info.ring_size = ring_size;
device_info.send_sections = nvdev->send_section_cnt;
device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = nvdev->recv_section_cnt;
@@ -975,7 +950,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
rndis_filter_close(nvdev);
memset(&device_info, 0, sizeof(device_info));
- device_info.ring_size = ring_size;
device_info.num_chn = nvdev->num_chn;
device_info.send_sections = nvdev->send_section_cnt;
device_info.send_section_size = nvdev->send_section_size;
@@ -1133,12 +1107,13 @@ static const struct {
u16 offset;
} netvsc_stats[] = {
{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
- { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
+ { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
{ "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
{ "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
+ { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, vf_stats[] = {
@@ -1539,7 +1514,6 @@ static int netvsc_set_ringparam(struct net_device *ndev,
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = nvdev->num_chn;
- device_info.ring_size = ring_size;
device_info.send_sections = new_tx;
device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = new_rx;
@@ -1995,7 +1969,6 @@ static int netvsc_probe(struct hv_device *dev,
/* Notify the netvsc driver of the new device */
memset(&device_info, 0, sizeof(device_info));
- device_info.ring_size = ring_size;
device_info.num_chn = VRSS_CHANNEL_DEFAULT;
device_info.send_sections = NETVSC_DEFAULT_TX;
device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
@@ -2158,11 +2131,13 @@ static int __init netvsc_drv_init(void)
if (ring_size < RING_SIZE_MIN) {
ring_size = RING_SIZE_MIN;
- pr_info("Increased ring_size to %d (min allowed)\n",
+ pr_info("Increased ring_size to %u (min allowed)\n",
ring_size);
}
- ret = vmbus_driver_register(&netvsc_drv);
+ netvsc_ring_bytes = ring_size * PAGE_SIZE;
+ netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes);
+ ret = vmbus_driver_register(&netvsc_drv);
if (ret)
return ret;
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 7b637c7dd1e5..c3ca191fea7f 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -134,11 +134,9 @@ static void put_rndis_request(struct rndis_device *dev,
kfree(req);
}
-static void dump_rndis_message(struct hv_device *hv_dev,
+static void dump_rndis_message(struct net_device *netdev,
const struct rndis_message *rndis_msg)
{
- struct net_device *netdev = hv_get_drvdata(hv_dev);
-
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
@@ -217,7 +215,6 @@ static int rndis_filter_send_request(struct rndis_device *dev,
struct hv_netvsc_packet *packet;
struct hv_page_buffer page_buf[2];
struct hv_page_buffer *pb = page_buf;
- struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
int ret;
/* Setup the packet to send it */
@@ -245,7 +242,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
}
rcu_read_lock_bh();
- ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL);
+ ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
rcu_read_unlock_bh();
return ret;
@@ -354,6 +351,7 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
}
static int rndis_filter_receive_data(struct net_device *ndev,
+ struct netvsc_device *nvdev,
struct rndis_device *dev,
struct rndis_message *msg,
struct vmbus_channel *channel,
@@ -390,14 +388,14 @@ static int rndis_filter_receive_data(struct net_device *ndev,
*/
data = (void *)((unsigned long)data + data_offset);
csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
- return netvsc_recv_callback(ndev, channel,
+
+ return netvsc_recv_callback(ndev, nvdev, channel,
data, rndis_pkt->data_len,
csum_info, vlan);
}
int rndis_filter_receive(struct net_device *ndev,
struct netvsc_device *net_dev,
- struct hv_device *dev,
struct vmbus_channel *channel,
void *data, u32 buflen)
{
@@ -419,11 +417,12 @@ int rndis_filter_receive(struct net_device *ndev,
}
if (netif_msg_rx_status(net_device_ctx))
- dump_rndis_message(dev, rndis_msg);
+ dump_rndis_message(ndev, rndis_msg);
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
- return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg,
+ return rndis_filter_receive_data(ndev, net_dev,
+ rndis_dev, rndis_msg,
channel, data, buflen);
case RNDIS_MSG_INIT_C:
case RNDIS_MSG_QUERY_C:
@@ -434,7 +433,7 @@ int rndis_filter_receive(struct net_device *ndev,
case RNDIS_MSG_INDICATE:
/* notification msgs */
- netvsc_linkstatus_callback(dev, rndis_msg);
+ netvsc_linkstatus_callback(ndev, rndis_msg);
break;
default:
netdev_err(ndev,
@@ -1040,8 +1039,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
/* Set the channel before opening. */
nvchan->channel = new_sc;
- ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
- nvscdev->ring_size * PAGE_SIZE, NULL, 0,
+ ret = vmbus_open(new_sc, netvsc_ring_bytes,
+ netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, nvchan);
if (ret == 0)
napi_enable(&nvchan->napi);
@@ -1222,7 +1221,6 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
u32 mtu, size;
- const struct cpumask *node_cpu_mask;
u32 num_possible_rss_qs;
int i, ret;
@@ -1291,14 +1289,8 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
if (ret || rsscap.num_recv_que < 2)
goto out;
- /*
- * We will limit the VRSS channels to the number CPUs in the NUMA node
- * the primary channel is currently bound to.
- *
- * This also guarantees that num_possible_rss_qs <= num_online_cpus
- */
- node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
- num_possible_rss_qs = min_t(u32, cpumask_weight(node_cpu_mask),
+ /* This guarantees that num_possible_rss_qs <= num_online_cpus */
+ num_possible_rss_qs = min_t(u32, num_online_cpus(),
rsscap.num_recv_que);
net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
@@ -1362,9 +1354,6 @@ int rndis_filter_open(struct netvsc_device *nvdev)
if (!nvdev)
return -EINVAL;
- if (atomic_inc_return(&nvdev->open_cnt) != 1)
- return 0;
-
return rndis_filter_open_device(nvdev->extension);
}
@@ -1373,13 +1362,12 @@ int rndis_filter_close(struct netvsc_device *nvdev)
if (!nvdev)
return -EINVAL;
- if (atomic_dec_return(&nvdev->open_cnt) != 0)
- return 0;
-
return rndis_filter_close_device(nvdev->extension);
}
bool rndis_filter_opened(const struct netvsc_device *nvdev)
{
- return atomic_read(&nvdev->open_cnt) > 0;
+ const struct rndis_device *dev = nvdev->extension;
+
+ return dev->state == RNDIS_DEV_DATAINITIALIZED;
}
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 400fdbd3a120..64f1b1e77bc0 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1,7 +1,7 @@
/*
* Analog Devices ADF7242 Low-Power IEEE 802.15.4 Transceiver
*
- * Copyright 2009-2015 Analog Devices Inc.
+ * Copyright 2009-2017 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*
@@ -344,12 +344,18 @@ static int adf7242_wait_status(struct adf7242_local *lp, unsigned int status,
return ret;
}
-static int adf7242_wait_ready(struct adf7242_local *lp, int line)
+static int adf7242_wait_rc_ready(struct adf7242_local *lp, int line)
{
return adf7242_wait_status(lp, STAT_RC_READY | STAT_SPI_READY,
STAT_RC_READY | STAT_SPI_READY, line);
}
+static int adf7242_wait_spi_ready(struct adf7242_local *lp, int line)
+{
+ return adf7242_wait_status(lp, STAT_SPI_READY,
+ STAT_SPI_READY, line);
+}
+
static int adf7242_write_fbuf(struct adf7242_local *lp, u8 *data, u8 len)
{
u8 *buf = lp->buf;
@@ -369,7 +375,7 @@ static int adf7242_write_fbuf(struct adf7242_local *lp, u8 *data, u8 len)
spi_message_add_tail(&xfer_head, &msg);
spi_message_add_tail(&xfer_buf, &msg);
- adf7242_wait_ready(lp, __LINE__);
+ adf7242_wait_spi_ready(lp, __LINE__);
mutex_lock(&lp->bmux);
buf[0] = CMD_SPI_PKT_WR;
@@ -401,7 +407,7 @@ static int adf7242_read_fbuf(struct adf7242_local *lp,
spi_message_add_tail(&xfer_head, &msg);
spi_message_add_tail(&xfer_buf, &msg);
- adf7242_wait_ready(lp, __LINE__);
+ adf7242_wait_spi_ready(lp, __LINE__);
mutex_lock(&lp->bmux);
if (packet_read) {
@@ -432,7 +438,7 @@ static int adf7242_read_reg(struct adf7242_local *lp, u16 addr, u8 *data)
.rx_buf = lp->buf_read_rx,
};
- adf7242_wait_ready(lp, __LINE__);
+ adf7242_wait_spi_ready(lp, __LINE__);
mutex_lock(&lp->bmux);
lp->buf_read_tx[0] = CMD_SPI_MEM_RD(addr);
@@ -462,7 +468,7 @@ static int adf7242_write_reg(struct adf7242_local *lp, u16 addr, u8 data)
{
int status;
- adf7242_wait_ready(lp, __LINE__);
+ adf7242_wait_spi_ready(lp, __LINE__);
mutex_lock(&lp->bmux);
lp->buf_reg_tx[0] = CMD_SPI_MEM_WR(addr);
@@ -484,7 +490,7 @@ static int adf7242_cmd(struct adf7242_local *lp, unsigned int cmd)
dev_vdbg(&lp->spi->dev, "%s : CMD=0x%X\n", __func__, cmd);
if (cmd != CMD_RC_PC_RESET_NO_WAIT)
- adf7242_wait_ready(lp, __LINE__);
+ adf7242_wait_rc_ready(lp, __LINE__);
mutex_lock(&lp->bmux);
lp->buf_cmd = cmd;
@@ -557,6 +563,22 @@ static int adf7242_verify_firmware(struct adf7242_local *lp,
return 0;
}
+static void adf7242_clear_irqstat(struct adf7242_local *lp)
+{
+ adf7242_write_reg(lp, REG_IRQ1_SRC1, IRQ_CCA_COMPLETE | IRQ_SFD_RX |
+ IRQ_SFD_TX | IRQ_RX_PKT_RCVD | IRQ_TX_PKT_SENT |
+ IRQ_FRAME_VALID | IRQ_ADDRESS_VALID | IRQ_CSMA_CA);
+}
+
+static int adf7242_cmd_rx(struct adf7242_local *lp)
+{
+ /* Wait until the ACK is sent */
+ adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
+ adf7242_clear_irqstat(lp);
+
+ return adf7242_cmd(lp, CMD_RC_RX);
+}
+
static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm)
{
struct adf7242_local *lp = hw->priv;
@@ -660,7 +682,7 @@ static int adf7242_start(struct ieee802154_hw *hw)
struct adf7242_local *lp = hw->priv;
adf7242_cmd(lp, CMD_RC_PHY_RDY);
- adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+ adf7242_clear_irqstat(lp);
enable_irq(lp->spi->irq);
set_bit(FLAG_START, &lp->flags);
@@ -671,10 +693,10 @@ static void adf7242_stop(struct ieee802154_hw *hw)
{
struct adf7242_local *lp = hw->priv;
+ disable_irq(lp->spi->irq);
adf7242_cmd(lp, CMD_RC_IDLE);
clear_bit(FLAG_START, &lp->flags);
- disable_irq(lp->spi->irq);
- adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+ adf7242_clear_irqstat(lp);
}
static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
@@ -789,9 +811,12 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
struct adf7242_local *lp = hw->priv;
int ret;
+ /* ensure existing instances of the IRQ handler have completed */
+ disable_irq(lp->spi->irq);
set_bit(FLAG_XMIT, &lp->flags);
reinit_completion(&lp->tx_complete);
adf7242_cmd(lp, CMD_RC_PHY_RDY);
+ adf7242_clear_irqstat(lp);
ret = adf7242_write_fbuf(lp, skb->data, skb->len);
if (ret)
@@ -800,6 +825,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
ret = adf7242_cmd(lp, CMD_RC_CSMACA);
if (ret)
goto err;
+ enable_irq(lp->spi->irq);
ret = wait_for_completion_interruptible_timeout(&lp->tx_complete,
HZ / 10);
@@ -822,7 +848,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
err:
clear_bit(FLAG_XMIT, &lp->flags);
- adf7242_cmd(lp, CMD_RC_RX);
+ adf7242_cmd_rx(lp);
return ret;
}
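
The transmit path now keeps the device interrupt disabled while FLAG_XMIT, the
completion, and the frame buffer are being set up, and re-enables it only once
CMD_RC_CSMACA has been issued, so a stale run of the ISR can never observe
half-initialized transmit state. The shape of the bracket, sketched with
stand-in stubs for the kernel's disable_irq()/enable_irq():

    #include <stdio.h>

    static void disable_irq(int irq) { printf("irq %d masked, handlers drained\n", irq); }
    static void enable_irq(int irq)  { printf("irq %d unmasked\n", irq); }

    static int xmit(int irq)
    {
            disable_irq(irq);  /* no ISR can observe the state below mid-update */
            /* set FLAG_XMIT, reinit the completion, write the frame buffer */
            enable_irq(irq);   /* ISR now sees fully initialized transmit state */
            /* wait for the completion, with a timeout */
            return 0;
    }

    int main(void) { return xmit(42); }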
@@ -846,7 +872,7 @@ static int adf7242_rx(struct adf7242_local *lp)
skb = dev_alloc_skb(len);
if (!skb) {
- adf7242_cmd(lp, CMD_RC_RX);
+ adf7242_cmd_rx(lp);
return -ENOMEM;
}
@@ -854,14 +880,14 @@ static int adf7242_rx(struct adf7242_local *lp)
ret = adf7242_read_fbuf(lp, data, len, true);
if (ret < 0) {
kfree_skb(skb);
- adf7242_cmd(lp, CMD_RC_RX);
+ adf7242_cmd_rx(lp);
return ret;
}
lqi = data[len - 2];
lp->rssi = data[len - 1];
- adf7242_cmd(lp, CMD_RC_RX);
+ ret = adf7242_cmd_rx(lp);
skb_trim(skb, len - 2); /* Don't put RSSI/LQI or CRC into the frame */
@@ -870,7 +896,7 @@ static int adf7242_rx(struct adf7242_local *lp)
dev_dbg(&lp->spi->dev, "%s: ret=%d len=%d lqi=%d rssi=%d\n",
__func__, ret, (int)len, (int)lqi, lp->rssi);
- return 0;
+ return ret;
}
static const struct ieee802154_ops adf7242_ops = {
@@ -888,7 +914,7 @@ static const struct ieee802154_ops adf7242_ops = {
.set_cca_ed_level = adf7242_set_cca_ed_level,
};
-static void adf7242_debug(u8 irq1)
+static void adf7242_debug(struct adf7242_local *lp, u8 irq1)
{
#ifdef DEBUG
u8 stat;
@@ -906,9 +932,12 @@ static void adf7242_debug(u8 irq1)
irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "",
irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : "");
- dev_dbg(&lp->spi->dev, "%s STATUS = %X:\n%s\n%s%s%s%s%s\n",
+ dev_dbg(&lp->spi->dev, "%s STATUS = %X:\n%s\n%s\n%s\n%s\n%s%s%s%s%s\n",
__func__, stat,
+ stat & STAT_SPI_READY ? "SPI_READY" : "SPI_BUSY",
+ stat & STAT_IRQ_STATUS ? "IRQ_PENDING" : "IRQ_CLEAR",
stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY",
+ stat & STAT_CCA_RESULT ? "CHAN_IDLE" : "CHAN_BUSY",
(stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "",
(stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "",
(stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "",
@@ -923,20 +952,20 @@ static irqreturn_t adf7242_isr(int irq, void *data)
unsigned int xmit;
u8 irq1;
- adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
-
adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1);
- adf7242_write_reg(lp, REG_IRQ1_SRC1, irq1);
if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA)))
dev_err(&lp->spi->dev, "%s :ERROR IRQ1 = 0x%X\n",
__func__, irq1);
- adf7242_debug(irq1);
+ adf7242_debug(lp, irq1);
xmit = test_bit(FLAG_XMIT, &lp->flags);
if (xmit && (irq1 & IRQ_CSMA_CA)) {
+ adf7242_wait_status(lp, RC_STATUS_PHY_RDY,
+ RC_STATUS_MASK, __LINE__);
+
if (ADF7242_REPORT_CSMA_CA_STAT) {
u8 astat;
@@ -957,6 +986,7 @@ static irqreturn_t adf7242_isr(int irq, void *data)
lp->tx_stat = SUCCESS;
}
complete(&lp->tx_complete);
+ adf7242_clear_irqstat(lp);
} else if (!xmit && (irq1 & IRQ_RX_PKT_RCVD) &&
(irq1 & IRQ_FRAME_VALID)) {
adf7242_rx(lp);
@@ -965,16 +995,19 @@ static irqreturn_t adf7242_isr(int irq, void *data)
dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X\n",
__func__, __LINE__, irq1);
adf7242_cmd(lp, CMD_RC_PHY_RDY);
- adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
- adf7242_cmd(lp, CMD_RC_RX);
+ adf7242_cmd_rx(lp);
} else {
/* This can only be a transmit without the expected IRQ,
* most likely a RX packet arrived instead. We get a TX IRQ
* shortly - do nothing or let the xmit timeout handle this.
*/
+
dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X, xmit %d\n",
__func__, __LINE__, irq1, xmit);
+ adf7242_wait_status(lp, RC_STATUS_PHY_RDY,
+ RC_STATUS_MASK, __LINE__);
complete(&lp->tx_complete);
+ adf7242_clear_irqstat(lp);
}
return IRQ_HANDLED;
@@ -994,7 +1027,7 @@ static int adf7242_soft_reset(struct adf7242_local *lp, int line)
adf7242_set_promiscuous_mode(lp->hw, lp->promiscuous);
adf7242_set_csma_params(lp->hw, lp->min_be, lp->max_be,
lp->max_cca_retries);
- adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+ adf7242_clear_irqstat(lp);
if (test_bit(FLAG_START, &lp->flags)) {
enable_irq(lp->spi->irq);
@@ -1060,7 +1093,7 @@ static int adf7242_hw_init(struct adf7242_local *lp)
adf7242_write_reg(lp, REG_IRQ1_EN0, 0);
adf7242_write_reg(lp, REG_IRQ1_EN1, IRQ_RX_PKT_RCVD | IRQ_CSMA_CA);
- adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+ adf7242_clear_irqstat(lp);
adf7242_write_reg(lp, REG_IRQ1_SRC0, 0xFF);
adf7242_cmd(lp, CMD_RC_IDLE);
@@ -1086,8 +1119,11 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "",
irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : "");
- seq_printf(file, "STATUS = %X:\n%s\n%s%s%s%s%s\n", stat,
+ seq_printf(file, "STATUS = %X:\n%s\n%s\n%s\n%s\n%s%s%s%s%s\n", stat,
+ stat & STAT_SPI_READY ? "SPI_READY" : "SPI_BUSY",
+ stat & STAT_IRQ_STATUS ? "IRQ_PENDING" : "IRQ_CLEAR",
stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY",
+ stat & STAT_CCA_RESULT ? "CHAN_IDLE" : "CHAN_BUSY",
(stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "",
(stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "",
(stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "",
@@ -1257,12 +1293,14 @@ static int adf7242_remove(struct spi_device *spi)
static const struct of_device_id adf7242_of_match[] = {
{ .compatible = "adi,adf7242", },
+ { .compatible = "adi,adf7241", },
{ },
};
MODULE_DEVICE_TABLE(of, adf7242_of_match);
static const struct spi_device_id adf7242_device_id[] = {
{ .name = "adf7242", },
+ { .name = "adf7241", },
{ },
};
MODULE_DEVICE_TABLE(spi, adf7242_device_id);
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 7900ed066d8a..e412dfdda7dd 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2638,12 +2638,12 @@ static long ca8210_test_int_ioctl(
*
* Return: set of poll return flags
*/
-static unsigned int ca8210_test_int_poll(
+static __poll_t ca8210_test_int_poll(
struct file *filp,
struct poll_table_struct *ptable
)
{
- unsigned int return_flags = 0;
+ __poll_t return_flags = 0;
struct ca8210_priv *priv = filp->private_data;
poll_wait(filp, &priv->test.readq, ptable);
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 77cc4fbaeace..c1f008fe4e1d 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -315,13 +315,13 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
*pskb = skb;
}
- ipvlan_skb_crossing_ns(skb, dev);
if (local) {
skb->pkt_type = PACKET_HOST;
if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
success = true;
} else {
+ skb->dev = dev;
ret = RX_HANDLER_ANOTHER;
success = true;
}
@@ -586,7 +586,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
return NET_XMIT_SUCCESS;
}
- ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
+ skb->dev = ipvlan->phy_dev;
return dev_queue_xmit(skb);
}
@@ -664,8 +664,6 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
struct sk_buff *skb = *pskb;
struct ethhdr *eth = eth_hdr(skb);
rx_handler_result_t ret = RX_HANDLER_PASS;
- void *lyr3h;
- int addr_type;
if (is_multicast_ether_addr(eth->h_dest)) {
if (ipvlan_external_frame(skb, port)) {
@@ -683,15 +681,8 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
}
}
} else {
- struct ipvl_addr *addr;
-
- lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
- if (!lyr3h)
- return ret;
-
- addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
- if (addr)
- ret = ipvlan_rcv_frame(addr, pskb, false);
+ /* Handle non-multicast packets as in l3 mode */
+ ret = ipvlan_handle_mode_l3(pskb, port);
}
return ret;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 30cb803e2fe5..2469df118fbf 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -850,6 +850,19 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
return ipvlan_del_addr(ipvlan, ip6_addr, true);
}
+static bool ipvlan_is_valid_dev(const struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ if (!netif_is_ipvlan(dev))
+ return false;
+
+ if (!ipvlan || !ipvlan->port)
+ return false;
+
+ return true;
+}
+
static int ipvlan_addr6_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -857,10 +870,7 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
struct net_device *dev = (struct net_device *)if6->idev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
- if (!netif_is_ipvlan(dev))
- return NOTIFY_DONE;
-
- if (!ipvlan || !ipvlan->port)
+ if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
@@ -888,10 +898,7 @@ static int ipvlan_addr6_validator_event(struct notifier_block *unused,
if (in_softirq())
return NOTIFY_DONE;
- if (!netif_is_ipvlan(dev))
- return NOTIFY_DONE;
-
- if (!ipvlan || !ipvlan->port)
+ if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
@@ -932,10 +939,7 @@ static int ipvlan_addr4_event(struct notifier_block *unused,
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct in_addr ip4_addr;
- if (!netif_is_ipvlan(dev))
- return NOTIFY_DONE;
-
- if (!ipvlan || !ipvlan->port)
+ if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
@@ -961,10 +965,7 @@ static int ipvlan_addr4_validator_event(struct notifier_block *unused,
struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
- if (!netif_is_ipvlan(dev))
- return NOTIFY_DONE;
-
- if (!ipvlan || !ipvlan->port)
+ if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 1d025ab9568f..7de88b33d5b9 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -393,7 +393,10 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
-#define DEFAULT_SAK_LEN 16
+#define MACSEC_GCM_AES_128_SAK_LEN 16
+#define MACSEC_GCM_AES_256_SAK_LEN 32
+
+#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
@@ -2362,15 +2365,26 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);
+ u64 csid;
if (!secy_nest)
return 1;
+ switch (secy->key_len) {
+ case MACSEC_GCM_AES_128_SAK_LEN:
+ csid = MACSEC_DEFAULT_CIPHER_ID;
+ break;
+ case MACSEC_GCM_AES_256_SAK_LEN:
+ csid = MACSEC_CIPHER_ID_GCM_AES_256;
+ break;
+ default:
+ goto cancel;
+ }
+
if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
MACSEC_SECY_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
- MACSEC_DEFAULT_CIPHER_ID,
- MACSEC_SECY_ATTR_PAD) ||
+ csid, MACSEC_SECY_ATTR_PAD) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -3015,8 +3029,8 @@ static void macsec_setup(struct net_device *dev)
eth_zero_addr(dev->broadcast);
}
-static void macsec_changelink_common(struct net_device *dev,
- struct nlattr *data[])
+static int macsec_changelink_common(struct net_device *dev,
+ struct nlattr *data[])
{
struct macsec_secy *secy;
struct macsec_tx_sc *tx_sc;
@@ -3056,6 +3070,22 @@ static void macsec_changelink_common(struct net_device *dev,
if (data[IFLA_MACSEC_VALIDATION])
secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
+
+ if (data[IFLA_MACSEC_CIPHER_SUITE]) {
+ switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
+ case MACSEC_CIPHER_ID_GCM_AES_128:
+ case MACSEC_DEFAULT_CIPHER_ID:
+ secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
+ break;
+ case MACSEC_CIPHER_ID_GCM_AES_256:
+ secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
}
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -3071,9 +3101,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
data[IFLA_MACSEC_PORT])
return -EINVAL;
- macsec_changelink_common(dev, data);
-
- return 0;
+ return macsec_changelink_common(dev, data);
}
static void macsec_del_dev(struct macsec_dev *macsec)
@@ -3270,8 +3298,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (err)
goto unlink;
- if (data)
- macsec_changelink_common(dev, data);
+ if (data) {
+ err = macsec_changelink_common(dev, data);
+ if (err)
+ goto del_dev;
+ }
err = register_macsec_dev(real_dev, dev);
if (err < 0)
@@ -3320,8 +3351,9 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
}
switch (csid) {
+ case MACSEC_CIPHER_ID_GCM_AES_128:
+ case MACSEC_CIPHER_ID_GCM_AES_256:
case MACSEC_DEFAULT_CIPHER_ID:
- case MACSEC_DEFAULT_CIPHER_ALT:
if (icv_len < MACSEC_MIN_ICV_LEN ||
icv_len > MACSEC_STD_ICV_LEN)
return -EINVAL;
@@ -3390,12 +3422,24 @@ static int macsec_fill_info(struct sk_buff *skb,
{
struct macsec_secy *secy = &macsec_priv(dev)->secy;
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+ u64 csid;
+
+ switch (secy->key_len) {
+ case MACSEC_GCM_AES_128_SAK_LEN:
+ csid = MACSEC_DEFAULT_CIPHER_ID;
+ break;
+ case MACSEC_GCM_AES_256_SAK_LEN:
+ csid = MACSEC_CIPHER_ID_GCM_AES_256;
+ break;
+ default:
+ goto nla_put_failure;
+ }
if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
IFLA_MACSEC_PAD) ||
nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
- MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
+ csid, IFLA_MACSEC_PAD) ||
nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a178c5efd33e..a0f2be81d52e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1444,9 +1444,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
return 0;
unregister_netdev:
+ /* macvlan_uninit would free the macvlan port */
unregister_netdevice(dev);
+ return err;
destroy_macvlan_port:
- if (create)
+ /* the macvlan port may already have been freed by macvlan_uninit()
+ * when registration fails, so destroy the macvlan port only when
+ * it is still valid.
+ */
+ if (create && macvlan_port_get_rtnl(dev))
macvlan_port_destroy(port->dev);
return err;
}
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
new file mode 100644
index 000000000000..09388c06171d
--- /dev/null
+++ b/drivers/net/netdevsim/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_NETDEVSIM) += netdevsim.o
+
+netdevsim-objs := \
+ netdev.o \
+
+ifeq ($(CONFIG_BPF_SYSCALL),y)
+netdevsim-objs += \
+ bpf.o
+endif
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
new file mode 100644
index 000000000000..de73c1ff0939
--- /dev/null
+++ b/drivers/net/netdevsim/bpf.c
@@ -0,0 +1,643 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/rtnetlink.h>
+#include <net/pkt_cls.h>
+
+#include "netdevsim.h"
+
+#define pr_vlog(env, fmt, ...) \
+ bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)
+
+struct nsim_bpf_bound_prog {
+ struct netdevsim *ns;
+ struct bpf_prog *prog;
+ struct dentry *ddir;
+ const char *state;
+ bool is_loaded;
+ struct list_head l;
+};
+
+#define NSIM_BPF_MAX_KEYS 2
+
+struct nsim_bpf_bound_map {
+ struct netdevsim *ns;
+ struct bpf_offloaded_map *map;
+ struct mutex mutex;
+ struct nsim_map_entry {
+ void *key;
+ void *value;
+ } entry[NSIM_BPF_MAX_KEYS];
+ struct list_head l;
+};
+
+static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data)
+{
+ const char **str = file->private;
+
+ if (*str)
+ seq_printf(file, "%s\n", *str);
+
+ return 0;
+}
+
+static int nsim_debugfs_bpf_string_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, nsim_debugfs_bpf_string_read, inode->i_private);
+}
+
+static const struct file_operations nsim_bpf_string_fops = {
+ .owner = THIS_MODULE,
+ .open = nsim_debugfs_bpf_string_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
+static int
+nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
+{
+ struct nsim_bpf_bound_prog *state;
+
+ state = env->prog->aux->offload->dev_priv;
+ if (state->ns->bpf_bind_verifier_delay && !insn_idx)
+ msleep(state->ns->bpf_bind_verifier_delay);
+
+ if (insn_idx == env->prog->len - 1)
+ pr_vlog(env, "Hello from netdevsim!\n");
+
+ return 0;
+}
+
+static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
+ .insn_hook = nsim_bpf_verify_insn,
+};
+
+static bool nsim_xdp_offload_active(struct netdevsim *ns)
+{
+ return ns->xdp_prog_mode == XDP_ATTACHED_HW;
+}
+
+static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
+{
+ struct nsim_bpf_bound_prog *state;
+
+ if (!prog || !prog->aux->offload)
+ return;
+
+ state = prog->aux->offload->dev_priv;
+ state->is_loaded = loaded;
+}
+
+static int
+nsim_bpf_offload(struct netdevsim *ns, struct bpf_prog *prog, bool oldprog)
+{
+ nsim_prog_set_loaded(ns->bpf_offloaded, false);
+
+ WARN(!!ns->bpf_offloaded != oldprog,
+ "bad offload state, expected offload %sto be active",
+ oldprog ? "" : "not ");
+ ns->bpf_offloaded = prog;
+ ns->bpf_offloaded_id = prog ? prog->aux->id : 0;
+ nsim_prog_set_loaded(prog, true);
+
+ return 0;
+}
+
+int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct tc_cls_bpf_offload *cls_bpf = type_data;
+ struct bpf_prog *prog = cls_bpf->prog;
+ struct netdevsim *ns = cb_priv;
+ struct bpf_prog *oldprog;
+
+ if (type != TC_SETUP_CLSBPF) {
+ NSIM_EA(cls_bpf->common.extack,
+ "only offload of BPF classifiers supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!tc_cls_can_offload_and_chain0(ns->netdev, &cls_bpf->common))
+ return -EOPNOTSUPP;
+
+ if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
+ NSIM_EA(cls_bpf->common.extack,
+ "only ETH_P_ALL supported as filter protocol");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ns->bpf_tc_accept) {
+ NSIM_EA(cls_bpf->common.extack,
+ "netdevsim configured to reject BPF TC offload");
+ return -EOPNOTSUPP;
+ }
+ /* Note: progs without skip_sw will probably not be dev bound */
+ if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept) {
+ NSIM_EA(cls_bpf->common.extack,
+ "netdevsim configured to reject unbound programs");
+ return -EOPNOTSUPP;
+ }
+
+ if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
+ return -EOPNOTSUPP;
+
+ oldprog = cls_bpf->oldprog;
+
+ /* Don't remove if oldprog doesn't match driver's state */
+ if (ns->bpf_offloaded != oldprog) {
+ oldprog = NULL;
+ if (!cls_bpf->prog)
+ return 0;
+ if (ns->bpf_offloaded) {
+ NSIM_EA(cls_bpf->common.extack,
+ "driver and netdev offload states mismatch");
+ return -EBUSY;
+ }
+ }
+
+ return nsim_bpf_offload(ns, cls_bpf->prog, oldprog);
+}
+
+int nsim_bpf_disable_tc(struct netdevsim *ns)
+{
+ if (ns->bpf_offloaded && !nsim_xdp_offload_active(ns))
+ return -EBUSY;
+ return 0;
+}
+
+static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+ if (!nsim_xdp_offload_active(ns) && !bpf->prog)
+ return 0;
+ if (!nsim_xdp_offload_active(ns) && bpf->prog && ns->bpf_offloaded) {
+ NSIM_EA(bpf->extack, "TC program is already loaded");
+ return -EBUSY;
+ }
+
+ return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
+}
+
+static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+ int err;
+
+ if (ns->xdp_prog && (bpf->flags ^ ns->xdp_flags) & XDP_FLAGS_MODES) {
+ NSIM_EA(bpf->extack, "program loaded with different flags");
+ return -EBUSY;
+ }
+
+ if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
+ NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
+ return -EOPNOTSUPP;
+ }
+ if (bpf->command == XDP_SETUP_PROG_HW && !ns->bpf_xdpoffload_accept) {
+ NSIM_EA(bpf->extack, "XDP offload disabled in DebugFS");
+ return -EOPNOTSUPP;
+ }
+
+ if (bpf->command == XDP_SETUP_PROG_HW) {
+ err = nsim_xdp_offload_prog(ns, bpf);
+ if (err)
+ return err;
+ }
+
+ if (ns->xdp_prog)
+ bpf_prog_put(ns->xdp_prog);
+
+ ns->xdp_prog = bpf->prog;
+ ns->xdp_flags = bpf->flags;
+
+ if (!bpf->prog)
+ ns->xdp_prog_mode = XDP_ATTACHED_NONE;
+ else if (bpf->command == XDP_SETUP_PROG)
+ ns->xdp_prog_mode = XDP_ATTACHED_DRV;
+ else
+ ns->xdp_prog_mode = XDP_ATTACHED_HW;
+
+ return 0;
+}
+
+static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
+{
+ struct nsim_bpf_bound_prog *state;
+ char name[16];
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->ns = ns;
+ state->prog = prog;
+ state->state = "verify";
+
+ /* Program id is not populated yet when we create the state. */
+ sprintf(name, "%u", ns->prog_id_gen++);
+ state->ddir = debugfs_create_dir(name, ns->ddir_bpf_bound_progs);
+ if (IS_ERR_OR_NULL(state->ddir)) {
+ kfree(state);
+ return -ENOMEM;
+ }
+
+ debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
+ debugfs_create_file("state", 0400, state->ddir,
+ &state->state, &nsim_bpf_string_fops);
+ debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);
+
+ list_add_tail(&state->l, &ns->bpf_bound_progs);
+
+ prog->aux->offload->dev_priv = state;
+
+ return 0;
+}
+
+static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
+{
+ struct nsim_bpf_bound_prog *state;
+
+ state = prog->aux->offload->dev_priv;
+ WARN(state->is_loaded,
+ "offload state destroyed while program still bound");
+ debugfs_remove_recursive(state->ddir);
+ list_del(&state->l);
+ kfree(state);
+}
+
+static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+ if (bpf->prog && bpf->prog->aux->offload) {
+ NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
+ return -EINVAL;
+ }
+ if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
+ NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
+ return -EINVAL;
+ }
+ if (nsim_xdp_offload_active(ns)) {
+ NSIM_EA(bpf->extack, "xdp offload active, can't load drv prog");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int
+nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+ struct nsim_bpf_bound_prog *state;
+
+ if (!bpf->prog)
+ return 0;
+
+ if (!bpf->prog->aux->offload) {
+ NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
+ return -EINVAL;
+ }
+ if (bpf->prog->aux->offload->netdev != ns->netdev) {
+ NSIM_EA(bpf->extack, "program bound to different dev");
+ return -EINVAL;
+ }
+
+ state = bpf->prog->aux->offload->dev_priv;
+ if (WARN_ON(strcmp(state->state, "xlated"))) {
+ NSIM_EA(bpf->extack, "offloading program in bad state");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static bool
+nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
+{
+ return e->key && !memcmp(key, e->key, map->key_size);
+}
+
+static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(nmap->entry); i++)
+ if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
+ return i;
+
+ return -ENOENT;
+}
+
+static int
+nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+
+ nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
+ if (!nmap->entry[idx].key)
+ return -ENOMEM;
+ nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
+ if (!nmap->entry[idx].value) {
+ kfree(nmap->entry[idx].key);
+ nmap->entry[idx].key = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+nsim_map_get_next_key(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx = -ENOENT;
+
+ mutex_lock(&nmap->mutex);
+
+ if (key)
+ idx = nsim_map_key_find(offmap, key);
+ if (idx == -ENOENT)
+ idx = 0;
+ else
+ idx++;
+
+ for (; idx < ARRAY_SIZE(nmap->entry); idx++) {
+ if (nmap->entry[idx].key) {
+ memcpy(next_key, nmap->entry[idx].key,
+ offmap->map.key_size);
+ break;
+ }
+ }
+
+ mutex_unlock(&nmap->mutex);
+
+ if (idx == ARRAY_SIZE(nmap->entry))
+ return -ENOENT;
+ return 0;
+}
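
nsim_map_get_next_key() follows the usual BPF iteration contract: a NULL or
unknown key restarts the walk at the first occupied slot, a known key resumes
after its slot, and -ENOENT ends the walk. A self-contained sketch of how a
caller iterates a whole map under that contract (a toy reimplementation, not
kernel code):

    #include <errno.h>
    #include <stdio.h>

    /* Toy 2-entry map mirroring the semantics above. */
    static int keys[2] = { 10, 20 };

    static int get_next_key(const int *key, int *next)
    {
            int idx = -1;

            if (key)
                    for (int i = 0; i < 2; i++)
                            if (keys[i] == *key)
                                    idx = i;
            if (++idx >= 2)
                    return -ENOENT;
            *next = keys[idx];
            return 0;
    }

    int main(void)
    {
            int cur, next;
            int *key = NULL;        /* NULL key restarts the walk */

            while (get_next_key(key, &next) == 0) {
                    printf("key %d\n", next);
                    cur = next;
                    key = &cur;
            }
            return 0;
    }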
+
+static int
+nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx;
+
+ mutex_lock(&nmap->mutex);
+
+ idx = nsim_map_key_find(offmap, key);
+ if (idx >= 0)
+ memcpy(value, nmap->entry[idx].value, offmap->map.value_size);
+
+ mutex_unlock(&nmap->mutex);
+
+ return idx < 0 ? idx : 0;
+}
+
+static int
+nsim_map_update_elem(struct bpf_offloaded_map *offmap,
+ void *key, void *value, u64 flags)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx, err = 0;
+
+ mutex_lock(&nmap->mutex);
+
+ idx = nsim_map_key_find(offmap, key);
+ if (idx < 0 && flags == BPF_EXIST) {
+ err = idx;
+ goto exit_unlock;
+ }
+ if (idx >= 0 && flags == BPF_NOEXIST) {
+ err = -EEXIST;
+ goto exit_unlock;
+ }
+
+ if (idx < 0) {
+ for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++)
+ if (!nmap->entry[idx].key)
+ break;
+ if (idx == ARRAY_SIZE(nmap->entry)) {
+ err = -E2BIG;
+ goto exit_unlock;
+ }
+
+ err = nsim_map_alloc_elem(offmap, idx);
+ if (err)
+ goto exit_unlock;
+ }
+
+ memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
+ memcpy(nmap->entry[idx].value, value, offmap->map.value_size);
+exit_unlock:
+ mutex_unlock(&nmap->mutex);
+
+ return err;
+}
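
The flag handling at the top of nsim_map_update_elem() implements the standard
bpf(2) update semantics: BPF_EXIST fails when the key is absent, BPF_NOEXIST
fails with -EEXIST when it is present, and BPF_ANY (zero) accepts both,
allocating a free slot on insert. Condensed into a checkable sketch (the enum
mirrors the standard uapi values):

    #include <errno.h>
    #include <stdio.h>

    enum { BPF_ANY = 0, BPF_NOEXIST = 1, BPF_EXIST = 2 };

    /* Mirrors the checks at the top of nsim_map_update_elem(). */
    static int check_flags(int found, unsigned long long flags)
    {
            if (!found && flags == BPF_EXIST)
                    return -ENOENT;         /* must update an existing entry */
            if (found && flags == BPF_NOEXIST)
                    return -EEXIST;         /* must create a new entry */
            return 0;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   check_flags(0, BPF_EXIST),     /* -ENOENT */
                   check_flags(1, BPF_NOEXIST),   /* -EEXIST */
                   check_flags(0, BPF_ANY));      /* 0 */
            return 0;
    }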
+
+static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx;
+
+ if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
+ return -EINVAL;
+
+ mutex_lock(&nmap->mutex);
+
+ idx = nsim_map_key_find(offmap, key);
+ if (idx >= 0) {
+ kfree(nmap->entry[idx].key);
+ kfree(nmap->entry[idx].value);
+ memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx]));
+ }
+
+ mutex_unlock(&nmap->mutex);
+
+ return idx < 0 ? idx : 0;
+}
+
+static const struct bpf_map_dev_ops nsim_bpf_map_ops = {
+ .map_get_next_key = nsim_map_get_next_key,
+ .map_lookup_elem = nsim_map_lookup_elem,
+ .map_update_elem = nsim_map_update_elem,
+ .map_delete_elem = nsim_map_delete_elem,
+};
+
+static int
+nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
+{
+ struct nsim_bpf_bound_map *nmap;
+ unsigned int i;
+ int err;
+
+ if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
+ offmap->map.map_type != BPF_MAP_TYPE_HASH))
+ return -EINVAL;
+ if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
+ return -ENOMEM;
+ if (offmap->map.map_flags)
+ return -EINVAL;
+
+ nmap = kzalloc(sizeof(*nmap), GFP_USER);
+ if (!nmap)
+ return -ENOMEM;
+
+ offmap->dev_priv = nmap;
+ nmap->ns = ns;
+ nmap->map = offmap;
+ mutex_init(&nmap->mutex);
+
+ if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
+ for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
+ u32 *key;
+
+ err = nsim_map_alloc_elem(offmap, i);
+ if (err)
+ goto err_free;
+ key = nmap->entry[i].key;
+ *key = i;
+ }
+ }
+
+ offmap->dev_ops = &nsim_bpf_map_ops;
+ list_add_tail(&nmap->l, &ns->bpf_bound_maps);
+
+ return 0;
+
+err_free:
+ while (i--) {
+ kfree(nmap->entry[i].key);
+ kfree(nmap->entry[i].value);
+ }
+ kfree(nmap);
+ return err;
+}
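
The unwind loop above uses the 'while (i--)' idiom so that every entry
allocated before the failure, indices i-1 down to 0, is freed; a pre-decrement
test ('while (--i)') would stop before reaching index 0 and leak the first
entry. A tiny demonstration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int i = 3;     /* allocation failed at index 3 */

            while (i--)             /* visits 2, 1, 0 */
                    printf("free entry %u\n", i);
            return 0;
    }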
+
+static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
+ kfree(nmap->entry[i].key);
+ kfree(nmap->entry[i].value);
+ }
+ list_del_init(&nmap->l);
+ mutex_destroy(&nmap->mutex);
+ kfree(nmap);
+}
+
+int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bpf_bound_prog *state;
+ int err;
+
+ ASSERT_RTNL();
+
+ switch (bpf->command) {
+ case BPF_OFFLOAD_VERIFIER_PREP:
+ if (!ns->bpf_bind_accept)
+ return -EOPNOTSUPP;
+
+ err = nsim_bpf_create_prog(ns, bpf->verifier.prog);
+ if (err)
+ return err;
+
+ bpf->verifier.ops = &nsim_bpf_analyzer_ops;
+ return 0;
+ case BPF_OFFLOAD_TRANSLATE:
+ state = bpf->offload.prog->aux->offload->dev_priv;
+
+ state->state = "xlated";
+ return 0;
+ case BPF_OFFLOAD_DESTROY:
+ nsim_bpf_destroy_prog(bpf->offload.prog);
+ return 0;
+ case XDP_QUERY_PROG:
+ bpf->prog_attached = ns->xdp_prog_mode;
+ bpf->prog_id = ns->xdp_prog ? ns->xdp_prog->aux->id : 0;
+ bpf->prog_flags = ns->xdp_prog ? ns->xdp_flags : 0;
+ return 0;
+ case XDP_SETUP_PROG:
+ err = nsim_setup_prog_checks(ns, bpf);
+ if (err)
+ return err;
+
+ return nsim_xdp_set_prog(ns, bpf);
+ case XDP_SETUP_PROG_HW:
+ err = nsim_setup_prog_hw_checks(ns, bpf);
+ if (err)
+ return err;
+
+ return nsim_xdp_set_prog(ns, bpf);
+ case BPF_OFFLOAD_MAP_ALLOC:
+ if (!ns->bpf_map_accept)
+ return -EOPNOTSUPP;
+
+ return nsim_bpf_map_alloc(ns, bpf->offmap);
+ case BPF_OFFLOAD_MAP_FREE:
+ nsim_bpf_map_free(bpf->offmap);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int nsim_bpf_init(struct netdevsim *ns)
+{
+ INIT_LIST_HEAD(&ns->bpf_bound_progs);
+ INIT_LIST_HEAD(&ns->bpf_bound_maps);
+
+ debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
+ &ns->bpf_offloaded_id);
+
+ ns->bpf_bind_accept = true;
+ debugfs_create_bool("bpf_bind_accept", 0600, ns->ddir,
+ &ns->bpf_bind_accept);
+ debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir,
+ &ns->bpf_bind_verifier_delay);
+ ns->ddir_bpf_bound_progs =
+ debugfs_create_dir("bpf_bound_progs", ns->ddir);
+ if (IS_ERR_OR_NULL(ns->ddir_bpf_bound_progs))
+ return -ENOMEM;
+
+ ns->bpf_tc_accept = true;
+ debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir,
+ &ns->bpf_tc_accept);
+ debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ns->ddir,
+ &ns->bpf_tc_non_bound_accept);
+ ns->bpf_xdpdrv_accept = true;
+ debugfs_create_bool("bpf_xdpdrv_accept", 0600, ns->ddir,
+ &ns->bpf_xdpdrv_accept);
+ ns->bpf_xdpoffload_accept = true;
+ debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns->ddir,
+ &ns->bpf_xdpoffload_accept);
+
+ ns->bpf_map_accept = true;
+ debugfs_create_bool("bpf_map_accept", 0600, ns->ddir,
+ &ns->bpf_map_accept);
+
+ return 0;
+}
+
+void nsim_bpf_uninit(struct netdevsim *ns)
+{
+ WARN_ON(!list_empty(&ns->bpf_bound_progs));
+ WARN_ON(!list_empty(&ns->bpf_bound_maps));
+ WARN_ON(ns->xdp_prog);
+ WARN_ON(ns->bpf_offloaded);
+}
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
new file mode 100644
index 000000000000..3fd567928f3d
--- /dev/null
+++ b/drivers/net/netdevsim/netdev.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <net/netlink.h>
+#include <net/pkt_cls.h>
+#include <net/rtnetlink.h>
+
+#include "netdevsim.h"
+
+struct nsim_vf_config {
+ int link_state;
+ u16 min_tx_rate;
+ u16 max_tx_rate;
+ u16 vlan;
+ __be16 vlan_proto;
+ u16 qos;
+ u8 vf_mac[ETH_ALEN];
+ bool spoofchk_enabled;
+ bool trusted;
+ bool rss_query_enabled;
+};
+
+static u32 nsim_dev_id;
+
+static int nsim_num_vf(struct device *dev)
+{
+ struct netdevsim *ns = to_nsim(dev);
+
+ return ns->num_vfs;
+}
+
+static struct bus_type nsim_bus = {
+ .name = DRV_NAME,
+ .dev_name = DRV_NAME,
+ .num_vf = nsim_num_vf,
+};
+
+static int nsim_vfs_enable(struct netdevsim *ns, unsigned int num_vfs)
+{
+ ns->vfconfigs = kcalloc(num_vfs, sizeof(struct nsim_vf_config),
+ GFP_KERNEL);
+ if (!ns->vfconfigs)
+ return -ENOMEM;
+ ns->num_vfs = num_vfs;
+
+ return 0;
+}
+
+static void nsim_vfs_disable(struct netdevsim *ns)
+{
+ kfree(ns->vfconfigs);
+ ns->vfconfigs = NULL;
+ ns->num_vfs = 0;
+}
+
+static ssize_t
+nsim_numvfs_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netdevsim *ns = to_nsim(dev);
+ unsigned int num_vfs;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &num_vfs);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ if (ns->num_vfs == num_vfs)
+ goto exit_good;
+ if (ns->num_vfs && num_vfs) {
+ ret = -EBUSY;
+ goto exit_unlock;
+ }
+
+ if (num_vfs) {
+ ret = nsim_vfs_enable(ns, num_vfs);
+ if (ret)
+ goto exit_unlock;
+ } else {
+ nsim_vfs_disable(ns);
+ }
+exit_good:
+ ret = count;
+exit_unlock:
+ rtnl_unlock();
+
+ return ret;
+}
+
+static ssize_t
+nsim_numvfs_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct netdevsim *ns = to_nsim(dev);
+
+ return sprintf(buf, "%u\n", ns->num_vfs);
+}
+
+static struct device_attribute nsim_numvfs_attr =
+ __ATTR(sriov_numvfs, 0664, nsim_numvfs_show, nsim_numvfs_store);
+
+static struct attribute *nsim_dev_attrs[] = {
+ &nsim_numvfs_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group nsim_dev_attr_group = {
+ .attrs = nsim_dev_attrs,
+};
+
+static const struct attribute_group *nsim_dev_attr_groups[] = {
+ &nsim_dev_attr_group,
+ NULL,
+};
+
+static void nsim_dev_release(struct device *dev)
+{
+ struct netdevsim *ns = to_nsim(dev);
+
+ nsim_vfs_disable(ns);
+ free_netdev(ns->netdev);
+}
+
+static struct device_type nsim_dev_type = {
+ .groups = nsim_dev_attr_groups,
+ .release = nsim_dev_release,
+};
+
+static int nsim_init(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ int err;
+
+ ns->netdev = dev;
+ ns->ddir = debugfs_create_dir(netdev_name(dev), nsim_ddir);
+ if (IS_ERR_OR_NULL(ns->ddir))
+ return -ENOMEM;
+
+ err = nsim_bpf_init(ns);
+ if (err)
+ goto err_debugfs_destroy;
+
+ ns->dev.id = nsim_dev_id++;
+ ns->dev.bus = &nsim_bus;
+ ns->dev.type = &nsim_dev_type;
+ err = device_register(&ns->dev);
+ if (err)
+ goto err_bpf_uninit;
+
+ SET_NETDEV_DEV(dev, &ns->dev);
+
+ return 0;
+
+err_bpf_uninit:
+ nsim_bpf_uninit(ns);
+err_debugfs_destroy:
+ debugfs_remove_recursive(ns->ddir);
+ return err;
+}
+
+static void nsim_uninit(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ debugfs_remove_recursive(ns->ddir);
+ nsim_bpf_uninit(ns);
+}
+
+static void nsim_free(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ device_unregister(&ns->dev);
+	/* netdev and VF state will be freed by device_release() */
+}
+
+static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
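+	/* No real link: account the frame in the tx stats and drop it. */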
+ u64_stats_update_begin(&ns->syncp);
+ ns->tx_packets++;
+ ns->tx_bytes += skb->len;
+ u64_stats_update_end(&ns->syncp);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void nsim_set_rx_mode(struct net_device *dev)
+{
+}
+
+static int nsim_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
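+	/* XDP attached in driver mode caps the usable MTU; refuse larger values. */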
+ if (ns->xdp_prog_mode == XDP_ATTACHED_DRV &&
+ new_mtu > NSIM_XDP_MAX_MTU)
+ return -EBUSY;
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static void
+nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ unsigned int start;
+
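+	/* Loop until the seqcount read gives a consistent stats snapshot. */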
+ do {
+ start = u64_stats_fetch_begin(&ns->syncp);
+ stats->tx_bytes = ns->tx_bytes;
+ stats->tx_packets = ns->tx_packets;
+ } while (u64_stats_fetch_retry(&ns->syncp, start));
+}
+
+static int
+nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ return nsim_bpf_setup_tc_block_cb(type, type_data, cb_priv);
+}
+
+static int
+nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb,
+ ns, ns);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+	/* Only refuse multicast addresses; the zero address can mean unset/any. */
+ if (vf >= ns->num_vfs || is_multicast_ether_addr(mac))
+ return -EINVAL;
+ memcpy(ns->vfconfigs[vf].vf_mac, mac, ETH_ALEN);
+
+ return 0;
+}
+
+static int nsim_set_vf_vlan(struct net_device *dev, int vf,
+ u16 vlan, u8 qos, __be16 vlan_proto)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
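+	/* VLAN id is 12 bits (max 4095); 802.1p priority is 3 bits (max 7). */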
+ if (vf >= ns->num_vfs || vlan > 4095 || qos > 7)
+ return -EINVAL;
+
+ ns->vfconfigs[vf].vlan = vlan;
+ ns->vfconfigs[vf].qos = qos;
+ ns->vfconfigs[vf].vlan_proto = vlan_proto;
+
+ return 0;
+}
+
+static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (vf >= ns->num_vfs)
+ return -EINVAL;
+
+ ns->vfconfigs[vf].min_tx_rate = min;
+ ns->vfconfigs[vf].max_tx_rate = max;
+
+ return 0;
+}
+
+static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (vf >= ns->num_vfs)
+ return -EINVAL;
+ ns->vfconfigs[vf].spoofchk_enabled = val;
+
+ return 0;
+}
+
+static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (vf >= ns->num_vfs)
+ return -EINVAL;
+ ns->vfconfigs[vf].rss_query_enabled = val;
+
+ return 0;
+}
+
+static int nsim_set_vf_trust(struct net_device *dev, int vf, bool val)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (vf >= ns->num_vfs)
+ return -EINVAL;
+ ns->vfconfigs[vf].trusted = val;
+
+ return 0;
+}
+
+static int
+nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (vf >= ns->num_vfs)
+ return -EINVAL;
+
+ ivi->vf = vf;
+ ivi->linkstate = ns->vfconfigs[vf].link_state;
+ ivi->min_tx_rate = ns->vfconfigs[vf].min_tx_rate;
+ ivi->max_tx_rate = ns->vfconfigs[vf].max_tx_rate;
+ ivi->vlan = ns->vfconfigs[vf].vlan;
+ ivi->vlan_proto = ns->vfconfigs[vf].vlan_proto;
+ ivi->qos = ns->vfconfigs[vf].qos;
+ memcpy(&ivi->mac, ns->vfconfigs[vf].vf_mac, ETH_ALEN);
+ ivi->spoofchk = ns->vfconfigs[vf].spoofchk_enabled;
+ ivi->trusted = ns->vfconfigs[vf].trusted;
+ ivi->rss_query_en = ns->vfconfigs[vf].rss_query_enabled;
+
+ return 0;
+}
+
+static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (vf >= ns->num_vfs)
+ return -EINVAL;
+
+ switch (state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ case IFLA_VF_LINK_STATE_ENABLE:
+ case IFLA_VF_LINK_STATE_DISABLE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ns->vfconfigs[vf].link_state = state;
+
+ return 0;
+}
+
+static int
+nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return nsim_setup_tc_block(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nsim_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC))
+ return nsim_bpf_disable_tc(ns);
+
+ return 0;
+}
+
+static const struct net_device_ops nsim_netdev_ops = {
+ .ndo_init = nsim_init,
+ .ndo_uninit = nsim_uninit,
+ .ndo_start_xmit = nsim_start_xmit,
+ .ndo_set_rx_mode = nsim_set_rx_mode,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = nsim_change_mtu,
+ .ndo_get_stats64 = nsim_get_stats64,
+ .ndo_set_vf_mac = nsim_set_vf_mac,
+ .ndo_set_vf_vlan = nsim_set_vf_vlan,
+ .ndo_set_vf_rate = nsim_set_vf_rate,
+ .ndo_set_vf_spoofchk = nsim_set_vf_spoofchk,
+ .ndo_set_vf_trust = nsim_set_vf_trust,
+ .ndo_get_vf_config = nsim_get_vf_config,
+ .ndo_set_vf_link_state = nsim_set_vf_link_state,
+ .ndo_set_vf_rss_query_en = nsim_set_vf_rss_query_en,
+ .ndo_setup_tc = nsim_setup_tc,
+ .ndo_set_features = nsim_set_features,
+ .ndo_bpf = nsim_bpf,
+};
+
+static void nsim_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+ eth_hw_addr_random(dev);
+
+ dev->netdev_ops = &nsim_netdev_ops;
+ dev->priv_destructor = nsim_free;
+
+ dev->tx_queue_len = 0;
+ dev->flags |= IFF_NOARP;
+ dev->flags &= ~IFF_MULTICAST;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE |
+ IFF_NO_QUEUE;
+ dev->features |= NETIF_F_HIGHDMA |
+ NETIF_F_SG |
+ NETIF_F_FRAGLIST |
+ NETIF_F_HW_CSUM |
+ NETIF_F_TSO;
+ dev->hw_features |= NETIF_F_HW_TC;
+ dev->max_mtu = ETH_MAX_MTU;
+}
+
+static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
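+/* Registering a link kind lets userspace create instances, e.g. with
+ * iproute2:  ip link add nsim0 type netdevsim
+ */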
+static struct rtnl_link_ops nsim_link_ops __read_mostly = {
+ .kind = DRV_NAME,
+ .priv_size = sizeof(struct netdevsim),
+ .setup = nsim_setup,
+ .validate = nsim_validate,
+};
+
+struct dentry *nsim_ddir;
+
+static int __init nsim_module_init(void)
+{
+ int err;
+
+ nsim_ddir = debugfs_create_dir(DRV_NAME, NULL);
+ if (IS_ERR_OR_NULL(nsim_ddir))
+ return -ENOMEM;
+
+ err = bus_register(&nsim_bus);
+ if (err)
+ goto err_debugfs_destroy;
+
+ err = rtnl_link_register(&nsim_link_ops);
+ if (err)
+ goto err_unreg_bus;
+
+ return 0;
+
+err_unreg_bus:
+ bus_unregister(&nsim_bus);
+err_debugfs_destroy:
+ debugfs_remove_recursive(nsim_ddir);
+ return err;
+}
+
+static void __exit nsim_module_exit(void)
+{
+ rtnl_link_unregister(&nsim_link_ops);
+ bus_unregister(&nsim_bus);
+ debugfs_remove_recursive(nsim_ddir);
+}
+
+module_init(nsim_module_init);
+module_exit(nsim_module_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
new file mode 100644
index 000000000000..ea081c10efb8
--- /dev/null
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/u64_stats_sync.h>
+
+#define DRV_NAME "netdevsim"
+
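+/* Cap enforced by nsim_change_mtu() while an XDP program is attached in
+ * driver mode.
+ */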
+#define NSIM_XDP_MAX_MTU 4000
+
+#define NSIM_EA(extack, msg) NL_SET_ERR_MSG_MOD((extack), msg)
+
+struct bpf_prog;
+struct dentry;
+struct nsim_vf_config;
+
+struct netdevsim {
+ struct net_device *netdev;
+
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+
+ struct device dev;
+
+ struct dentry *ddir;
+
+ unsigned int num_vfs;
+ struct nsim_vf_config *vfconfigs;
+
+ struct bpf_prog *bpf_offloaded;
+ u32 bpf_offloaded_id;
+
+ u32 xdp_flags;
+ int xdp_prog_mode;
+ struct bpf_prog *xdp_prog;
+
+ u32 prog_id_gen;
+
+ bool bpf_bind_accept;
+ u32 bpf_bind_verifier_delay;
+ struct dentry *ddir_bpf_bound_progs;
+ struct list_head bpf_bound_progs;
+
+ bool bpf_tc_accept;
+ bool bpf_tc_non_bound_accept;
+ bool bpf_xdpdrv_accept;
+ bool bpf_xdpoffload_accept;
+
+ bool bpf_map_accept;
+ struct list_head bpf_bound_maps;
+};
+
+extern struct dentry *nsim_ddir;
+
+#ifdef CONFIG_BPF_SYSCALL
+int nsim_bpf_init(struct netdevsim *ns);
+void nsim_bpf_uninit(struct netdevsim *ns);
+int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+int nsim_bpf_disable_tc(struct netdevsim *ns);
+int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv);
+#else
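+/* Stubs for kernels without CONFIG_BPF_SYSCALL: setup becomes a no-op and
+ * the offload callbacks report "not supported".
+ */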
+static inline int nsim_bpf_init(struct netdevsim *ns)
+{
+ return 0;
+}
+
+static inline void nsim_bpf_uninit(struct netdevsim *ns)
+{
+}
+
+static inline int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ return bpf->command == XDP_QUERY_PROG ? 0 : -EOPNOTSUPP;
+}
+
+static inline int nsim_bpf_disable_tc(struct netdevsim *ns)
+{
+ return 0;
+}
+
+static inline int
+nsim_bpf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
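+/* Recover the containing netdevsim from its embedded struct device. */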
+static inline struct netdevsim *to_nsim(struct device *ptr)
+{
+ return container_of(ptr, struct netdevsim, dev);
+}
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 18141c022b13..6fe5dc9201d0 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -68,8 +68,6 @@ static struct phy_driver am79c_driver[] = { {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = am79c_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = am79c_ack_interrupt,
.config_intr = am79c_config_intr,
} };
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 5f93e6add563..411cf1072bae 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -71,7 +71,6 @@ MODULE_LICENSE("GPL");
struct at803x_priv {
bool phy_reset:1;
- struct gpio_desc *gpiod_reset;
};
struct at803x_context {
@@ -216,60 +215,33 @@ static int at803x_suspend(struct phy_device *phydev)
int value;
int wol_enabled;
- mutex_lock(&phydev->lock);
-
value = phy_read(phydev, AT803X_INTR_ENABLE);
wol_enabled = value & AT803X_INTR_ENABLE_WOL;
- value = phy_read(phydev, MII_BMCR);
-
if (wol_enabled)
- value |= BMCR_ISOLATE;
+ value = BMCR_ISOLATE;
else
- value |= BMCR_PDOWN;
-
- phy_write(phydev, MII_BMCR, value);
+ value = BMCR_PDOWN;
- mutex_unlock(&phydev->lock);
+ phy_modify(phydev, MII_BMCR, 0, value);
return 0;
}
static int at803x_resume(struct phy_device *phydev)
{
- int value;
-
- mutex_lock(&phydev->lock);
-
- value = phy_read(phydev, MII_BMCR);
- value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
- phy_write(phydev, MII_BMCR, value);
-
- mutex_unlock(&phydev->lock);
-
- return 0;
+ return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
}
static int at803x_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct at803x_priv *priv;
- struct gpio_desc *gpiod_reset;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- if (phydev->drv->phy_id != ATH8030_PHY_ID)
- goto does_not_require_reset_workaround;
-
- gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(gpiod_reset))
- return PTR_ERR(gpiod_reset);
-
- priv->gpiod_reset = gpiod_reset;
-
-does_not_require_reset_workaround:
phydev->priv = priv;
return 0;
@@ -343,14 +315,14 @@ static void at803x_link_change_notify(struct phy_device *phydev)
* cannot recover from by software.
*/
if (phydev->state == PHY_NOLINK) {
- if (priv->gpiod_reset && !priv->phy_reset) {
+ if (phydev->mdio.reset && !priv->phy_reset) {
struct at803x_context context;
at803x_context_save(phydev, &context);
- gpiod_set_value(priv->gpiod_reset, 1);
+ phy_device_reset(phydev, 1);
msleep(1);
- gpiod_set_value(priv->gpiod_reset, 0);
+ phy_device_reset(phydev, 0);
msleep(1);
at803x_context_restore(phydev, &context);
@@ -408,8 +380,6 @@ static struct phy_driver at803x_driver[] = {
.resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -426,8 +396,6 @@ static struct phy_driver at803x_driver[] = {
.resume = at803x_resume,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -443,8 +411,6 @@ static struct phy_driver at803x_driver[] = {
.resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index 3fe8cc5c177e..6838129839ca 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -136,8 +136,6 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
.name = "Broadcom Cygnus PHY",
.features = PHY_GBIT_FEATURES,
.config_init = bcm_cygnus_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index b0492ef2cdaa..cf14613745c9 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -69,8 +69,6 @@ static struct phy_driver bcm63xx_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
}, {
@@ -81,8 +79,6 @@ static struct phy_driver bcm63xx_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
} };
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 8b33f688ac8a..421feb8f92fe 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -611,8 +611,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.features = PHY_GBIT_FEATURES, \
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_28nm_config_init, \
- .config_aneg = genphy_config_aneg, \
- .read_status = genphy_read_status, \
.resume = bcm7xxx_28nm_resume, \
.get_tunable = bcm7xxx_28nm_get_tunable, \
.set_tunable = bcm7xxx_28nm_set_tunable, \
@@ -630,8 +628,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.features = PHY_BASIC_FEATURES, \
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_28nm_ephy_config_init, \
- .config_aneg = genphy_config_aneg, \
- .read_status = genphy_read_status, \
.resume = bcm7xxx_28nm_ephy_resume, \
.get_sset_count = bcm_phy_get_sset_count, \
.get_strings = bcm_phy_get_strings, \
@@ -647,8 +643,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.features = PHY_BASIC_FEATURES, \
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_config_init, \
- .config_aneg = genphy_config_aneg, \
- .read_status = genphy_read_status, \
.suspend = bcm7xxx_suspend, \
.resume = bcm7xxx_config_init, \
}
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index d7ed69deabfb..3bb6b66dc7bf 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -540,6 +540,37 @@ static int brcm_fet_config_intr(struct phy_device *phydev)
return err;
}
+struct bcm53xx_phy_priv {
+ u64 *stats;
+};
+
+static int bcm53xx_phy_probe(struct phy_device *phydev)
+{
+ struct bcm53xx_phy_priv *priv;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
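+	/* One u64 accumulator per statistic exposed through ethtool. */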
+ priv->stats = devm_kcalloc(&phydev->mdio.dev,
+ bcm_phy_get_sset_count(phydev), sizeof(u64),
+ GFP_KERNEL);
+ if (!priv->stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void bcm53xx_phy_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct bcm53xx_phy_priv *priv = phydev->priv;
+
+ bcm_phy_get_stats(phydev, priv->stats, stats, data);
+}
+
static struct phy_driver broadcom_drivers[] = {
{
.phy_id = PHY_ID_BCM5411,
@@ -548,8 +579,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -559,8 +588,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -570,8 +597,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -581,8 +606,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -592,8 +615,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -603,8 +624,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -614,8 +633,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -626,7 +643,6 @@ static struct phy_driver broadcom_drivers[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -637,7 +653,6 @@ static struct phy_driver broadcom_drivers[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -647,7 +662,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm5482_config_init,
- .config_aneg = genphy_config_aneg,
.read_status = bcm5482_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -658,8 +672,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -669,8 +681,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -680,8 +690,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
@@ -691,8 +699,6 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
}, {
@@ -702,10 +708,18 @@ static struct phy_driver broadcom_drivers[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
+}, {
+ .phy_id = PHY_ID_BCM5395,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM5395",
+ .flags = PHY_IS_INTERNAL,
+ .features = PHY_GBIT_FEATURES,
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm53xx_phy_get_stats,
+ .probe = bcm53xx_phy_probe,
} };
module_phy_driver(broadcom_drivers);
@@ -726,6 +740,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM57780, 0xfffffff0 },
{ PHY_ID_BCMAC131, 0xfffffff0 },
{ PHY_ID_BCM5241, 0xfffffff0 },
+ { PHY_ID_BCM5395, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index d339c1afea77..c05af00bf4b6 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -110,8 +110,6 @@ static struct phy_driver cis820x_driver[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &cis820x_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
}, {
@@ -121,8 +119,6 @@ static struct phy_driver cis820x_driver[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &cis820x_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
} };
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index e28913d9ea7e..5ee99b3b428c 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -153,7 +153,6 @@ static struct phy_driver dm91xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
}, {
@@ -164,7 +163,6 @@ static struct phy_driver dm91xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
}, {
@@ -175,7 +173,6 @@ static struct phy_driver dm91xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
}, {
@@ -184,8 +181,6 @@ static struct phy_driver dm91xx_driver[] = {
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
} };
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index cbd629822f04..654f42d00092 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1502,8 +1502,6 @@ static struct phy_driver dp83640_driver = {
.probe = dp83640_probe,
.remove = dp83640_remove,
.config_init = dp83640_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = dp83640_ack_interrupt,
.config_intr = dp83640_config_intr,
.ts_info = dp83640_ts_info,
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 14335d14e9e4..6e8a2a4f3a6e 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -325,8 +325,6 @@ static struct phy_driver dp83822_driver[] = {
.set_wol = dp83822_set_wol,
.ack_interrupt = dp83822_ack_interrupt,
.config_intr = dp83822_config_intr,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.suspend = dp83822_suspend,
.resume = dp83822_resume,
},
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 3966d43c5146..cd09c3af2117 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -95,8 +95,6 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
.config_init = genphy_config_init, \
.suspend = genphy_suspend, \
.resume = genphy_resume, \
- .config_aneg = genphy_config_aneg, \
- .read_status = genphy_read_status, \
\
/* IRQ related */ \
.ack_interrupt = dp83848_ack_interrupt, \
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c1ab976cc800..ab58224f897f 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -324,8 +324,6 @@ static struct phy_driver dp83867_driver[] = {
.ack_interrupt = dp83867_ack_interrupt,
.config_intr = dp83867_config_intr,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
},
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index eb5167210681..001fe1df7557 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -115,37 +115,6 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
}
EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
-int fixed_phy_update_state(struct phy_device *phydev,
- const struct fixed_phy_status *status,
- const struct fixed_phy_status *changed)
-{
- struct fixed_mdio_bus *fmb = &platform_fmb;
- struct fixed_phy *fp;
-
- if (!phydev || phydev->mdio.bus != fmb->mii_bus)
- return -EINVAL;
-
- list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->addr == phydev->mdio.addr) {
- write_seqcount_begin(&fp->seqcount);
-#define _UPD(x) if (changed->x) \
- fp->status.x = status->x
- _UPD(link);
- _UPD(speed);
- _UPD(duplex);
- _UPD(pause);
- _UPD(asym_pause);
-#undef _UPD
- fixed_phy_update(fp);
- write_seqcount_end(&fp->seqcount);
- return 0;
- }
- }
-
- return -ENOENT;
-}
-EXPORT_SYMBOL(fixed_phy_update_state);
-
int fixed_phy_add(unsigned int irq, int phy_addr,
struct fixed_phy_status *status,
int link_gpio)
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 567280a72241..791587a49215 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -227,8 +227,6 @@ static struct phy_driver icplus_driver[] = {
.phy_id_mask = 0x0ffffff0,
.features = PHY_GBIT_FEATURES,
.config_init = &ip1001_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -239,8 +237,6 @@ static struct phy_driver icplus_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.ack_interrupt = ip101a_g_ack_interrupt,
.config_init = &ip101a_g_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index 55f8c52dd2f1..a11f80cb5388 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -243,7 +243,6 @@ static struct phy_driver xway_gphy[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
@@ -257,7 +256,6 @@ static struct phy_driver xway_gphy[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
@@ -271,7 +269,6 @@ static struct phy_driver xway_gphy[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
@@ -285,7 +282,6 @@ static struct phy_driver xway_gphy[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
@@ -298,8 +294,6 @@ static struct phy_driver xway_gphy[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
@@ -312,8 +306,6 @@ static struct phy_driver xway_gphy[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
@@ -326,8 +318,6 @@ static struct phy_driver xway_gphy[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
@@ -340,8 +330,6 @@ static struct phy_driver xway_gphy[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
.config_intr = xway_gphy_config_intr,
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 09d215177fff..c14b254b2879 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -259,8 +259,6 @@ static struct phy_driver lxt97x_driver[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = lxt970_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = lxt970_ack_interrupt,
.config_intr = lxt970_config_intr,
}, {
@@ -269,8 +267,6 @@ static struct phy_driver lxt97x_driver[] = {
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
}, {
@@ -290,7 +286,6 @@ static struct phy_driver lxt97x_driver[] = {
.flags = 0,
.probe = lxt973_probe,
.config_aneg = lxt973_config_aneg,
- .read_status = genphy_read_status,
} };
module_phy_driver(lxt97x_driver);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 4d02b27df044..22d9bc9c33a4 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -83,7 +83,7 @@
#define MII_88E1121_PHY_MSCR_REG 21
#define MII_88E1121_PHY_MSCR_RX_DELAY BIT(5)
#define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4)
-#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(BIT(5) | BIT(4)))
+#define MII_88E1121_PHY_MSCR_DELAY_MASK (BIT(5) | BIT(4))
#define MII_88E1121_MISC_TEST 0x1a
#define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK 0x1f00
@@ -96,6 +96,17 @@
#define MII_88E1510_TEMP_SENSOR 0x1b
#define MII_88E1510_TEMP_SENSOR_MASK 0xff
+#define MII_88E6390_MISC_TEST 0x1b
+#define MII_88E6390_MISC_TEST_SAMPLE_1S 0
+#define MII_88E6390_MISC_TEST_SAMPLE_10MS BIT(14)
+#define MII_88E6390_MISC_TEST_SAMPLE_DISABLE BIT(15)
+#define MII_88E6390_MISC_TEST_SAMPLE_ENABLE 0
+#define MII_88E6390_MISC_TEST_SAMPLE_MASK (0x3 << 14)
+
+#define MII_88E6390_TEMP_SENSOR 0x1c
+#define MII_88E6390_TEMP_SENSOR_MASK 0xff
+#define MII_88E6390_TEMP_SENSOR_SAMPLES 10
+
#define MII_88E1318S_PHY_MSCR1_REG 16
#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
@@ -177,27 +188,19 @@ struct marvell_priv {
struct device *hwmon_dev;
};
-static int marvell_get_page(struct phy_device *phydev)
+static int marvell_read_page(struct phy_device *phydev)
{
- return phy_read(phydev, MII_MARVELL_PHY_PAGE);
+ return __phy_read(phydev, MII_MARVELL_PHY_PAGE);
}
-static int marvell_set_page(struct phy_device *phydev, int page)
+static int marvell_write_page(struct phy_device *phydev, int page)
{
- return phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
+ return __phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
}
-static int marvell_get_set_page(struct phy_device *phydev, int page)
+static int marvell_set_page(struct phy_device *phydev, int page)
{
- int oldpage = marvell_get_page(phydev);
-
- if (oldpage < 0)
- return oldpage;
-
- if (page != oldpage)
- return marvell_set_page(phydev, page);
-
- return 0;
+ return phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
}
static int marvell_ack_interrupt(struct phy_device *phydev)
@@ -399,7 +402,7 @@ static int m88e1111_config_aneg(struct phy_device *phydev)
static int marvell_of_reg_init(struct phy_device *phydev)
{
const __be32 *paddr;
- int len, i, saved_page, current_page, ret;
+ int len, i, saved_page, current_page, ret = 0;
if (!phydev->mdio.dev.of_node)
return 0;
@@ -409,12 +412,11 @@ static int marvell_of_reg_init(struct phy_device *phydev)
if (!paddr || len < (4 * sizeof(*paddr)))
return 0;
- saved_page = marvell_get_page(phydev);
+ saved_page = phy_save_page(phydev);
if (saved_page < 0)
- return saved_page;
+ goto err;
current_page = saved_page;
- ret = 0;
len /= sizeof(*paddr);
for (i = 0; i < len - 3; i += 4) {
u16 page = be32_to_cpup(paddr + i);
@@ -425,14 +427,14 @@ static int marvell_of_reg_init(struct phy_device *phydev)
if (page != current_page) {
current_page = page;
- ret = marvell_set_page(phydev, page);
+ ret = marvell_write_page(phydev, page);
if (ret < 0)
goto err;
}
val = 0;
if (mask) {
- val = phy_read(phydev, reg);
+ val = __phy_read(phydev, reg);
if (val < 0) {
ret = val;
goto err;
@@ -441,17 +443,12 @@ static int marvell_of_reg_init(struct phy_device *phydev)
}
val |= val_bits;
- ret = phy_write(phydev, reg, val);
+ ret = __phy_write(phydev, reg, val);
if (ret < 0)
goto err;
}
err:
- if (current_page != saved_page) {
- i = marvell_set_page(phydev, saved_page);
- if (ret == 0)
- ret = i;
- }
- return ret;
+ return phy_restore_page(phydev, saved_page, ret);
}
#else
static int marvell_of_reg_init(struct phy_device *phydev)
@@ -462,34 +459,21 @@ static int marvell_of_reg_init(struct phy_device *phydev)
static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev)
{
- int err, oldpage, mscr;
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE);
- if (oldpage < 0)
- return oldpage;
-
- mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG);
- if (mscr < 0) {
- err = mscr;
- goto out;
- }
-
- mscr &= MII_88E1121_PHY_MSCR_DELAY_MASK;
+ int mscr;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
- mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY |
- MII_88E1121_PHY_MSCR_TX_DELAY);
+ mscr = MII_88E1121_PHY_MSCR_RX_DELAY |
+ MII_88E1121_PHY_MSCR_TX_DELAY;
else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
+ mscr = MII_88E1121_PHY_MSCR_RX_DELAY;
else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
-
- err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
-
-out:
- marvell_set_page(phydev, oldpage);
+ mscr = MII_88E1121_PHY_MSCR_TX_DELAY;
+ else
+ mscr = 0;
- return err;
+ return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1121_PHY_MSCR_REG,
+ MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
}
static int m88e1121_config_aneg(struct phy_device *phydev)
@@ -498,7 +482,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
if (phy_interface_is_rgmii(phydev)) {
err = m88e1121_config_aneg_rgmii_delays(phydev);
- if (err)
+ if (err < 0)
return err;
}
@@ -515,20 +499,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
static int m88e1318_config_aneg(struct phy_device *phydev)
{
- int err, oldpage, mscr;
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE);
- if (oldpage < 0)
- return oldpage;
-
- mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG);
- mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD;
-
- err = phy_write(phydev, MII_88E1318S_PHY_MSCR1_REG, mscr);
- if (err < 0)
- return err;
+ int err;
- err = marvell_set_page(phydev, oldpage);
+ err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1318S_PHY_MSCR1_REG,
+ 0, MII_88E1318S_PHY_MSCR1_PAD_ODD);
if (err < 0)
return err;
@@ -637,6 +612,10 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
if (err < 0)
goto error;
+ /* Do not touch the fiber page if we're in copper->sgmii mode */
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
+ return 0;
+
/* Then the fiber link */
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
@@ -696,19 +675,14 @@ static int m88e1116r_config_init(struct phy_device *phydev)
static int m88e3016_config_init(struct phy_device *phydev)
{
- int reg;
+ int ret;
/* Enable Scrambler and Auto-Crossover */
- reg = phy_read(phydev, MII_88E3016_PHY_SPEC_CTRL);
- if (reg < 0)
- return reg;
-
- reg &= ~MII_88E3016_DISABLE_SCRAMBLER;
- reg |= MII_88E3016_AUTO_MDIX_CROSSOVER;
-
- reg = phy_write(phydev, MII_88E3016_PHY_SPEC_CTRL, reg);
- if (reg < 0)
- return reg;
+ ret = phy_modify(phydev, MII_88E3016_PHY_SPEC_CTRL,
+ MII_88E3016_DISABLE_SCRAMBLER,
+ MII_88E3016_AUTO_MDIX_CROSSOVER);
+ if (ret < 0)
+ return ret;
return marvell_config_init(phydev);
}
@@ -717,42 +691,33 @@ static int m88e1111_config_init_hwcfg_mode(struct phy_device *phydev,
u16 mode,
int fibre_copper_auto)
{
- int temp;
-
- temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
- if (temp < 0)
- return temp;
-
- temp &= ~(MII_M1111_HWCFG_MODE_MASK |
- MII_M1111_HWCFG_FIBER_COPPER_AUTO |
- MII_M1111_HWCFG_FIBER_COPPER_RES);
- temp |= mode;
-
if (fibre_copper_auto)
- temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+ mode |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
- return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ return phy_modify(phydev, MII_M1111_PHY_EXT_SR,
+ MII_M1111_HWCFG_MODE_MASK |
+ MII_M1111_HWCFG_FIBER_COPPER_AUTO |
+ MII_M1111_HWCFG_FIBER_COPPER_RES,
+ mode);
}
static int m88e1111_config_init_rgmii_delays(struct phy_device *phydev)
{
- int temp;
-
- temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
- if (temp < 0)
- return temp;
+ int delay;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
- temp |= (MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY);
+ delay = MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY;
} else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- temp &= ~MII_M1111_RGMII_TX_DELAY;
- temp |= MII_M1111_RGMII_RX_DELAY;
+ delay = MII_M1111_RGMII_RX_DELAY;
} else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- temp &= ~MII_M1111_RGMII_RX_DELAY;
- temp |= MII_M1111_RGMII_TX_DELAY;
+ delay = MII_M1111_RGMII_TX_DELAY;
+ } else {
+ delay = 0;
}
- return phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
+ return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
+ MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY,
+ delay);
}
static int m88e1111_config_init_rgmii(struct phy_device *phydev)
@@ -798,7 +763,7 @@ static int m88e1111_config_init_rtbi(struct phy_device *phydev)
int err;
err = m88e1111_config_init_rgmii_delays(phydev);
- if (err)
+ if (err < 0)
return err;
err = m88e1111_config_init_hwcfg_mode(
@@ -825,7 +790,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
if (phy_interface_is_rgmii(phydev)) {
err = m88e1111_config_init_rgmii(phydev);
- if (err)
+ if (err < 0)
return err;
}
@@ -850,20 +815,15 @@ static int m88e1111_config_init(struct phy_device *phydev)
static int m88e1121_config_init(struct phy_device *phydev)
{
- int err, oldpage;
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_LED_PAGE);
- if (oldpage < 0)
- return oldpage;
+ int err;
/* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
- err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
- MII_88E1121_PHY_LED_DEF);
+ err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
+ MII_88E1121_PHY_LED_CTRL,
+ MII_88E1121_PHY_LED_DEF);
if (err < 0)
return err;
- marvell_set_page(phydev, oldpage);
-
/* Set marvell,reg-init configuration from device tree */
return marvell_config_init(phydev);
}
@@ -871,26 +831,26 @@ static int m88e1121_config_init(struct phy_device *phydev)
static int m88e1510_config_init(struct phy_device *phydev)
{
int err;
- int temp;
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ u32 pause;
+
/* Select page 18 */
err = marvell_set_page(phydev, 18);
if (err < 0)
return err;
/* In reg 20, write MODE[2:0] = 0x1 (SGMII to Copper) */
- temp = phy_read(phydev, MII_88E1510_GEN_CTRL_REG_1);
- temp &= ~MII_88E1510_GEN_CTRL_REG_1_MODE_MASK;
- temp |= MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII;
- err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp);
+ err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1,
+ MII_88E1510_GEN_CTRL_REG_1_MODE_MASK,
+ MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII);
if (err < 0)
return err;
/* PHY reset is necessary after changing MODE[2:0] */
- temp |= MII_88E1510_GEN_CTRL_REG_1_RESET;
- err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp);
+ err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, 0,
+ MII_88E1510_GEN_CTRL_REG_1_RESET);
if (err < 0)
return err;
@@ -898,6 +858,16 @@ static int m88e1510_config_init(struct phy_device *phydev)
err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
+
+ /* There appears to be a bug in the 88e1512 when used in
+	 * SGMII to copper mode, where the AN advertisement register
+	 * clears the pause bits each time a negotiation occurs.
+	 * This means we can never be truly sure what was advertised,
+ * so disable Pause support.
+ */
+ pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ phydev->supported &= ~pause;
+ phydev->advertising &= ~pause;
}
return m88e1121_config_init(phydev);
@@ -986,7 +956,6 @@ static int m88e1149_config_init(struct phy_device *phydev)
static int m88e1145_config_init_rgmii(struct phy_device *phydev)
{
- int temp;
int err;
err = m88e1111_config_init_rgmii_delays(phydev);
@@ -998,15 +967,9 @@ static int m88e1145_config_init_rgmii(struct phy_device *phydev)
if (err < 0)
return err;
- temp = phy_read(phydev, 0x1e);
- if (temp < 0)
- return temp;
-
- temp &= 0xf03f;
- temp |= 2 << 9; /* 36 ohm */
- temp |= 2 << 6; /* 39 ohm */
-
- err = phy_write(phydev, 0x1e, temp);
+ err = phy_modify(phydev, 0x1e, 0x0fc0,
+ 2 << 9 | /* 36 ohm */
+ 2 << 6); /* 39 ohm */
if (err < 0)
return err;
@@ -1382,100 +1345,98 @@ static int m88e1121_did_interrupt(struct phy_device *phydev)
static void m88e1318_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
+ int oldpage, ret = 0;
+
wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
- if (marvell_set_page(phydev, MII_MARVELL_WOL_PAGE) < 0)
- return;
+ oldpage = phy_select_page(phydev, MII_MARVELL_WOL_PAGE);
+ if (oldpage < 0)
+ goto error;
- if (phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL) &
- MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
+ ret = __phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
+ if (ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
wol->wolopts |= WAKE_MAGIC;
- if (marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE) < 0)
- return;
+error:
+ phy_restore_page(phydev, oldpage, ret);
}
static int m88e1318_set_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
- int err, oldpage, temp;
+ int err = 0, oldpage;
- oldpage = marvell_get_page(phydev);
+ oldpage = phy_save_page(phydev);
+ if (oldpage < 0)
+ goto error;
if (wol->wolopts & WAKE_MAGIC) {
/* Explicitly switch to page 0x00, just to be sure */
- err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+ err = marvell_write_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
- return err;
+ goto error;
/* Enable the WOL interrupt */
- temp = phy_read(phydev, MII_88E1318S_PHY_CSIER);
- temp |= MII_88E1318S_PHY_CSIER_WOL_EIE;
- err = phy_write(phydev, MII_88E1318S_PHY_CSIER, temp);
+ err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
+ MII_88E1318S_PHY_CSIER_WOL_EIE);
if (err < 0)
- return err;
+ goto error;
- err = marvell_set_page(phydev, MII_MARVELL_LED_PAGE);
+ err = marvell_write_page(phydev, MII_MARVELL_LED_PAGE);
if (err < 0)
- return err;
+ goto error;
/* Setup LED[2] as interrupt pin (active low) */
- temp = phy_read(phydev, MII_88E1318S_PHY_LED_TCR);
- temp &= ~MII_88E1318S_PHY_LED_TCR_FORCE_INT;
- temp |= MII_88E1318S_PHY_LED_TCR_INTn_ENABLE;
- temp |= MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW;
- err = phy_write(phydev, MII_88E1318S_PHY_LED_TCR, temp);
+ err = __phy_modify(phydev, MII_88E1318S_PHY_LED_TCR,
+ MII_88E1318S_PHY_LED_TCR_FORCE_INT,
+ MII_88E1318S_PHY_LED_TCR_INTn_ENABLE |
+ MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW);
if (err < 0)
- return err;
+ goto error;
- err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE);
+ err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE);
if (err < 0)
- return err;
+ goto error;
/* Store the device address for the magic packet */
- err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2,
+ err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2,
((phydev->attached_dev->dev_addr[5] << 8) |
phydev->attached_dev->dev_addr[4]));
if (err < 0)
- return err;
- err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1,
+ goto error;
+ err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1,
((phydev->attached_dev->dev_addr[3] << 8) |
phydev->attached_dev->dev_addr[2]));
if (err < 0)
- return err;
- err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0,
+ goto error;
+ err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0,
((phydev->attached_dev->dev_addr[1] << 8) |
phydev->attached_dev->dev_addr[0]));
if (err < 0)
- return err;
+ goto error;
/* Clear WOL status and enable magic packet matching */
- temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
- temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
- temp |= MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
- err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
+ err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL, 0,
+ MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS |
+ MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE);
if (err < 0)
- return err;
+ goto error;
} else {
- err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE);
+ err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE);
if (err < 0)
- return err;
+ goto error;
/* Clear WOL status and disable magic packet matching */
- temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
- temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
- temp &= ~MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
- err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
+ err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL,
+ MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE,
+ MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS);
if (err < 0)
- return err;
+ goto error;
}
- err = marvell_set_page(phydev, oldpage);
- if (err < 0)
- return err;
-
- return 0;
+error:
+ return phy_restore_page(phydev, oldpage, err);
}
static int marvell_get_sset_count(struct phy_device *phydev)
@@ -1503,14 +1464,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
{
struct marvell_hw_stat stat = marvell_hw_stats[i];
struct marvell_priv *priv = phydev->priv;
- int oldpage, val;
+ int val;
u64 ret;
- oldpage = marvell_get_set_page(phydev, stat.page);
- if (oldpage < 0)
- return UINT64_MAX;
-
- val = phy_read(phydev, stat.reg);
+ val = phy_read_paged(phydev, stat.page, stat.reg);
if (val < 0) {
ret = UINT64_MAX;
} else {
@@ -1519,8 +1476,6 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
ret = priv->stats[i];
}
- marvell_set_page(phydev, oldpage);
-
return ret;
}
@@ -1537,51 +1492,44 @@ static void marvell_get_stats(struct phy_device *phydev,
static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
{
int oldpage;
- int ret;
+ int ret = 0;
int val;
*temp = 0;
- mutex_lock(&phydev->lock);
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
- if (oldpage < 0) {
- mutex_unlock(&phydev->lock);
- return oldpage;
- }
+ oldpage = phy_select_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0)
+ goto error;
/* Enable temperature sensor */
- ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+ ret = __phy_read(phydev, MII_88E1121_MISC_TEST);
if (ret < 0)
goto error;
- ret = phy_write(phydev, MII_88E1121_MISC_TEST,
- ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+ ret = __phy_write(phydev, MII_88E1121_MISC_TEST,
+ ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
if (ret < 0)
goto error;
/* Wait for temperature to stabilize */
usleep_range(10000, 12000);
- val = phy_read(phydev, MII_88E1121_MISC_TEST);
+ val = __phy_read(phydev, MII_88E1121_MISC_TEST);
if (val < 0) {
ret = val;
goto error;
}
/* Disable temperature sensor */
- ret = phy_write(phydev, MII_88E1121_MISC_TEST,
- ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+ ret = __phy_write(phydev, MII_88E1121_MISC_TEST,
+ ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
if (ret < 0)
goto error;
*temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000;
error:
- marvell_set_page(phydev, oldpage);
- mutex_unlock(&phydev->lock);
-
- return ret;
+ return phy_restore_page(phydev, oldpage, ret);
}
static int m88e1121_hwmon_read(struct device *dev,
@@ -1655,118 +1603,64 @@ static const struct hwmon_chip_info m88e1121_hwmon_chip_info = {
static int m88e1510_get_temp(struct phy_device *phydev, long *temp)
{
- int oldpage;
int ret;
*temp = 0;
- mutex_lock(&phydev->lock);
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
- if (oldpage < 0) {
- mutex_unlock(&phydev->lock);
- return oldpage;
- }
-
- ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR);
+ ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+ MII_88E1510_TEMP_SENSOR);
if (ret < 0)
- goto error;
+ return ret;
*temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000;
-error:
- marvell_set_page(phydev, oldpage);
- mutex_unlock(&phydev->lock);
-
- return ret;
+ return 0;
}
static int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp)
{
- int oldpage;
int ret;
*temp = 0;
- mutex_lock(&phydev->lock);
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
- if (oldpage < 0) {
- mutex_unlock(&phydev->lock);
- return oldpage;
- }
-
- ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+ ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+ MII_88E1121_MISC_TEST);
if (ret < 0)
- goto error;
+ return ret;
*temp = (((ret & MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) >>
MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT) * 5) - 25;
/* convert to mC */
*temp *= 1000;
-error:
- marvell_set_page(phydev, oldpage);
- mutex_unlock(&phydev->lock);
-
- return ret;
+ return 0;
}
static int m88e1510_set_temp_critical(struct phy_device *phydev, long temp)
{
- int oldpage;
- int ret;
-
- mutex_lock(&phydev->lock);
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
- if (oldpage < 0) {
- mutex_unlock(&phydev->lock);
- return oldpage;
- }
-
- ret = phy_read(phydev, MII_88E1121_MISC_TEST);
- if (ret < 0)
- goto error;
-
temp = temp / 1000;
temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
- ret = phy_write(phydev, MII_88E1121_MISC_TEST,
- (ret & ~MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) |
- (temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT));
-
-error:
- marvell_set_page(phydev, oldpage);
- mutex_unlock(&phydev->lock);
- return ret;
+ return phy_modify_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+ MII_88E1121_MISC_TEST,
+ MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK,
+ temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT);
}
static int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm)
{
- int oldpage;
int ret;
*alarm = false;
- mutex_lock(&phydev->lock);
-
- oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
- if (oldpage < 0) {
- mutex_unlock(&phydev->lock);
- return oldpage;
- }
-
- ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+ ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+ MII_88E1121_MISC_TEST);
if (ret < 0)
- goto error;
- *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ);
+ return ret;
-error:
- marvell_set_page(phydev, oldpage);
- mutex_unlock(&phydev->lock);
+ *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ);
- return ret;
+ return 0;
}
static int m88e1510_hwmon_read(struct device *dev,
@@ -1855,6 +1749,123 @@ static const struct hwmon_chip_info m88e1510_hwmon_chip_info = {
.info = m88e1510_hwmon_info,
};
+static int m88e6390_get_temp(struct phy_device *phydev, long *temp)
+{
+ int sum = 0;
+ int oldpage;
+ int ret = 0;
+ int i;
+
+ *temp = 0;
+
+ oldpage = phy_select_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0)
+ goto error;
+
+ /* Enable temperature sensor */
+ ret = __phy_read(phydev, MII_88E6390_MISC_TEST);
+ if (ret < 0)
+ goto error;
+
+ ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK;
+ ret |= MII_88E6390_MISC_TEST_SAMPLE_ENABLE |
+ MII_88E6390_MISC_TEST_SAMPLE_1S;
+
+ ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret);
+ if (ret < 0)
+ goto error;
+
+ /* Wait for temperature to stabilize */
+ usleep_range(10000, 12000);
+
+	/* Reading the temperature sensor is subject to an erratum: you need
+	 * to read it a number of times and take the average.
+ */
+ for (i = 0; i < MII_88E6390_TEMP_SENSOR_SAMPLES; i++) {
+ ret = __phy_read(phydev, MII_88E6390_TEMP_SENSOR);
+ if (ret < 0)
+ goto error;
+ sum += ret & MII_88E6390_TEMP_SENSOR_MASK;
+ }
+
+ sum /= MII_88E6390_TEMP_SENSOR_SAMPLES;
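+	/* Sensor reading is offset by 75 degrees C; hwmon expects millidegrees. */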
+ *temp = (sum - 75) * 1000;
+
+ /* Disable temperature sensor */
+ ret = __phy_read(phydev, MII_88E6390_MISC_TEST);
+ if (ret < 0)
+ goto error;
+
+ ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK;
+ ret |= MII_88E6390_MISC_TEST_SAMPLE_DISABLE;
+
+ ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret);
+
+error:
+ phy_restore_page(phydev, oldpage, ret);
+
+ return ret;
+}
+
+static int m88e6390_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ err = m88e6390_get_temp(phydev, temp);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static umode_t m88e6390_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static u32 m88e6390_hwmon_temp_config[] = {
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info m88e6390_hwmon_temp = {
+ .type = hwmon_temp,
+ .config = m88e6390_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *m88e6390_hwmon_info[] = {
+ &m88e1121_hwmon_chip,
+ &m88e6390_hwmon_temp,
+ NULL
+};
+
+static const struct hwmon_ops m88e6390_hwmon_hwmon_ops = {
+ .is_visible = m88e6390_hwmon_is_visible,
+ .read = m88e6390_hwmon_read,
+};
+
+static const struct hwmon_chip_info m88e6390_hwmon_chip_info = {
+ .ops = &m88e6390_hwmon_hwmon_ops,
+ .info = m88e6390_hwmon_info,
+};
+
static int marvell_hwmon_name(struct phy_device *phydev)
{
struct marvell_priv *priv = phydev->priv;
@@ -1901,6 +1912,11 @@ static int m88e1510_hwmon_probe(struct phy_device *phydev)
{
return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info);
}
+
+static int m88e6390_hwmon_probe(struct phy_device *phydev)
+{
+ return marvell_hwmon_probe(phydev, &m88e6390_hwmon_chip_info);
+}
#else
static int m88e1121_hwmon_probe(struct phy_device *phydev)
{
@@ -1911,6 +1927,11 @@ static int m88e1510_hwmon_probe(struct phy_device *phydev)
{
return 0;
}
+
+static int m88e6390_hwmon_probe(struct phy_device *phydev)
+{
+ return 0;
+}
#endif
static int marvell_probe(struct phy_device *phydev)
@@ -1948,6 +1969,17 @@ static int m88e1510_probe(struct phy_device *phydev)
return m88e1510_hwmon_probe(phydev);
}
+static int m88e6390_probe(struct phy_device *phydev)
+{
+ int err;
+
+ err = marvell_probe(phydev);
+ if (err)
+ return err;
+
+ return m88e6390_hwmon_probe(phydev);
+}
+
static struct phy_driver marvell_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88E1101,
@@ -1958,11 +1990,12 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1101_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -1976,11 +2009,12 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -1999,6 +2033,8 @@ static struct phy_driver marvell_drivers[] = {
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2012,11 +2048,12 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = &m88e1118_config_init,
.config_aneg = &m88e1118_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2036,6 +2073,8 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2057,6 +2096,8 @@ static struct phy_driver marvell_drivers[] = {
.set_wol = &m88e1318_set_wol,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2069,12 +2110,14 @@ static struct phy_driver marvell_drivers[] = {
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1145_config_init,
- .config_aneg = &marvell_config_aneg,
+ .config_aneg = &m88e1101_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2088,11 +2131,12 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = &m88e1149_config_init,
.config_aneg = &m88e1118_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2106,11 +2150,12 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2123,12 +2168,12 @@ static struct phy_driver marvell_drivers[] = {
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1116r_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2150,6 +2195,8 @@ static struct phy_driver marvell_drivers[] = {
.set_wol = &m88e1318_set_wol,
.resume = &marvell_resume,
.suspend = &marvell_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2170,6 +2217,8 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2189,6 +2238,8 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2200,7 +2251,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_aneg = &genphy_config_aneg,
.config_init = &m88e3016_config_init,
.aneg_done = &marvell_aneg_done,
.read_status = &marvell_read_status,
@@ -2209,6 +2259,8 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
@@ -2219,7 +2271,7 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E6390",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .probe = m88e1510_probe,
+ .probe = m88e6390_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2228,6 +2280,8 @@ static struct phy_driver marvell_drivers[] = {
.did_interrupt = &m88e1121_did_interrupt,
.resume = &genphy_resume,
.suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
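Every entry above gains .read_page/.write_page callbacks, which are what the new paged accessors in phy-core.c call back into. marvell_read_page() and marvell_write_page() are not part of this hunk; assuming the Marvell convention of a page-select register at address 22, they reduce to something like:

/* Sketch only: driver-side page callbacks as consumed by
 * phy_select_page() and friends. The register number 22 is the
 * conventional Marvell page select and is an assumption here.
 */
static int example_read_page(struct phy_device *phydev)
{
	return __phy_read(phydev, 22);
}

static int example_write_page(struct phy_device *phydev, int page)
{
	return __phy_write(phydev, 22, page);
}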
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 21b3f36e023a..8a0bd98fdec7 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -6,12 +6,18 @@
*
 * There appear to be several different data paths through the PHY which
* are automatically managed by the PHY. The following has been determined
- * via observation and experimentation:
+ * via observation and experimentation for a setup using single-lane Serdes:
*
* SGMII PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for <= 1G)
* 10GBASE-KR PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for 10G)
* 10GBASE-KR PHYXS -- BASE-R PCS -- Fiber
*
+ * With XAUI, observation shows:
+ *
+ * XAUI PHYXS -- <appropriate PCS as above>
+ *
+ * and no switching of the host interface mode occurs.
+ *
* If both the fiber and copper ports are connected, the first to gain
* link takes priority and the other port is completely locked out.
*/
@@ -23,19 +29,17 @@ enum {
MV_PCS_BASE_R = 0x1000,
MV_PCS_1000BASEX = 0x2000,
+ MV_PCS_PAIRSWAP = 0x8182,
+ MV_PCS_PAIRSWAP_MASK = 0x0003,
+ MV_PCS_PAIRSWAP_AB = 0x0002,
+ MV_PCS_PAIRSWAP_NONE = 0x0003,
+
/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
* registers appear to set themselves to the 0x800X when AN is
* restarted, but status registers appear readable from either.
*/
MV_AN_CTRL1000 = 0x8000, /* 1000base-T control register */
MV_AN_STAT1000 = 0x8001, /* 1000base-T status register */
-
- /* This register appears to reflect the copper status */
- MV_AN_RESULT = 0xa016,
- MV_AN_RESULT_SPD_10 = BIT(12),
- MV_AN_RESULT_SPD_100 = BIT(13),
- MV_AN_RESULT_SPD_1000 = BIT(14),
- MV_AN_RESULT_SPD_10000 = BIT(15),
};
static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg,
@@ -84,7 +88,6 @@ static int mv3310_config_init(struct phy_device *phydev)
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
- phydev->interface != PHY_INTERFACE_MODE_XGMII &&
phydev->interface != PHY_INTERFACE_MODE_XAUI &&
phydev->interface != PHY_INTERFACE_MODE_RXAUI &&
phydev->interface != PHY_INTERFACE_MODE_10GKR)
@@ -150,12 +153,18 @@ static int mv3310_config_init(struct phy_device *phydev)
if (val & MDIO_PMA_EXTABLE_1000BKX)
__set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
supported);
- if (val & MDIO_PMA_EXTABLE_100BTX)
+ if (val & MDIO_PMA_EXTABLE_100BTX) {
__set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
supported);
- if (val & MDIO_PMA_EXTABLE_10BT)
+ __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ supported);
+ }
+ if (val & MDIO_PMA_EXTABLE_10BT) {
__set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
supported);
+ __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ supported);
+ }
}
if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported))
@@ -175,6 +184,9 @@ static int mv3310_config_aneg(struct phy_device *phydev)
u32 advertising;
int ret;
+ /* We don't support manual MDI control */
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
if (phydev->autoneg == AUTONEG_DISABLE) {
ret = genphy_c45_pma_setup_forced(phydev);
if (ret < 0)
@@ -233,6 +245,24 @@ static int mv3310_aneg_done(struct phy_device *phydev)
return genphy_c45_aneg_done(phydev);
}
+static void mv3310_update_interface(struct phy_device *phydev)
+{
+ if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
+ /* The PHY automatically switches its serdes interface (and
+ * active PHYXS instance) between Cisco SGMII and 10GBase-KR
+ * modes according to the speed. Florian suggests setting
+ * phydev->interface to communicate this to the MAC. Only do
+ * this if we are already in either SGMII or 10GBase-KR mode.
+ */
+ if (phydev->speed == SPEED_10000)
+ phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ else if (phydev->speed >= SPEED_10 &&
+ phydev->speed < SPEED_10000)
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ }
+}
+
/* 10GBASE-ER,LR,LRM,SR do not support autonegotiation. */
static int mv3310_read_10gbr_status(struct phy_device *phydev)
{
@@ -240,8 +270,7 @@ static int mv3310_read_10gbr_status(struct phy_device *phydev)
phydev->speed = SPEED_10000;
phydev->duplex = DUPLEX_FULL;
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
- phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ mv3310_update_interface(phydev);
return 0;
}
@@ -264,6 +293,7 @@ static int mv3310_read_status(struct phy_device *phydev)
phydev->link = 0;
phydev->pause = 0;
phydev->asym_pause = 0;
+ phydev->mdix = 0;
val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_BASE_R + MDIO_STAT1);
if (val < 0)
@@ -294,22 +324,8 @@ static int mv3310_read_status(struct phy_device *phydev)
phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val);
- if (phydev->autoneg == AUTONEG_ENABLE) {
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MV_AN_RESULT);
- if (val < 0)
- return val;
-
- if (val & MV_AN_RESULT_SPD_10000)
- phydev->speed = SPEED_10000;
- else if (val & MV_AN_RESULT_SPD_1000)
- phydev->speed = SPEED_1000;
- else if (val & MV_AN_RESULT_SPD_100)
- phydev->speed = SPEED_100;
- else if (val & MV_AN_RESULT_SPD_10)
- phydev->speed = SPEED_10;
-
- phydev->duplex = DUPLEX_FULL;
- }
+ if (phydev->autoneg == AUTONEG_ENABLE)
+ phy_resolve_aneg_linkmode(phydev);
}
if (phydev->autoneg != AUTONEG_ENABLE) {
@@ -318,21 +334,30 @@ static int mv3310_read_status(struct phy_device *phydev)
return val;
}
- if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
- phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
- /* The PHY automatically switches its serdes interface (and
- * active PHYXS instance) between Cisco SGMII and 10GBase-KR
- * modes according to the speed. Florian suggests setting
- * phydev->interface to communicate this to the MAC. Only do
- * this if we are already in either SGMII or 10GBase-KR mode.
- */
- if (phydev->speed == SPEED_10000)
- phydev->interface = PHY_INTERFACE_MODE_10GKR;
- else if (phydev->speed >= SPEED_10 &&
- phydev->speed < SPEED_10000)
- phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ if (phydev->speed == SPEED_10000) {
+ val = genphy_c45_read_mdix(phydev);
+ if (val < 0)
+ return val;
+ } else {
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PAIRSWAP);
+ if (val < 0)
+ return val;
+
+ switch (val & MV_PCS_PAIRSWAP_MASK) {
+ case MV_PCS_PAIRSWAP_AB:
+ phydev->mdix = ETH_TP_MDI_X;
+ break;
+ case MV_PCS_PAIRSWAP_NONE:
+ phydev->mdix = ETH_TP_MDI;
+ break;
+ default:
+ phydev->mdix = ETH_TP_MDI_INVALID;
+ break;
+ }
}
+ mv3310_update_interface(phydev);
+
return 0;
}
@@ -342,7 +367,9 @@ static struct phy_driver mv3310_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88x3310",
.features = SUPPORTED_10baseT_Full |
+ SUPPORTED_10baseT_Half |
SUPPORTED_100baseT_Full |
+ SUPPORTED_100baseT_Half |
SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_TP |
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 08e0647b85e2..8d370667fa1b 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -205,6 +205,8 @@ static int unimac_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -EINVAL;
/* Just ioremap, as this MDIO block is usually integrated into an
* Ethernet MAC controller register range
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 135296508a7e..6425ce04d3f9 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
data->regulator = devm_regulator_get(&pdev->dev, "phy");
if (IS_ERR(data->regulator)) {
- if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_out_free_mdiobus;
+ }
dev_info(&pdev->dev, "no regulator found\n");
data->regulator = NULL;
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index bfd3090fb055..07c6048200c6 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -194,8 +194,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)
}
ret = xgene_enet_ecc_init(pdata);
- if (ret)
+ if (ret) {
+ if (pdata->dev->of_node)
+ clk_disable_unprepare(pdata->clk);
return ret;
+ }
xgene_gmac_reset(pdata);
return 0;
@@ -388,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev)
return ret;
mdio_bus = mdiobus_alloc();
- if (!mdio_bus)
- return -ENOMEM;
+ if (!mdio_bus) {
+ ret = -ENOMEM;
+ goto out_clk;
+ }
mdio_bus->name = "APM X-Gene MDIO bus";
@@ -418,7 +423,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)
mdio_bus->phy_mask = ~0;
ret = mdiobus_register(mdio_bus);
if (ret)
- goto out;
+ goto out_mdiobus;
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1,
acpi_register_phy, NULL, mdio_bus, NULL);
@@ -426,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev)
}
if (ret)
- goto out;
+ goto out_mdiobus;
pdata->mdio_bus = mdio_bus;
xgene_mdio_status = true;
return 0;
-out:
+out_mdiobus:
mdiobus_free(mdio_bus);
+out_clk:
+ if (dev->of_node)
+ clk_disable_unprepare(pdata->clk);
+
return ret;
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2df7b62c1a36..88272b3ac2e2 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -38,6 +38,7 @@
#include <linux/phy.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/gpio/consumer.h>
#include <asm/irq.h>
@@ -46,11 +47,41 @@
#include "mdio-boardinfo.h"
+static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
+{
+ struct gpio_desc *gpiod = NULL;
+
+ /* Deassert the optional reset signal */
+ if (mdiodev->dev.of_node)
+ gpiod = fwnode_get_named_gpiod(&mdiodev->dev.of_node->fwnode,
+ "reset-gpios", 0, GPIOD_OUT_LOW,
+ "PHY reset");
+ if (PTR_ERR(gpiod) == -ENOENT)
+ gpiod = NULL;
+ else if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ mdiodev->reset = gpiod;
+
+ /* Assert the reset signal again */
+ mdio_device_reset(mdiodev, 1);
+
+ return 0;
+}
+
int mdiobus_register_device(struct mdio_device *mdiodev)
{
+ int err;
+
if (mdiodev->bus->mdio_map[mdiodev->addr])
return -EBUSY;
+ if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) {
+ err = mdiobus_register_gpiod(mdiodev);
+ if (err)
+ return err;
+ }
+
mdiodev->bus->mdio_map[mdiodev->addr] = mdiodev;
return 0;
@@ -270,6 +301,7 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
if (addr == mdiodev->addr) {
dev->of_node = child;
+ dev->fwnode = of_fwnode_handle(child);
return;
}
}
@@ -420,6 +452,9 @@ void mdiobus_unregister(struct mii_bus *bus)
if (!mdiodev)
continue;
+ if (mdiodev->reset)
+ gpiod_put(mdiodev->reset);
+
mdiodev->device_remove(mdiodev);
mdiodev->device_free(mdiodev);
}
@@ -493,6 +528,55 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
EXPORT_SYMBOL(mdiobus_scan);
/**
+ * __mdiobus_read - Unlocked version of the mdiobus_read function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to read
+ *
+ * Read an MDIO bus register. Caller must hold the mdio bus lock.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
+{
+ int retval;
+
+ WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+
+ retval = bus->read(bus, addr, regnum);
+
+ trace_mdio_access(bus, 1, addr, regnum, retval, retval);
+
+ return retval;
+}
+EXPORT_SYMBOL(__mdiobus_read);
+
+/**
+ * __mdiobus_write - Unlocked version of the mdiobus_write function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * Write an MDIO bus register. Caller must hold the mdio bus lock.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
+{
+ int err;
+
+ WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+
+ err = bus->write(bus, addr, regnum, val);
+
+ trace_mdio_access(bus, 0, addr, regnum, val, err);
+
+ return err;
+}
+EXPORT_SYMBOL(__mdiobus_write);
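The point of exporting these unlocked variants is to let callers batch dependent accesses under a single acquisition of mdio_lock, as the MMD helpers in phy-core.c below now do. The shape of such a caller, with placeholder register/bit names:

/* Sketch: a read-modify-write that stays atomic with respect to other
 * bus users. EXAMPLE_REG and EXAMPLE_BIT are placeholders, not real
 * defines.
 */
static int example_set_bit_locked(struct mii_bus *bus, int addr)
{
	int val;

	mutex_lock(&bus->mdio_lock);
	val = __mdiobus_read(bus, addr, EXAMPLE_REG);
	if (val >= 0)
		val = __mdiobus_write(bus, addr, EXAMPLE_REG,
				      val | EXAMPLE_BIT);
	mutex_unlock(&bus->mdio_lock);

	return val < 0 ? val : 0;
}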
+
+/**
* mdiobus_read_nested - Nested version of the mdiobus_read function
* @bus: the mii_bus struct
* @addr: the phy address
@@ -512,11 +596,9 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
BUG_ON(in_interrupt());
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- retval = bus->read(bus, addr, regnum);
+ retval = __mdiobus_read(bus, addr, regnum);
mutex_unlock(&bus->mdio_lock);
- trace_mdio_access(bus, 1, addr, regnum, retval, retval);
-
return retval;
}
EXPORT_SYMBOL(mdiobus_read_nested);
@@ -538,11 +620,9 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
BUG_ON(in_interrupt());
mutex_lock(&bus->mdio_lock);
- retval = bus->read(bus, addr, regnum);
+ retval = __mdiobus_read(bus, addr, regnum);
mutex_unlock(&bus->mdio_lock);
- trace_mdio_access(bus, 1, addr, regnum, retval, retval);
-
return retval;
}
EXPORT_SYMBOL(mdiobus_read);
@@ -568,11 +648,9 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
BUG_ON(in_interrupt());
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- err = bus->write(bus, addr, regnum, val);
+ err = __mdiobus_write(bus, addr, regnum, val);
mutex_unlock(&bus->mdio_lock);
- trace_mdio_access(bus, 0, addr, regnum, val, err);
-
return err;
}
EXPORT_SYMBOL(mdiobus_write_nested);
@@ -595,11 +673,9 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
BUG_ON(in_interrupt());
mutex_lock(&bus->mdio_lock);
- err = bus->write(bus, addr, regnum, val);
+ err = __mdiobus_write(bus, addr, regnum, val);
mutex_unlock(&bus->mdio_lock);
- trace_mdio_access(bus, 0, addr, regnum, val, err);
-
return err;
}
EXPORT_SYMBOL(mdiobus_write);
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index e24f28924af8..c924700cf37b 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -12,6 +12,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -22,6 +24,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/unistd.h>
+#include <linux/delay.h>
void mdio_device_free(struct mdio_device *mdiodev)
{
@@ -114,6 +117,21 @@ void mdio_device_remove(struct mdio_device *mdiodev)
}
EXPORT_SYMBOL(mdio_device_remove);
+void mdio_device_reset(struct mdio_device *mdiodev, int value)
+{
+ unsigned int d;
+
+ if (!mdiodev->reset)
+ return;
+
+ gpiod_set_value(mdiodev->reset, value);
+
+ d = value ? mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay;
+ if (d)
+ usleep_range(d, d + max_t(unsigned int, d / 10, 100));
+}
+EXPORT_SYMBOL(mdio_device_reset);
+
/**
* mdio_probe - probe an MDIO device
* @dev: device to probe
@@ -128,8 +146,16 @@ static int mdio_probe(struct device *dev)
struct mdio_driver *mdiodrv = to_mdio_driver(drv);
int err = 0;
- if (mdiodrv->probe)
+ if (mdiodrv->probe) {
+ /* Deassert the reset signal */
+ mdio_device_reset(mdiodev, 0);
+
err = mdiodrv->probe(mdiodev);
+ if (err) {
+ /* Assert the reset signal */
+ mdio_device_reset(mdiodev, 1);
+ }
+ }
return err;
}
@@ -140,9 +166,13 @@ static int mdio_remove(struct device *dev)
struct device_driver *drv = mdiodev->dev.driver;
struct mdio_driver *mdiodrv = to_mdio_driver(drv);
- if (mdiodrv->remove)
+ if (mdiodrv->remove) {
mdiodrv->remove(mdiodev);
+ /* Assert the reset signal */
+ mdio_device_reset(mdiodev, 1);
+ }
+
return 0;
}
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 1ea69b7585d9..ddc2c5ea3787 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -22,32 +22,208 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
+#include <linux/bitfield.h>
-static int meson_gxl_config_init(struct phy_device *phydev)
+#define TSTCNTL 20
+#define TSTCNTL_READ BIT(15)
+#define TSTCNTL_WRITE BIT(14)
+#define TSTCNTL_REG_BANK_SEL GENMASK(12, 11)
+#define TSTCNTL_TEST_MODE BIT(10)
+#define TSTCNTL_READ_ADDRESS GENMASK(9, 5)
+#define TSTCNTL_WRITE_ADDRESS GENMASK(4, 0)
+#define TSTREAD1 21
+#define TSTWRITE 23
+#define INTSRC_FLAG 29
+#define INTSRC_ANEG_PR BIT(1)
+#define INTSRC_PARALLEL_FAULT BIT(2)
+#define INTSRC_ANEG_LP_ACK BIT(3)
+#define INTSRC_LINK_DOWN BIT(4)
+#define INTSRC_REMOTE_FAULT BIT(5)
+#define INTSRC_ANEG_COMPLETE BIT(6)
+#define INTSRC_MASK 30
+
+#define BANK_ANALOG_DSP 0
+#define BANK_WOL 1
+#define BANK_BIST 3
+
+/* WOL Registers */
+#define LPI_STATUS 0xc
+#define LPI_STATUS_RSV12 BIT(12)
+
+/* BIST Registers */
+#define FR_PLL_CONTROL 0x1b
+#define FR_PLL_DIV0 0x1c
+#define FR_PLL_DIV1 0x1d
+
+static int meson_gxl_open_banks(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable Analog and DSP register Bank access by
+ * toggling TSTCNTL_TEST_MODE bit in the TSTCNTL register
+ */
+ ret = phy_write(phydev, TSTCNTL, 0);
+ if (ret)
+ return ret;
+ ret = phy_write(phydev, TSTCNTL, TSTCNTL_TEST_MODE);
+ if (ret)
+ return ret;
+ ret = phy_write(phydev, TSTCNTL, 0);
+ if (ret)
+ return ret;
+ return phy_write(phydev, TSTCNTL, TSTCNTL_TEST_MODE);
+}
+
+static void meson_gxl_close_banks(struct phy_device *phydev)
+{
+ phy_write(phydev, TSTCNTL, 0);
+}
+
+static int meson_gxl_read_reg(struct phy_device *phydev,
+ unsigned int bank, unsigned int reg)
{
- /* Enable Analog and DSP register Bank access by */
- phy_write(phydev, 0x14, 0x0000);
- phy_write(phydev, 0x14, 0x0400);
- phy_write(phydev, 0x14, 0x0000);
- phy_write(phydev, 0x14, 0x0400);
+ int ret;
- /* Write Analog register 23 */
- phy_write(phydev, 0x17, 0x8E0D);
- phy_write(phydev, 0x14, 0x4417);
+ ret = meson_gxl_open_banks(phydev);
+ if (ret)
+ goto out;
+
+ ret = phy_write(phydev, TSTCNTL, TSTCNTL_READ |
+ FIELD_PREP(TSTCNTL_REG_BANK_SEL, bank) |
+ TSTCNTL_TEST_MODE |
+ FIELD_PREP(TSTCNTL_READ_ADDRESS, reg));
+ if (ret)
+ goto out;
+
+ ret = phy_read(phydev, TSTREAD1);
+out:
+ /* Close the bank access on our way out */
+ meson_gxl_close_banks(phydev);
+ return ret;
+}
+
+static int meson_gxl_write_reg(struct phy_device *phydev,
+ unsigned int bank, unsigned int reg,
+ uint16_t value)
+{
+ int ret;
+
+ ret = meson_gxl_open_banks(phydev);
+ if (ret)
+ goto out;
+
+ ret = phy_write(phydev, TSTWRITE, value);
+ if (ret)
+ goto out;
+
+ ret = phy_write(phydev, TSTCNTL, TSTCNTL_WRITE |
+ FIELD_PREP(TSTCNTL_REG_BANK_SEL, bank) |
+ TSTCNTL_TEST_MODE |
+ FIELD_PREP(TSTCNTL_WRITE_ADDRESS, reg));
+
+out:
+ /* Close the bank access on our way out */
+ meson_gxl_close_banks(phydev);
+ return ret;
+}
+
+static int meson_gxl_config_init(struct phy_device *phydev)
+{
+ int ret;
/* Enable fractional PLL */
- phy_write(phydev, 0x17, 0x0005);
- phy_write(phydev, 0x14, 0x5C1B);
+ ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_CONTROL, 0x5);
+ if (ret)
+ return ret;
/* Program fraction FR_PLL_DIV1 */
- phy_write(phydev, 0x17, 0x029A);
- phy_write(phydev, 0x14, 0x5C1D);
+ ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_DIV1, 0x029a);
+ if (ret)
+ return ret;
 /* Program fraction FR_PLL_DIV0 */
- phy_write(phydev, 0x17, 0xAAAA);
- phy_write(phydev, 0x14, 0x5C1C);
+ ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_DIV0, 0xaaaa);
+ if (ret)
+ return ret;
+
+ return genphy_config_init(phydev);
+}
+
+/* This function is provided to cope with the possible failures of this phy
+ * during the aneg process. When aneg fails, the PHY reports that aneg is done
+ * but the value found in MII_LPA is wrong:
+ * - Early failures: MII_LPA is just 0x0001. If MII_EXPANSION reports that
+ * the link partner (LP) supports aneg but the LP never acked our base
+ * code word, it is likely that we never sent it to begin with.
+ * - Late failures: MII_LPA is filled with a value which seems to make sense
+ * but is actually not what the LP is advertising. It seems that we
+ * can detect this using a magic bit in the WOL bank (reg 12 - bit 12).
+ * If this particular bit is not set when aneg is reported as done,
+ * it means MII_LPA is likely to be wrong.
+ *
+ * In both cases, forcing a restart of the aneg process solves the problem.
+ * When this failure happens, the first retry is usually successful but,
+ * in some cases, it may take up to 6 retries to get a decent result.
+ */
+static int meson_gxl_read_status(struct phy_device *phydev)
+{
+ int ret, wol, lpa, exp;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ ret = genphy_aneg_done(phydev);
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ goto read_status_continue;
+
+ /* Aneg is done, let's check everything is fine */
+ wol = meson_gxl_read_reg(phydev, BANK_WOL, LPI_STATUS);
+ if (wol < 0)
+ return wol;
+
+ lpa = phy_read(phydev, MII_LPA);
+ if (lpa < 0)
+ return lpa;
+
+ exp = phy_read(phydev, MII_EXPANSION);
+ if (exp < 0)
+ return exp;
+
+ if (!(wol & LPI_STATUS_RSV12) ||
+ ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) {
+ /* Looks like aneg failed after all */
+ phydev_dbg(phydev, "LPA corruption - aneg restart\n");
+ return genphy_restart_aneg(phydev);
+ }
+ }
+
+read_status_continue:
+ return genphy_read_status(phydev);
+}
+
+static int meson_gxl_ack_interrupt(struct phy_device *phydev)
+{
+ int ret = phy_read(phydev, INTSRC_FLAG);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int meson_gxl_config_intr(struct phy_device *phydev)
+{
+ u16 val;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ val = INTSRC_ANEG_PR
+ | INTSRC_PARALLEL_FAULT
+ | INTSRC_ANEG_LP_ACK
+ | INTSRC_LINK_DOWN
+ | INTSRC_REMOTE_FAULT
+ | INTSRC_ANEG_COMPLETE;
+ } else {
+ val = 0;
+ }
- return 0;
+ return phy_write(phydev, INTSRC_MASK, val);
}
static struct phy_driver meson_gxl_phy[] = {
@@ -56,11 +232,12 @@ static struct phy_driver meson_gxl_phy[] = {
.phy_id_mask = 0xfffffff0,
.name = "Meson GXL Internal PHY",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_IS_INTERNAL,
+ .flags = PHY_IS_INTERNAL | PHY_HAS_INTERRUPT,
.config_init = meson_gxl_config_init,
- .config_aneg = genphy_config_aneg,
.aneg_done = genphy_aneg_done,
- .read_status = genphy_read_status,
+ .read_status = meson_gxl_read_status,
+ .ack_interrupt = meson_gxl_ack_interrupt,
+ .config_intr = meson_gxl_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
},
@@ -78,4 +255,5 @@ MODULE_DEVICE_TABLE(mdio, meson_gxl_tbl);
MODULE_DESCRIPTION("Amlogic Meson GXL Internal PHY driver");
MODULE_AUTHOR("Baoqi wang");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ab4614113403..0f45310300f6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -624,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
phydev->link = 0;
if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
phydev->drv->config_intr(phydev);
+ return genphy_config_aneg(phydev);
}
return 0;
@@ -801,8 +802,6 @@ static struct phy_driver ksphy_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.driver_data = &ks8737_type,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
@@ -816,8 +815,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -834,8 +831,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -853,7 +848,6 @@ static struct phy_driver ksphy_driver[] = {
.probe = kszphy_probe,
.config_init = ksz8041_config_init,
.config_aneg = ksz8041_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -870,8 +864,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -888,8 +880,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8051_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -906,8 +896,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -924,8 +912,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -940,8 +926,6 @@ static struct phy_driver ksphy_driver[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
@@ -955,8 +939,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9021_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -975,7 +957,6 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9031_config_init,
- .config_aneg = genphy_config_aneg,
.read_status = ksz9031_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
@@ -1000,8 +981,6 @@ static struct phy_driver ksphy_driver[] = {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -1021,8 +1000,6 @@ static struct phy_driver ksphy_driver[] = {
.name = "Microchip KSZ9477",
.features = PHY_GBIT_FEATURES,
.config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 37ee856c7680..0f293ef28935 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -153,7 +153,6 @@ static struct phy_driver microchip_phy_driver[] = {
.config_init = genphy_config_init,
.config_aneg = lan88xx_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = lan88xx_phy_ack_interrupt,
.config_intr = lan88xx_phy_config_intr,
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 2addf1d3f619..2b1e336961f9 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -136,8 +136,6 @@ static struct phy_driver dp83865_driver[] = { {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = ns_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = ns_ack_interrupt,
.config_intr = ns_config_intr,
} };
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index dada819c6b78..a4576859afae 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -233,6 +233,39 @@ int genphy_c45_read_pma(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(genphy_c45_read_pma);
+/**
+ * genphy_c45_read_mdix - read mdix status from PMA
+ * @phydev: target phy_device struct
+ */
+int genphy_c45_read_mdix(struct phy_device *phydev)
+{
+ int val;
+
+ if (phydev->speed == SPEED_10000) {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBT_SWAPPOL);
+ if (val < 0)
+ return val;
+
+ switch (val) {
+ case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX:
+ phydev->mdix = ETH_TP_MDI;
+ break;
+
+ case 0:
+ phydev->mdix = ETH_TP_MDI_X;
+ break;
+
+ default:
+ phydev->mdix = ETH_TP_MDI_INVALID;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_read_mdix);
+
/* The gen10g_* functions are the old Clause 45 stub */
static int gen10g_config_aneg(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 21f75ae244b3..4083f00c97a5 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -189,17 +189,61 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
return count;
}
+/**
+ * phy_resolve_aneg_linkmode - resolve the advertisements into phy settings
+ * @phydev: The phy_device struct
+ *
+ * Resolve our and the link partner advertisements into their corresponding
+ * speed and duplex. If full duplex was negotiated, extract the pause mode
+ * from the link partner mask.
+ */
+void phy_resolve_aneg_linkmode(struct phy_device *phydev)
+{
+ u32 common = phydev->lp_advertising & phydev->advertising;
+
+ if (common & ADVERTISED_10000baseT_Full) {
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+ } else if (common & ADVERTISED_1000baseT_Full) {
+ phydev->speed = SPEED_1000;
+ phydev->duplex = DUPLEX_FULL;
+ } else if (common & ADVERTISED_1000baseT_Half) {
+ phydev->speed = SPEED_1000;
+ phydev->duplex = DUPLEX_HALF;
+ } else if (common & ADVERTISED_100baseT_Full) {
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+ } else if (common & ADVERTISED_100baseT_Half) {
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_HALF;
+ } else if (common & ADVERTISED_10baseT_Full) {
+ phydev->speed = SPEED_10;
+ phydev->duplex = DUPLEX_FULL;
+ } else if (common & ADVERTISED_10baseT_Half) {
+ phydev->speed = SPEED_10;
+ phydev->duplex = DUPLEX_HALF;
+ }
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ phydev->pause = !!(phydev->lp_advertising & ADVERTISED_Pause);
+ phydev->asym_pause = !!(phydev->lp_advertising &
+ ADVERTISED_Asym_Pause);
+ }
+}
+EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
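The marvell10g conversion above is the first user; in general a clause-22 read_status() fills phydev->advertising and phydev->lp_advertising from the MII registers and then hands speed/duplex/pause resolution to this helper:

/* Sketch: tail of a read_status() implementation once both advertising
 * masks have been populated.
 */
if (phydev->autoneg == AUTONEG_ENABLE && phydev->link)
	phy_resolve_aneg_linkmode(phydev);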
+
static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
u16 regnum)
{
/* Write the desired MMD Devad */
- bus->write(bus, phy_addr, MII_MMD_CTRL, devad);
+ __mdiobus_write(bus, phy_addr, MII_MMD_CTRL, devad);
/* Write the desired MMD register address */
- bus->write(bus, phy_addr, MII_MMD_DATA, regnum);
+ __mdiobus_write(bus, phy_addr, MII_MMD_DATA, regnum);
/* Select the Function : DATA with no post increment */
- bus->write(bus, phy_addr, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);
+ __mdiobus_write(bus, phy_addr, MII_MMD_CTRL,
+ devad | MII_MMD_CTRL_NOINCR);
}
/**
@@ -232,7 +276,7 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
mmd_phy_indirect(bus, phy_addr, devad, regnum);
/* Read the content of the MMD's selected register */
- val = bus->read(bus, phy_addr, MII_MMD_DATA);
+ val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
mutex_unlock(&bus->mdio_lock);
}
return val;
@@ -271,7 +315,7 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
mmd_phy_indirect(bus, phy_addr, devad, regnum);
/* Write the data into MMD's selected register */
- bus->write(bus, phy_addr, MII_MMD_DATA, val);
+ __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
mutex_unlock(&bus->mdio_lock);
ret = 0;
@@ -279,3 +323,207 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
return ret;
}
EXPORT_SYMBOL(phy_write_mmd);
+
+/**
+ * __phy_modify() - Convenience function for modifying a PHY register
+ * @phydev: a pointer to a &struct phy_device
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Unlocked helper function which allows a PHY register to be modified as
+ * new register value = (old register value & ~mask) | set
+ */
+int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
+{
+ int ret;
+
+ ret = __phy_read(phydev, regnum);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, regnum, (ret & ~mask) | set);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(__phy_modify);
+
+/**
+ * phy_modify - Convenience function for modifying a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: new value of bits set in mask to write to @regnum
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
+{
+ int ret;
+
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+ ret = __phy_modify(phydev, regnum, mask, set);
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_modify);
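The genphy conversions later in this patch are the canonical users: suspend, resume and loopback each collapse into a single read-modify-write. The phy_set_bits()/phy_clear_bits() calls appearing there are presumably header-defined shorthands for phy_modify() (the header is not part of this diff); spelled out with phy_modify() directly:

/* Sketch: power the PHY down and back up via the BMCR power-down bit */
phy_modify(phydev, MII_BMCR, BMCR_PDOWN, BMCR_PDOWN);	/* suspend */
phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0);		/* resume */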
+
+static int __phy_read_page(struct phy_device *phydev)
+{
+ return phydev->drv->read_page(phydev);
+}
+
+static int __phy_write_page(struct phy_device *phydev, int page)
+{
+ return phydev->drv->write_page(phydev, page);
+}
+
+/**
+ * phy_save_page() - take the bus lock and save the current page
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Take the MDIO bus lock, and return the current page number. On error,
+ * returns a negative errno. phy_restore_page() must always be called
+ * after this, irrespective of success or failure of this call.
+ */
+int phy_save_page(struct phy_device *phydev)
+{
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+ return __phy_read_page(phydev);
+}
+EXPORT_SYMBOL_GPL(phy_save_page);
+
+/**
+ * phy_select_page() - take the bus lock, save the current page, and set a page
+ * @phydev: a pointer to a &struct phy_device
+ * @page: desired page
+ *
+ * Take the MDIO bus lock to protect against concurrent access, save the
+ * current PHY page, and set the current page. On error, returns a
+ * negative errno, otherwise returns the previous page number.
+ * phy_restore_page() must always be called after this, irrespective
+ * of success or failure of this call.
+ */
+int phy_select_page(struct phy_device *phydev, int page)
+{
+ int ret, oldpage;
+
+ oldpage = ret = phy_save_page(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (oldpage != page) {
+ ret = __phy_write_page(phydev, page);
+ if (ret < 0)
+ return ret;
+ }
+
+ return oldpage;
+}
+EXPORT_SYMBOL_GPL(phy_select_page);
+
+/**
+ * phy_restore_page() - restore the page register and release the bus lock
+ * @phydev: a pointer to a &struct phy_device
+ * @oldpage: the old page, return value from phy_save_page() or phy_select_page()
+ * @ret: operation's return code
+ *
+ * Release the MDIO bus lock, restoring @oldpage if it is a valid page.
+ * This function propagates the earliest error code from the group of
+ * operations.
+ *
+ * Returns:
+ * @oldpage if it was a negative value, otherwise
+ * @ret if it was a negative errno value, otherwise
+ * phy_write_page()'s negative value if it was in error, otherwise
+ * @ret.
+ */
+int phy_restore_page(struct phy_device *phydev, int oldpage, int ret)
+{
+ int r;
+
+ if (oldpage >= 0) {
+ r = __phy_write_page(phydev, oldpage);
+
+ /* Propagate the operation return code if the page write
+ * was successful.
+ */
+ if (ret >= 0 && r < 0)
+ ret = r;
+ } else {
+ /* Propagate the phy page selection error code */
+ ret = oldpage;
+ }
+
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_restore_page);
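Together, phy_select_page() and phy_restore_page() bracket multi-register sequences on a non-default page, the same shape as m88e6390_get_temp() in the marvell.c hunk above. The canonical skeleton, with placeholder page/register numbers:

/* Sketch: EXAMPLE_PAGE and EXAMPLE_REG are placeholders. Any error,
 * including a failed page select, propagates out through
 * phy_restore_page().
 */
static int example_read_on_page(struct phy_device *phydev)
{
	int oldpage, ret = 0;

	oldpage = phy_select_page(phydev, EXAMPLE_PAGE);
	if (oldpage < 0)
		goto out;

	ret = __phy_read(phydev, EXAMPLE_REG);
out:
	return phy_restore_page(phydev, oldpage, ret);
}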
+
+/**
+ * phy_read_paged() - Convenience function for reading a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ *
+ * Same rules as for phy_read().
+ */
+int phy_read_paged(struct phy_device *phydev, int page, u32 regnum)
+{
+ int ret = 0, oldpage;
+
+ oldpage = phy_select_page(phydev, page);
+ if (oldpage >= 0)
+ ret = __phy_read(phydev, regnum);
+
+ return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_read_paged);
+
+/**
+ * phy_write_paged() - Convenience function for writing a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ * @val: value to write
+ *
+ * Same rules as for phy_write().
+ */
+int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val)
+{
+ int ret = 0, oldpage;
+
+ oldpage = phy_select_page(phydev, page);
+ if (oldpage >= 0)
+ ret = __phy_write(phydev, regnum, val);
+
+ return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_write_paged);
+
+/**
+ * phy_modify_paged() - Convenience function for modifying a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Same rules as for phy_read() and phy_write().
+ */
+int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
+ u16 mask, u16 set)
+{
+ int ret = 0, oldpage;
+
+ oldpage = phy_select_page(phydev, page);
+ if (oldpage >= 0)
+ ret = __phy_modify(phydev, regnum, mask, set);
+
+ return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_modify_paged);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 2b1e67bc1e73..f3313a129531 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -493,7 +493,10 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
/* Invalidate LP advertising flags */
phydev->lp_advertising = 0;
- err = phydev->drv->config_aneg(phydev);
+ if (phydev->drv->config_aneg)
+ err = phydev->drv->config_aneg(phydev);
+ else
+ err = genphy_config_aneg(phydev);
if (err < 0)
goto out_unlock;
@@ -629,9 +632,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
if (PHY_HALTED == phydev->state)
return IRQ_NONE; /* It can't be ours. */
- disable_irq_nosync(irq);
- atomic_inc(&phydev->irq_disable);
-
phy_change(phydev);
return IRQ_HANDLED;
@@ -689,7 +689,6 @@ phy_err:
*/
int phy_start_interrupts(struct phy_device *phydev)
{
- atomic_set(&phydev->irq_disable, 0);
if (request_threaded_irq(phydev->irq, NULL, phy_interrupt,
IRQF_ONESHOT | IRQF_SHARED,
phydev_name(phydev), phydev) < 0) {
@@ -716,13 +715,6 @@ int phy_stop_interrupts(struct phy_device *phydev)
free_irq(phydev->irq, phydev);
- /* If work indeed has been cancelled, disable_irq() will have
- * been left unbalanced from phy_interrupt() and enable_irq()
- * has to be called so that other devices on the line work.
- */
- while (atomic_dec_return(&phydev->irq_disable) >= 0)
- enable_irq(phydev->irq);
-
return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);
@@ -736,10 +728,11 @@ void phy_change(struct phy_device *phydev)
if (phy_interrupt_is_valid(phydev)) {
if (phydev->drv->did_interrupt &&
!phydev->drv->did_interrupt(phydev))
- goto ignore;
+ return;
- if (phy_disable_interrupts(phydev))
- goto phy_err;
+ if (phydev->state == PHY_HALTED)
+ if (phy_disable_interrupts(phydev))
+ goto phy_err;
}
mutex_lock(&phydev->lock);
@@ -747,28 +740,13 @@ void phy_change(struct phy_device *phydev)
phydev->state = PHY_CHANGELINK;
mutex_unlock(&phydev->lock);
- if (phy_interrupt_is_valid(phydev)) {
- atomic_dec(&phydev->irq_disable);
- enable_irq(phydev->irq);
-
- /* Reenable interrupts */
- if (PHY_HALTED != phydev->state &&
- phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
- goto irq_enable_err;
- }
-
/* reschedule state queue work to run as soon as possible */
phy_trigger_machine(phydev, true);
- return;
-ignore:
- atomic_dec(&phydev->irq_disable);
- enable_irq(phydev->irq);
+ if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
+ goto phy_err;
return;
-irq_enable_err:
- disable_irq(phydev->irq);
- atomic_inc(&phydev->irq_disable);
phy_err:
phy_error(phydev);
}
@@ -828,7 +806,6 @@ EXPORT_SYMBOL(phy_stop);
*/
void phy_start(struct phy_device *phydev)
{
- bool do_resume = false;
int err = 0;
mutex_lock(&phydev->lock);
@@ -841,6 +818,9 @@ void phy_start(struct phy_device *phydev)
phydev->state = PHY_UP;
break;
case PHY_HALTED:
+ /* if phy was suspended, bring the physical link up again */
+ phy_resume(phydev);
+
/* make sure interrupts are re-enabled for the PHY */
if (phydev->irq != PHY_POLL) {
err = phy_enable_interrupts(phydev);
@@ -849,17 +829,12 @@ void phy_start(struct phy_device *phydev)
}
phydev->state = PHY_RESUMING;
- do_resume = true;
break;
default:
break;
}
mutex_unlock(&phydev->lock);
- /* if phy was suspended, bring the physical link up again */
- if (do_resume)
- phy_resume(phydev);
-
phy_trigger_machine(phydev, true);
}
EXPORT_SYMBOL(phy_start);
@@ -1006,10 +981,6 @@ void phy_state_machine(struct work_struct *work)
phydev->state = PHY_NOLINK;
phy_link_down(phydev, true);
}
-
- if (phy_interrupt_is_valid(phydev))
- err = phy_config_interrupt(phydev,
- PHY_INTERRUPT_ENABLED);
break;
case PHY_HALTED:
if (phydev->link) {
@@ -1086,16 +1057,12 @@ void phy_state_machine(struct work_struct *work)
/**
* phy_mac_interrupt - MAC says the link has changed
* @phydev: phy_device struct with changed link
- * @new_link: Link is Up/Down.
*
- * Description: The MAC layer is able indicate there has been a change
- * in the PHY link status. Set the new link status, and trigger the
- * state machine, work a work queue.
+ * The MAC layer is able to indicate there has been a change in the PHY link
+ * status. Trigger the state machine via its work queue.
*/
-void phy_mac_interrupt(struct phy_device *phydev, int new_link)
+void phy_mac_interrupt(struct phy_device *phydev)
{
- phydev->link = new_link;
-
/* Trigger a state machine change */
queue_work(system_power_efficient_wq, &phydev->phy_queue);
}
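With the new_link argument gone, a MAC that detects link changes in hardware no longer sets phydev->link itself; it just kicks the state machine, which will poll the PHY via read_status(). A hypothetical caller:

/* Sketch: a MAC driver's link-event ISR under the new API. The
 * surrounding driver structure is hypothetical.
 */
static irqreturn_t example_link_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;

	phy_mac_interrupt(ndev->phydev);

	return IRQ_HANDLED;
}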
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 67f25ac29025..b13eed21c87d 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -135,7 +135,9 @@ static int mdio_bus_phy_resume(struct device *dev)
if (!mdio_bus_phy_may_suspend(phydev))
goto no_resume;
+ mutex_lock(&phydev->lock);
ret = phy_resume(phydev);
+ mutex_unlock(&phydev->lock);
if (ret < 0)
return ret;
@@ -632,6 +634,9 @@ int phy_device_register(struct phy_device *phydev)
if (err)
return err;
+ /* Deassert the reset signal */
+ phy_device_reset(phydev, 0);
+
/* Run all of the fixups for this PHY */
err = phy_scan_fixups(phydev);
if (err) {
@@ -650,6 +655,9 @@ int phy_device_register(struct phy_device *phydev)
return 0;
out:
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
+
mdiobus_unregister_device(&phydev->mdio);
return err;
}
@@ -666,6 +674,10 @@ EXPORT_SYMBOL(phy_device_register);
void phy_device_remove(struct phy_device *phydev)
{
device_del(&phydev->mdio.dev);
+
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
+
mdiobus_unregister_device(&phydev->mdio);
}
EXPORT_SYMBOL(phy_device_remove);
@@ -849,6 +861,9 @@ int phy_init_hw(struct phy_device *phydev)
{
int ret = 0;
+ /* Deassert the reset signal */
+ phy_device_reset(phydev, 0);
+
if (!phydev->drv || !phydev->drv->config_init)
return 0;
@@ -1026,7 +1041,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (err)
goto error;
+ mutex_lock(&phydev->lock);
phy_resume(phydev);
+ mutex_unlock(&phydev->lock);
phy_led_triggers_register(phydev);
return err;
@@ -1126,6 +1143,9 @@ void phy_detach(struct phy_device *phydev)
put_device(&phydev->mdio.dev);
if (ndev_owner != bus->owner)
module_put(bus->owner);
+
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
}
EXPORT_SYMBOL(phy_detach);
@@ -1157,6 +1177,8 @@ int phy_resume(struct phy_device *phydev)
struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
int ret = 0;
+ WARN_ON(!mutex_is_locked(&phydev->lock));
+
if (phydev->drv && phydrv->resume)
ret = phydrv->resume(phydev);
@@ -1202,6 +1224,30 @@ out:
}
EXPORT_SYMBOL(phy_loopback);
+/**
+ * phy_reset_after_clk_enable - perform a PHY reset if needed
+ * @phydev: target phy_device struct
+ *
+ * Description: Some PHYs are known to need a reset after their refclk was
+ * enabled. This function evaluates the flags and performs the reset if
+ * needed. Returns < 0 on error, 0 if the phy wasn't reset and 1 if the phy
+ * was reset.
+ */
+int phy_reset_after_clk_enable(struct phy_device *phydev)
+{
+ if (!phydev || !phydev->drv)
+ return -ENODEV;
+
+ if (phydev->drv->flags & PHY_RST_AFTER_CLK_EN) {
+ phy_device_reset(phydev, 1);
+ phy_device_reset(phydev, 0);
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_reset_after_clk_enable);
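A MAC driver that gates the PHY refclk would call this immediately after re-enabling the clock, e.g. in its resume path; a sketch with a hypothetical private structure:

/* Sketch: example_priv and its refclk/ndev fields are hypothetical */
static int example_mac_resume(struct example_priv *priv)
{
	int ret;

	ret = clk_prepare_enable(priv->refclk);
	if (ret)
		return ret;

	ret = phy_reset_after_clk_enable(priv->ndev->phydev);

	return ret < 0 ? ret : 0;
}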
+
/* Generic PHY support and helper functions */
/**
@@ -1322,9 +1368,8 @@ static int genphy_config_eee_advert(struct phy_device *phydev)
*/
int genphy_setup_forced(struct phy_device *phydev)
{
- int ctl = phy_read(phydev, MII_BMCR);
+ u16 ctl = 0;
- ctl &= BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
@@ -1336,7 +1381,8 @@ int genphy_setup_forced(struct phy_device *phydev)
if (DUPLEX_FULL == phydev->duplex)
ctl |= BMCR_FULLDPLX;
- return phy_write(phydev, MII_BMCR, ctl);
+ return phy_modify(phydev, MII_BMCR,
+ BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl);
}
EXPORT_SYMBOL(genphy_setup_forced);
@@ -1346,17 +1392,9 @@ EXPORT_SYMBOL(genphy_setup_forced);
*/
int genphy_restart_aneg(struct phy_device *phydev)
{
- int ctl = phy_read(phydev, MII_BMCR);
-
- if (ctl < 0)
- return ctl;
-
- ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
-
/* Don't isolate the PHY if we're negotiating */
- ctl &= ~BMCR_ISOLATE;
-
- return phy_write(phydev, MII_BMCR, ctl);
+ return phy_modify(phydev, MII_BMCR, BMCR_ISOLATE,
+ BMCR_ANENABLE | BMCR_ANRESTART);
}
EXPORT_SYMBOL(genphy_restart_aneg);
@@ -1622,48 +1660,20 @@ EXPORT_SYMBOL(genphy_config_init);
int genphy_suspend(struct phy_device *phydev)
{
- int value;
-
- mutex_lock(&phydev->lock);
-
- value = phy_read(phydev, MII_BMCR);
- phy_write(phydev, MII_BMCR, value | BMCR_PDOWN);
-
- mutex_unlock(&phydev->lock);
-
- return 0;
+ return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN);
}
EXPORT_SYMBOL(genphy_suspend);
int genphy_resume(struct phy_device *phydev)
{
- int value;
-
- mutex_lock(&phydev->lock);
-
- value = phy_read(phydev, MII_BMCR);
- phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
-
- mutex_unlock(&phydev->lock);
-
- return 0;
+ return phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN);
}
EXPORT_SYMBOL(genphy_resume);
int genphy_loopback(struct phy_device *phydev, bool enable)
{
- int value;
-
- value = phy_read(phydev, MII_BMCR);
- if (value < 0)
- return value;
-
- if (enable)
- value |= BMCR_LOOPBACK;
- else
- value &= ~BMCR_LOOPBACK;
-
- return phy_write(phydev, MII_BMCR, value);
+ return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+ enable ? BMCR_LOOPBACK : 0);
}
EXPORT_SYMBOL(genphy_loopback);
@@ -1811,8 +1821,16 @@ static int phy_probe(struct device *dev)
/* Set the state to READY by default */
phydev->state = PHY_READY;
- if (phydev->drv->probe)
+ if (phydev->drv->probe) {
+ /* Deassert the reset signal */
+ phy_device_reset(phydev, 0);
+
err = phydev->drv->probe(phydev);
+ if (err) {
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
+ }
+ }
mutex_unlock(&phydev->lock);
@@ -1829,8 +1847,12 @@ static int phy_remove(struct device *dev)
phydev->state = PHY_DOWN;
mutex_unlock(&phydev->lock);
- if (phydev->drv && phydev->drv->remove)
+ if (phydev->drv && phydev->drv->remove) {
phydev->drv->remove(phydev);
+
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
+ }
phydev->drv = NULL;
return 0;
@@ -1907,9 +1929,7 @@ static struct phy_driver genphy_driver = {
.features = PHY_GBIT_FEATURES | SUPPORTED_MII |
SUPPORTED_AUI | SUPPORTED_FIBRE |
SUPPORTED_BNC,
- .config_aneg = genphy_config_aneg,
.aneg_done = genphy_aneg_done,
- .read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
.set_loopback = genphy_loopback,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 5dc9668dde34..6ac8b29b2dc3 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -36,7 +36,11 @@ enum {
PHYLINK_DISABLE_LINK,
};
+/**
+ * struct phylink - internal data type for phylink
+ */
struct phylink {
+ /* private: */
struct net_device *netdev;
const struct phylink_mac_ops *ops;
@@ -50,6 +54,8 @@ struct phylink {
/* The link configuration settings */
struct phylink_link_state link_config;
struct gpio_desc *link_gpio;
+ void (*get_fixed_state)(struct net_device *dev,
+ struct phylink_link_state *s);
struct mutex state_mutex;
struct phylink_link_state phy_state;
@@ -87,6 +93,13 @@ static inline bool linkmode_empty(const unsigned long *src)
return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
+/**
+ * phylink_set_port_modes() - set the port type modes in the ethtool mask
+ * @mask: ethtool link mode mask
+ *
+ * Sets all the port type modes in the ethtool mask. MAC drivers should
+ * use this in their 'validate' callback.
+ */
void phylink_set_port_modes(unsigned long *mask)
{
phylink_set(mask, TP);
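/*
 * [Editor's sketch] A hypothetical MAC 'validate' callback showing the
 * intended use of phylink_set_port_modes(); 'foo_' names are illustrative
 * and the callback signature is the net_device-based one phylink uses at
 * this point in the series.
 */
static void foo_validate(struct net_device *ndev, unsigned long *supported,
			 struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Advertise every port type this MAC can drive */
	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);
	phylink_set(mask, 1000baseT_Full);

	/* Restrict the supplied masks to what the MAC supports */
	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}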
@@ -117,8 +130,7 @@ static const char *phylink_an_mode_str(unsigned int mode)
static const char *modestr[] = {
[MLO_AN_PHY] = "phy",
[MLO_AN_FIXED] = "fixed",
- [MLO_AN_SGMII] = "SGMII",
- [MLO_AN_8023Z] = "802.3z",
+ [MLO_AN_INBAND] = "inband",
};
return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
@@ -132,59 +144,64 @@ static int phylink_validate(struct phylink *pl, unsigned long *supported,
return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
}
-static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np)
+static int phylink_parse_fixedlink(struct phylink *pl,
+ struct fwnode_handle *fwnode)
{
- struct device_node *fixed_node;
+ struct fwnode_handle *fixed_node;
const struct phy_setting *s;
struct gpio_desc *desc;
- const __be32 *fixed_prop;
u32 speed;
- int ret, len;
+ int ret;
- fixed_node = of_get_child_by_name(np, "fixed-link");
+ fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
if (fixed_node) {
- ret = of_property_read_u32(fixed_node, "speed", &speed);
+ ret = fwnode_property_read_u32(fixed_node, "speed", &speed);
pl->link_config.speed = speed;
pl->link_config.duplex = DUPLEX_HALF;
- if (of_property_read_bool(fixed_node, "full-duplex"))
+ if (fwnode_property_read_bool(fixed_node, "full-duplex"))
pl->link_config.duplex = DUPLEX_FULL;
/* We treat the "pause" and "asym-pause" terminology as
* defining the link partner's ability. */
- if (of_property_read_bool(fixed_node, "pause"))
+ if (fwnode_property_read_bool(fixed_node, "pause"))
pl->link_config.pause |= MLO_PAUSE_SYM;
- if (of_property_read_bool(fixed_node, "asym-pause"))
+ if (fwnode_property_read_bool(fixed_node, "asym-pause"))
pl->link_config.pause |= MLO_PAUSE_ASYM;
if (ret == 0) {
- desc = fwnode_get_named_gpiod(&fixed_node->fwnode,
- "link-gpios", 0,
- GPIOD_IN, "?");
+ desc = fwnode_get_named_gpiod(fixed_node, "link-gpios",
+ 0, GPIOD_IN, "?");
if (!IS_ERR(desc))
pl->link_gpio = desc;
else if (desc == ERR_PTR(-EPROBE_DEFER))
ret = -EPROBE_DEFER;
}
- of_node_put(fixed_node);
+ fwnode_handle_put(fixed_node);
if (ret)
return ret;
} else {
- fixed_prop = of_get_property(np, "fixed-link", &len);
- if (!fixed_prop) {
+ u32 prop[5];
+
+ ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
+ NULL, 0);
+ if (ret != ARRAY_SIZE(prop)) {
netdev_err(pl->netdev, "broken fixed-link?\n");
return -EINVAL;
}
- if (len == 5 * sizeof(*fixed_prop)) {
- pl->link_config.duplex = be32_to_cpu(fixed_prop[1]) ?
+
+ ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
+ prop, ARRAY_SIZE(prop));
+ if (!ret) {
+ pl->link_config.duplex = prop[1] ?
DUPLEX_FULL : DUPLEX_HALF;
- pl->link_config.speed = be32_to_cpu(fixed_prop[2]);
- if (be32_to_cpu(fixed_prop[3]))
+ pl->link_config.speed = prop[2];
+ if (prop[3])
pl->link_config.pause |= MLO_PAUSE_SYM;
- if (be32_to_cpu(fixed_prop[4]))
+ if (prop[4])
pl->link_config.pause |= MLO_PAUSE_ASYM;
}
}
@@ -220,17 +237,17 @@ static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np)
return 0;
}
-static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
+static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
{
- struct device_node *dn;
+ struct fwnode_handle *dn;
const char *managed;
- dn = of_get_child_by_name(np, "fixed-link");
- if (dn || of_find_property(np, "fixed-link", NULL))
+ dn = fwnode_get_named_child_node(fwnode, "fixed-link");
+ if (dn || fwnode_property_present(fwnode, "fixed-link"))
pl->link_an_mode = MLO_AN_FIXED;
- of_node_put(dn);
+ fwnode_handle_put(dn);
- if (of_property_read_string(np, "managed", &managed) == 0 &&
+ if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
strcmp(managed, "in-band-status") == 0) {
if (pl->link_an_mode == MLO_AN_FIXED) {
netdev_err(pl->netdev,
@@ -244,6 +261,7 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
phylink_set(pl->supported, Asym_Pause);
phylink_set(pl->supported, Pause);
pl->link_config.an_enabled = true;
+ pl->link_an_mode = MLO_AN_INBAND;
switch (pl->link_config.interface) {
case PHY_INTERFACE_MODE_SGMII:
@@ -253,17 +271,14 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
phylink_set(pl->supported, 100baseT_Full);
phylink_set(pl->supported, 1000baseT_Half);
phylink_set(pl->supported, 1000baseT_Full);
- pl->link_an_mode = MLO_AN_SGMII;
break;
case PHY_INTERFACE_MODE_1000BASEX:
phylink_set(pl->supported, 1000baseX_Full);
- pl->link_an_mode = MLO_AN_8023Z;
break;
case PHY_INTERFACE_MODE_2500BASEX:
phylink_set(pl->supported, 2500baseX_Full);
- pl->link_an_mode = MLO_AN_8023Z;
break;
case PHY_INTERFACE_MODE_10GKR:
@@ -280,7 +295,6 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
phylink_set(pl->supported, 10000baseLR_Full);
phylink_set(pl->supported, 10000baseLRM_Full);
phylink_set(pl->supported, 10000baseER_Full);
- pl->link_an_mode = MLO_AN_SGMII;
break;
default:
@@ -320,8 +334,7 @@ static void phylink_mac_config(struct phylink *pl,
static void phylink_mac_an_restart(struct phylink *pl)
{
if (pl->link_config.an_enabled &&
- (pl->link_config.interface == PHY_INTERFACE_MODE_1000BASEX ||
- pl->link_config.interface == PHY_INTERFACE_MODE_2500BASEX))
+ phy_interface_mode_is_8023z(pl->link_config.interface))
pl->ops->mac_an_restart(pl->netdev);
}
@@ -339,12 +352,14 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
}
/* The fixed state is... fixed except for the link state,
- * which may be determined by a GPIO.
+ * which may be determined by a GPIO or a callback.
*/
static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state)
{
*state = pl->link_config;
- if (pl->link_gpio)
+ if (pl->get_fixed_state)
+ pl->get_fixed_state(pl->netdev, state);
+ else if (pl->link_gpio)
state->link = !!gpiod_get_value(pl->link_gpio);
}
@@ -423,7 +438,7 @@ static void phylink_resolve(struct work_struct *w)
phylink_mac_config(pl, &link_state);
break;
- case MLO_AN_SGMII:
+ case MLO_AN_INBAND:
phylink_get_mac_state(pl, &link_state);
if (pl->phydev) {
bool changed = false;
@@ -449,10 +464,6 @@ static void phylink_resolve(struct work_struct *w)
}
}
break;
-
- case MLO_AN_8023Z:
- phylink_get_mac_state(pl, &link_state);
- break;
}
}
@@ -489,15 +500,27 @@ static void phylink_run_resolve(struct phylink *pl)
static const struct sfp_upstream_ops sfp_phylink_ops;
-static int phylink_register_sfp(struct phylink *pl, struct device_node *np)
+static int phylink_register_sfp(struct phylink *pl,
+ struct fwnode_handle *fwnode)
{
- struct device_node *sfp_np;
+ struct fwnode_reference_args ref;
+ int ret;
- sfp_np = of_parse_phandle(np, "sfp", 0);
- if (!sfp_np)
+ if (!fwnode)
return 0;
- pl->sfp_bus = sfp_register_upstream(sfp_np, pl->netdev, pl,
+ ret = fwnode_property_get_reference_args(fwnode, "sfp", NULL,
+ 0, 0, &ref);
+ if (ret < 0) {
+ if (ret == -ENOENT)
+ return 0;
+
+ netdev_err(pl->netdev, "unable to parse \"sfp\" node: %d\n",
+ ret);
+ return ret;
+ }
+
+ pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl->netdev, pl,
&sfp_phylink_ops);
if (!pl->sfp_bus)
return -ENOMEM;
@@ -505,7 +528,22 @@ static int phylink_register_sfp(struct phylink *pl, struct device_node *np)
return 0;
}
-struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
+/**
+ * phylink_create() - create a phylink instance
+ * @ndev: a pointer to the &struct net_device
+ * @fwnode: a pointer to a &struct fwnode_handle describing the network
+ * interface
+ * @iface: the desired link mode defined by &typedef phy_interface_t
+ * @ops: a pointer to a &struct phylink_mac_ops for the MAC.
+ *
+ * Create a new phylink instance, and parse the link parameters found in @fwnode.
+ * This will parse in-band modes, fixed-link or SFP configuration.
+ *
+ * Returns a pointer to a &struct phylink, or an error-pointer value. Users
+ * must use IS_ERR() to check for errors from this function.
+ */
+struct phylink *phylink_create(struct net_device *ndev,
+ struct fwnode_handle *fwnode,
phy_interface_t iface,
const struct phylink_mac_ops *ops)
{
@@ -521,11 +559,15 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
pl->netdev = ndev;
pl->phy_state.interface = iface;
pl->link_interface = iface;
- pl->link_port = PORT_MII;
+ if (iface == PHY_INTERFACE_MODE_MOCA)
+ pl->link_port = PORT_BNC;
+ else
+ pl->link_port = PORT_MII;
pl->link_config.interface = iface;
pl->link_config.pause = MLO_PAUSE_AN;
pl->link_config.speed = SPEED_UNKNOWN;
pl->link_config.duplex = DUPLEX_UNKNOWN;
+ pl->link_config.an_enabled = true;
pl->ops = ops;
__set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
@@ -533,21 +575,21 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
linkmode_copy(pl->link_config.advertising, pl->supported);
phylink_validate(pl, pl->supported, &pl->link_config);
- ret = phylink_parse_mode(pl, np);
+ ret = phylink_parse_mode(pl, fwnode);
if (ret < 0) {
kfree(pl);
return ERR_PTR(ret);
}
if (pl->link_an_mode == MLO_AN_FIXED) {
- ret = phylink_parse_fixedlink(pl, np);
+ ret = phylink_parse_fixedlink(pl, fwnode);
if (ret < 0) {
kfree(pl);
return ERR_PTR(ret);
}
}
- ret = phylink_register_sfp(pl, np);
+ ret = phylink_register_sfp(pl, fwnode);
if (ret < 0) {
kfree(pl);
return ERR_PTR(ret);
@@ -557,6 +599,13 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
}
EXPORT_SYMBOL_GPL(phylink_create);
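/*
 * [Editor's sketch] Probe-time usage of the new fwnode-based signature;
 * 'priv' and 'foo_phylink_ops' are illustrative, and dev_fwnode() is
 * assumed to resolve the device's firmware node (DT or ACPI).
 */
	priv->phylink = phylink_create(ndev, dev_fwnode(&pdev->dev),
				       PHY_INTERFACE_MODE_SGMII,
				       &foo_phylink_ops);
	if (IS_ERR(priv->phylink))
		return PTR_ERR(priv->phylink);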
+/**
+ * phylink_destroy() - cleanup and destroy the phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Destroy a phylink instance. Any PHY that has been attached must have been
+ * cleaned up via phylink_disconnect_phy() prior to calling this function.
+ */
void phylink_destroy(struct phylink *pl)
{
if (pl->sfp_bus)
@@ -653,10 +702,39 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
return 0;
}
+/**
+ * phylink_connect_phy() - connect a PHY to the phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @phy: a pointer to a &struct phy_device.
+ *
+ * Connect @phy to the phylink instance specified by @pl by calling
+ * phy_attach_direct(). Configure the @phy according to the MAC driver's
+ * capabilities, start the PHYLIB state machine and enable any interrupts
+ * that the PHY supports.
+ *
+ * This updates the phylink's ethtool supported and advertising link mode
+ * masks.
+ *
+ * Returns 0 on success or a negative errno.
+ */
int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
{
int ret;
+ if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
+ (pl->link_an_mode == MLO_AN_INBAND &&
+ phy_interface_mode_is_8023z(pl->link_interface))))
+ return -EINVAL;
+
+ if (pl->phydev)
+ return -EBUSY;
+
+ /* Use PHY device/driver interface */
+ if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
+ pl->link_interface = phy->interface;
+ pl->link_config.interface = pl->link_interface;
+ }
+
ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface);
if (ret)
return ret;
@@ -669,14 +747,29 @@ int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
}
EXPORT_SYMBOL_GPL(phylink_connect_phy);
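/*
 * [Editor's sketch] The direct attachment path, e.g. for a PHY the driver
 * has already found on its own MDIO bus; 'priv' and the bus address are
 * illustrative.
 */
	struct phy_device *phy = mdiobus_get_phy(priv->mii_bus, 0);

	if (!phy)
		return -ENODEV;

	err = phylink_connect_phy(priv->phylink, phy);
	if (err)
		return err;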
-int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn)
+/**
+ * phylink_of_phy_connect() - connect the PHY specified in the DT node.
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @dn: a pointer to a &struct device_node.
+ * @flags: PHY-specific flags to communicate to the PHY device driver
+ *
+ * Connect the phy specified in the device node @dn to the phylink instance
+ * specified by @pl. Actions specified in phylink_connect_phy() will be
+ * performed.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
+ u32 flags)
{
struct device_node *phy_node;
struct phy_device *phy_dev;
int ret;
- /* Fixed links are handled without needing a PHY */
- if (pl->link_an_mode == MLO_AN_FIXED)
+ /* Fixed links and 802.3z are handled without needing a PHY */
+ if (pl->link_an_mode == MLO_AN_FIXED ||
+ (pl->link_an_mode == MLO_AN_INBAND &&
+ phy_interface_mode_is_8023z(pl->link_interface)))
return 0;
phy_node = of_parse_phandle(dn, "phy-handle", 0);
@@ -686,14 +779,13 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn)
phy_node = of_parse_phandle(dn, "phy-device", 0);
if (!phy_node) {
- if (pl->link_an_mode == MLO_AN_PHY) {
- netdev_err(pl->netdev, "unable to find PHY node\n");
+ if (pl->link_an_mode == MLO_AN_PHY)
return -ENODEV;
- }
return 0;
}
- phy_dev = of_phy_attach(pl->netdev, phy_node, 0, pl->link_interface);
+ phy_dev = of_phy_attach(pl->netdev, phy_node, flags,
+ pl->link_interface);
/* We're done with the phy_node handle */
of_node_put(phy_node);
@@ -708,11 +800,18 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn)
}
EXPORT_SYMBOL_GPL(phylink_of_phy_connect);
+/**
+ * phylink_disconnect_phy() - disconnect any PHY attached to the phylink
+ * instance.
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Disconnect any current PHY from the phylink instance described by @pl.
+ */
void phylink_disconnect_phy(struct phylink *pl)
{
struct phy_device *phy;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
phy = pl->phydev;
if (phy) {
@@ -729,6 +828,40 @@ void phylink_disconnect_phy(struct phylink *pl)
}
EXPORT_SYMBOL_GPL(phylink_disconnect_phy);
+/**
+ * phylink_fixed_state_cb() - allow setting a fixed link callback
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @cb: callback to execute to determine the fixed link state.
+ *
+ * The MAC driver should call this function when the state of its link
+ * can be determined through, e.g., an out-of-band MMIO register.
+ */
+int phylink_fixed_state_cb(struct phylink *pl,
+ void (*cb)(struct net_device *dev,
+ struct phylink_link_state *state))
+{
+	/* It does not make sense to let the link be overridden unless we use
+ * MLO_AN_FIXED
+ */
+ if (pl->link_an_mode != MLO_AN_FIXED)
+ return -EINVAL;
+
+ mutex_lock(&pl->state_mutex);
+ pl->get_fixed_state = cb;
+ mutex_unlock(&pl->state_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phylink_fixed_state_cb);
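/*
 * [Editor's sketch] A hypothetical out-of-band link-status callback for a
 * fixed link; FOO_LINK_STATUS_REG and the register layout are invented.
 * Register it once after phylink_create(), only under MLO_AN_FIXED.
 */
static void foo_fixed_state(struct net_device *ndev,
			    struct phylink_link_state *state)
{
	struct foo_priv *priv = netdev_priv(ndev);

	/* Only the link indication may change for a fixed link */
	state->link = !!(readl(priv->base + FOO_LINK_STATUS_REG) & BIT(0));
}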
+
+/**
+ * phylink_mac_change() - notify phylink of a change in MAC state
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @up: indicates whether the link is currently up.
+ *
+ * The MAC driver should call this function when the state of its link
+ * changes (e.g. link failure, new negotiation results, etc.)
+ */
void phylink_mac_change(struct phylink *pl, bool up)
{
if (!up)
@@ -738,9 +871,17 @@ void phylink_mac_change(struct phylink *pl, bool up)
}
EXPORT_SYMBOL_GPL(phylink_mac_change);
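/*
 * [Editor's sketch] Notifying phylink from a MAC link-change interrupt;
 * FOO_IRQ_STATUS, FOO_IRQ_LINK and FOO_LINK_UP are hypothetical bits.
 */
static irqreturn_t foo_link_irq(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;
	u32 stat = readl(priv->base + FOO_IRQ_STATUS);

	if (stat & FOO_IRQ_LINK)
		phylink_mac_change(priv->phylink, !!(stat & FOO_LINK_UP));

	return IRQ_HANDLED;
}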
+/**
+ * phylink_start() - start a phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Start the phylink instance specified by @pl, configuring the MAC for the
+ * desired link mode(s) and negotiation style. This should be called from the
+ * network device driver's &struct net_device_ops ndo_open() method.
+ */
void phylink_start(struct phylink *pl)
{
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
netdev_info(pl->netdev, "configuring for %s/%s link mode\n",
phylink_an_mode_str(pl->link_an_mode),
@@ -753,6 +894,12 @@ void phylink_start(struct phylink *pl)
phylink_resolve_flow(pl, &pl->link_config);
phylink_mac_config(pl, &pl->link_config);
+ /* Restart autonegotiation if using 802.3z to ensure that the link
+ * parameters are properly negotiated. This is necessary for DSA
+ * switches using 802.3z negotiation to ensure they see our modes.
+ */
+ phylink_mac_an_restart(pl);
+
clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
phylink_run_resolve(pl);
@@ -763,9 +910,18 @@ void phylink_start(struct phylink *pl)
}
EXPORT_SYMBOL_GPL(phylink_start);
+/**
+ * phylink_stop() - stop a phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Stop the phylink instance specified by @pl. This should be called from the
+ * network device driver's &struct net_device_ops ndo_stop() method. The
+ * network device's carrier state should not be changed prior to calling this
+ * function.
+ */
void phylink_stop(struct phylink *pl)
{
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev)
phy_stop(pl->phydev);
@@ -778,9 +934,18 @@ void phylink_stop(struct phylink *pl)
}
EXPORT_SYMBOL_GPL(phylink_stop);
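/*
 * [Editor's sketch] How the start/stop pair slots into a driver's
 * ndo_open()/ndo_stop(), using the new three-argument
 * phylink_of_phy_connect(); names and error handling are illustrative.
 */
static int foo_open(struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);
	int err;

	err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
	if (err)
		return err;

	phylink_start(priv->phylink);
	return 0;
}

static int foo_stop(struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);

	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);
	return 0;
}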
+/**
+ * phylink_ethtool_get_wol() - get the wake on lan parameters for the PHY
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @wol: a pointer to &struct ethtool_wolinfo to hold the read parameters
+ *
+ * Read the wake on lan parameters from the PHY attached to the phylink
+ * instance specified by @pl. If no PHY is currently attached, report no
+ * support for wake on lan.
+ */
void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
{
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
wol->supported = 0;
wol->wolopts = 0;
@@ -790,11 +955,22 @@ void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
}
EXPORT_SYMBOL_GPL(phylink_ethtool_get_wol);
+/**
+ * phylink_ethtool_set_wol() - set wake on lan parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @wol: a pointer to &struct ethtool_wolinfo for the desired parameters
+ *
+ * Set the wake on lan parameters for the PHY attached to the phylink
+ * instance specified by @pl. If no PHY is attached, %EOPNOTSUPP is
+ * returned.
+ *
+ * Returns zero on success or negative errno code.
+ */
int phylink_ethtool_set_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
{
int ret = -EOPNOTSUPP;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev)
ret = phy_ethtool_set_wol(pl->phydev, wol);
@@ -825,12 +1001,21 @@ static void phylink_get_ksettings(const struct phylink_link_state *state,
AUTONEG_DISABLE;
}
+/**
+ * phylink_ethtool_ksettings_get() - get the current link settings
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @kset: a pointer to a &struct ethtool_link_ksettings to hold link settings
+ *
+ * Read the current link settings for the phylink instance specified by @pl.
+ * This will be the link settings read from the MAC, PHY or fixed link
+ * settings depending on the current negotiation mode.
+ */
int phylink_ethtool_ksettings_get(struct phylink *pl,
struct ethtool_link_ksettings *kset)
{
struct phylink_link_state link_state;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev) {
phy_ethtool_ksettings_get(pl->phydev, kset);
@@ -850,14 +1035,13 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
phylink_get_ksettings(&link_state, kset);
break;
- case MLO_AN_SGMII:
+ case MLO_AN_INBAND:
/* If there is a phy attached, then use the reported
* settings from the phy with no modification.
*/
if (pl->phydev)
break;
- case MLO_AN_8023Z:
phylink_get_mac_state(pl, &link_state);
/* The MAC is reporting the link results from its own PCS
@@ -872,6 +1056,11 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
}
EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
+/**
+ * phylink_ethtool_ksettings_set() - set the link settings
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @kset: a pointer to a &struct ethtool_link_ksettings for the desired modes
+ */
int phylink_ethtool_ksettings_set(struct phylink *pl,
const struct ethtool_link_ksettings *kset)
{
@@ -879,7 +1068,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
struct phylink_link_state config;
int ret;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (kset->base.autoneg != AUTONEG_DISABLE &&
kset->base.autoneg != AUTONEG_ENABLE)
@@ -951,6 +1140,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
mutex_lock(&pl->state_mutex);
/* Configure the MAC to match the new settings */
linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising);
+ pl->link_config.interface = config.interface;
pl->link_config.speed = our_kset.base.speed;
pl->link_config.duplex = our_kset.base.duplex;
pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
@@ -965,11 +1155,22 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
}
EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_set);
+/**
+ * phylink_ethtool_nway_reset() - restart negotiation
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Restart negotiation for the phylink instance specified by @pl. This will
+ * cause any attached phy to restart negotiation with the link partner, and
+ * if the MAC is in a BaseX mode, the MAC will also be requested to restart
+ * negotiation.
+ *
+ * Returns zero on success, or negative error code.
+ */
int phylink_ethtool_nway_reset(struct phylink *pl)
{
int ret = 0;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev)
ret = phy_restart_aneg(pl->phydev);
@@ -979,10 +1180,15 @@ int phylink_ethtool_nway_reset(struct phylink *pl)
}
EXPORT_SYMBOL_GPL(phylink_ethtool_nway_reset);
+/**
+ * phylink_ethtool_get_pauseparam() - get the current pause parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @pause: a pointer to a &struct ethtool_pauseparam
+ */
void phylink_ethtool_get_pauseparam(struct phylink *pl,
struct ethtool_pauseparam *pause)
{
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
pause->autoneg = !!(pl->link_config.pause & MLO_PAUSE_AN);
pause->rx_pause = !!(pl->link_config.pause & MLO_PAUSE_RX);
@@ -990,12 +1196,17 @@ void phylink_ethtool_get_pauseparam(struct phylink *pl,
}
EXPORT_SYMBOL_GPL(phylink_ethtool_get_pauseparam);
+/**
+ * phylink_ethtool_set_pauseparam() - set the current pause parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @pause: a pointer to a &struct ethtool_pauseparam
+ */
int phylink_ethtool_set_pauseparam(struct phylink *pl,
struct ethtool_pauseparam *pause)
{
struct phylink_link_state *config = &pl->link_config;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (!phylink_test(pl->supported, Pause) &&
!phylink_test(pl->supported, Asym_Pause))
@@ -1028,8 +1239,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
phylink_mac_config(pl, config);
break;
- case MLO_AN_SGMII:
- case MLO_AN_8023Z:
+ case MLO_AN_INBAND:
phylink_mac_config(pl, config);
phylink_mac_an_restart(pl);
break;
@@ -1068,24 +1278,21 @@ int phylink_ethtool_get_module_eeprom(struct phylink *pl,
}
EXPORT_SYMBOL_GPL(phylink_ethtool_get_module_eeprom);
-int phylink_init_eee(struct phylink *pl, bool clk_stop_enable)
-{
- int ret = -EPROTONOSUPPORT;
-
- WARN_ON(!lockdep_rtnl_is_held());
-
- if (pl->phydev)
- ret = phy_init_eee(pl->phydev, clk_stop_enable);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(phylink_init_eee);
-
+/**
+ * phylink_get_eee_err() - read the energy efficient ethernet error
+ * counter
+ * @pl: a pointer to a &struct phylink returned from phylink_create().
+ *
+ * Read the Energy Efficient Ethernet error counter from the PHY associated
+ * with the phylink instance specified by @pl.
+ *
+ * Returns positive error counter value, or negative error code.
+ */
int phylink_get_eee_err(struct phylink *pl)
{
int ret = 0;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev)
ret = phy_get_eee_err(pl->phydev);
@@ -1094,11 +1301,16 @@ int phylink_get_eee_err(struct phylink *pl)
}
EXPORT_SYMBOL_GPL(phylink_get_eee_err);
+/**
+ * phylink_ethtool_get_eee() - read the energy efficient ethernet parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @eee: a pointer to a &struct ethtool_eee for the read parameters
+ */
int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee)
{
int ret = -EOPNOTSUPP;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev)
ret = phy_ethtool_get_eee(pl->phydev, eee);
@@ -1107,11 +1319,16 @@ int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee)
}
EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee);
+/**
+ * phylink_ethtool_set_eee() - set the energy efficient ethernet parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @eee: a pointer to a &struct ethtool_eee for the desired parameters
+ */
int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_eee *eee)
{
int ret = -EOPNOTSUPP;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev)
ret = phy_ethtool_set_eee(pl->phydev, eee);
@@ -1246,9 +1463,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
case MLO_AN_PHY:
return -EOPNOTSUPP;
- case MLO_AN_SGMII:
- /* No phy, fall through to 8023z method */
- case MLO_AN_8023Z:
+ case MLO_AN_INBAND:
if (phy_id == 0) {
val = phylink_get_mac_state(pl, &state);
if (val < 0)
@@ -1273,27 +1488,44 @@ static int phylink_mii_write(struct phylink *pl, unsigned int phy_id,
case MLO_AN_PHY:
return -EOPNOTSUPP;
- case MLO_AN_SGMII:
- /* No phy, fall through to 8023z method */
- case MLO_AN_8023Z:
+ case MLO_AN_INBAND:
break;
}
return 0;
}
+/**
+ * phylink_mii_ioctl() - generic mii ioctl interface
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @ifr: a pointer to a &struct ifreq for socket ioctls
+ * @cmd: ioctl cmd to execute
+ *
+ * Perform the specified MII ioctl on the PHY attached to the phylink instance
+ * specified by @pl. If no PHY is attached, emulate the presence of the PHY.
+ *
+ * Returns: zero on success or negative error code.
+ *
+ * %SIOCGMIIPHY:
+ * read register from the current PHY.
+ * %SIOCGMIIREG:
+ * read register from the specified PHY.
+ * %SIOCSMIIREG:
+ * set a register on the specified PHY.
+ */
int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mii = if_mii(ifr);
int ret;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
if (pl->phydev) {
- /* PHYs only exist for MLO_AN_PHY and MLO_AN_SGMII */
+ /* PHYs only exist for MLO_AN_PHY and SGMII */
switch (cmd) {
case SIOCGMIIPHY:
mii->phy_id = pl->phydev->mdio.addr;
+ /* fall through */
case SIOCGMIIREG:
ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num);
@@ -1316,6 +1548,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
mii->phy_id = 0;
+ /* fall through */
case SIOCGMIIREG:
ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num);
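/*
 * [Editor's sketch] Typical wiring of the MII ioctls to phylink from a
 * driver's ndo_do_ioctl() method; a sketch, not taken from this patch.
 */
static int foo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	struct foo_priv *priv = netdev_priv(ndev);

	if (!netif_running(ndev))
		return -EINVAL;

	return phylink_mii_ioctl(priv->phylink, ifr, cmd);
}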
@@ -1347,7 +1580,7 @@ static int phylink_sfp_module_insert(void *upstream,
__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
struct phylink_link_state config;
phy_interface_t iface;
- int mode, ret = 0;
+ int ret = 0;
bool changed;
u8 port;
@@ -1355,14 +1588,13 @@ static int phylink_sfp_module_insert(void *upstream,
port = sfp_parse_port(pl->sfp_bus, id, support);
iface = sfp_parse_interface(pl->sfp_bus, id);
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
switch (iface) {
case PHY_INTERFACE_MODE_SGMII:
- mode = MLO_AN_SGMII;
- break;
case PHY_INTERFACE_MODE_1000BASEX:
- mode = MLO_AN_8023Z;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_10GKR:
break;
default:
return -EINVAL;
@@ -1380,16 +1612,18 @@ static int phylink_sfp_module_insert(void *upstream,
ret = phylink_validate(pl, support, &config);
if (ret) {
netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
- phylink_an_mode_str(mode), phy_modes(config.interface),
+ phylink_an_mode_str(MLO_AN_INBAND),
+ phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
return ret;
}
netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n",
- phylink_an_mode_str(mode), phy_modes(config.interface),
+ phylink_an_mode_str(MLO_AN_INBAND),
+ phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
- if (mode == MLO_AN_8023Z && pl->phydev)
+ if (phy_interface_mode_is_8023z(iface) && pl->phydev)
return -EINVAL;
changed = !bitmap_equal(pl->supported, support,
@@ -1399,15 +1633,15 @@ static int phylink_sfp_module_insert(void *upstream,
linkmode_copy(pl->link_config.advertising, config.advertising);
}
- if (pl->link_an_mode != mode ||
+ if (pl->link_an_mode != MLO_AN_INBAND ||
pl->link_config.interface != config.interface) {
pl->link_config.interface = config.interface;
- pl->link_an_mode = mode;
+ pl->link_an_mode = MLO_AN_INBAND;
changed = true;
netdev_info(pl->netdev, "switched to %s/%s link mode\n",
- phylink_an_mode_str(mode),
+ phylink_an_mode_str(MLO_AN_INBAND),
phy_modes(config.interface));
}
@@ -1424,19 +1658,18 @@ static void phylink_sfp_link_down(void *upstream)
{
struct phylink *pl = upstream;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
+ queue_work(system_power_efficient_wq, &pl->resolve);
flush_work(&pl->resolve);
-
- netif_carrier_off(pl->netdev);
}
static void phylink_sfp_link_up(void *upstream)
{
struct phylink *pl = upstream;
- WARN_ON(!lockdep_rtnl_is_held());
+ ASSERT_RTNL();
clear_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
phylink_run_resolve(pl);
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index dbef8002bc28..889a4dce1648 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -118,8 +118,6 @@ static struct phy_driver qs6612_driver[] = { {
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = qs6612_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = qs6612_ack_interrupt,
.config_intr = qs6612_config_intr,
} };
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index eda0a6e86918..ee3ca4a2f12b 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -13,29 +13,44 @@
* option) any later version.
*
*/
+#include <linux/bitops.h>
#include <linux/phy.h>
#include <linux/module.h>
-#define RTL821x_PHYSR 0x11
-#define RTL821x_PHYSR_DUPLEX 0x2000
-#define RTL821x_PHYSR_SPEED 0xc000
-#define RTL821x_INER 0x12
-#define RTL821x_INER_INIT 0x6400
-#define RTL821x_INSR 0x13
-#define RTL821x_PAGE_SELECT 0x1f
-#define RTL8211E_INER_LINK_STATUS 0x400
+#define RTL821x_PHYSR 0x11
+#define RTL821x_PHYSR_DUPLEX BIT(13)
+#define RTL821x_PHYSR_SPEED GENMASK(15, 14)
-#define RTL8211F_INER_LINK_STATUS 0x0010
-#define RTL8211F_INSR 0x1d
-#define RTL8211F_TX_DELAY 0x100
+#define RTL821x_INER 0x12
+#define RTL8211B_INER_INIT 0x6400
+#define RTL8211E_INER_LINK_STATUS BIT(10)
+#define RTL8211F_INER_LINK_STATUS BIT(4)
-#define RTL8201F_ISR 0x1e
-#define RTL8201F_IER 0x13
+#define RTL821x_INSR 0x13
+
+#define RTL821x_PAGE_SELECT 0x1f
+
+#define RTL8211F_INSR 0x1d
+
+#define RTL8211F_TX_DELAY BIT(8)
+
+#define RTL8201F_ISR 0x1e
+#define RTL8201F_IER 0x13
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
+static int rtl821x_read_page(struct phy_device *phydev)
+{
+ return __phy_read(phydev, RTL821x_PAGE_SELECT);
+}
+
+static int rtl821x_write_page(struct phy_device *phydev, int page)
+{
+ return __phy_write(phydev, RTL821x_PAGE_SELECT, page);
+}
+
static int rtl8201_ack_interrupt(struct phy_device *phydev)
{
int err;
@@ -58,31 +73,21 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev)
{
int err;
- phy_write(phydev, RTL821x_PAGE_SELECT, 0xa43);
- err = phy_read(phydev, RTL8211F_INSR);
- /* restore to default page 0 */
- phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
+ err = phy_read_paged(phydev, 0xa43, RTL8211F_INSR);
return (err < 0) ? err : 0;
}
static int rtl8201_config_intr(struct phy_device *phydev)
{
- int err;
-
- /* switch to page 7 */
- phy_write(phydev, RTL821x_PAGE_SELECT, 0x7);
+ u16 val;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, RTL8201F_IER,
- BIT(13) | BIT(12) | BIT(11));
+ val = BIT(13) | BIT(12) | BIT(11);
else
- err = phy_write(phydev, RTL8201F_IER, 0);
-
- /* restore to default page 0 */
- phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
+ val = 0;
- return err;
+ return phy_write_paged(phydev, 0x7, RTL8201F_IER, val);
}
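/*
 * [Editor's sketch] What the phy_*_paged() helpers used above boil down
 * to, built on the read_page/write_page driver methods this series adds;
 * the in-tree helpers also hold the MDIO bus lock and restore the
 * original page even when the paged access fails.
 */
static int phy_write_paged_sketch(struct phy_device *phydev, int page,
				  u32 regnum, u16 val)
{
	int oldpage, ret;

	oldpage = phydev->drv->read_page(phydev);	/* save current page */
	if (oldpage < 0)
		return oldpage;

	ret = phydev->drv->write_page(phydev, page);	/* select new page */
	if (ret < 0)
		return ret;

	ret = __phy_write(phydev, regnum, val);		/* paged access */

	phydev->drv->write_page(phydev, oldpage);	/* restore page */
	return ret;
}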
static int rtl8211b_config_intr(struct phy_device *phydev)
@@ -91,7 +96,7 @@ static int rtl8211b_config_intr(struct phy_device *phydev)
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, RTL821x_INER,
- RTL821x_INER_INIT);
+ RTL8211B_INER_INIT);
else
err = phy_write(phydev, RTL821x_INER, 0);
@@ -113,43 +118,31 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
static int rtl8211f_config_intr(struct phy_device *phydev)
{
- int err;
+ u16 val;
- phy_write(phydev, RTL821x_PAGE_SELECT, 0xa42);
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, RTL821x_INER,
- RTL8211F_INER_LINK_STATUS);
+ val = RTL8211F_INER_LINK_STATUS;
else
- err = phy_write(phydev, RTL821x_INER, 0);
- phy_write(phydev, RTL821x_PAGE_SELECT, 0);
+ val = 0;
- return err;
+ return phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
}
static int rtl8211f_config_init(struct phy_device *phydev)
{
int ret;
- u16 reg;
+ u16 val = 0;
ret = genphy_config_init(phydev);
if (ret < 0)
return ret;
- phy_write(phydev, RTL821x_PAGE_SELECT, 0xd08);
- reg = phy_read(phydev, 0x11);
-
/* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- reg |= RTL8211F_TX_DELAY;
- else
- reg &= ~RTL8211F_TX_DELAY;
-
- phy_write(phydev, 0x11, reg);
- /* restore to default page 0 */
- phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
+ val = RTL8211F_TX_DELAY;
- return 0;
+ return phy_modify_paged(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, val);
}
static struct phy_driver realtek_drvs[] = {
@@ -159,28 +152,24 @@ static struct phy_driver realtek_drvs[] = {
.phy_id_mask = 0x0000ffff,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
}, {
.phy_id = 0x001cc816,
.name = "RTL8201F 10/100Mbps Ethernet",
.phy_id_mask = 0x001fffff,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &rtl8201_ack_interrupt,
.config_intr = &rtl8201_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
.phy_id = 0x001cc912,
.name = "RTL8211B Gigabit Ethernet",
.phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211b_config_intr,
}, {
@@ -189,8 +178,6 @@ static struct phy_driver realtek_drvs[] = {
.phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = rtl821x_ack_interrupt,
.config_intr = rtl8211e_config_intr,
.suspend = genphy_suspend,
@@ -201,8 +188,6 @@ static struct phy_driver realtek_drvs[] = {
.phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211e_config_intr,
.suspend = genphy_suspend,
@@ -213,13 +198,13 @@ static struct phy_driver realtek_drvs[] = {
.phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
.config_init = &rtl8211f_config_init,
- .read_status = &genphy_read_status,
.ack_interrupt = &rtl8211f_ack_interrupt,
.config_intr = &rtl8211f_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
},
};
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index c092af137056..f1da70b9b55f 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -213,7 +213,6 @@ static struct phy_driver rockchip_phy_driver[] = {
.soft_reset = genphy_soft_reset,
.config_init = rockchip_integrated_phy_config_init,
.config_aneg = rockchip_config_aneg,
- .read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = rockchip_phy_resume,
},
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 8a1b1f4c1b7c..8961209ee949 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -8,10 +8,14 @@
#include "sfp.h"
+/**
+ * struct sfp_bus - internal representation of an SFP bus
+ */
struct sfp_bus {
+ /* private: */
struct kref kref;
struct list_head node;
- struct device_node *device_node;
+ struct fwnode_handle *fwnode;
const struct sfp_socket_ops *socket_ops;
struct device *sfp_dev;
@@ -26,6 +30,20 @@ struct sfp_bus {
bool started;
};
+/**
+ * sfp_parse_port() - Parse the EEPROM base ID, setting the port type
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ * @support: optional pointer to an array of unsigned long for the
+ * ethtool support mask
+ *
+ * Parse the EEPROM identification given in @id, and return one of
+ * %PORT_TP, %PORT_FIBRE, %PORT_DA or %PORT_OTHER. If @support is non-%NULL,
+ * also set the ethtool %ETHTOOL_LINK_MODE_xxx_BIT corresponding with
+ * the connector type.
+ *
+ * If the port type is not known, returns %PORT_OTHER.
+ */
int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support)
{
@@ -39,21 +57,19 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
case SFP_CONNECTOR_MT_RJ:
case SFP_CONNECTOR_MU:
case SFP_CONNECTOR_OPTICAL_PIGTAIL:
- if (support)
- phylink_set(support, FIBRE);
port = PORT_FIBRE;
break;
case SFP_CONNECTOR_RJ45:
- if (support)
- phylink_set(support, TP);
port = PORT_TP;
break;
+ case SFP_CONNECTOR_COPPER_PIGTAIL:
+ port = PORT_DA;
+ break;
+
case SFP_CONNECTOR_UNSPEC:
if (id->base.e1000_base_t) {
- if (support)
- phylink_set(support, TP);
port = PORT_TP;
break;
}
@@ -62,7 +78,6 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
case SFP_CONNECTOR_MPO_1X12:
case SFP_CONNECTOR_MPO_2X16:
case SFP_CONNECTOR_HSSDC_II:
- case SFP_CONNECTOR_COPPER_PIGTAIL:
case SFP_CONNECTOR_NOSEPARATE:
case SFP_CONNECTOR_MXC_2X16:
port = PORT_OTHER;
@@ -74,10 +89,40 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
break;
}
+ if (support) {
+ switch (port) {
+ case PORT_FIBRE:
+ phylink_set(support, FIBRE);
+ break;
+
+ case PORT_TP:
+ phylink_set(support, TP);
+ break;
+ }
+ }
+
return port;
}
EXPORT_SYMBOL_GPL(sfp_parse_port);
+/**
+ * sfp_parse_interface() - Parse the phy_interface_t
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ *
+ * Derive the phy_interface_t mode from the information found in the
+ * module's identifying EEPROM. There is no standard or defined way
+ * to derive this information, so we use some heuristics.
+ *
+ * If the encoding is 64b66b, then the module must be >= 10G, so
+ * return %PHY_INTERFACE_MODE_10GKR.
+ *
+ * If it's 8b10b, then it's 1G or slower. If it's definitely a fibre
+ * module, return %PHY_INTERFACE_MODE_1000BASEX mode, otherwise return
+ * %PHY_INTERFACE_MODE_SGMII mode.
+ *
+ * If the encoding is not known, return %PHY_INTERFACE_MODE_NA.
+ */
phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
const struct sfp_eeprom_id *id)
{
@@ -107,6 +152,11 @@ phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
break;
default:
+ if (id->base.e1000_base_cx) {
+ iface = PHY_INTERFACE_MODE_1000BASEX;
+ break;
+ }
+
iface = PHY_INTERFACE_MODE_NA;
dev_err(bus->sfp_dev,
"SFP module encoding does not support 8b10b nor 64b66b\n");
@@ -117,13 +167,38 @@ phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
}
EXPORT_SYMBOL_GPL(sfp_parse_interface);
+/**
+ * sfp_parse_support() - Parse the eeprom id for supported link modes
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ * @support: pointer to an array of unsigned long for the ethtool support mask
+ *
+ * Parse the EEPROM identification information and derive the supported
+ * ethtool link modes for the module.
+ */
void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support)
{
+ unsigned int br_min, br_nom, br_max;
+
phylink_set(support, Autoneg);
phylink_set(support, Pause);
phylink_set(support, Asym_Pause);
+ /* Decode the bitrate information to MBd */
+ br_min = br_nom = br_max = 0;
+ if (id->base.br_nominal) {
+ if (id->base.br_nominal != 255) {
+ br_nom = id->base.br_nominal * 100;
+ br_min = br_nom + id->base.br_nominal * id->ext.br_min;
+ br_max = br_nom + id->base.br_nominal * id->ext.br_max;
+ } else if (id->ext.br_max) {
+ br_nom = 250 * id->ext.br_max;
+ br_max = br_nom + br_nom * id->ext.br_min / 100;
+ br_min = br_nom - br_nom * id->ext.br_min / 100;
+ }
+ }
+
/* Set ethtool support from the compliance fields. */
if (id->base.e10g_base_sr)
phylink_set(support, 10000baseSR_Full);
@@ -142,6 +217,34 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
phylink_set(support, 1000baseT_Full);
}
+ /* 1000Base-PX or 1000Base-BX10 */
+ if ((id->base.e_base_px || id->base.e_base_bx10) &&
+ br_min <= 1300 && br_max >= 1200)
+ phylink_set(support, 1000baseX_Full);
+
+ /* For active or passive cables, select the link modes
+ * based on the bit rates and the cable compliance bytes.
+ */
+ if ((id->base.sfp_ct_passive || id->base.sfp_ct_active) && br_nom) {
+ /* This may look odd, but some manufacturers use 12000MBd */
+ if (br_min <= 12000 && br_max >= 10300)
+ phylink_set(support, 10000baseCR_Full);
+ if (br_min <= 3200 && br_max >= 3100)
+ phylink_set(support, 2500baseX_Full);
+ if (br_min <= 1300 && br_max >= 1200)
+ phylink_set(support, 1000baseX_Full);
+ }
+ if (id->base.sfp_ct_passive) {
+ if (id->base.passive.sff8431_app_e)
+ phylink_set(support, 10000baseCR_Full);
+ }
+ if (id->base.sfp_ct_active) {
+ if (id->base.active.sff8431_app_e ||
+ id->base.active.sff8431_lim) {
+ phylink_set(support, 10000baseCR_Full);
+ }
+ }
+
switch (id->base.extended_cc) {
case 0x00: /* Unspecified */
break;
@@ -175,35 +278,6 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
if (id->base.br_nominal >= 12)
phylink_set(support, 1000baseX_Full);
}
-
- switch (id->base.connector) {
- case SFP_CONNECTOR_SC:
- case SFP_CONNECTOR_FIBERJACK:
- case SFP_CONNECTOR_LC:
- case SFP_CONNECTOR_MT_RJ:
- case SFP_CONNECTOR_MU:
- case SFP_CONNECTOR_OPTICAL_PIGTAIL:
- break;
-
- case SFP_CONNECTOR_UNSPEC:
- if (id->base.e1000_base_t)
- break;
-
- case SFP_CONNECTOR_SG: /* guess */
- case SFP_CONNECTOR_MPO_1X12:
- case SFP_CONNECTOR_MPO_2X16:
- case SFP_CONNECTOR_HSSDC_II:
- case SFP_CONNECTOR_COPPER_PIGTAIL:
- case SFP_CONNECTOR_NOSEPARATE:
- case SFP_CONNECTOR_MXC_2X16:
- default:
- /* a guess at the supported link modes */
- dev_warn(bus->sfp_dev,
- "Guessing link modes, please report...\n");
- phylink_set(support, 1000baseT_Half);
- phylink_set(support, 1000baseT_Full);
- break;
- }
}
EXPORT_SYMBOL_GPL(sfp_parse_support);
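/*
 * [Editor's note] A worked example of the bitrate decode above. A passive
 * DAC reporting br_nominal = 103 with ext.br_min = ext.br_max = 0 gives:
 *
 *	br_nom = 103 * 100          = 10300 MBd
 *	br_min = 10300 + 103 * 0    = 10300 MBd
 *	br_max = 10300 + 103 * 0    = 10300 MBd
 *
 * which satisfies "br_min <= 12000 && br_max >= 10300" and selects
 * 10000baseCR_Full. With the extended encoding (br_nominal = 255) and,
 * say, ext.br_max = 48, ext.br_min = 5, br_nom = 250 * 48 = 12000 MBd
 * with a +/-5% window, i.e. 11400..12600 MBd. Values are illustrative.
 */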
@@ -215,7 +289,7 @@ static const struct sfp_upstream_ops *sfp_get_upstream_ops(struct sfp_bus *bus)
return bus->registered ? bus->upstream_ops : NULL;
}
-static struct sfp_bus *sfp_bus_get(struct device_node *np)
+static struct sfp_bus *sfp_bus_get(struct fwnode_handle *fwnode)
{
struct sfp_bus *sfp, *new, *found = NULL;
@@ -224,7 +298,7 @@ static struct sfp_bus *sfp_bus_get(struct device_node *np)
mutex_lock(&sfp_mutex);
list_for_each_entry(sfp, &sfp_buses, node) {
- if (sfp->device_node == np) {
+ if (sfp->fwnode == fwnode) {
kref_get(&sfp->kref);
found = sfp;
break;
@@ -233,7 +307,7 @@ static struct sfp_bus *sfp_bus_get(struct device_node *np)
if (!found && new) {
kref_init(&new->kref);
- new->device_node = np;
+ new->fwnode = fwnode;
list_add(&new->node, &sfp_buses);
found = new;
new = NULL;
@@ -246,7 +320,7 @@ static struct sfp_bus *sfp_bus_get(struct device_node *np)
return found;
}
-static void sfp_bus_release(struct kref *kref) __releases(sfp_mutex)
+static void sfp_bus_release(struct kref *kref)
{
struct sfp_bus *bus = container_of(kref, struct sfp_bus, kref);
@@ -293,6 +367,16 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
bus->registered = false;
}
+/**
+ * sfp_get_module_info() - Get the ethtool_modinfo for a SFP module
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @modinfo: a &struct ethtool_modinfo
+ *
+ * Fill in the type and eeprom_len parameters in @modinfo for a module on
+ * the sfp bus specified by @bus.
+ *
+ * Returns 0 on success or a negative errno number.
+ */
int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo)
{
if (!bus->registered)
@@ -301,6 +385,17 @@ int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo)
}
EXPORT_SYMBOL_GPL(sfp_get_module_info);
+/**
+ * sfp_get_module_eeprom() - Read the SFP module EEPROM
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @ee: a &struct ethtool_eeprom
+ * @data: buffer to contain the EEPROM data (must be at least @ee->len bytes)
+ *
+ * Read the EEPROM as specified by the supplied @ee. See the documentation
+ * for &struct ethtool_eeprom for the region to be read.
+ *
+ * Returns 0 on success or a negative errno number.
+ */
int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
u8 *data)
{
@@ -310,6 +405,15 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
}
EXPORT_SYMBOL_GPL(sfp_get_module_eeprom);
+/**
+ * sfp_upstream_start() - Inform the SFP that the network device is up
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Inform the SFP socket that the network device is now up, so that the
+ * module can be enabled by allowing TX_DISABLE to be deasserted. This
+ * should be called from the network device driver's &struct net_device_ops
+ * ndo_open() method.
+ */
void sfp_upstream_start(struct sfp_bus *bus)
{
if (bus->registered)
@@ -318,6 +422,15 @@ void sfp_upstream_start(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_upstream_start);
+/**
+ * sfp_upstream_stop() - Inform the SFP that the network device is down
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Inform the SFP socket that the network device is now down, so that the
+ * module can be disabled by asserting TX_DISABLE, disabling the laser
+ * in optical modules. This should be called from the network device
+ * driver's &struct net_device_ops ndo_stop() method.
+ */
void sfp_upstream_stop(struct sfp_bus *bus)
{
if (bus->registered)
@@ -326,11 +439,24 @@ void sfp_upstream_stop(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_upstream_stop);
-struct sfp_bus *sfp_register_upstream(struct device_node *np,
+/**
+ * sfp_register_upstream() - Register the neighbouring device
+ * @fwnode: firmware node for the SFP bus
+ * @ndev: network device associated with the interface
+ * @upstream: the upstream private data
+ * @ops: the upstream's &struct sfp_upstream_ops
+ *
+ * Register the upstream device (e.g. PHY) with the SFP bus. MAC drivers
+ * should use phylink, which will call this function for them. Returns
+ * a pointer to the allocated &struct sfp_bus.
+ *
+ * On error, returns %NULL.
+ */
+struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
struct net_device *ndev, void *upstream,
const struct sfp_upstream_ops *ops)
{
- struct sfp_bus *bus = sfp_bus_get(np);
+ struct sfp_bus *bus = sfp_bus_get(fwnode);
int ret = 0;
if (bus) {
@@ -353,10 +479,18 @@ struct sfp_bus *sfp_register_upstream(struct device_node *np,
}
EXPORT_SYMBOL_GPL(sfp_register_upstream);
+/**
+ * sfp_unregister_upstream() - Unregister sfp bus
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Unregister a previously registered upstream connection for the SFP
+ * module. @bus is returned from sfp_register_upstream().
+ */
void sfp_unregister_upstream(struct sfp_bus *bus)
{
rtnl_lock();
- sfp_unregister_bus(bus);
+ if (bus->sfp)
+ sfp_unregister_bus(bus);
bus->upstream = NULL;
bus->netdev = NULL;
rtnl_unlock();
@@ -433,7 +567,7 @@ EXPORT_SYMBOL_GPL(sfp_module_remove);
struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
const struct sfp_socket_ops *ops)
{
- struct sfp_bus *bus = sfp_bus_get(dev->of_node);
+ struct sfp_bus *bus = sfp_bus_get(dev->fwnode);
int ret = 0;
if (bus) {
@@ -459,7 +593,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket);
void sfp_unregister_socket(struct sfp_bus *bus)
{
rtnl_lock();
- sfp_unregister_bus(bus);
+ if (bus->netdev)
+ sfp_unregister_bus(bus);
bus->sfp_dev = NULL;
bus->sfp = NULL;
bus->socket_ops = NULL;
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 9dfc1c4c954f..6c7d9289078d 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -98,12 +98,18 @@ static const enum gpiod_flags gpio_flags[] = {
static DEFINE_MUTEX(sfp_mutex);
+struct sff_data {
+ unsigned int gpios;
+ bool (*module_supported)(const struct sfp_eeprom_id *id);
+};
+
struct sfp {
struct device *dev;
struct i2c_adapter *i2c;
struct mii_bus *i2c_mii;
struct sfp_bus *sfp_bus;
struct phy_device *mod_phy;
+ const struct sff_data *type;
unsigned int (*get_state)(struct sfp *);
void (*set_state)(struct sfp *, unsigned int);
@@ -123,6 +129,36 @@ struct sfp {
struct sfp_eeprom_id id;
};
+static bool sff_module_supported(const struct sfp_eeprom_id *id)
+{
+ return id->base.phys_id == SFP_PHYS_ID_SFF &&
+ id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
+}
+
+static const struct sff_data sff_data = {
+ .gpios = SFP_F_LOS | SFP_F_TX_FAULT | SFP_F_TX_DISABLE,
+ .module_supported = sff_module_supported,
+};
+
+static bool sfp_module_supported(const struct sfp_eeprom_id *id)
+{
+ return id->base.phys_id == SFP_PHYS_ID_SFP &&
+ id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
+}
+
+static const struct sff_data sfp_data = {
+ .gpios = SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT |
+ SFP_F_TX_DISABLE | SFP_F_RATE_SELECT,
+ .module_supported = sfp_module_supported,
+};
+
+static const struct of_device_id sfp_of_match[] = {
+ { .compatible = "sff,sff", .data = &sff_data, },
+ { .compatible = "sff,sfp", .data = &sfp_data, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sfp_of_match);
+
static unsigned long poll_jiffies;
static unsigned int sfp_gpio_get_state(struct sfp *sfp)
@@ -141,6 +177,11 @@ static unsigned int sfp_gpio_get_state(struct sfp *sfp)
return state;
}
+static unsigned int sff_gpio_get_state(struct sfp *sfp)
+{
+ return sfp_gpio_get_state(sfp) | SFP_F_PRESENT;
+}
+
static void sfp_gpio_set_state(struct sfp *sfp, unsigned int state)
{
if (state & SFP_F_PRESENT) {
@@ -315,12 +356,12 @@ static void sfp_sm_probe_phy(struct sfp *sfp)
msleep(T_PHY_RESET_MS);
phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR);
- if (IS_ERR(phy)) {
- dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
+ if (phy == ERR_PTR(-ENODEV)) {
+ dev_info(sfp->dev, "no PHY detected\n");
return;
}
- if (!phy) {
- dev_info(sfp->dev, "no PHY detected\n");
+ if (IS_ERR(phy)) {
+ dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
return;
}
@@ -425,11 +466,6 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
{
/* SFP module inserted - read I2C data */
struct sfp_eeprom_id id;
- char vendor[17];
- char part[17];
- char sn[17];
- char date[9];
- char rev[5];
u8 check;
int err;
@@ -465,24 +501,17 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
sfp->id = id;
- memcpy(vendor, sfp->id.base.vendor_name, 16);
- vendor[16] = '\0';
- memcpy(part, sfp->id.base.vendor_pn, 16);
- part[16] = '\0';
- memcpy(rev, sfp->id.base.vendor_rev, 4);
- rev[4] = '\0';
- memcpy(sn, sfp->id.ext.vendor_sn, 16);
- sn[16] = '\0';
- memcpy(date, sfp->id.ext.datecode, 8);
- date[8] = '\0';
-
- dev_info(sfp->dev, "module %s %s rev %s sn %s dc %s\n",
- vendor, part, rev, sn, date);
-
- /* We only support SFP modules, not the legacy GBIC modules. */
- if (sfp->id.base.phys_id != SFP_PHYS_ID_SFP ||
- sfp->id.base.phys_ext_id != SFP_PHYS_EXT_ID_SFP) {
- dev_err(sfp->dev, "module is not SFP - phys id 0x%02x 0x%02x\n",
+ dev_info(sfp->dev, "module %.*s %.*s rev %.*s sn %.*s dc %.*s\n",
+ (int)sizeof(id.base.vendor_name), id.base.vendor_name,
+ (int)sizeof(id.base.vendor_pn), id.base.vendor_pn,
+ (int)sizeof(id.base.vendor_rev), id.base.vendor_rev,
+ (int)sizeof(id.ext.vendor_sn), id.ext.vendor_sn,
+ (int)sizeof(id.ext.datecode), id.ext.datecode);
+
+ /* Check whether we support this module */
+ if (!sfp->type->module_supported(&sfp->id)) {
+ dev_err(sfp->dev,
+ "module is not supported - phys id 0x%02x 0x%02x\n",
sfp->id.base.phys_id, sfp->id.base.phys_ext_id);
return -EINVAL;
}
@@ -683,20 +712,19 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
len = min_t(unsigned int, last, ETH_MODULE_SFF_8079_LEN);
len -= first;
- ret = sfp->read(sfp, false, first, data, len);
+ ret = sfp_read(sfp, false, first, data, len);
if (ret < 0)
return ret;
first += len;
data += len;
}
- if (first >= ETH_MODULE_SFF_8079_LEN &&
- first < ETH_MODULE_SFF_8472_LEN) {
+ if (first < ETH_MODULE_SFF_8472_LEN && last > ETH_MODULE_SFF_8079_LEN) {
len = min_t(unsigned int, last, ETH_MODULE_SFF_8472_LEN);
len -= first;
first -= ETH_MODULE_SFF_8079_LEN;
- ret = sfp->read(sfp, true, first, data, len);
+ ret = sfp_read(sfp, true, first, data, len);
if (ret < 0)
return ret;
}
@@ -801,6 +829,7 @@ static void sfp_cleanup(void *data)
static int sfp_probe(struct platform_device *pdev)
{
+ const struct sff_data *sff;
struct sfp *sfp;
bool poll = false;
int irq, err, i;
@@ -815,10 +844,19 @@ static int sfp_probe(struct platform_device *pdev)
if (err < 0)
return err;
+ sff = sfp->type = &sfp_data;
+
if (pdev->dev.of_node) {
struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *id;
struct device_node *np;
+ id = of_match_node(sfp_of_match, node);
+ if (WARN_ON(!id))
+ return -EINVAL;
+
+ sff = sfp->type = id->data;
+
np = of_parse_phandle(node, "i2c-bus", 0);
if (np) {
struct i2c_adapter *i2c;
@@ -834,17 +872,22 @@ static int sfp_probe(struct platform_device *pdev)
return err;
}
}
+ }
- for (i = 0; i < GPIO_MAX; i++) {
+ for (i = 0; i < GPIO_MAX; i++)
+ if (sff->gpios & BIT(i)) {
sfp->gpio[i] = devm_gpiod_get_optional(sfp->dev,
gpio_of_names[i], gpio_flags[i]);
if (IS_ERR(sfp->gpio[i]))
return PTR_ERR(sfp->gpio[i]);
}
- sfp->get_state = sfp_gpio_get_state;
- sfp->set_state = sfp_gpio_set_state;
- }
+ sfp->get_state = sfp_gpio_get_state;
+ sfp->set_state = sfp_gpio_set_state;
+
+ /* Modules that have no detect signal are always present */
+ if (!(sfp->gpio[GPIO_MODDEF0]))
+ sfp->get_state = sff_gpio_get_state;
sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
if (!sfp->sfp_bus)
@@ -899,12 +942,6 @@ static int sfp_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id sfp_of_match[] = {
- { .compatible = "sff,sfp", },
- { },
-};
-MODULE_DEVICE_TABLE(of, sfp_of_match);
-
static struct platform_driver sfp_driver = {
.probe = sfp_probe,
.remove = sfp_remove,
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 2306bfae057f..be399d645224 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -227,8 +227,6 @@ static struct phy_driver smsc_phy_driver[] = {
.probe = smsc_phy_probe,
/* basic functions */
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
@@ -249,8 +247,6 @@ static struct phy_driver smsc_phy_driver[] = {
.probe = smsc_phy_probe,
/* basic functions */
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
@@ -276,7 +272,6 @@ static struct phy_driver smsc_phy_driver[] = {
.probe = smsc_phy_probe,
/* basic functions */
- .config_aneg = genphy_config_aneg,
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
@@ -303,8 +298,6 @@ static struct phy_driver smsc_phy_driver[] = {
.probe = smsc_phy_probe,
/* basic functions */
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.config_init = lan911x_config_init,
/* IRQ related */
@@ -319,12 +312,11 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8710/LAN8720",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT | PHY_RST_AFTER_CLK_EN,
.probe = smsc_phy_probe,
/* basic functions */
- .config_aneg = genphy_config_aneg,
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
@@ -351,7 +343,6 @@ static struct phy_driver smsc_phy_driver[] = {
.probe = smsc_phy_probe,
/* basic functions */
- .config_aneg = genphy_config_aneg,
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
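An aside on why these deletions are safe: the phylib core in this kernel falls back to the generic implementations when a driver leaves .config_aneg or .read_status NULL, so drivers only set them when they deviate from genphy behaviour. Roughly, the assumed shape of the core helper:

static int phy_config_aneg(struct phy_device *phydev)
{
	/* a driver override wins; otherwise use the generic routine */
	if (phydev->drv->config_aneg)
		return phydev->drv->config_aneg(phydev);
	return genphy_config_aneg(phydev);
}

The same removals repeat for ste10Xp, uPD60620 and vitesse below.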
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index d00cfb64529e..fbd548a1ad84 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -89,8 +89,6 @@ static struct phy_driver ste10xp_pdriver[] = {
.features = PHY_BASIC_FEATURES | SUPPORTED_Pause,
.flags = PHY_HAS_INTERRUPT,
.config_init = ste10Xp_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
.suspend = genphy_suspend,
@@ -102,8 +100,6 @@ static struct phy_driver ste10xp_pdriver[] = {
.features = PHY_BASIC_FEATURES | SUPPORTED_Pause,
.flags = PHY_HAS_INTERRUPT,
.config_init = ste10Xp_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
.ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 96b33475ea5e..55f48ee3595a 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -95,7 +95,6 @@ static struct phy_driver upd60620_driver[1] = { {
.features = PHY_BASIC_FEATURES,
.flags = 0,
.config_init = upd60620_config_init,
- .config_aneg = genphy_config_aneg,
.read_status = upd60620_read_status,
} };
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index f78ff0279648..d9dd8fbfffc7 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -267,7 +267,6 @@ static struct phy_driver vsc82xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -278,7 +277,6 @@ static struct phy_driver vsc82xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -289,7 +287,6 @@ static struct phy_driver vsc82xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -300,7 +297,6 @@ static struct phy_driver vsc82xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -311,7 +307,6 @@ static struct phy_driver vsc82xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -321,8 +316,6 @@ static struct phy_driver vsc82xx_driver[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8601_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -333,7 +326,6 @@ static struct phy_driver vsc82xx_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -344,8 +336,6 @@ static struct phy_driver vsc82xx_driver[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8221_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
}, {
@@ -356,8 +346,6 @@ static struct phy_driver vsc82xx_driver[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8221_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
} };
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 1b28e6e702f5..bdc4d23627c5 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -334,7 +334,7 @@ ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
}
/* No kernel lock - fine */
-static unsigned int
+static __poll_t
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
return 0;
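An aside: __poll_t is the sparse-checkable type for poll event masks, so these conversions change only the declared return type (and any local mask variable); the bit values are unchanged. A minimal sketch, with a hypothetical device and readiness flag:

static __poll_t example_poll(struct file *file, poll_table *wait)
{
	struct example_dev *dev = file->private_data;	/* hypothetical */
	__poll_t mask = 0;

	poll_wait(file, &dev->wq, wait);	/* register for wakeups */
	if (dev->data_ready)			/* hypothetical flag */
		mask |= POLLIN | POLLRDNORM;
	return mask;
}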
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index d8e5747ff4e3..ef6b2126b23a 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -531,10 +531,10 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
}
/* No kernel lock - fine */
-static unsigned int ppp_poll(struct file *file, poll_table *wait)
+static __poll_t ppp_poll(struct file *file, poll_table *wait)
{
struct ppp_file *pf = file->private_data;
- unsigned int mask;
+ __poll_t mask;
if (!pf)
return 0;
@@ -1006,17 +1006,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
if (!ifname_is_set)
snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
+ mutex_unlock(&pn->all_ppp_mutex);
+
ret = register_netdevice(ppp->dev);
if (ret < 0)
goto err_unit;
atomic_inc(&ppp_unit_count);
- mutex_unlock(&pn->all_ppp_mutex);
-
return 0;
err_unit:
+ mutex_lock(&pn->all_ppp_mutex);
unit_put(&pn->units_idr, ppp->file.index);
err:
mutex_unlock(&pn->all_ppp_mutex);
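An aside on the reordering above: register_netdevice() nests under the rtnl lock, and other ppp paths take all_ppp_mutex while already holding rtnl, so calling it with all_ppp_mutex held risks an ABBA deadlock. The resulting shape, simplified:

	mutex_lock(&pn->all_ppp_mutex);
	/* ... reserve the unit number in units_idr ... */
	mutex_unlock(&pn->all_ppp_mutex);

	ret = register_netdevice(ppp->dev);
	if (ret < 0) {
		/* re-take the mutex only to undo the reservation */
		mutex_lock(&pn->all_ppp_mutex);
		unit_put(&pn->units_idr, ppp->file.index);
		mutex_unlock(&pn->all_ppp_mutex);
	}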
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 7196f00f0991..047f6c68a441 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -327,7 +327,7 @@ ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
}
/* No kernel lock - fine */
-static unsigned int
+static __poll_t
ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
return 0;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 4e1da1645b15..5aa59f41bf8c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
struct pppoe_hdr *ph;
struct net_device *dev;
char *start;
+ int hlen;
lock_sock(sk);
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
@@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
if (total_len > (dev->mtu + dev->hard_header_len))
goto end;
-
- skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
- 0, GFP_KERNEL);
+ hlen = LL_RESERVED_SPACE(dev);
+ skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
+ dev->needed_tailroom, 0, GFP_KERNEL);
if (!skb) {
error = -ENOMEM;
goto end;
}
/* Reserve space for headers. */
- skb_reserve(skb, dev->hard_header_len);
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
skb->dev = dev;
@@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
/* Copy the data if there is no space for the header or if it's
* read-only.
*/
- if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
+ if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
goto abort;
__skb_push(skb, sizeof(*ph));
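An aside: LL_RESERVED_SPACE(dev) is, roughly, dev->hard_header_len + dev->needed_headroom rounded up to the hardware-header alignment (HH_DATA_MOD), so stacked lower devices get the headroom they declared via needed_headroom, which hard_header_len alone misses. The allocation pattern the patch switches to:

	unsigned int hlen = LL_RESERVED_SPACE(dev);

	skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
			   dev->needed_tailroom, 0, GFP_KERNEL);
	if (skb)
		skb_reserve(skb, hlen);	/* room for link-layer headers */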
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index cc63102ca96e..8940417c30e5 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -731,7 +731,7 @@ static void sl_sync(void)
/* Find a free SLIP channel, and link in this `tty' line. */
-static struct slip *sl_alloc(dev_t line)
+static struct slip *sl_alloc(void)
{
int i;
char name[IFNAMSIZ];
@@ -809,7 +809,7 @@ static int slip_open(struct tty_struct *tty)
/* OK. Find a free SLIP channel to use. */
err = -ENFILE;
- sl = sl_alloc(tty_devnum(tty));
+ sl = sl_alloc();
if (sl == NULL)
goto err_exit;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 0a886fda0129..0a5ed004781c 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -330,9 +330,6 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
if (!q)
return RX_HANDLER_PASS;
- if (__skb_array_full(&q->skb_array))
- goto drop;
-
skb_push(skb, ETH_HLEN);
/* Apply the forward feature mask so that we perform segmentation
@@ -348,7 +345,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
goto drop;
if (!segs) {
- if (skb_array_produce(&q->skb_array, skb))
+ if (ptr_ring_produce(&q->ring, skb))
goto drop;
goto wake_up;
}
@@ -358,7 +355,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
struct sk_buff *nskb = segs->next;
segs->next = NULL;
- if (skb_array_produce(&q->skb_array, segs)) {
+ if (ptr_ring_produce(&q->ring, segs)) {
kfree_skb(segs);
kfree_skb_list(nskb);
break;
@@ -375,7 +372,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
!(features & NETIF_F_CSUM_MASK) &&
skb_checksum_help(skb))
goto drop;
- if (skb_array_produce(&q->skb_array, skb))
+ if (ptr_ring_produce(&q->ring, skb))
goto drop;
}
@@ -497,7 +494,7 @@ static void tap_sock_destruct(struct sock *sk)
{
struct tap_queue *q = container_of(sk, struct tap_queue, sk);
- skb_array_cleanup(&q->skb_array);
+ ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
}
static int tap_open(struct inode *inode, struct file *file)
@@ -517,7 +514,7 @@ static int tap_open(struct inode *inode, struct file *file)
&tap_proto, 0);
if (!q)
goto err;
- if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
+ if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
sk_free(&q->sk);
goto err;
}
@@ -546,7 +543,7 @@ static int tap_open(struct inode *inode, struct file *file)
err = tap_set_queue(tap, file, q);
if (err) {
- /* tap_sock_destruct() will take care of freeing skb_array */
+ /* tap_sock_destruct() will take care of freeing ptr_ring */
goto err_put;
}
@@ -572,10 +569,10 @@ static int tap_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int tap_poll(struct file *file, poll_table *wait)
+static __poll_t tap_poll(struct file *file, poll_table *wait)
{
struct tap_queue *q = file->private_data;
- unsigned int mask = POLLERR;
+ __poll_t mask = POLLERR;
if (!q)
goto out;
@@ -583,7 +580,7 @@ static unsigned int tap_poll(struct file *file, poll_table *wait)
mask = 0;
poll_wait(file, &q->wq.wait, wait);
- if (!skb_array_empty(&q->skb_array))
+ if (!ptr_ring_empty(&q->ring))
mask |= POLLIN | POLLRDNORM;
if (sock_writeable(&q->sk) ||
@@ -844,7 +841,7 @@ static ssize_t tap_do_read(struct tap_queue *q,
TASK_INTERRUPTIBLE);
/* Read frames from the queue */
- skb = skb_array_consume(&q->skb_array);
+ skb = ptr_ring_consume(&q->ring);
if (skb)
break;
if (noblock) {
@@ -1176,7 +1173,7 @@ static int tap_peek_len(struct socket *sock)
{
struct tap_queue *q = container_of(sock, struct tap_queue,
sock);
- return skb_array_peek_len(&q->skb_array);
+ return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
}
/* Ops structure to mimic raw sockets with tun */
@@ -1202,7 +1199,7 @@ struct socket *tap_get_socket(struct file *file)
}
EXPORT_SYMBOL_GPL(tap_get_socket);
-struct skb_array *tap_get_skb_array(struct file *file)
+struct ptr_ring *tap_get_ptr_ring(struct file *file)
{
struct tap_queue *q;
@@ -1211,29 +1208,30 @@ struct skb_array *tap_get_skb_array(struct file *file)
q = file->private_data;
if (!q)
return ERR_PTR(-EBADFD);
- return &q->skb_array;
+ return &q->ring;
}
-EXPORT_SYMBOL_GPL(tap_get_skb_array);
+EXPORT_SYMBOL_GPL(tap_get_ptr_ring);
int tap_queue_resize(struct tap_dev *tap)
{
struct net_device *dev = tap->dev;
struct tap_queue *q;
- struct skb_array **arrays;
+ struct ptr_ring **rings;
int n = tap->numqueues;
int ret, i = 0;
- arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
- if (!arrays)
+ rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
+ if (!rings)
return -ENOMEM;
list_for_each_entry(q, &tap->queue_list, next)
- arrays[i++] = &q->skb_array;
+ rings[i++] = &q->ring;
- ret = skb_array_resize_multiple(arrays, n,
- dev->tx_queue_len, GFP_KERNEL);
+ ret = ptr_ring_resize_multiple(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ __skb_array_destroy_skb);
- kfree(arrays);
+ kfree(rings);
return ret;
}
EXPORT_SYMBOL_GPL(tap_queue_resize);
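An aside: skb_array is a thin, sk_buff-typed wrapper around ptr_ring, so this conversion mostly renames calls; the payoff is that a bare ptr_ring can later carry tagged xdp_buff pointers on the same queue (see the tun changes below). The core API, sketched:

static int ptr_ring_demo(struct sk_buff *skb)
{
	struct ptr_ring ring;
	struct sk_buff *out;
	int err;

	err = ptr_ring_init(&ring, 16, GFP_KERNEL);
	if (err)
		return err;

	if (ptr_ring_produce(&ring, skb))	/* non-zero: ring full */
		kfree_skb(skb);

	out = ptr_ring_consume(&ring);		/* NULL when empty */
	if (out)
		consume_skb(out);

	/* the destructor runs on anything still queued */
	ptr_ring_cleanup(&ring, __skb_array_destroy_skb);
	return 0;
}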
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4f4a842a1c9c..0dc66e4fbb2c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -179,7 +179,8 @@ struct tun_file {
struct mutex napi_mutex; /* Protects access to the above napi */
struct list_head next;
struct tun_struct *detached;
- struct skb_array tx_array;
+ struct ptr_ring tx_ring;
+ struct xdp_rxq_info xdp_rxq;
};
struct tun_flow_entry {
@@ -195,6 +196,11 @@ struct tun_flow_entry {
#define TUN_NUM_FLOW_ENTRIES 1024
+struct tun_prog {
+ struct rcu_head rcu;
+ struct bpf_prog *prog;
+};
+
/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persist device, the socket filter, sndbuf and vnet header size are
 * restored when the file is attached to a persist device.
@@ -232,8 +238,33 @@ struct tun_struct {
u32 rx_batched;
struct tun_pcpu_stats __percpu *pcpu_stats;
struct bpf_prog __rcu *xdp_prog;
+ struct tun_prog __rcu *steering_prog;
+ struct tun_prog __rcu *filter_prog;
};
+struct veth {
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+};
+
+bool tun_is_xdp_buff(void *ptr)
+{
+ return (unsigned long)ptr & TUN_XDP_FLAG;
+}
+EXPORT_SYMBOL(tun_is_xdp_buff);
+
+void *tun_xdp_to_ptr(void *ptr)
+{
+ return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
+}
+EXPORT_SYMBOL(tun_xdp_to_ptr);
+
+void *tun_ptr_to_xdp(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
+}
+EXPORT_SYMBOL(tun_ptr_to_xdp);
+
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
struct tun_file *tfile = container_of(napi, struct tun_file, napi);
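An aside: the three exported helpers above implement low-bit pointer tagging. Both sk_buff and xdp_buff allocations are at least word-aligned, so bit 0 of a stored pointer is always clear and can mark the entry's type; TUN_XDP_FLAG is that bit (defined in <linux/if_tun.h> by this series). The round trip through the ring:

static void tag_demo(struct ptr_ring *ring, struct xdp_buff *xdp)
{
	void *entry;

	/* produce: set bit 0 to mark "this is an xdp_buff" */
	ptr_ring_produce(ring, tun_xdp_to_ptr(xdp));

	/* consume: test the tag, then mask it off before use */
	entry = ptr_ring_consume(ring);
	if (tun_is_xdp_buff(entry))
		xdp = tun_ptr_to_xdp(entry);
}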
@@ -537,15 +568,12 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
 * different rxq no. here. If we could not get an rxhash, then we
 * hope the rxq no. may help here.
*/
-static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
- struct tun_struct *tun = netdev_priv(dev);
struct tun_flow_entry *e;
u32 txq = 0;
u32 numqueues = 0;
- rcu_read_lock();
numqueues = READ_ONCE(tun->numqueues);
txq = __skb_get_hash_symmetric(skb);
@@ -563,10 +591,37 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
txq -= numqueues;
}
- rcu_read_unlock();
return txq;
}
+static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
+{
+ struct tun_prog *prog;
+ u16 ret = 0;
+
+ prog = rcu_dereference(tun->steering_prog);
+ if (prog)
+ ret = bpf_prog_run_clear_cb(prog->prog, skb);
+
+ return ret % tun->numqueues;
+}
+
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ u16 ret;
+
+ rcu_read_lock();
+ if (rcu_dereference(tun->steering_prog))
+ ret = tun_ebpf_select_queue(tun, skb);
+ else
+ ret = tun_automq_select_queue(tun, skb);
+ rcu_read_unlock();
+
+ return ret;
+}
+
static inline bool tun_not_capable(struct tun_struct *tun)
{
const struct cred *cred = current_cred();
@@ -600,17 +655,39 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
return tun;
}
+static void tun_ptr_free(void *ptr)
+{
+ if (!ptr)
+ return;
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ put_page(virt_to_head_page(xdp->data));
+ } else {
+ __skb_array_destroy_skb(ptr);
+ }
+}
+
static void tun_queue_purge(struct tun_file *tfile)
{
- struct sk_buff *skb;
+ void *ptr;
- while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
- kfree_skb(skb);
+ while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
+ tun_ptr_free(ptr);
skb_queue_purge(&tfile->sk.sk_write_queue);
skb_queue_purge(&tfile->sk.sk_error_queue);
}
+static void tun_cleanup_tx_ring(struct tun_file *tfile)
+{
+ if (tfile->tx_ring.queue) {
+ ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
+ xdp_rxq_info_unreg(&tfile->xdp_rxq);
+ memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
+ }
+}
+
static void __tun_detach(struct tun_file *tfile, bool clean)
{
struct tun_file *ntfile;
@@ -657,8 +734,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
}
- if (tun)
- skb_array_cleanup(&tfile->tx_array);
+ tun_cleanup_tx_ring(tfile);
sock_put(&tfile->sk);
}
}
@@ -673,7 +749,6 @@ static void tun_detach(struct tun_file *tfile, bool clean)
static void tun_detach_all(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
- struct bpf_prog *xdp_prog = rtnl_dereference(tun->xdp_prog);
struct tun_file *tfile, *tmp;
int i, n = tun->numqueues;
@@ -700,17 +775,16 @@ static void tun_detach_all(struct net_device *dev)
/* Drop read queue */
tun_queue_purge(tfile);
sock_put(&tfile->sk);
+ tun_cleanup_tx_ring(tfile);
}
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
tun_enable_queue(tfile);
tun_queue_purge(tfile);
sock_put(&tfile->sk);
+ tun_cleanup_tx_ring(tfile);
}
BUG_ON(tun->numdisabled != 0);
- if (xdp_prog)
- bpf_prog_put(xdp_prog);
-
if (tun->flags & IFF_PERSIST)
module_put(THIS_MODULE);
}
@@ -751,13 +825,29 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
}
if (!tfile->detached &&
- skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
+ ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
err = -ENOMEM;
goto out;
}
tfile->queue_index = tun->numqueues;
tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
+
+ if (tfile->detached) {
+ /* Re-attach detached tfile, updating XDP queue_index */
+ WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
+
+ if (tfile->xdp_rxq.queue_index != tfile->queue_index)
+ tfile->xdp_rxq.queue_index = tfile->queue_index;
+ } else {
+ /* Set up XDP RX-queue info for a new tfile being attached */
+ err = xdp_rxq_info_reg(&tfile->xdp_rxq,
+ tun->dev, tfile->queue_index);
+ if (err < 0)
+ goto out;
+ err = 0;
+ }
+
rcu_assign_pointer(tfile->tun, tun);
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
tun->numqueues++;
@@ -937,23 +1027,10 @@ static int tun_net_close(struct net_device *dev)
}
/* Net device start xmit */
-static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
- struct tun_struct *tun = netdev_priv(dev);
- int txq = skb->queue_mapping;
- struct tun_file *tfile;
- u32 numqueues = 0;
-
- rcu_read_lock();
- tfile = rcu_dereference(tun->tfiles[txq]);
- numqueues = READ_ONCE(tun->numqueues);
-
- /* Drop packet if interface is not attached */
- if (txq >= numqueues)
- goto drop;
-
#ifdef CONFIG_RPS
- if (numqueues == 1 && static_key_false(&rps_needed)) {
+ if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
/* Select queue was not called for the skbuff, so we extract the
* RPS hash and save it into the flow_table here.
*/
@@ -969,6 +1046,37 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
#endif
+}
+
+static unsigned int run_ebpf_filter(struct tun_struct *tun,
+ struct sk_buff *skb,
+ int len)
+{
+ struct tun_prog *prog = rcu_dereference(tun->filter_prog);
+
+ if (prog)
+ len = bpf_prog_run_clear_cb(prog->prog, skb);
+
+ return len;
+}
+
+/* Net device start xmit */
+static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ int txq = skb->queue_mapping;
+ struct tun_file *tfile;
+ int len = skb->len;
+
+ rcu_read_lock();
+ tfile = rcu_dereference(tun->tfiles[txq]);
+
+ /* Drop packet if interface is not attached */
+ if (txq >= tun->numqueues)
+ goto drop;
+
+ if (!rcu_dereference(tun->steering_prog))
+ tun_automq_xmit(tun, skb);
tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
@@ -984,6 +1092,15 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
sk_filter(tfile->socket.sk, skb))
goto drop;
+ len = run_ebpf_filter(tun, skb, len);
+
+ /* Trim extra bytes since we may insert vlan proto & TCI
+ * in tun_put_user().
+ */
+ len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0;
+ if (len <= 0 || pskb_trim(skb, len))
+ goto drop;
+
if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
goto drop;
@@ -996,7 +1113,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset(skb);
- if (skb_array_produce(&tfile->tx_array, skb))
+ if (ptr_ring_produce(&tfile->tx_ring, skb))
goto drop;
/* Notify and wake up reader process */
@@ -1169,6 +1286,67 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_get_stats64 = tun_net_get_stats64,
};
+static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct xdp_buff *buff = xdp->data_hard_start;
+ int headroom = xdp->data - xdp->data_hard_start;
+ struct tun_file *tfile;
+ u32 numqueues;
+ int ret = 0;
+
+ /* Ensure headroom is available and buff is properly aligned */
+ if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp)))
+ return -ENOSPC;
+
+ *buff = *xdp;
+
+ rcu_read_lock();
+
+ numqueues = READ_ONCE(tun->numqueues);
+ if (!numqueues) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
+ numqueues]);
+ /* Encode the XDP flag into the lowest bit so the consumer can
+ * tell an XDP buffer apart from an sk_buff.
+ */
+ if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) {
+ this_cpu_inc(tun->pcpu_stats->tx_dropped);
+ ret = -ENOSPC;
+ }
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+static void tun_xdp_flush(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct tun_file *tfile;
+ u32 numqueues;
+
+ rcu_read_lock();
+
+ numqueues = READ_ONCE(tun->numqueues);
+ if (!numqueues)
+ goto out;
+
+ tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
+ numqueues]);
+ /* Notify and wake up reader process */
+ if (tfile->flags & TUN_FASYNC)
+ kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
+ tfile->socket.sk->sk_data_ready(tfile->socket.sk);
+
+out:
+ rcu_read_unlock();
+}
+
static const struct net_device_ops tap_netdev_ops = {
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
@@ -1186,6 +1364,8 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_set_rx_headroom = tun_set_headroom,
.ndo_get_stats64 = tun_net_get_stats64,
.ndo_bpf = tun_xdp,
+ .ndo_xdp_xmit = tun_xdp_xmit,
+ .ndo_xdp_flush = tun_xdp_flush,
};
static void tun_flow_init(struct tun_struct *tun)
@@ -1248,12 +1428,12 @@ static void tun_net_init(struct net_device *dev)
/* Character device part */
/* Poll */
-static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
+static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = tun_get(tfile);
struct sock *sk;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!tun)
return POLLERR;
@@ -1264,7 +1444,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
poll_wait(file, sk_sleep(sk), wait);
- if (!skb_array_empty(&tfile->tx_array))
+ if (!ptr_ring_empty(&tfile->tx_ring))
mask |= POLLIN | POLLRDNORM;
if (tun->dev->flags & IFF_UP &&
@@ -1477,6 +1657,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
xdp.data = buf + pad;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.rxq = &tfile->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -1551,7 +1732,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
int copylen;
bool zerocopy = false;
int err;
- u32 rxhash;
+ u32 rxhash = 0;
int skb_xdp = 1;
bool frags = tun_napi_frags_enabled(tun);
@@ -1739,7 +1920,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
rcu_read_unlock();
}
- rxhash = __skb_get_hash_symmetric(skb);
+ rcu_read_lock();
+ if (!rcu_dereference(tun->steering_prog))
+ rxhash = __skb_get_hash_symmetric(skb);
+ rcu_read_unlock();
if (frags) {
/* Exercise flow dissector code path. */
@@ -1783,7 +1967,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
u64_stats_update_end(&stats->syncp);
put_cpu_ptr(stats);
- tun_flow_update(tun, rxhash, tfile);
+ if (rxhash)
+ tun_flow_update(tun, rxhash, tfile);
+
return total_len;
}
@@ -1804,6 +1990,40 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
return result;
}
+static ssize_t tun_put_user_xdp(struct tun_struct *tun,
+ struct tun_file *tfile,
+ struct xdp_buff *xdp,
+ struct iov_iter *iter)
+{
+ int vnet_hdr_sz = 0;
+ size_t size = xdp->data_end - xdp->data;
+ struct tun_pcpu_stats *stats;
+ size_t ret;
+
+ if (tun->flags & IFF_VNET_HDR) {
+ struct virtio_net_hdr gso = { 0 };
+
+ vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+ if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
+ return -EINVAL;
+ if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
+ sizeof(gso)))
+ return -EFAULT;
+ iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
+ }
+
+ ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz;
+
+ stats = get_cpu_ptr(tun->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += ret;
+ u64_stats_update_end(&stats->syncp);
+ put_cpu_ptr(tun->pcpu_stats);
+
+ return ret;
+}
+
/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
struct tun_file *tfile,
@@ -1868,10 +2088,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
if (vlan_hlen) {
int ret;
- struct {
- __be16 h_vlan_proto;
- __be16 h_vlan_TCI;
- } veth;
+ struct veth veth;
veth.h_vlan_proto = skb->vlan_proto;
veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
@@ -1901,15 +2118,14 @@ done:
return total;
}
-static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
- int *err)
+static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
{
DECLARE_WAITQUEUE(wait, current);
- struct sk_buff *skb = NULL;
+ void *ptr = NULL;
int error = 0;
- skb = skb_array_consume(&tfile->tx_array);
- if (skb)
+ ptr = ptr_ring_consume(&tfile->tx_ring);
+ if (ptr)
goto out;
if (noblock) {
error = -EAGAIN;
@@ -1920,8 +2136,8 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
current->state = TASK_INTERRUPTIBLE;
while (1) {
- skb = skb_array_consume(&tfile->tx_array);
- if (skb)
+ ptr = ptr_ring_consume(&tfile->tx_ring);
+ if (ptr)
break;
if (signal_pending(current)) {
error = -ERESTARTSYS;
@@ -1940,12 +2156,12 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
out:
*err = error;
- return skb;
+ return ptr;
}
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
struct iov_iter *to,
- int noblock, struct sk_buff *skb)
+ int noblock, void *ptr)
{
ssize_t ret;
int err;
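An aside: tun_ring_recv() above follows the standard blocking-consume idiom: try a lockless consume first, and only then register on the wait queue and sleep, re-checking after every wakeup so none is lost. An idiomatic sketch of the same loop:

static void *blocking_consume(struct tun_file *tfile)
{
	DECLARE_WAITQUEUE(wait, current);
	void *ptr;

	/* fast path: nothing touches the wait queue */
	ptr = ptr_ring_consume(&tfile->tx_ring);
	if (ptr)
		return ptr;

	add_wait_queue(&tfile->wq.wait, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		ptr = ptr_ring_consume(&tfile->tx_ring);
		if (ptr || signal_pending(current))
			break;
		schedule();	/* sleep until a producer wakes us */
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tfile->wq.wait, &wait);
	return ptr;	/* NULL if interrupted by a signal */
}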
@@ -1953,23 +2169,31 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
tun_debug(KERN_INFO, tun, "tun_do_read\n");
if (!iov_iter_count(to)) {
- if (skb)
- kfree_skb(skb);
+ tun_ptr_free(ptr);
return 0;
}
- if (!skb) {
+ if (!ptr) {
/* Read frames from ring */
- skb = tun_ring_recv(tfile, noblock, &err);
- if (!skb)
+ ptr = tun_ring_recv(tfile, noblock, &err);
+ if (!ptr)
return err;
}
- ret = tun_put_user(tun, tfile, skb, to);
- if (unlikely(ret < 0))
- kfree_skb(skb);
- else
- consume_skb(skb);
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ ret = tun_put_user_xdp(tun, tfile, xdp, to);
+ put_page(virt_to_head_page(xdp->data));
+ } else {
+ struct sk_buff *skb = ptr;
+
+ ret = tun_put_user(tun, tfile, skb, to);
+ if (unlikely(ret < 0))
+ kfree_skb(skb);
+ else
+ consume_skb(skb);
+ }
return ret;
}
@@ -1991,6 +2215,39 @@ static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
return ret;
}
+static void tun_prog_free(struct rcu_head *rcu)
+{
+ struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
+
+ bpf_prog_destroy(prog->prog);
+ kfree(prog);
+}
+
+static int __tun_set_ebpf(struct tun_struct *tun,
+ struct tun_prog __rcu **prog_p,
+ struct bpf_prog *prog)
+{
+ struct tun_prog *old, *new = NULL;
+
+ if (prog) {
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->prog = prog;
+ }
+
+ spin_lock_bh(&tun->lock);
+ old = rcu_dereference_protected(*prog_p,
+ lockdep_is_held(&tun->lock));
+ rcu_assign_pointer(*prog_p, new);
+ spin_unlock_bh(&tun->lock);
+
+ if (old)
+ call_rcu(&old->rcu, tun_prog_free);
+
+ return 0;
+}
+
static void tun_free_netdev(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
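An aside: __tun_set_ebpf() above is the classic RCU publish/retire pattern: the writer swaps the pointer under tun->lock with rcu_assign_pointer() and frees the old program only after a grace period (call_rcu()), so lockless readers never see freed memory. The matching reader side, mirroring tun_ebpf_select_queue() earlier in this patch:

static u16 steering_reader(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u16 ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);
	rcu_read_unlock();

	return ret % tun->numqueues;
}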
@@ -1999,6 +2256,8 @@ static void tun_free_netdev(struct net_device *dev)
free_percpu(tun->pcpu_stats);
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
+ __tun_set_ebpf(tun, &tun->steering_prog, NULL);
+ __tun_set_ebpf(tun, &tun->filter_prog, NULL);
}
static void tun_setup(struct net_device *dev)
@@ -2072,12 +2331,12 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
{
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
struct tun_struct *tun = tun_get(tfile);
- struct sk_buff *skb = m->msg_control;
+ void *ptr = m->msg_control;
int ret;
if (!tun) {
ret = -EBADFD;
- goto out_free_skb;
+ goto out_free;
}
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
@@ -2089,7 +2348,7 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
SOL_PACKET, TUN_TX_TIMESTAMP);
goto out;
}
- ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
+ ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
if (ret > (ssize_t)total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2100,12 +2359,25 @@ out:
out_put_tun:
tun_put(tun);
-out_free_skb:
- if (skb)
- kfree_skb(skb);
+out_free:
+ tun_ptr_free(ptr);
return ret;
}
+static int tun_ptr_peek_len(void *ptr)
+{
+ if (likely(ptr)) {
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ return xdp->data_end - xdp->data;
+ }
+ return __skb_array_len_with_tag(ptr);
+ } else {
+ return 0;
+ }
+}
+
static int tun_peek_len(struct socket *sock)
{
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
@@ -2116,7 +2388,7 @@ static int tun_peek_len(struct socket *sock)
if (!tun)
return 0;
- ret = skb_array_peek_len(&tfile->tx_array);
+ ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
tun_put(tun);
return ret;
@@ -2287,6 +2559,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->filter_attached = false;
tun->sndbuf = tfile->socket.sk->sk_sndbuf;
tun->rx_batched = 0;
+ RCU_INIT_POINTER(tun->steering_prog, NULL);
tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
if (!tun->pcpu_stats) {
@@ -2479,6 +2752,26 @@ unlock:
return ret;
}
+static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
+ void __user *data)
+{
+ struct bpf_prog *prog;
+ int fd;
+
+ if (copy_from_user(&fd, data, sizeof(fd)))
+ return -EFAULT;
+
+ if (fd == -1) {
+ prog = NULL;
+ } else {
+ prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+ }
+
+ return __tun_set_ebpf(tun, prog_p, prog);
+}
+
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg, int ifreq_len)
{
@@ -2755,6 +3048,14 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = 0;
break;
+ case TUNSETSTEERINGEBPF:
+ ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
+ break;
+
+ case TUNSETFILTEREBPF:
+ ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
+ break;
+
default:
ret = -EINVAL;
break;
@@ -2851,6 +3152,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
+ memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
+
return 0;
}
@@ -2998,25 +3301,26 @@ static int tun_queue_resize(struct tun_struct *tun)
{
struct net_device *dev = tun->dev;
struct tun_file *tfile;
- struct skb_array **arrays;
+ struct ptr_ring **rings;
int n = tun->numqueues + tun->numdisabled;
int ret, i;
- arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
- if (!arrays)
+ rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
+ if (!rings)
return -ENOMEM;
for (i = 0; i < tun->numqueues; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- arrays[i] = &tfile->tx_array;
+ rings[i] = &tfile->tx_ring;
}
list_for_each_entry(tfile, &tun->disabled, next)
- arrays[i++] = &tfile->tx_array;
+ rings[i++] = &tfile->tx_ring;
- ret = skb_array_resize_multiple(arrays, n,
- dev->tx_queue_len, GFP_KERNEL);
+ ret = ptr_ring_resize_multiple(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ tun_ptr_free);
- kfree(arrays);
+ kfree(rings);
return ret;
}
@@ -3102,7 +3406,7 @@ struct socket *tun_get_socket(struct file *file)
}
EXPORT_SYMBOL_GPL(tun_get_socket);
-struct skb_array *tun_get_skb_array(struct file *file)
+struct ptr_ring *tun_get_tx_ring(struct file *file)
{
struct tun_file *tfile;
@@ -3111,9 +3415,9 @@ struct skb_array *tun_get_skb_array(struct file *file)
tfile = file->private_data;
if (!tfile)
return ERR_PTR(-EBADFD);
- return &tfile->tx_array;
+ return &tfile->tx_ring;
}
-EXPORT_SYMBOL_GPL(tun_get_skb_array);
+EXPORT_SYMBOL_GPL(tun_get_tx_ring);
module_init(tun_init);
module_exit(tun_cleanup);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 94c7804903c4..ec56ff29aac4 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2396,6 +2396,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
dev->rx_qlen = 4;
+ dev->tx_qlen = 4;
}
ret = lan78xx_write_reg(dev, BURST_CAP, buf);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 304ec6555cd8..76ac48095c29 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -826,7 +826,7 @@ err:
static const struct driver_info qmi_wwan_info = {
.description = "WWAN/QMI device",
- .flags = FLAG_WWAN,
+ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
.bind = qmi_wwan_bind,
.unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
@@ -835,7 +835,7 @@ static const struct driver_info qmi_wwan_info = {
static const struct driver_info qmi_wwan_info_quirk_dtr = {
.description = "WWAN/QMI device",
- .flags = FLAG_WWAN,
+ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
.bind = qmi_wwan_bind,
.unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
@@ -1100,6 +1100,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+ {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
@@ -1204,12 +1205,14 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
@@ -1242,6 +1245,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d51d9abf7986..0657203ffb91 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -606,6 +606,7 @@ enum rtl8152_flags {
PHY_RESET,
SCHEDULE_NAPI,
GREEN_ETHERNET,
+ DELL_TB_RX_AGG_BUG,
};
/* Define these values to match your device */
@@ -1798,6 +1799,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
dev_kfree_skb_any(skb);
remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
+
+ if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
+ break;
}
if (!skb_queue_empty(&skb_head)) {
@@ -4133,6 +4137,9 @@ static void r8153_init(struct r8152 *tp)
/* rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
+ if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
+ ocp_data |= RX_AGG_DISABLE;
+
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
rtl_tally_reset(tp);
@@ -5207,6 +5214,12 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
+ if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
+ udev->serial && !strcmp(udev->serial, "000001000000")) {
+ dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
+ set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
+ }
+
netdev->ethtool_ops = &ops;
netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index d56fe32bf48d..8a22ff67b026 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -457,12 +457,10 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
- if (!schedule_work (&dev->kevent)) {
- if (net_ratelimit())
- netdev_err(dev->net, "kevent %d may have been dropped\n", work);
- } else {
+ if (!schedule_work (&dev->kevent))
+ netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
+ else
netdev_dbg(dev->net, "kevent %d scheduled\n", work);
- }
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index f5438d0978ca..a69ad39ee57e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -410,6 +410,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (ifmp && (dev->ifindex != 0))
peer->ifindex = ifmp->ifi_index;
+ peer->gso_max_size = dev->gso_max_size;
+ peer->gso_max_segs = dev->gso_max_segs;
+
err = register_netdevice(peer);
put_net(net);
net = NULL;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 559b215c0169..626c27352ae2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -31,6 +31,7 @@
#include <linux/average.h>
#include <linux/filter.h>
#include <net/route.h>
+#include <net/xdp.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
@@ -65,16 +66,39 @@ static const unsigned long guest_offloads[] = {
VIRTIO_NET_F_GUEST_UFO
};
-struct virtnet_stats {
- struct u64_stats_sync tx_syncp;
- struct u64_stats_sync rx_syncp;
- u64 tx_bytes;
- u64 tx_packets;
+struct virtnet_stat_desc {
+ char desc[ETH_GSTRING_LEN];
+ size_t offset;
+};
+
+struct virtnet_sq_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+};
+
+struct virtnet_rq_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+};
- u64 rx_bytes;
- u64 rx_packets;
+#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
+#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
+
+static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
+ { "packets", VIRTNET_SQ_STAT(packets) },
+ { "bytes", VIRTNET_SQ_STAT(bytes) },
+};
+
+static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
+ { "packets", VIRTNET_RQ_STAT(packets) },
+ { "bytes", VIRTNET_RQ_STAT(bytes) },
};
+#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
+#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc)
+
/* Internal representation of a send virtqueue */
struct send_queue {
/* Virtqueue associated with this send _queue */
@@ -86,6 +110,8 @@ struct send_queue {
/* Name of the send queue: output.$index */
char name[40];
+ struct virtnet_sq_stats stats;
+
struct napi_struct napi;
};
@@ -98,6 +124,8 @@ struct receive_queue {
struct bpf_prog __rcu *xdp_prog;
+ struct virtnet_rq_stats stats;
+
/* Chain pages by the private ptr. */
struct page *pages;
@@ -115,6 +143,8 @@ struct receive_queue {
/* Name of this receive queue: input.$index */
char name[40];
+
+ struct xdp_rxq_info xdp_rxq;
};
struct virtnet_info {
@@ -149,9 +179,6 @@ struct virtnet_info {
/* Packet virtio header size */
u8 hdr_len;
- /* Active statistics */
- struct virtnet_stats __percpu *stats;
-
/* Work struct for refilling if we run low on memory. */
struct delayed_work refill;
@@ -261,9 +288,12 @@ static void virtqueue_napi_complete(struct napi_struct *napi,
int opaque;
opaque = virtqueue_enable_cb_prepare(vq);
- if (napi_complete_done(napi, processed) &&
- unlikely(virtqueue_poll(vq, opaque)))
- virtqueue_napi_schedule(napi, vq);
+ if (napi_complete_done(napi, processed)) {
+ if (unlikely(virtqueue_poll(vq, opaque)))
+ virtqueue_napi_schedule(napi, vq);
+ } else {
+ virtqueue_disable_cb(vq);
+ }
}
static void skb_xmit_done(struct virtqueue *vq)
@@ -556,6 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data = xdp.data_hard_start + xdp_headroom;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -689,6 +720,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp.data = data + vi->hdr_len;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + (len - vi->hdr_len);
+ xdp.rxq = &rq->xdp_rxq;
+
act = bpf_prog_run_xdp(xdp_prog, &xdp);
if (act != XDP_PASS)
@@ -1118,7 +1151,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
struct virtnet_info *vi = rq->vq->vdev->priv;
unsigned int len, received = 0, bytes = 0;
void *buf;
- struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
if (!vi->big_packets || vi->mergeable_rx_bufs) {
void *ctx;
@@ -1141,10 +1173,10 @@ static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
schedule_delayed_work(&vi->refill, 0);
}
- u64_stats_update_begin(&stats->rx_syncp);
- stats->rx_bytes += bytes;
- stats->rx_packets += received;
- u64_stats_update_end(&stats->rx_syncp);
+ u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.bytes += bytes;
+ rq->stats.packets += received;
+ u64_stats_update_end(&rq->stats.syncp);
return received;
}
@@ -1153,8 +1185,6 @@ static void free_old_xmit_skbs(struct send_queue *sq)
{
struct sk_buff *skb;
unsigned int len;
- struct virtnet_info *vi = sq->vq->vdev->priv;
- struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
unsigned int packets = 0;
unsigned int bytes = 0;
@@ -1173,10 +1203,10 @@ static void free_old_xmit_skbs(struct send_queue *sq)
if (!packets)
return;
- u64_stats_update_begin(&stats->tx_syncp);
- stats->tx_bytes += bytes;
- stats->tx_packets += packets;
- u64_stats_update_end(&stats->tx_syncp);
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.bytes += bytes;
+ sq->stats.packets += packets;
+ u64_stats_update_end(&sq->stats.syncp);
}
static void virtnet_poll_cleantx(struct receive_queue *rq)
@@ -1222,13 +1252,18 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
- int i;
+ int i, err;
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
/* Make sure we have some buffers: if oom use wq. */
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
+
+ err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
+ if (err < 0)
+ return err;
+
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
}
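An aside: xdp_rxq_info (from <net/xdp.h>, new at this kernel version) describes the RX queue a frame arrived on, letting XDP programs inspect its device and queue index. The lifecycle, as virtio-net now wires it up: register per queue at open, point each frame's xdp.rxq at it, unregister at close. Sketched:

static int example_queue_up(struct receive_queue *rq,
			    struct net_device *dev, u32 idx)
{
	return xdp_rxq_info_reg(&rq->xdp_rxq, dev, idx);
}

static u32 example_run(struct receive_queue *rq, struct bpf_prog *prog,
		       struct xdp_buff *xdp)
{
	xdp->rxq = &rq->xdp_rxq;	/* frame's origin queue */
	return bpf_prog_run_xdp(prog, xdp);
}

static void example_queue_down(struct receive_queue *rq)
{
	xdp_rxq_info_unreg(&rq->xdp_rxq);
}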
@@ -1460,24 +1495,25 @@ static void virtnet_stats(struct net_device *dev,
struct rtnl_link_stats64 *tot)
{
struct virtnet_info *vi = netdev_priv(dev);
- int cpu;
unsigned int start;
+ int i;
- for_each_possible_cpu(cpu) {
- struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
+ for (i = 0; i < vi->max_queue_pairs; i++) {
u64 tpackets, tbytes, rpackets, rbytes;
+ struct receive_queue *rq = &vi->rq[i];
+ struct send_queue *sq = &vi->sq[i];
do {
- start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
- tpackets = stats->tx_packets;
- tbytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
+ start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
+ tpackets = sq->stats.packets;
+ tbytes = sq->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
do {
- start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
- rpackets = stats->rx_packets;
- rbytes = stats->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));
+ start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
+ rpackets = rq->stats.packets;
+ rbytes = rq->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
tot->rx_packets += rpackets;
tot->tx_packets += tpackets;
@@ -1557,6 +1593,7 @@ static int virtnet_close(struct net_device *dev)
cancel_delayed_work_sync(&vi->refill);
for (i = 0; i < vi->max_queue_pairs; i++) {
+ xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
napi_disable(&vi->rq[i].napi);
virtnet_napi_tx_disable(&vi->sq[i].napi);
}
@@ -1814,6 +1851,83 @@ static int virtnet_set_channels(struct net_device *dev,
return err;
}
+static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ char *p = (char *)data;
+ unsigned int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
+ snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
+ i, virtnet_rq_stats_desc[j].desc);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
+ snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s",
+ i, virtnet_sq_stats_desc[j].desc);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ }
+}
+
+static int virtnet_get_sset_count(struct net_device *dev, int sset)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
+ VIRTNET_SQ_STATS_LEN);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void virtnet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ unsigned int idx = 0, start, i, j;
+ const u8 *stats_base;
+ size_t offset;
+
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ struct receive_queue *rq = &vi->rq[i];
+
+ stats_base = (u8 *)&rq->stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
+ for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
+ offset = virtnet_rq_stats_desc[j].offset;
+ data[idx + j] = *(u64 *)(stats_base + offset);
+ }
+ } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
+ idx += VIRTNET_RQ_STATS_LEN;
+ }
+
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ struct send_queue *sq = &vi->sq[i];
+
+ stats_base = (u8 *)&sq->stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
+ for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
+ offset = virtnet_sq_stats_desc[j].offset;
+ data[idx + j] = *(u64 *)(stats_base + offset);
+ }
+ } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
+ idx += VIRTNET_SQ_STATS_LEN;
+ }
+}
+
static void virtnet_get_channels(struct net_device *dev,
struct ethtool_channels *channels)
{
@@ -1891,10 +2005,31 @@ static void virtnet_init_settings(struct net_device *dev)
vi->duplex = DUPLEX_UNKNOWN;
}
+static void virtnet_update_settings(struct virtnet_info *vi)
+{
+ u32 speed;
+ u8 duplex;
+
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
+ return;
+
+ speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config,
+ speed));
+ if (ethtool_validate_speed(speed))
+ vi->speed = speed;
+ duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config,
+ duplex));
+ if (ethtool_validate_duplex(duplex))
+ vi->duplex = duplex;
+}
+
static const struct ethtool_ops virtnet_ethtool_ops = {
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
+ .get_strings = virtnet_get_strings,
+ .get_sset_count = virtnet_get_sset_count,
+ .get_ethtool_stats = virtnet_get_ethtool_stats,
.set_channels = virtnet_set_channels,
.get_channels = virtnet_get_channels,
.get_ts_info = ethtool_op_get_ts_info,
@@ -2144,6 +2279,7 @@ static void virtnet_config_changed_work(struct work_struct *work)
vi->status = v;
if (vi->status & VIRTIO_NET_S_LINK_UP) {
+ virtnet_update_settings(vi);
netif_carrier_on(vi->dev);
netif_tx_wake_all_queues(vi->dev);
} else {
@@ -2386,6 +2522,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
+
+ u64_stats_init(&vi->rq[i].stats.syncp);
+ u64_stats_init(&vi->sq[i].stats.syncp);
}
return 0;
@@ -2510,7 +2649,7 @@ static int virtnet_validate(struct virtio_device *vdev)
static int virtnet_probe(struct virtio_device *vdev)
{
- int i, err;
+ int i, err = -ENOMEM;
struct net_device *dev;
struct virtnet_info *vi;
u16 max_queue_pairs;
@@ -2587,17 +2726,6 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->dev = dev;
vi->vdev = vdev;
vdev->priv = vi;
- vi->stats = alloc_percpu(struct virtnet_stats);
- err = -ENOMEM;
- if (vi->stats == NULL)
- goto free;
-
- for_each_possible_cpu(i) {
- struct virtnet_stats *virtnet_stats;
- virtnet_stats = per_cpu_ptr(vi->stats, i);
- u64_stats_init(&virtnet_stats->tx_syncp);
- u64_stats_init(&virtnet_stats->rx_syncp);
- }
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
@@ -2634,7 +2762,7 @@ static int virtnet_probe(struct virtio_device *vdev)
*/
dev_err(&vdev->dev, "device MTU appears to have changed "
"it is now %d < %d", mtu, dev->min_mtu);
- goto free_stats;
+ goto free;
}
dev->mtu = mtu;
@@ -2658,7 +2786,7 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
err = init_vqs(vi);
if (err)
- goto free_stats;
+ goto free;
#ifdef CONFIG_SYSFS
if (vi->mergeable_rx_bufs)
@@ -2692,6 +2820,7 @@ static int virtnet_probe(struct virtio_device *vdev)
schedule_work(&vi->config_work);
} else {
vi->status = VIRTIO_NET_S_LINK_UP;
+ virtnet_update_settings(vi);
netif_carrier_on(dev);
}
@@ -2712,8 +2841,6 @@ free_vqs:
cancel_delayed_work_sync(&vi->refill);
free_receive_page_frags(vi);
virtnet_del_vqs(vi);
-free_stats:
- free_percpu(vi->stats);
free:
free_netdev(dev);
return err;
@@ -2746,7 +2873,6 @@ static void virtnet_remove(struct virtio_device *vdev)
remove_vq_common(vi);
- free_percpu(vi->stats);
free_netdev(vi->dev);
}
@@ -2793,7 +2919,8 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
VIRTIO_NET_F_CTRL_MAC_ADDR, \
- VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
+ VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
+ VIRTIO_NET_F_SPEED_DUPLEX
static unsigned int features[] = {
VIRTNET_FEATURES,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d1c7029ded7c..cf95290b160c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1616,7 +1616,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
rq->rx_ring[i].basePA);
rq->rx_ring[i].base = NULL;
}
- rq->buf_info[i] = NULL;
}
if (rq->data_ring.base) {
@@ -1638,6 +1637,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
(rq->rx_ring[0].size + rq->rx_ring[1].size);
dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
rq->buf_info_pa);
+ rq->buf_info[0] = rq->buf_info[1] = NULL;
}
}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 9c51b8be0038..5ba222920e80 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.a.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.11.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040a00
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040b00
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -416,8 +416,8 @@ struct vmxnet3_adapter {
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
-#define VMXNET3_DEF_RX_RING_SIZE 256
-#define VMXNET3_DEF_RX_RING2_SIZE 128
+#define VMXNET3_DEF_RX_RING_SIZE 1024
+#define VMXNET3_DEF_RX_RING2_SIZE 256
#define VMXNET3_DEF_RXDATA_DESC_SIZE 128
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index feb1b2e15c2e..139c61c8244a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -673,8 +673,9 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
struct sock *sk,
struct sk_buff *skb)
{
- /* don't divert multicast */
- if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+ /* don't divert multicast or local broadcast */
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
+ ipv4_is_lbcast(ip_hdr(skb)->daddr))
return skb;
if (qdisc_tx_is_default(vrf_dev))
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 19b9cc51079e..fab7a4db249e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2155,6 +2155,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ndst = &rt->dst;
+ if (skb_dst(skb)) {
+ int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
+
+ skb_dst_update_pmtu(skb, mtu);
+ }
+
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
@@ -2190,6 +2196,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto out_unlock;
}
+ if (skb_dst(skb)) {
+ int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
+
+ skb_dst_update_pmtu(skb, mtu);
+ }
+
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
@@ -3103,6 +3115,11 @@ static void vxlan_config_apply(struct net_device *dev,
max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
VXLAN_HEADROOM);
+ if (max_mtu < ETH_MIN_MTU)
+ max_mtu = ETH_MIN_MTU;
+
+ if (!changelink && !conf->mtu)
+ dev->mtu = max_mtu;
}
if (dev->mtu > max_mtu)
@@ -3692,18 +3709,16 @@ static __net_init int vxlan_init_net(struct net *net)
return 0;
}
-static void __net_exit vxlan_exit_net(struct net *net)
+static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan, *next;
struct net_device *dev, *aux;
unsigned int h;
- LIST_HEAD(list);
- rtnl_lock();
for_each_netdev_safe(net, dev, aux)
if (dev->rtnl_link_ops == &vxlan_link_ops)
- unregister_netdevice_queue(dev, &list);
+ unregister_netdevice_queue(dev, head);
list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
/* If vxlan->dev is in the same netns, it has already been added
@@ -3711,20 +3726,30 @@ static void __net_exit vxlan_exit_net(struct net *net)
*/
if (!net_eq(dev_net(vxlan->dev), net)) {
gro_cells_destroy(&vxlan->gro_cells);
- unregister_netdevice_queue(vxlan->dev, &list);
+ unregister_netdevice_queue(vxlan->dev, head);
}
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
-
for (h = 0; h < PORT_HASH_SIZE; ++h)
WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
}
+static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+{
+ struct net *net;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ list_for_each_entry(net, net_list, exit_list)
+ vxlan_destroy_tunnels(net, &list);
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
- .exit = vxlan_exit_net,
+ .exit_batch = vxlan_exit_batch_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
};
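Moving from .exit to .exit_batch means the teardown takes rtnl_lock() once for a whole batch of dying namespaces and funnels every device into a single unregister_netdevice_many() call, instead of paying the lock round-trip per netns. A minimal sketch of the pattern (the collect helper is illustrative):

/* Minimal sketch of the batched pernet exit pattern used above. */
static void __net_exit sketch_exit_batch(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		sketch_collect_devices(net, &kill_list);	/* queue only */
	unregister_netdevice_many(&kill_list);	/* one pass for all netns */
	rtnl_unlock();
}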
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 6ea16260ec76..f6b000ddcd15 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -924,7 +924,7 @@ static int chrdev_tx_done(struct channel_data *chan, int size)
return 1;
}
-static unsigned int cosa_poll(struct file *file, poll_table *poll)
+static __poll_t cosa_poll(struct file *file, poll_table *poll)
{
pr_info("cosa_poll is here\n");
return 0;
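__poll_t is the sparse-annotated type for poll bitmasks, so this conversion lets static checkers flag poll bits being mixed with plain integers. cosa's handler is a stub; a hedged sketch of what a non-stub handler with this signature looks like (names and the ready condition are illustrative):

static wait_queue_head_t sketch_waitq;	/* illustrative state */
static bool sketch_data_ready;

static __poll_t sketch_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &sketch_waitq, wait);
	if (sketch_data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}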
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 87f56d0e17a6..deb5ae21a559 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -47,12 +47,19 @@ config ATH10K_DEBUG
config ATH10K_DEBUGFS
bool "Atheros ath10k debugfs support"
depends on ATH10K && DEBUG_FS
- select RELAY
---help---
Enable debugfs support
If unsure, say Y to make it easier to debug problems.
+config ATH10K_SPECTRAL
+ bool "Atheros ath10k spectral scan support"
+ depends on ATH10K_DEBUGFS
+ select RELAY
+ default n
+ ---help---
+ Say Y to enable access to the FFT/spectral data via debugfs.
+
config ATH10K_TRACING
bool "Atheros ath10k tracing support"
depends on ATH10K
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 9492177e9063..6739ac26fd29 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -15,12 +15,13 @@ ath10k_core-y += mac.o \
p2p.o \
swap.o
-ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
+ath10k_core-$(CONFIG_ATH10K_SPECTRAL) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
ath10k_core-$(CONFIG_THERMAL) += thermal.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
ath10k_core-$(CONFIG_PM) += wow.o
+ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index ff6815e95684..35d10490f6c3 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2016-2017 Qualcomm Atheros, Inc. All rights reserved.
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 2d3a2f31123d..af4978d6a14b 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 9c0839b2ca8f..9a396817aa55 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index a8afd690290f..b9def7bace2f 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -327,12 +327,12 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
* Guts of ath10k_ce_send.
* The caller takes responsibility for any needed locking.
*/
-int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
- void *per_transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags)
+static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
@@ -384,6 +384,87 @@ exit:
return ret;
}
+static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ struct ce_desc_64 *desc, sdesc;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int write_index = src_ring->write_index;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ __le32 *addr;
+ u32 desc_flags = 0;
+ int ret = 0;
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ return -ESHUTDOWN;
+
+ if (nbytes > ce_state->src_sz_max)
+ ath10k_warn(ar, "%s: trying to send more than we can (nbytes: %d, max: %d)\n",
+ __func__, nbytes, ce_state->src_sz_max);
+
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) <= 0)) {
+ ret = -ENOSR;
+ goto exit;
+ }
+
+ desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
+ write_index);
+
+ desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
+
+ if (flags & CE_SEND_FLAG_GATHER)
+ desc_flags |= CE_DESC_FLAGS_GATHER;
+
+ if (flags & CE_SEND_FLAG_BYTE_SWAP)
+ desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
+
+ addr = (__le32 *)&sdesc.addr;
+
+ flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
+ addr[0] = __cpu_to_le32(buffer);
+ addr[1] = __cpu_to_le32(flags);
+ if (flags & CE_SEND_FLAG_GATHER)
+ addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
+ else
+ addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));
+
+ sdesc.nbytes = __cpu_to_le16(nbytes);
+ sdesc.flags = __cpu_to_le16(desc_flags);
+
+ *desc = sdesc;
+
+ src_ring->per_transfer_context[write_index] = per_transfer_context;
+
+ /* Update Source Ring Write Index */
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+ if (!(flags & CE_SEND_FLAG_GATHER))
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+
+ src_ring->write_index = write_index;
+exit:
+ return ret;
+}
+
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
+ buffer, nbytes, transfer_id, flags);
+}
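In the 64-bit send path above, the low 32 bits of the DMA address land in the first descriptor word while the remaining high bits (the macro names suggest 37-bit target addressing, matching the 5-bit CE_DESC_FLAGS_GET_MASK) ride in the flags word. A worked sketch of the split, with an illustrative helper name:

/* Illustrative split of a wide DMA address as done in
 * _ath10k_ce_send_nolock_64() above.
 */
static void sketch_split_addr(dma_addr_t buffer, u32 *lo, u32 *hi_bits)
{
	*lo = lower_32_bits(buffer);
	*hi_bits = upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
}

/* e.g. buffer = 0x1234567890:
 *	lo      = 0x34567890
 *	hi_bits = 0x12	(bits 36:32, carried in the low flag bits)
 */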
+
void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
struct ath10k *ar = pipe->ar;
@@ -413,7 +494,7 @@ void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
- u32 buffer,
+ dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags)
@@ -459,7 +540,8 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
-int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr)
{
struct ath10k *ar = pipe->ar;
struct ath10k_ce *ce = ath10k_ce_priv(ar);
@@ -488,6 +570,39 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
return 0;
}
+static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
+ void *ctx,
+ dma_addr_t paddr)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ unsigned int sw_index = dest_ring->sw_index;
+ struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_DEST_RING_TO_DESC_64(base, write_index);
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ lockdep_assert_held(&ce->ce_lock);
+
+ if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+ return -ENOSPC;
+
+ desc->addr = __cpu_to_le64(paddr);
+ desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK);
+
+ desc->nbytes = 0;
+
+ dest_ring->per_transfer_context[write_index] = ctx;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+
+ return 0;
+}
+
void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
{
struct ath10k *ar = pipe->ar;
@@ -508,14 +623,15 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
dest_ring->write_index = write_index;
}
-int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr)
{
struct ath10k *ar = pipe->ar;
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret;
spin_lock_bh(&ce->ce_lock);
- ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
+ ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
spin_unlock_bh(&ce->ce_lock);
return ret;
@@ -525,9 +641,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
* Guts of ath10k_ce_completed_recv_next.
* The caller takes responsibility for any necessary locking.
*/
-int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
- void **per_transfer_contextp,
- unsigned int *nbytesp)
+static int
+ _ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ unsigned int *nbytesp)
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
@@ -574,6 +691,64 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
return 0;
}
+static int
+_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ unsigned int *nbytesp)
+{
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int sw_index = dest_ring->sw_index;
+ struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_DEST_RING_TO_DESC_64(base, sw_index);
+ struct ce_desc_64 sdesc;
+ u16 nbytes;
+
+ /* Copy in one go for performance reasons */
+ sdesc = *desc;
+
+ nbytes = __le16_to_cpu(sdesc.nbytes);
+ if (nbytes == 0) {
+ /* This closes a relatively unusual race where the Host
+ * sees the updated DRRI before the update to the
+ * corresponding descriptor has completed. We treat this
+ * as a descriptor that is not yet done.
+ */
+ return -EIO;
+ }
+
+ desc->nbytes = 0;
+
+ /* Return data from completed destination descriptor */
+ *nbytesp = nbytes;
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+ * So update the transfer context for all CEs except CE5.
+ */
+ if (ce_state->id != 5)
+ dest_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+
+ return 0;
+}
+
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_ctx,
+ unsigned int *nbytesp)
+{
+ return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
+ per_transfer_ctx,
+ nbytesp);
+}
+
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
unsigned int *nbytesp)
@@ -583,17 +758,18 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
int ret;
spin_lock_bh(&ce->ce_lock);
- ret = ath10k_ce_completed_recv_next_nolock(ce_state,
+ ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
per_transfer_contextp,
nbytesp);
+
spin_unlock_bh(&ce->ce_lock);
return ret;
}
-int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
- void **per_transfer_contextp,
- u32 *bufferp)
+static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp)
{
struct ath10k_ce_ring *dest_ring;
unsigned int nentries_mask;
@@ -644,6 +820,69 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
return ret;
}
+static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp)
+{
+ struct ath10k_ce_ring *dest_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int ret;
+ struct ath10k *ar;
+ struct ath10k_ce *ce;
+
+ dest_ring = ce_state->dest_ring;
+
+ if (!dest_ring)
+ return -EIO;
+
+ ar = ce_state->ar;
+ ce = ath10k_ce_priv(ar);
+
+ spin_lock_bh(&ce->ce_lock);
+
+ nentries_mask = dest_ring->nentries_mask;
+ sw_index = dest_ring->sw_index;
+ write_index = dest_ring->write_index;
+ if (write_index != sw_index) {
+ struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_DEST_RING_TO_DESC_64(base, sw_index);
+
+ /* Return data from completed destination descriptor */
+ *bufferp = __le64_to_cpu(desc->addr);
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ dest_ring->per_transfer_context[sw_index] = NULL;
+ desc->nbytes = 0;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp)
+{
+ return ce_state->ops->ce_revoke_recv_next(ce_state,
+ per_transfer_contextp,
+ bufferp);
+}
+
/*
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
@@ -698,10 +937,45 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
return 0;
}
+static void ath10k_ce_extract_desc_data(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 sw_index,
+ dma_addr_t *bufferp,
+ u32 *nbytesp,
+ u32 *transfer_idp)
+{
+ struct ce_desc *base = src_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
+
+ /* Return data from completed source descriptor */
+ *bufferp = __le32_to_cpu(desc->addr);
+ *nbytesp = __le16_to_cpu(desc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(desc->flags),
+ CE_DESC_FLAGS_META_DATA);
+}
+
+static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 sw_index,
+ dma_addr_t *bufferp,
+ u32 *nbytesp,
+ u32 *transfer_idp)
+{
+ struct ce_desc_64 *base = src_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_SRC_RING_TO_DESC_64(base, sw_index);
+
+ /* Return data from completed source descriptor */
+ *bufferp = __le64_to_cpu(desc->addr);
+ *nbytesp = __le16_to_cpu(desc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(desc->flags),
+ CE_DESC_FLAGS_META_DATA);
+}
+
/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
+ dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
@@ -728,14 +1002,9 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
write_index = src_ring->write_index;
if (write_index != sw_index) {
- struct ce_desc *base = src_ring->base_addr_owner_space;
- struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
-
- /* Return data from completed source descriptor */
- *bufferp = __le32_to_cpu(desc->addr);
- *nbytesp = __le16_to_cpu(desc->nbytes);
- *transfer_idp = MS(__le16_to_cpu(desc->flags),
- CE_DESC_FLAGS_META_DATA);
+ ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
+ bufferp, nbytesp,
+ transfer_idp);
if (per_transfer_contextp)
*per_transfer_contextp =
@@ -897,8 +1166,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
nentries = roundup_pow_of_two(attr->src_nentries);
- memset(src_ring->base_addr_owner_space, 0,
- nentries * sizeof(struct ce_desc));
+ if (ar->hw_params.target_64bit)
+ memset(src_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc_64));
+ else
+ memset(src_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc));
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
@@ -934,8 +1207,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
nentries = roundup_pow_of_two(attr->dest_nentries);
- memset(dest_ring->base_addr_owner_space, 0,
- nentries * sizeof(struct ce_desc));
+ if (ar->hw_params.target_64bit)
+ memset(dest_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc_64));
+ else
+ memset(dest_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc));
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
@@ -993,12 +1270,57 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
src_ring->base_addr_ce_space_unaligned = base_addr;
- src_ring->base_addr_owner_space = PTR_ALIGN(
- src_ring->base_addr_owner_space_unaligned,
- CE_DESC_RING_ALIGN);
- src_ring->base_addr_ce_space = ALIGN(
- src_ring->base_addr_ce_space_unaligned,
- CE_DESC_RING_ALIGN);
+ src_ring->base_addr_owner_space =
+ PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ src_ring->base_addr_ce_space =
+ ALIGN(src_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return src_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *src_ring;
+ u32 nentries = attr->src_nentries;
+ dma_addr_t base_addr;
+
+ nentries = roundup_pow_of_two(nentries);
+
+ src_ring = kzalloc(sizeof(*src_ring) +
+ (nentries *
+ sizeof(*src_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (!src_ring)
+ return ERR_PTR(-ENOMEM);
+
+ src_ring->nentries = nentries;
+ src_ring->nentries_mask = nentries - 1;
+
+ /* Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ src_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc_64) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
+ if (!src_ring->base_addr_owner_space_unaligned) {
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ src_ring->base_addr_ce_space_unaligned = base_addr;
+
+ src_ring->base_addr_owner_space =
+ PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ src_ring->base_addr_ce_space =
+ ALIGN(src_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
return src_ring;
}
@@ -1039,12 +1361,63 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
dest_ring->base_addr_ce_space_unaligned = base_addr;
- dest_ring->base_addr_owner_space = PTR_ALIGN(
- dest_ring->base_addr_owner_space_unaligned,
- CE_DESC_RING_ALIGN);
- dest_ring->base_addr_ce_space = ALIGN(
- dest_ring->base_addr_ce_space_unaligned,
- CE_DESC_RING_ALIGN);
+ dest_ring->base_addr_owner_space =
+ PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ dest_ring->base_addr_ce_space =
+ ALIGN(dest_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return dest_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *dest_ring;
+ u32 nentries;
+ dma_addr_t base_addr;
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+ dest_ring = kzalloc(sizeof(*dest_ring) +
+ (nentries *
+ sizeof(*dest_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (!dest_ring)
+ return ERR_PTR(-ENOMEM);
+
+ dest_ring->nentries = nentries;
+ dest_ring->nentries_mask = nentries - 1;
+
+ /* Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ dest_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc_64) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
+ if (!dest_ring->base_addr_owner_space_unaligned) {
+ kfree(dest_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dest_ring->base_addr_ce_space_unaligned = base_addr;
+
+ /* Initialize the memory to 0 to prevent garbage data from
+ * crashing the system when downloading firmware
+ */
+ memset(dest_ring->base_addr_owner_space_unaligned, 0,
+ nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);
+
+ dest_ring->base_addr_owner_space =
+ PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ dest_ring->base_addr_ce_space =
+ ALIGN(dest_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
return dest_ring;
}
@@ -1107,65 +1480,36 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
ath10k_ce_deinit_dest_ring(ar, ce_id);
}
-int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
- const struct ce_attr *attr)
+static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
- int ret;
-
- /*
- * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
- * additional TX locking checks.
- *
- * For the lack of a better place do the check here.
- */
- BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
- (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
- (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
- (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-
- ce_state->ar = ar;
- ce_state->id = ce_id;
- ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
- ce_state->attr_flags = attr->flags;
- ce_state->src_sz_max = attr->src_sz_max;
- if (attr->src_nentries)
- ce_state->send_cb = attr->send_cb;
-
- if (attr->dest_nentries)
- ce_state->recv_cb = attr->recv_cb;
-
- if (attr->src_nentries) {
- ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
- if (IS_ERR(ce_state->src_ring)) {
- ret = PTR_ERR(ce_state->src_ring);
- ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
- ce_id, ret);
- ce_state->src_ring = NULL;
- return ret;
- }
+ if (ce_state->src_ring) {
+ dma_free_coherent(ar->dev,
+ (ce_state->src_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->src_ring->base_addr_owner_space,
+ ce_state->src_ring->base_addr_ce_space);
+ kfree(ce_state->src_ring);
}
- if (attr->dest_nentries) {
- ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
- attr);
- if (IS_ERR(ce_state->dest_ring)) {
- ret = PTR_ERR(ce_state->dest_ring);
- ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
- ce_id, ret);
- ce_state->dest_ring = NULL;
- return ret;
- }
+ if (ce_state->dest_ring) {
+ dma_free_coherent(ar->dev,
+ (ce_state->dest_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->dest_ring->base_addr_owner_space,
+ ce_state->dest_ring->base_addr_ce_space);
+ kfree(ce_state->dest_ring);
}
- return 0;
+ ce_state->src_ring = NULL;
+ ce_state->dest_ring = NULL;
}
-void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
@@ -1173,7 +1517,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
if (ce_state->src_ring) {
dma_free_coherent(ar->dev,
(ce_state->src_ring->nentries *
- sizeof(struct ce_desc) +
+ sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
ce_state->src_ring->base_addr_owner_space,
ce_state->src_ring->base_addr_ce_space);
@@ -1183,7 +1527,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
if (ce_state->dest_ring) {
dma_free_coherent(ar->dev,
(ce_state->dest_ring->nentries *
- sizeof(struct ce_desc) +
+ sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
ce_state->dest_ring->base_addr_owner_space,
ce_state->dest_ring->base_addr_ce_space);
@@ -1194,6 +1538,14 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
ce_state->dest_ring = NULL;
}
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+
+ ce_state->ops->ce_free_pipe(ar, ce_id);
+}
+
void ath10k_ce_dump_registers(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data)
{
@@ -1232,3 +1584,99 @@ void ath10k_ce_dump_registers(struct ath10k *ar,
spin_unlock_bh(&ce->ce_lock);
}
+
+static const struct ath10k_ce_ops ce_ops = {
+ .ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
+ .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
+ .ce_rx_post_buf = __ath10k_ce_rx_post_buf,
+ .ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
+ .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
+ .ce_extract_desc_data = ath10k_ce_extract_desc_data,
+ .ce_free_pipe = _ath10k_ce_free_pipe,
+ .ce_send_nolock = _ath10k_ce_send_nolock,
+};
+
+static const struct ath10k_ce_ops ce_64_ops = {
+ .ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
+ .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
+ .ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
+ .ce_completed_recv_next_nolock =
+ _ath10k_ce_completed_recv_next_nolock_64,
+ .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
+ .ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
+ .ce_free_pipe = _ath10k_ce_free_pipe_64,
+ .ce_send_nolock = _ath10k_ce_send_nolock_64,
+};
+
+static void ath10k_ce_set_ops(struct ath10k *ar,
+ struct ath10k_ce_pipe *ce_state)
+{
+ switch (ar->hw_rev) {
+ case ATH10K_HW_WCN3990:
+ ce_state->ops = &ce_64_ops;
+ break;
+ default:
+ ce_state->ops = &ce_ops;
+ break;
+ }
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ int ret;
+
+ ath10k_ce_set_ops(ar, ce_state);
+ /* Make sure there are enough CE ringbuffer entries for HTT TX to
+ * avoid additional TX locking checks.
+ *
+ * For lack of a better place, do the check here.
+ */
+ BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
+
+ if (attr->src_nentries)
+ ce_state->send_cb = attr->send_cb;
+
+ if (attr->dest_nentries)
+ ce_state->recv_cb = attr->recv_cb;
+
+ if (attr->src_nentries) {
+ ce_state->src_ring =
+ ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
+ if (IS_ERR(ce_state->src_ring)) {
+ ret = PTR_ERR(ce_state->src_ring);
+ ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
+ ce_id, ret);
+ ce_state->src_ring = NULL;
+ return ret;
+ }
+ }
+
+ if (attr->dest_nentries) {
+ ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
+ ce_id,
+ attr);
+ if (IS_ERR(ce_state->dest_ring)) {
+ ret = PTR_ERR(ce_state->dest_ring);
+ ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
+ ce_id, ret);
+ ce_state->dest_ring = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index bdec794704d9..2c3c8f5e90ea 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -36,6 +36,10 @@ struct ath10k_ce_pipe;
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
+#define CE_WCN3990_DESC_FLAGS_GATHER BIT(31)
+
+#define CE_DESC_FLAGS_GET_MASK GENMASK(4, 0)
+#define CE_DESC_37BIT_ADDR_MASK GENMASK_ULL(37, 0)
/* Following desc flags are used in QCA99X0 */
#define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2)
@@ -50,6 +54,16 @@ struct ce_desc {
__le16 flags; /* %CE_DESC_FLAGS_ */
};
+struct ce_desc_64 {
+ __le64 addr;
+ __le16 nbytes; /* length in register map */
+ __le16 flags; /* fw_metadata_high */
+ __le32 toeplitz_hash_result;
+};
+
+#define CE_DESC_SIZE sizeof(struct ce_desc)
+#define CE_DESC_SIZE_64 sizeof(struct ce_desc_64)
+
struct ath10k_ce_ring {
/* Number of entries in this ring; must be power of 2 */
unsigned int nentries;
@@ -117,6 +131,7 @@ struct ath10k_ce_pipe {
unsigned int src_sz_max;
struct ath10k_ce_ring *src_ring;
struct ath10k_ce_ring *dest_ring;
+ const struct ath10k_ce_ops *ops;
};
/* Copy Engine settable attributes */
@@ -160,7 +175,7 @@ struct ath10k_ce {
*/
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
- u32 buffer,
+ dma_addr_t buffer,
unsigned int nbytes,
/* 14 bits */
unsigned int transfer_id,
@@ -168,7 +183,7 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
- u32 buffer,
+ dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags);
@@ -180,8 +195,8 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
/*==================Recv=======================*/
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
-int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
-int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr);
void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
/* recv flags */
@@ -222,7 +237,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
*/
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp);
+ dma_addr_t *bufferp);
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
@@ -235,7 +250,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
*/
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
+ dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
@@ -281,6 +296,32 @@ struct ce_attr {
void (*recv_cb)(struct ath10k_ce_pipe *);
};
+struct ath10k_ce_ops {
+ struct ath10k_ce_ring *(*ce_alloc_src_ring)(struct ath10k *ar,
+ u32 ce_id,
+ const struct ce_attr *attr);
+ struct ath10k_ce_ring *(*ce_alloc_dst_ring)(struct ath10k *ar,
+ u32 ce_id,
+ const struct ce_attr *attr);
+ int (*ce_rx_post_buf)(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr);
+ int (*ce_completed_recv_next_nolock)(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *nbytesp);
+ int (*ce_revoke_recv_next)(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp);
+ void (*ce_extract_desc_data)(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 sw_index, dma_addr_t *bufferp,
+ u32 *nbytesp, u32 *transfer_idp);
+ void (*ce_free_pipe)(struct ath10k *ar, int ce_id);
+ int (*ce_send_nolock)(struct ath10k_ce_pipe *pipe,
+ void *per_transfer_context,
+ dma_addr_t buffer, u32 nbytes,
+ u32 transfer_id, u32 flags);
+};
+
static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
{
return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
@@ -292,6 +333,12 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
#define CE_DEST_RING_TO_DESC(baddr, idx) \
(&(((struct ce_desc *)baddr)[idx]))
+#define CE_SRC_RING_TO_DESC_64(baddr, idx) \
+ (&(((struct ce_desc_64 *)baddr)[idx]))
+
+#define CE_DEST_RING_TO_DESC_64(baddr, idx) \
+ (&(((struct ce_desc_64 *)baddr)[idx]))
+
/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
(((int)(toidx) - (int)(fromidx)) & (nentries_mask))
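Because nentries is always rounded up to a power of two, the mask makes the subtraction wrap correctly even after the indices have wrapped. A worked example, assuming a 512-entry ring:

/* Worked example of CE_RING_DELTA, assuming nentries = 512. */
static unsigned int sketch_ring_delta(void)
{
	unsigned int nentries_mask = 511;

	/* from index 510 to index 2: (2 - 510) & 511 == 4 */
	return ((int)2 - (int)510) & nentries_mask;
}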
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index b29fdbd21ead..b0fdc1023619 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -32,6 +32,7 @@
#include "htt.h"
#include "testmode.h"
#include "wmi-ops.h"
+#include "coredump.h"
unsigned int ath10k_debug_mask;
static unsigned int ath10k_cryptmode_param;
@@ -39,17 +40,25 @@ static bool uart_print;
static bool skip_otp;
static bool rawmode;
+/* Enable ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA
+ * by default.
+ */
+unsigned long ath10k_coredump_mask = 0x3;
+
+/* FIXME: most of these should be readonly */
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
module_param(uart_print, bool, 0644);
module_param(skip_otp, bool, 0644);
module_param(rawmode, bool, 0644);
+module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
+MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file");
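Per the comment above, bit 0 of the mask selects register dumps and bit 1 the CE data, so the 0x3 default enables both. Loading with, say, coredump_mask=0x1 (value illustrative) would limit the crash file to registers, and the 0444 permission keeps the mask read-only once the module is loaded.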
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
@@ -75,6 +84,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -99,6 +113,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -122,6 +141,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -145,6 +169,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -168,6 +197,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -194,6 +228,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -223,6 +262,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 11,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -257,6 +301,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 1560,
.vht160_mcs_tx_highest = 1560,
.n_cipher_suites = 11,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -290,6 +339,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 780,
.vht160_mcs_tx_highest = 780,
.n_cipher_suites = 11,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -313,6 +367,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -338,6 +397,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -368,6 +432,31 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 11,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ },
+ {
+ .id = WCN3990_HW_1_0_DEV_VERSION,
+ .dev_id = 0,
+ .name = "wcn3990 hw1.0",
+ .continuous_frag_desc = true,
+ .tx_chain_mask = 0x7,
+ .rx_chain_mask = 0x7,
+ .max_spatial_stream = 4,
+ .fw = {
+ .dir = WCN3990_HW_1_0_FW_DIR,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &wcn3990_ops,
+ .decap_align_bytes = 1,
+ .num_peers = TARGET_HL_10_TLV_NUM_PEERS,
+ .ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
+ .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
+ .target_64bit = true,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
},
};
@@ -390,6 +479,8 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
[ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast",
[ATH10K_FW_FEATURE_NO_PS] = "no-ps",
+ [ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference",
+ [ATH10K_FW_FEATURE_NON_BMI] = "non-bmi",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -860,6 +951,28 @@ static int ath10k_core_check_smbios(struct ath10k *ar)
return 0;
}
+static int ath10k_core_check_dt(struct ath10k *ar)
+{
+ struct device_node *node;
+ const char *variant = NULL;
+
+ node = ar->dev->of_node;
+ if (!node)
+ return -ENOENT;
+
+ of_property_read_string(node, "qcom,ath10k-calibration-variant",
+ &variant);
+ if (!variant)
+ return -ENODATA;
+
+ if (strscpy(ar->id.bdf_ext, variant, sizeof(ar->id.bdf_ext)) < 0)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+ variant);
+
+ return 0;
+}
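For illustration, a board's DTS node might carry qcom,ath10k-calibration-variant = "some-board" (value hypothetical); the string is copied into ar->id.bdf_ext and later appended as ",variant=..." when the board file name is composed below.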
+
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
u32 result, address = ar->hw_params.patch_load_addr;
@@ -1163,7 +1276,10 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
len -= sizeof(*hdr);
data = hdr->data;
- if (len < ALIGN(ie_len, 4)) {
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ if (len < ie_len) {
ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
ie_id, ie_len, len);
ret = -EINVAL;
@@ -1202,9 +1318,6 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
goto out;
}
- /* jump over the padding */
- ie_len = ALIGN(ie_len, 4);
-
len -= ie_len;
data += ie_len;
}
@@ -1231,19 +1344,19 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
/* strlen(',variant=') + strlen(ar->id.bdf_ext) */
char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
+ if (ar->id.bdf_ext[0] != '\0')
+ scnprintf(variant, sizeof(variant), ",variant=%s",
+ ar->id.bdf_ext);
+
if (ar->id.bmi_ids_valid) {
scnprintf(name, name_len,
- "bus=%s,bmi-chip-id=%d,bmi-board-id=%d",
+ "bus=%s,bmi-chip-id=%d,bmi-board-id=%d%s",
ath10k_bus_str(ar->hif.bus),
ar->id.bmi_chip_id,
- ar->id.bmi_board_id);
+ ar->id.bmi_board_id, variant);
goto out;
}
- if (ar->id.bdf_ext[0] != '\0')
- scnprintf(variant, sizeof(variant), ",variant=%s",
- ar->id.bdf_ext);
-
scnprintf(name, name_len,
"bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
ath10k_bus_str(ar->hif.bus),
@@ -1335,6 +1448,9 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
len -= sizeof(*hdr);
data += sizeof(*hdr);
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
if (len < ie_len) {
ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
ie_id, len, ie_len);
@@ -1440,15 +1556,12 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
break;
}
- /* jump over the padding */
- ie_len = ALIGN(ie_len, 4);
-
len -= ie_len;
data += ie_len;
}
- if (!fw_file->firmware_data ||
- !fw_file->firmware_len) {
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, fw_file->fw_features) &&
+ (!fw_file->firmware_data || !fw_file->firmware_len)) {
ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
ar->hw_params.fw.dir, name);
ret = -ENOMEDIUM;
@@ -1474,6 +1587,7 @@ static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name,
break;
case ATH10K_BUS_PCI:
case ATH10K_BUS_AHB:
+ case ATH10K_BUS_SNOC:
scnprintf(fw_name, fw_name_len, "%s-%d.bin",
ATH10K_FW_FILE_BASE, fw_api);
break;
@@ -1759,7 +1873,7 @@ static void ath10k_core_restart(struct work_struct *work)
mutex_unlock(&ar->conf_mutex);
- ret = ath10k_debug_fw_devcoredump(ar);
+ ret = ath10k_coredump_submit(ar);
if (ret)
ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
ret);
@@ -2001,43 +2115,47 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
ar->running_fw = fw;
- ath10k_bmi_start(ar);
-
- if (ath10k_init_configure_target(ar)) {
- status = -EINVAL;
- goto err;
- }
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->running_fw->fw_file.fw_features)) {
+ ath10k_bmi_start(ar);
- status = ath10k_download_cal_data(ar);
- if (status)
- goto err;
+ if (ath10k_init_configure_target(ar)) {
+ status = -EINVAL;
+ goto err;
+ }
- /* Some of of qca988x solutions are having global reset issue
- * during target initialization. Bypassing PLL setting before
- * downloading firmware and letting the SoC run on REF_CLK is
- * fixing the problem. Corresponding firmware change is also needed
- * to set the clock source once the target is initialized.
- */
- if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
- ar->running_fw->fw_file.fw_features)) {
- status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
- if (status) {
- ath10k_err(ar, "could not write to skip_clock_init: %d\n",
- status);
+ status = ath10k_download_cal_data(ar);
+ if (status)
goto err;
+
+ /* Some qca988x solutions have a global reset issue during
+ * target initialization. Bypassing the PLL setting before
+ * downloading firmware and letting the SoC run on REF_CLK
+ * fixes the problem. A corresponding firmware change is also
+ * needed to set the clock source once the target is
+ * initialized.
+ */
+ if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
+ ar->running_fw->fw_file.fw_features)) {
+ status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
+ if (status) {
+ ath10k_err(ar, "could not write to skip_clock_init: %d\n",
+ status);
+ goto err;
+ }
}
- }
- status = ath10k_download_fw(ar);
- if (status)
- goto err;
+ status = ath10k_download_fw(ar);
+ if (status)
+ goto err;
- status = ath10k_init_uart(ar);
- if (status)
- goto err;
+ status = ath10k_init_uart(ar);
+ if (status)
+ goto err;
- if (ar->hif.bus == ATH10K_BUS_SDIO)
- ath10k_init_sdio(ar);
+ if (ar->hif.bus == ATH10K_BUS_SDIO)
+ ath10k_init_sdio(ar);
+ }
ar->htc.htc_ops.target_send_suspend_complete =
ath10k_send_suspend_complete;
@@ -2048,9 +2166,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err;
}
- status = ath10k_bmi_done(ar);
- if (status)
- goto err;
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->running_fw->fw_file.fw_features)) {
+ status = ath10k_bmi_done(ar);
+ if (status)
+ goto err;
+ }
status = ath10k_wmi_attach(ar);
if (status) {
@@ -2293,19 +2414,35 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
return ret;
}
- memset(&target_info, 0, sizeof(target_info));
- if (ar->hif.bus == ATH10K_BUS_SDIO)
+ switch (ar->hif.bus) {
+ case ATH10K_BUS_SDIO:
+ memset(&target_info, 0, sizeof(target_info));
ret = ath10k_bmi_get_target_info_sdio(ar, &target_info);
- else
+ if (ret) {
+ ath10k_err(ar, "could not get target info (%d)\n", ret);
+ goto err_power_down;
+ }
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
+ break;
+ case ATH10K_BUS_PCI:
+ case ATH10K_BUS_AHB:
+ case ATH10K_BUS_USB:
+ memset(&target_info, 0, sizeof(target_info));
ret = ath10k_bmi_get_target_info(ar, &target_info);
- if (ret) {
- ath10k_err(ar, "could not get target info (%d)\n", ret);
- goto err_power_down;
+ if (ret) {
+ ath10k_err(ar, "could not get target info (%d)\n", ret);
+ goto err_power_down;
+ }
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
+ break;
+ case ATH10K_BUS_SNOC:
+ break;
+ default:
+ ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus);
}
- ar->target_version = target_info.version;
- ar->hw->wiphy->hw_version = target_info.version;
-
ret = ath10k_init_hw_params(ar);
if (ret) {
ath10k_err(ar, "could not get hw params (%d)\n", ret);
@@ -2325,33 +2462,40 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ath10k_debug_print_hwfw_info(ar);
- ret = ath10k_core_pre_cal_download(ar);
- if (ret) {
- /* pre calibration data download is not necessary
- * for all the chipsets. Ignore failures and continue.
- */
- ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "could not load pre cal data: %d\n", ret);
- }
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ ret = ath10k_core_pre_cal_download(ar);
+ if (ret) {
+ /* pre calibration data download is not necessary
+ * for all the chipsets. Ignore failures and continue.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "could not load pre cal data: %d\n", ret);
+ }
- ret = ath10k_core_get_board_id_from_otp(ar);
- if (ret && ret != -EOPNOTSUPP) {
- ath10k_err(ar, "failed to get board id from otp: %d\n",
- ret);
- goto err_free_firmware_files;
- }
+ ret = ath10k_core_get_board_id_from_otp(ar);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_err(ar, "failed to get board id from otp: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
- ret = ath10k_core_check_smbios(ar);
- if (ret)
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not set.\n");
+ ret = ath10k_core_check_smbios(ar);
+ if (ret)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n");
- ret = ath10k_core_fetch_board_file(ar);
- if (ret) {
- ath10k_err(ar, "failed to fetch board file: %d\n", ret);
- goto err_free_firmware_files;
- }
+ ret = ath10k_core_check_dt(ar);
+ if (ret)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
+
+ ret = ath10k_core_fetch_board_file(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+ goto err_free_firmware_files;
+ }
- ath10k_debug_print_board_info(ar);
+ ath10k_debug_print_board_info(ar);
+ }
ret = ath10k_core_init_firmware_features(ar);
if (ret) {
@@ -2360,11 +2504,15 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_free_firmware_files;
}
- ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file);
- if (ret) {
- ath10k_err(ar, "failed to initialize code swap segment: %d\n",
- ret);
- goto err_free_firmware_files;
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ ret = ath10k_swap_code_seg_init(ar,
+ &ar->normal_mode_fw.fw_file);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
}
mutex_lock(&ar->conf_mutex);
@@ -2416,10 +2564,16 @@ static void ath10k_core_register_work(struct work_struct *work)
goto err_release_fw;
}
+ status = ath10k_coredump_register(ar);
+ if (status) {
+ ath10k_err(ar, "unable to register coredump\n");
+ goto err_unregister_mac;
+ }
+
status = ath10k_debug_register(ar);
if (status) {
ath10k_err(ar, "unable to initialize debugfs\n");
- goto err_unregister_mac;
+ goto err_unregister_coredump;
}
status = ath10k_spectral_create(ar);
@@ -2442,6 +2596,8 @@ err_spectral_destroy:
ath10k_spectral_destroy(ar);
err_debug_destroy:
ath10k_debug_destroy(ar);
+err_unregister_coredump:
+ ath10k_coredump_unregister(ar);
err_unregister_mac:
ath10k_mac_unregister(ar);
err_release_fw:
@@ -2596,12 +2752,19 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_dummy_netdev(&ar->napi_dev);
- ret = ath10k_debug_create(ar);
+ ret = ath10k_coredump_create(ar);
if (ret)
goto err_free_aux_wq;
+ ret = ath10k_debug_create(ar);
+ if (ret)
+ goto err_free_coredump;
+
return ar;
+err_free_coredump:
+ ath10k_coredump_destroy(ar);
+
err_free_aux_wq:
destroy_workqueue(ar->workqueue_aux);
err_free_wq:
@@ -2623,6 +2786,7 @@ void ath10k_core_destroy(struct ath10k *ar)
destroy_workqueue(ar->workqueue_aux);
ath10k_debug_destroy(ar);
+ ath10k_coredump_destroy(ar);
ath10k_htt_tx_destroy(&ar->htt);
ath10k_wmi_free_host_mem(ar);
ath10k_mac_destroy(ar);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 643041ef3271..fe6b30356d3b 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -67,7 +67,6 @@
/* NAPI poll budget */
#define ATH10K_NAPI_BUDGET 64
-#define ATH10K_NAPI_QUOTA_LIMIT 60
/* SMBIOS type containing Board Data File Name Extension */
#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8
@@ -93,6 +92,7 @@ enum ath10k_bus {
ATH10K_BUS_AHB,
ATH10K_BUS_SDIO,
ATH10K_BUS_USB,
+ ATH10K_BUS_SNOC,
};
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
@@ -106,6 +106,8 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus)
return "sdio";
case ATH10K_BUS_USB:
return "usb";
+ case ATH10K_BUS_SNOC:
+ return "snoc";
}
return "unknown";
@@ -364,11 +366,11 @@ struct ath10k_sta {
struct rate_info txrate;
struct work_struct update_wk;
+ u64 rx_duration;
#ifdef CONFIG_MAC80211_DEBUGFS
/* protected by conf_mutex */
bool aggr_mode;
- u64 rx_duration;
#endif
};
@@ -458,14 +460,17 @@ struct ath10k_ce_crash_hdr {
struct ath10k_ce_crash_data entries[];
};
+#define MAX_MEM_DUMP_TYPE 5
+
/* used for crash-dump storage, protected by data-lock */
struct ath10k_fw_crash_data {
- bool crashed_since_read;
-
guid_t guid;
- struct timespec timestamp;
+ struct timespec64 timestamp;
__le32 registers[REG_DUMP_COUNT_QCA988X];
struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX];
+
+ u8 *ramdump_buf;
+ size_t ramdump_buf_len;
};
struct ath10k_debug {
@@ -488,12 +493,9 @@ struct ath10k_debug {
/* protected by conf_mutex */
u64 fw_dbglog_mask;
u32 fw_dbglog_level;
- u32 pktlog_filter;
u32 reg_addr;
u32 nf_cal_period;
void *cal_data;
-
- struct ath10k_fw_crash_data *fw_crash_data;
};
enum ath10k_state {
@@ -615,6 +617,12 @@ enum ath10k_fw_features {
/* Firmware does not support power save in station mode. */
ATH10K_FW_FEATURE_NO_PS = 17,
+ /* Firmware allows management tx by reference instead of by value. */
+ ATH10K_FW_FEATURE_MGMT_TX_BY_REF = 18,
+
+ /* Firmware load is done externally, not by bmi */
+ ATH10K_FW_FEATURE_NON_BMI = 19,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -963,6 +971,14 @@ struct ath10k {
} spectral;
#endif
+ u32 pktlog_filter;
+
+#ifdef CONFIG_DEV_COREDUMP
+ struct {
+ struct ath10k_fw_crash_data *fw_crash_data;
+ } coredump;
+#endif
+
struct {
/* protected by conf_mutex */
struct ath10k_fw_components utf_mode_fw;
@@ -1016,6 +1032,8 @@ static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
return false;
}
+extern unsigned long ath10k_coredump_mask;
+
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_bus bus,
enum ath10k_hw_rev hw_rev,
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
new file mode 100644
index 000000000000..4dde126dab17
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -0,0 +1,993 @@
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "coredump.h"
+
+#include <linux/devcoredump.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/utsname.h>
+
+#include "debug.h"
+#include "hw.h"
+
+static const struct ath10k_mem_section qca6174_hw21_register_sections[] = {
+ {0x800, 0x810},
+ {0x820, 0x82C},
+ {0x830, 0x8F4},
+ {0x90C, 0x91C},
+ {0xA14, 0xA18},
+ {0xA84, 0xA94},
+ {0xAA8, 0xAD4},
+ {0xADC, 0xB40},
+ {0x1000, 0x10A4},
+ {0x10BC, 0x111C},
+ {0x1134, 0x1138},
+ {0x1144, 0x114C},
+ {0x1150, 0x115C},
+ {0x1160, 0x1178},
+ {0x1240, 0x1260},
+ {0x2000, 0x207C},
+ {0x3000, 0x3014},
+ {0x4000, 0x4014},
+ {0x5000, 0x5124},
+ {0x6000, 0x6040},
+ {0x6080, 0x60CC},
+ {0x6100, 0x611C},
+ {0x6140, 0x61D8},
+ {0x6200, 0x6238},
+ {0x6240, 0x628C},
+ {0x62C0, 0x62EC},
+ {0x6380, 0x63E8},
+ {0x6400, 0x6440},
+ {0x6480, 0x64CC},
+ {0x6500, 0x651C},
+ {0x6540, 0x6580},
+ {0x6600, 0x6638},
+ {0x6640, 0x668C},
+ {0x66C0, 0x66EC},
+ {0x6780, 0x67E8},
+ {0x7080, 0x708C},
+ {0x70C0, 0x70C8},
+ {0x7400, 0x741C},
+ {0x7440, 0x7454},
+ {0x7800, 0x7818},
+ {0x8000, 0x8004},
+ {0x8010, 0x8064},
+ {0x8080, 0x8084},
+ {0x80A0, 0x80A4},
+ {0x80C0, 0x80C4},
+ {0x80E0, 0x80F4},
+ {0x8100, 0x8104},
+ {0x8110, 0x812C},
+ {0x9000, 0x9004},
+ {0x9800, 0x982C},
+ {0x9830, 0x9838},
+ {0x9840, 0x986C},
+ {0x9870, 0x9898},
+ {0x9A00, 0x9C00},
+ {0xD580, 0xD59C},
+ {0xF000, 0xF0E0},
+ {0xF140, 0xF190},
+ {0xF250, 0xF25C},
+ {0xF260, 0xF268},
+ {0xF26C, 0xF2A8},
+ {0x10008, 0x1000C},
+ {0x10014, 0x10018},
+ {0x1001C, 0x10020},
+ {0x10024, 0x10028},
+ {0x10030, 0x10034},
+ {0x10040, 0x10054},
+ {0x10058, 0x1007C},
+ {0x10080, 0x100C4},
+ {0x100C8, 0x10114},
+ {0x1012C, 0x10130},
+ {0x10138, 0x10144},
+ {0x10200, 0x10220},
+ {0x10230, 0x10250},
+ {0x10260, 0x10280},
+ {0x10290, 0x102B0},
+ {0x102C0, 0x102DC},
+ {0x102E0, 0x102F4},
+ {0x102FC, 0x1037C},
+ {0x10380, 0x10390},
+ {0x10800, 0x10828},
+ {0x10840, 0x10844},
+ {0x10880, 0x10884},
+ {0x108C0, 0x108E8},
+ {0x10900, 0x10928},
+ {0x10940, 0x10944},
+ {0x10980, 0x10984},
+ {0x109C0, 0x109E8},
+ {0x10A00, 0x10A28},
+ {0x10A40, 0x10A50},
+ {0x11000, 0x11028},
+ {0x11030, 0x11034},
+ {0x11038, 0x11068},
+ {0x11070, 0x11074},
+ {0x11078, 0x110A8},
+ {0x110B0, 0x110B4},
+ {0x110B8, 0x110E8},
+ {0x110F0, 0x110F4},
+ {0x110F8, 0x11128},
+ {0x11138, 0x11144},
+ {0x11178, 0x11180},
+ {0x111B8, 0x111C0},
+ {0x111F8, 0x11200},
+ {0x11238, 0x1123C},
+ {0x11270, 0x11274},
+ {0x11278, 0x1127C},
+ {0x112B0, 0x112B4},
+ {0x112B8, 0x112BC},
+ {0x112F0, 0x112F4},
+ {0x112F8, 0x112FC},
+ {0x11338, 0x1133C},
+ {0x11378, 0x1137C},
+ {0x113B8, 0x113BC},
+ {0x113F8, 0x113FC},
+ {0x11438, 0x11440},
+ {0x11478, 0x11480},
+ {0x114B8, 0x114BC},
+ {0x114F8, 0x114FC},
+ {0x11538, 0x1153C},
+ {0x11578, 0x1157C},
+ {0x115B8, 0x115BC},
+ {0x115F8, 0x115FC},
+ {0x11638, 0x1163C},
+ {0x11678, 0x1167C},
+ {0x116B8, 0x116BC},
+ {0x116F8, 0x116FC},
+ {0x11738, 0x1173C},
+ {0x11778, 0x1177C},
+ {0x117B8, 0x117BC},
+ {0x117F8, 0x117FC},
+ {0x17000, 0x1701C},
+ {0x17020, 0x170AC},
+ {0x18000, 0x18050},
+ {0x18054, 0x18074},
+ {0x18080, 0x180D4},
+ {0x180DC, 0x18104},
+ {0x18108, 0x1813C},
+ {0x18144, 0x18148},
+ {0x18168, 0x18174},
+ {0x18178, 0x18180},
+ {0x181C8, 0x181E0},
+ {0x181E4, 0x181E8},
+ {0x181EC, 0x1820C},
+ {0x1825C, 0x18280},
+ {0x18284, 0x18290},
+ {0x18294, 0x182A0},
+ {0x18300, 0x18304},
+ {0x18314, 0x18320},
+ {0x18328, 0x18350},
+ {0x1835C, 0x1836C},
+ {0x18370, 0x18390},
+ {0x18398, 0x183AC},
+ {0x183BC, 0x183D8},
+ {0x183DC, 0x183F4},
+ {0x18400, 0x186F4},
+ {0x186F8, 0x1871C},
+ {0x18720, 0x18790},
+ {0x19800, 0x19830},
+ {0x19834, 0x19840},
+ {0x19880, 0x1989C},
+ {0x198A4, 0x198B0},
+ {0x198BC, 0x19900},
+ {0x19C00, 0x19C88},
+ {0x19D00, 0x19D20},
+ {0x19E00, 0x19E7C},
+ {0x19E80, 0x19E94},
+ {0x19E98, 0x19EAC},
+ {0x19EB0, 0x19EBC},
+ {0x19F70, 0x19F74},
+ {0x19F80, 0x19F8C},
+ {0x19FA0, 0x19FB4},
+ {0x19FC0, 0x19FD8},
+ {0x1A000, 0x1A200},
+ {0x1A204, 0x1A210},
+ {0x1A228, 0x1A22C},
+ {0x1A230, 0x1A248},
+ {0x1A250, 0x1A270},
+ {0x1A280, 0x1A290},
+ {0x1A2A0, 0x1A2A4},
+ {0x1A2C0, 0x1A2EC},
+ {0x1A300, 0x1A3BC},
+ {0x1A3F0, 0x1A3F4},
+ {0x1A3F8, 0x1A434},
+ {0x1A438, 0x1A444},
+ {0x1A448, 0x1A468},
+ {0x1A580, 0x1A58C},
+ {0x1A644, 0x1A654},
+ {0x1A670, 0x1A698},
+ {0x1A6AC, 0x1A6B0},
+ {0x1A6D0, 0x1A6D4},
+ {0x1A6EC, 0x1A70C},
+ {0x1A710, 0x1A738},
+ {0x1A7C0, 0x1A7D0},
+ {0x1A7D4, 0x1A7D8},
+ {0x1A7DC, 0x1A7E4},
+ {0x1A7F0, 0x1A7F8},
+ {0x1A888, 0x1A89C},
+ {0x1A8A8, 0x1A8AC},
+ {0x1A8C0, 0x1A8DC},
+ {0x1A8F0, 0x1A8FC},
+ {0x1AE04, 0x1AE08},
+ {0x1AE18, 0x1AE24},
+ {0x1AF80, 0x1AF8C},
+ {0x1AFA0, 0x1AFB4},
+ {0x1B000, 0x1B200},
+ {0x1B284, 0x1B288},
+ {0x1B2D0, 0x1B2D8},
+ {0x1B2DC, 0x1B2EC},
+ {0x1B300, 0x1B340},
+ {0x1B374, 0x1B378},
+ {0x1B380, 0x1B384},
+ {0x1B388, 0x1B38C},
+ {0x1B404, 0x1B408},
+ {0x1B420, 0x1B428},
+ {0x1B440, 0x1B444},
+ {0x1B448, 0x1B44C},
+ {0x1B450, 0x1B458},
+ {0x1B45C, 0x1B468},
+ {0x1B584, 0x1B58C},
+ {0x1B68C, 0x1B690},
+ {0x1B6AC, 0x1B6B0},
+ {0x1B7F0, 0x1B7F8},
+ {0x1C800, 0x1CC00},
+ {0x1CE00, 0x1CE04},
+ {0x1CF80, 0x1CF84},
+ {0x1D200, 0x1D800},
+ {0x1E000, 0x20014},
+ {0x20100, 0x20124},
+ {0x21400, 0x217A8},
+ {0x21800, 0x21BA8},
+ {0x21C00, 0x21FA8},
+ {0x22000, 0x223A8},
+ {0x22400, 0x227A8},
+ {0x22800, 0x22BA8},
+ {0x22C00, 0x22FA8},
+ {0x23000, 0x233A8},
+ {0x24000, 0x24034},
+ {0x26000, 0x26064},
+ {0x27000, 0x27024},
+ {0x34000, 0x3400C},
+ {0x34400, 0x3445C},
+ {0x34800, 0x3485C},
+ {0x34C00, 0x34C5C},
+ {0x35000, 0x3505C},
+ {0x35400, 0x3545C},
+ {0x35800, 0x3585C},
+ {0x35C00, 0x35C5C},
+ {0x36000, 0x3605C},
+ {0x38000, 0x38064},
+ {0x38070, 0x380E0},
+ {0x3A000, 0x3A064},
+ {0x40000, 0x400A4},
+ {0x80000, 0x8000C},
+ {0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_section qca6174_hw30_register_sections[] = {
+ {0x800, 0x810},
+ {0x820, 0x82C},
+ {0x830, 0x8F4},
+ {0x90C, 0x91C},
+ {0xA14, 0xA18},
+ {0xA84, 0xA94},
+ {0xAA8, 0xAD4},
+ {0xADC, 0xB40},
+ {0x1000, 0x10A4},
+ {0x10BC, 0x111C},
+ {0x1134, 0x1138},
+ {0x1144, 0x114C},
+ {0x1150, 0x115C},
+ {0x1160, 0x1178},
+ {0x1240, 0x1260},
+ {0x2000, 0x207C},
+ {0x3000, 0x3014},
+ {0x4000, 0x4014},
+ {0x5000, 0x5124},
+ {0x6000, 0x6040},
+ {0x6080, 0x60CC},
+ {0x6100, 0x611C},
+ {0x6140, 0x61D8},
+ {0x6200, 0x6238},
+ {0x6240, 0x628C},
+ {0x62C0, 0x62EC},
+ {0x6380, 0x63E8},
+ {0x6400, 0x6440},
+ {0x6480, 0x64CC},
+ {0x6500, 0x651C},
+ {0x6540, 0x6580},
+ {0x6600, 0x6638},
+ {0x6640, 0x668C},
+ {0x66C0, 0x66EC},
+ {0x6780, 0x67E8},
+ {0x7080, 0x708C},
+ {0x70C0, 0x70C8},
+ {0x7400, 0x741C},
+ {0x7440, 0x7454},
+ {0x7800, 0x7818},
+ {0x8000, 0x8004},
+ {0x8010, 0x8064},
+ {0x8080, 0x8084},
+ {0x80A0, 0x80A4},
+ {0x80C0, 0x80C4},
+ {0x80E0, 0x80F4},
+ {0x8100, 0x8104},
+ {0x8110, 0x812C},
+ {0x9000, 0x9004},
+ {0x9800, 0x982C},
+ {0x9830, 0x9838},
+ {0x9840, 0x986C},
+ {0x9870, 0x9898},
+ {0x9A00, 0x9C00},
+ {0xD580, 0xD59C},
+ {0xF000, 0xF0E0},
+ {0xF140, 0xF190},
+ {0xF250, 0xF25C},
+ {0xF260, 0xF268},
+ {0xF26C, 0xF2A8},
+ {0x10008, 0x1000C},
+ {0x10014, 0x10018},
+ {0x1001C, 0x10020},
+ {0x10024, 0x10028},
+ {0x10030, 0x10034},
+ {0x10040, 0x10054},
+ {0x10058, 0x1007C},
+ {0x10080, 0x100C4},
+ {0x100C8, 0x10114},
+ {0x1012C, 0x10130},
+ {0x10138, 0x10144},
+ {0x10200, 0x10220},
+ {0x10230, 0x10250},
+ {0x10260, 0x10280},
+ {0x10290, 0x102B0},
+ {0x102C0, 0x102DC},
+ {0x102E0, 0x102F4},
+ {0x102FC, 0x1037C},
+ {0x10380, 0x10390},
+ {0x10800, 0x10828},
+ {0x10840, 0x10844},
+ {0x10880, 0x10884},
+ {0x108C0, 0x108E8},
+ {0x10900, 0x10928},
+ {0x10940, 0x10944},
+ {0x10980, 0x10984},
+ {0x109C0, 0x109E8},
+ {0x10A00, 0x10A28},
+ {0x10A40, 0x10A50},
+ {0x11000, 0x11028},
+ {0x11030, 0x11034},
+ {0x11038, 0x11068},
+ {0x11070, 0x11074},
+ {0x11078, 0x110A8},
+ {0x110B0, 0x110B4},
+ {0x110B8, 0x110E8},
+ {0x110F0, 0x110F4},
+ {0x110F8, 0x11128},
+ {0x11138, 0x11144},
+ {0x11178, 0x11180},
+ {0x111B8, 0x111C0},
+ {0x111F8, 0x11200},
+ {0x11238, 0x1123C},
+ {0x11270, 0x11274},
+ {0x11278, 0x1127C},
+ {0x112B0, 0x112B4},
+ {0x112B8, 0x112BC},
+ {0x112F0, 0x112F4},
+ {0x112F8, 0x112FC},
+ {0x11338, 0x1133C},
+ {0x11378, 0x1137C},
+ {0x113B8, 0x113BC},
+ {0x113F8, 0x113FC},
+ {0x11438, 0x11440},
+ {0x11478, 0x11480},
+ {0x114B8, 0x114BC},
+ {0x114F8, 0x114FC},
+ {0x11538, 0x1153C},
+ {0x11578, 0x1157C},
+ {0x115B8, 0x115BC},
+ {0x115F8, 0x115FC},
+ {0x11638, 0x1163C},
+ {0x11678, 0x1167C},
+ {0x116B8, 0x116BC},
+ {0x116F8, 0x116FC},
+ {0x11738, 0x1173C},
+ {0x11778, 0x1177C},
+ {0x117B8, 0x117BC},
+ {0x117F8, 0x117FC},
+ {0x17000, 0x1701C},
+ {0x17020, 0x170AC},
+ {0x18000, 0x18050},
+ {0x18054, 0x18074},
+ {0x18080, 0x180D4},
+ {0x180DC, 0x18104},
+ {0x18108, 0x1813C},
+ {0x18144, 0x18148},
+ {0x18168, 0x18174},
+ {0x18178, 0x18180},
+ {0x181C8, 0x181E0},
+ {0x181E4, 0x181E8},
+ {0x181EC, 0x1820C},
+ {0x1825C, 0x18280},
+ {0x18284, 0x18290},
+ {0x18294, 0x182A0},
+ {0x18300, 0x18304},
+ {0x18314, 0x18320},
+ {0x18328, 0x18350},
+ {0x1835C, 0x1836C},
+ {0x18370, 0x18390},
+ {0x18398, 0x183AC},
+ {0x183BC, 0x183D8},
+ {0x183DC, 0x183F4},
+ {0x18400, 0x186F4},
+ {0x186F8, 0x1871C},
+ {0x18720, 0x18790},
+ {0x19800, 0x19830},
+ {0x19834, 0x19840},
+ {0x19880, 0x1989C},
+ {0x198A4, 0x198B0},
+ {0x198BC, 0x19900},
+ {0x19C00, 0x19C88},
+ {0x19D00, 0x19D20},
+ {0x19E00, 0x19E7C},
+ {0x19E80, 0x19E94},
+ {0x19E98, 0x19EAC},
+ {0x19EB0, 0x19EBC},
+ {0x19F70, 0x19F74},
+ {0x19F80, 0x19F8C},
+ {0x19FA0, 0x19FB4},
+ {0x19FC0, 0x19FD8},
+ {0x1A000, 0x1A200},
+ {0x1A204, 0x1A210},
+ {0x1A228, 0x1A22C},
+ {0x1A230, 0x1A248},
+ {0x1A250, 0x1A270},
+ {0x1A280, 0x1A290},
+ {0x1A2A0, 0x1A2A4},
+ {0x1A2C0, 0x1A2EC},
+ {0x1A300, 0x1A3BC},
+ {0x1A3F0, 0x1A3F4},
+ {0x1A3F8, 0x1A434},
+ {0x1A438, 0x1A444},
+ {0x1A448, 0x1A468},
+ {0x1A580, 0x1A58C},
+ {0x1A644, 0x1A654},
+ {0x1A670, 0x1A698},
+ {0x1A6AC, 0x1A6B0},
+ {0x1A6D0, 0x1A6D4},
+ {0x1A6EC, 0x1A70C},
+ {0x1A710, 0x1A738},
+ {0x1A7C0, 0x1A7D0},
+ {0x1A7D4, 0x1A7D8},
+ {0x1A7DC, 0x1A7E4},
+ {0x1A7F0, 0x1A7F8},
+ {0x1A888, 0x1A89C},
+ {0x1A8A8, 0x1A8AC},
+ {0x1A8C0, 0x1A8DC},
+ {0x1A8F0, 0x1A8FC},
+ {0x1AE04, 0x1AE08},
+ {0x1AE18, 0x1AE24},
+ {0x1AF80, 0x1AF8C},
+ {0x1AFA0, 0x1AFB4},
+ {0x1B000, 0x1B200},
+ {0x1B284, 0x1B288},
+ {0x1B2D0, 0x1B2D8},
+ {0x1B2DC, 0x1B2EC},
+ {0x1B300, 0x1B340},
+ {0x1B374, 0x1B378},
+ {0x1B380, 0x1B384},
+ {0x1B388, 0x1B38C},
+ {0x1B404, 0x1B408},
+ {0x1B420, 0x1B428},
+ {0x1B440, 0x1B444},
+ {0x1B448, 0x1B44C},
+ {0x1B450, 0x1B458},
+ {0x1B45C, 0x1B468},
+ {0x1B584, 0x1B58C},
+ {0x1B68C, 0x1B690},
+ {0x1B6AC, 0x1B6B0},
+ {0x1B7F0, 0x1B7F8},
+ {0x1C800, 0x1CC00},
+ {0x1CE00, 0x1CE04},
+ {0x1CF80, 0x1CF84},
+ {0x1D200, 0x1D800},
+ {0x1E000, 0x20014},
+ {0x20100, 0x20124},
+ {0x21400, 0x217A8},
+ {0x21800, 0x21BA8},
+ {0x21C00, 0x21FA8},
+ {0x22000, 0x223A8},
+ {0x22400, 0x227A8},
+ {0x22800, 0x22BA8},
+ {0x22C00, 0x22FA8},
+ {0x23000, 0x233A8},
+ {0x24000, 0x24034},
+ {0x26000, 0x26064},
+ {0x27000, 0x27024},
+ {0x34000, 0x3400C},
+ {0x34400, 0x3445C},
+ {0x34800, 0x3485C},
+ {0x34C00, 0x34C5C},
+ {0x35000, 0x3505C},
+ {0x35400, 0x3545C},
+ {0x35800, 0x3585C},
+ {0x35C00, 0x35C5C},
+ {0x36000, 0x3605C},
+ {0x38000, 0x38064},
+ {0x38070, 0x380E0},
+ {0x3A000, 0x3A074},
+ {0x40000, 0x400A4},
+ {0x80000, 0x8000C},
+ {0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_region qca6174_hw10_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x70000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+
+ /* RTC_SOC_BASE_ADDRESS */
+ .start = 0x0,
+
+ /* WLAN_MBOX_BASE_ADDRESS - RTC_SOC_BASE_ADDRESS */
+ .len = 0x800 - 0x0,
+
+ .name = "REG_PART1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+
+ /* STEREO_BASE_ADDRESS */
+ .start = 0x27000,
+
+ /* USB_BASE_ADDRESS - STEREO_BASE_ADDRESS */
+ .len = 0x60000 - 0x27000,
+
+ .name = "REG_PART2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x70000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_AXI,
+ .start = 0xa0000,
+ .len = 0x18000,
+ .name = "AXI",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x800,
+ .len = 0x80020 - 0x800,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = qca6174_hw21_register_sections,
+ .size = ARRAY_SIZE(qca6174_hw21_register_sections),
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x90000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_AXI,
+ .start = 0xa0000,
+ .len = 0x18000,
+ .name = "AXI",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x800,
+ .len = 0x80020 - 0x800,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = qca6174_hw30_register_sections,
+ .size = ARRAY_SIZE(qca6174_hw30_register_sections),
+ },
+ },
+
+ /* IRAM dump must be put last */
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM1,
+ .start = 0x00980000,
+ .len = 0x00080000,
+ .name = "IRAM1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM2,
+ .start = 0x00a00000,
+ .len = 0x00040000,
+ .name = "IRAM2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca988x_hw20_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x50000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x4000,
+ .len = 0x2000,
+ .name = "REG_PART1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x8000,
+ .len = 0x58000,
+ .name = "REG_PART2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
+ {
+ .hw_id = QCA6174_HW_1_0_VERSION,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_1_1_VERSION,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_1_3_VERSION,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_2_1_VERSION,
+ .region_table = {
+ .regions = qca6174_hw21_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw21_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_3_0_VERSION,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_3_2_VERSION,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA9377_HW_1_1_DEV_VERSION,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA988X_HW_2_0_VERSION,
+ .region_table = {
+ .regions = qca988x_hw20_mem_regions,
+ .size = ARRAY_SIZE(qca988x_hw20_mem_regions),
+ },
+ },
+};
+
+static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
+{
+ const struct ath10k_hw_mem_layout *hw;
+ const struct ath10k_mem_region *mem_region;
+ size_t size = 0;
+ int i;
+
+ hw = ath10k_coredump_get_mem_layout(ar);
+
+ if (!hw)
+ return 0;
+
+ mem_region = &hw->region_table.regions[0];
+
+ for (i = 0; i < hw->region_table.size; i++) {
+ size += mem_region->len;
+ mem_region++;
+ }
+
+ /* reserve space for the headers */
+ size += hw->region_table.size * sizeof(struct ath10k_dump_ram_data_hdr);
+
+ /* make sure it is aligned to 16 bytes for the debug message printout */
+ size = ALIGN(size, 16);
+
+ return size;
+}
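+
As a worked example of the computation above, take the QCA988X hw2.0 layout defined later in this file (assuming struct ath10k_dump_ram_data_hdr packs to 12 bytes, i.e. three __le32 fields plus an empty flexible array):

/* region lengths: 0x50000 + 0x2000 + 0x58000 = 0xaa000 bytes
 * headers:        3 * sizeof(struct ath10k_dump_ram_data_hdr)
 *                 = 3 * 12 = 0x24 bytes
 * total:          ALIGN(0xaa000 + 0x24, 16) = 0xaa030 bytes
 */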
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+ int i;
+
+ if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+ return NULL;
+
+ if (WARN_ON(ar->target_version == 0))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) {
+ if (ar->target_version == hw_mem_layouts[i].hw_id)
+ return &hw_mem_layouts[i];
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(ath10k_coredump_get_mem_layout);
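+
For reference, a minimal sketch of how a bus backend could consume this layout when filling the preallocated ramdump buffer. ath10k_hif_read_mem() is a hypothetical stand-in for the bus-specific read routine, not a real ath10k API:

/* Sketch only: copy every region into the ramdump buffer, each one
 * prefixed with a struct ath10k_dump_ram_data_hdr.
 */
static int ath10k_coredump_fill_ramdump_sketch(struct ath10k *ar, u8 *buf)
{
	const struct ath10k_hw_mem_layout *hw;
	const struct ath10k_mem_region *reg;
	struct ath10k_dump_ram_data_hdr *hdr;
	int i, ret;

	hw = ath10k_coredump_get_mem_layout(ar);
	if (!hw)
		return -ENODATA;

	for (i = 0; i < hw->region_table.size; i++) {
		reg = &hw->region_table.regions[i];

		hdr = (struct ath10k_dump_ram_data_hdr *)buf;
		hdr->region_type = cpu_to_le32(reg->type);
		hdr->start = cpu_to_le32(reg->start);
		hdr->length = cpu_to_le32(reg->len);
		buf += sizeof(*hdr);

		/* hypothetical bus read helper */
		ret = ath10k_hif_read_mem(ar, reg->start, buf, reg->len);
		if (ret)
			return ret;

		buf += reg->len;
	}

	return 0;
}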
+
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return NULL;
+
+ guid_gen(&crash_data->guid);
+ ktime_get_real_ts64(&crash_data->timestamp);
+
+ return crash_data;
+}
+EXPORT_SYMBOL(ath10k_coredump_new);
+
+static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+ struct ath10k_ce_crash_hdr *ce_hdr;
+ struct ath10k_dump_file_data *dump_data;
+ struct ath10k_tlv_dump_data *dump_tlv;
+ size_t hdr_len = sizeof(*dump_data);
+ size_t len, sofar = 0;
+ unsigned char *buf;
+
+ len = hdr_len;
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+
+ sofar += hdr_len;
+
+ /* This is going to get big when we start dumping FW RAM and such,
+ * so go ahead and use vmalloc.
+ */
+ buf = vzalloc(len);
+ if (!buf)
+ return NULL;
+
+ spin_lock_bh(&ar->data_lock);
+
+ dump_data = (struct ath10k_dump_file_data *)(buf);
+ strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
+ sizeof(dump_data->df_magic));
+ dump_data->len = cpu_to_le32(len);
+
+ dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
+
+ guid_copy(&dump_data->guid, &crash_data->guid);
+ dump_data->chip_id = cpu_to_le32(ar->chip_id);
+ dump_data->bus_type = cpu_to_le32(0);
+ dump_data->target_version = cpu_to_le32(ar->target_version);
+ dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
+ dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
+ dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
+ dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
+ dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
+ dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
+ dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
+ dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
+ dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
+ dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
+
+ strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
+ sizeof(dump_data->fw_ver));
+
+ dump_data->kernel_ver_code = 0;
+ strlcpy(dump_data->kernel_ver, init_utsname()->release,
+ sizeof(dump_data->kernel_ver));
+
+ dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
+ dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
+ dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
+ memcpy(dump_tlv->tlv_data, &crash_data->registers,
+ sizeof(crash_data->registers));
+ sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+ }
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]));
+ ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
+ ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
+ memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
+ memcpy(ce_hdr->entries, crash_data->ce_crash_data,
+ CE_COUNT * sizeof(ce_hdr->entries[0]));
+ sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]);
+ }
+
+ /* Gather ram dump */
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len);
+ memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
+ crash_data->ramdump_buf_len);
+ sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return dump_data;
+}
+
+int ath10k_coredump_submit(struct ath10k *ar)
+{
+ struct ath10k_dump_file_data *dump;
+
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return 0;
+
+ dump = ath10k_coredump_build(ar);
+ if (!dump) {
+ ath10k_warn(ar, "no crash dump data found for devcoredump");
+ return -ENODATA;
+ }
+
+ dev_coredumpv(ar->dev, dump, le32_to_cpu(dump->len), GFP_KERNEL);
+
+ return 0;
+}
+
+int ath10k_coredump_create(struct ath10k *ar)
+{
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return 0;
+
+ ar->coredump.fw_crash_data = vzalloc(sizeof(*ar->coredump.fw_crash_data));
+ if (!ar->coredump.fw_crash_data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int ath10k_coredump_register(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+ crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar);
+
+ crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len);
+ if (!crash_data->ramdump_buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ath10k_coredump_unregister(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ vfree(crash_data->ramdump_buf);
+}
+
+void ath10k_coredump_destroy(struct ath10k *ar)
+{
+ if (ar->coredump.fw_crash_data->ramdump_buf) {
+ vfree(ar->coredump.fw_crash_data->ramdump_buf);
+ ar->coredump.fw_crash_data->ramdump_buf = NULL;
+ ar->coredump.fw_crash_data->ramdump_buf_len = 0;
+ }
+
+ vfree(ar->coredump.fw_crash_data);
+ ar->coredump.fw_crash_data = NULL;
+}
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
new file mode 100644
index 000000000000..bfee13038e59
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _COREDUMP_H_
+#define _COREDUMP_H_
+
+#include "core.h"
+
+#define ATH10K_FW_CRASH_DUMP_VERSION 1
+
+/**
+ * enum ath10k_fw_crash_dump_type - types of data in the dump file
+ * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format
+ * @ATH10K_FW_CRASH_DUMP_CE_DATA: Copy Engine state dump
+ * @ATH10K_FW_CRASH_DUMP_RAM_DATA: Firmware RAM dump
+ */
+enum ath10k_fw_crash_dump_type {
+ ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
+ ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
+
+ /* contains multiple struct ath10k_dump_ram_data_hdr */
+ ATH10K_FW_CRASH_DUMP_RAM_DATA = 2,
+
+ ATH10K_FW_CRASH_DUMP_MAX,
+};
+
+struct ath10k_tlv_dump_data {
+ /* see ath10k_fw_crash_dump_type above */
+ __le32 type;
+
+ /* in bytes */
+ __le32 tlv_len;
+
+ /* pad to 32-bit boundaries as needed */
+ u8 tlv_data[];
+} __packed;
+
+struct ath10k_dump_file_data {
+ /* dump file information */
+
+ /* "ATH10K-FW-DUMP" */
+ char df_magic[16];
+
+ __le32 len;
+
+ /* file dump version */
+ __le32 version;
+
+ /* some info we can get from ath10k struct that might help */
+
+ guid_t guid;
+
+ __le32 chip_id;
+
+ /* 0 for now, in place for later hardware */
+ __le32 bus_type;
+
+ __le32 target_version;
+ __le32 fw_version_major;
+ __le32 fw_version_minor;
+ __le32 fw_version_release;
+ __le32 fw_version_build;
+ __le32 phy_capability;
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ __le32 ht_cap_info;
+ __le32 vht_cap_info;
+ __le32 num_rf_chains;
+
+ /* firmware version string */
+ char fw_ver[ETHTOOL_FWVERS_LEN];
+
+ /* Kernel related information */
+
+ /* time-of-day stamp */
+ __le64 tv_sec;
+
+ /* time-of-day stamp, nano-seconds */
+ __le64 tv_nsec;
+
+ /* LINUX_VERSION_CODE */
+ __le32 kernel_ver_code;
+
+ /* VERMAGIC_STRING */
+ char kernel_ver[64];
+
+ /* room for growth w/out changing binary format */
+ u8 unused[128];
+
+ /* struct ath10k_tlv_dump_data + more */
+ u8 data[0];
+} __packed;
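+
This header also defines the user-space ABI of the file emitted through dev_coredumpv() (readable from /sys/class/devcoredump/devcd*/data). Below is a minimal user-space sketch of walking such a file, assuming the struct layouts above are mirrored on the tool side; note that ath10k_coredump_build() lays the TLVs out back to back, so no inter-TLV padding is applied here:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* 'buf' holds a complete dump file already read into memory;
 * ath10k_dump_file_data and ath10k_tlv_dump_data are assumed to be
 * user-space mirrors of the kernel definitions above.
 */
static void ath10k_dump_walk(const uint8_t *buf)
{
	const struct ath10k_dump_file_data *df = (const void *)buf;
	uint32_t len = le32toh(df->len);
	size_t off = sizeof(*df);

	while (off + sizeof(struct ath10k_tlv_dump_data) <= len) {
		const struct ath10k_tlv_dump_data *tlv =
			(const void *)(buf + off);

		printf("TLV type %u, %u bytes\n",
		       le32toh(tlv->type), le32toh(tlv->tlv_len));

		off += sizeof(*tlv) + le32toh(tlv->tlv_len);
	}
}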
+
+struct ath10k_dump_ram_data_hdr {
+ /* enum ath10k_mem_region_type */
+ __le32 region_type;
+
+ __le32 start;
+
+ /* length of payload data, not including this header */
+ __le32 length;
+
+ u8 data[0];
+};
+
+/* magic number used to fill the holes in a region that no section covers */
+#define ATH10K_MAGIC_NOT_COPIED 0xAA
+
+/* part of user space ABI */
+enum ath10k_mem_region_type {
+ ATH10K_MEM_REGION_TYPE_REG = 1,
+ ATH10K_MEM_REGION_TYPE_DRAM = 2,
+ ATH10K_MEM_REGION_TYPE_AXI = 3,
+ ATH10K_MEM_REGION_TYPE_IRAM1 = 4,
+ ATH10K_MEM_REGION_TYPE_IRAM2 = 5,
+};
+
+/* Define a section of a region which should be copied. Not all parts of
+ * the memory are safe to copy (some registers, for example), so sections
+ * are used to define what can be read without side effects.
+ *
+ * To minimize the size of the array, the list must obey the format:
+ * '{start0,stop0},{start1,stop1},{start2,stop2}....' The values must
+ * also obey 'start0 < stop0 < start1 < stop1 < start2 < ...', otherwise
+ * we may encounter errors in the dump processing.
+ */
+struct ath10k_mem_section {
+ u32 start;
+ u32 end;
+};
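+
The ordering constraint described above can be checked mechanically; a hypothetical self-check helper (not part of the driver) would look like:

/* Hypothetical: returns true when the list obeys
 * 'start0 < stop0 < start1 < stop1 < ...' as described above.
 */
static bool ath10k_mem_sections_valid(const struct ath10k_mem_section *s,
				      u32 count)
{
	u32 i;

	for (i = 0; i < count; i++) {
		if (s[i].start >= s[i].end)
			return false;
		if (i && s[i - 1].end >= s[i].start)
			return false;
	}

	return true;
}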
+
+/* One region of a memory layout. If the sections field is null, the
+ * entire region is copied. If sections is non-null, only the areas
+ * specified in sections are copied and the rest of the region is filled
+ * with ATH10K_MAGIC_NOT_COPIED.
+ */
+struct ath10k_mem_region {
+ enum ath10k_mem_region_type type;
+ u32 start;
+ u32 len;
+
+ const char *name;
+
+ struct {
+ const struct ath10k_mem_section *sections;
+ u32 size;
+ } section_table;
+};
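+
To make these semantics concrete, here is a sketch of how a dumper could realize them, using the same hypothetical ath10k_hif_read_mem() helper as in the coredump.c sketch above: prefill the region buffer with the magic pattern, then copy just the listed sections so that the holes stay recognizable:

static void ath10k_dump_region_sketch(struct ath10k *ar,
				      const struct ath10k_mem_region *reg,
				      u8 *buf)
{
	u32 i;

	if (!reg->section_table.sections) {
		/* no section table: the whole region is safe to read */
		ath10k_hif_read_mem(ar, reg->start, buf, reg->len);
		return;
	}

	/* holes between sections keep the magic pattern */
	memset(buf, ATH10K_MAGIC_NOT_COPIED, reg->len);

	for (i = 0; i < reg->section_table.size; i++) {
		const struct ath10k_mem_section *s =
			&reg->section_table.sections[i];

		ath10k_hif_read_mem(ar, s->start,
				    buf + (s->start - reg->start),
				    s->end - s->start);
	}
}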
+
+/* Contains the memory layout of a hardware version identified by the
+ * hardware id, split into regions.
+ */
+struct ath10k_hw_mem_layout {
+ u32 hw_id;
+
+ struct {
+ const struct ath10k_mem_region *regions;
+ int size;
+ } region_table;
+};
+
+/* FIXME: where to put this? */
+extern unsigned long ath10k_coredump_mask;
+
+#ifdef CONFIG_DEV_COREDUMP
+
+int ath10k_coredump_submit(struct ath10k *ar);
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar);
+int ath10k_coredump_create(struct ath10k *ar);
+int ath10k_coredump_register(struct ath10k *ar);
+void ath10k_coredump_unregister(struct ath10k *ar);
+void ath10k_coredump_destroy(struct ath10k *ar);
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar);
+
+#else /* CONFIG_DEV_COREDUMP */
+
+static inline int ath10k_coredump_submit(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+ return NULL;
+}
+
+static inline int ath10k_coredump_create(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline int ath10k_coredump_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_coredump_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_coredump_destroy(struct ath10k *ar)
+{
+}
+
+static inline const struct ath10k_hw_mem_layout *
+ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_DEV_COREDUMP */
+
+#endif /* _COREDUMP_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index df514507d3f1..6d836a26272f 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -18,10 +18,8 @@
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
-#include <linux/utsname.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
-#include <linux/devcoredump.h>
#include "core.h"
#include "debug.h"
@@ -33,86 +31,6 @@
#define ATH10K_DEBUG_CAL_DATA_LEN 12064
-#define ATH10K_FW_CRASH_DUMP_VERSION 1
-
-/**
- * enum ath10k_fw_crash_dump_type - types of data in the dump file
- * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format
- */
-enum ath10k_fw_crash_dump_type {
- ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
- ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
-
- ATH10K_FW_CRASH_DUMP_MAX,
-};
-
-struct ath10k_tlv_dump_data {
- /* see ath10k_fw_crash_dump_type above */
- __le32 type;
-
- /* in bytes */
- __le32 tlv_len;
-
- /* pad to 32-bit boundaries as needed */
- u8 tlv_data[];
-} __packed;
-
-struct ath10k_dump_file_data {
- /* dump file information */
-
- /* "ATH10K-FW-DUMP" */
- char df_magic[16];
-
- __le32 len;
-
- /* file dump version */
- __le32 version;
-
- /* some info we can get from ath10k struct that might help */
-
- guid_t guid;
-
- __le32 chip_id;
-
- /* 0 for now, in place for later hardware */
- __le32 bus_type;
-
- __le32 target_version;
- __le32 fw_version_major;
- __le32 fw_version_minor;
- __le32 fw_version_release;
- __le32 fw_version_build;
- __le32 phy_capability;
- __le32 hw_min_tx_power;
- __le32 hw_max_tx_power;
- __le32 ht_cap_info;
- __le32 vht_cap_info;
- __le32 num_rf_chains;
-
- /* firmware version string */
- char fw_ver[ETHTOOL_FWVERS_LEN];
-
- /* Kernel related information */
-
- /* time-of-day stamp */
- __le64 tv_sec;
-
- /* time-of-day stamp, nano-seconds */
- __le64 tv_nsec;
-
- /* LINUX_VERSION_CODE */
- __le32 kernel_ver_code;
-
- /* VERMAGIC_STRING */
- char kernel_ver[64];
-
- /* room for growth w/out changing binary format */
- u8 unused[128];
-
- /* struct ath10k_tlv_dump_data + more */
- u8 data[0];
-} __packed;
-
void ath10k_info(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
@@ -711,189 +629,6 @@ static const struct file_operations fops_chip_id = {
.llseek = default_llseek,
};
-struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
-{
- struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
-
- lockdep_assert_held(&ar->data_lock);
-
- crash_data->crashed_since_read = true;
- guid_gen(&crash_data->guid);
- getnstimeofday(&crash_data->timestamp);
-
- return crash_data;
-}
-EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data);
-
-static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar,
- bool mark_read)
-{
- struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
- struct ath10k_ce_crash_hdr *ce_hdr;
- struct ath10k_dump_file_data *dump_data;
- struct ath10k_tlv_dump_data *dump_tlv;
- size_t hdr_len = sizeof(*dump_data);
- size_t len, sofar = 0;
- unsigned char *buf;
-
- len = hdr_len;
- len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
- len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
- CE_COUNT * sizeof(ce_hdr->entries[0]);
-
- sofar += hdr_len;
-
- /* This is going to get big when we start dumping FW RAM and such,
- * so go ahead and use vmalloc.
- */
- buf = vzalloc(len);
- if (!buf)
- return NULL;
-
- spin_lock_bh(&ar->data_lock);
-
- if (!crash_data->crashed_since_read) {
- spin_unlock_bh(&ar->data_lock);
- vfree(buf);
- return NULL;
- }
-
- dump_data = (struct ath10k_dump_file_data *)(buf);
- strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
- sizeof(dump_data->df_magic));
- dump_data->len = cpu_to_le32(len);
-
- dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
-
- guid_copy(&dump_data->guid, &crash_data->guid);
- dump_data->chip_id = cpu_to_le32(ar->chip_id);
- dump_data->bus_type = cpu_to_le32(0);
- dump_data->target_version = cpu_to_le32(ar->target_version);
- dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
- dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
- dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
- dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
- dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
- dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
- dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
- dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
- dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
- dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
-
- strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
- sizeof(dump_data->fw_ver));
-
- dump_data->kernel_ver_code = 0;
- strlcpy(dump_data->kernel_ver, init_utsname()->release,
- sizeof(dump_data->kernel_ver));
-
- dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
- dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
-
- /* Gather crash-dump */
- dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
- dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
- dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
- memcpy(dump_tlv->tlv_data, &crash_data->registers,
- sizeof(crash_data->registers));
- sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
-
- dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
- dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
- dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
- CE_COUNT * sizeof(ce_hdr->entries[0]));
- ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
- ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
- memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
- memcpy(ce_hdr->entries, crash_data->ce_crash_data,
- CE_COUNT * sizeof(ce_hdr->entries[0]));
- sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
- CE_COUNT * sizeof(ce_hdr->entries[0]);
-
- ar->debug.fw_crash_data->crashed_since_read = !mark_read;
-
- spin_unlock_bh(&ar->data_lock);
-
- return dump_data;
-}
-
-int ath10k_debug_fw_devcoredump(struct ath10k *ar)
-{
- struct ath10k_dump_file_data *dump;
- void *dump_ptr;
- u32 dump_len;
-
- /* To keep the dump file available also for debugfs don't mark the
- * file read, only debugfs should do that.
- */
- dump = ath10k_build_dump_file(ar, false);
- if (!dump) {
- ath10k_warn(ar, "no crash dump data found for devcoredump");
- return -ENODATA;
- }
-
- /* Make a copy of the dump file for dev_coredumpv() as during the
- * transition period we need to own the original file. Once
- * fw_crash_dump debugfs file is removed no need to have a copy
- * anymore.
- */
- dump_len = le32_to_cpu(dump->len);
- dump_ptr = vzalloc(dump_len);
-
- if (!dump_ptr)
- return -ENOMEM;
-
- memcpy(dump_ptr, dump, dump_len);
-
- dev_coredumpv(ar->dev, dump_ptr, dump_len, GFP_KERNEL);
-
- return 0;
-}
-
-static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file)
-{
- struct ath10k *ar = inode->i_private;
- struct ath10k_dump_file_data *dump;
-
- ath10k_warn(ar, "fw_crash_dump debugfs file is deprecated, please use /sys/class/devcoredump instead.");
-
- dump = ath10k_build_dump_file(ar, true);
- if (!dump)
- return -ENODATA;
-
- file->private_data = dump;
-
- return 0;
-}
-
-static ssize_t ath10k_fw_crash_dump_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath10k_dump_file_data *dump_file = file->private_data;
-
- return simple_read_from_buffer(user_buf, count, ppos,
- dump_file,
- le32_to_cpu(dump_file->len));
-}
-
-static int ath10k_fw_crash_dump_release(struct inode *inode,
- struct file *file)
-{
- vfree(file->private_data);
-
- return 0;
-}
-
-static const struct file_operations fops_fw_crash_dump = {
- .open = ath10k_fw_crash_dump_open,
- .read = ath10k_fw_crash_dump_read,
- .release = ath10k_fw_crash_dump_release,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
static ssize_t ath10k_reg_addr_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -1950,14 +1685,14 @@ int ath10k_debug_start(struct ath10k *ar)
ret);
}
- if (ar->debug.pktlog_filter) {
+ if (ar->pktlog_filter) {
ret = ath10k_wmi_pdev_pktlog_enable(ar,
- ar->debug.pktlog_filter);
+ ar->pktlog_filter);
if (ret)
/* not serious */
ath10k_warn(ar,
"failed to enable pktlog filter %x: %d\n",
- ar->debug.pktlog_filter, ret);
+ ar->pktlog_filter, ret);
} else {
ret = ath10k_wmi_pdev_pktlog_disable(ar);
if (ret)
@@ -2097,12 +1832,12 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_ON) {
- ar->debug.pktlog_filter = filter;
+ ar->pktlog_filter = filter;
ret = count;
goto out;
}
- if (filter == ar->debug.pktlog_filter) {
+ if (filter == ar->pktlog_filter) {
ret = count;
goto out;
}
@@ -2111,7 +1846,7 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
if (ret) {
ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
- ar->debug.pktlog_filter, ret);
+ ar->pktlog_filter, ret);
goto out;
}
} else {
@@ -2122,7 +1857,7 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
}
}
- ar->debug.pktlog_filter = filter;
+ ar->pktlog_filter = filter;
ret = count;
out:
@@ -2139,7 +1874,7 @@ static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
- ar->debug.pktlog_filter);
+ ar->pktlog_filter);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(ubuf, count, ppos, buf, len);
@@ -2402,10 +2137,6 @@ static const struct file_operations fops_fw_checksums = {
int ath10k_debug_create(struct ath10k *ar)
{
- ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
- if (!ar->debug.fw_crash_data)
- return -ENOMEM;
-
ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
if (!ar->debug.cal_data)
return -ENOMEM;
@@ -2420,9 +2151,6 @@ int ath10k_debug_create(struct ath10k *ar)
void ath10k_debug_destroy(struct ath10k *ar)
{
- vfree(ar->debug.fw_crash_data);
- ar->debug.fw_crash_data = NULL;
-
vfree(ar->debug.cal_data);
ar->debug.cal_data = NULL;
@@ -2460,9 +2188,6 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar,
&fops_simulate_fw_crash);
- debugfs_create_file("fw_crash_dump", 0400, ar->debug.debugfs_phy, ar,
- &fops_fw_crash_dump);
-
debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar,
&fops_reg_addr);
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 548ad5483a4a..e54308889e59 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -42,6 +42,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_SDIO_DUMP = 0x00020000,
ATH10K_DBG_USB = 0x00040000,
ATH10K_DBG_USB_BULK = 0x00080000,
+ ATH10K_DBG_SNOC = 0x00100000,
ATH10K_DBG_ANY = 0xffffffff,
};
@@ -51,7 +52,8 @@ enum ath10k_pktlog_filter {
ATH10K_PKTLOG_RCFIND = 0x000000004,
ATH10K_PKTLOG_RCUPDATE = 0x000000008,
ATH10K_PKTLOG_DBG_PRINT = 0x000000010,
- ATH10K_PKTLOG_ANY = 0x00000001f,
+ ATH10K_PKTLOG_PEER_STATS = 0x000000040,
+ ATH10K_PKTLOG_ANY = 0x00000005f,
};
enum ath10k_dbg_aggr_mode {
@@ -60,6 +62,21 @@ enum ath10k_dbg_aggr_mode {
ATH10K_DBG_AGGR_MODE_MAX,
};
+/* Types of packet log events */
+enum ath_pktlog_type {
+ ATH_PKTLOG_TYPE_TX_CTRL = 1,
+ ATH_PKTLOG_TYPE_TX_STAT,
+};
+
+struct ath10k_pktlog_hdr {
+ __le16 flags;
+ __le16 missed_cnt;
+ __le16 log_type; /* Type of log information following this header */
+ __le16 size; /* Size of variable-length log information in bytes */
+ __le32 timestamp;
+ u8 payload[0];
+} __packed;
+
/* FIXME: How to calculate the buffer size sanely? */
#define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024)
@@ -84,13 +101,8 @@ void ath10k_debug_unregister(struct ath10k *ar);
void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
void ath10k_debug_tpc_stats_process(struct ath10k *ar,
struct ath10k_tpc_stats *tpc_stats);
-struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
-
void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
-int ath10k_debug_fw_devcoredump(struct ath10k *ar);
-
#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
@@ -157,12 +169,6 @@ static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
{
}
-static inline struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
-{
- return NULL;
-}
-
static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar)
{
return 0;
@@ -173,11 +179,6 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
return 0;
}
-static inline int ath10k_debug_fw_devcoredump(struct ath10k *ar)
-{
- return 0;
-}
-
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#define ath10k_debug_get_et_strings NULL
@@ -190,9 +191,6 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
void ath10k_sta_update_rx_duration(struct ath10k *ar,
struct ath10k_fw_stats *stats);
-void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct station_info *sinfo);
#else
static inline
void ath10k_sta_update_rx_duration(struct ath10k *ar,
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index d59ac6b83340..b260b09dd4d3 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -65,33 +65,6 @@ void ath10k_sta_update_rx_duration(struct ath10k *ar,
ath10k_sta_update_stats_rx_duration(ar, stats);
}
-void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct station_info *sinfo)
-{
- struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
- struct ath10k *ar = arsta->arvif->ar;
-
- if (!ath10k_peer_stats_enabled(ar))
- return;
-
- sinfo->rx_duration = arsta->rx_duration;
- sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
-
- if (!arsta->txrate.legacy && !arsta->txrate.nss)
- return;
-
- if (arsta->txrate.legacy) {
- sinfo->txrate.legacy = arsta->txrate.legacy;
- } else {
- sinfo->txrate.mcs = arsta->txrate.mcs;
- sinfo->txrate.nss = arsta->txrate.nss;
- sinfo->txrate.bw = arsta->txrate.bw;
- }
- sinfo->txrate.flags = arsta->txrate.flags;
- sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
-}
-
static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 6679dd9cfd12..6da4e3369c5a 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index e5c80f582ff5..492dc5b4bbf2 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 24663b07eeac..a2f8814b3e53 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index cd160b16db1e..625198dea18b 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -207,6 +207,9 @@ int ath10k_htt_init(struct ath10k *ar)
WARN_ON(1);
return -EINVAL;
}
+ ath10k_htt_set_tx_ops(htt);
+ ath10k_htt_set_rx_ops(htt);
+
return 0;
}
@@ -254,11 +257,11 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
return status;
}
- status = ath10k_htt_send_frag_desc_bank_cfg(htt);
+ status = htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
if (status)
return status;
- status = ath10k_htt_send_rx_ring_cfg_ll(htt);
+ status = htt->tx_ops->htt_send_rx_ring_cfg(htt);
if (status) {
ath10k_warn(ar, "failed to setup rx ring: %d\n",
status);
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 6305308422c4..8cc2a8b278e4 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -107,6 +107,14 @@ struct htt_msdu_ext_desc {
struct htt_data_tx_desc_frag frags[6];
};
+struct htt_msdu_ext_desc_64 {
+ __le32 tso_flag[5];
+ __le16 ip_identification;
+ u8 flags;
+ u8 reserved;
+ struct htt_data_tx_desc_frag frags[6];
+};
+
#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2)
@@ -179,6 +187,22 @@ struct htt_data_tx_desc {
u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;
+struct htt_data_tx_desc_64 {
+ u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+ __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
+ __le16 len;
+ __le16 id;
+ __le64 frags_paddr;
+ union {
+ __le32 peerid;
+ struct {
+ __le16 peerid;
+ __le16 freq;
+ } __packed offchan_tx;
+ } __packed;
+ u8 prefetch[0]; /* start of frame, for FW classification engine */
+} __packed;
+
enum htt_rx_ring_flags {
HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
@@ -200,8 +224,11 @@ enum htt_rx_ring_flags {
#define HTT_RX_RING_SIZE_MIN 128
#define HTT_RX_RING_SIZE_MAX 2048
+#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
+#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
+#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
-struct htt_rx_ring_setup_ring {
+struct htt_rx_ring_setup_ring32 {
__le32 fw_idx_shadow_reg_paddr;
__le32 rx_ring_base_paddr;
__le16 rx_ring_len; /* in 4-byte words */
@@ -222,14 +249,40 @@ struct htt_rx_ring_setup_ring {
__le16 frag_info_offset;
} __packed;
+struct htt_rx_ring_setup_ring64 {
+ __le64 fw_idx_shadow_reg_paddr;
+ __le64 rx_ring_base_paddr;
+ __le16 rx_ring_len; /* in 4-byte words */
+ __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+ __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+ __le16 fw_idx_init_val;
+
+ /* the following offsets are in 4-byte units */
+ __le16 mac80211_hdr_offset;
+ __le16 msdu_payload_offset;
+ __le16 ppdu_start_offset;
+ __le16 ppdu_end_offset;
+ __le16 mpdu_start_offset;
+ __le16 mpdu_end_offset;
+ __le16 msdu_start_offset;
+ __le16 msdu_end_offset;
+ __le16 rx_attention_offset;
+ __le16 frag_info_offset;
+} __packed;
+
struct htt_rx_ring_setup_hdr {
u8 num_rings; /* supported values: 1, 2 */
__le16 rsvd0;
} __packed;
-struct htt_rx_ring_setup {
+struct htt_rx_ring_setup_32 {
struct htt_rx_ring_setup_hdr hdr;
- struct htt_rx_ring_setup_ring rings[0];
+ struct htt_rx_ring_setup_ring32 rings[0];
+} __packed;
+
+struct htt_rx_ring_setup_64 {
+ struct htt_rx_ring_setup_hdr hdr;
+ struct htt_rx_ring_setup_ring64 rings[0];
} __packed;
/*
@@ -855,13 +908,23 @@ struct htt_rx_in_ord_msdu_desc {
u8 reserved;
} __packed;
+struct htt_rx_in_ord_msdu_desc_ext {
+ __le64 msdu_paddr;
+ __le16 msdu_len;
+ u8 fw_desc;
+ u8 reserved;
+} __packed;
+
struct htt_rx_in_ord_ind {
u8 info;
__le16 peer_id;
u8 vdev_id;
u8 reserved;
__le16 msdu_count;
- struct htt_rx_in_ord_msdu_desc msdu_descs[0];
+ union {
+ struct htt_rx_in_ord_msdu_desc msdu_descs32[0];
+ struct htt_rx_in_ord_msdu_desc_ext msdu_descs64[0];
+ } __packed;
} __packed;
#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
@@ -1351,7 +1414,7 @@ struct htt_q_state_conf {
u8 pad[2];
} __packed;
-struct htt_frag_desc_bank_cfg {
+struct htt_frag_desc_bank_cfg32 {
u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
u8 num_banks;
u8 desc_size;
@@ -1360,6 +1423,15 @@ struct htt_frag_desc_bank_cfg {
struct htt_q_state_conf q_state;
} __packed;
+struct htt_frag_desc_bank_cfg64 {
+ u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+ u8 num_banks;
+ u8 desc_size;
+ __le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_q_state_conf q_state;
+} __packed;
+
#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128
#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f
#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0
@@ -1497,6 +1569,23 @@ struct htt_peer_tx_stats {
u8 payload[0];
} __packed;
+#define ATH10K_10_2_TX_STATS_OFFSET 136
+#define PEER_STATS_FOR_NO_OF_PPDUS 4
+
+struct ath10k_10_2_peer_tx_stats {
+ u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 flags[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le32 tx_duration;
+ u8 tx_ppdu_cnt;
+ u8 peer_id;
+} __packed;
+
union htt_rx_pn_t {
/* WEP: 24-bit PN */
u32 pn24;
@@ -1514,11 +1603,13 @@ struct htt_cmd {
struct htt_ver_req ver_req;
struct htt_mgmt_tx_desc mgmt_tx;
struct htt_data_tx_desc data_tx;
- struct htt_rx_ring_setup rx_setup;
+ struct htt_rx_ring_setup_32 rx_setup_32;
+ struct htt_rx_ring_setup_64 rx_setup_64;
struct htt_stats_req stats_req;
struct htt_oob_sync_req oob_sync_req;
struct htt_aggr_conf aggr_conf;
- struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
+ struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
+ struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
struct htt_tx_fetch_resp tx_fetch_resp;
};
} __packed;
@@ -1576,13 +1667,20 @@ struct htt_peer_unmap_event {
u16 peer_id;
};
-struct ath10k_htt_txbuf {
+struct ath10k_htt_txbuf_32 {
struct htt_data_tx_desc_frag frags[2];
struct ath10k_htc_hdr htc_hdr;
struct htt_cmd_hdr cmd_hdr;
struct htt_data_tx_desc cmd_tx;
} __packed;
+struct ath10k_htt_txbuf_64 {
+ struct htt_data_tx_desc_frag frags[2];
+ struct ath10k_htc_hdr htc_hdr;
+ struct htt_cmd_hdr cmd_hdr;
+ struct htt_data_tx_desc_64 cmd_tx;
+} __packed;
+
struct ath10k_htt {
struct ath10k *ar;
enum ath10k_htc_ep_id eid;
@@ -1627,7 +1725,10 @@ struct ath10k_htt {
* rx buffers the host SW provides for the MAC HW to
* fill.
*/
- __le32 *paddrs_ring;
+ union {
+ __le64 *paddrs_ring_64;
+ __le32 *paddrs_ring_32;
+ };
/*
* Base address of ring, as a "physical" device address
@@ -1695,7 +1796,7 @@ struct ath10k_htt {
/* This is used to group tx/rx completions separately and process them
* in batches to reduce cache stalls
*/
- struct sk_buff_head rx_compl_q;
+ struct sk_buff_head rx_msdus_q;
struct sk_buff_head rx_in_ord_compl_q;
struct sk_buff_head tx_fetch_ind_q;
@@ -1704,12 +1805,20 @@ struct ath10k_htt {
struct {
dma_addr_t paddr;
- struct htt_msdu_ext_desc *vaddr;
+ union {
+ struct htt_msdu_ext_desc *vaddr_desc_32;
+ struct htt_msdu_ext_desc_64 *vaddr_desc_64;
+ };
+ size_t size;
} frag_desc;
struct {
dma_addr_t paddr;
- struct ath10k_htt_txbuf *vaddr;
+ union {
+ struct ath10k_htt_txbuf_32 *vaddr_txbuff_32;
+ struct ath10k_htt_txbuf_64 *vaddr_txbuff_64;
+ };
+ size_t size;
} txbuf;
struct {
@@ -1724,6 +1833,28 @@ struct ath10k_htt {
} tx_q_state;
bool tx_mem_allocated;
+ const struct ath10k_htt_tx_ops *tx_ops;
+ const struct ath10k_htt_rx_ops *rx_ops;
+};
+
+struct ath10k_htt_tx_ops {
+ int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt);
+ int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt);
+ int (*htt_alloc_frag_desc)(struct ath10k_htt *htt);
+ void (*htt_free_frag_desc)(struct ath10k_htt *htt);
+ int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu);
+ int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
+ void (*htt_free_txbuff)(struct ath10k_htt *htt);
+};
+
+struct ath10k_htt_rx_ops {
+ size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
+ void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
+ void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr,
+ int idx);
+ void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
+ void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1803,8 +1934,6 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
-int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
-int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu);
@@ -1829,11 +1958,9 @@ int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
-int ath10k_htt_tx(struct ath10k_htt *htt,
- enum ath10k_hw_txrx_mode txmode,
- struct sk_buff *msdu);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
struct sk_buff *skb);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
-
+void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 7d295ee71534..6d96f9560950 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -25,9 +25,6 @@
#include <linux/log2.h>
-#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
-#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
-
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
@@ -36,7 +33,7 @@
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static struct sk_buff *
-ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
+ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
struct ath10k_skb_rxcb *rxcb;
@@ -84,6 +81,60 @@ static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}
+static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
+{
+ return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
+}
+
+static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
+{
+ return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
+}
+
+static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
+ void *vaddr)
+{
+ htt->rx_ring.paddrs_ring_32 = vaddr;
+}
+
+static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
+ void *vaddr)
+{
+ htt->rx_ring.paddrs_ring_64 = vaddr;
+}
+
+static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
+ dma_addr_t paddr, int idx)
+{
+ htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
+}
+
+static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
+ dma_addr_t paddr, int idx)
+{
+ htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
+}
+
+static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
+{
+ htt->rx_ring.paddrs_ring_32[idx] = 0;
+}
+
+static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
+{
+ htt->rx_ring.paddrs_ring_64[idx] = 0;
+}
+
+static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
+{
+ return (void *)htt->rx_ring.paddrs_ring_32;
+}
+
+static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
+{
+ return (void *)htt->rx_ring.paddrs_ring_64;
+}
+
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
struct htt_rx_desc *rx_desc;
@@ -129,13 +180,13 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
rxcb = ATH10K_SKB_RXCB(skb);
rxcb->paddr = paddr;
htt->rx_ring.netbufs_ring[idx] = skb;
- htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+ htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
htt->rx_ring.fill_cnt++;
if (htt->rx_ring.in_ord_rx) {
hash_add(htt->rx_ring.skb_table,
&ATH10K_SKB_RXCB(skb)->hlist,
- (u32)paddr);
+ paddr);
}
num--;
@@ -227,16 +278,15 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
del_timer_sync(&htt->rx_ring.refill_retry_timer);
- skb_queue_purge(&htt->rx_compl_q);
+ skb_queue_purge(&htt->rx_msdus_q);
skb_queue_purge(&htt->rx_in_ord_compl_q);
skb_queue_purge(&htt->tx_fetch_ind_q);
ath10k_htt_rx_ring_free(htt);
dma_free_coherent(htt->ar->dev,
- (htt->rx_ring.size *
- sizeof(htt->rx_ring.paddrs_ring)),
- htt->rx_ring.paddrs_ring,
+ htt->rx_ops->htt_get_rx_ring_size(htt),
+ htt->rx_ops->htt_get_vaddr_ring(htt),
htt->rx_ring.base_paddr);
dma_free_coherent(htt->ar->dev,
@@ -263,7 +313,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
idx = htt->rx_ring.sw_rd_idx.msdu_payld;
msdu = htt->rx_ring.netbufs_ring[idx];
htt->rx_ring.netbufs_ring[idx] = NULL;
- htt->rx_ring.paddrs_ring[idx] = 0;
+ htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
idx++;
idx &= htt->rx_ring.size_mask;
@@ -383,7 +433,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
}
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
- u32 paddr)
+ u64 paddr)
{
struct ath10k *ar = htt->ar;
struct ath10k_skb_rxcb *rxcb;
@@ -408,12 +458,12 @@ static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
return msdu;
}
-static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
- struct htt_rx_in_ord_ind *ev,
- struct sk_buff_head *list)
+static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
+ struct htt_rx_in_ord_ind *ev,
+ struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
- struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
+ struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
struct htt_rx_desc *rxd;
struct sk_buff *msdu;
int msdu_count;
@@ -458,11 +508,60 @@ static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
return 0;
}
+static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
+ struct htt_rx_in_ord_ind *ev,
+ struct sk_buff_head *list)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
+ struct htt_rx_desc *rxd;
+ struct sk_buff *msdu;
+ int msdu_count;
+ bool is_offload;
+ u64 paddr;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ msdu_count = __le16_to_cpu(ev->msdu_count);
+ is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+
+ while (msdu_count--) {
+ paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
+ msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!msdu) {
+ __skb_queue_purge(list);
+ return -ENOENT;
+ }
+
+ __skb_queue_tail(list, msdu);
+
+ if (!is_offload) {
+ rxd = (void *)msdu->data;
+
+ trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+
+ skb_put(msdu, sizeof(*rxd));
+ skb_pull(msdu, sizeof(*rxd));
+ skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
+
+ if (!(__le32_to_cpu(rxd->attention.flags) &
+ RX_ATTENTION_FLAGS_MSDU_DONE)) {
+ ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
+ return -EIO;
+ }
+ }
+
+ msdu_desc++;
+ }
+
+ return 0;
+}
+
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
dma_addr_t paddr;
- void *vaddr;
+ void *vaddr, *vaddr_ring;
size_t size;
struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
@@ -473,7 +572,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
*/
htt->rx_ring.size = HTT_RX_RING_SIZE;
htt->rx_ring.size_mask = htt->rx_ring.size - 1;
- htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
+ htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
if (!is_power_of_2(htt->rx_ring.size)) {
ath10k_warn(ar, "htt rx ring size is not power of 2\n");
@@ -486,13 +585,13 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
if (!htt->rx_ring.netbufs_ring)
goto err_netbuf;
- size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
+ size = htt->rx_ops->htt_get_rx_ring_size(htt);
- vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
- if (!vaddr)
+ vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
+ if (!vaddr_ring)
goto err_dma_ring;
- htt->rx_ring.paddrs_ring = vaddr;
+ htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring);
htt->rx_ring.base_paddr = paddr;
vaddr = dma_alloc_coherent(htt->ar->dev,
@@ -515,7 +614,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
htt->rx_ring.sw_rd_idx.msdu_payld = 0;
hash_init(htt->rx_ring.skb_table);
- skb_queue_head_init(&htt->rx_compl_q);
+ skb_queue_head_init(&htt->rx_msdus_q);
skb_queue_head_init(&htt->rx_in_ord_compl_q);
skb_queue_head_init(&htt->tx_fetch_ind_q);
atomic_set(&htt->num_mpdus_ready, 0);
@@ -526,9 +625,8 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
err_dma_idx:
dma_free_coherent(htt->ar->dev,
- (htt->rx_ring.size *
- sizeof(htt->rx_ring.paddrs_ring)),
- htt->rx_ring.paddrs_ring,
+ htt->rx_ops->htt_get_rx_ring_size(htt),
+ vaddr_ring,
htt->rx_ring.base_paddr);
err_dma_ring:
kfree(htt->rx_ring.netbufs_ring);
@@ -974,16 +1072,25 @@ static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
return out;
}
-static void ath10k_process_rx(struct ath10k *ar,
- struct ieee80211_rx_status *rx_status,
- struct sk_buff *skb)
+static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *status;
+
+ status = IEEE80211_SKB_RXCB(skb);
+ *status = *rx_status;
+
+ __skb_queue_tail(&ar->htt.rx_msdus_q, skb);
+}
+
+static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct ieee80211_rx_status *status;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
char tid[32];
status = IEEE80211_SKB_RXCB(skb);
- *status = *rx_status;
ath10k_dbg(ar, ATH10K_DBG_DATA,
"rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
@@ -1517,7 +1624,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
}
}
-static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
+static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *status)
{
@@ -1540,7 +1647,7 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
status->flag |= RX_FLAG_ALLOW_SAME_PN;
}
- ath10k_process_rx(ar, status, msdu);
+ ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
}
}
@@ -1652,7 +1759,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
struct ath10k *ar = htt->ar;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct sk_buff_head amsdu;
- int ret, num_msdus;
+ int ret;
__skb_queue_head_init(&amsdu);
@@ -1674,7 +1781,6 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
return ret;
}
- num_msdus = skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
/* only for ret = 1 indicates chained msdus */
@@ -1683,9 +1789,9 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
- ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
- return num_msdus;
+ return 0;
}
static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
@@ -1893,15 +1999,14 @@ static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
RX_FLAG_MMIC_STRIPPED;
}
-static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
- struct sk_buff_head *list)
+static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+ struct sk_buff_head *list)
{
struct ath10k_htt *htt = &ar->htt;
struct ieee80211_rx_status *status = &htt->rx_status;
struct htt_rx_offload_msdu *rx;
struct sk_buff *msdu;
size_t offset;
- int num_msdu = 0;
while ((msdu = __skb_dequeue(list))) {
/* Offloaded frames don't have Rx descriptor. Instead they have
@@ -1940,10 +2045,8 @@ static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
- ath10k_process_rx(ar, status, msdu);
- num_msdu++;
+ ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
}
- return num_msdu;
}
static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
@@ -1959,7 +2062,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
u8 tid;
bool offload;
bool frag;
- int ret, num_msdus = 0;
+ int ret;
lockdep_assert_held(&htt->rx_ring.lock);
@@ -1981,7 +2084,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
"htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
vdev_id, peer_id, tid, offload, frag, msdu_count);
- if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
+ if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
ath10k_warn(ar, "dropping invalid in order rx indication\n");
return -EINVAL;
}
@@ -1990,7 +2093,13 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* extracted and processed.
*/
__skb_queue_head_init(&list);
- ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
+ if (ar->hw_params.target_64bit)
+ ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
+ &list);
+ else
+ ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
+ &list);
+
if (ret < 0) {
ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
htt->rx_confused = true;
@@ -2001,7 +2110,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* separately.
*/
if (offload)
- num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
+ ath10k_htt_rx_h_rx_offload(ar, &list);
while (!skb_queue_empty(&list)) {
__skb_queue_head_init(&amsdu);
@@ -2014,11 +2123,10 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* better to report something than nothing though. This
* should still give an idea about rx rate to the user.
*/
- num_msdus += skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
- ath10k_htt_rx_h_deliver(ar, &amsdu, status);
+ ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
break;
case -EAGAIN:
/* fall through */
@@ -2030,7 +2138,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
return -EIO;
}
}
- return num_msdus;
+ return ret;
}
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
@@ -2449,6 +2557,62 @@ out:
rcu_read_unlock();
}
+static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
+{
+ struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
+ struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
+ struct ath10k_10_2_peer_tx_stats *tx_stats;
+ struct ieee80211_sta *sta;
+ struct ath10k_peer *peer;
+ u16 log_type = __le16_to_cpu(hdr->log_type);
+ u32 peer_id = 0, i;
+
+ if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
+ return;
+
+ tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
+ ATH10K_10_2_TX_STATS_OFFSET);
+
+ if (!tx_stats->tx_ppdu_cnt)
+ return;
+
+ peer_id = tx_stats->peer_id;
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
+ peer_id);
+ goto out;
+ }
+
+ sta = peer->sta;
+ for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
+ p_tx_stats->succ_bytes =
+ __le16_to_cpu(tx_stats->success_bytes[i]);
+ p_tx_stats->retry_bytes =
+ __le16_to_cpu(tx_stats->retry_bytes[i]);
+ p_tx_stats->failed_bytes =
+ __le16_to_cpu(tx_stats->failed_bytes[i]);
+ p_tx_stats->ratecode = tx_stats->ratecode[i];
+ p_tx_stats->flags = tx_stats->flags[i];
+ p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
+ p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
+ p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
+
+ ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
+ }
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+
+ return;
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
@@ -2566,6 +2730,10 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
skb->len -
offsetof(struct htt_resp,
pktlog_msg.payload));
+
+ if (ath10k_peer_stats_enabled(ar))
+ ath10k_fetch_10_2_tx_stats(ar,
+ resp->pktlog_msg.payload);
break;
}
case HTT_T2H_MSG_TYPE_RX_FLUSH: {
@@ -2631,6 +2799,24 @@ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
+static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
+{
+ struct sk_buff *skb;
+
+ while (quota < budget) {
+ if (skb_queue_empty(&ar->htt.rx_msdus_q))
+ break;
+
+ skb = __skb_dequeue(&ar->htt.rx_msdus_q);
+ if (!skb)
+ break;
+ ath10k_process_rx(ar, skb);
+ quota++;
+ }
+
+ return quota;
+}
+
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
struct ath10k_htt *htt = &ar->htt;
@@ -2638,63 +2824,44 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
struct sk_buff_head tx_ind_q;
struct sk_buff *skb;
unsigned long flags;
- int quota = 0, done, num_rx_msdus;
+ int quota = 0, done, ret;
bool resched_napi = false;
__skb_queue_head_init(&tx_ind_q);
- /* Since in-ord-ind can deliver more than 1 A-MSDU in single event,
- * process it first to utilize full available quota.
+ /* Process pending frames before dequeuing more data
+ * from hardware.
*/
- while (quota < budget) {
- if (skb_queue_empty(&htt->rx_in_ord_compl_q))
- break;
-
- skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
- if (!skb) {
- resched_napi = true;
- goto exit;
- }
+ quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+ if (quota == budget) {
+ resched_napi = true;
+ goto exit;
+ }
+ while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
spin_lock_bh(&htt->rx_ring.lock);
- num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
+ ret = ath10k_htt_rx_in_ord_ind(ar, skb);
spin_unlock_bh(&htt->rx_ring.lock);
- if (num_rx_msdus < 0) {
- resched_napi = true;
- goto exit;
- }
dev_kfree_skb_any(skb);
- if (num_rx_msdus > 0)
- quota += num_rx_msdus;
-
- if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
- !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
+ if (ret == -EIO) {
resched_napi = true;
goto exit;
}
}
- while (quota < budget) {
- /* no more data to receive */
- if (!atomic_read(&htt->num_mpdus_ready))
- break;
-
- num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
- if (num_rx_msdus < 0) {
+ while (atomic_read(&htt->num_mpdus_ready)) {
+ ret = ath10k_htt_rx_handle_amsdu(htt);
+ if (ret == -EIO) {
resched_napi = true;
goto exit;
}
-
- quota += num_rx_msdus;
atomic_dec(&htt->num_mpdus_ready);
- if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
- atomic_read(&htt->num_mpdus_ready)) {
- resched_napi = true;
- goto exit;
- }
}
+ /* Deliver received data after processing data from hardware */
+ quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+
/* From NAPI documentation:
* The napi poll() function may also process TX completions, in which
* case if it processes the entire TX ring then it should count that
@@ -2732,3 +2899,29 @@ exit:
return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
+ .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
+ .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
+ .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
+ .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
+ .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
+};
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
+ .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
+ .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
+ .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
+ .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
+ .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
+};
+
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+
+ if (ar->hw_params.target_64bit)
+ htt->rx_ops = &htt_rx_ops_64;
+ else
+ htt->rx_ops = &htt_rx_ops_32;
+}
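
Editor's note: besides the 32/64-bit split, this hunk reworks rx delivery. Instead of handing each MSDU to mac80211 inside the in-order and A-MSDU handlers (and returning MSDU counts for NAPI quota accounting), frames are first queued on rx_msdus_q and drained in budget-bounded batches by ath10k_htt_rx_deliver_msdu(). A sketch of that drain loop, with an illustrative process() callback standing in for ath10k_process_rx():

    #include <linux/skbuff.h>

    static int deliver_msdus(struct sk_buff_head *q, int quota, int budget,
                             void (*process)(struct sk_buff *skb))
    {
            struct sk_buff *skb;

            /* deliver at most (budget - quota) queued frames */
            while (quota < budget && (skb = __skb_dequeue(q))) {
                    process(skb);   /* hand one frame to the stack */
                    quota++;
            }

            return quota;   /* caller reschedules NAPI if quota == budget */
    }
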
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 685faac1368f..d334b7be1fea 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -229,50 +229,91 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
idr_remove(&htt->pending_tx, msdu_id);
}
-static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt)
+static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
size_t size;
- if (!htt->txbuf.vaddr)
+ if (!htt->txbuf.vaddr_txbuff_32)
return;
- size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
- dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr);
- htt->txbuf.vaddr = NULL;
+ size = htt->txbuf.size;
+ dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
+ htt->txbuf.paddr);
+ htt->txbuf.vaddr_txbuff_32 = NULL;
}
-static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt)
+static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
size_t size;
- size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
- htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr,
- GFP_KERNEL);
- if (!htt->txbuf.vaddr)
+ size = htt->max_num_pending_tx *
+ sizeof(struct ath10k_htt_txbuf_32);
+
+ htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
+ &htt->txbuf.paddr,
+ GFP_KERNEL);
+ if (!htt->txbuf.vaddr_txbuff_32)
return -ENOMEM;
+ htt->txbuf.size = size;
+
return 0;
}
-static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
+static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
+ struct ath10k *ar = htt->ar;
size_t size;
- if (!htt->frag_desc.vaddr)
+ if (!htt->txbuf.vaddr_txbuff_64)
return;
- size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+ size = htt->txbuf.size;
+ dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
+ htt->txbuf.paddr);
+ htt->txbuf.vaddr_txbuff_64 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct ath10k_htt_txbuf_64);
+
+ htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
+ &htt->txbuf.paddr,
+ GFP_KERNEL);
+ if (!htt->txbuf.vaddr_txbuff_64)
+ return -ENOMEM;
+
+ htt->txbuf.size = size;
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
+{
+ size_t size;
+
+ if (!htt->frag_desc.vaddr_desc_32)
+ return;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc);
dma_free_coherent(htt->ar->dev,
size,
- htt->frag_desc.vaddr,
+ htt->frag_desc.vaddr_desc_32,
htt->frag_desc.paddr);
- htt->frag_desc.vaddr = NULL;
+
+ htt->frag_desc.vaddr_desc_32 = NULL;
}
-static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
+static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
size_t size;
@@ -280,12 +321,57 @@ static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
if (!ar->hw_params.continuous_frag_desc)
return 0;
- size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
- htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
- &htt->frag_desc.paddr,
- GFP_KERNEL);
- if (!htt->frag_desc.vaddr)
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc);
+ htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
+ &htt->frag_desc.paddr,
+ GFP_KERNEL);
+ if (!htt->frag_desc.vaddr_desc_32) {
+ ath10k_err(ar, "failed to alloc fragment desc memory\n");
return -ENOMEM;
+ }
+ htt->frag_desc.size = size;
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
+{
+ size_t size;
+
+ if (!htt->frag_desc.vaddr_desc_64)
+ return;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc_64);
+
+ dma_free_coherent(htt->ar->dev,
+ size,
+ htt->frag_desc.vaddr_desc_64,
+ htt->frag_desc.paddr);
+
+ htt->frag_desc.vaddr_desc_64 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc_64);
+
+ htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
+ &htt->frag_desc.paddr,
+ GFP_KERNEL);
+ if (!htt->frag_desc.vaddr_desc_64) {
+ ath10k_err(ar, "failed to alloc fragment desc memory\n");
+ return -ENOMEM;
+ }
+ htt->frag_desc.size = size;
return 0;
}
@@ -357,13 +443,13 @@ static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
struct ath10k *ar = htt->ar;
int ret;
- ret = ath10k_htt_tx_alloc_cont_txbuf(htt);
+ ret = htt->tx_ops->htt_alloc_txbuff(htt);
if (ret) {
ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
return ret;
}
- ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
+ ret = htt->tx_ops->htt_alloc_frag_desc(htt);
if (ret) {
ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
goto free_txbuf;
@@ -387,10 +473,10 @@ free_txq:
ath10k_htt_tx_free_txq(htt);
free_frag_desc:
- ath10k_htt_tx_free_cont_frag_desc(htt);
+ htt->tx_ops->htt_free_frag_desc(htt);
free_txbuf:
- ath10k_htt_tx_free_cont_txbuf(htt);
+ htt->tx_ops->htt_free_txbuff(htt);
return ret;
}
@@ -444,9 +530,9 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
if (!htt->tx_mem_allocated)
return;
- ath10k_htt_tx_free_cont_txbuf(htt);
+ htt->tx_ops->htt_free_txbuff(htt);
ath10k_htt_tx_free_txq(htt);
- ath10k_htt_tx_free_cont_frag_desc(htt);
+ htt->tx_ops->htt_free_frag_desc(htt);
ath10k_htt_tx_free_txdone_fifo(htt);
htt->tx_mem_allocated = false;
}
@@ -545,12 +631,12 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
return 0;
}
-int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
- struct htt_frag_desc_bank_cfg *cfg;
+ struct htt_frag_desc_bank_cfg32 *cfg;
int ret, size;
u8 info;
@@ -562,7 +648,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
return -EINVAL;
}
- size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
+ size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
skb = ath10k_htc_alloc_skb(ar, size);
if (!skb)
return -ENOMEM;
@@ -579,7 +665,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
ar->running_fw->fw_file.fw_features))
info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
- cfg = &cmd->frag_desc_bank_cfg;
+ cfg = &cmd->frag_desc_bank_cfg32;
cfg->info = info;
cfg->num_banks = 1;
cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
@@ -607,12 +693,112 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
return 0;
}
-int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
+static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
- struct htt_rx_ring_setup_ring *ring;
+ struct htt_frag_desc_bank_cfg64 *cfg;
+ int ret, size;
+ u8 info;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ if (!htt->frag_desc.paddr) {
+ ath10k_warn(ar, "invalid frag desc memory\n");
+ return -EINVAL;
+ }
+
+ size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
+ skb = ath10k_htc_alloc_skb(ar, size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, size);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+
+ info = 0;
+ info |= SM(htt->tx_q_state.type,
+ HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
+
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
+
+ cfg = &cmd->frag_desc_bank_cfg64;
+ cfg->info = info;
+ cfg->num_banks = 1;
+ cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
+ cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
+ cfg->bank_id[0].bank_min_id = 0;
+ cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
+ 1);
+
+ cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
+ cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
+ cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
+ cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
+ cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
+{
+ struct htt_rx_ring_setup_ring32 *ring =
+ (struct htt_rx_ring_setup_ring32 *)rx_ring;
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+ ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
+{
+ struct htt_rx_ring_setup_ring64 *ring =
+ (struct htt_rx_ring_setup_ring64 *)rx_ring;
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+ ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_rx_ring_setup_ring32 *ring;
const int num_rx_ring = 1;
u16 flags;
u32 fw_idx;
@@ -626,7 +812,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
- len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+ len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
+ (sizeof(*ring) * num_rx_ring);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
@@ -635,10 +821,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
- ring = &cmd->rx_setup.rings[0];
+ ring = &cmd->rx_setup_32.rings[0];
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
- cmd->rx_setup.hdr.num_rings = 1;
+ cmd->rx_setup_32.hdr.num_rings = 1;
/* FIXME: do we need all of this? */
flags = 0;
@@ -669,21 +855,76 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
-#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+ ath10k_htt_fill_rx_desc_offset_32(ring);
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
- ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
- ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
- ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
- ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
- ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
- ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
- ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
- ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
- ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
- ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+ return 0;
+}
-#undef desc_offset
+static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_rx_ring_setup_ring64 *ring;
+ const int num_rx_ring = 1;
+ u16 flags;
+ u32 fw_idx;
+ int len;
+ int ret;
+
+ /* HW expects the buffer to be an integral number of 4-byte
+ * "words"
+ */
+ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+ BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+ len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
+ + (sizeof(*ring) * num_rx_ring);
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_cmd *)skb->data;
+ ring = &cmd->rx_setup_64.rings[0];
+
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+ cmd->rx_setup_64.hdr.num_rings = 1;
+
+ flags = 0;
+ flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
+ flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+ flags |= HTT_RX_RING_FLAGS_PPDU_START;
+ flags |= HTT_RX_RING_FLAGS_PPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MPDU_START;
+ flags |= HTT_RX_RING_FLAGS_MPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MSDU_START;
+ flags |= HTT_RX_RING_FLAGS_MSDU_END;
+ flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
+ flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
+ flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_CTRL_RX;
+ flags |= HTT_RX_RING_FLAGS_MGMT_RX;
+ flags |= HTT_RX_RING_FLAGS_NULL_RX;
+ flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
+ fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+ ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
+ ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
+ ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
+ ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+ ring->flags = __cpu_to_le16(flags);
+ ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
+
+ ath10k_htt_fill_rx_desc_offset_64(ring);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
@@ -895,8 +1136,9 @@ err:
return res;
}
-int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
- struct sk_buff *msdu)
+static int ath10k_htt_tx_32(struct ath10k_htt *htt,
+ enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
@@ -904,7 +1146,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct ath10k_hif_sg_item sg_items[2];
- struct ath10k_htt_txbuf *txbuf;
+ struct ath10k_htt_txbuf_32 *txbuf;
struct htt_data_tx_desc_frag *frags;
bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
@@ -917,6 +1159,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
u32 frags_paddr = 0;
u32 txbuf_paddr;
struct htt_msdu_ext_desc *ext_desc = NULL;
+ struct htt_msdu_ext_desc *ext_desc_t = NULL;
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
@@ -929,9 +1172,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
- txbuf = &htt->txbuf.vaddr[msdu_id];
+ txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
txbuf_paddr = htt->txbuf.paddr +
- (sizeof(struct ath10k_htt_txbuf) * msdu_id);
+ (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
@@ -962,11 +1205,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
/* pass through */
case ATH10K_HW_TXRX_ETHERNET:
if (ar->hw_params.continuous_frag_desc) {
- memset(&htt->frag_desc.vaddr[msdu_id], 0,
+ ext_desc_t = htt->frag_desc.vaddr_desc_32;
+ memset(&ext_desc_t[msdu_id], 0,
sizeof(struct htt_msdu_ext_desc));
frags = (struct htt_data_tx_desc_frag *)
- &htt->frag_desc.vaddr[msdu_id].frags;
- ext_desc = &htt->frag_desc.vaddr[msdu_id];
+ &ext_desc_t[msdu_id].frags;
+ ext_desc = &ext_desc_t[msdu_id];
frags[0].tword_addr.paddr_lo =
__cpu_to_le32(skb_cb->paddr);
frags[0].tword_addr.paddr_hi = 0;
@@ -1055,9 +1299,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
- flags0, flags1, msdu->len, msdu_id, frags_paddr,
- (u32)skb_cb->paddr, vdev_id, tid, freq);
+ "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+ flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+ &skb_cb->paddr, vdev_id, tid, freq);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
msdu->data, msdu->len);
trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
@@ -1093,3 +1337,239 @@ err_free_msdu_id:
err:
return res;
}
+
+static int ath10k_htt_tx_64(struct ath10k_htt *htt,
+ enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_hif_sg_item sg_items[2];
+ struct ath10k_htt_txbuf_64 *txbuf;
+ struct htt_data_tx_desc_frag *frags;
+ bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
+ u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+ u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
+ int prefetch_len;
+ int res;
+ u8 flags0 = 0;
+ u16 msdu_id, flags1 = 0;
+ u16 freq = 0;
+ dma_addr_t frags_paddr = 0;
+ u32 txbuf_paddr;
+ struct htt_msdu_ext_desc_64 *ext_desc = NULL;
+ struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
+
+ spin_lock_bh(&htt->tx_lock);
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ spin_unlock_bh(&htt->tx_lock);
+ if (res < 0)
+ goto err;
+
+ msdu_id = res;
+
+ prefetch_len = min(htt->prefetch_len, msdu->len);
+ prefetch_len = roundup(prefetch_len, 4);
+
+ txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
+ txbuf_paddr = htt->txbuf.paddr +
+ (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+ txmode == ATH10K_HW_TXRX_RAW &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
+
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+ if (res) {
+ res = -EIO;
+ goto err_free_msdu_id;
+ }
+
+ if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ switch (txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+ /* pass through */
+ case ATH10K_HW_TXRX_ETHERNET:
+ if (ar->hw_params.continuous_frag_desc) {
+ ext_desc_t = htt->frag_desc.vaddr_desc_64;
+ memset(&ext_desc_t[msdu_id], 0,
+ sizeof(struct htt_msdu_ext_desc_64));
+ frags = (struct htt_data_tx_desc_frag *)
+ &ext_desc_t[msdu_id].frags;
+ ext_desc = &ext_desc_t[msdu_id];
+ frags[0].tword_addr.paddr_lo =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].tword_addr.paddr_hi =
+ __cpu_to_le16(upper_32_bits(skb_cb->paddr));
+ frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+
+ frags_paddr = htt->frag_desc.paddr +
+ (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
+ } else {
+ frags = txbuf->frags;
+ frags[0].tword_addr.paddr_lo =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].tword_addr.paddr_hi =
+ __cpu_to_le16(upper_32_bits(skb_cb->paddr));
+ frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+ frags[1].tword_addr.paddr_lo = 0;
+ frags[1].tword_addr.paddr_hi = 0;
+ frags[1].tword_addr.len_16 = 0;
+ }
+ flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ break;
+ case ATH10K_HW_TXRX_MGMT:
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+ frags_paddr = skb_cb->paddr;
+ break;
+ }
+
+ /* Normally all commands go through HTC which manages tx credits for
+ * each endpoint and notifies when tx is completed.
+ *
+ * HTT endpoint is creditless so there's no need to care about HTC
+ * flags. In that case it is trivial to fill the HTC header here.
+ *
+ * MSDU transmission is considered completed upon HTT event. This
+ * implies no relevant resources can be freed until after the event is
+ * received. That's why the HTC tx completion handler itself is ignored by
+ * setting transfer_context to NULL for all sg items.
+ *
+ * There is simply no point in pushing HTT TX_FRM through HTC tx path
+ * as it's a waste of resources. By bypassing HTC it is possible to
+ * avoid extra memory allocations, compress data structures and thus
+ * improve performance.
+ */
+
+ txbuf->htc_hdr.eid = htt->eid;
+ txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx) +
+ prefetch_len);
+ txbuf->htc_hdr.flags = 0;
+
+ if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+ flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+ flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+ if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ if (ar->hw_params.continuous_frag_desc)
+ ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
+ }
+
+ /* Prevent firmware from sending up tx inspection requests. There's
+ * nothing ath10k can do with frames requested for inspection so force
+ * it to simply rely a regular tx completion with discard status.
+ */
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
+ txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ txbuf->cmd_tx.flags0 = flags0;
+ txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+ txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+ txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+
+ /* fill fragment descriptor */
+ txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
+ if (ath10k_mac_tx_frm_has_freq(ar)) {
+ txbuf->cmd_tx.offchan_tx.peerid =
+ __cpu_to_le16(HTT_INVALID_PEERID);
+ txbuf->cmd_tx.offchan_tx.freq =
+ __cpu_to_le16(freq);
+ } else {
+ txbuf->cmd_tx.peerid =
+ __cpu_to_le32(HTT_INVALID_PEERID);
+ }
+
+ trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+ flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+ &skb_cb->paddr, vdev_id, tid, freq);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+ msdu->data, msdu->len);
+ trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
+ trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
+
+ sg_items[0].transfer_id = 0;
+ sg_items[0].transfer_context = NULL;
+ sg_items[0].vaddr = &txbuf->htc_hdr;
+ sg_items[0].paddr = txbuf_paddr +
+ sizeof(txbuf->frags);
+ sg_items[0].len = sizeof(txbuf->htc_hdr) +
+ sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx);
+
+ sg_items[1].transfer_id = 0;
+ sg_items[1].transfer_context = NULL;
+ sg_items[1].vaddr = msdu->data;
+ sg_items[1].paddr = skb_cb->paddr;
+ sg_items[1].len = prefetch_len;
+
+ res = ath10k_hif_tx_sg(htt->ar,
+ htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+ sg_items, ARRAY_SIZE(sg_items));
+ if (res)
+ goto err_unmap_msdu;
+
+ return 0;
+
+err_unmap_msdu:
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_msdu_id:
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+err:
+ return res;
+}
+
+static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
+ .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
+ .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
+ .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
+ .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
+ .htt_tx = ath10k_htt_tx_32,
+ .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
+ .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
+};
+
+static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
+ .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
+ .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
+ .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
+ .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
+ .htt_tx = ath10k_htt_tx_64,
+ .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
+ .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
+};
+
+void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+
+ if (ar->hw_params.target_64bit)
+ htt->tx_ops = &htt_tx_ops_64;
+ else
+ htt->tx_ops = &htt_tx_ops_32;
+}
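
Editor's note: in the 64-bit tx path a fragment's physical address no longer fits in one 32-bit word, so it is split across the paddr_lo/paddr_hi descriptor fields with the standard kernel helpers. A self-contained sketch; the simplified struct mirrors (but is not) the tword_addr layout of htt_data_tx_desc_frag:

    #include <linux/kernel.h>   /* upper_32_bits(), lower_32_bits() */
    #include <linux/types.h>

    struct tword_addr {
            __le32 paddr_lo;
            __le16 paddr_hi;    /* hardware field is only 16 bits wide */
            __le16 len_16;
    } __packed;

    static void tword_fill(struct tword_addr *t, dma_addr_t paddr, u16 len)
    {
            t->paddr_lo = cpu_to_le32(lower_32_bits(paddr));
            t->paddr_hi = cpu_to_le16(upper_32_bits(paddr));
            t->len_16 = cpu_to_le16(len);
    }

On a 32-bit DMA target upper_32_bits() is simply zero, which is why the _32 variant above writes paddr_hi = 0 unconditionally.
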
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 88955bbe20bd..497ac33e0fbf 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -931,3 +931,5 @@ const struct ath10k_hw_ops qca6174_ops = {
.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
.enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
};
+
+const struct ath10k_hw_ops wcn3990_ops = {};
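
Editor's note: wcn3990_ops is deliberately empty. The hw_ops callers in ath10k check each pointer before invoking it, so a new target can start with no overrides; an illustrative guard (the op name follows the existing qca988x_ops members, assumed rather than taken from this hunk):

    if (ar->hw_params.hw_ops->set_coverage_class)
            ar->hw_params.hw_ops->set_coverage_class(ar, value);
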
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 05f26e5858ad..6203bc65799b 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -128,6 +128,10 @@ enum qca9377_chip_id_rev {
#define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234
+/* WCN3990 1.0 definitions */
+#define WCN3990_HW_1_0_DEV_VERSION ATH10K_HW_WCN3990
+#define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw3.0"
+
#define ATH10K_FW_FILE_BASE "firmware"
#define ATH10K_FW_API_MAX 6
#define ATH10K_FW_API_MIN 2
@@ -553,6 +557,16 @@ struct ath10k_hw_params {
/* Number of ciphers supported (i.e First N) in cipher_suites array */
int n_cipher_suites;
+
+ u32 num_peers;
+ u32 ast_skid_limit;
+ u32 num_wds_entries;
+
+ /* Targets supporting physical addressing capability above 32-bits */
+ bool target_64bit;
+
+ /* Target rx ring fill level */
+ u32 rx_ring_fill_level;
};
struct htt_rx_desc;
@@ -567,6 +581,7 @@ struct ath10k_hw_ops {
extern const struct ath10k_hw_ops qca988x_ops;
extern const struct ath10k_hw_ops qca99x0_ops;
extern const struct ath10k_hw_ops qca6174_ops;
+extern const struct ath10k_hw_ops wcn3990_ops;
extern const struct ath10k_hw_clk_params qca6174_clk[];
@@ -663,6 +678,11 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
#define TARGET_TLV_NUM_WOW_PATTERNS 22
+/* Target specific defines for WMI-HL-1.0 firmware */
+#define TARGET_HL_10_TLV_NUM_PEERS 14
+#define TARGET_HL_10_TLV_AST_SKID_LIMIT 6
+#define TARGET_HL_10_TLV_NUM_WDS_ENTRIES 2
+
/* Diagnostic Window */
#define CE_DIAG_PIPE 7
@@ -868,6 +888,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define PCIE_INTR_CLR_ADDRESS ar->regs->pcie_intr_clr_address
#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
#define CPU_INTR_ADDRESS 0x0010
+#define FW_RAM_CONFIG_ADDRESS 0x0018
#define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz)
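
Editor's note: the new ath10k_hw_params fields let per-chip tables drive both the 32/64-bit split and the rx ring sizing used earlier in this series. A purely illustrative initializer fragment (not part of this hunk) showing how a 64-bit target might populate them:

    /* hypothetical hw_params table entry for a 64-bit target */
    .name = "wcn3990",
    .target_64bit = true,
    .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,  /* assumed constant */
    .num_peers = TARGET_HL_10_TLV_NUM_PEERS,
    .ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
    .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
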
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 0a947eef348d..ebb3f1b046f3 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -2563,7 +2563,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
}
break;
case WMI_VDEV_TYPE_STA:
- if (vif->bss_conf.qos)
+ if (sta->wme)
arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
break;
case WMI_VDEV_TYPE_IBSS:
@@ -3574,7 +3574,9 @@ ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
return ATH10K_MAC_TX_HTT;
case ATH10K_HW_TXRX_MGMT:
if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
- ar->running_fw->fw_file.fw_features))
+ ar->running_fw->fw_file.fw_features) ||
+ test_bit(WMI_SERVICE_MGMT_TX_WMI,
+ ar->wmi.svc_map))
return ATH10K_MAC_TX_WMI_MGMT;
else if (ar->htt.target_version_major >= 3)
return ATH10K_MAC_TX_HTT;
@@ -3595,7 +3597,7 @@ static int ath10k_mac_tx_submit(struct ath10k *ar,
switch (txpath) {
case ATH10K_MAC_TX_HTT:
- ret = ath10k_htt_tx(htt, txmode, skb);
+ ret = htt->tx_ops->htt_tx(htt, txmode, skb);
break;
case ATH10K_MAC_TX_HTT_MGMT:
ret = ath10k_htt_mgmt_tx(htt, skb);
@@ -6201,6 +6203,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
"mac vdev %d peer delete %pM sta %pK (sta gone)\n",
arvif->vdev_id, sta->addr, sta);
+ if (sta->tdls) {
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
+ sta,
+ WMI_TDLS_PEER_STATE_TEARDOWN);
+ if (ret)
+ ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
+ sta->addr,
+ WMI_TDLS_PEER_STATE_TEARDOWN, ret);
+ }
+
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
@@ -7536,6 +7548,16 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
+ if (ath10k_peer_stats_enabled(ar)) {
+ ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS;
+ ret = ath10k_wmi_pdev_pktlog_enable(ar,
+ ar->pktlog_filter);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable pktlog %d\n", ret);
+ goto err_stop;
+ }
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -7620,6 +7642,34 @@ static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
peer->removed = true;
}
+static void ath10k_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+
+ if (!ath10k_peer_stats_enabled(ar))
+ return;
+
+ sinfo->rx_duration = arsta->rx_duration;
+ sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
+
+ if (!arsta->txrate.legacy && !arsta->txrate.nss)
+ return;
+
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
+}
+
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_mac_op_tx,
.wake_tx_queue = ath10k_mac_op_wake_tx_queue,
@@ -7661,6 +7711,7 @@ static const struct ieee80211_ops ath10k_ops = {
.unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
.switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
.sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove,
+ .sta_statistics = ath10k_sta_statistics,
CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
@@ -7671,7 +7722,6 @@ static const struct ieee80211_ops ath10k_ops = {
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = ath10k_sta_add_debugfs,
- .sta_statistics = ath10k_sta_statistics,
#endif
};
@@ -8244,7 +8294,8 @@ int ath10k_mac_register(struct ath10k *ar)
if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
- ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
+ if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
+ ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
}
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -8329,15 +8380,6 @@ int ath10k_mac_register(struct ath10k *ar)
ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
}
- /* Current wake_tx_queue implementation imposes a significant
- * performance penalty in some setups. The tx scheduling code needs
- * more work anyway so disable the wake_tx_queue unless firmware
- * supports the pull-push mechanism.
- */
- if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
- ar->running_fw->fw_file.fw_features))
- ar->ops->wake_tx_queue = NULL;
-
ret = ath10k_mac_init_rd(ar);
if (ret) {
ath10k_err(ar, "failed to derive regdom: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 553747bc19ed..81f8d6c0af35 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index ffea348b2190..355db6a0fcf3 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -23,6 +23,7 @@
#include "core.h"
#include "debug.h"
+#include "coredump.h"
#include "targaddrs.h"
#include "bmi.h"
@@ -51,6 +52,11 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
+/* Maximum number of bytes that can be handled atomically by
+ * diag read and write.
+ */
+#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000
+
static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
@@ -785,7 +791,7 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
ATH10K_SKB_RXCB(skb)->paddr = paddr;
spin_lock_bh(&ce->ce_lock);
- ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
+ ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
spin_unlock_bh(&ce->ce_lock);
if (ret) {
dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
@@ -923,7 +929,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
nbytes = min_t(unsigned int, remaining_bytes,
DIAG_TRANSFER_LIMIT);
- ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
+ ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &ce_data, ce_data);
if (ret != 0)
goto done;
@@ -1089,7 +1095,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
/* Set up to receive directly into Target(!) address */
- ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
+ ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
if (ret != 0)
goto done;
@@ -1461,6 +1467,215 @@ static void ath10k_pci_dump_registers(struct ath10k *ar,
crash_data->registers[i] = reg_dump_values[i];
}
+static int ath10k_pci_dump_memory_section(struct ath10k *ar,
+ const struct ath10k_mem_region *mem_region,
+ u8 *buf, size_t buf_len)
+{
+ const struct ath10k_mem_section *cur_section, *next_section;
+ unsigned int count, section_size, skip_size;
+ int ret, i, j;
+
+ if (!mem_region || !buf)
+ return 0;
+
+ cur_section = &mem_region->section_table.sections[0];
+
+ if (mem_region->start > cur_section->start) {
+ ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
+ mem_region->start, cur_section->start);
+ return 0;
+ }
+
+ skip_size = cur_section->start - mem_region->start;
+
+ /* fill the gap between the start of the memory region and the
+ * first section's start address
+ */
+ for (i = 0; i < skip_size; i++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count = 0;
+
+ for (i = 0; cur_section != NULL; i++) {
+ section_size = cur_section->end - cur_section->start;
+
+ if (section_size <= 0) {
+ ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
+ cur_section->start,
+ cur_section->end);
+ break;
+ }
+
+ if ((i + 1) == mem_region->section_table.size) {
+ /* last section */
+ next_section = NULL;
+ skip_size = 0;
+ } else {
+ next_section = cur_section + 1;
+
+ if (cur_section->end > next_section->start) {
+ ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
+ next_section->start,
+ cur_section->end);
+ break;
+ }
+
+ skip_size = next_section->start - cur_section->end;
+ }
+
+ if (buf_len < (skip_size + section_size)) {
+ ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
+ break;
+ }
+
+ buf_len -= skip_size + section_size;
+
+ /* read section to dest memory */
+ ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
+ buf, section_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
+ cur_section->start, ret);
+ break;
+ }
+
+ buf += section_size;
+ count += section_size;
+
+ /* fill in the gap between this section and the next */
+ for (j = 0; j < skip_size; j++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count += skip_size;
+
+ if (!next_section)
+ /* this was the last section */
+ break;
+
+ cur_section = next_section;
+ }
+
+ return count;
+}
+
+static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
+{
+ u32 val;
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ FW_RAM_CONFIG_ADDRESS, config);
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ FW_RAM_CONFIG_ADDRESS);
+ if (val != config) {
+ ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
+ val, config);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ath10k_pci_dump_memory(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
+{
+ const struct ath10k_hw_mem_layout *mem_layout;
+ const struct ath10k_mem_region *current_region;
+ struct ath10k_dump_ram_data_hdr *hdr;
+ u32 count, shift;
+ size_t buf_len;
+ int ret, i;
+ u8 *buf;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!crash_data)
+ return;
+
+ mem_layout = ath10k_coredump_get_mem_layout(ar);
+ if (!mem_layout)
+ return;
+
+ current_region = &mem_layout->region_table.regions[0];
+
+ buf = crash_data->ramdump_buf;
+ buf_len = crash_data->ramdump_buf_len;
+
+ memset(buf, 0, buf_len);
+
+ for (i = 0; i < mem_layout->region_table.size; i++) {
+ count = 0;
+
+ if (current_region->len > buf_len) {
+ ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
+ current_region->name,
+ current_region->len,
+ buf_len);
+ break;
+ }
+
+ /* To get IRAM dump, the host driver needs to switch target
+ * ram config from DRAM to IRAM.
+ */
+ if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
+ current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
+ shift = current_region->start >> 20;
+
+ ret = ath10k_pci_set_ram_config(ar, shift);
+ if (ret) {
+ ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
+ current_region->name, ret);
+ break;
+ }
+ }
+
+ /* Reserve space for the header. */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
+
+ if (current_region->section_table.size > 0) {
+ /* Copy each section individually. */
+ count = ath10k_pci_dump_memory_section(ar,
+ current_region,
+ buf,
+ current_region->len);
+ } else {
+ /* No individual memory sections defined so we can
+ * copy the entire memory region.
+ */
+ ret = ath10k_pci_diag_read_mem(ar,
+ current_region->start,
+ buf,
+ current_region->len);
+ if (ret) {
+ ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
+ current_region->name, ret);
+ break;
+ }
+
+ count = current_region->len;
+ }
+
+ hdr->region_type = cpu_to_le32(current_region->type);
+ hdr->start = cpu_to_le32(current_region->start);
+ hdr->length = cpu_to_le32(count);
+
+ if (count == 0)
+ /* Note: the header remains, just with zero length. */
+ break;
+
+ buf += count;
+ buf_len -= count;
+
+ current_region++;
+ }
+}
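For illustration, how the resulting dump buffer is laid out: one header per region followed by the payload bytes, with a zero-length header marking a region that could not be copied. The struct mirrors ath10k_dump_ram_data_hdr as used above; treat the exact field layout and the userspace reader as assumptions, and note that little-endian conversion is elided.

#include <stdint.h>
#include <stdio.h>

struct dump_hdr {
	uint32_t region_type;
	uint32_t start;
	uint32_t length;
} __attribute__((packed));

static void read_dump(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct dump_hdr) <= len) {
		const struct dump_hdr *hdr = (const void *)(buf + off);

		printf("region type %u @ 0x%x, %u bytes\n",
		       hdr->region_type, hdr->start, hdr->length);
		if (hdr->length == 0)	/* dump ended early, header kept */
			break;
		off += sizeof(*hdr) + hdr->length;
	}
}

int main(void)
{
	uint8_t buf[sizeof(struct dump_hdr)] = { 0 };	/* one empty header */

	read_dump(buf, sizeof(buf));
	return 0;
}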
+
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
struct ath10k_fw_crash_data *crash_data;
@@ -1470,7 +1685,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
ar->stats.fw_crash_counter++;
- crash_data = ath10k_debug_get_new_fw_crash_data(ar);
+ crash_data = ath10k_coredump_new(ar);
if (crash_data)
scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
@@ -1481,6 +1696,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
ath10k_print_driver_info(ar);
ath10k_pci_dump_registers(ar, crash_data);
ath10k_ce_dump_registers(ar, crash_data);
+ ath10k_pci_dump_memory(ar, crash_data);
spin_unlock_bh(&ar->data_lock);
@@ -1858,7 +2074,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
if (ret) {
- u32 unused_buffer;
+ dma_addr_t unused_buffer;
unsigned int unused_nbytes;
unsigned int unused_id;
@@ -1871,7 +2087,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
err_resp:
if (resp) {
- u32 unused_buffer;
+ dma_addr_t unused_buffer;
ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
dma_unmap_single(ar->dev, resp_paddr,
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 08704fbc11e3..e52fd83156b6 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 28da14398951..545deb6d7af1 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -210,6 +210,10 @@ struct rx_frag_info {
u8 ring1_more_count;
u8 ring2_more_count;
u8 ring3_more_count;
+ u8 ring4_more_count;
+ u8 ring5_more_count;
+ u8 ring6_more_count;
+ u8 ring7_more_count;
} __packed;
/*
@@ -471,10 +475,16 @@ struct rx_msdu_start_qca99x0 {
__le32 info2; /* %RX_MSDU_START_INFO2_ */
} __packed;
+struct rx_msdu_start_wcn3990 {
+ __le32 info2; /* %RX_MSDU_START_INFO2_ */
+ __le32 info3; /* %RX_MSDU_START_INFO3_ */
+} __packed;
+
struct rx_msdu_start {
struct rx_msdu_start_common common;
union {
struct rx_msdu_start_qca99x0 qca99x0;
+ struct rx_msdu_start_wcn3990 wcn3990;
} __packed;
} __packed;
@@ -595,10 +605,23 @@ struct rx_msdu_end_qca99x0 {
__le32 info2;
} __packed;
+struct rx_msdu_end_wcn3990 {
+ __le32 ipv6_crc;
+ __le32 tcp_seq_no;
+ __le32 tcp_ack_no;
+ __le32 info1;
+ __le32 info2;
+ __le32 rule_indication_0;
+ __le32 rule_indication_1;
+ __le32 rule_indication_2;
+ __le32 rule_indication_3;
+} __packed;
+
struct rx_msdu_end {
struct rx_msdu_end_common common;
union {
struct rx_msdu_end_qca99x0 qca99x0;
+ struct rx_msdu_end_wcn3990 wcn3990;
} __packed;
} __packed;
@@ -963,6 +986,12 @@ struct rx_pkt_end {
__le32 phy_timestamp_2;
} __packed;
+struct rx_pkt_end_wcn3990 {
+ __le32 info0; /* %RX_PKT_END_INFO0_ */
+ __le64 phy_timestamp_1;
+ __le64 phy_timestamp_2;
+} __packed;
+
#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK 0x00003fff
#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB 0
#define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK 0x1fff8000
@@ -998,6 +1027,12 @@ struct rx_location_info {
__le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
} __packed;
+struct rx_location_info_wcn3990 {
+ __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
+ __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
+ __le32 rx_location_info2; /* %RX_LOCATION_INFO2_ */
+} __packed;
+
enum rx_phy_ppdu_end_info0 {
RX_PHY_PPDU_END_INFO0_ERR_RADAR = BIT(2),
RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT = BIT(3),
@@ -1086,6 +1121,20 @@ struct rx_ppdu_end_qca9984 {
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
+struct rx_ppdu_end_wcn3990 {
+ struct rx_pkt_end_wcn3990 rx_pkt_end;
+ struct rx_location_info_wcn3990 rx_location_info;
+ struct rx_phy_ppdu_end rx_phy_ppdu_end;
+ __le32 rx_timing_offset;
+ __le32 reserved_info_0;
+ __le32 reserved_info_1;
+ __le32 rx_antenna_info;
+ __le32 rx_coex_info;
+ __le32 rx_mpdu_cnt_info;
+ __le64 phy_timestamp_tx;
+ __le32 rx_bb_length;
+} __packed;
+
struct rx_ppdu_end {
struct rx_ppdu_end_common common;
union {
@@ -1093,6 +1142,7 @@ struct rx_ppdu_end {
struct rx_ppdu_end_qca6174 qca6174;
struct rx_ppdu_end_qca99x0 qca99x0;
struct rx_ppdu_end_qca9984 qca9984;
+ struct rx_ppdu_end_wcn3990 wcn3990;
} __packed;
} __packed;
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 2048b1e5262b..af6995de7e00 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h
index 89b0ad769d4f..13276f4dc12c 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.h
+++ b/drivers/net/wireless/ath/ath10k/spectral.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -44,7 +44,7 @@ enum ath10k_spectral_mode {
SPECTRAL_MANUAL,
};
-#ifdef CONFIG_ATH10K_DEBUGFS
+#ifdef CONFIG_ATH10K_SPECTRAL
int ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_phyerr_ev_arg *phyerr,
@@ -85,6 +85,6 @@ static inline void ath10k_spectral_destroy(struct ath10k *ar)
{
}
-#endif /* CONFIG_ATH10K_DEBUGFS */
+#endif /* CONFIG_ATH10K_SPECTRAL */
#endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
index adf4592374b4..e7f57efadae1 100644
--- a/drivers/net/wireless/ath/ath10k/swap.c
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
index f5dc0476493e..fa602f15fa93 100644
--- a/drivers/net/wireless/ath/ath10k/swap.h
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index 8bded5da9d0d..c2b5bad0459b 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 9d3eb258ac2f..568810b41657 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h
index 191a8f34c8ea..6514d1a14242 100644
--- a/drivers/net/wireless/ath/ath10k/testmode_i.h
+++ b/drivers/net/wireless/ath/ath10k/testmode_i.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index ef717b631ff8..aa8978a8d751 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
index 3abb97f63b1e..65e2419543f9 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.h
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index e0d00cef0bd8..e40edced1d82 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index d4986f626c35..5b3b021526ab 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index e7ea1ae1c438..2bf401e436d3 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014,2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 2fc3f24ff1ca..14093cfdc505 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -377,6 +377,7 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct sk_buff *skb;
int ret;
+ u32 mgmt_tx_cmdid;
if (!ar->wmi.ops->gen_mgmt_tx)
return -EOPNOTSUPP;
@@ -385,7 +386,13 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
if (IS_ERR(skb))
return PTR_ERR(skb);
- ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features))
+ mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_send_cmdid;
+ else
+ mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_cmdid;
+
+ ret = ath10k_wmi_cmd_send(ar, skb, mgmt_tx_cmdid);
if (ret)
return ret;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 7616c1c4bbd3..ae77a007ae07 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -917,33 +917,69 @@ ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
return -ENOMEM;
}
+struct wmi_tlv_svc_rdy_parse {
+ const struct hal_reg_capabilities *reg;
+ const struct wmi_tlv_svc_rdy_ev *ev;
+ const __le32 *svc_bmap;
+ const struct wlan_host_mem_req *mem_reqs;
+ bool svc_bmap_done;
+ bool dbs_hw_mode_done;
+};
+
+static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_parse *svc_rdy = data;
+
+ switch (tag) {
+ case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
+ svc_rdy->ev = ptr;
+ break;
+ case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
+ svc_rdy->reg = ptr;
+ break;
+ case WMI_TLV_TAG_ARRAY_STRUCT:
+ svc_rdy->mem_reqs = ptr;
+ break;
+ case WMI_TLV_TAG_ARRAY_UINT32:
+ if (!svc_rdy->svc_bmap_done) {
+ svc_rdy->svc_bmap_done = true;
+ svc_rdy->svc_bmap = ptr;
+ } else if (!svc_rdy->dbs_hw_mode_done) {
+ svc_rdy->dbs_hw_mode_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
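For illustration, the parse-by-callback pattern this hunk switches to: instead of allocating a tag-indexed pointer table, the event buffer is walked once and every TLV is handed to a callback, which is what lets the two WMI_TLV_TAG_ARRAY_UINT32 blobs above be told apart by arrival order. The header layout below is a simplified stand-in, not the wire format.

#include <stdint.h>
#include <string.h>

struct tlv_hdr { uint16_t tag; uint16_t len; /* value bytes follow */ };

typedef int (*tlv_cb)(uint16_t tag, uint16_t len, const void *val, void *data);

static int tlv_iter(const uint8_t *buf, size_t buflen, tlv_cb cb, void *data)
{
	while (buflen >= sizeof(struct tlv_hdr)) {
		struct tlv_hdr hdr;
		int ret;

		memcpy(&hdr, buf, sizeof(hdr));
		if (buflen < sizeof(hdr) + hdr.len)
			return -1;		/* truncated TLV */
		ret = cb(hdr.tag, hdr.len, buf + sizeof(hdr), data);
		if (ret)
			return ret;
		buf += sizeof(hdr) + hdr.len;
		buflen -= sizeof(hdr) + hdr.len;
	}
	return 0;
}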
+
static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_svc_rdy_ev_arg *arg)
{
- const void **tb;
const struct hal_reg_capabilities *reg;
const struct wmi_tlv_svc_rdy_ev *ev;
const __le32 *svc_bmap;
const struct wlan_host_mem_req *mem_reqs;
+ struct wmi_tlv_svc_rdy_parse svc_rdy = { };
int ret;
- tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
- if (IS_ERR(tb)) {
- ret = PTR_ERR(tb);
+ ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+ ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
+ if (ret) {
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
return ret;
}
- ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT];
- reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES];
- svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
- mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT];
+ ev = svc_rdy.ev;
+ reg = svc_rdy.reg;
+ svc_bmap = svc_rdy.svc_bmap;
+ mem_reqs = svc_rdy.mem_reqs;
- if (!ev || !reg || !svc_bmap || !mem_reqs) {
- kfree(tb);
+ if (!ev || !reg || !svc_bmap || !mem_reqs)
return -EPROTO;
- }
/* This is an internal ABI compatibility check for WMI TLV so check it
* here instead of the generic WMI code.
@@ -961,7 +997,6 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
__le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
__le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
__le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
- kfree(tb);
return -ENOTSUPP;
}
@@ -982,12 +1017,10 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
ath10k_wmi_tlv_parse_mem_reqs, arg);
if (ret) {
- kfree(tb);
ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
return ret;
}
- kfree(tb);
return 0;
}
@@ -1406,7 +1439,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
- cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
+
+ cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
+ cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
+ cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
@@ -1418,7 +1454,6 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->num_peer_keys = __cpu_to_le32(2);
cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
- cfg->ast_skid_limit = __cpu_to_le32(0x10);
cfg->tx_chain_mask = __cpu_to_le32(0x7);
cfg->rx_chain_mask = __cpu_to_le32(0x7);
cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
@@ -1434,7 +1469,6 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->num_mcast_table_elems = __cpu_to_le32(0);
cfg->mcast2ucast_mode = __cpu_to_le32(0);
cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
- cfg->num_wds_entries = __cpu_to_le32(0x20);
cfg->dma_burst_size = __cpu_to_le32(0);
cfg->mac_aggr_delim = __cpu_to_le32(0);
cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
@@ -2450,6 +2484,80 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
}
static struct sk_buff *
+ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+ struct wmi_tlv_mgmt_tx_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ void *ptr;
+ int len;
+ u32 buf_len = msdu->len;
+ struct ath10k_vif *arvif;
+ dma_addr_t mgmt_frame_dma;
+ u32 vdev_id;
+
+ if (!cb->vif)
+ return ERR_PTR(-EINVAL);
+
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ arvif = (void *)cb->vif->drv_priv;
+ vdev_id = arvif->vdev_id;
+
+ if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(*cmd) + 2 * sizeof(*tlv);
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ len += IEEE80211_CCMP_MIC_LEN;
+ buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
+
+ buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
+ buf_len = round_up(buf_len, 4);
+
+ len += buf_len;
+ len = round_up(len, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->desc_id = 0;
+ cmd->chanfreq = 0;
+ cmd->buf_len = __cpu_to_le32(buf_len);
+ cmd->frame_len = __cpu_to_le32(msdu->len);
+ mgmt_frame_dma = dma_map_single(arvif->ar->dev, msdu->data,
+ msdu->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(arvif->ar->dev, mgmt_frame_dma)) {
+ dev_kfree_skb(skb);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cmd->paddr = __cpu_to_le64(mgmt_frame_dma);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(buf_len);
+
+ ptr += sizeof(*tlv);
+ memcpy(ptr, msdu->data, buf_len);
+
+ return skb;
+}
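A worked example of the length math above, assuming sizeof(struct wmi_tlv) == 4 (two __le16 header fields) and sizeof(struct wmi_tlv_mgmt_tx_cmd) == 28 (five __le32 plus one __le64, packed); the 39-byte frame is hypothetical:

/*
 *   len     = 28 + 2 * 4               = 36  (cmd TLV + byte-array TLV headers)
 *   buf_len = round_up(min(39, 64), 4) = 40  (clamped to WMI_TLV_MGMT_TX_FRAME_MAX_LEN)
 *   len     = round_up(36 + 40, 4)     = 76  bytes passed to ath10k_wmi_alloc_skb()
 */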
+
+static struct sk_buff *
ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type,
u32 delay_ms)
@@ -3258,6 +3366,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
+ .mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
@@ -3592,6 +3701,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
- /* .gen_mgmt_tx = not implemented; HTT is used */
+ .gen_mgmt_tx = ath10k_wmi_tlv_op_gen_mgmt_tx,
.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 22cf011e839a..da89128e8dd6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -22,6 +22,7 @@
#define WMI_TLV_CMD_UNSUPPORTED 0
#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_MGMT_TX_FRAME_MAX_LEN 64
enum wmi_tlv_grp_id {
WMI_TLV_GRP_START = 0x3,
@@ -132,6 +133,7 @@ enum wmi_tlv_cmd_id {
WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
WMI_TLV_MGMT_TX_CMDID,
WMI_TLV_PRB_TMPL_CMDID,
+ WMI_TLV_MGMT_TX_SEND_CMD,
WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
WMI_TLV_ADDBA_SEND_CMDID,
WMI_TLV_ADDBA_STATUS_CMDID,
@@ -890,6 +892,63 @@ enum wmi_tlv_tag {
WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT,
WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM,
WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_EVENT,
+ WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_OCB_SET_UTC_TIME_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_START_TIMING_ADVERT_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_STOP_TIMING_ADVERT_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_DCC_GET_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_DCC_CHANNEL_STATS_REQUEST,
+ WMI_TLV_TAG_STRUCT_DCC_GET_STATS_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_DCC_CLEAR_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_CMD,
+ WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_DCC_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_OCB_CHANNEL,
+ WMI_TLV_TAG_STRUCT_OCB_SCHEDULE_ELEMENT,
+ WMI_TLV_TAG_STRUCT_DCC_NDL_STATS_PER_CHANNEL,
+ WMI_TLV_TAG_STRUCT_DCC_NDL_CHAN,
+ WMI_TLV_TAG_STRUCT_QOS_PARAMETER,
+ WMI_TLV_TAG_STRUCT_DCC_NDL_ACTIVE_STATE_CONFIG,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_FILTER_FIXED_PARAM,
+ WMI_TLV_TAG_STRUCT_PASSPOINT_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PASSPOINT_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_TSF_TSTAMP_ACTION_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_TSF_REPORT_EVENT,
+ WMI_TLV_TAG_STRUCT_GET_FW_MEM_DUMP,
+ WMI_TLV_TAG_STRUCT_UPDATE_FW_MEM_DUMP,
+ WMI_TLV_TAG_STRUCT_FW_MEM_DUMP_PARAMS,
+ WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH,
+ WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH_COMPLETE,
+ WMI_TLV_TAG_STRUCT_PEER_SET_RATE_REPORT_CONDITION,
+ WMI_TLV_TAG_STRUCT_ROAM_SUBNET_CHANGE_CONFIG,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_IE_CMD,
+ WMI_TLV_TAG_STRUCT_RSSI_BREACH_MONITOR_CONFIG,
+ WMI_TLV_TAG_STRUCT_RSSI_BREACH_EVENT,
+ WMI_TLV_TAG_STRUCT_EVENT_INITIAL_WAKEUP,
+ WMI_TLV_TAG_STRUCT_SOC_SET_PCL_CMD,
+ WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_SOC_HW_MODE_TRANSITION_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_TXRX_STREAMS,
+ WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_IOAC_SOCK_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+ WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_CONFIG,
+ WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_SUPPORTED_EVENT,
+ WMI_TLV_TAG_STRUCT_PACKET_FILTER_CONFIG,
+ WMI_TLV_TAG_STRUCT_PACKET_FILTER_ENABLE,
+ WMI_TLV_TAG_STRUCT_SAP_SET_BLACKLIST_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_CMD,
WMI_TLV_TAG_MAX
};
@@ -965,6 +1024,50 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
WMI_TLV_SERVICE_MDNS_OFFLOAD,
WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+ WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT,
+ WMI_TLV_SERVICE_OCB,
+ WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD,
+ WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT,
+ WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD,
+ WMI_TLV_SERVICE_MGMT_TX_HTT,
+ WMI_TLV_SERVICE_MGMT_TX_WMI,
+ WMI_TLV_SERVICE_EXT_MSG,
+ WMI_TLV_SERVICE_MAWC,
+ WMI_TLV_SERVICE_PEER_ASSOC_CONF,
+ WMI_TLV_SERVICE_EGAP,
+ WMI_TLV_SERVICE_STA_PMF_OFFLOAD,
+ WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY,
+ WMI_TLV_SERVICE_ENHANCED_PROXY_STA,
+ WMI_TLV_SERVICE_ATF,
+ WMI_TLV_SERVICE_COEX_GPIO,
+ WMI_TLV_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_TLV_SERVICE_ENTERPRISE_MESH,
+ WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_TLV_SERVICE_BPF_OFFLOAD,
+ WMI_TLV_SERVICE_SYNC_DELETE_CMDS,
+ WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES,
+ WMI_TLV_SERVICE_NAN_DATA,
+ WMI_TLV_SERVICE_NAN_RTT,
+ WMI_TLV_SERVICE_11AX,
+ WMI_TLV_SERVICE_DEPRECATED_REPLACE,
+ WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
+ WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER,
+ WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_TLV_SERVICE_MESH_11S,
+ WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT,
+ WMI_TLV_SERVICE_VDEV_RX_FILTER,
+ WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT,
+ WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET,
+ WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET,
+ WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER,
+ WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT,
+ WMI_TLV_SERVICE_WLAN_STATS_REPORT,
+ WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT,
+ WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD,
};
#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
@@ -1121,6 +1224,8 @@ wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
WMI_SERVICE_MDNS_OFFLOAD, len);
SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_MGMT_TX_WMI,
+ WMI_SERVICE_MGMT_TX_WMI, len);
}
#undef SVCMAP
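For illustration, what each SVCMAP() expansion does: copy one service bit from the firmware-order bitmap into the host-order bitmap under a different bit number. A minimal sketch with hypothetical helper names, not the kernel macro itself:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

static bool svc_enabled(const uint32_t *in, unsigned int bit, size_t len_words)
{
	return bit / 32 < len_words && (in[bit / 32] >> (bit % 32)) & 1;
}

static void svc_map(const uint32_t *in, size_t len_words, uint32_t *out,
		    unsigned int fw_bit, unsigned int host_bit)
{
	if (svc_enabled(in, fw_bit, len_words))		/* firmware advertises it */
		out[host_bit / 32] |= 1u << (host_bit % 32);	/* set host bit */
}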
@@ -1643,4 +1748,12 @@ struct wmi_tlv_tx_pause_ev {
void ath10k_wmi_tlv_attach(struct ath10k *ar);
+struct wmi_tlv_mgmt_tx_cmd {
+ __le32 vdev_id;
+ __le32 desc_id;
+ __le32 chanfreq;
+ __le64 paddr;
+ __le32 frame_len;
+ __le32 buf_len;
+} __packed;
#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index cad2e42dcef6..58dc2189ba49 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -29,6 +29,7 @@
#include "p2p.h"
#include "hw.h"
#include "hif.h"
+#include "txrx.h"
#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
@@ -4456,6 +4457,74 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(ev->rate_max));
}
+static void
+ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_tdls_peer_event *ev;
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ int vdev_id;
+ int peer_status;
+ int peer_reason;
+ u8 reason;
+
+ if (skb->len < sizeof(*ev)) {
+ ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n",
+ skb->len);
+ return;
+ }
+
+ ev = (struct wmi_tdls_peer_event *)skb->data;
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+ peer_status = __le32_to_cpu(ev->peer_status);
+ peer_reason = __le32_to_cpu(ev->peer_reason);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer) {
+ ath10k_warn(ar, "failed to find peer entry for %pM\n",
+ ev->peer_macaddr.addr);
+ return;
+ }
+
+ switch (peer_status) {
+ case WMI_TDLS_SHOULD_TEARDOWN:
+ switch (peer_reason) {
+ case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
+ case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE:
+ case WMI_TDLS_TEARDOWN_REASON_RSSI:
+ reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE;
+ break;
+ default:
+ reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+ break;
+ }
+
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (!arvif) {
+ ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n",
+ vdev_id);
+ return;
+ }
+
+ ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr,
+ NL80211_TDLS_TEARDOWN, reason,
+ GFP_ATOMIC);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received tdls teardown event for peer %pM reason %u\n",
+ ev->peer_macaddr.addr, peer_reason);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received unknown tdls peer event %u\n",
+ peer_status);
+ break;
+ }
+}
+
void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
@@ -5477,6 +5546,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
ath10k_wmi_event_pdev_tpc_config(ar, skb);
break;
+ case WMI_10_4_TDLS_PEER_EVENTID:
+ ath10k_wmi_handle_tdls_peer_event(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index c02b21cff38d..c7b30ed9015d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -195,6 +195,8 @@ enum wmi_service {
WMI_SERVICE_SMART_LOGGING_SUPPORT,
WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+ WMI_SERVICE_MGMT_TX_WMI,
+ WMI_SERVICE_TDLS_WIDER_BANDWIDTH,
/* keep last */
WMI_SERVICE_MAX,
@@ -336,6 +338,7 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA,
WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+ WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
};
static inline char *wmi_service_name(int service_id)
@@ -444,6 +447,7 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT);
SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE);
SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY);
+ SVCSTR(WMI_SERVICE_TDLS_WIDER_BANDWIDTH);
default:
return NULL;
}
@@ -740,6 +744,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, len);
SVCMAP(WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
+ WMI_SERVICE_TDLS_WIDER_BANDWIDTH, len);
}
#undef SVCMAP
@@ -797,6 +803,7 @@ struct wmi_cmd_map {
u32 bcn_filter_rx_cmdid;
u32 prb_req_filter_rx_cmdid;
u32 mgmt_tx_cmdid;
+ u32 mgmt_tx_send_cmdid;
u32 prb_tmpl_cmdid;
u32 addba_clear_resp_cmdid;
u32 addba_send_cmdid;
@@ -2922,7 +2929,7 @@ struct wmi_ext_resource_config_10_4_cmd {
__le32 max_tdls_concurrent_buffer_sta;
};
-/* strucutre describing host memory chunk. */
+/* structure describing host memory chunk. */
struct host_memory_chunk {
/* id of the request that is passed up in service ready */
__le32 req_id;
@@ -5236,7 +5243,8 @@ enum wmi_10_4_vdev_param {
#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
-#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
+#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7)
#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 0d46d6dc7578..c4cbccb29b31 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h
index 9745b9ddc7f5..6e810105b775 100644
--- a/drivers/net/wireless/ath/ath10k/wow.h
+++ b/drivers/net/wireless/ath/ath10k/wow.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index b53eb2b85f02..2ba8cf3f38af 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -2766,7 +2766,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_mgmt *mgmt;
bool hidden = false;
u8 *ies;
- int ies_len;
struct wmi_connect_cmd p;
int res;
int i, ret;
@@ -2804,7 +2803,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
ies = mgmt->u.beacon.variable;
if (ies > info->beacon.head + info->beacon.head_len)
return -EINVAL;
- ies_len = info->beacon.head + info->beacon.head_len - ies;
if (info->ssid == NULL)
return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 1379906bf849..8da9506f8c2b 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1001,7 +1001,7 @@ static void aggr_slice_amsdu(struct aggr_info *p_aggr,
while (amsdu_len > mac_hdr_len) {
hdr = (struct ethhdr *) framep;
- payload_8023_len = ntohs(hdr->h_proto);
+ payload_8023_len = be16_to_cpu(hdr->h_proto);
if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 783a38f1a626..1f3523019509 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -61,13 +61,12 @@ config ATH9K_DEBUGFS
depends on ATH9K && DEBUG_FS
select MAC80211_DEBUGFS
select ATH9K_COMMON_DEBUG
- select RELAY
---help---
Say Y, if you need access to ath9k's statistics for
interrupts, rate control, etc.
- Also required for changing debug message flags at run time.
- As well as access to the FFT/spectral data and TX99.
+ Also required for changing debug message flags at run time and for
+ TX99.
config ATH9K_STATION_STATISTICS
bool "Detailed station statistics"
@@ -177,7 +176,6 @@ config ATH9K_HTC_DEBUGFS
bool "Atheros ath9k_htc debugging"
depends on ATH9K_HTC && DEBUG_FS
select ATH9K_COMMON_DEBUG
- select RELAY
---help---
Say Y, if you need access to ath9k_htc's statistics.
As well as access to the FFT/spectral data.
@@ -192,3 +190,11 @@ config ATH9K_HWRNG
Say Y to feed entropy directly from the WiFi driver to the input
pool.
+
+config ATH9K_COMMON_SPECTRAL
+ bool "Atheros ath9k/ath9k_htc spectral scan support"
+ depends on ATH9K_DEBUGFS || ATH9K_HTC_DEBUGFS
+ select RELAY
+ default n
+ ---help---
+ Say Y to enable access to the FFT/spectral data via debugfs.
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index d804ce7391a0..f71b2ad8275c 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -62,8 +62,8 @@ ath9k_common-y:= common.o \
common-init.o \
common-beacon.o \
-ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o \
- common-spectral.o
+ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o
+ath9k_common-$(CONFIG_ATH9K_COMMON_SPECTRAL) += common-spectral.o
ath9k_htc-y += htc_hst.o \
hif_usb.o \
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index c2e210c0a770..f019a20e5a1f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3310,6 +3310,12 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
if (ar9300_check_eeprom_header(ah, read, cptr))
goto found;
+ cptr = AR9300_BASE_ADDR_4K;
+ ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
+ cptr);
+ if (ar9300_check_eeprom_header(ah, read, cptr))
+ goto found;
+
cptr = AR9300_BASE_ADDR_512;
ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
cptr);
@@ -3430,6 +3436,60 @@ static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size,
return len;
}
+static u32 ar9003_dump_cal_data(struct ath_hw *ah, char *buf, u32 len, u32 size,
+ bool is_2g)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase;
+ struct ar9300_cal_data_per_freq_op_loop *cal_pier;
+ int cal_pier_nr;
+ int freq;
+ int i, j;
+
+ pBase = &eep->baseEepHeader;
+
+ if (is_2g)
+ cal_pier_nr = AR9300_NUM_2G_CAL_PIERS;
+ else
+ cal_pier_nr = AR9300_NUM_5G_CAL_PIERS;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!((pBase->txrxMask >> i) & 1))
+ continue;
+
+ len += snprintf(buf + len, size - len, "Chain %d\n", i);
+
+ len += snprintf(buf + len, size - len,
+ "Freq\t ref\tvolt\ttemp\tnf_cal\tnf_pow\trx_temp\n");
+
+ for (j = 0; j < cal_pier_nr; j++) {
+ if (is_2g) {
+ cal_pier = &eep->calPierData2G[i][j];
+ freq = 2300 + eep->calFreqPier2G[j];
+ } else {
+ cal_pier = &eep->calPierData5G[i][j];
+ freq = 4800 + eep->calFreqPier5G[j] * 5;
+ }
+
+ len += snprintf(buf + len, size - len,
+ "%d\t", freq);
+
+ len += snprintf(buf + len, size - len,
+ "%d\t%d\t%d\t%d\t%d\t%d\n",
+ cal_pier->refPower,
+ cal_pier->voltMeas,
+ cal_pier->tempMeas,
+ cal_pier->rxTempMeas ?
+ N2DBM(cal_pier->rxNoisefloorCal) : 0,
+ cal_pier->rxTempMeas ?
+ N2DBM(cal_pier->rxNoisefloorPower) : 0,
+ cal_pier->rxTempMeas);
+ }
+ }
+
+ return len;
+}
+
static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
u8 *buf, u32 len, u32 size)
{
@@ -3441,10 +3501,18 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
"%20s :\n", "2GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader2G);
- len += scnprintf(buf + len, size - len,
+
+ len += scnprintf(buf + len, size - len, "Calibration data\n");
+ len = ar9003_dump_cal_data(ah, buf, len, size, true);
+
+ len += snprintf(buf + len, size - len,
"%20s :\n", "5GHz modal Header");
len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader5G);
+
+ len += snprintf(buf + len, size - len, "Calibration data\n");
+ len = ar9003_dump_cal_data(ah, buf, len, size, false);
+
goto out;
}
@@ -4683,7 +4751,8 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
int ichain,
int *pfrequency,
int *pcorrection,
- int *ptemperature, int *pvoltage)
+ int *ptemperature, int *pvoltage,
+ int *pnf_cal, int *pnf_power)
{
u8 *pCalPier;
struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
@@ -4725,6 +4794,10 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
*pcorrection = pCalPierStruct->refPower;
*ptemperature = pCalPierStruct->tempMeas;
*pvoltage = pCalPierStruct->voltMeas;
+ *pnf_cal = pCalPierStruct->rxTempMeas ?
+ N2DBM(pCalPierStruct->rxNoisefloorCal) : 0;
+ *pnf_power = pCalPierStruct->rxTempMeas ?
+ N2DBM(pCalPierStruct->rxNoisefloorPower) : 0;
return 0;
}
@@ -4889,14 +4962,18 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
int mode;
int lfrequency[AR9300_MAX_CHAINS],
lcorrection[AR9300_MAX_CHAINS],
- ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS];
+ ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS],
+ lnf_cal[AR9300_MAX_CHAINS], lnf_pwr[AR9300_MAX_CHAINS];
int hfrequency[AR9300_MAX_CHAINS],
hcorrection[AR9300_MAX_CHAINS],
- htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS];
+ htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS],
+ hnf_cal[AR9300_MAX_CHAINS], hnf_pwr[AR9300_MAX_CHAINS];
int fdiff;
int correction[AR9300_MAX_CHAINS],
- voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS];
- int pfrequency, pcorrection, ptemperature, pvoltage;
+ voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS],
+ nf_cal[AR9300_MAX_CHAINS], nf_pwr[AR9300_MAX_CHAINS];
+ int pfrequency, pcorrection, ptemperature, pvoltage,
+ pnf_cal, pnf_pwr;
struct ath_common *common = ath9k_hw_common(ah);
mode = (frequency >= 4000);
@@ -4914,7 +4991,8 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
for (ipier = 0; ipier < npier; ipier++) {
if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain,
&pfrequency, &pcorrection,
- &ptemperature, &pvoltage)) {
+ &ptemperature, &pvoltage,
+ &pnf_cal, &pnf_pwr)) {
fdiff = frequency - pfrequency;
/*
@@ -4936,6 +5014,8 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
htemperature[ichain] =
ptemperature;
hvoltage[ichain] = pvoltage;
+ hnf_cal[ichain] = pnf_cal;
+ hnf_pwr[ichain] = pnf_pwr;
}
}
if (fdiff >= 0) {
@@ -4952,6 +5032,8 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
ltemperature[ichain] =
ptemperature;
lvoltage[ichain] = pvoltage;
+ lnf_cal[ichain] = pnf_cal;
+ lnf_pwr[ichain] = pnf_pwr;
}
}
}
@@ -4960,15 +5042,20 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
/* interpolate */
for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
- ath_dbg(common, EEPROM, "ch=%d f=%d low=%d %d h=%d %d\n",
+ ath_dbg(common, EEPROM,
+ "ch=%d f=%d low=%d %d h=%d %d n=%d %d p=%d %d\n",
ichain, frequency, lfrequency[ichain],
lcorrection[ichain], hfrequency[ichain],
- hcorrection[ichain]);
+ hcorrection[ichain], lnf_cal[ichain],
+ hnf_cal[ichain], lnf_pwr[ichain],
+ hnf_pwr[ichain]);
/* they're the same, so just pick one */
if (hfrequency[ichain] == lfrequency[ichain]) {
correction[ichain] = lcorrection[ichain];
voltage[ichain] = lvoltage[ichain];
temperature[ichain] = ltemperature[ichain];
+ nf_cal[ichain] = lnf_cal[ichain];
+ nf_pwr[ichain] = lnf_pwr[ichain];
}
/* the low frequency is good */
else if (frequency - lfrequency[ichain] < 1000) {
@@ -4992,12 +5079,26 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
hfrequency[ichain],
lvoltage[ichain],
hvoltage[ichain]);
+
+ nf_cal[ichain] = interpolate(frequency,
+ lfrequency[ichain],
+ hfrequency[ichain],
+ lnf_cal[ichain],
+ hnf_cal[ichain]);
+
+ nf_pwr[ichain] = interpolate(frequency,
+ lfrequency[ichain],
+ hfrequency[ichain],
+ lnf_pwr[ichain],
+ hnf_pwr[ichain]);
}
/* only low is good, use it */
else {
correction[ichain] = lcorrection[ichain];
temperature[ichain] = ltemperature[ichain];
voltage[ichain] = lvoltage[ichain];
+ nf_cal[ichain] = lnf_cal[ichain];
+ nf_pwr[ichain] = lnf_pwr[ichain];
}
}
/* only high is good, use it */
@@ -5005,10 +5106,14 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
correction[ichain] = hcorrection[ichain];
temperature[ichain] = htemperature[ichain];
voltage[ichain] = hvoltage[ichain];
+ nf_cal[ichain] = hnf_cal[ichain];
+ nf_pwr[ichain] = hnf_pwr[ichain];
} else { /* nothing is good, presume 0???? */
correction[ichain] = 0;
temperature[ichain] = 0;
voltage[ichain] = 0;
+ nf_cal[ichain] = 0;
+ nf_pwr[ichain] = 0;
}
}
@@ -5019,6 +5124,16 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
"for frequency=%d, calibration correction = %d %d %d\n",
frequency, correction[0], correction[1], correction[2]);
+ /* Store calibrated noise floor values */
+ for (ichain = 0; ichain < AR5416_MAX_CHAINS; ichain++)
+ if (mode) {
+ ah->nf_5g.cal[ichain] = nf_cal[ichain];
+ ah->nf_5g.pwr[ichain] = nf_pwr[ichain];
+ } else {
+ ah->nf_2g.cal[ichain] = nf_cal[ichain];
+ ah->nf_2g.pwr[ichain] = nf_pwr[ichain];
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index bd2269c7de6b..e8fda54acfe3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -62,6 +62,16 @@
*/
#define AR9300_PWR_TABLE_OFFSET 0
+/* Noise power data definitions
+ * units are: 4 x dBm - NOISE_PWR_DATA_OFFSET
+ * (e.g. -25 = (-25/4 - 90) = -96.25 dBm)
+ * range (for 6 signed bits) is (-32 to 31) + offset => -122dBm to -59dBm
+ * resolution (2 bits) is 0.25dBm
+ */
+#define NOISE_PWR_DATA_OFFSET -90
+#define NOISE_PWR_DBM_2_INT(_p) ((((_p) + 3) >> 2) + NOISE_PWR_DATA_OFFSET)
+#define N2DBM(_p) NOISE_PWR_DBM_2_INT(_p)
+
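A worked example of the conversion above (assuming the usual arithmetic right shift on negative values): a raw reading of -25 quarter-dB units gives ((-25 + 3) >> 2) + (-90) = -6 - 90 = -96 dBm, the integer floor of the -96.25 dBm quoted in the comment.

#include <stdio.h>

#define NOISE_PWR_DATA_OFFSET -90
#define N2DBM(_p) ((((_p) + 3) >> 2) + NOISE_PWR_DATA_OFFSET)

int main(void)
{
	printf("%d dBm\n", N2DBM(-25));	/* prints: -96 dBm */
	return 0;
}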
/* byte addressable */
#define AR9300_EEPROM_SIZE (16*1024)
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 13ab6bc46775..3d9447e21025 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -58,19 +58,25 @@ static struct ath_nf_limits *ath9k_hw_get_nf_limits(struct ath_hw *ah,
}
static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
- struct ath9k_channel *chan)
+ struct ath9k_channel *chan,
+ int chain)
{
- return ath9k_hw_get_nf_limits(ah, chan)->nominal;
+ s16 calib_nf = ath9k_hw_get_nf_limits(ah, chan)->cal[chain];
+
+ if (calib_nf)
+ return calib_nf;
+ else
+ return ath9k_hw_get_nf_limits(ah, chan)->nominal;
}
s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
s16 nf)
{
- s8 noise = ATH_DEFAULT_NOISE_FLOOR;
+ s8 noise = ath9k_hw_get_default_nf(ah, chan, 0);
if (nf) {
s8 delta = nf - ATH9K_NF_CAL_NOISE_THRESH -
- ath9k_hw_get_default_nf(ah, chan);
+ ath9k_hw_get_default_nf(ah, chan, 0);
if (delta > 0)
noise += delta;
}
@@ -240,7 +246,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
unsigned i, j;
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
struct ath_common *common = ath9k_hw_common(ah);
- s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
+ s16 default_nf = ath9k_hw_get_nf_limits(ah, chan)->nominal;
u32 bb_agc_ctl = REG_READ(ah, AR_PHY_AGC_CONTROL);
if (ah->caldata)
@@ -258,8 +264,13 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
nfval = ah->nf_override;
else if (h)
nfval = h[i].privNF;
- else
- nfval = default_nf;
+ else {
+ /* Try to get calibrated noise floor value */
+ nfval =
+ ath9k_hw_get_nf_limits(ah, chan)->cal[i];
+ if (nfval > -60 || nfval < -127)
+ nfval = default_nf;
+ }
REG_RMW(ah, ah->nf_regs[i],
(((u32) nfval << 1) & 0x1ff), 0x1ff);
@@ -429,20 +440,19 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath9k_nfcal_hist *h;
- s16 default_nf;
- int i, j;
+ int i, j, k = 0;
ah->caldata->channel = chan->channel;
ah->caldata->channelFlags = chan->channelFlags;
h = ah->caldata->nfCalHist;
- default_nf = ath9k_hw_get_default_nf(ah, chan);
for (i = 0; i < NUM_NF_READINGS; i++) {
h[i].currIndex = 0;
- h[i].privNF = default_nf;
+ h[i].privNF = ath9k_hw_get_default_nf(ah, chan, k);
h[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH;
- for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
- h[i].nfCalBuffer[j] = default_nf;
- }
+ for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++)
+ h[i].nfCalBuffer[j] = h[i].privNF;
+ if (++k >= AR5416_MAX_CHAINS)
+ k = 0;
}
}
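For illustration, the mapping the new k counter implements: the NUM_NF_READINGS history slots (control and extension channel readings) are seeded per physical chain, i.e. slot i takes the calibrated default of chain i % AR5416_MAX_CHAINS. The values below assume ath9k's usual 6 slots over 3 chains.

#include <stdio.h>

#define AR5416_MAX_CHAINS 3
#define NUM_NF_READINGS 6	/* 3 control-channel + 3 extension-channel slots */

int main(void)
{
	for (int i = 0; i < NUM_NF_READINGS; i++)
		printf("slot %d seeds from chain %d\n", i, i % AR5416_MAX_CHAINS);
	return 0;
}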
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h
index 5d1a51d83aa6..303ab470ce34 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.h
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.h
@@ -151,7 +151,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
return bins[0] & 0x3f;
}
-#ifdef CONFIG_ATH9K_COMMON_DEBUG
+#ifdef CONFIG_ATH9K_COMMON_SPECTRAL
void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, struct dentry *debugfs_phy);
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv);
@@ -183,6 +183,6 @@ static inline int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv,
{
return 0;
}
-#endif /* CONFIG_ATH9K_COMMON_DEBUG */
+#endif /* CONFIG_ATH9K_COMMON_SPECTRAL */
#endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 40a397fd0e0e..6fee9a464cce 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -123,11 +123,9 @@ static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
fft = (struct ath9k_dfs_fft_40 *) (data + 2);
ath_dbg(common, DFS, "fixing datalen by 2\n");
}
- if (IS_CHAN_HT40MINUS(ah->curchan)) {
- int temp = is_ctl;
- is_ctl = is_ext;
- is_ext = temp;
- }
+ if (IS_CHAN_HT40MINUS(ah->curchan))
+ swap(is_ctl, is_ext);
+
for (i = 0; i < FFT_NUM_SAMPLES; i++)
max_bin[i] = ath9k_get_max_index_ht40(fft + i, is_ctl,
is_ext);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f808e5833d7e..a82ad739ab80 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1683,6 +1683,10 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
+ if (tid >= ATH9K_HTC_MAX_TID) {
+ ret = -EINVAL;
+ break;
+ }
ista = (struct ath9k_htc_sta *) sta->drv_priv;
spin_lock_bh(&priv->tx.tx_lock);
ista->tid_state[tid] = AGGR_OPERATIONAL;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8c5c2dd8fa7f..cd0f023ccf77 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -922,6 +922,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
AR_IMR_RXERR |
AR_IMR_RXORN |
AR_IMR_BCNMISC;
+ u32 msi_cfg = 0;
if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
AR_SREV_9561(ah))
@@ -929,22 +930,30 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
if (AR_SREV_9300_20_OR_LATER(ah)) {
imr_reg |= AR_IMR_RXOK_HP;
- if (ah->config.rx_intr_mitigation)
+ if (ah->config.rx_intr_mitigation) {
imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
- else
+ msi_cfg |= AR_INTCFG_MSI_RXINTM | AR_INTCFG_MSI_RXMINTR;
+ } else {
imr_reg |= AR_IMR_RXOK_LP;
-
+ msi_cfg |= AR_INTCFG_MSI_RXOK;
+ }
} else {
- if (ah->config.rx_intr_mitigation)
+ if (ah->config.rx_intr_mitigation) {
imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
- else
+ msi_cfg |= AR_INTCFG_MSI_RXINTM | AR_INTCFG_MSI_RXMINTR;
+ } else {
imr_reg |= AR_IMR_RXOK;
+ msi_cfg |= AR_INTCFG_MSI_RXOK;
+ }
}
- if (ah->config.tx_intr_mitigation)
+ if (ah->config.tx_intr_mitigation) {
imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR;
- else
+ msi_cfg |= AR_INTCFG_MSI_TXINTM | AR_INTCFG_MSI_TXMINTR;
+ } else {
imr_reg |= AR_IMR_TXOK;
+ msi_cfg |= AR_INTCFG_MSI_TXOK;
+ }
ENABLE_REGWRITE_BUFFER(ah);
@@ -952,6 +961,16 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
ah->imrs2_reg |= AR_IMR_S2_GTT;
REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
+ if (ah->msi_enabled) {
+ ah->msi_reg = REG_READ(ah, AR_PCIE_MSI);
+ ah->msi_reg |= AR_PCIE_MSI_HW_DBI_WR_EN;
+ ah->msi_reg &= AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
+ REG_WRITE(ah, AR_INTCFG, msi_cfg);
+ ath_dbg(ath9k_hw_common(ah), ANY,
+ "value of AR_INTCFG=0x%X, msi_cfg=0x%X\n",
+ REG_READ(ah, AR_INTCFG), msi_cfg);
+ }
+
if (!AR_SREV_9100(ah)) {
REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 4ac70827d142..9804a24a2dc0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -754,6 +754,8 @@ struct ath_nf_limits {
s16 max;
s16 min;
s16 nominal;
+ s16 cal[AR5416_MAX_CHAINS];
+ s16 pwr[AR5416_MAX_CHAINS];
};
enum ath_cal_list {
@@ -977,6 +979,9 @@ struct ath_hw {
bool tpc_enabled;
u8 tx_power[Ar5416RateSize];
u8 tx_power_stbc[Ar5416RateSize];
+ bool msi_enabled;
+ u32 msi_mask;
+ u32 msi_reg;
};
struct ath_bus_ops {
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index fa58a32227f5..e479fae5aab9 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -23,6 +23,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/relay.h>
+#include <linux/dmi.h>
#include <net/ieee80211_radiotap.h>
#include "ath9k.h"
@@ -75,6 +76,10 @@ MODULE_PARM_DESC(use_chanctx, "Enable channel context for concurrency");
#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
+int ath9k_use_msi;
+module_param_named(use_msi, ath9k_use_msi, int, 0444);
+MODULE_PARM_DESC(use_msi, "Use MSI instead of INTx if possible");
+
bool is_ath9k_unloaded;
#ifdef CONFIG_MAC80211_LEDS
@@ -92,6 +97,56 @@ static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
};
#endif
+static int __init set_use_msi(const struct dmi_system_id *dmi)
+{
+ ath9k_use_msi = 1;
+ return 1;
+}
+
+static const struct dmi_system_id ath9k_quirks[] __initconst = {
+ {
+ .callback = set_use_msi,
+ .ident = "Dell Inspiron 24-3460",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 24-3460"),
+ },
+ },
+ {
+ .callback = set_use_msi,
+ .ident = "Dell Vostro 3262",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3262"),
+ },
+ },
+ {
+ .callback = set_use_msi,
+ .ident = "Dell Inspiron 3472",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3472"),
+ },
+ },
+ {
+ .callback = set_use_msi,
+ .ident = "Dell Vostro 15-3572",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15-3572"),
+ },
+ },
+ {
+ .callback = set_use_msi,
+ .ident = "Dell Inspiron 14-3473",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14-3473"),
+ },
+ },
+ {}
+};
+
static void ath9k_deinit_softc(struct ath_softc *sc);
static void ath9k_op_ps_wakeup(struct ath_common *common)
@@ -1100,6 +1155,8 @@ static int __init ath9k_init(void)
goto err_pci_exit;
}
+ dmi_check_system(ath9k_quirks);
+
return 0;
err_pci_exit:
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 77c94f9e7b61..58d02c19b6d0 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -832,6 +832,43 @@ static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
}
ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
+
+ if (ah->msi_enabled) {
+ u32 _msi_reg = 0;
+ u32 i = 0;
+ u32 msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
+
+ ath_dbg(ath9k_hw_common(ah), INTERRUPT,
+ "Enabling MSI, msi_mask=0x%X\n", ah->msi_mask);
+
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, ah->msi_mask);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, ah->msi_mask);
+ ath_dbg(ath9k_hw_common(ah), INTERRUPT,
+ "AR_INTR_PRIO_ASYNC_ENABLE=0x%X, AR_INTR_PRIO_ASYNC_MASK=0x%X\n",
+ REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE),
+ REG_READ(ah, AR_INTR_PRIO_ASYNC_MASK));
+
+ if (ah->msi_reg == 0)
+ ah->msi_reg = REG_READ(ah, AR_PCIE_MSI);
+
+ ath_dbg(ath9k_hw_common(ah), INTERRUPT,
+ "AR_PCIE_MSI=0x%X, ah->msi_reg = 0x%X\n",
+ AR_PCIE_MSI, ah->msi_reg);
+
+ i = 0;
+ do {
+ REG_WRITE(ah, AR_PCIE_MSI,
+ (ah->msi_reg | AR_PCIE_MSI_ENABLE)
+ & msi_pend_addr_mask);
+ _msi_reg = REG_READ(ah, AR_PCIE_MSI);
+ i++;
+ } while ((_msi_reg & AR_PCIE_MSI_ENABLE) == 0 && i < 200);
+
+ if (i >= 200)
+ ath_err(ath9k_hw_common(ah),
+ "%s: _msi_reg = 0x%X\n",
+ __func__, _msi_reg);
+ }
}
void ath9k_hw_resume_interrupts(struct ath_hw *ah)
@@ -878,12 +915,21 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
if (!(ints & ATH9K_INT_GLOBAL))
ath9k_hw_disable_interrupts(ah);
+ if (ah->msi_enabled) {
+ ath_dbg(common, INTERRUPT, "Clearing AR_INTR_PRIO_ASYNC_ENABLE\n");
+
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
+ REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE);
+ }
+
ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);
mask = ints & ATH9K_INT_COMMON;
mask2 = 0;
+ ah->msi_mask = 0;
if (ints & ATH9K_INT_TX) {
+ ah->msi_mask |= AR_INTR_PRIO_TX;
if (ah->config.tx_intr_mitigation)
mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
else {
@@ -898,6 +944,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
mask |= AR_IMR_TXEOL;
}
if (ints & ATH9K_INT_RX) {
+ ah->msi_mask |= AR_INTR_PRIO_RXLP | AR_INTR_PRIO_RXHP;
if (AR_SREV_9300_20_OR_LATER(ah)) {
mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
if (ah->config.rx_intr_mitigation) {
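The do/while loop in __ath9k_hw_enable_interrupts() above is a bounded write-and-verify: AR_PCIE_MSI_ENABLE may not latch on the first write, so the driver rewrites and re-reads until the bit sticks or 200 attempts elapse. The same shape, condensed into a hypothetical helper (a sketch, not part of the patch):

    static bool reg_set_and_verify(struct ath_hw *ah, u32 reg, u32 bits)
    {
            int tries = 0;

            do {
                    REG_WRITE(ah, reg, REG_READ(ah, reg) | bits);
                    if (REG_READ(ah, reg) & bits)
                            return true;    /* bits latched */
            } while (++tries < 200);

            return false;   /* hardware never accepted the write */
    }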
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 223606311261..645f0fbd9179 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -22,6 +22,8 @@
#include <linux/module.h>
#include "ath9k.h"
+extern int ath9k_use_msi;
+
static const struct pci_device_id ath_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
@@ -889,6 +891,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
u32 val;
int ret = 0;
char hw_name[64];
+ int msi_enabled = 0;
if (pcim_enable_device(pdev))
return -EIO;
@@ -960,7 +963,20 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->mem = pcim_iomap_table(pdev)[0];
sc->driver_data = id->driver_data;
- ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
+ if (ath9k_use_msi) {
+ if (pci_enable_msi(pdev) == 0) {
+ msi_enabled = 1;
+ dev_info(&pdev->dev, "Using MSI\n");
+ } else {
+ dev_info(&pdev->dev, "Using INTx\n");
+ }
+ }
+
+ if (!msi_enabled)
+ ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
+ else
+ ret = request_irq(pdev->irq, ath_isr, 0, "ath9k", sc);
+
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
goto err_irq;
@@ -974,6 +990,9 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init;
}
+ sc->sc_ah->msi_enabled = msi_enabled;
+ sc->sc_ah->msi_reg = 0;
+
ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
hw_name, (unsigned long)sc->mem, pdev->irq);
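The probe path above enables MSI by hand and falls back to a shared INTx line. On newer kernels the same policy is usually expressed through pci_alloc_irq_vectors(); a minimal sketch of that alternative (not what this patch does):

    ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
    if (ret < 0)
            return ret;

    /* a shared handler is only needed for legacy INTx */
    ret = request_irq(pci_irq_vector(pdev, 0), ath_isr,
                      pdev->msi_enabled ? 0 : IRQF_SHARED, "ath9k", sc);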
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 2197aee2bb72..a8ac42c96d71 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -826,9 +826,9 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
sc->rx.discard_next = false;
/*
- * Discard zero-length packets.
+ * Discard zero-length packets and packets smaller than the 10-byte ACK frame.
*/
- if (!rx_stats->rs_datalen) {
+ if (rx_stats->rs_datalen < 10) {
RX_STAT_INC(rx_len_err);
goto corrupt;
}
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 80ff69f99229..653e79611830 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -146,6 +146,14 @@
#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15
#define AR_MACMISC_MISC_OBS_BUS_1 1
+#define AR_INTCFG 0x005C
+#define AR_INTCFG_MSI_RXOK 0x00000000
+#define AR_INTCFG_MSI_RXINTM 0x00000004
+#define AR_INTCFG_MSI_RXMINTR 0x00000006
+#define AR_INTCFG_MSI_TXOK 0x00000000
+#define AR_INTCFG_MSI_TXINTM 0x00000010
+#define AR_INTCFG_MSI_TXMINTR 0x00000018
+
#define AR_DATABUF_SIZE 0x0060
#define AR_DATABUF_SIZE_MASK 0x00000FFF
@@ -1256,6 +1264,13 @@ enum {
#define AR_PCIE_MSI (AR_SREV_9340(ah) ? 0x40d8 : \
(AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094))
#define AR_PCIE_MSI_ENABLE 0x00000001
+#define AR_PCIE_MSI_HW_DBI_WR_EN 0x02000000
+#define AR_PCIE_MSI_HW_INT_PENDING_ADDR 0xFFA0C1FF /* bits 8..11: value must be 0x5060 */
+#define AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64 0xFFA0C9FF /* bits 8..11: value must be 0x5064 */
+
+#define AR_INTR_PRIO_TX 0x00000001
+#define AR_INTR_PRIO_RXLP 0x00000002
+#define AR_INTR_PRIO_RXHP 0x00000004
#define AR_INTR_PRIO_SYNC_ENABLE (AR_SREV_9340(ah) ? 0x4088 : 0x40c4)
#define AR_INTR_PRIO_ASYNC_MASK (AR_SREV_9340(ah) ? 0x408c : 0x40c8)
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index d5c810a8cc52..a3f1f7d042a4 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -236,6 +236,14 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn
return 0;
}
+static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
+{
+ size_t size;
+
+ size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
+ dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
+}
+
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
struct wcn36xx_dxe_mem_pool *pool)
{
@@ -722,7 +730,11 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for TX LOW channel */
/***************************************/
- wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
+ ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
+ if (ret) {
+ dev_err(wcn->dev, "Error allocating descriptor\n");
+ return ret;
+ }
wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
/* Write channel head to a NEXT register */
@@ -740,7 +752,12 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for TX HIGH channel */
/***************************************/
- wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
+ ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
+ if (ret) {
+ dev_err(wcn->dev, "Error allocating descriptor\n");
+ goto out_err_txh_ch;
+ }
+
wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
/* Write channel head to a NEXT register */
@@ -760,7 +777,12 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for RX LOW channel */
/***************************************/
- wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
+ ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
+ if (ret) {
+ dev_err(wcn->dev, "Error allocating descriptor\n");
+ goto out_err_rxl_ch;
+ }
+
/* For RX we need to preallocate buffers */
wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
@@ -790,7 +812,11 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for RX HIGH channel */
/***************************************/
- wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
+ ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
+ if (ret) {
+ dev_err(wcn->dev, "Error allocating descriptor\n");
+ goto out_err_rxh_ch;
+ }
/* For RX we need to preallocate buffers */
wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
@@ -819,11 +845,19 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
ret = wcn36xx_dxe_request_irqs(wcn);
if (ret < 0)
- goto out_err;
+ goto out_err_irq;
return 0;
-out_err:
+out_err_irq:
+ wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
+out_err_rxh_ch:
+ wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
+out_err_rxl_ch:
+ wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
+out_err_txh_ch:
+ wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
+
return ret;
}
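The relabeled exit path follows the usual kernel unwind idiom: each label frees exactly what was set up before the failing step, so teardown runs in reverse order of setup. The generic shape (setup_*/teardown_* are placeholders):

    static int setup_all(void)
    {
            int ret;

            ret = setup_a();
            if (ret)
                    return ret;

            ret = setup_b();
            if (ret)
                    goto err_a;

            ret = setup_c();
            if (ret)
                    goto err_b;

            return 0;

    err_b:
            teardown_b();
    err_a:
            teardown_a();
            return ret;
    }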
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index b765c647319d..182963522941 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -348,6 +348,13 @@ enum wcn36xx_hal_host_msg_type {
WCN36XX_HAL_DHCP_START_IND = 189,
WCN36XX_HAL_DHCP_STOP_IND = 190,
+ /* Scan Offload (hw) APIs */
+ WCN36XX_HAL_START_SCAN_OFFLOAD_REQ = 204,
+ WCN36XX_HAL_START_SCAN_OFFLOAD_RSP = 205,
+ WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ = 206,
+ WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP = 207,
+ WCN36XX_HAL_SCAN_OFFLOAD_IND = 210,
+
WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233,
WCN36XX_HAL_PRINT_REG_INFO_IND = 259,
@@ -1115,6 +1122,101 @@ struct wcn36xx_hal_finish_scan_rsp_msg {
} __packed;
+enum wcn36xx_hal_scan_type {
+ WCN36XX_HAL_SCAN_TYPE_PASSIVE = 0x00,
+ WCN36XX_HAL_SCAN_TYPE_ACTIVE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+struct wcn36xx_hal_mac_ssid {
+ u8 length;
+ u8 ssid[32];
+} __packed;
+
+struct wcn36xx_hal_start_scan_offload_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* BSSIDs hot list */
+ u8 num_bssid;
+ u8 bssids[4][ETH_ALEN];
+
+ /* Directed probe requests will be sent for the listed SSIDs (max 10) */
+ u8 num_ssid;
+ struct wcn36xx_hal_mac_ssid ssids[10];
+
+ /* Report AP with hidden ssid */
+ u8 scan_hidden;
+
+ /* Self MAC address */
+ u8 mac[ETH_ALEN];
+
+ /* BSS type */
+ enum wcn36xx_hal_bss_type bss_type;
+
+ /* Scan type */
+ enum wcn36xx_hal_scan_type scan_type;
+
+ /* Minimum scanning time on each channel (ms) */
+ u32 min_ch_time;
+
+ /* Maximum scanning time on each channel */
+ u32 max_ch_time;
+
+ /* Is a p2p search */
+ u8 p2p_search;
+
+ /* Channels to scan */
+ u8 num_channel;
+ u8 channels[80];
+
+ /* IE field */
+ u16 ie_len;
+ u8 ie[0];
+} __packed;
+
+struct wcn36xx_hal_start_scan_offload_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
+enum wcn36xx_hal_scan_offload_ind_type {
+ /* Scan has been started */
+ WCN36XX_HAL_SCAN_IND_STARTED = 0x01,
+ /* Scan has been completed */
+ WCN36XX_HAL_SCAN_IND_COMPLETED = 0x02,
+ /* Moved to foreign channel */
+ WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL = 0x08,
+ /* Scan request has been dequeued */
+ WCN36XX_HAL_SCAN_IND_DEQUEUED = 0x10,
+ /* Preempted by another high-priority scan */
+ WCN36XX_HAL_SCAN_IND_PREEMPTED = 0x20,
+ /* Scan start failed */
+ WCN36XX_HAL_SCAN_IND_FAILED = 0x40,
+ /* Scan restarted */
+ WCN36XX_HAL_SCAN_IND_RESTARTED = 0x80,
+ WCN36XX_HAL_SCAN_IND_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+struct wcn36xx_hal_scan_offload_ind {
+ struct wcn36xx_hal_msg_header header;
+
+ u32 type;
+ u32 channel_mhz;
+ u32 scan_id;
+} __packed;
+
+struct wcn36xx_hal_stop_scan_offload_req_msg {
+ struct wcn36xx_hal_msg_header header;
+} __packed;
+
+struct wcn36xx_hal_stop_scan_offload_rsp_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ /* success or failure */
+ u32 status;
+} __packed;
+
enum wcn36xx_hal_rate_index {
HW_RATE_INDEX_1MBPS = 0x82,
HW_RATE_INDEX_2MBPS = 0x84,
@@ -1507,11 +1609,6 @@ struct wcn36xx_hal_edca_param_record {
u16 txop_limit;
} __packed;
-struct wcn36xx_hal_mac_ssid {
- u8 length;
- u8 ssid[32];
-} __packed;
-
/* Concurrency role. These are generic IDs that identify the various roles
* in the software system. */
enum wcn36xx_hal_con_mode {
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index f7d228b5ba93..ab5be6d2c691 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
}
}
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ vif = wcn36xx_priv_to_vif(tmp);
+ if (hw->conf.flags & IEEE80211_CONF_PS) {
+ if (vif->bss_conf.ps) /* ps allowed ? */
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ } else {
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ }
+ }
+
mutex_unlock(&wcn->conf_mutex);
return 0;
@@ -629,7 +641,6 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_scan_request *hw_req)
{
struct wcn36xx *wcn = hw->priv;
-
mutex_lock(&wcn->scan_lock);
if (wcn->scan_req) {
mutex_unlock(&wcn->scan_lock);
@@ -638,11 +649,16 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
wcn->scan_aborted = false;
wcn->scan_req = &hw_req->req;
+
mutex_unlock(&wcn->scan_lock);
- schedule_work(&wcn->scan_work);
+ if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+ /* legacy manual/sw scan */
+ schedule_work(&wcn->scan_work);
+ return 0;
+ }
- return 0;
+ return wcn36xx_smd_start_hw_scan(wcn, vif, &hw_req->req);
}
static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
@@ -650,6 +666,12 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
{
struct wcn36xx *wcn = hw->priv;
+ if (!wcn36xx_smd_stop_hw_scan(wcn)) {
+ struct cfg80211_scan_info scan_info = { .aborted = true };
+
+ ieee80211_scan_completed(wcn->hw, &scan_info);
+ }
+
mutex_lock(&wcn->scan_lock);
wcn->scan_aborted = true;
mutex_unlock(&wcn->scan_lock);
@@ -747,17 +769,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
vif_priv->dtim_period = bss_conf->dtim_period;
}
- if (changed & BSS_CHANGED_PS) {
- wcn36xx_dbg(WCN36XX_DBG_MAC,
- "mac bss PS set %d\n",
- bss_conf->ps);
- if (bss_conf->ps) {
- wcn36xx_pmc_enter_bmps_state(wcn, vif);
- } else {
- wcn36xx_pmc_exit_bmps_state(wcn, vif);
- }
- }
-
if (changed & BSS_CHANGED_BSSID) {
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
bss_conf->bssid);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
index 589fe5f70971..1976b80c235f 100644
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
if (WCN36XX_BMPS != vif_priv->pw_state) {
- wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
- return -EINVAL;
+ /* Unbalanced call or last BMPS enter failed */
+ wcn36xx_dbg(WCN36XX_DBG_PMC,
+ "Not in BMPS mode, no need to exit\n");
+ return -EALREADY;
}
wcn36xx_smd_exit_bmps(wcn, vif);
vif_priv->pw_state = WCN36XX_FULL_POWER;
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 9c6590d5348a..2a4871ca9c72 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -73,6 +73,8 @@ static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = {
WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1),
WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1),
WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0),
+ WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_BT, 120000),
+ WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_WLAN, 30000),
WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
};
@@ -613,6 +615,85 @@ out:
return ret;
}
+int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct wcn36xx_hal_start_scan_offload_req_msg msg_body;
+ int ret, i;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_OFFLOAD_REQ);
+
+ msg_body.scan_type = WCN36XX_HAL_SCAN_TYPE_ACTIVE;
+ msg_body.min_ch_time = 30;
+ msg_body.max_ch_time = 100;
+ msg_body.scan_hidden = 1;
+ memcpy(msg_body.mac, vif->addr, ETH_ALEN);
+ msg_body.p2p_search = vif->p2p;
+
+ msg_body.num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body.ssids));
+ for (i = 0; i < msg_body.num_ssid; i++) {
+ msg_body.ssids[i].length = min_t(u8, req->ssids[i].ssid_len,
+ sizeof(msg_body.ssids[i].ssid));
+ memcpy(msg_body.ssids[i].ssid, req->ssids[i].ssid,
+ msg_body.ssids[i].length);
+ }
+
+ msg_body.num_channel = min_t(u8, req->n_channels,
+ sizeof(msg_body.channels));
+ for (i = 0; i < msg_body.num_channel; i++)
+ msg_body.channels[i] = req->channels[i]->hw_value;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal start hw-scan (channels: %u; ssids: %u; p2p: %s)\n",
+ msg_body.num_channel, msg_body.num_ssid,
+ msg_body.p2p_search ? "yes" : "no");
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_start_scan_offload failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_start_scan_offload response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_stop_scan_offload_req_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ);
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "hal stop hw-scan\n");
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_stop_scan_offload failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_stop_scan_offload response failed err=%d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
{
struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
@@ -2039,6 +2120,40 @@ static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len)
return 0;
}
+static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len)
+{
+ struct wcn36xx_hal_scan_offload_ind *rsp = buf;
+ struct cfg80211_scan_info scan_info = {};
+
+ if (len != sizeof(*rsp)) {
+ wcn36xx_warn("Corrupted delete scan indication\n");
+ return -EIO;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)", rsp->type);
+
+ switch (rsp->type) {
+ case WCN36XX_HAL_SCAN_IND_FAILED:
+ scan_info.aborted = true;
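+ /* fall through */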
+ case WCN36XX_HAL_SCAN_IND_COMPLETED:
+ mutex_lock(&wcn->scan_lock);
+ wcn->scan_req = NULL;
+ mutex_unlock(&wcn->scan_lock);
+ ieee80211_scan_completed(wcn->hw, &scan_info);
+ break;
+ case WCN36XX_HAL_SCAN_IND_STARTED:
+ case WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL:
+ case WCN36XX_HAL_SCAN_IND_DEQUEUED:
+ case WCN36XX_HAL_SCAN_IND_PREEMPTED:
+ case WCN36XX_HAL_SCAN_IND_RESTARTED:
+ break;
+ default:
+ wcn36xx_warn("Unknown scan indication type %x\n", rsp->type);
+ }
+
+ return 0;
+}
+
static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
void *buf,
size_t len)
@@ -2250,6 +2365,8 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_CH_SWITCH_RSP:
case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
case WCN36XX_HAL_8023_MULTICAST_LIST_RSP:
+ case WCN36XX_HAL_START_SCAN_OFFLOAD_RSP:
+ case WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP:
memcpy(wcn->hal_buf, buf, len);
wcn->hal_rsp_len = len;
complete(&wcn->hal_rsp_compl);
@@ -2262,6 +2379,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_MISSED_BEACON_IND:
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
case WCN36XX_HAL_PRINT_REG_INFO_IND:
+ case WCN36XX_HAL_SCAN_OFFLOAD_IND:
msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC);
if (!msg_ind) {
wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
@@ -2298,6 +2416,8 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
struct wcn36xx_hal_ind_msg,
list);
+ list_del(wcn->hal_ind_queue.next);
+ spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);
msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
@@ -2326,12 +2446,14 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
hal_ind_msg->msg,
hal_ind_msg->msg_len);
break;
+ case WCN36XX_HAL_SCAN_OFFLOAD_IND:
+ wcn36xx_smd_hw_scan_ind(wcn, hal_ind_msg->msg,
+ hal_ind_msg->msg_len);
+ break;
default:
wcn36xx_err("SMD_EVENT (%d) not supported\n",
msg_header->msg_type);
}
- list_del(wcn->hal_ind_queue.next);
- spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);
kfree(hal_ind_msg);
}
int wcn36xx_smd_open(struct wcn36xx *wcn)
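Moving the list_del() and unlock above the dispatch switch gives the standard pop-then-process shape: the element is detached while the spinlock is held, and the potentially slow handler then runs with the lock dropped. A sketch of how that pop could be factored out, using the driver's own types:

    static struct wcn36xx_hal_ind_msg *wcn36xx_pop_ind_msg(struct wcn36xx *wcn)
    {
            struct wcn36xx_hal_ind_msg *msg = NULL;
            unsigned long flags;

            spin_lock_irqsave(&wcn->hal_ind_lock, flags);
            if (!list_empty(&wcn->hal_ind_queue)) {
                    msg = list_first_entry(&wcn->hal_ind_queue,
                                           struct wcn36xx_hal_ind_msg, list);
                    list_del(&msg->list);
            }
            spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);

            return msg;     /* dispatch and kfree() outside the lock */
    }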
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 013fc9546f56..8076edf40ac8 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -65,6 +65,9 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel);
int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
enum wcn36xx_hal_sys_mode mode);
int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count);
+int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn);
int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h
index c131b5e1292f..d32c1f4e533a 100644
--- a/drivers/net/wireless/ath/wil6210/boot_loader.h
+++ b/drivers/net/wireless/ath/wil6210/boot_loader.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -39,7 +40,8 @@ struct bl_dedicated_registers_v1 {
/* valid only for version 2 and above */
__le32 bl_assert_code; /* 0x880A58 BL Assert code */
__le32 bl_assert_blink; /* 0x880A5C BL Assert Branch */
- __le32 bl_reserved[22]; /* 0x880A60 - 0x880AB4 */
+ __le32 bl_shutdown_handshake; /* 0x880A60 BL cleaner shutdown */
+ __le32 bl_reserved[21]; /* 0x880A64 - 0x880AB4 */
__le32 bl_magic_number; /* 0x880AB8 BL Magic number */
} __packed;
@@ -58,4 +60,9 @@ struct bl_dedicated_registers_v0 {
u8 mac_address[6]; /* 0x880A4c BL mac address */
} __packed;
+/* bits for bl_shutdown_handshake */
+#define BL_SHUTDOWN_HS_GRTD BIT(0)
+#define BL_SHUTDOWN_HS_RTD BIT(1)
+#define BL_SHUTDOWN_HS_PROT_VER(x) WIL_GET_BITS(x, 28, 31)
+
#endif /* BOOT_LOADER_EXPORT_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 85d5c04618eb..768f63f38341 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -901,7 +901,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
u64 *cookie)
{
const u8 *buf = params->buf;
- size_t len = params->len;
+ size_t len = params->len, total;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
bool tx_status = false;
@@ -926,7 +926,11 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
if (len < sizeof(struct ieee80211_hdr_3addr))
return -EINVAL;
- cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
+ total = sizeof(*cmd) + len;
+ if (total < len)
+ return -EINVAL;
+
+ cmd = kmalloc(total, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
goto out;
@@ -936,7 +940,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
cmd->len = cpu_to_le16(len);
memcpy(cmd->payload, buf, len);
- rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
+ rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, total,
WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
if (rc == 0)
tx_status = !evt.evt.status;
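The total < len test above catches size_t wraparound in sizeof(*cmd) + len before the allocation. Later kernels centralize such checks in <linux/overflow.h>; an equivalent sketch (an alternative, not what this patch uses):

    #include <linux/overflow.h>

    size_t total;

    if (check_add_overflow(sizeof(*cmd), len, &total))
            return -EINVAL;
    cmd = kmalloc(total, GFP_KERNEL);

    /* or, since payload is the trailing flexible array member: */
    cmd = kmalloc(struct_size(cmd, payload, len), GFP_KERNEL);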
@@ -952,9 +956,8 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- struct wireless_dev *wdev = wil_to_wdev(wil);
- wdev->preset_chandef = *chandef;
+ wil->monitor_chandef = *chandef;
return 0;
}
@@ -1727,9 +1730,12 @@ static int wil_cfg80211_suspend(struct wiphy *wiphy,
wil_dbg_pm(wil, "suspending\n");
- wil_p2p_stop_discovery(wil);
-
+ mutex_lock(&wil->mutex);
+ mutex_lock(&wil->p2p_wdev_mutex);
+ wil_p2p_stop_radio_operations(wil);
wil_abort_scan(wil, true);
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ mutex_unlock(&wil->mutex);
out:
return rc;
@@ -1744,6 +1750,69 @@ static int wil_cfg80211_resume(struct wiphy *wiphy)
return 0;
}
+static int
+wil_cfg80211_sched_scan_start(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int i, rc;
+
+ wil_dbg_misc(wil,
+ "sched scan start: n_ssids %d, ie_len %zu, flags 0x%x\n",
+ request->n_ssids, request->ie_len, request->flags);
+ for (i = 0; i < request->n_ssids; i++) {
+ wil_dbg_misc(wil, "SSID[%d]:", i);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ request->ssids[i].ssid,
+ request->ssids[i].ssid_len, true);
+ }
+ wil_dbg_misc(wil, "channels:");
+ for (i = 0; i < request->n_channels; i++)
+ wil_dbg_misc(wil, " %d%s", request->channels[i]->hw_value,
+ i == request->n_channels - 1 ? "\n" : "");
+ wil_dbg_misc(wil, "n_match_sets %d, min_rssi_thold %d, delay %d\n",
+ request->n_match_sets, request->min_rssi_thold,
+ request->delay);
+ for (i = 0; i < request->n_match_sets; i++) {
+ struct cfg80211_match_set *ms = &request->match_sets[i];
+
+ wil_dbg_misc(wil, "MATCHSET[%d]: rssi_thold %d\n",
+ i, ms->rssi_thold);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ ms->ssid.ssid,
+ ms->ssid.ssid_len, true);
+ }
+ wil_dbg_misc(wil, "n_scan_plans %d\n", request->n_scan_plans);
+ for (i = 0; i < request->n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *sp = &request->scan_plans[i];
+
+ wil_dbg_misc(wil, "SCAN PLAN[%d]: interval %d iterations %d\n",
+ i, sp->interval, sp->iterations);
+ }
+
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
+ if (rc)
+ return rc;
+ return wmi_start_sched_scan(wil, request);
+}
+
+static int
+wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev,
+ u64 reqid)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ rc = wmi_stop_sched_scan(wil);
+ /* The device returns an error if it thinks PNO is already stopped;
+ * ignore the return code so user space and the driver get back in sync.
+ */
+ wil_dbg_misc(wil, "sched scan stopped (%d)\n", rc);
+
+ return 0;
+}
+
static const struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
@@ -1777,6 +1846,8 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
.set_power_mgmt = wil_cfg80211_set_power_mgmt,
.suspend = wil_cfg80211_suspend,
.resume = wil_cfg80211_resume,
+ .sched_scan_start = wil_cfg80211_sched_scan_start,
+ .sched_scan_stop = wil_cfg80211_sched_scan_stop,
};
static void wil_wiphy_init(struct wiphy *wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index e58dc6dc1f9c..4a4888246e8c 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -242,12 +242,19 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
+ int ret;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, tx));
wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, rx));
+ wil_pm_runtime_put(wil);
+
return 0;
}
@@ -265,15 +272,37 @@ static const struct file_operations fops_mbox = {
static int wil_debugfs_iomem_x32_set(void *data, u64 val)
{
- writel(val, (void __iomem *)data);
+ struct wil_debugfs_iomem_data *d = data;
+ struct wil6210_priv *wil = d->wil;
+ int ret;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
+
+ writel(val, (void __iomem *)d->offset);
wmb(); /* make sure write propagated to HW */
+ wil_pm_runtime_put(wil);
+
return 0;
}
static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
{
- *val = readl((void __iomem *)data);
+ struct wil_debugfs_iomem_data *d = data;
+ struct wil6210_priv *wil = d->wil;
+ int ret;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
+
+ *val = readl((void __iomem *)d->offset);
+
+ wil_pm_runtime_put(wil);
return 0;
}
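Each debugfs register accessor now brackets its MMIO access with wil_pm_runtime_get()/wil_pm_runtime_put(), so a runtime-suspended device is resumed before the register is touched and released afterwards. The recurring shape, condensed into a hypothetical helper:

    static int wil_iomem_read(struct wil6210_priv *wil,
                              void __iomem *addr, u32 *val)
    {
            int ret;

            ret = wil_pm_runtime_get(wil);  /* resumes device if suspended */
            if (ret < 0)
                    return ret;

            *val = readl(addr);

            wil_pm_runtime_put(wil);
            return 0;
    }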
@@ -284,10 +313,21 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
umode_t mode,
struct dentry *parent,
- void *value)
+ void *value,
+ struct wil6210_priv *wil)
{
- return debugfs_create_file(name, mode, parent, value,
- &fops_iomem_x32);
+ struct dentry *file;
+ struct wil_debugfs_iomem_data *data = &wil->dbg_data.data_arr[
+ wil->dbg_data.iomem_data_count];
+
+ data->wil = wil;
+ data->offset = value;
+
+ file = debugfs_create_file(name, mode, parent, data, &fops_iomem_x32);
+ if (!IS_ERR_OR_NULL(file))
+ wil->dbg_data.iomem_data_count++;
+
+ return file;
}
static int wil_debugfs_ulong_set(void *data, u64 val)
@@ -346,7 +386,8 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
case doff_io32:
f = wil_debugfs_create_iomem_x32(tbl[i].name,
tbl[i].mode, dbg,
- base + tbl[i].off);
+ base + tbl[i].off,
+ wil);
break;
case doff_u8:
f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
@@ -475,13 +516,22 @@ static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
static int wil_memread_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
- void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+ void __iomem *a;
+ int ret;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
+
+ a = wmi_buffer(wil, cpu_to_le32(mem_addr));
if (a)
seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
else
seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
+ wil_pm_runtime_put(wil);
+
return 0;
}
@@ -502,10 +552,12 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
{
enum { max_count = 4096 };
struct wil_blob_wrapper *wil_blob = file->private_data;
+ struct wil6210_priv *wil = wil_blob->wil;
loff_t pos = *ppos;
size_t available = wil_blob->blob.size;
void *buf;
size_t ret;
+ int rc;
if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
test_bit(wil_status_suspended, wil_blob->wil->status))
@@ -526,10 +578,19 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
+ rc = wil_pm_runtime_get(wil);
+ if (rc < 0) {
+ kfree(buf);
+ return rc;
+ }
+
wil_memcpy_fromio_32(buf, (const void __iomem *)
wil_blob->blob.data + pos, count);
ret = copy_to_user(user_buf, buf, count);
+
+ wil_pm_runtime_put(wil);
+
kfree(buf);
if (ret == count)
return -EFAULT;
@@ -808,7 +869,6 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
params.buf = frame;
params.len = len;
- params.chan = wdev->preset_chandef.chan;
rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);
@@ -1571,8 +1631,6 @@ static ssize_t wil_write_suspend_stats(struct file *file,
struct wil6210_priv *wil = file->private_data;
memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
- wil->suspend_stats.min_suspend_time = ULONG_MAX;
- wil->suspend_stats.collection_start = ktime_get();
return len;
}
@@ -1582,33 +1640,41 @@ static ssize_t wil_read_suspend_stats(struct file *file,
size_t count, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
- static char text[400];
- int n;
- unsigned long long stats_collection_time =
- ktime_to_us(ktime_sub(ktime_get(),
- wil->suspend_stats.collection_start));
+ char *text;
+ int n, ret, text_size = 500;
- n = snprintf(text, sizeof(text),
- "Suspend statistics:\n"
+ text = kmalloc(text_size, GFP_KERNEL);
+ if (!text)
+ return -ENOMEM;
+
+ n = snprintf(text, text_size,
+ "Radio on suspend statistics:\n"
+ "successful suspends:%ld failed suspends:%ld\n"
+ "successful resumes:%ld failed resumes:%ld\n"
+ "rejected by device:%ld\n"
+ "Radio off suspend statistics:\n"
"successful suspends:%ld failed suspends:%ld\n"
"successful resumes:%ld failed resumes:%ld\n"
- "rejected by host:%ld rejected by device:%ld\n"
- "total suspend time:%lld min suspend time:%lld\n"
- "max suspend time:%lld stats collection time: %lld\n",
- wil->suspend_stats.successful_suspends,
- wil->suspend_stats.failed_suspends,
- wil->suspend_stats.successful_resumes,
- wil->suspend_stats.failed_resumes,
- wil->suspend_stats.rejected_by_host,
+ "General statistics:\n"
+ "rejected by host:%ld\n",
+ wil->suspend_stats.r_on.successful_suspends,
+ wil->suspend_stats.r_on.failed_suspends,
+ wil->suspend_stats.r_on.successful_resumes,
+ wil->suspend_stats.r_on.failed_resumes,
wil->suspend_stats.rejected_by_device,
- wil->suspend_stats.total_suspend_time,
- wil->suspend_stats.min_suspend_time,
- wil->suspend_stats.max_suspend_time,
- stats_collection_time);
+ wil->suspend_stats.r_off.successful_suspends,
+ wil->suspend_stats.r_off.failed_suspends,
+ wil->suspend_stats.r_off.successful_resumes,
+ wil->suspend_stats.r_off.failed_resumes,
+ wil->suspend_stats.rejected_by_host);
+
+ n = min_t(int, n, text_size);
- n = min_t(int, n, sizeof(text));
+ ret = simple_read_from_buffer(user_buf, count, ppos, text, n);
- return simple_read_from_buffer(user_buf, count, ppos, text, n);
+ kfree(text);
+
+ return ret;
}
static const struct file_operations fops_suspend_stats = {
@@ -1736,14 +1802,31 @@ static const struct dbg_off dbg_statics[] = {
{},
};
+static const int dbg_off_count = 4 * (ARRAY_SIZE(isr_off) - 1) +
+ ARRAY_SIZE(dbg_wil_regs) - 1 +
+ ARRAY_SIZE(pseudo_isr_off) - 1 +
+ ARRAY_SIZE(lgc_itr_cnt_off) - 1 +
+ ARRAY_SIZE(tx_itr_cnt_off) - 1 +
+ ARRAY_SIZE(rx_itr_cnt_off) - 1;
+
int wil6210_debugfs_init(struct wil6210_priv *wil)
{
struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
wil_to_wiphy(wil)->debugfsdir);
-
if (IS_ERR_OR_NULL(dbg))
return -ENODEV;
+ wil->dbg_data.data_arr = kcalloc(dbg_off_count,
+ sizeof(struct wil_debugfs_iomem_data),
+ GFP_KERNEL);
+ if (!wil->dbg_data.data_arr) {
+ debugfs_remove_recursive(dbg);
+ wil->debug = NULL;
+ return -ENOMEM;
+ }
+
+ wil->dbg_data.iomem_data_count = 0;
+
wil_pmc_init(wil);
wil6210_debugfs_init_files(wil, dbg);
@@ -1758,8 +1841,6 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
wil6210_debugfs_create_ITR_CNT(wil, dbg);
- wil->suspend_stats.collection_start = ktime_get();
-
return 0;
}
@@ -1768,6 +1849,8 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil)
debugfs_remove_recursive(wil->debug);
wil->debug = NULL;
+ kfree(wil->dbg_data.data_arr);
+
/* free pmc memory without sending command to fw, as it will
* be reset on the way down anyway
*/
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index adcfef4dabf7..66200f616a37 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -47,9 +47,14 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
struct wil6210_priv *wil = ndev_to_wil(ndev);
u32 tx_itr_en, tx_itr_val = 0;
u32 rx_itr_en, rx_itr_val = 0;
+ int ret;
wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
+
tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
@@ -58,6 +63,8 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
+ wil_pm_runtime_put(wil);
+
cp->tx_coalesce_usecs = tx_itr_val;
cp->rx_coalesce_usecs = rx_itr_val;
return 0;
@@ -67,6 +74,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
struct ethtool_coalesce *cp)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
+ int ret;
wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
@@ -86,8 +94,15 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
wil->tx_max_burst_duration = cp->tx_coalesce_usecs;
wil->rx_max_burst_duration = cp->rx_coalesce_usecs;
+
+ ret = wil_pm_runtime_get(wil);
+ if (ret < 0)
+ return ret;
+
wil_configure_interrupt_moderation(wil);
+ wil_pm_runtime_put(wil);
+
return 0;
out_bad:
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
index 2f2b910501ba..2c7b24f61587 100644
--- a/drivers/net/wireless/ath/wil6210/fw.h
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -58,15 +59,30 @@ struct wil_fw_record_comment { /* type == wil_fw_type_comment */
u8 data[0]; /* free-form data [data_size], see above */
} __packed;
+/* Comment header - common for all comment record types */
+struct wil_fw_record_comment_hdr {
+ __le32 magic;
+};
+
/* FW capabilities encoded inside a comment record */
#define WIL_FW_CAPABILITIES_MAGIC (0xabcddcba)
struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */
/* identifies capabilities record */
- __le32 magic;
+ struct wil_fw_record_comment_hdr hdr;
/* capabilities (variable size), see enum wmi_fw_capability */
u8 capabilities[0];
};
+/* brd file info encoded inside a comment record */
+#define WIL_BRD_FILE_MAGIC (0xabcddcbb)
+struct wil_fw_record_brd_file { /* type == wil_fw_type_comment */
+ /* identifies brd file record */
+ struct wil_fw_record_comment_hdr hdr;
+ __le32 version;
+ __le32 base_addr;
+ __le32 max_size_bytes;
+} __packed;
+
/* perform action
* data_size = @head.size - offsetof(struct wil_fw_record_action, data)
*/
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index e01acac88825..914c0106e94b 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -26,14 +27,17 @@
prefix_type, rowsize, \
groupsize, buf, len, ascii)
-#define FW_ADDR_CHECK(ioaddr, val, msg) do { \
- ioaddr = wmi_buffer(wil, val); \
- if (!ioaddr) { \
- wil_err_fw(wil, "bad " msg ": 0x%08x\n", \
- le32_to_cpu(val)); \
- return -EINVAL; \
- } \
- } while (0)
+static bool wil_fw_addr_check(struct wil6210_priv *wil,
+ void __iomem **ioaddr, __le32 val,
+ u32 size, const char *msg)
+{
+ *ioaddr = wmi_buffer_block(wil, val, size);
+ if (!(*ioaddr)) {
+ wil_err_fw(wil, "bad %s: 0x%08x\n", msg, le32_to_cpu(val));
+ return false;
+ }
+ return true;
+}
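Replacing the FW_ADDR_CHECK() macro with wil_fw_addr_check() lets callers pass the transfer size, so wmi_buffer_block() can validate the whole destination range rather than only its first byte. A generic, overflow-safe range test of that kind (hypothetical bounds, not the driver's actual mapping logic):

    static bool range_ok(u32 addr, u32 size, u32 base, u32 limit)
    {
            /* addr within [base, limit) and size fits without wrapping */
            return addr >= base && addr < limit && size <= limit - addr;
    }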
/**
* wil_fw_verify - verify firmware file validity
@@ -124,14 +128,6 @@ static int fw_ignore_section(struct wil6210_priv *wil, const void *data,
return 0;
}
-static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
- size_t size)
-{
- wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, data, size, true);
-
- return 0;
-}
-
static int
fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
size_t size)
@@ -139,9 +135,11 @@ fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
const struct wil_fw_record_capabilities *rec = data;
size_t capa_size;
- if (size < sizeof(*rec) ||
- le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC)
+ if (size < sizeof(*rec)) {
+ wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1,
+ data, size, true);
return 0;
+ }
capa_size = size - offsetof(struct wil_fw_record_capabilities,
capabilities);
@@ -153,8 +151,56 @@ fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
return 0;
}
-static int fw_handle_data(struct wil6210_priv *wil, const void *data,
- size_t size)
+static int
+fw_handle_brd_file(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_brd_file *rec = data;
+
+ if (size < sizeof(*rec)) {
+ wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1,
+ data, size, true);
+ return 0;
+ }
+
+ wil->brd_file_addr = le32_to_cpu(rec->base_addr);
+ wil->brd_file_max_size = le32_to_cpu(rec->max_size_bytes);
+
+ wil_dbg_fw(wil, "brd_file_addr 0x%x, brd_file_max_size %d\n",
+ wil->brd_file_addr, wil->brd_file_max_size);
+
+ return 0;
+}
+
+static int
+fw_handle_comment(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_comment_hdr *hdr = data;
+ u32 magic;
+ int rc = 0;
+
+ if (size < sizeof(*hdr))
+ return 0;
+
+ magic = le32_to_cpu(hdr->magic);
+
+ switch (magic) {
+ case WIL_FW_CAPABILITIES_MAGIC:
+ wil_dbg_fw(wil, "magic is WIL_FW_CAPABILITIES_MAGIC\n");
+ rc = fw_handle_capabilities(wil, data, size);
+ break;
+ case WIL_BRD_FILE_MAGIC:
+ wil_dbg_fw(wil, "magic is WIL_BRD_FILE_MAGIC\n");
+ rc = fw_handle_brd_file(wil, data, size);
+ break;
+ }
+
+ return rc;
+}
+
+static int __fw_handle_data(struct wil6210_priv *wil, const void *data,
+ size_t size, __le32 addr)
{
const struct wil_fw_record_data *d = data;
void __iomem *dst;
@@ -165,15 +211,23 @@ static int fw_handle_data(struct wil6210_priv *wil, const void *data,
return -EINVAL;
}
- FW_ADDR_CHECK(dst, d->addr, "address");
- wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(d->addr),
- s);
+ if (!wil_fw_addr_check(wil, &dst, addr, s, "address"))
+ return -EINVAL;
+ wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(addr), s);
wil_memcpy_toio_32(dst, d->data, s);
wmb(); /* finish before processing next record */
return 0;
}
+static int fw_handle_data(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_data *d = data;
+
+ return __fw_handle_data(wil, data, size, d->addr);
+}
+
static int fw_handle_fill(struct wil6210_priv *wil, const void *data,
size_t size)
{
@@ -197,7 +251,8 @@ static int fw_handle_fill(struct wil6210_priv *wil, const void *data,
return -EINVAL;
}
- FW_ADDR_CHECK(dst, d->addr, "address");
+ if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+ return -EINVAL;
v = le32_to_cpu(d->value);
wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n",
@@ -253,7 +308,8 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
u32 v = le32_to_cpu(block[i].value);
u32 x, y;
- FW_ADDR_CHECK(dst, block[i].addr, "address");
+ if (!wil_fw_addr_check(wil, &dst, block[i].addr, 0, "address"))
+ return -EINVAL;
x = readl(dst);
y = (x & m) | (v & ~m);
@@ -319,10 +375,15 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n",
n, gw_cmd);
- FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
- FW_ADDR_CHECK(gwa_val, d->gateway_value_addr, "gateway_value_addr");
- FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
- FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+ if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+ "gateway_addr_addr") ||
+ !wil_fw_addr_check(wil, &gwa_val, d->gateway_value_addr, 0,
+ "gateway_value_addr") ||
+ !wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+ "gateway_cmd_addr") ||
+ !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+ "gateway_ctrl_address"))
+ return -EINVAL;
wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x"
" cmd 0x%08x ctl 0x%08x\n",
@@ -378,12 +439,19 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n",
n, gw_cmd);
- FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
+ if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+ "gateway_addr_addr"))
+ return -EINVAL;
for (k = 0; k < ARRAY_SIZE(block->value); k++)
- FW_ADDR_CHECK(gwa_val[k], d->gateway_value_addr[k],
- "gateway_value_addr");
- FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
- FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+ if (!wil_fw_addr_check(wil, &gwa_val[k],
+ d->gateway_value_addr[k],
+ 0, "gateway_value_addr"))
+ return -EINVAL;
+ if (!wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+ "gateway_cmd_addr") ||
+ !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+ "gateway_ctrl_address"))
+ return -EINVAL;
wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n",
le32_to_cpu(d->gateway_addr_addr),
@@ -422,7 +490,7 @@ static const struct {
int (*parse_handler)(struct wil6210_priv *wil, const void *data,
size_t size);
} wil_fw_handlers[] = {
- {wil_fw_type_comment, fw_handle_comment, fw_handle_capabilities},
+ {wil_fw_type_comment, fw_handle_comment, fw_handle_comment},
{wil_fw_type_data, fw_handle_data, fw_ignore_section},
{wil_fw_type_fill, fw_handle_fill, fw_ignore_section},
/* wil_fw_type_action */
@@ -517,7 +585,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name,
rc = request_firmware(&fw, name, wil_to_dev(wil));
if (rc) {
- wil_err_fw(wil, "Failed to load firmware %s\n", name);
+ wil_err_fw(wil, "Failed to load firmware %s rc %d\n", name, rc);
return rc;
}
wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, fw->size);
@@ -539,6 +607,100 @@ out:
}
/**
+ * wil_brd_process - process section from BRD file
+ *
+ * Return error code
+ */
+static int wil_brd_process(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ int rc = 0;
+ const struct wil_fw_record_head *hdr = data;
+ size_t s, hdr_sz;
+ u16 type;
+
+ /* Assuming the board file includes only one header record and one data
+ * record. Each record starts with wil_fw_record_head.
+ */
+ if (size < sizeof(*hdr))
+ return -EINVAL;
+ s = sizeof(*hdr) + le32_to_cpu(hdr->size);
+ if (s > size)
+ return -EINVAL;
+
+ /* Skip the header record and handle the data record */
+ hdr = (const void *)hdr + s;
+ size -= s;
+ if (size < sizeof(*hdr))
+ return -EINVAL;
+ hdr_sz = le32_to_cpu(hdr->size);
+
+ if (wil->brd_file_max_size && hdr_sz > wil->brd_file_max_size)
+ return -EINVAL;
+ if (sizeof(*hdr) + hdr_sz > size)
+ return -EINVAL;
+ if (hdr_sz % 4) {
+ wil_err_fw(wil, "unaligned record size: %zu\n",
+ hdr_sz);
+ return -EINVAL;
+ }
+ type = le16_to_cpu(hdr->type);
+ if (type != wil_fw_type_data) {
+ wil_err_fw(wil, "invalid record type for board file: %d\n",
+ type);
+ return -EINVAL;
+ }
+ if (hdr_sz < sizeof(struct wil_fw_record_data)) {
+ wil_err_fw(wil, "data record too short: %zu\n", hdr_sz);
+ return -EINVAL;
+ }
+
+ wil_dbg_fw(wil, "using addr from fw file: [0x%08x]\n",
+ wil->brd_file_addr);
+
+ rc = __fw_handle_data(wil, &hdr[1], hdr_sz,
+ cpu_to_le32(wil->brd_file_addr));
+
+ return rc;
+}
+
+/**
+ * wil_request_board - Request board file
+ *
+ * Request the board image file.
+ * The board file address and max size are read from the FW file
+ * during initialization.
+ * The brd file must include one header record and one data record.
+ *
+ * Return error code
+ */
+int wil_request_board(struct wil6210_priv *wil, const char *name)
+{
+ int rc, dlen;
+ const struct firmware *brd;
+
+ rc = request_firmware(&brd, name, wil_to_dev(wil));
+ if (rc) {
+ wil_err_fw(wil, "Failed to load brd %s\n", name);
+ return rc;
+ }
+ wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, brd->size);
+
+ /* Verify the header */
+ dlen = wil_fw_verify(wil, brd->data, brd->size);
+ if (dlen < 0) {
+ rc = dlen;
+ goto out;
+ }
+ /* Process the data record */
+ rc = wil_brd_process(wil, brd->data, dlen);
+
+out:
+ release_firmware(brd);
+ return rc;
+}
+
+/**
* wil_fw_verify_file_exists - checks if firmware file exist
*
* @wil: driver context
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 59def4f3fcf3..1835187ea075 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -358,6 +359,25 @@ static void wil_cache_mbox_regs(struct wil6210_priv *wil)
wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
}
+static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
+{
+ size_t min_size = sizeof(struct wil6210_mbox_hdr) +
+ sizeof(struct wmi_cmd_hdr);
+
+ if (wil->mbox_ctl.rx.entry_size < min_size) {
+ wil_err(wil, "rx mbox entry too small (%d)\n",
+ wil->mbox_ctl.rx.entry_size);
+ return false;
+ }
+ if (wil->mbox_ctl.tx.entry_size < min_size) {
+ wil_err(wil, "tx mbox entry too small (%d)\n",
+ wil->mbox_ctl.tx.entry_size);
+ return false;
+ }
+
+ return true;
+}
+
static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@@ -376,8 +396,9 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
wil6210_mask_irq_misc(wil, false);
if (isr & ISR_MISC_FW_ERROR) {
- u32 fw_assert_code = wil_r(wil, RGF_FW_ASSERT_CODE);
- u32 ucode_assert_code = wil_r(wil, RGF_UCODE_ASSERT_CODE);
+ u32 fw_assert_code = wil_r(wil, wil->rgf_fw_assert_code_addr);
+ u32 ucode_assert_code =
+ wil_r(wil, wil->rgf_ucode_assert_code_addr);
wil_err(wil,
"Firmware error detected, assert codes FW 0x%08x, UCODE 0x%08x\n",
@@ -393,7 +414,8 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
if (isr & ISR_MISC_FW_READY) {
wil_dbg_irq(wil, "IRQ: FW ready\n");
wil_cache_mbox_regs(wil);
- set_bit(wil_status_mbox_ready, wil->status);
+ if (wil_validate_mbox_regs(wil))
+ set_bit(wil_status_mbox_ready, wil->status);
/**
* Actual FW ready indicated by the
* WMI_FW_READY_EVENTID
@@ -545,7 +567,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
return IRQ_NONE;
- /* FIXME: IRQ mask debug */
+ /* IRQ mask debug */
if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
return IRQ_NONE;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 885924abf61c..0c61a6c13991 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -579,7 +580,6 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
WMI_WAKEUP_TRIGGER_BCAST;
memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
- wil->suspend_stats.min_suspend_time = ULONG_MAX;
wil->vring_idle_trsh = 16;
return 0;
@@ -638,6 +638,98 @@ void wil_priv_deinit(struct wil6210_priv *wil)
destroy_workqueue(wil->wmi_wq);
}
+static void wil_shutdown_bl(struct wil6210_priv *wil)
+{
+ u32 val;
+
+ wil_s(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake), BL_SHUTDOWN_HS_GRTD);
+
+ usleep_range(100, 150);
+
+ val = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake));
+ if (val & BL_SHUTDOWN_HS_RTD) {
+ wil_dbg_misc(wil, "BL is ready for halt\n");
+ return;
+ }
+
+ wil_err(wil, "BL did not report ready for halt\n");
+}
+
+/* this format is used by ARC embedded CPU for instruction memory */
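+/* e.g. 0x12345678 is stored as 0x56781234: the two 16-bit halves swap */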
+static inline u32 ARC_me_imm32(u32 d)
+{
+ return ((d & 0xffff0000) >> 16) | ((d & 0x0000ffff) << 16);
+}
+
+/* defines access to interrupt vectors for wil_freeze_bl */
+#define ARC_IRQ_VECTOR_OFFSET(N) ((N) * 8)
+/* ARC long jump instruction */
+#define ARC_JAL_INST (0x20200f80)
+
+static void wil_freeze_bl(struct wil6210_priv *wil)
+{
+ u32 jal, upc, saved;
+ u32 ivt3 = ARC_IRQ_VECTOR_OFFSET(3);
+
+ jal = wil_r(wil, wil->iccm_base + ivt3);
+ if (jal != ARC_me_imm32(ARC_JAL_INST)) {
+ wil_dbg_misc(wil, "invalid IVT entry found, skipping\n");
+ return;
+ }
+
+ /* prevent the target from entering deep sleep
+ * and disabling memory access
+ */
+ saved = wil_r(wil, RGF_USER_USAGE_8);
+ wil_w(wil, RGF_USER_USAGE_8, saved | BIT_USER_PREVENT_DEEP_SLEEP);
+ usleep_range(20, 25); /* let the BL process the bit */
+
+ /* redirect to endless loop in the INT_L1 context and let it trap */
+ wil_w(wil, wil->iccm_base + ivt3 + 4, ARC_me_imm32(ivt3));
+ usleep_range(20, 25); /* let the BL get into the trap */
+
+ /* verify the BL is frozen */
+ upc = wil_r(wil, RGF_USER_CPU_PC);
+ if (upc < ivt3 || (upc > (ivt3 + 8)))
+ wil_dbg_misc(wil, "BL freeze failed, PC=0x%08X\n", upc);
+
+ wil_w(wil, RGF_USER_USAGE_8, saved);
+}
+
+static void wil_bl_prepare_halt(struct wil6210_priv *wil)
+{
+ u32 tmp, ver;
+
+ /* Before halting the device CPU, the driver must make sure the BL is
+ * not accessing host memory. This is done differently depending on the
+ * BL version:
+ * 1. For very old BL versions the procedure is skipped
+ * (not supported).
+ * 2. For old BL versions we use a special trick to freeze the BL.
+ * 3. For new BL versions we shut down the BL using a handshake procedure.
+ */
+ tmp = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_struct_version));
+ if (!tmp) {
+ wil_dbg_misc(wil, "old BL, skipping halt preperation\n");
+ return;
+ }
+
+ tmp = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake));
+ ver = BL_SHUTDOWN_HS_PROT_VER(tmp);
+
+ if (ver > 0)
+ wil_shutdown_bl(wil);
+ else
+ wil_freeze_bl(wil);
+}
+
static inline void wil_halt_cpu(struct wil6210_priv *wil)
{
wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
@@ -671,7 +763,7 @@ static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
}
}
-static int wil_target_reset(struct wil6210_priv *wil)
+static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
{
int delay = 0;
u32 x, x1 = 0;
@@ -685,9 +777,16 @@ static int wil_target_reset(struct wil6210_priv *wil)
wil_halt_cpu(wil);
- /* clear all boot loader "ready" bits */
- wil_w(wil, RGF_USER_BL +
- offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0);
+ if (!no_flash) {
+ /* clear all boot loader "ready" bits */
+ wil_w(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_ready), 0);
+ /* this should be safe to write even with old BLs */
+ wil_w(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v1,
+ bl_shutdown_handshake), 0);
+ }
/* Clear Fw Download notification */
wil_c(wil, RGF_USER_USAGE_6, BIT(0));
@@ -728,21 +827,33 @@ static int wil_target_reset(struct wil6210_priv *wil)
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
/* wait until device ready. typical time is 20..80 msec */
- do {
- msleep(RST_DELAY);
- x = wil_r(wil, RGF_USER_BL +
- offsetof(struct bl_dedicated_registers_v0,
- boot_loader_ready));
- if (x1 != x) {
- wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
- x1 = x;
- }
- if (delay++ > RST_COUNT) {
- wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
- x);
- return -ETIME;
- }
- } while (x != BL_READY);
+ if (no_flash)
+ do {
+ msleep(RST_DELAY);
+ x = wil_r(wil, USER_EXT_USER_PMU_3);
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "Reset not completed, PMU_3 0x%08x\n",
+ x);
+ return -ETIME;
+ }
+ } while ((x & BIT_PMU_DEVICE_RDY) == 0);
+ else
+ do {
+ msleep(RST_DELAY);
+ x = wil_r(wil, RGF_USER_BL +
+ offsetof(struct bl_dedicated_registers_v0,
+ boot_loader_ready));
+ if (x1 != x) {
+ wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
+ x1, x);
+ x1 = x;
+ }
+ if (delay++ > RST_COUNT) {
+ wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
+ x);
+ return -ETIME;
+ }
+ } while (x != BL_READY);
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
@@ -750,6 +861,21 @@ static int wil_target_reset(struct wil6210_priv *wil)
wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
+ if (no_flash) {
+ /* Reset OTP HW vectors to fit 40MHz */
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME3, 0x1);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME4, 0x20027);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME5, 0x30003);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME6, 0x20002);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME7, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME8, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME9, 0x60001);
+ wil_w(wil, RGF_USER_XPM_IFC_RD_TIME10, 0x60001);
+ wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57);
+ }
+
wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
return 0;
}
@@ -760,6 +886,8 @@ static void wil_collect_fw_info(struct wil6210_priv *wil)
u8 retry_short;
int rc;
+ wil_refresh_fw_capabilities(wil);
+
rc = wmi_get_mgmt_retry(wil, &retry_short);
if (!rc) {
wiphy->retry_short = retry_short;
@@ -767,6 +895,43 @@ static void wil_collect_fw_info(struct wil6210_priv *wil)
}
}
+void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
+{
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ int features;
+
+ wil->keep_radio_on_during_sleep =
+ test_bit(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND,
+ wil->platform_capa) &&
+ test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
+
+ wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
+ wil->keep_radio_on_during_sleep);
+
+ if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ else
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+
+ if (test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) {
+ wiphy->max_sched_scan_reqs = 1;
+ wiphy->max_sched_scan_ssids = WMI_MAX_PNO_SSID_NUM;
+ wiphy->max_match_sets = WMI_MAX_PNO_SSID_NUM;
+ wiphy->max_sched_scan_ie_len = WMI_MAX_IE_LEN;
+ wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM;
+ }
+
+ if (wil->platform_ops.set_features) {
+ features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
+ wil->fw_capabilities) &&
+ test_bit(WIL_PLATFORM_CAPA_EXT_CLK,
+ wil->platform_capa)) ?
+ BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0;
+
+ wil->platform_ops.set_features(wil->platform_handle, features);
+ }
+}
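wil_refresh_fw_capabilities() is now called from two places: at probe time, after the FW file is parsed for capability flags (see the pcie_bus.c hunk below), and again from wil_collect_fw_info() once the firmware is actually running, so the wiphy settings and platform features always track the firmware in effect.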
+
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
{
le32_to_cpus(&r->base);
@@ -868,6 +1033,27 @@ static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err)
}
}
+static int wil_get_otp_info(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ u8 mac[8];
+
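+ /* wil_memcpy_fromio_32() copies in 32-bit words, so read 8 bytes;
+ * the MAC itself is 6 bytes, the extra 2 presumably pad the copy
+ * to a word boundary.
+ */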
+ wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(RGF_OTP_MAC),
+ sizeof(mac));
+ if (!is_valid_ether_addr(mac)) {
+ wil_err(wil, "Invalid MAC %pM\n", mac);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(ndev->perm_addr, mac);
+ ether_addr_copy(wiphy->perm_addr, mac);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ ether_addr_copy(ndev->dev_addr, mac);
+
+ return 0;
+}
+
static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
{
ulong to = msecs_to_jiffies(1000);
@@ -960,6 +1146,8 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
int wil_reset(struct wil6210_priv *wil, bool load_fw)
{
int rc;
+ unsigned long status_flags = BIT(wil_status_resetting);
+ int no_flash;
wil_dbg_misc(wil, "reset\n");
@@ -980,6 +1168,16 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (wil->hw_version == HW_VER_UNKNOWN)
return -ENODEV;
+ if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa)) {
+ wil_dbg_misc(wil, "Notify FW to set T_POWER_ON=0\n");
+ wil_s(wil, RGF_USER_USAGE_8, BIT_USER_SUPPORT_T_POWER_ON_0);
+ }
+
+ if (test_bit(WIL_PLATFORM_CAPA_EXT_CLK, wil->platform_capa)) {
+ wil_dbg_misc(wil, "Notify FW on ext clock configuration\n");
+ wil_s(wil, RGF_USER_USAGE_8, BIT_USER_EXT_CLK);
+ }
+
if (wil->platform_ops.notify) {
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_PRE_RESET);
@@ -989,6 +1187,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
}
set_bit(wil_status_resetting, wil->status);
+ if (test_bit(wil_status_collecting_dumps, wil->status)) {
+ /* Device is collecting a crash dump; cancel this reset.
+ * A new reset will take place once the dump collection completes.
+ */
+ wil_dbg_misc(wil, "reject reset while collecting crash dump\n");
+ rc = -EBUSY;
+ goto out;
+ }
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
@@ -1003,7 +1209,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
- bitmap_zero(wil->status, wil_status_last);
+ if (test_bit(wil_status_suspending, wil->status))
+ status_flags |= BIT(wil_status_suspending);
+ bitmap_and(wil->status, wil->status, &status_flags,
+ wil_status_last);
+ wil_dbg_misc(wil, "wil->status (0x%lx)\n", *wil->status);
mutex_unlock(&wil->wmi_mutex);
wil_mask_irq(wil);
@@ -1013,37 +1223,53 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
flush_workqueue(wil->wq_service);
flush_workqueue(wil->wmi_wq);
- wil_bl_crash_info(wil, false);
+ no_flash = test_bit(hw_capa_no_flash, wil->hw_capa);
+ if (!no_flash)
+ wil_bl_crash_info(wil, false);
wil_disable_irq(wil);
- rc = wil_target_reset(wil);
+ rc = wil_target_reset(wil, no_flash);
wil6210_clear_irq(wil);
wil_enable_irq(wil);
wil_rx_fini(wil);
if (rc) {
- wil_bl_crash_info(wil, true);
- return rc;
+ if (!no_flash)
+ wil_bl_crash_info(wil, true);
+ goto out;
}
- rc = wil_get_bl_info(wil);
- if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
- rc = 0;
+ if (no_flash) {
+ rc = wil_get_otp_info(wil);
+ } else {
+ rc = wil_get_bl_info(wil);
+ if (rc == -EAGAIN && !load_fw)
+ /* ignore RF error if not going up */
+ rc = 0;
+ }
if (rc)
- return rc;
+ goto out;
wil_set_oob_mode(wil, oob_mode);
if (load_fw) {
wil_info(wil, "Use firmware <%s> + board <%s>\n",
wil->wil_fw_name, WIL_BOARD_FILE_NAME);
+ if (!no_flash)
+ wil_bl_prepare_halt(wil);
+
wil_halt_cpu(wil);
memset(wil->fw_version, 0, sizeof(wil->fw_version));
/* Loading f/w from the file */
rc = wil_request_firmware(wil, wil->wil_fw_name, true);
if (rc)
- return rc;
- rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true);
+ goto out;
+ if (wil->brd_file_addr)
+ rc = wil_request_board(wil, WIL_BOARD_FILE_NAME);
+ else
+ rc = wil_request_firmware(wil,
+ WIL_BOARD_FILE_NAME,
+ true);
if (rc)
- return rc;
+ goto out;
wil_pre_fw_config(wil);
wil_release_cpu(wil);
@@ -1055,6 +1281,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
reinit_completion(&wil->wmi_call);
reinit_completion(&wil->halp.comp);
+ clear_bit(wil_status_resetting, wil->status);
+
if (load_fw) {
wil_configure_interrupt_moderation(wil);
wil_unmask_irq(wil);
@@ -1071,11 +1299,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
}
+ wil_collect_fw_info(wil);
+
if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT)
wil_ps_update(wil, wil->ps_profile);
- wil_collect_fw_info(wil);
-
if (wil->platform_ops.notify) {
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_FW_RDY);
@@ -1088,6 +1316,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
}
return rc;
+
+out:
+ clear_bit(wil_status_resetting, wil->status);
+ return rc;
}
void wil_fw_error_recovery(struct wil6210_priv *wil)
@@ -1193,9 +1425,7 @@ int __wil_down(struct wil6210_priv *wil)
wil_abort_scan(wil, false);
mutex_unlock(&wil->p2p_wdev_mutex);
- wil_reset(wil, false);
-
- return 0;
+ return wil_reset(wil, false);
}
int wil_down(struct wil6210_priv *wil)
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 4a6ab2d0fdf1..7ba4e0af8f57 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -21,6 +21,7 @@
static int wil_open(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
+ int rc;
wil_dbg_misc(wil, "open\n");
@@ -30,16 +31,29 @@ static int wil_open(struct net_device *ndev)
return -EINVAL;
}
- return wil_up(wil);
+ rc = wil_pm_runtime_get(wil);
+ if (rc < 0)
+ return rc;
+
+ rc = wil_up(wil);
+ if (rc)
+ wil_pm_runtime_put(wil);
+
+ return rc;
}
static int wil_stop(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
+ int rc;
wil_dbg_misc(wil, "stop\n");
- return wil_down(wil);
+ rc = wil_down(wil);
+ if (!rc)
+ wil_pm_runtime_put(wil);
+
+ return rc;
}
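With these two changes the interface holds a runtime-PM reference exactly while it is up: wil_open() takes the reference before wil_up() and drops it if bringing the interface up fails, while wil_stop() drops it only after wil_down() succeeds, keeping the get/put pairing balanced on every path.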
static const struct net_device_ops wil_netdev_ops = {
@@ -136,7 +150,7 @@ void *wil_if_alloc(struct device *dev)
wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
/* default monitor channel */
ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels;
- cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
+ cfg80211_chandef_create(&wil->monitor_chandef, ch, NL80211_CHAN_NO_HT);
ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
if (!ndev) {
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 6a3ab4bf916d..809092a49192 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -21,6 +22,7 @@
#include <linux/suspend.h>
#include "wil6210.h"
#include <linux/rtnetlink.h>
+#include <linux/pm_runtime.h>
static bool use_msi = true;
module_param(use_msi, bool, 0444);
@@ -30,29 +32,30 @@ static bool ftm_mode;
module_param(ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
static int wil6210_pm_notify(struct notifier_block *notify_block,
unsigned long mode, void *unused);
-#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
static
-void wil_set_capabilities(struct wil6210_priv *wil)
+int wil_set_capabilities(struct wil6210_priv *wil)
{
const char *wil_fw_name;
u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
RGF_USER_REVISION_ID_MASK);
+ int platform_capa;
+ struct fw_map *iccm_section, *sct;
- bitmap_zero(wil->hw_capabilities, hw_capability_last);
+ bitmap_zero(wil->hw_capa, hw_capa_last);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
+ bitmap_zero(wil->platform_capa, WIL_PLATFORM_CAPA_MAX);
wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
WIL_FW_NAME_DEFAULT;
wil->chip_revision = chip_revision;
switch (jtag_id) {
case JTAG_DEV_ID_SPARROW:
+ memcpy(fw_mapping, sparrow_fw_mapping,
+ sizeof(sparrow_fw_mapping));
switch (chip_revision) {
case REVISION_ID_SPARROW_D0:
wil->hw_name = "Sparrow D0";
@@ -62,6 +65,12 @@ void wil_set_capabilities(struct wil6210_priv *wil)
if (wil_fw_verify_file_exists(wil, wil_fw_name))
wil->wil_fw_name = wil_fw_name;
+ sct = wil_find_fw_mapping("mac_rgf_ext");
+ if (!sct) {
+ wil_err(wil, "mac_rgf_ext section not found in fw_mapping\n");
+ return -EINVAL;
+ }
+ memcpy(sct, &sparrow_d0_mac_rgf_ext, sizeof(*sct));
break;
case REVISION_ID_SPARROW_B0:
wil->hw_name = "Sparrow B0";
@@ -72,21 +81,50 @@ void wil_set_capabilities(struct wil6210_priv *wil)
wil->hw_version = HW_VER_UNKNOWN;
break;
}
+ wil->rgf_fw_assert_code_addr = SPARROW_RGF_FW_ASSERT_CODE;
+ wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE;
+ break;
+ case JTAG_DEV_ID_TALYN:
+ wil->hw_name = "Talyn";
+ wil->hw_version = HW_VER_TALYN;
+ memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping));
+ wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
+ wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
+ if (wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1) &
+ BIT_NO_FLASH_INDICATION)
+ set_bit(hw_capa_no_flash, wil->hw_capa);
break;
default:
wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
jtag_id, chip_revision);
wil->hw_name = "Unknown";
wil->hw_version = HW_VER_UNKNOWN;
+ return -EINVAL;
+ }
+
+ iccm_section = wil_find_fw_mapping("fw_code");
+ if (!iccm_section) {
+ wil_err(wil, "fw_code section not found in fw_mapping\n");
+ return -EINVAL;
}
+ wil->iccm_base = iccm_section->host;
- wil_info(wil, "Board hardware is %s\n", wil->hw_name);
+ wil_info(wil, "Board hardware is %s, flash %sexist\n", wil->hw_name,
+ test_bit(hw_capa_no_flash, wil->hw_capa) ? "doesn't " : "");
+
+ /* Get platform capabilities */
+ if (wil->platform_ops.get_capa) {
+ platform_capa =
+ wil->platform_ops.get_capa(wil->platform_handle);
+ memcpy(wil->platform_capa, &platform_capa,
+ min(sizeof(wil->platform_capa), sizeof(platform_capa)));
+ }
/* extract FW capabilities from file without loading the FW */
wil_request_firmware(wil, wil->wil_fw_name, false);
+ wil_refresh_fw_capabilities(wil);
- if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
- wil_to_wiphy(wil)->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ return 0;
}
void wil_disable_irq(struct wil6210_priv *wil)
@@ -209,6 +247,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
.fw_recovery = wil_platform_rop_fw_recovery,
};
u32 bar_size = pci_resource_len(pdev, 0);
+ int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
+ int i;
/* check HW */
dev_info(&pdev->dev, WIL_NAME
@@ -244,21 +284,23 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* rollback to err_plat */
- /* device supports 48bit addresses */
- rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (rc) {
- dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
- rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ /* device supports >32bit addresses */
+ for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
+ rc = dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(dma_addr_size[i]));
if (rc) {
- dev_err(dev,
- "dma_set_mask_and_coherent(32) failed: %d\n",
- rc);
- goto err_plat;
+ dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
+ dma_addr_size[i], rc);
+ continue;
}
- } else {
- wil->use_extended_dma_addr = 1;
+ dev_info(dev, "using dma mask %d", dma_addr_size[i]);
+ wil->dma_addr_size = dma_addr_size[i];
+ break;
}
+ if (wil->dma_addr_size == 0)
+ goto err_plat;
+
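The loop probes masks from widest to narrowest and records the first width the platform accepts in wil->dma_addr_size, with 0 left as the "nothing worked" sentinel. The same logic could be factored into a helper along these lines (hypothetical, for illustration only):

	static u32 wil_probe_dma_mask(struct device *dev)
	{
		/* keep descending order: first accepted width wins */
		static const int widths[] = {48, 40, 32};
		int i;

		for (i = 0; i < ARRAY_SIZE(widths); i++)
			if (!dma_set_mask_and_coherent(dev,
						       DMA_BIT_MASK(widths[i])))
				return widths[i];
		return 0;
	}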
rc = pci_enable_device(pdev);
if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
@@ -293,18 +335,13 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* rollback to err_iounmap */
wil_info(wil, "CSR at %pR -> 0x%p\n", &pdev->resource[0], wil->csr);
- wil_set_capabilities(wil);
+ rc = wil_set_capabilities(wil);
+ if (rc) {
+ wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc);
+ goto err_iounmap;
+ }
wil6210_clear_irq(wil);
- wil->keep_radio_on_during_sleep =
- wil->platform_ops.keep_radio_on_during_sleep &&
- wil->platform_ops.keep_radio_on_during_sleep(
- wil->platform_handle) &&
- test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
-
- wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
- wil->keep_radio_on_during_sleep);
-
/* FW should raise IRQ when ready */
rc = wil_if_pcie_enable(wil);
if (rc) {
@@ -319,20 +356,19 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto bus_disable;
}
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
- wil->pm_notify.notifier_call = wil6210_pm_notify;
+ if (IS_ENABLED(CONFIG_PM))
+ wil->pm_notify.notifier_call = wil6210_pm_notify;
+
rc = register_pm_notifier(&wil->pm_notify);
if (rc)
/* Do not fail the driver initialization, as suspend can
* be prevented in a later phase if needed
*/
wil_err(wil, "register_pm_notifier failed: %d\n", rc);
-#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
wil6210_debugfs_init(wil);
+ wil_pm_runtime_allow(wil);
return 0;
@@ -359,11 +395,9 @@ static void wil_pcie_remove(struct pci_dev *pdev)
wil_dbg_misc(wil, "pcie_remove\n");
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&wil->pm_notify);
-#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
+
+ wil_pm_runtime_forbid(wil);
wil6210_debugfs_remove(wil);
rtnl_lock();
@@ -381,18 +415,19 @@ static void wil_pcie_remove(struct pci_dev *pdev)
static const struct pci_device_id wil6210_pcie_ids[] = {
{ PCI_DEVICE(0x1ae9, 0x0310) },
{ PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */
+ { PCI_DEVICE(0x17cb, 0x1201) }, /* Talyn */
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
-
static int wil6210_suspend(struct device *dev, bool is_runtime)
{
int rc = 0;
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ struct net_device *ndev = wil_to_ndev(wil);
+ bool keep_radio_on = ndev->flags & IFF_UP &&
+ wil->keep_radio_on_during_sleep;
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
@@ -400,16 +435,18 @@ static int wil6210_suspend(struct device *dev, bool is_runtime)
if (rc)
goto out;
- rc = wil_suspend(wil, is_runtime);
+ rc = wil_suspend(wil, is_runtime, keep_radio_on);
if (!rc) {
- wil->suspend_stats.successful_suspends++;
-
- /* If platform device supports keep_radio_on_during_sleep
- * it will control PCIe master
+ /* In case radio stays on, platform device will control
+ * PCIe master
*/
- if (!wil->keep_radio_on_during_sleep)
+ if (!keep_radio_on) {
/* disable bus mastering */
pci_clear_master(pdev);
+ wil->suspend_stats.r_off.successful_suspends++;
+ } else {
+ wil->suspend_stats.r_on.successful_suspends++;
+ }
}
out:
return rc;
@@ -420,23 +457,32 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
int rc = 0;
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
+ struct net_device *ndev = wil_to_ndev(wil);
+ bool keep_radio_on = ndev->flags & IFF_UP &&
+ wil->keep_radio_on_during_sleep;
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
- /* If platform device supports keep_radio_on_during_sleep it will
- * control PCIe master
+ /* In case radio stays on, platform device will control
+ * PCIe master
*/
- if (!wil->keep_radio_on_during_sleep)
+ if (!keep_radio_on)
/* allow master */
pci_set_master(pdev);
- rc = wil_resume(wil, is_runtime);
+ rc = wil_resume(wil, is_runtime, keep_radio_on);
if (rc) {
wil_err(wil, "device failed to resume (%d)\n", rc);
- wil->suspend_stats.failed_resumes++;
- if (!wil->keep_radio_on_during_sleep)
+ if (!keep_radio_on) {
pci_clear_master(pdev);
+ wil->suspend_stats.r_off.failed_resumes++;
+ } else {
+ wil->suspend_stats.r_on.failed_resumes++;
+ }
} else {
- wil->suspend_stats.successful_resumes++;
+ if (keep_radio_on)
+ wil->suspend_stats.r_on.successful_resumes++;
+ else
+ wil->suspend_stats.r_off.successful_resumes++;
}
return rc;
@@ -481,21 +527,49 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
return rc;
}
-static int wil6210_pm_suspend(struct device *dev)
+static int __maybe_unused wil6210_pm_suspend(struct device *dev)
{
return wil6210_suspend(dev, false);
}
-static int wil6210_pm_resume(struct device *dev)
+static int __maybe_unused wil6210_pm_resume(struct device *dev)
{
return wil6210_resume(dev, false);
}
-#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
+static int __maybe_unused wil6210_pm_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+ wil_dbg_pm(wil, "Runtime idle\n");
+
+ return wil_can_suspend(wil, true);
+}
+
+static int __maybe_unused wil6210_pm_runtime_resume(struct device *dev)
+{
+ return wil6210_resume(dev, true);
+}
+
+static int __maybe_unused wil6210_pm_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+ if (test_bit(wil_status_suspended, wil->status)) {
+ wil_dbg_pm(wil, "trying to suspend while suspended\n");
+ return 1;
+ }
+
+ return wil6210_suspend(dev, true);
+}
static const struct dev_pm_ops wil6210_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
+ SET_RUNTIME_PM_OPS(wil6210_pm_runtime_suspend,
+ wil6210_pm_runtime_resume,
+ wil6210_pm_runtime_idle)
};
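Dropping the nested CONFIG_PM/CONFIG_PM_SLEEP guards works because SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() expand to nothing when the corresponding config options are disabled, and the __maybe_unused annotations silence the resulting "defined but not used" warnings on the callbacks, so the same source builds cleanly in every PM configuration.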
static struct pci_driver wil6210_driver = {
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 8f5d1b447aaa..0a96518a566f 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -16,15 +16,30 @@
#include "wil6210.h"
#include <linux/jiffies.h>
+#include <linux/pm_runtime.h>
+
+#define WIL6210_AUTOSUSPEND_DELAY_MS (1000)
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
{
int rc = 0;
struct wireless_dev *wdev = wil->wdev;
struct net_device *ndev = wil_to_ndev(wil);
+ bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY,
+ wil->fw_capabilities);
wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system");
+ if (wmi_only || debug_fw) {
+ wil_dbg_pm(wil, "Deny any suspend - %s mode\n",
+ wmi_only ? "wmi_only" : "debug_fw");
+ rc = -EBUSY;
+ goto out;
+ }
+ if (is_runtime && !wil->platform_ops.suspend) {
+ rc = -EBUSY;
+ goto out;
+ }
if (!(ndev->flags & IFF_UP)) {
/* can always sleep when down */
wil_dbg_pm(wil, "Interface is down\n");
@@ -44,6 +59,10 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
/* interface is running */
switch (wdev->iftype) {
case NL80211_IFTYPE_MONITOR:
+ wil_dbg_pm(wil, "Sniffer\n");
+ rc = -EBUSY;
+ goto out;
+ /* for STA-like interface, don't runtime suspend */
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
if (test_bit(wil_status_fwconnecting, wil->status)) {
@@ -51,6 +70,12 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
rc = -EBUSY;
goto out;
}
+ /* Runtime pm not supported in case the interface is up */
+ if (is_runtime) {
+ wil_dbg_pm(wil, "STA-like interface\n");
+ rc = -EBUSY;
+ goto out;
+ }
break;
/* AP-like interface - can't suspend */
default:
@@ -120,6 +145,13 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
/* Prevent handling of new tx and wmi commands */
set_bit(wil_status_suspending, wil->status);
+ if (test_bit(wil_status_collecting_dumps, wil->status)) {
+ /* Device is collecting a crash dump; cancel the suspend */
+ wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
+ clear_bit(wil_status_suspending, wil->status);
+ wil->suspend_stats.rejected_by_host++;
+ return -EBUSY;
+ }
wil_update_net_queues_bh(wil, NULL, true);
if (!wil_is_tx_idle(wil)) {
@@ -158,7 +190,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
break;
wil_err(wil,
"TO waiting for idle RX, suspend failed\n");
- wil->suspend_stats.failed_suspends++;
+ wil->suspend_stats.r_on.failed_suspends++;
goto resume_after_fail;
}
wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
@@ -174,7 +206,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
*/
if (!wil_is_wmi_idle(wil)) {
wil_err(wil, "suspend failed due to pending WMI events\n");
- wil->suspend_stats.failed_suspends++;
+ wil->suspend_stats.r_on.failed_suspends++;
goto resume_after_fail;
}
@@ -188,7 +220,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
if (rc) {
wil_err(wil, "platform device failed to suspend (%d)\n",
rc);
- wil->suspend_stats.failed_suspends++;
+ wil->suspend_stats.r_on.failed_suspends++;
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
wil_unmask_irq(wil);
goto resume_after_fail;
@@ -230,11 +262,21 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
wil_dbg_pm(wil, "suspend radio off\n");
+ set_bit(wil_status_suspending, wil->status);
+ if (test_bit(wil_status_collecting_dumps, wil->status)) {
+ /* Device is collecting a crash dump; cancel the suspend */
+ wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
+ clear_bit(wil_status_suspending, wil->status);
+ wil->suspend_stats.rejected_by_host++;
+ return -EBUSY;
+ }
+
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
rc = wil_down(wil);
if (rc) {
wil_err(wil, "wil_down : %d\n", rc);
+ wil->suspend_stats.r_off.failed_suspends++;
goto out;
}
}
@@ -247,6 +289,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
rc = wil->platform_ops.suspend(wil->platform_handle, false);
if (rc) {
wil_enable_irq(wil);
+ wil->suspend_stats.r_off.failed_suspends++;
goto out;
}
}
@@ -254,6 +297,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
set_bit(wil_status_suspended, wil->status);
out:
+ clear_bit(wil_status_suspending, wil->status);
wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
return rc;
@@ -279,12 +323,9 @@ static int wil_resume_radio_off(struct wil6210_priv *wil)
return rc;
}
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
{
int rc = 0;
- struct net_device *ndev = wil_to_ndev(wil);
- bool keep_radio_on = ndev->flags & IFF_UP &&
- wil->keep_radio_on_during_sleep;
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
@@ -301,19 +342,12 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
- if (!rc)
- wil->suspend_stats.suspend_start_time = ktime_get();
-
return rc;
}
-int wil_resume(struct wil6210_priv *wil, bool is_runtime)
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
{
int rc = 0;
- struct net_device *ndev = wil_to_ndev(wil);
- bool keep_radio_on = ndev->flags & IFF_UP &&
- wil->keep_radio_on_during_sleep;
- unsigned long long suspend_time_usec = 0;
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
@@ -331,20 +365,49 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
else
rc = wil_resume_radio_off(wil);
- if (rc)
- goto out;
-
- suspend_time_usec =
- ktime_to_us(ktime_sub(ktime_get(),
- wil->suspend_stats.suspend_start_time));
- wil->suspend_stats.total_suspend_time += suspend_time_usec;
- if (suspend_time_usec < wil->suspend_stats.min_suspend_time)
- wil->suspend_stats.min_suspend_time = suspend_time_usec;
- if (suspend_time_usec > wil->suspend_stats.max_suspend_time)
- wil->suspend_stats.max_suspend_time = suspend_time_usec;
-
out:
- wil_dbg_pm(wil, "resume: %s => %d, suspend time %lld usec\n",
- is_runtime ? "runtime" : "system", rc, suspend_time_usec);
+ wil_dbg_pm(wil, "resume: %s => %d\n", is_runtime ? "runtime" : "system",
+ rc);
return rc;
}
+
+void wil_pm_runtime_allow(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_autosuspend_delay(dev, WIL6210_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_allow(dev);
+}
+
+void wil_pm_runtime_forbid(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ pm_runtime_forbid(dev);
+ pm_runtime_get_noresume(dev);
+}
+
+int wil_pm_runtime_get(struct wil6210_priv *wil)
+{
+ int rc;
+ struct device *dev = wil_to_dev(wil);
+
+ rc = pm_runtime_get_sync(dev);
+ if (rc < 0) {
+ wil_err(wil, "pm_runtime_get_sync() failed, rc = %d\n", rc);
+ pm_runtime_put_noidle(dev);
+ return rc;
+ }
+
+ return 0;
+}
+
+void wil_pm_runtime_put(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
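Taken together, these helpers give the driver a simple runtime-PM lifecycle; a condensed editorial summary of the intended call flow (not code from the patch):

	/* probe:    wil_pm_runtime_allow()  - drop the probe-time reference,
	 *                                    enable 1s autosuspend
	 * ndo_open: wil_pm_runtime_get()    - resume device, hold a reference
	 * ndo_stop: wil_pm_runtime_put()    - mark busy, schedule autosuspend
	 * remove:   wil_pm_runtime_forbid() - block runtime PM, retake the
	 *                                     reference
	 */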
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 2e301b6b32a9..4ea27b0bd278 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -111,14 +111,14 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
*
* HW has a limitation that all vring addresses must share the same
* upper 16 most-significant bits of the 48-bit address. To work around that,
- * if we are using 48 bit addresses switch to 32 bit allocation
- * before allocating vring memory.
+ * if we are using more than 32-bit addresses, switch to a 32-bit
+ * allocation before allocating vring memory.
*
* There's no check for the return value of dma_set_mask_and_coherent,
* since we assume if we were able to set the mask during
* initialization in this system it will not fail if we set it again
*/
- if (wil->use_extended_dma_addr)
+ if (wil->dma_addr_size > 32)
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
pmc->pring_va = dma_alloc_coherent(dev,
@@ -126,8 +126,9 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
&pmc->pring_pa,
GFP_KERNEL);
- if (wil->use_extended_dma_addr)
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (wil->dma_addr_size > 32)
+ dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(wil->dma_addr_size));
wil_dbg_misc(wil,
"pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 389c718cd257..16b8a4e5201f 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -178,14 +178,14 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
*
* HW has a limitation that all vring addresses must share the same
* upper 16 most-significant bits of the 48-bit address. To work around that,
- * if we are using 48 bit addresses switch to 32 bit allocation
- * before allocating vring memory.
+ * if we are using more than 32-bit addresses, switch to a 32-bit
+ * allocation before allocating vring memory.
*
* There's no check for the return value of dma_set_mask_and_coherent,
* since we assume if we were able to set the mask during
* initialization in this system it will not fail if we set it again
*/
- if (wil->use_extended_dma_addr)
+ if (wil->dma_addr_size > 32)
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
@@ -195,8 +195,9 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
return -ENOMEM;
}
- if (wil->use_extended_dma_addr)
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (wil->dma_addr_size > 32)
+ dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(wil->dma_addr_size));
/* initially, all descriptors are SW owned
* For Tx and Rx, ownership bit is at the same location, thus
@@ -347,7 +348,6 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
struct sk_buff *skb)
{
- struct wireless_dev *wdev = wil->wdev;
struct wil6210_rtap {
struct ieee80211_radiotap_header rthdr;
/* fields should be in the order of bits in rthdr.it_present */
@@ -374,7 +374,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
int rtap_len = sizeof(struct wil6210_rtap);
int phy_length = 0; /* phy info header size, bytes */
static char phy_data[128];
- struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+ struct ieee80211_channel *ch = wil->monitor_chandef.chan;
if (rtap_include_phy_info) {
rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 1e340d04bd70..0df2aada6659 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -82,18 +83,18 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
*/
#define WIL_MAX_MPDU_OVERHEAD (62)
-struct wil_suspend_stats {
+struct wil_suspend_count_stats {
unsigned long successful_suspends;
- unsigned long failed_suspends;
unsigned long successful_resumes;
+ unsigned long failed_suspends;
unsigned long failed_resumes;
- unsigned long rejected_by_device;
+};
+
+struct wil_suspend_stats {
+ struct wil_suspend_count_stats r_off;
+ struct wil_suspend_count_stats r_on;
+ unsigned long rejected_by_device; /* only radio on */
unsigned long rejected_by_host;
- unsigned long long total_suspend_time;
- unsigned long long min_suspend_time;
- unsigned long long max_suspend_time;
- ktime_t collection_start;
- ktime_t suspend_start_time;
};
/* Calculate MAC buffer size for the firmware. It includes all overhead,
@@ -161,10 +162,15 @@ struct RGF_ICR {
#define RGF_USER_USAGE_6 (0x880018)
#define BIT_USER_OOB_MODE BIT(31)
#define BIT_USER_OOB_R2_MODE BIT(30)
+#define RGF_USER_USAGE_8 (0x880020)
+ #define BIT_USER_PREVENT_DEEP_SLEEP BIT(0)
+ #define BIT_USER_SUPPORT_T_POWER_ON_0 BIT(1)
+ #define BIT_USER_EXT_CLK BIT(2)
#define RGF_USER_HW_MACHINE_STATE (0x8801dc)
#define HW_MACHINE_BOOT_DONE (0x3fffffd)
#define RGF_USER_USER_CPU_0 (0x8801e0)
#define BIT_USER_USER_CPU_MAN_RST BIT(1) /* user_cpu_man_rst */
+#define RGF_USER_CPU_PC (0x8801e8)
#define RGF_USER_MAC_CPU_0 (0x8801fc)
#define BIT_USER_MAC_CPU_MAN_RST BIT(1) /* mac_cpu_man_rst */
#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
@@ -190,6 +196,19 @@ struct RGF_ICR {
#define RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1 (0x880c2c)
#define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */
#define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2)
+#define RGF_USER_OTP_HW_RD_MACHINE_1 (0x880ce0)
+ #define BIT_NO_FLASH_INDICATION BIT(8)
+#define RGF_USER_XPM_IFC_RD_TIME1 (0x880cec)
+#define RGF_USER_XPM_IFC_RD_TIME2 (0x880cf0)
+#define RGF_USER_XPM_IFC_RD_TIME3 (0x880cf4)
+#define RGF_USER_XPM_IFC_RD_TIME4 (0x880cf8)
+#define RGF_USER_XPM_IFC_RD_TIME5 (0x880cfc)
+#define RGF_USER_XPM_IFC_RD_TIME6 (0x880d00)
+#define RGF_USER_XPM_IFC_RD_TIME7 (0x880d04)
+#define RGF_USER_XPM_IFC_RD_TIME8 (0x880d08)
+#define RGF_USER_XPM_IFC_RD_TIME9 (0x880d0c)
+#define RGF_USER_XPM_IFC_RD_TIME10 (0x880d10)
+#define RGF_USER_XPM_RD_DOUT_SAMPLE_TIME (0x880d64)
#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
#define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
@@ -280,22 +299,33 @@ struct RGF_ICR {
#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
#define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
+#define USER_EXT_USER_PMU_3 (0x88d00c)
+ #define BIT_PMU_DEVICE_RDY BIT(0)
+
#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
#define JTAG_DEV_ID_SPARROW (0x2632072f)
+ #define JTAG_DEV_ID_TALYN (0x7e0e1)
#define RGF_USER_REVISION_ID (0x88afe4)
#define RGF_USER_REVISION_ID_MASK (3)
#define REVISION_ID_SPARROW_B0 (0x0)
#define REVISION_ID_SPARROW_D0 (0x3)
+#define RGF_OTP_MAC (0x8a0620)
+
/* crash codes for FW/Ucode stored here */
-#define RGF_FW_ASSERT_CODE (0x91f020)
-#define RGF_UCODE_ASSERT_CODE (0x91f028)
+
+/* ASSERT RGFs */
+#define SPARROW_RGF_FW_ASSERT_CODE (0x91f020)
+#define SPARROW_RGF_UCODE_ASSERT_CODE (0x91f028)
+#define TALYN_RGF_FW_ASSERT_CODE (0xa37020)
+#define TALYN_RGF_UCODE_ASSERT_CODE (0xa37028)
enum {
HW_VER_UNKNOWN,
HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */
HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */
+ HW_VER_TALYN, /* JTAG_DEV_ID_TALYN */
};
/* popular locations */
@@ -311,6 +341,10 @@ enum {
#define WIL_DATA_COMPLETION_TO_MS 200
/* Hardware definitions end */
+#define SPARROW_FW_MAPPING_TABLE_SIZE 10
+#define TALYN_FW_MAPPING_TABLE_SIZE 13
+#define MAX_FW_MAPPING_TABLE_SIZE 13
+
struct fw_map {
u32 from; /* linker address - from, inclusive */
u32 to; /* linker address - to, exclusive */
@@ -320,7 +354,10 @@ struct fw_map {
};
/* array size should be in sync with actual definition in the wmi.c */
-extern const struct fw_map fw_mapping[10];
+extern const struct fw_map sparrow_fw_mapping[SPARROW_FW_MAPPING_TABLE_SIZE];
+extern const struct fw_map sparrow_d0_mac_rgf_ext;
+extern const struct fw_map talyn_fw_mapping[TALYN_FW_MAPPING_TABLE_SIZE];
+extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
/**
* mk_cidxtid - construct @cidxtid field
@@ -435,12 +472,13 @@ enum { /* for wil6210_priv.status */
wil_status_fwconnected,
wil_status_dontscan,
wil_status_mbox_ready, /* MBOX structures ready */
- wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+ wil_status_irqen, /* interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
wil_status_resetting, /* reset in progress */
wil_status_suspending, /* suspend in progress */
wil_status_suspended, /* suspend completed, device is suspended */
wil_status_resuming, /* resume in progress */
+ wil_status_collecting_dumps, /* crashdump collection in progress */
wil_status_last /* keep last */
};
@@ -565,7 +603,8 @@ enum {
};
enum {
- hw_capability_last
+ hw_capa_no_flash,
+ hw_capa_last
};
struct wil_probe_client_req {
@@ -616,6 +655,16 @@ struct blink_on_off_time {
u32 off_ms;
};
+struct wil_debugfs_iomem_data {
+ void *offset;
+ struct wil6210_priv *wil;
+};
+
+struct wil_debugfs_data {
+ struct wil_debugfs_iomem_data *data_arr;
+ int iomem_data_count;
+};
+
extern struct blink_on_off_time led_blink_time[WIL_LED_TIME_LAST];
extern u8 led_id;
extern u8 led_polarity;
@@ -631,14 +680,19 @@ struct wil6210_priv {
u8 chip_revision;
const char *hw_name;
const char *wil_fw_name;
- DECLARE_BITMAP(hw_capabilities, hw_capability_last);
+ char *board_file;
+ u32 brd_file_addr;
+ u32 brd_file_max_size;
+ DECLARE_BITMAP(hw_capa, hw_capa_last);
DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
+ DECLARE_BITMAP(platform_capa, WIL_PLATFORM_CAPA_MAX);
u8 n_mids; /* number of additional MIDs as reported by FW */
u32 recovery_count; /* num of FW recovery attempts in a short time */
u32 recovery_state; /* FW recovery state machine */
unsigned long last_fw_recovery; /* jiffies of last fw recovery */
wait_queue_head_t wq; /* for all wait_event() use */
/* profile */
+ struct cfg80211_chan_def monitor_chandef;
u32 monitor_flags;
u32 privacy; /* secure connection? */
u8 hidden_ssid; /* relevant in AP mode */
@@ -694,7 +748,7 @@ struct wil6210_priv {
struct wil_sta_info sta[WIL6210_MAX_CID];
int bcast_vring;
u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */
- bool use_extended_dma_addr; /* indicates whether we are using 48 bits */
+ u32 dma_addr_size; /* DMA address width, in bits (0 if not set) */
/* scan */
struct cfg80211_scan_request *scan_request;
@@ -703,11 +757,12 @@ struct wil6210_priv {
atomic_t isr_count_rx, isr_count_tx;
/* debugfs */
struct dentry *debug;
- struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
+ struct wil_blob_wrapper blobs[MAX_FW_MAPPING_TABLE_SIZE];
u8 discovery_mode;
u8 abft_len;
u8 wakeup_trigger;
struct wil_suspend_stats suspend_stats;
+ struct wil_debugfs_data dbg_data;
void *platform_handle;
struct wil_platform_ops platform_ops;
@@ -731,16 +786,16 @@ struct wil6210_priv {
int fw_calib_result;
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
struct notifier_block pm_notify;
-#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
bool suspend_resp_rcvd;
bool suspend_resp_comp;
u32 bus_request_kbps;
u32 bus_request_kbps_pre_suspend;
+
+ u32 rgf_fw_assert_code_addr;
+ u32 rgf_ucode_assert_code_addr;
+ u32 iccm_base;
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -861,10 +916,13 @@ int wil_up(struct wil6210_priv *wil);
int __wil_up(struct wil6210_priv *wil);
int wil_down(struct wil6210_priv *wil);
int __wil_down(struct wil6210_priv *wil);
+void wil_refresh_fw_capabilities(struct wil6210_priv *wil);
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
void wil_set_ethtoolops(struct net_device *ndev);
+struct fw_map *wil_find_fw_mapping(const char *section);
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr, u32 size);
void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
@@ -997,11 +1055,17 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type);
int wil_request_firmware(struct wil6210_priv *wil, const char *name,
bool load);
+int wil_request_board(struct wil6210_priv *wil, const char *name);
bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
+void wil_pm_runtime_allow(struct wil6210_priv *wil);
+void wil_pm_runtime_forbid(struct wil6210_priv *wil);
+int wil_pm_runtime_get(struct wil6210_priv *wil);
+void wil_pm_runtime_put(struct wil6210_priv *wil);
+
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
-int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
bool wil_is_wmi_idle(struct wil6210_priv *wil);
int wmi_resume(struct wil6210_priv *wil);
int wmi_suspend(struct wil6210_priv *wil);
@@ -1016,4 +1080,8 @@ void wil_halp_unvote(struct wil6210_priv *wil);
void wil6210_set_halp(struct wil6210_priv *wil);
void wil6210_clear_halp(struct wil6210_priv *wil);
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+ struct cfg80211_sched_scan_request *request);
+int wmi_stop_sched_scan(struct wil6210_priv *wil);
+
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index e53cf0cf7031..1ed330674d9b 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -72,6 +72,15 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
return -EINVAL;
}
+ set_bit(wil_status_collecting_dumps, wil->status);
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status) ||
+ test_bit(wil_status_resetting, wil->status)) {
+ wil_err(wil, "cannot collect fw dump during suspend/reset\n");
+ clear_bit(wil_status_collecting_dumps, wil->status);
+ return -EINVAL;
+ }
+
/* copy to crash dump area */
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
@@ -91,6 +100,8 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
(const void __iomem * __force)data, len);
}
+ clear_bit(wil_status_collecting_dumps, wil->status);
+
return 0;
}
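The new wil_status_collecting_dumps bit closes the race in both directions: wil_reset() and both suspend paths bail out with -EBUSY while a dump is in progress (see the main.c and pm.c hunks above), and the collector itself, as here, refuses to start while a suspend or reset is in flight. Each side sets its own status bit first and then checks the other side's bit, so whichever operation starts second backs off.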
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 5d9e4bfcb045..177026e5323b 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -27,6 +27,18 @@ enum wil_platform_event {
WIL_PLATFORM_EVT_POST_SUSPEND = 4,
};
+enum wil_platform_features {
+ WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0,
+ WIL_PLATFORM_FEATURE_MAX,
+};
+
+enum wil_platform_capa {
+ WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND = 0,
+ WIL_PLATFORM_CAPA_T_PWR_ON_0 = 1,
+ WIL_PLATFORM_CAPA_EXT_CLK = 2,
+ WIL_PLATFORM_CAPA_MAX,
+};
+
/**
* struct wil_platform_ops - wil platform module calls from this
* driver to platform driver
@@ -37,7 +49,8 @@ struct wil_platform_ops {
int (*resume)(void *handle, bool device_powered_on);
void (*uninit)(void *handle);
int (*notify)(void *handle, enum wil_platform_event evt);
- bool (*keep_radio_on_during_sleep)(void *handle);
+ int (*get_capa)(void *handle);
+ void (*set_features)(void *handle, int features);
};
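A platform module plugs into the reworked ops by returning a bitmask of enum wil_platform_capa from get_capa() and consuming enum wil_platform_features bits in set_features(). A minimal hypothetical implementation (all my_plat_* names invented for illustration):

	static int my_plat_get_capa(void *handle)
	{
		/* radio may stay on in suspend; board has an external clock */
		return BIT(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND) |
		       BIT(WIL_PLATFORM_CAPA_EXT_CLK);
	}

	static void my_plat_set_features(void *handle, int features)
	{
		/* hypothetical helper: hand clock ownership to the FW */
		if (features & BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL))
			my_plat_fw_owns_clock(handle);
	}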
/**
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index ffdd2fa401b1..b31e2514f8c2 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -38,6 +39,7 @@ MODULE_PARM_DESC(led_id,
" 60G device led enablement. Set the led ID (0-2) to enable");
#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
+#define WIL_WMI_CALL_GENERAL_TO_MS 100
/**
* WMI event receiving - theory of operations
@@ -69,23 +71,23 @@ MODULE_PARM_DESC(led_id,
* On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing
* AHB addresses starting from 0x880000
*
- * Internally, firmware uses addresses that allows faster access but
+ * Internally, firmware uses addresses that allow faster access but
* are invisible from the host. To read from these addresses, alternative
* AHB address must be used.
- *
- * Memory mapping
- * Linker address PCI/Host address
- * 0x880000 .. 0xa80000 2Mb BAR0
- * 0x800000 .. 0x807000 0x900000 .. 0x907000 28k DCCM
- * 0x840000 .. 0x857000 0x908000 .. 0x91f000 92k PERIPH
*/
/**
- * @fw_mapping provides memory remapping table
+ * @sparrow_fw_mapping provides memory remapping table for Sparrow
*
* array size should be in sync with the declaration in the wil6210.h
+ *
+ * Sparrow memory mapping:
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xa80000 2Mb BAR0
+ * 0x800000 .. 0x808000 0x900000 .. 0x908000 32k DCCM
+ * 0x840000 .. 0x860000 0x908000 .. 0x928000 128k PERIPH
*/
-const struct fw_map fw_mapping[] = {
+const struct fw_map sparrow_fw_mapping[] = {
/* FW code RAM 256k */
{0x000000, 0x040000, 0x8c0000, "fw_code", true},
/* FW data RAM 32k */
@@ -111,6 +113,59 @@ const struct fw_map fw_mapping[] = {
{0x800000, 0x804000, 0x940000, "uc_data", false},
};
+/**
+ * @sparrow_d0_mac_rgf_ext - mac_rgf_ext section for Sparrow D0
+ * it is a bit larger to support extra features
+ */
+const struct fw_map sparrow_d0_mac_rgf_ext = {
+ 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true
+};
+
+/**
+ * @talyn_fw_mapping provides memory remapping table for Talyn
+ *
+ * array size should be in sync with the declaration in the wil6210.h
+ *
+ * Talyn memory mapping:
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xc80000 4Mb BAR0
+ * 0x800000 .. 0x820000 0xa00000 .. 0xa20000 128k DCCM
+ * 0x840000 .. 0x858000 0xa20000 .. 0xa38000 96k PERIPH
+ */
+const struct fw_map talyn_fw_mapping[] = {
+ /* FW code RAM 1M */
+ {0x000000, 0x100000, 0x900000, "fw_code", true},
+ /* FW data RAM 128k */
+ {0x800000, 0x820000, 0xa00000, "fw_data", true},
+ /* periph. data RAM 96k */
+ {0x840000, 0x858000, 0xa20000, "fw_peri", true},
+ /* various RGF 40k */
+ {0x880000, 0x88a000, 0x880000, "rgf", true},
+ /* AGC table 4k */
+ {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
+ /* Pcie_ext_rgf 4k */
+ {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
+ /* mac_ext_rgf 1344b */
+ {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true},
+ /* ext USER RGF 4k */
+ {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true},
+ /* OTP 4k */
+ {0x8a0000, 0x8a1000, 0x8a0000, "otp", true},
+ /* DMA EXT RGF 64k */
+ {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true},
+ /* upper area 1536k */
+ {0x900000, 0xa80000, 0x900000, "upper", true},
+ /* UCODE areas - accessible by debugfs blobs but not by
+ * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
+ */
+ /* ucode code RAM 256k */
+ {0x000000, 0x040000, 0xa38000, "uc_code", false},
+ /* ucode data RAM 32k */
+ {0x800000, 0x808000, 0xa78000, "uc_data", false},
+};
+
+struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
+
struct blink_on_off_time led_blink_time[] = {
{WIL_LED_BLINK_ON_SLOW_MS, WIL_LED_BLINK_OFF_SLOW_MS},
{WIL_LED_BLINK_ON_MED_MS, WIL_LED_BLINK_OFF_MED_MS},
@@ -138,15 +193,35 @@ static u32 wmi_addr_remap(u32 x)
}
/**
+ * find fw_mapping entry by section name
+ * @section - section name
+ *
+ * Return pointer to section or NULL if not found
+ */
+struct fw_map *wil_find_fw_mapping(const char *section)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++)
+ if (fw_mapping[i].name &&
+ !strcmp(section, fw_mapping[i].name))
+ return &fw_mapping[i];
+
+ return NULL;
+}
+
+/**
* Check address validity for WMI buffer; remap if needed
* @ptr - internal (linker) fw/ucode address
+ * @size - if non-zero, validate that the block does not
+ * exceed the device memory (BAR)
*
* Valid buffer should be DWORD aligned
*
* return address for accessing buffer from the host;
* if buffer is not valid, return NULL.
*/
-void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr_, u32 size)
{
u32 off;
u32 ptr = le32_to_cpu(ptr_);
@@ -161,10 +236,17 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
off = HOSTADDR(ptr);
if (off > wil->bar_size - 4)
return NULL;
+ if (size && ((off + size > wil->bar_size) || (off + size < off)))
+ return NULL;
return wil->csr + off;
}
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+ return wmi_buffer_block(wil, ptr_, 0);
+}
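The added bounds check guards both ends: off + size > bar_size rejects blocks that run past the BAR, while off + size < off catches unsigned wrap-around, which would otherwise slip past the first comparison. For example, with 32-bit arithmetic, off = 0xfffffff0 and size = 0x20 wrap to 0x10, so only the second clause rejects the block.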
+
/**
* Check address validity
*/
@@ -198,6 +280,242 @@ int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
return 0;
}
+static const char *cmdid2name(u16 cmdid)
+{
+ switch (cmdid) {
+ case WMI_NOTIFY_REQ_CMDID:
+ return "WMI_NOTIFY_REQ_CMD";
+ case WMI_START_SCAN_CMDID:
+ return "WMI_START_SCAN_CMD";
+ case WMI_CONNECT_CMDID:
+ return "WMI_CONNECT_CMD";
+ case WMI_DISCONNECT_CMDID:
+ return "WMI_DISCONNECT_CMD";
+ case WMI_SW_TX_REQ_CMDID:
+ return "WMI_SW_TX_REQ_CMD";
+ case WMI_GET_RF_SECTOR_PARAMS_CMDID:
+ return "WMI_GET_RF_SECTOR_PARAMS_CMD";
+ case WMI_SET_RF_SECTOR_PARAMS_CMDID:
+ return "WMI_SET_RF_SECTOR_PARAMS_CMD";
+ case WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID:
+ return "WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD";
+ case WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID:
+ return "WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD";
+ case WMI_BRP_SET_ANT_LIMIT_CMDID:
+ return "WMI_BRP_SET_ANT_LIMIT_CMD";
+ case WMI_TOF_SESSION_START_CMDID:
+ return "WMI_TOF_SESSION_START_CMD";
+ case WMI_AOA_MEAS_CMDID:
+ return "WMI_AOA_MEAS_CMD";
+ case WMI_PMC_CMDID:
+ return "WMI_PMC_CMD";
+ case WMI_TOF_GET_TX_RX_OFFSET_CMDID:
+ return "WMI_TOF_GET_TX_RX_OFFSET_CMD";
+ case WMI_TOF_SET_TX_RX_OFFSET_CMDID:
+ return "WMI_TOF_SET_TX_RX_OFFSET_CMD";
+ case WMI_VRING_CFG_CMDID:
+ return "WMI_VRING_CFG_CMD";
+ case WMI_BCAST_VRING_CFG_CMDID:
+ return "WMI_BCAST_VRING_CFG_CMD";
+ case WMI_TRAFFIC_SUSPEND_CMDID:
+ return "WMI_TRAFFIC_SUSPEND_CMD";
+ case WMI_TRAFFIC_RESUME_CMDID:
+ return "WMI_TRAFFIC_RESUME_CMD";
+ case WMI_ECHO_CMDID:
+ return "WMI_ECHO_CMD";
+ case WMI_SET_MAC_ADDRESS_CMDID:
+ return "WMI_SET_MAC_ADDRESS_CMD";
+ case WMI_LED_CFG_CMDID:
+ return "WMI_LED_CFG_CMD";
+ case WMI_PCP_START_CMDID:
+ return "WMI_PCP_START_CMD";
+ case WMI_PCP_STOP_CMDID:
+ return "WMI_PCP_STOP_CMD";
+ case WMI_SET_SSID_CMDID:
+ return "WMI_SET_SSID_CMD";
+ case WMI_GET_SSID_CMDID:
+ return "WMI_GET_SSID_CMD";
+ case WMI_SET_PCP_CHANNEL_CMDID:
+ return "WMI_SET_PCP_CHANNEL_CMD";
+ case WMI_GET_PCP_CHANNEL_CMDID:
+ return "WMI_GET_PCP_CHANNEL_CMD";
+ case WMI_P2P_CFG_CMDID:
+ return "WMI_P2P_CFG_CMD";
+ case WMI_START_LISTEN_CMDID:
+ return "WMI_START_LISTEN_CMD";
+ case WMI_START_SEARCH_CMDID:
+ return "WMI_START_SEARCH_CMD";
+ case WMI_DISCOVERY_STOP_CMDID:
+ return "WMI_DISCOVERY_STOP_CMD";
+ case WMI_DELETE_CIPHER_KEY_CMDID:
+ return "WMI_DELETE_CIPHER_KEY_CMD";
+ case WMI_ADD_CIPHER_KEY_CMDID:
+ return "WMI_ADD_CIPHER_KEY_CMD";
+ case WMI_SET_APPIE_CMDID:
+ return "WMI_SET_APPIE_CMD";
+ case WMI_CFG_RX_CHAIN_CMDID:
+ return "WMI_CFG_RX_CHAIN_CMD";
+ case WMI_TEMP_SENSE_CMDID:
+ return "WMI_TEMP_SENSE_CMD";
+ case WMI_DEL_STA_CMDID:
+ return "WMI_DEL_STA_CMD";
+ case WMI_DISCONNECT_STA_CMDID:
+ return "WMI_DISCONNECT_STA_CMD";
+ case WMI_VRING_BA_EN_CMDID:
+ return "WMI_VRING_BA_EN_CMD";
+ case WMI_VRING_BA_DIS_CMDID:
+ return "WMI_VRING_BA_DIS_CMD";
+ case WMI_RCP_DELBA_CMDID:
+ return "WMI_RCP_DELBA_CMD";
+ case WMI_RCP_ADDBA_RESP_CMDID:
+ return "WMI_RCP_ADDBA_RESP_CMD";
+ case WMI_PS_DEV_PROFILE_CFG_CMDID:
+ return "WMI_PS_DEV_PROFILE_CFG_CMD";
+ case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
+ return "WMI_SET_MGMT_RETRY_LIMIT_CMD";
+ case WMI_GET_MGMT_RETRY_LIMIT_CMDID:
+ return "WMI_GET_MGMT_RETRY_LIMIT_CMD";
+ case WMI_ABORT_SCAN_CMDID:
+ return "WMI_ABORT_SCAN_CMD";
+ case WMI_NEW_STA_CMDID:
+ return "WMI_NEW_STA_CMD";
+ case WMI_SET_THERMAL_THROTTLING_CFG_CMDID:
+ return "WMI_SET_THERMAL_THROTTLING_CFG_CMD";
+ case WMI_GET_THERMAL_THROTTLING_CFG_CMDID:
+ return "WMI_GET_THERMAL_THROTTLING_CFG_CMD";
+ case WMI_LINK_MAINTAIN_CFG_WRITE_CMDID:
+ return "WMI_LINK_MAINTAIN_CFG_WRITE_CMD";
+ case WMI_LO_POWER_CALIB_FROM_OTP_CMDID:
+ return "WMI_LO_POWER_CALIB_FROM_OTP_CMD";
+ case WMI_START_SCHED_SCAN_CMDID:
+ return "WMI_START_SCHED_SCAN_CMD";
+ case WMI_STOP_SCHED_SCAN_CMDID:
+ return "WMI_STOP_SCHED_SCAN_CMD";
+ default:
+ return "Untracked CMD";
+ }
+}
+
+static const char *eventid2name(u16 eventid)
+{
+ switch (eventid) {
+ case WMI_NOTIFY_REQ_DONE_EVENTID:
+ return "WMI_NOTIFY_REQ_DONE_EVENT";
+ case WMI_DISCONNECT_EVENTID:
+ return "WMI_DISCONNECT_EVENT";
+ case WMI_SW_TX_COMPLETE_EVENTID:
+ return "WMI_SW_TX_COMPLETE_EVENT";
+ case WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID:
+ return "WMI_GET_RF_SECTOR_PARAMS_DONE_EVENT";
+ case WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID:
+ return "WMI_SET_RF_SECTOR_PARAMS_DONE_EVENT";
+ case WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+ return "WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+ case WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+ return "WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+ case WMI_BRP_SET_ANT_LIMIT_EVENTID:
+ return "WMI_BRP_SET_ANT_LIMIT_EVENT";
+ case WMI_FW_READY_EVENTID:
+ return "WMI_FW_READY_EVENT";
+ case WMI_TRAFFIC_RESUME_EVENTID:
+ return "WMI_TRAFFIC_RESUME_EVENT";
+ case WMI_TOF_GET_TX_RX_OFFSET_EVENTID:
+ return "WMI_TOF_GET_TX_RX_OFFSET_EVENT";
+ case WMI_TOF_SET_TX_RX_OFFSET_EVENTID:
+ return "WMI_TOF_SET_TX_RX_OFFSET_EVENT";
+ case WMI_VRING_CFG_DONE_EVENTID:
+ return "WMI_VRING_CFG_DONE_EVENT";
+ case WMI_READY_EVENTID:
+ return "WMI_READY_EVENT";
+ case WMI_RX_MGMT_PACKET_EVENTID:
+ return "WMI_RX_MGMT_PACKET_EVENT";
+ case WMI_TX_MGMT_PACKET_EVENTID:
+ return "WMI_TX_MGMT_PACKET_EVENT";
+ case WMI_SCAN_COMPLETE_EVENTID:
+ return "WMI_SCAN_COMPLETE_EVENT";
+ case WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID:
+ return "WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT";
+ case WMI_CONNECT_EVENTID:
+ return "WMI_CONNECT_EVENT";
+ case WMI_EAPOL_RX_EVENTID:
+ return "WMI_EAPOL_RX_EVENT";
+ case WMI_BA_STATUS_EVENTID:
+ return "WMI_BA_STATUS_EVENT";
+ case WMI_RCP_ADDBA_REQ_EVENTID:
+ return "WMI_RCP_ADDBA_REQ_EVENT";
+ case WMI_DELBA_EVENTID:
+ return "WMI_DELBA_EVENT";
+ case WMI_VRING_EN_EVENTID:
+ return "WMI_VRING_EN_EVENT";
+ case WMI_DATA_PORT_OPEN_EVENTID:
+ return "WMI_DATA_PORT_OPEN_EVENT";
+ case WMI_AOA_MEAS_EVENTID:
+ return "WMI_AOA_MEAS_EVENT";
+ case WMI_TOF_SESSION_END_EVENTID:
+ return "WMI_TOF_SESSION_END_EVENT";
+ case WMI_TOF_GET_CAPABILITIES_EVENTID:
+ return "WMI_TOF_GET_CAPABILITIES_EVENT";
+ case WMI_TOF_SET_LCR_EVENTID:
+ return "WMI_TOF_SET_LCR_EVENT";
+ case WMI_TOF_SET_LCI_EVENTID:
+ return "WMI_TOF_SET_LCI_EVENT";
+ case WMI_TOF_FTM_PER_DEST_RES_EVENTID:
+ return "WMI_TOF_FTM_PER_DEST_RES_EVENT";
+ case WMI_TOF_CHANNEL_INFO_EVENTID:
+ return "WMI_TOF_CHANNEL_INFO_EVENT";
+ case WMI_TRAFFIC_SUSPEND_EVENTID:
+ return "WMI_TRAFFIC_SUSPEND_EVENT";
+ case WMI_ECHO_RSP_EVENTID:
+ return "WMI_ECHO_RSP_EVENT";
+ case WMI_LED_CFG_DONE_EVENTID:
+ return "WMI_LED_CFG_DONE_EVENT";
+ case WMI_PCP_STARTED_EVENTID:
+ return "WMI_PCP_STARTED_EVENT";
+ case WMI_PCP_STOPPED_EVENTID:
+ return "WMI_PCP_STOPPED_EVENT";
+ case WMI_GET_SSID_EVENTID:
+ return "WMI_GET_SSID_EVENT";
+ case WMI_GET_PCP_CHANNEL_EVENTID:
+ return "WMI_GET_PCP_CHANNEL_EVENT";
+ case WMI_P2P_CFG_DONE_EVENTID:
+ return "WMI_P2P_CFG_DONE_EVENT";
+ case WMI_LISTEN_STARTED_EVENTID:
+ return "WMI_LISTEN_STARTED_EVENT";
+ case WMI_SEARCH_STARTED_EVENTID:
+ return "WMI_SEARCH_STARTED_EVENT";
+ case WMI_DISCOVERY_STOPPED_EVENTID:
+ return "WMI_DISCOVERY_STOPPED_EVENT";
+ case WMI_CFG_RX_CHAIN_DONE_EVENTID:
+ return "WMI_CFG_RX_CHAIN_DONE_EVENT";
+ case WMI_TEMP_SENSE_DONE_EVENTID:
+ return "WMI_TEMP_SENSE_DONE_EVENT";
+ case WMI_RCP_ADDBA_RESP_SENT_EVENTID:
+ return "WMI_RCP_ADDBA_RESP_SENT_EVENT";
+ case WMI_PS_DEV_PROFILE_CFG_EVENTID:
+ return "WMI_PS_DEV_PROFILE_CFG_EVENT";
+ case WMI_SET_MGMT_RETRY_LIMIT_EVENTID:
+ return "WMI_SET_MGMT_RETRY_LIMIT_EVENT";
+ case WMI_GET_MGMT_RETRY_LIMIT_EVENTID:
+ return "WMI_GET_MGMT_RETRY_LIMIT_EVENT";
+ case WMI_SET_THERMAL_THROTTLING_CFG_EVENTID:
+ return "WMI_SET_THERMAL_THROTTLING_CFG_EVENT";
+ case WMI_GET_THERMAL_THROTTLING_CFG_EVENTID:
+ return "WMI_GET_THERMAL_THROTTLING_CFG_EVENT";
+ case WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID:
+ return "WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENT";
+ case WMI_LO_POWER_CALIB_FROM_OTP_EVENTID:
+ return "WMI_LO_POWER_CALIB_FROM_OTP_EVENT";
+ case WMI_START_SCHED_SCAN_EVENTID:
+ return "WMI_START_SCHED_SCAN_EVENT";
+ case WMI_STOP_SCHED_SCAN_EVENTID:
+ return "WMI_STOP_SCHED_SCAN_EVENT";
+ case WMI_SCHED_SCAN_RESULT_EVENTID:
+ return "WMI_SCHED_SCAN_RESULT_EVENT";
+ default:
+ return "Untracked EVENT";
+ }
+}
+
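The two lookup tables exist purely to make the WMI traces readable; the debug prints below use them to resolve raw IDs into names. Illustrative output (IDs and prefix approximate, shown for shape only):

	sending WMI_ECHO_CMD (0x0803) [4]
	recv WMI_ECHO_RSP_EVENT (0x1803) MID 0 @1234 msec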
static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
{
struct {
@@ -222,7 +540,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
uint retry;
int rc = 0;
- if (sizeof(cmd) + len > r->entry_size) {
+ if (len > r->entry_size - sizeof(cmd)) {
wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
(int)(sizeof(cmd) + len), r->entry_size);
return -ERANGE;
@@ -294,7 +612,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
}
cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
/* set command */
- wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+ wil_dbg_wmi(wil, "sending %s (0x%04x) [%d]\n",
+ cmdid2name(cmdid), cmdid, len);
wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
sizeof(cmd), true);
wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
@@ -566,8 +885,6 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
}
}
- /* FIXME FW can transmit only ucast frames to peer */
- /* FIXME real ring_id instead of hard coded 0 */
ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
wil->sta[evt->cid].status = wil_sta_conn_pending;
@@ -830,6 +1147,75 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
spin_unlock_bh(&sta->tid_rx_lock);
}
+static void
+wmi_evt_sched_scan_result(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_sched_scan_result_event *data = d;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct ieee80211_mgmt *rx_mgmt_frame =
+ (struct ieee80211_mgmt *)data->payload;
+ int flen = len - offsetof(struct wmi_sched_scan_result_event, payload);
+ int ch_no;
+ u32 freq;
+ struct ieee80211_channel *channel;
+ s32 signal;
+ __le16 fc;
+ u32 d_len;
+ struct cfg80211_bss *bss;
+
+ if (flen < 0) {
+ wil_err(wil, "sched scan result event too short, len %d\n",
+ len);
+ return;
+ }
+
+ d_len = le32_to_cpu(data->info.len);
+ if (d_len != flen) {
+ wil_err(wil,
+ "sched scan result length mismatch, d_len %d should be %d\n",
+ d_len, flen);
+ return;
+ }
+
+ fc = rx_mgmt_frame->frame_control;
+ if (!ieee80211_is_probe_resp(fc)) {
+ wil_err(wil, "sched scan result invalid frame, fc 0x%04x\n",
+ le16_to_cpu(fc));
+ return;
+ }
+
+ ch_no = data->info.channel + 1;
+ freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
+ channel = ieee80211_get_channel(wiphy, freq);
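+ /* cfg80211 expects the signal in mBm (100 * dBm) when the wiphy
+ * reports CFG80211_SIGNAL_TYPE_MBM; without RSSI reporting only the
+ * unitless signal quality indication is available
+ */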
+ if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+ signal = 100 * data->info.rssi;
+ else
+ signal = data->info.sqi;
+
+ wil_dbg_wmi(wil, "sched scan result: channel %d MCS %d RSSI %d\n",
+ data->info.channel, data->info.mcs, data->info.rssi);
+ wil_dbg_wmi(wil, "len %d qid %d mid %d cid %d\n",
+ d_len, data->info.qid, data->info.mid, data->info.cid);
+ wil_hex_dump_wmi("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+ d_len, true);
+
+ if (!channel) {
+ wil_err(wil, "Frame on unsupported channel\n");
+ return;
+ }
+
+ bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
+ d_len, signal, GFP_KERNEL);
+ if (bss) {
+ wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid);
+ cfg80211_put_bss(wiphy, bss);
+ } else {
+ wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
+ }
+
+ cfg80211_sched_scan_results(wiphy, 0);
+}
+
/**
* Some events are ignored on purpose and need not be interpreted as
* "unhandled events"
@@ -857,6 +1243,7 @@ static const struct {
{WMI_DELBA_EVENTID, wmi_evt_delba},
{WMI_VRING_EN_EVENTID, wmi_evt_vring_en},
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore},
+ {WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result},
};
/*
@@ -963,8 +1350,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
}
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
- wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
- id, wmi->mid, tstamp);
+ wil_dbg_wmi(wil, "recv %s (0x%04x) MID %d @%d msec\n",
+ eventid2name(id), id, wmi->mid, tstamp);
trace_wil6210_wmi_event(wmi, &wmi[1],
len - sizeof(*wmi));
}
@@ -1380,8 +1767,14 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
};
int rc;
u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
- struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+ struct wmi_set_appie_cmd *cmd;
+
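+ /* len is u16, so a result smaller than ie_len means the sum overflowed */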
+ if (len < ie_len) {
+ rc = -EINVAL;
+ goto out;
+ }
+ cmd = kzalloc(len, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
goto out;
@@ -1461,7 +1854,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
int rc;
if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
- struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+ struct ieee80211_channel *ch = wil->monitor_chandef.chan;
cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
if (ch)
@@ -1801,6 +2194,16 @@ void wmi_event_flush(struct wil6210_priv *wil)
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
}
+static const char *suspend_status2name(u8 status)
+{
+ switch (status) {
+ case WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE:
+ return "LINK_NOT_IDLE";
+ default:
+ return "Untracked status";
+ }
+}
+
int wmi_suspend(struct wil6210_priv *wil)
{
int rc;
@@ -1816,7 +2219,7 @@ int wmi_suspend(struct wil6210_priv *wil)
wil->suspend_resp_rcvd = false;
wil->suspend_resp_comp = false;
- reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED;
+ reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE;
rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd),
WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
@@ -1848,8 +2251,9 @@ int wmi_suspend(struct wil6210_priv *wil)
}
wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
- if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) {
- wil_dbg_pm(wil, "device rejected the suspend\n");
+ if (reply.evt.status != WMI_TRAFFIC_SUSPEND_APPROVED) {
+ wil_dbg_pm(wil, "device rejected the suspend, %s\n",
+ suspend_status2name(reply.evt.status));
wil->suspend_stats.rejected_by_device++;
}
rc = reply.evt.status;
@@ -1861,21 +2265,50 @@ out:
return rc;
}
+static void resume_triggers2string(u32 triggers, char *string, int str_size)
+{
+ string[0] = '\0';
+
+ if (!triggers) {
+ strlcat(string, " UNKNOWN", str_size);
+ return;
+ }
+
+ if (triggers & WMI_RESUME_TRIGGER_HOST)
+ strlcat(string, " HOST", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_UCAST_RX)
+ strlcat(string, " UCAST_RX", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_BCAST_RX)
+ strlcat(string, " BCAST_RX", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_WMI_EVT)
+ strlcat(string, " WMI_EVT", str_size);
+}
+
int wmi_resume(struct wil6210_priv *wil)
{
int rc;
+ char string[100];
struct {
struct wmi_cmd_hdr wmi;
struct wmi_traffic_resume_event evt;
} __packed reply;
reply.evt.status = WMI_TRAFFIC_RESUME_FAILED;
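+ /* preset the triggers so firmware that does not fill in the extended
+ * resume event is reported as "UNKNOWN"
+ */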
+ reply.evt.resume_triggers = WMI_RESUME_TRIGGER_UNKNOWN;
rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0,
WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
if (rc)
return rc;
+ resume_triggers2string(le32_to_cpu(reply.evt.resume_triggers), string,
+ sizeof(string));
+ wil_dbg_pm(wil, "device resume %s, resume triggers:%s (0x%x)\n",
+ reply.evt.status ? "failed" : "passed", string,
+ le32_to_cpu(reply.evt.resume_triggers));
return reply.evt.status;
}
@@ -1906,8 +2339,8 @@ static void wmi_event_handle(struct wil6210_priv *wil,
void *evt_data = (void *)(&wmi[1]);
u16 id = le16_to_cpu(wmi->command_id);
- wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n",
- id, wil->reply_id);
+ wil_dbg_wmi(wil, "Handle %s (0x%04x) (reply_id 0x%04x)\n",
+ eventid2name(id), id, wil->reply_id);
/* check if someone waits for this event */
if (wil->reply_id && wil->reply_id == id) {
WARN_ON(wil->reply_buf);
@@ -2002,3 +2435,159 @@ out:
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
return rc;
}
+
+static void
+wmi_sched_scan_set_ssids(struct wil6210_priv *wil,
+ struct wmi_start_sched_scan_cmd *cmd,
+ struct cfg80211_ssid *ssids, int n_ssids,
+ struct cfg80211_match_set *match_sets,
+ int n_match_sets)
+{
+ int i;
+
+ if (n_match_sets > WMI_MAX_PNO_SSID_NUM) {
+ wil_dbg_wmi(wil, "too many match sets (%d), use first %d\n",
+ n_match_sets, WMI_MAX_PNO_SSID_NUM);
+ n_match_sets = WMI_MAX_PNO_SSID_NUM;
+ }
+ cmd->num_of_ssids = n_match_sets;
+
+ for (i = 0; i < n_match_sets; i++) {
+ struct wmi_sched_scan_ssid_match *wmi_match =
+ &cmd->ssid_for_match[i];
+ struct cfg80211_match_set *cfg_match = &match_sets[i];
+ int j;
+
+ wmi_match->ssid_len = cfg_match->ssid.ssid_len;
+ memcpy(wmi_match->ssid, cfg_match->ssid.ssid,
+ min_t(u8, wmi_match->ssid_len, WMI_MAX_SSID_LEN));
+ wmi_match->rssi_threshold = S8_MIN;
+ if (cfg_match->rssi_thold >= S8_MIN &&
+ cfg_match->rssi_thold <= S8_MAX)
+ wmi_match->rssi_threshold = cfg_match->rssi_thold;
+
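+ /* flag the SSID for active probing when it also appears in the
+ * scan request's ssids list
+ */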
+ for (j = 0; j < n_ssids; j++)
+ if (wmi_match->ssid_len == ssids[j].ssid_len &&
+ memcmp(wmi_match->ssid, ssids[j].ssid,
+ wmi_match->ssid_len) == 0)
+ wmi_match->add_ssid_to_probe = true;
+ }
+}
+
+static void
+wmi_sched_scan_set_channels(struct wil6210_priv *wil,
+ struct wmi_start_sched_scan_cmd *cmd,
+ u32 n_channels,
+ struct ieee80211_channel **channels)
+{
+ int i;
+
+ if (n_channels > WMI_MAX_CHANNEL_NUM) {
+ wil_dbg_wmi(wil, "too many channels (%d), use first %d\n",
+ n_channels, WMI_MAX_CHANNEL_NUM);
+ n_channels = WMI_MAX_CHANNEL_NUM;
+ }
+ cmd->num_of_channels = n_channels;
+
+ for (i = 0; i < n_channels; i++) {
+ struct ieee80211_channel *cfg_chan = channels[i];
+
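+ /* firmware channel numbering is zero based, while the 60GHz band
+ * uses channels 1..6 (hence the +1 when reporting scan results)
+ */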
+ cmd->channel_list[i] = cfg_chan->hw_value - 1;
+ }
+}
+
+static void
+wmi_sched_scan_set_plans(struct wil6210_priv *wil,
+ struct wmi_start_sched_scan_cmd *cmd,
+ struct cfg80211_sched_scan_plan *scan_plans,
+ int n_scan_plans)
+{
+ int i;
+
+ if (n_scan_plans > WMI_MAX_PLANS_NUM) {
+ wil_dbg_wmi(wil, "too many plans (%d), use first %d\n",
+ n_scan_plans, WMI_MAX_PLANS_NUM);
+ n_scan_plans = WMI_MAX_PLANS_NUM;
+ }
+
+ for (i = 0; i < n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *cfg_plan = &scan_plans[i];
+
+ cmd->scan_plans[i].interval_sec =
+ cpu_to_le16(cfg_plan->interval);
+ cmd->scan_plans[i].num_of_iterations =
+ cpu_to_le16(cfg_plan->iterations);
+ }
+}
+
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+ struct cfg80211_sched_scan_request *request)
+{
+ int rc;
+ struct wmi_start_sched_scan_cmd cmd = {
+ .min_rssi_threshold = S8_MIN,
+ .initial_delay_sec = cpu_to_le16(request->delay),
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_start_sched_scan_event evt;
+ } __packed reply;
+
+ if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ if (request->min_rssi_thold >= S8_MIN &&
+ request->min_rssi_thold <= S8_MAX)
+ cmd.min_rssi_threshold = request->min_rssi_thold;
+
+ wmi_sched_scan_set_ssids(wil, &cmd, request->ssids, request->n_ssids,
+ request->match_sets, request->n_match_sets);
+ wmi_sched_scan_set_channels(wil, &cmd,
+ request->n_channels, request->channels);
+ wmi_sched_scan_set_plans(wil, &cmd,
+ request->scan_plans, request->n_scan_plans);
+
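+ /* preset the result so a missing or short reply counts as a reject */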
+ reply.evt.result = WMI_PNO_REJECT;
+
+ rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, &cmd, sizeof(cmd),
+ WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.result != WMI_PNO_SUCCESS) {
+ wil_err(wil, "start sched scan failed, result %d\n",
+ reply.evt.result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wmi_stop_sched_scan(struct wil6210_priv *wil)
+{
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_stop_sched_scan_event evt;
+ } __packed reply;
+
+ if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ reply.evt.result = WMI_PNO_REJECT;
+
+ rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, NULL, 0,
+ WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+ WIL_WMI_CALL_GENERAL_TO_MS);
+ if (rc)
+ return rc;
+
+ if (reply.evt.result != WMI_PNO_SUCCESS) {
+ wil_err(wil, "stop sched scan failed, result %d\n",
+ reply.evt.result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 5263ee717a4f..d3e75f0ff245 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -71,6 +71,8 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_RSSI_REPORTING = 12,
WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE = 13,
WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14,
+ WMI_FW_CAPABILITY_PNO = 15,
+ WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18,
WMI_FW_CAPABILITY_MAX,
};
@@ -87,6 +89,8 @@ enum wmi_command_id {
WMI_CONNECT_CMDID = 0x01,
WMI_DISCONNECT_CMDID = 0x03,
WMI_DISCONNECT_STA_CMDID = 0x04,
+ WMI_START_SCHED_SCAN_CMDID = 0x05,
+ WMI_STOP_SCHED_SCAN_CMDID = 0x06,
WMI_START_SCAN_CMDID = 0x07,
WMI_SET_BSS_FILTER_CMDID = 0x09,
WMI_SET_PROBED_SSID_CMDID = 0x0A,
@@ -385,6 +389,38 @@ struct wmi_start_scan_cmd {
} channel_list[0];
} __packed;
+#define WMI_MAX_PNO_SSID_NUM (16)
+#define WMI_MAX_CHANNEL_NUM (6)
+#define WMI_MAX_PLANS_NUM (2)
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_ssid_match {
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ s8 rssi_threshold;
+ /* boolean */
+ u8 add_ssid_to_probe;
+ u8 reserved;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_plan {
+ __le16 interval_sec;
+ __le16 num_of_iterations;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_start_sched_scan_cmd {
+ struct wmi_sched_scan_ssid_match ssid_for_match[WMI_MAX_PNO_SSID_NUM];
+ u8 num_of_ssids;
+ s8 min_rssi_threshold;
+ u8 channel_list[WMI_MAX_CHANNEL_NUM];
+ u8 num_of_channels;
+ u8 reserved;
+ __le16 initial_delay_sec;
+ struct wmi_sched_scan_plan scan_plans[WMI_MAX_PLANS_NUM];
+} __packed;
+
/* WMI_SET_PROBED_SSID_CMDID */
#define MAX_PROBED_SSID_INDEX (3)
@@ -1238,6 +1274,9 @@ enum wmi_event_id {
WMI_READY_EVENTID = 0x1001,
WMI_CONNECT_EVENTID = 0x1002,
WMI_DISCONNECT_EVENTID = 0x1003,
+ WMI_START_SCHED_SCAN_EVENTID = 0x1005,
+ WMI_STOP_SCHED_SCAN_EVENTID = 0x1006,
+ WMI_SCHED_SCAN_RESULT_EVENTID = 0x1007,
WMI_SCAN_COMPLETE_EVENTID = 0x100A,
WMI_REPORT_STATISTICS_EVENTID = 0x100B,
WMI_RD_MEM_RSP_EVENTID = 0x1800,
@@ -1600,6 +1639,49 @@ struct wmi_scan_complete_event {
__le32 status;
} __packed;
+/* wmi_rx_mgmt_info */
+struct wmi_rx_mgmt_info {
+ u8 mcs;
+ s8 rssi;
+ u8 range;
+ u8 sqi;
+ __le16 stype;
+ __le16 status;
+ __le32 len;
+ /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
+ u8 qid;
+ /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
+ u8 mid;
+ u8 cid;
+ /* From Radio MNGR */
+ u8 channel;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_EVENTID */
+enum wmi_pno_result {
+ WMI_PNO_SUCCESS = 0x00,
+ WMI_PNO_REJECT = 0x01,
+ WMI_PNO_INVALID_PARAMETERS = 0x02,
+ WMI_PNO_NOT_ENABLED = 0x03,
+};
+
+struct wmi_start_sched_scan_event {
+ /* pno_result */
+ u8 result;
+ u8 reserved[3];
+} __packed;
+
+struct wmi_stop_sched_scan_event {
+ /* pno_result */
+ u8 result;
+ u8 reserved[3];
+} __packed;
+
+struct wmi_sched_scan_result_event {
+ struct wmi_rx_mgmt_info info;
+ u8 payload[0];
+} __packed;
+
/* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */
enum wmi_acs_info_bitmask {
WMI_ACS_INFO_BITMASK_BEACON_FOUND = 0x01,
@@ -1814,24 +1896,6 @@ struct wmi_get_ssid_event {
u8 ssid[WMI_MAX_SSID_LEN];
} __packed;
-/* wmi_rx_mgmt_info */
-struct wmi_rx_mgmt_info {
- u8 mcs;
- s8 rssi;
- u8 range;
- u8 sqi;
- __le16 stype;
- __le16 status;
- __le32 len;
- /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
- u8 qid;
- /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
- u8 mid;
- u8 cid;
- /* From Radio MNGR */
- u8 channel;
-} __packed;
-
/* EVENT: WMI_RF_XPM_READ_RESULT_EVENTID */
struct wmi_rf_xpm_read_result_event {
/* enum wmi_fw_status_e - success=0 or fail=1 */
@@ -2267,8 +2331,8 @@ struct wmi_link_maintain_cfg_read_done_event {
} __packed;
enum wmi_traffic_suspend_status {
- WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
- WMI_TRAFFIC_SUSPEND_REJECTED = 0x1,
+ WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
+ WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE = 0x1,
};
/* WMI_TRAFFIC_SUSPEND_EVENTID */
@@ -2282,10 +2346,21 @@ enum wmi_traffic_resume_status {
WMI_TRAFFIC_RESUME_FAILED = 0x1,
};
+enum wmi_resume_trigger {
+ WMI_RESUME_TRIGGER_UNKNOWN = 0x0,
+ WMI_RESUME_TRIGGER_HOST = 0x1,
+ WMI_RESUME_TRIGGER_UCAST_RX = 0x2,
+ WMI_RESUME_TRIGGER_BCAST_RX = 0x4,
+ WMI_RESUME_TRIGGER_WMI_EVT = 0x8,
+};
+
/* WMI_TRAFFIC_RESUME_EVENTID */
struct wmi_traffic_resume_event {
- /* enum wmi_traffic_resume_status_e */
+ /* enum wmi_traffic_resume_status */
u8 status;
+ u8 reserved[3];
+ /* enum wmi_resume_trigger bitmap */
+ __le32 resume_triggers;
} __packed;
/* Power Save command completion status codes */
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index a5557d70689f..f2a2f41e3c96 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -1031,7 +1031,7 @@ static void b43_radio_2057_init_post(struct b43_wldev *dev)
b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x78);
b43_radio_set(dev, R2057_XTAL_CONFIG2, 0x80);
- mdelay(2);
+ usleep_range(2000, 3000);
b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 9f2d0b0cf6e5..2d3a5dd07a3f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -165,7 +165,7 @@ static int brcmf_proto_bcdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
static int
brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len)
+ void *buf, uint len, int *fwerr)
{
struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
@@ -175,6 +175,7 @@ brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
+ *fwerr = 0;
ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, false);
if (ret < 0) {
brcmf_err("brcmf_proto_bcdc_msg failed w/status %d\n",
@@ -211,25 +212,27 @@ retry:
memcpy(buf, info, len);
}
+ ret = 0;
+
/* Check the ERROR flag */
if (flags & BCDC_DCMD_ERROR)
- ret = le32_to_cpu(msg->status);
-
+ *fwerr = le32_to_cpu(msg->status);
done:
return ret;
}
static int
brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len)
+ void *buf, uint len, int *fwerr)
{
struct brcmf_bcdc *bcdc = (struct brcmf_bcdc *)drvr->proto->pd;
struct brcmf_proto_bcdc_dcmd *msg = &bcdc->msg;
- int ret = 0;
+ int ret;
u32 flags, id;
brcmf_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len);
+ *fwerr = 0;
ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, true);
if (ret < 0)
goto done;
@@ -249,9 +252,11 @@ brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
goto done;
}
+ ret = 0;
+
/* Check the ERROR flag */
if (flags & BCDC_DCMD_ERROR)
- ret = le32_to_cpu(msg->status);
+ *fwerr = le32_to_cpu(msg->status);
done:
return ret;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index cd587325e286..0b68240ec7b4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -118,7 +118,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
pdata->oob_irq_flags, "brcmf_oob_intr",
- &sdiodev->func[1]->dev);
+ &sdiodev->func1->dev);
if (ret != 0) {
brcmf_err("request_irq failed %d\n", ret);
return ret;
@@ -132,39 +132,40 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
}
sdiodev->irq_wake = true;
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
/* assign GPIO to SDIO core */
addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
- gpiocontrol = brcmf_sdiod_regrl(sdiodev, addr, &ret);
+ gpiocontrol = brcmf_sdiod_readl(sdiodev, addr, &ret);
gpiocontrol |= 0x2;
- brcmf_sdiod_regwl(sdiodev, addr, gpiocontrol, &ret);
+ brcmf_sdiod_writel(sdiodev, addr, gpiocontrol, &ret);
- brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_SELECT, 0xf,
- &ret);
- brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
- brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_SELECT,
+ 0xf, &ret);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
}
/* must configure SDIO_CCCR_IENx to enable irq */
- data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
- data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
- brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
+ data = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret);
+ data |= SDIO_CCCR_IEN_FUNC1 | SDIO_CCCR_IEN_FUNC2 |
+ SDIO_CCCR_IEN_FUNC0;
+ brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret);
/* redirect, configure and enable io for interrupt signal */
- data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
+ data = SDIO_CCCR_BRCM_SEPINT_MASK | SDIO_CCCR_BRCM_SEPINT_OE;
if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
- data |= SDIO_SEPINT_ACT_HI;
- brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
-
- sdio_release_host(sdiodev->func[1]);
+ data |= SDIO_CCCR_BRCM_SEPINT_ACT_HI;
+ brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT,
+ data, &ret);
+ sdio_release_host(sdiodev->func1);
} else {
brcmf_dbg(SDIO, "Entering\n");
- sdio_claim_host(sdiodev->func[1]);
- sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
- sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
- sdio_release_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
+ sdio_claim_irq(sdiodev->func1, brcmf_sdiod_ib_irqhandler);
+ sdio_claim_irq(sdiodev->func2, brcmf_sdiod_dummy_irqhandler);
+ sdio_release_host(sdiodev->func1);
sdiodev->sd_irq_requested = true;
}
@@ -182,26 +183,26 @@ void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
struct brcmfmac_sdio_pd *pdata;
pdata = &sdiodev->settings->bus.sdio;
- sdio_claim_host(sdiodev->func[1]);
- brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
- brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
- sdio_release_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
+ brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
+ brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
+ sdio_release_host(sdiodev->func1);
sdiodev->oob_irq_requested = false;
if (sdiodev->irq_wake) {
disable_irq_wake(pdata->oob_irq_nr);
sdiodev->irq_wake = false;
}
- free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev);
+ free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev);
sdiodev->irq_en = false;
sdiodev->oob_irq_requested = false;
}
if (sdiodev->sd_irq_requested) {
- sdio_claim_host(sdiodev->func[1]);
- sdio_release_irq(sdiodev->func[2]);
- sdio_release_irq(sdiodev->func[1]);
- sdio_release_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
+ sdio_release_irq(sdiodev->func2);
+ sdio_release_irq(sdiodev->func1);
+ sdio_release_host(sdiodev->func1);
sdiodev->sd_irq_requested = false;
}
}
@@ -230,271 +231,121 @@ void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
sdiodev->state = state;
}
-static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
- uint regaddr, u8 byte)
-{
- int err_ret;
-
- /*
- * Can only directly write to some F0 registers.
- * Handle CCCR_IENx and CCCR_ABORT command
- * as a special case.
- */
- if ((regaddr == SDIO_CCCR_ABORT) ||
- (regaddr == SDIO_CCCR_IENx))
- sdio_writeb(func, byte, regaddr, &err_ret);
- else
- sdio_f0_writeb(func, byte, regaddr, &err_ret);
-
- return err_ret;
-}
-
-static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
- u32 addr, u8 regsz, void *data, bool write)
-{
- struct sdio_func *func;
- int ret = -EINVAL;
-
- brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
- write, fn, addr, regsz);
-
- /* only allow byte access on F0 */
- if (WARN_ON(regsz > 1 && !fn))
- return -EINVAL;
- func = sdiodev->func[fn];
-
- switch (regsz) {
- case sizeof(u8):
- if (write) {
- if (fn)
- sdio_writeb(func, *(u8 *)data, addr, &ret);
- else
- ret = brcmf_sdiod_f0_writeb(func, addr,
- *(u8 *)data);
- } else {
- if (fn)
- *(u8 *)data = sdio_readb(func, addr, &ret);
- else
- *(u8 *)data = sdio_f0_readb(func, addr, &ret);
- }
- break;
- case sizeof(u16):
- if (write)
- sdio_writew(func, *(u16 *)data, addr, &ret);
- else
- *(u16 *)data = sdio_readw(func, addr, &ret);
- break;
- case sizeof(u32):
- if (write)
- sdio_writel(func, *(u32 *)data, addr, &ret);
- else
- *(u32 *)data = sdio_readl(func, addr, &ret);
- break;
- default:
- brcmf_err("invalid size: %d\n", regsz);
- break;
- }
-
- if (ret)
- brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
- write ? "write" : "read", fn, addr, ret);
-
- return ret;
-}
-
-static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
- u8 regsz, void *data, bool write)
-{
- u8 func;
- s32 retry = 0;
- int ret;
-
- if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
- return -ENOMEDIUM;
-
- /*
- * figure out how to read the register based on address range
- * 0x00 ~ 0x7FF: function 0 CCCR and FBR
- * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
- * The rest: function 1 silicon backplane core registers
- */
- if ((addr & ~REG_F0_REG_MASK) == 0)
- func = SDIO_FUNC_0;
- else
- func = SDIO_FUNC_1;
-
- do {
- if (!write)
- memset(data, 0, regsz);
- /* for retry wait for 1 ms till bus get settled down */
- if (retry)
- usleep_range(1000, 2000);
- ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
- data, write);
- } while (ret != 0 && ret != -ENOMEDIUM &&
- retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
-
- if (ret == -ENOMEDIUM)
- brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
- else if (ret != 0) {
- /*
- * SleepCSR register access can fail when
- * waking up the device so reduce this noise
- * in the logs.
- */
- if (addr != SBSDIO_FUNC1_SLEEPCSR)
- brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
- write ? "write" : "read", func, addr, ret);
- else
- brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
- write ? "write" : "read", func, addr, ret);
- }
- return ret;
-}
-
-static int
-brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
+static int brcmf_sdiod_set_backplane_window(struct brcmf_sdio_dev *sdiodev,
+ u32 addr)
{
+ u32 v, bar0 = addr & SBSDIO_SBWINDOW_MASK;
int err = 0, i;
- u8 addr[3];
-
- if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
- return -ENOMEDIUM;
-
- addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
- addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
- addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
-
- for (i = 0; i < 3; i++) {
- err = brcmf_sdiod_regrw_helper(sdiodev,
- SBSDIO_FUNC1_SBADDRLOW + i,
- sizeof(u8), &addr[i], true);
- if (err) {
- brcmf_err("failed at addr: 0x%0x\n",
- SBSDIO_FUNC1_SBADDRLOW + i);
- break;
- }
- }
- return err;
-}
+ if (bar0 == sdiodev->sbwad)
+ return 0;
-static int
-brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
-{
- uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
- int err = 0;
+ v = bar0 >> 8;
- if (bar0 != sdiodev->sbwad) {
- err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
- if (err)
- return err;
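+ /* the window is 32 KiB aligned; write its base one byte at a time
+ * into the SBADDRLOW/MID/HIGH registers
+ */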
+ for (i = 0; i < 3 && !err; i++, v >>= 8)
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SBADDRLOW + i,
+ v & 0xff, &err);
+ if (!err)
sdiodev->sbwad = bar0;
- }
-
- *addr &= SBSDIO_SB_OFT_ADDR_MASK;
- if (width == 4)
- *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
-
- return 0;
+ return err;
}
-u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
- u8 data;
+ u32 data = 0;
int retval;
- brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
- retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
- false);
- brcmf_dbg(SDIO, "data:0x%02x\n", data);
+ retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
+ if (retval)
+ goto out;
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ data = sdio_readl(sdiodev->func1, addr, &retval);
+out:
if (ret)
*ret = retval;
return data;
}
-u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ u32 data, int *ret)
{
- u32 data = 0;
int retval;
- brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
- retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
+ retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
if (retval)
- goto done;
- retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
- false);
- brcmf_dbg(SDIO, "data:0x%08x\n", data);
-
-done:
- if (ret)
- *ret = retval;
+ goto out;
- return data;
-}
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
-void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
- u8 data, int *ret)
-{
- int retval;
+ sdio_writel(sdiodev->func1, data, addr, &retval);
- brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
- retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
- true);
+out:
if (ret)
*ret = retval;
}
-void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
- u32 data, int *ret)
+static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev,
+ struct sdio_func *func, u32 addr,
+ struct sk_buff *skb)
{
- int retval;
+ unsigned int req_sz;
+ int err;
- brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
- retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
- if (retval)
- goto done;
- retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
- true);
+ /* A single skb uses the standard mmc interface */
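+ /* round the request up to a whole number of 32-bit words */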
+ req_sz = skb->len + 3;
+ req_sz &= (uint)~3;
-done:
- if (ret)
- *ret = retval;
+ switch (func->num) {
+ case 1:
+ err = sdio_memcpy_fromio(func, ((u8 *)(skb->data)), addr,
+ req_sz);
+ break;
+ case 2:
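+ /* function 2 reads are a FIFO operation (fixed address) */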
+ err = sdio_readsb(func, ((u8 *)(skb->data)), addr, req_sz);
+ break;
+ default:
+ /* bail out as things are really fishy here */
+ WARN(1, "invalid sdio function number: %d\n", func->num);
+ err = -ENOMEDIUM;
+ }
+
+ if (err == -ENOMEDIUM)
+ brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+
+ return err;
}
-static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
- bool write, u32 addr, struct sk_buff *pkt)
+static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
+ struct sdio_func *func, u32 addr,
+ struct sk_buff *skb)
{
unsigned int req_sz;
int err;
/* Single skb use the standard mmc interface */
- req_sz = pkt->len + 3;
+ req_sz = skb->len + 3;
req_sz &= (uint)~3;
- if (write)
- err = sdio_memcpy_toio(sdiodev->func[fn], addr,
- ((u8 *)(pkt->data)), req_sz);
- else if (fn == 1)
- err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
- addr, req_sz);
- else
- /* function 2 read is FIFO operation */
- err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
- req_sz);
+ err = sdio_memcpy_toio(func, addr, ((u8 *)(skb->data)), req_sz);
+
if (err == -ENOMEDIUM)
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+
return err;
}
/**
* brcmf_sdiod_sglist_rw - SDIO interface function for block data access
* @sdiodev: brcmfmac sdio device
- * @fn: SDIO function number
+ * @func: SDIO function
* @write: direction flag
* @addr: dongle memory address as source/destination
* @pkt: skb pointer
@@ -503,7 +354,8 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
* stack for block data access. It assumes that the skb passed down by the
* caller has already been padded and aligned.
*/
-static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
+static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
+ struct sdio_func *func,
bool write, u32 addr,
struct sk_buff_head *pktlist)
{
@@ -529,7 +381,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
req_sz = 0;
skb_queue_walk(pktlist, pkt_next)
req_sz += pkt_next->len;
- req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
+ req_sz = ALIGN(req_sz, func->cur_blksize);
while (req_sz > PAGE_SIZE) {
pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
if (pkt_next == NULL) {
@@ -548,7 +400,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
target_list = &local_list;
}
- func_blk_sz = sdiodev->func[fn]->cur_blksize;
+ func_blk_sz = func->cur_blksize;
max_req_sz = sdiodev->max_request_size;
max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
target_list->qlen);
@@ -565,10 +417,10 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
mmc_cmd.opcode = SD_IO_RW_EXTENDED;
mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
- mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
- mmc_cmd.arg |= 1<<27; /* block mode */
+ mmc_cmd.arg |= (func->num & 0x7) << 28; /* SDIO func num */
+ mmc_cmd.arg |= 1 << 27; /* block mode */
/* for function 1 the addr will be incremented */
- mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
+ mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
mmc_req.cmd = &mmc_cmd;
mmc_req.data = &mmc_dat;
@@ -614,11 +466,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
/* incrementing addr for function 1 */
- if (fn == 1)
+ if (func->num == 1)
addr += req_sz;
- mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
- mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
+ mmc_set_data_timeout(&mmc_dat, func->card);
+ mmc_wait_for_req(func->card->host, &mmc_req);
ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
if (ret == -ENOMEDIUM) {
@@ -686,16 +538,19 @@ int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
{
- u32 addr = sdiodev->sbwad;
+ u32 addr = sdiodev->cc_core->base;
int err = 0;
brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
- err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+ err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
if (err)
goto done;
- err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, pkt);
done:
return err;
@@ -706,25 +561,28 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
{
struct sk_buff *glom_skb = NULL;
struct sk_buff *skb;
- u32 addr = sdiodev->sbwad;
+ u32 addr = sdiodev->cc_core->base;
int err = 0;
brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
addr, pktq->qlen);
- err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+ err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
if (err)
goto done;
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
if (pktq->qlen == 1)
- err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
- pktq->next);
+ err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
+ pktq->next);
else if (!sdiodev->sg_support) {
glom_skb = brcmu_pkt_buf_get_skb(totlen);
if (!glom_skb)
return -ENOMEM;
- err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
- glom_skb);
+ err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
+ glom_skb);
if (err)
goto done;
@@ -733,8 +591,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
skb_pull(glom_skb, skb->len);
}
} else
- err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
- pktq);
+ err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, false,
+ addr, pktq);
done:
brcmu_pkt_buf_free_skb(glom_skb);
@@ -744,10 +602,11 @@ done:
int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
- u32 addr = sdiodev->sbwad;
+ u32 addr = sdiodev->cc_core->base;
int err;
mypkt = brcmu_pkt_buf_get_skb(nbytes);
+
if (!mypkt) {
brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
nbytes);
@@ -756,40 +615,49 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
memcpy(mypkt->data, buf, nbytes);
- err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+ err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
+ if (err) {
+ brcmu_pkt_buf_free_skb(mypkt);
+ return err;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
if (!err)
- err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
- mypkt);
+ err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr,
+ mypkt);
brcmu_pkt_buf_free_skb(mypkt);
- return err;
+ return err;
}
int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
struct sk_buff_head *pktq)
{
struct sk_buff *skb;
- u32 addr = sdiodev->sbwad;
+ u32 addr = sdiodev->cc_core->base;
int err;
brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
- err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+ err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
if (err)
return err;
- if (pktq->qlen == 1 || !sdiodev->sg_support)
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ if (pktq->qlen == 1 || !sdiodev->sg_support) {
skb_queue_walk(pktq, skb) {
- err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
- addr, skb);
+ err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2,
+ addr, skb);
if (err)
break;
}
- else
- err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
- pktq);
+ } else {
+ err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, true,
+ addr, pktq);
+ }
return err;
}
@@ -798,7 +666,7 @@ int
brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
u8 *data, uint size)
{
- int bcmerror = 0;
+ int err = 0;
struct sk_buff *pkt;
u32 sdaddr;
uint dsize;
@@ -818,13 +686,13 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
else
dsize = size;
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
/* Do the transfer(s) */
while (size) {
/* Set the backplane window to include the start address */
- bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
- if (bcmerror)
+ err = brcmf_sdiod_set_backplane_window(sdiodev, address);
+ if (err)
break;
brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
@@ -835,11 +703,17 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
skb_put(pkt, dsize);
- if (write)
+
+ if (write) {
memcpy(pkt->data, data, dsize);
- bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
- sdaddr, pkt);
- if (bcmerror) {
+ err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func1,
+ sdaddr, pkt);
+ } else {
+ err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func1,
+ sdaddr, pkt);
+ }
+
+ if (err) {
brcmf_err("membytes transfer failed\n");
break;
}
@@ -859,24 +733,17 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
dev_kfree_skb(pkt);
- /* Return the window to backplane enumeration space for core access */
- if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
- brcmf_err("FAILED to set window back to 0x%x\n",
- sdiodev->sbwad);
+ sdio_release_host(sdiodev->func1);
- sdio_release_host(sdiodev->func[1]);
-
- return bcmerror;
+ return err;
}
-int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func)
{
- char t_func = (char)fn;
brcmf_dbg(SDIO, "Enter\n");
- /* issue abort cmd52 command through F0 */
- brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
- sizeof(t_func), &t_func, true);
+ /* Issue abort cmd52 command through F0 */
+ brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, func->num, NULL);
brcmf_dbg(SDIO, "Exit\n");
return 0;
@@ -890,7 +757,7 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
uint nents;
int err;
- func = sdiodev->func[2];
+ func = sdiodev->func2;
host = func->card->host;
sdiodev->sg_support = host->max_segs > 1;
max_blocks = min_t(uint, host->max_blk_count, 511u);
@@ -951,17 +818,17 @@ static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
brcmf_sdio_trigger_dpc(sdiodev->bus);
wait_event(sdiodev->freezer->thread_freeze,
atomic_read(expect) == sdiodev->freezer->frozen_count);
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
res = brcmf_sdio_sleep(sdiodev->bus, true);
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
return res;
}
static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
{
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
brcmf_sdio_sleep(sdiodev->bus, false);
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
atomic_set(&sdiodev->freezer->freezing, 0);
complete_all(&sdiodev->freezer->resumed);
}
@@ -1011,19 +878,19 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
brcmf_sdiod_freezer_detach(sdiodev);
/* Disable Function 2 */
- sdio_claim_host(sdiodev->func[2]);
- sdio_disable_func(sdiodev->func[2]);
- sdio_release_host(sdiodev->func[2]);
+ sdio_claim_host(sdiodev->func2);
+ sdio_disable_func(sdiodev->func2);
+ sdio_release_host(sdiodev->func2);
/* Disable Function 1 */
- sdio_claim_host(sdiodev->func[1]);
- sdio_disable_func(sdiodev->func[1]);
- sdio_release_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
+ sdio_disable_func(sdiodev->func1);
+ sdio_release_host(sdiodev->func1);
sg_free_table(&sdiodev->sgtable);
sdiodev->sbwad = 0;
- pm_runtime_allow(sdiodev->func[1]->card->host->parent);
+ pm_runtime_allow(sdiodev->func1->card->host->parent);
return 0;
}
@@ -1039,29 +906,27 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
int ret = 0;
- sdiodev->num_funcs = 2;
+ sdio_claim_host(sdiodev->func1);
- sdio_claim_host(sdiodev->func[1]);
-
- ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
+ ret = sdio_set_block_size(sdiodev->func1, SDIO_FUNC1_BLOCKSIZE);
if (ret) {
brcmf_err("Failed to set F1 blocksize\n");
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
goto out;
}
- ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
+ ret = sdio_set_block_size(sdiodev->func2, SDIO_FUNC2_BLOCKSIZE);
if (ret) {
brcmf_err("Failed to set F2 blocksize\n");
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
goto out;
}
/* increase F2 timeout */
- sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;
+ sdiodev->func2->enable_timeout = SDIO_WAIT_F2RDY;
/* Enable Function 1 */
- ret = sdio_enable_func(sdiodev->func[1]);
- sdio_release_host(sdiodev->func[1]);
+ ret = sdio_enable_func(sdiodev->func1);
+ sdio_release_host(sdiodev->func1);
if (ret) {
brcmf_err("Failed to enable F1: err=%d\n", ret);
goto out;
@@ -1077,7 +942,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
ret = -ENODEV;
goto out;
}
- brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host);
+ brcmf_sdiod_host_fixup(sdiodev->func2->card->host);
out:
if (ret)
brcmf_sdiod_remove(sdiodev);
@@ -1138,6 +1003,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_dbg(SDIO, "Function#: %d\n", func->num);
dev = &func->dev;
+
+ /* Set MMC_QUIRK_LENIENT_FN0 so the MMC core permits the
+ * vendor-specific F0 register writes this driver relies on
+ */
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+
/* prohibit ACPI power management for this device */
brcmf_sdiod_acpi_set_power_manageable(dev, 0);
@@ -1161,17 +1030,15 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
/* store refs to functions used. mmc_card does
* not hold the F0 function pointer.
*/
- sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
- sdiodev->func[0]->num = 0;
- sdiodev->func[1] = func->card->sdio_func[0];
- sdiodev->func[2] = func;
+ sdiodev->func1 = func->card->sdio_func[0];
+ sdiodev->func2 = func;
sdiodev->bus_if = bus_if;
bus_if->bus_priv.sdio = sdiodev;
bus_if->proto_type = BRCMF_PROTO_BCDC;
dev_set_drvdata(&func->dev, bus_if);
- dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
- sdiodev->dev = &sdiodev->func[1]->dev;
+ dev_set_drvdata(&sdiodev->func1->dev, bus_if);
+ sdiodev->dev = &sdiodev->func1->dev;
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
@@ -1187,8 +1054,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
fail:
dev_set_drvdata(&func->dev, NULL);
- dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
- kfree(sdiodev->func[0]);
+ dev_set_drvdata(&sdiodev->func1->dev, NULL);
kfree(sdiodev);
kfree(bus_if);
return err;
@@ -1217,11 +1083,10 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
/* only proceed with rest of cleanup if func 1 */
brcmf_sdiod_remove(sdiodev);
- dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
- dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
+ dev_set_drvdata(&sdiodev->func1->dev, NULL);
+ dev_set_drvdata(&sdiodev->func2->dev, NULL);
kfree(bus_if);
- kfree(sdiodev->func[0]);
kfree(sdiodev);
}
@@ -1247,7 +1112,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
func = container_of(dev, struct sdio_func, dev);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
- if (func->num != SDIO_FUNC_1)
+ if (func->num != 1)
return 0;
@@ -1264,7 +1129,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
else
sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
}
- if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
+ if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
return 0;
}
@@ -1276,7 +1141,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct sdio_func *func = container_of(dev, struct sdio_func, dev);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
- if (func->num != SDIO_FUNC_2)
+ if (func->num != 2)
return 0;
brcmf_sdiod_freezer_off(sdiodev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index c5d1a1cbf601..f7b30ce2300d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -1338,6 +1338,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
switch (pub->chip) {
case BRCM_CC_4354_CHIP_ID:
case BRCM_CC_4356_CHIP_ID:
+ case BRCM_CC_4345_CHIP_ID:
/* explicitly check SR engine enable bit */
pmu_cc3_mask = BIT(2);
/* fall-through */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 6a59d0609d30..9be0b051066a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -182,12 +182,9 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
err = request_firmware(&clm, clm_name, dev);
if (err) {
- if (err == -ENOENT) {
- brcmf_dbg(INFO, "continue with CLM data currently present in firmware\n");
- return 0;
- }
- brcmf_err("request CLM blob file failed (%d)\n", err);
- return err;
+ brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n",
+ err);
+ return 0;
}
chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 53ae30259989..47de35a33853 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -130,13 +130,19 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
}
}
+#define MAX_CAPS_BUFFER_SIZE 512
static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp)
{
- char caps[256];
+ char caps[MAX_CAPS_BUFFER_SIZE];
enum brcmf_feat_id id;
- int i;
+ int i, err;
+
+ err = brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps));
+ if (err) {
+ brcmf_err("could not get firmware cap (%d)\n", err);
+ return;
+ }
- brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps));
brcmf_dbg(INFO, "[ %s]\n", caps);
for (i = 0; i < ARRAY_SIZE(brcmf_fwcap_map); i++) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index f6a2df94dba7..f2cfdd3b2bf1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -107,7 +107,7 @@ static s32
brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
{
struct brcmf_pub *drvr = ifp->drvr;
- s32 err;
+ s32 err, fwerr;
if (drvr->bus_if->state != BRCMF_BUS_UP) {
brcmf_err("bus is down. we have nothing to do.\n");
@@ -117,16 +117,20 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
if (data != NULL)
len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
if (set)
- err = brcmf_proto_set_dcmd(drvr, ifp->ifidx, cmd, data, len);
+ err = brcmf_proto_set_dcmd(drvr, ifp->ifidx, cmd,
+ data, len, &fwerr);
else
- err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd, data, len);
-
- if (err >= 0)
- return 0;
-
- brcmf_dbg(FIL, "Failed: %s (%d)\n",
- brcmf_fil_get_errstr((u32)(-err)), err);
-
+ err = brcmf_proto_query_dcmd(drvr, ifp->ifidx, cmd,
+ data, len, &fwerr);
+
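+ /* err reflects the bus transaction; fwerr carries the firmware
+ * status returned in the response
+ */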
+ if (err) {
+ brcmf_dbg(FIL, "Failed: %s (%d)\n",
+ brcmf_fil_get_errstr((u32)(-err)), err);
+ } else if (fwerr < 0) {
+ brcmf_dbg(FIL, "Firmware error: %s (%d)\n",
+ brcmf_fil_get_errstr((u32)(-fwerr)), fwerr);
+ err = -EBADE;
+ }
return err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index d2c834c3b2fc..e212a791a072 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -477,7 +477,7 @@ static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
- uint cmd, void *buf, uint len)
+ uint cmd, void *buf, uint len, int *fwerr)
{
struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
struct sk_buff *skb = NULL;
@@ -485,6 +485,7 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
int err;
brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
+ *fwerr = 0;
msgbuf->ctl_completed = false;
err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
if (err)
@@ -508,14 +509,15 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
}
brcmu_pkt_buf_free_skb(skb);
- return msgbuf->ioctl_resp_status;
+ *fwerr = msgbuf->ioctl_resp_status;
+ return 0;
}
static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
- uint cmd, void *buf, uint len)
+ uint cmd, void *buf, uint len, int *fwerr)
{
- return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len);
+ return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 3c87157f5b85..8752707557bf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1251,14 +1251,14 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
u64 address;
u32 addr;
- devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
- BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
- &devinfo->shared.scratch_dmahandle, GFP_KERNEL);
+ devinfo->shared.scratch =
+ dma_zalloc_coherent(&devinfo->pdev->dev,
+ BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
+ &devinfo->shared.scratch_dmahandle,
+ GFP_KERNEL);
if (!devinfo->shared.scratch)
goto fail;
- memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
-
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
address = (u64)devinfo->shared.scratch_dmahandle;
@@ -1268,14 +1268,14 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
- devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev,
- BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
- &devinfo->shared.ringupd_dmahandle, GFP_KERNEL);
+ devinfo->shared.ringupd =
+ dma_zalloc_coherent(&devinfo->pdev->dev,
+ BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
+ &devinfo->shared.ringupd_dmahandle,
+ GFP_KERNEL);
if (!devinfo->shared.ringupd)
goto fail;
- memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
-
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
address = (u64)devinfo->shared.ringupd_dmahandle;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index 2404f8a7c31c..8a8e08f09ea0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -30,9 +30,9 @@ struct brcmf_proto {
int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws,
struct sk_buff *skb, struct brcmf_if **ifp);
int (*query_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd,
- void *buf, uint len);
+ void *buf, uint len, int *fwerr);
int (*set_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
- uint len);
+ uint len, int *fwerr);
int (*tx_queue_data)(struct brcmf_pub *drvr, int ifidx,
struct sk_buff *skb);
int (*txdata)(struct brcmf_pub *drvr, int ifidx, u8 offset,
@@ -71,14 +71,16 @@ static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
return drvr->proto->hdrpull(drvr, do_fws, skb, ifp);
}
static inline int brcmf_proto_query_dcmd(struct brcmf_pub *drvr, int ifidx,
- uint cmd, void *buf, uint len)
+ uint cmd, void *buf, uint len,
+ int *fwerr)
{
- return drvr->proto->query_dcmd(drvr, ifidx, cmd, buf, len);
+ return drvr->proto->query_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
}
static inline int brcmf_proto_set_dcmd(struct brcmf_pub *drvr, int ifidx,
- uint cmd, void *buf, uint len)
+ uint cmd, void *buf, uint len,
+ int *fwerr)
{
- return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len);
+ return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
}
static inline int brcmf_proto_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index cdf9e4161592..08686147b59d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -159,8 +159,8 @@ struct rte_console {
/* manfid tuple length, include tuple, link bytes */
#define SBSDIO_CIS_MANFID_TUPLE_LEN 6
-#define CORE_BUS_REG(base, field) \
- (base + offsetof(struct sdpcmd_regs, field))
+#define SD_REG(field) \
+ (offsetof(struct sdpcmd_regs, field))
/* SDIO function 1 register CHIPCLKCSR */
/* Force ALP request to backplane */
@@ -436,6 +436,7 @@ struct brcmf_sdio_count {
struct brcmf_sdio {
struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
struct brcmf_chip *ci; /* Chip info struct */
+ struct brcmf_core *sdio_core; /* sdio core info struct */
u32 hostintmask; /* Copy of Host Interrupt Mask */
atomic_t intstatus; /* Intstatus bits (events) pending */
@@ -659,32 +660,6 @@ static bool data_ok(struct brcmf_sdio *bus)
((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
}
-/*
- * Reads a register in the SDIO hardware block. This block occupies a series of
- * adresses on the 32 bit backplane bus.
- */
-static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
-{
- struct brcmf_core *core;
- int ret;
-
- core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
- *regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
-
- return ret;
-}
-
-static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
-{
- struct brcmf_core *core;
- int ret;
-
- core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
- brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
-
- return ret;
-}
-
static int
brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
{
@@ -697,8 +672,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
/* 1st KSO write goes to AOS wake up core if device is asleep */
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- wr_val, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
if (on) {
/* device WAKEUP through KSO:
@@ -724,7 +698,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
* just one write attempt may fail,
* read it back until it matches written value
*/
- rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ rd_val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
&err);
if (!err) {
if ((rd_val & bmask) == cmp_val)
@@ -734,9 +708,11 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
/* bail out upon subsequent access errors */
if (err && (err_cnt++ > BRCMF_SDIO_MAX_ACCESS_ERRORS))
break;
+
udelay(KSO_WAIT_US);
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- wr_val, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val,
+ &err);
+
} while (try_cnt++ < MAX_KSO_ATTEMPTS);
if (try_cnt > 2)
@@ -772,15 +748,15 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
clkreq =
bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- clkreq, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkreq, &err);
if (err) {
brcmf_err("HT Avail request error: %d\n", err);
return -EBADE;
}
/* Check current status */
- clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ clkctl = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
brcmf_err("HT Avail read error: %d\n", err);
@@ -790,35 +766,34 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
/* Go to pending and await interrupt if appropriate */
if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
/* Allow only clock-available interrupt */
- devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ devctl = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_DEVICE_CTL, &err);
if (err) {
- brcmf_err("Devctl error setting CA: %d\n",
- err);
+ brcmf_err("Devctl error setting CA: %d\n", err);
return -EBADE;
}
devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
bus->clkstate = CLK_PENDING;
return 0;
} else if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
- devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ devctl = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
}
/* Otherwise, wait here (polling) for HT Avail */
timeout = jiffies +
msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
- clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ clkctl = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR,
&err);
if (time_after(jiffies, timeout))
@@ -852,16 +827,16 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
- devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ devctl = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
}
bus->clkstate = CLK_SDONLY;
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- clkreq, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkreq, &err);
brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
if (err) {
brcmf_err("Failed access turning clock off: %d\n",
@@ -951,14 +926,14 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
/* Going to sleep */
if (sleep) {
- clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
+ clkcsr = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR,
&err);
if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
brcmf_dbg(SDIO, "no clock, set ALP\n");
- brcmf_sdiod_regwb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR,
- SBSDIO_ALP_AVAIL_REQ, &err);
+ brcmf_sdiod_writeb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_ALP_AVAIL_REQ, &err);
}
err = brcmf_sdio_kso_control(bus, false);
} else {
@@ -1004,7 +979,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
struct sdpcm_shared_le sh_le;
__le32 addr_le;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_bus_sleep(bus, false, false);
/*
@@ -1038,7 +1013,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
if (rv < 0)
goto fail;
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
/* Endianness */
sh->flags = le32_to_cpu(sh_le.flags);
@@ -1060,7 +1035,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
fail:
brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
rv, addr);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
return rv;
}
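All sdio_claim_host()/sdio_release_host() pairs in this file move from the func[1] array slot to the dedicated func1 pointer; the locking semantics are unchanged. Claiming the host gives the caller exclusive access to the MMC controller for the enclosed register and buffer I/O, so each claim must be matched by a release on every exit path, as the fail label above illustrates. The basic shape (do_backplane_io() is a hypothetical placeholder):

	sdio_claim_host(sdiodev->func1);	/* exclusive MMC host access */
	err = do_backplane_io(sdiodev);		/* register/buffer I/O here */
	sdio_release_host(sdiodev->func1);	/* released on every path */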
@@ -1079,6 +1054,8 @@ static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
{
+ struct brcmf_sdio_dev *sdiod = bus->sdiodev;
+ struct brcmf_core *core = bus->sdio_core;
u32 intstatus = 0;
u32 hmb_data;
u8 fcbits;
@@ -1087,12 +1064,14 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
brcmf_dbg(SDIO, "Enter\n");
/* Read mailbox data and ack that we did so */
- ret = r_sdreg32(bus, &hmb_data,
- offsetof(struct sdpcmd_regs, tohostmailboxdata));
+ hmb_data = brcmf_sdiod_readl(sdiod,
+ core->base + SD_REG(tohostmailboxdata),
+ &ret);
+
+ if (!ret)
+ brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox),
+ SMB_INT_ACK, &ret);
- if (ret == 0)
- w_sdreg32(bus, SMB_INT_ACK,
- offsetof(struct sdpcmd_regs, tosbmailbox));
bus->sdcnt.f1regdata += 2;
/* dongle indicates the firmware has halted/crashed */
@@ -1166,6 +1145,8 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
{
+ struct brcmf_sdio_dev *sdiod = bus->sdiodev;
+ struct brcmf_core *core = bus->sdio_core;
uint retries = 0;
u16 lastrbc;
u8 hi, lo;
@@ -1176,18 +1157,18 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
rtx ? ", send NAK" : "");
if (abort)
- brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
+ brcmf_sdiod_abort(bus->sdiodev, bus->sdiodev->func2);
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_RF_TERM, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM,
+ &err);
bus->sdcnt.f1regdata++;
/* Wait until the packet has been flushed (device/FIFO stable) */
for (lastrbc = retries = 0xffff; retries > 0; retries--) {
- hi = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_RFRAMEBCHI, &err);
- lo = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_RFRAMEBCLO, &err);
+ hi = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCHI,
+ &err);
+ lo = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCLO,
+ &err);
bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
@@ -1207,8 +1188,8 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
if (rtx) {
bus->sdcnt.rxrtx++;
- err = w_sdreg32(bus, SMB_NAK,
- offsetof(struct sdpcmd_regs, tosbmailbox));
+ brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox),
+ SMB_NAK, &err);
bus->sdcnt.f1regdata++;
if (err == 0)
@@ -1228,13 +1209,13 @@ static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
brcmf_err("sdio error, abort command and terminate frame\n");
bus->sdcnt.tx_sderrs++;
- brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
+ brcmf_sdiod_abort(sdiodev, sdiodev->func2);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
- hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
- lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ hi = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
@@ -1584,10 +1565,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
* read directly into the chained packet, or allocate a large
* packet and copy into the chain.
*/
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
&bus->glom, dlen);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
bus->sdcnt.f2rxdata++;
/* On failure, kill the superframe */
@@ -1595,11 +1576,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
brcmf_err("glom read of %d bytes failed: %d\n",
dlen, errcode);
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, true, false);
bus->sdcnt.rxglomfail++;
brcmf_sdio_free_glom(bus);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
return 0;
}
@@ -1609,10 +1590,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
rd_new.seq_num = rxseq;
rd_new.len = dlen;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
BRCMF_SDIO_FT_SUPER);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
bus->cur_read.len = rd_new.len_nxtfrm << 4;
/* Remove superframe header, remember offset */
@@ -1628,10 +1609,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
rd_new.len = pnext->len;
rd_new.seq_num = rxseq++;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
BRCMF_SDIO_FT_SUB);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
pnext->data, 32, "subframe:\n");
@@ -1640,11 +1621,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (errcode) {
/* Terminate frame on error */
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, true, false);
bus->sdcnt.rxglomfail++;
brcmf_sdio_free_glom(bus);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
bus->cur_read.len = 0;
return 0;
}
@@ -1852,7 +1833,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd->len_left = rd->len;
/* read header first for unknown frame length */
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
if (!rd->len) {
ret = brcmf_sdiod_recv_buf(bus->sdiodev,
bus->rxhdr, BRCMF_FIRSTREAD);
@@ -1862,7 +1843,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
ret);
bus->sdcnt.rx_hdrfail++;
brcmf_sdio_rxfail(bus, true, true);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
continue;
}
@@ -1872,7 +1853,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
BRCMF_SDIO_FT_NORMAL)) {
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
if (!bus->rxpending)
break;
else
@@ -1888,7 +1869,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd->len_nxtfrm = 0;
/* treat all packets as events if we don't know */
rd->channel = SDPCM_EVENT_CHANNEL;
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
continue;
}
rd->len_left = rd->len > BRCMF_FIRSTREAD ?
@@ -1905,7 +1886,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
brcmf_err("brcmu_pkt_buf_get_skb failed\n");
brcmf_sdio_rxfail(bus, false,
RETRYCHAN(rd->channel));
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
continue;
}
skb_pull(pkt, head_read);
@@ -1913,16 +1894,16 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt);
bus->sdcnt.f2rxdata++;
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
if (ret < 0) {
brcmf_err("read %d bytes from channel %d failed: %d\n",
rd->len, rd->channel, ret);
brcmu_pkt_buf_free_skb(pkt);
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, true,
RETRYCHAN(rd->channel));
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
continue;
}
@@ -1933,7 +1914,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
} else {
memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
rd_new.seq_num = rd->seq_num;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
@@ -1946,11 +1927,11 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
roundup(rd_new.len, 16) >> 4);
rd->len = 0;
brcmf_sdio_rxfail(bus, true, true);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
brcmu_pkt_buf_free_skb(pkt);
continue;
}
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
rd->len_nxtfrm = rd_new.len_nxtfrm;
rd->channel = rd_new.channel;
rd->dat_offset = rd_new.dat_offset;
@@ -1966,9 +1947,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd_new.seq_num);
/* Force retry w/normal header read */
rd->len = 0;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, false, true);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
brcmu_pkt_buf_free_skb(pkt);
continue;
}
@@ -1991,9 +1972,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
} else {
brcmf_err("%s: glom superframe w/o "
"descriptor!\n", __func__);
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, false, false);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
}
/* prepare the descriptor for the next read */
rd->len = rd->len_nxtfrm << 4;
@@ -2091,7 +2072,7 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
int ntail, ret;
sdiodev = bus->sdiodev;
- blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+ blksize = sdiodev->func2->cur_blksize;
/* sg entry alignment should be a divisor of block size */
WARN_ON(blksize % bus->sgentry_align);
@@ -2270,14 +2251,14 @@ static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
if (ret)
goto done;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
bus->sdcnt.f2txdata++;
if (ret < 0)
brcmf_sdio_txfail(bus);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
done:
brcmf_sdio_txpkt_postp(bus, pktq);
@@ -2295,6 +2276,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt;
struct sk_buff_head pktq;
+ u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
u32 intstatus = 0;
int ret = 0, prec_out, i;
uint cnt = 0;
@@ -2332,11 +2314,11 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
/* In poll mode, need to check for other events */
if (!bus->intr) {
/* Check device status, signal pending interrupt */
- sdio_claim_host(bus->sdiodev->func[1]);
- ret = r_sdreg32(bus, &intstatus,
- offsetof(struct sdpcmd_regs,
- intstatus));
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
+ intstatus = brcmf_sdiod_readl(bus->sdiodev,
+ intstat_addr, &ret);
+ sdio_release_host(bus->sdiodev->func1);
+
bus->sdcnt.f2txdata++;
if (ret != 0)
break;
@@ -2419,12 +2401,13 @@ static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
static void brcmf_sdio_bus_stop(struct device *dev)
{
- u32 local_hostintmask;
- u8 saveclk;
- int err;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
+ struct brcmf_core *core = bus->sdio_core;
+ u32 local_hostintmask;
+ u8 saveclk;
+ int err;
brcmf_dbg(TRACE, "Enter\n");
@@ -2435,35 +2418,37 @@ static void brcmf_sdio_bus_stop(struct device *dev)
}
if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
/* Enable clock for device interrupts */
brcmf_sdio_bus_sleep(bus, false, false);
/* Disable and clear interrupts at the chip level also */
- w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
+ brcmf_sdiod_writel(sdiodev, core->base + SD_REG(hostintmask),
+ 0, NULL);
+
local_hostintmask = bus->hostintmask;
bus->hostintmask = 0;
/* Force backplane clocks to assure F2 interrupt propagates */
- saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
&err);
if (!err)
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
if (err)
brcmf_err("Failed to force clock for F2: err %d\n",
err);
/* Turn off the bus (F2), free any pending packets */
brcmf_dbg(INTR, "disable SDIO interrupts\n");
- sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
+ sdio_disable_func(sdiodev->func2);
/* Clear any pending interrupts now that F2 is disabled */
- w_sdreg32(bus, local_hostintmask,
- offsetof(struct sdpcmd_regs, intstatus));
+ brcmf_sdiod_writel(sdiodev, core->base + SD_REG(intstatus),
+ local_hostintmask, NULL);
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
}
/* Clear the data packet queues */
brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
@@ -2501,15 +2486,14 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
- struct brcmf_core *buscore;
+ struct brcmf_core *core = bus->sdio_core;
u32 addr;
unsigned long val;
int ret;
- buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
- addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
+ addr = core->base + SD_REG(intstatus);
- val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
+ val = brcmf_sdiod_readl(bus->sdiodev, addr, &ret);
bus->sdcnt.f1regdata++;
if (ret != 0)
return ret;
@@ -2519,7 +2503,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
/* Clear interrupts */
if (val) {
- brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
+ brcmf_sdiod_writel(bus->sdiodev, addr, val, &ret);
bus->sdcnt.f1regdata++;
atomic_or(val, &bus->intstatus);
}
@@ -2529,7 +2513,9 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
{
+ struct brcmf_sdio_dev *sdiod = bus->sdiodev;
u32 newstatus = 0;
+ u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
unsigned long intstatus;
uint txlimit = bus->txbound; /* Tx frames to send before resched */
uint framecnt; /* Temporary counter of tx/rx frames */
@@ -2537,7 +2523,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
/* If waiting for HTAVAIL, check status */
if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
@@ -2545,23 +2531,23 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
#ifdef DEBUG
/* Check for inconsistent device control */
- devctl = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ &err);
#endif /* DEBUG */
/* Read CSR, if clock on switch to AVAIL, else ignore */
- clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ clkctl = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR, &err);
brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
devctl, clkctl);
if (SBSDIO_HTAV(clkctl)) {
- devctl = brcmf_sdiod_regrb(bus->sdiodev,
+ devctl = brcmf_sdiod_readb(bus->sdiodev,
SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdiod_writeb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, devctl, &err);
bus->clkstate = CLK_AVAIL;
}
}
@@ -2584,11 +2570,10 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
*/
if (intstatus & I_HMB_FC_CHANGE) {
intstatus &= ~I_HMB_FC_CHANGE;
- err = w_sdreg32(bus, I_HMB_FC_CHANGE,
- offsetof(struct sdpcmd_regs, intstatus));
+ brcmf_sdiod_writel(sdiod, intstat_addr, I_HMB_FC_CHANGE, &err);
+
+ newstatus = brcmf_sdiod_readl(sdiod, intstat_addr, &err);
- err = r_sdreg32(bus, &newstatus,
- offsetof(struct sdpcmd_regs, intstatus));
bus->sdcnt.f1regdata += 2;
atomic_set(&bus->fcstate,
!!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
@@ -2601,7 +2586,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
intstatus |= brcmf_sdio_hostmail(bus);
}
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
/* Generally don't ask for these, can get CRC errors... */
if (intstatus & I_WR_OOSYNC) {
@@ -2644,7 +2629,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
data_ok(bus)) {
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
if (bus->ctrl_frame_stat) {
err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
bus->ctrl_frame_len);
@@ -2652,7 +2637,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
wmb();
bus->ctrl_frame_stat = false;
}
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
brcmf_sdio_wait_event_wakeup(bus);
}
/* Send queued frames (limit 1 if rx may still be pending) */
@@ -2668,14 +2653,14 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
brcmf_err("failed backplane access over SDIO, halting operation\n");
atomic_set(&bus->intstatus, 0);
if (bus->ctrl_frame_stat) {
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
if (bus->ctrl_frame_stat) {
bus->ctrl_frame_err = -ENODEV;
wmb();
bus->ctrl_frame_stat = false;
brcmf_sdio_wait_event_wakeup(bus);
}
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
}
} else if (atomic_read(&bus->intstatus) ||
atomic_read(&bus->ipend) > 0 ||
@@ -2890,13 +2875,13 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
CTL_DONE_TIMEOUT);
ret = 0;
if (bus->ctrl_frame_stat) {
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
if (bus->ctrl_frame_stat) {
brcmf_dbg(SDIO, "ctrl_frame timeout\n");
bus->ctrl_frame_stat = false;
ret = -ETIMEDOUT;
}
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
}
if (!ret) {
brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
@@ -3020,7 +3005,7 @@ static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
return 0;
}
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
if (sh->assert_file_addr != 0) {
error = brcmf_sdiod_ramrw(bus->sdiodev, false,
sh->assert_file_addr, (u8 *)file, 80);
@@ -3033,7 +3018,7 @@ static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
if (error < 0)
return error;
}
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
seq_printf(seq, "dongle assert: %s:%d: assert(%s)\n",
file, sh->assert_line, expr);
@@ -3307,7 +3292,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
int bcmerror;
u32 rstvec;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
rstvec = get_unaligned_le32(fw->data);
@@ -3336,7 +3321,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
err:
brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
return bcmerror;
}
@@ -3347,31 +3332,31 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
- val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
+ val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
if (err) {
brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
return;
}
val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
if (err) {
brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
return;
}
/* Add CMD14 Support */
- brcmf_sdiod_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
- (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
- SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
- &err);
+ brcmf_sdiod_func0_wb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
+ (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
+ SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
+ &err);
if (err) {
brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
return;
}
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- SBSDIO_FORCE_HT, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HT, &err);
if (err) {
brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
return;
@@ -3385,16 +3370,17 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
/* enable KSO bit */
static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
{
+ struct brcmf_core *core = bus->sdio_core;
u8 val;
int err = 0;
brcmf_dbg(TRACE, "Enter\n");
/* KSO bit added in SDIO core rev 12 */
- if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
+ if (core->rev < 12)
return 0;
- val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
+ val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
if (err) {
brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
return err;
@@ -3403,8 +3389,8 @@ static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
- val, &err);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+ val, &err);
if (err) {
brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
return err;
@@ -3420,6 +3406,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
+ struct brcmf_core *core = bus->sdio_core;
uint pad_size;
u32 value;
int err;
@@ -3428,7 +3415,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
* a device perspective, ie. bus:txglom affects the
* bus transfers from device to host.
*/
- if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
+ if (core->rev < 12) {
/* for sdio core rev < 12, disable txgloming */
value = 0;
err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
@@ -3449,7 +3436,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
if (sdiodev->sg_support) {
bus->txglom = false;
value = 1;
- pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
+ pad_size = bus->sdiodev->func2->cur_blksize << 1;
err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
&value, sizeof(u32));
if (err < 0) {
@@ -3491,7 +3478,7 @@ static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
address = bus->ci->rambase;
offset = err = 0;
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
while (offset < mem_size) {
len = ((offset + MEMBLOCK) < mem_size) ? MEMBLOCK :
mem_size - offset;
@@ -3507,7 +3494,7 @@ static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
}
done:
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
return err;
}
@@ -3564,11 +3551,10 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
if (!bus->dpc_triggered) {
u8 devpend;
- sdio_claim_host(bus->sdiodev->func[1]);
- devpend = brcmf_sdiod_regrb(bus->sdiodev,
- SDIO_CCCR_INTx,
- NULL);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
+ devpend = brcmf_sdiod_func0_rb(bus->sdiodev,
+ SDIO_CCCR_INTx, NULL);
+ sdio_release_host(bus->sdiodev->func1);
intstatus = devpend & (INTR_STATUS_FUNC1 |
INTR_STATUS_FUNC2);
}
@@ -3594,13 +3580,13 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
bus->console.count += jiffies_to_msecs(BRCMF_WD_POLL);
if (bus->console.count >= bus->console_interval) {
bus->console.count -= bus->console_interval;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
/* Make sure backplane clock is on */
brcmf_sdio_bus_sleep(bus, false, false);
if (brcmf_sdio_readconsole(bus) < 0)
/* stop on error */
bus->console_interval = 0;
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
}
}
#endif /* DEBUG */
@@ -3613,11 +3599,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
bus->idlecount++;
if (bus->idlecount > bus->idletime) {
brcmf_dbg(SDIO, "idle\n");
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_wd_timer(bus, false);
bus->idlecount = 0;
brcmf_sdio_bus_sleep(bus, true, false);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
}
} else {
bus->idlecount = 0;
@@ -3705,12 +3691,12 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
}
}
addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
- brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
- cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+ brcmf_sdiod_writel(sdiodev, addr, 1, NULL);
+ cc_data_temp = brcmf_sdiod_readl(sdiodev, addr, NULL);
cc_data_temp &= ~str_mask;
drivestrength_sel <<= str_shift;
cc_data_temp |= drivestrength_sel;
- brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
+ brcmf_sdiod_writel(sdiodev, addr, cc_data_temp, NULL);
brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
str_tab[i].strength, drivestrength, cc_data_temp);
@@ -3725,7 +3711,7 @@ static int brcmf_sdio_buscoreprep(void *ctx)
/* Try forcing SDIO core to do ALPAvail request only */
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
if (err) {
brcmf_err("error writing for HT off\n");
return err;
@@ -3733,8 +3719,7 @@ static int brcmf_sdio_buscoreprep(void *ctx)
/* If register supported, wait for ALPAvail and then force ALP */
/* This may take up to 15 milliseconds */
- clkval = brcmf_sdiod_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
if ((clkval & ~SBSDIO_AVBITS) != clkset) {
brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
@@ -3742,10 +3727,11 @@ static int brcmf_sdio_buscoreprep(void *ctx)
return -EACCES;
}
- SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
- !SBSDIO_ALPAV(clkval)),
- PMU_MAX_TRANSITION_DLY);
+ SPINWAIT(((clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ NULL)),
+ !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+
if (!SBSDIO_ALPAV(clkval)) {
brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
clkval);
@@ -3753,11 +3739,11 @@ static int brcmf_sdio_buscoreprep(void *ctx)
}
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
udelay(65);
/* Also, disable the extra SDIO pull-ups */
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
return 0;
}
@@ -3766,13 +3752,12 @@ static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
u32 rstvec)
{
struct brcmf_sdio_dev *sdiodev = ctx;
- struct brcmf_core *core;
+ struct brcmf_core *core = sdiodev->bus->sdio_core;
u32 reg_addr;
/* clear all interrupts */
- core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
- reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+ reg_addr = core->base + SD_REG(intstatus);
+ brcmf_sdiod_writel(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
if (rstvec)
/* Write reset vector to address 0 */
@@ -3785,16 +3770,25 @@ static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
struct brcmf_sdio_dev *sdiodev = ctx;
u32 val, rev;
- val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
- if ((sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 ||
- sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4339) &&
- addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
+ val = brcmf_sdiod_readl(sdiodev, addr, NULL);
+
+ /*
+	 * This is special handling for reading the chipcommon chipid
+	 * register. The 4339 is a next generation of the 4335: it uses the
+	 * same SDIO device id as the 4335, and its chipid register also
+	 * returns 4335. The 4339 can be identified by its chip revision, so
+	 * the value is corrected here to give the chip.c module the right info.
+ */
+ if (addr == CORE_CC_REG(SI_ENUM_BASE, chipid) &&
+ (sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4339 ||
+ sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4335_4339)) {
rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
if (rev >= 2) {
val &= ~CID_ID_MASK;
val |= BRCM_CC_4339_CHIP_ID;
}
}
+
return val;
}
@@ -3802,7 +3796,7 @@ static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
{
struct brcmf_sdio_dev *sdiodev = ctx;
- brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
+ brcmf_sdiod_writel(sdiodev, addr, val, NULL);
}
static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
@@ -3823,21 +3817,21 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
u32 drivestrength;
sdiodev = bus->sdiodev;
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
pr_debug("F1 signature read @0x18000000=0x%4x\n",
- brcmf_sdiod_regrl(sdiodev, SI_ENUM_BASE, NULL));
+ brcmf_sdiod_readl(sdiodev, SI_ENUM_BASE, NULL));
/*
* Force PLL off until brcmf_chip_attach()
* programs PLL control regs
*/
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- BRCMF_INIT_CLKCTL1, &err);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, BRCMF_INIT_CLKCTL1,
+ &err);
if (!err)
- clkctl = brcmf_sdiod_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ clkctl = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ &err);
if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
@@ -3851,6 +3845,17 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
bus->ci = NULL;
goto fail;
}
+
+ /* Pick up the SDIO core info struct from chip.c */
+ bus->sdio_core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+ if (!bus->sdio_core)
+ goto fail;
+
+ /* Pick up the CHIPCOMMON core info struct, for bulk IO in bcmsdh.c */
+ sdiodev->cc_core = brcmf_chip_get_core(bus->ci, BCMA_CORE_CHIPCOMMON);
+ if (!sdiodev->cc_core)
+ goto fail;
+
sdiodev->settings = brcmf_get_module_param(sdiodev->dev,
BRCMF_BUSTYPE_SDIO,
bus->ci->chip,
@@ -3879,8 +3884,8 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
/* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ
* is true or when platform data OOB irq is true).
*/
- if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
- ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
+ if ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) &&
+ ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_WAKE_SDIO_IRQ) ||
(sdiodev->settings->bus.sdio.oob_irq_supported)))
sdiodev->bus_if->wowl_supported = true;
#endif
@@ -3897,29 +3902,29 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
brcmf_sdio_drivestrengthinit(sdiodev, bus->ci, drivestrength);
/* Set card control so an SDIO card reset does a WLAN backplane reset */
- reg_val = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err);
+ reg_val = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err);
if (err)
goto fail;
reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
- brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
+ brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
if (err)
goto fail;
/* set PMUControl so a backplane reset does PMU state reload */
reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol);
- reg_val = brcmf_sdiod_regrl(sdiodev, reg_addr, &err);
+ reg_val = brcmf_sdiod_readl(sdiodev, reg_addr, &err);
if (err)
goto fail;
reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
- brcmf_sdiod_regwl(sdiodev, reg_addr, reg_val, &err);
+ brcmf_sdiod_writel(sdiodev, reg_addr, reg_val, &err);
if (err)
goto fail;
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
@@ -3940,7 +3945,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
return true;
fail:
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
return false;
}
@@ -4020,22 +4025,21 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
const struct firmware *code,
void *nvram, u32 nvram_len)
{
- struct brcmf_bus *bus_if;
- struct brcmf_sdio_dev *sdiodev;
- struct brcmf_sdio *bus;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct brcmf_sdio *bus = sdiodev->bus;
+ struct brcmf_sdio_dev *sdiod = bus->sdiodev;
+ struct brcmf_core *core = bus->sdio_core;
u8 saveclk;
brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
- bus_if = dev_get_drvdata(dev);
- sdiodev = bus_if->bus_priv.sdio;
+
if (err)
goto fail;
if (!bus_if->drvr)
return;
- bus = sdiodev->bus;
-
/* try to download image and nvram to the dongle */
bus->alp_only = true;
err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@@ -4047,7 +4051,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
bus->sdcnt.tickcnt = 0;
brcmf_sdio_wd_timer(bus, true);
- sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_host(sdiodev->func1);
/* Make sure backplane clock is on, needed to generate F2 interrupt */
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
@@ -4055,10 +4059,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
goto release;
/* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (!err) {
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
}
if (err) {
brcmf_err("Failed to force clock for F2: err %d\n", err);
@@ -4066,10 +4070,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
}
/* Enable function 2 (frame transfers) */
- w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
- offsetof(struct sdpcmd_regs, tosbmailboxdata));
- err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
+ brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailboxdata),
+ SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT, NULL);
+ err = sdio_enable_func(sdiodev->func2);
brcmf_dbg(INFO, "enable F2: err=%d\n", err);
@@ -4077,13 +4081,14 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
if (!err) {
/* Set up the interrupt mask and enable interrupts */
bus->hostintmask = HOSTINTMASK;
- w_sdreg32(bus, bus->hostintmask,
- offsetof(struct sdpcmd_regs, hostintmask));
+ brcmf_sdiod_writel(sdiod, core->base + SD_REG(hostintmask),
+ bus->hostintmask, NULL);
+
- brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_WATERMARK, 8, &err);
} else {
/* Disable F2 again */
- sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
+ sdio_disable_func(sdiodev->func2);
goto release;
}
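The reworked bring-up keeps the original ordering: announce the protocol version through the to-device mailbox, enable function 2, and only then unmask dongle interrupts and set the receive watermark, rolling F2 back on failure. A condensed sketch (constants as used above; the bus->hostintmask bookkeeping is omitted):

	brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailboxdata),
			   SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT, NULL);

	err = sdio_enable_func(sdiodev->func2);	/* start frame transfers */
	if (!err) {
		brcmf_sdiod_writel(sdiod, core->base + SD_REG(hostintmask),
				   HOSTINTMASK, NULL); /* unmask dongle irqs */
		brcmf_sdiod_writeb(sdiodev, SBSDIO_WATERMARK, 8, &err);
	} else {
		sdio_disable_func(sdiodev->func2); /* roll back on failure */
	}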
@@ -4091,8 +4096,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
brcmf_sdio_sr_init(bus);
} else {
/* Restore previous clock setting */
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- saveclk, &err);
+ brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ saveclk, &err);
}
if (err == 0) {
@@ -4108,7 +4113,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
if (err != 0)
brcmf_sdio_clkctl(bus, CLK_NONE, false);
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
err = brcmf_bus_started(dev);
if (err != 0) {
@@ -4118,10 +4123,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
return;
release:
- sdio_release_host(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func1);
fail:
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
- device_release_driver(&sdiodev->func[2]->dev);
+ device_release_driver(&sdiodev->func2->dev);
device_release_driver(dev);
}
@@ -4148,7 +4153,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
/* single-threaded workqueue */
wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
- dev_name(&sdiodev->func[1]->dev));
+ dev_name(&sdiodev->func1->dev));
if (!wq) {
brcmf_err("insufficient memory to create txworkqueue\n");
goto fail;
@@ -4174,7 +4179,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
init_completion(&bus->watchdog_wait);
bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
bus, "brcmf_wdog/%s",
- dev_name(&sdiodev->func[1]->dev));
+ dev_name(&sdiodev->func1->dev));
if (IS_ERR(bus->watchdog_tsk)) {
pr_warn("brcmf_watchdog thread failed to start\n");
bus->watchdog_tsk = NULL;
@@ -4200,7 +4205,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
}
/* Query the F2 block size, set roundup accordingly */
- bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+ bus->blocksize = bus->sdiodev->func2->cur_blksize;
bus->roundup = min(max_roundup, bus->blocksize);
/* Allocate buffers */
@@ -4216,17 +4221,17 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
}
}
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
/* Disable F2 to clear any intermediate frame state on the dongle */
- sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
+ sdio_disable_func(bus->sdiodev->func2);
bus->rxflow = false;
/* Done with backplane-dependent accesses, can drop clock... */
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
/* ...and initialize clock/power states */
bus->clkstate = CLK_SDONLY;
@@ -4278,7 +4283,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
if (bus->ci) {
if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_wd_timer(bus, false);
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
/* Leave the device in state where it is
@@ -4288,7 +4293,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
msleep(20);
brcmf_chip_set_passive(bus->ci);
brcmf_sdio_clkctl(bus, CLK_NONE, false);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
}
brcmf_chip_detach(bus->ci);
}
@@ -4335,9 +4340,9 @@ int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
{
int ret;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_claim_host(bus->sdiodev->func1);
ret = brcmf_sdio_bus_sleep(bus, sleep, false);
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func1);
return ret;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index f3da32fc6360..7faed831f07d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -21,10 +21,6 @@
#include <linux/firmware.h>
#include "firmware.h"
-#define SDIO_FUNC_0 0
-#define SDIO_FUNC_1 1
-#define SDIO_FUNC_2 2
-
#define SDIOD_FBR_SIZE 0x100
/* io_en */
@@ -39,28 +35,29 @@
#define INTR_STATUS_FUNC1 0x2
#define INTR_STATUS_FUNC2 0x4
-/* Maximum number of I/O funcs */
-#define SDIOD_MAX_IOFUNCS 7
-
/* mask of register map */
#define REG_F0_REG_MASK 0x7FF
#define REG_F1_MISC_MASK 0x1FFFF
-/* as of sdiod rev 0, supports 3 functions */
-#define SBSDIO_NUM_FUNCTION 3
-
/* function 0 vendor specific CCCR registers */
+
#define SDIO_CCCR_BRCM_CARDCAP 0xf0
-#define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT 0x02
-#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT 0x04
-#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC 0x08
-#define SDIO_CCCR_BRCM_CARDCTRL 0xf1
-#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET 0x02
-#define SDIO_CCCR_BRCM_SEPINT 0xf2
+#define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT BIT(1)
+#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT BIT(2)
+#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC BIT(3)
+
+/* Interrupt enable bits for each function */
+#define SDIO_CCCR_IEN_FUNC0 BIT(0)
+#define SDIO_CCCR_IEN_FUNC1 BIT(1)
+#define SDIO_CCCR_IEN_FUNC2 BIT(2)
+
+#define SDIO_CCCR_BRCM_CARDCTRL 0xf1
+#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET BIT(1)
-#define SDIO_SEPINT_MASK 0x01
-#define SDIO_SEPINT_OE 0x02
-#define SDIO_SEPINT_ACT_HI 0x04
+#define SDIO_CCCR_BRCM_SEPINT 0xf2
+#define SDIO_CCCR_BRCM_SEPINT_MASK BIT(0)
+#define SDIO_CCCR_BRCM_SEPINT_OE BIT(1)
+#define SDIO_CCCR_BRCM_SEPINT_ACT_HI BIT(2)
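These constants change spelling, not value: the kernel's BIT(n) macro expands to (1UL << (n)), so SDIO_CCCR_BRCM_CARDCTRL_WLANRESET is still 0x02 and the renamed SEPINT flags keep their old 0x01/0x02/0x04 encodings. Compile-time checks one could drop into any function to confirm:

	BUILD_BUG_ON(SDIO_CCCR_BRCM_CARDCTRL_WLANRESET != 0x02); /* BIT(1) */
	BUILD_BUG_ON(SDIO_CCCR_BRCM_SEPINT_MASK != 0x01);	 /* BIT(0) */
	BUILD_BUG_ON(SDIO_CCCR_BRCM_SEPINT_ACT_HI != 0x04);	 /* BIT(2) */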
/* function 1 miscellaneous registers */
@@ -131,11 +128,6 @@
/* with b15, maps to 32-bit SB access */
#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000
-/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
-
-#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */
-#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */
-#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */
/* Address bits from SBADDR regs */
#define SBSDIO_SBWINDOW_MASK 0xffff8000
@@ -178,9 +170,10 @@ struct brcmf_sdio;
struct brcmf_sdiod_freezer;
struct brcmf_sdio_dev {
- struct sdio_func *func[SDIO_MAX_FUNCS];
- u8 num_funcs; /* Supported funcs on client */
+ struct sdio_func *func1;
+ struct sdio_func *func2;
u32 sbwad; /* Save backplane window address */
+ struct brcmf_core *cc_core; /* chipcommon core info struct */
struct brcmf_sdio *bus;
struct device *dev;
struct brcmf_bus *bus_if;
@@ -296,13 +289,24 @@ struct sdpcmd_regs {
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
-/* sdio device register access interface */
-u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
- int *ret);
-void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
- int *ret);
+/* SDIO device register access interface */
+/* Accessors for SDIO Function 0 */
+#define brcmf_sdiod_func0_rb(sdiodev, addr, r) \
+ sdio_f0_readb((sdiodev)->func1, (addr), (r))
+
+#define brcmf_sdiod_func0_wb(sdiodev, addr, v, ret) \
+ sdio_f0_writeb((sdiodev)->func1, (v), (addr), (ret))
+
+/* Accessors for SDIO Function 1 */
+#define brcmf_sdiod_readb(sdiodev, addr, r) \
+ sdio_readb((sdiodev)->func1, (addr), (r))
+
+#define brcmf_sdiod_writeb(sdiodev, addr, v, ret) \
+ sdio_writeb((sdiodev)->func1, (v), (addr), (ret))
+
+u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+ int *ret);
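Note that the function-0 accessors deliberately take func1: sdio_f0_readb()/sdio_f0_writeb() address CCCR (function 0) registers but need some claimed sdio_func on the card to route the command through. A usage sketch mirroring the card-control update earlier in this patch:

	u8 reg;
	int err;

	sdio_claim_host(sdiodev->func1);
	reg = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err);
	if (!err)
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL,
				     reg | SDIO_CCCR_BRCM_CARDCTRL_WLANRESET,
				     &err);
	sdio_release_host(sdiodev->func1);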
/* Buffer transfer to/from device (client) core via cmd53.
* fn: function number
@@ -342,7 +346,8 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
u8 *data, uint size);
/* Issue an abort to the specified function */
-int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func);
+
void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
enum brcmf_sdiod_state state);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index 763e8ba6b178..7e01981bc5c8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -16049,8 +16049,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_UPDATEGAINU,
rfseq_updategainu_events,
rfseq_updategainu_dlys,
- sizeof(rfseq_updategainu_events) /
- sizeof(rfseq_updategainu_events[0]));
+ ARRAY_SIZE(rfseq_updategainu_events));
mod_phy_reg(pi, 0x153, (0xff << 8), (90 << 8));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
index dbf50ef6cd75..533bd4b0277e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
@@ -14,6 +14,7 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/kernel.h>
#include <types.h>
#include "phytbl_n.h"
@@ -4437,109 +4438,39 @@ static const u16 loft_lut_core1_rev0[] = {
};
const struct phytbl_info mimophytbl_info_rev0_volatile[] = {
- {&bdi_tbl_rev0, sizeof(bdi_tbl_rev0) / sizeof(bdi_tbl_rev0[0]), 21, 0,
- 16}
- ,
- {&pltlut_tbl_rev0, sizeof(pltlut_tbl_rev0) / sizeof(pltlut_tbl_rev0[0]),
- 20, 0, 32}
- ,
- {&gainctrl_lut_core0_rev0,
- sizeof(gainctrl_lut_core0_rev0) / sizeof(gainctrl_lut_core0_rev0[0]),
- 26, 192, 32}
- ,
- {&gainctrl_lut_core1_rev0,
- sizeof(gainctrl_lut_core1_rev0) / sizeof(gainctrl_lut_core1_rev0[0]),
- 27, 192, 32}
- ,
-
- {&est_pwr_lut_core0_rev0,
- sizeof(est_pwr_lut_core0_rev0) / sizeof(est_pwr_lut_core0_rev0[0]), 26,
- 0, 8}
- ,
- {&est_pwr_lut_core1_rev0,
- sizeof(est_pwr_lut_core1_rev0) / sizeof(est_pwr_lut_core1_rev0[0]), 27,
- 0, 8}
- ,
- {&adj_pwr_lut_core0_rev0,
- sizeof(adj_pwr_lut_core0_rev0) / sizeof(adj_pwr_lut_core0_rev0[0]), 26,
- 64, 8}
- ,
- {&adj_pwr_lut_core1_rev0,
- sizeof(adj_pwr_lut_core1_rev0) / sizeof(adj_pwr_lut_core1_rev0[0]), 27,
- 64, 8}
- ,
- {&iq_lut_core0_rev0,
- sizeof(iq_lut_core0_rev0) / sizeof(iq_lut_core0_rev0[0]), 26, 320, 32}
- ,
- {&iq_lut_core1_rev0,
- sizeof(iq_lut_core1_rev0) / sizeof(iq_lut_core1_rev0[0]), 27, 320, 32}
- ,
- {&loft_lut_core0_rev0,
- sizeof(loft_lut_core0_rev0) / sizeof(loft_lut_core0_rev0[0]), 26, 448,
- 16}
- ,
- {&loft_lut_core1_rev0,
- sizeof(loft_lut_core1_rev0) / sizeof(loft_lut_core1_rev0[0]), 27, 448,
- 16}
- ,
+ {&bdi_tbl_rev0, ARRAY_SIZE(bdi_tbl_rev0), 21, 0, 16},
+ {&pltlut_tbl_rev0, ARRAY_SIZE(pltlut_tbl_rev0), 20, 0, 32},
+ {&gainctrl_lut_core0_rev0, ARRAY_SIZE(gainctrl_lut_core0_rev0), 26, 192, 32},
+ {&gainctrl_lut_core1_rev0, ARRAY_SIZE(gainctrl_lut_core1_rev0), 27, 192, 32},
+ {&est_pwr_lut_core0_rev0, ARRAY_SIZE(est_pwr_lut_core0_rev0), 26, 0, 8},
+ {&est_pwr_lut_core1_rev0, ARRAY_SIZE(est_pwr_lut_core1_rev0), 27, 0, 8},
+ {&adj_pwr_lut_core0_rev0, ARRAY_SIZE(adj_pwr_lut_core0_rev0), 26, 64, 8},
+ {&adj_pwr_lut_core1_rev0, ARRAY_SIZE(adj_pwr_lut_core1_rev0), 27, 64, 8},
+ {&iq_lut_core0_rev0, ARRAY_SIZE(iq_lut_core0_rev0), 26, 320, 32},
+ {&iq_lut_core1_rev0, ARRAY_SIZE(iq_lut_core1_rev0), 27, 320, 32},
+ {&loft_lut_core0_rev0, ARRAY_SIZE(loft_lut_core0_rev0), 26, 448, 16},
+ {&loft_lut_core1_rev0, ARRAY_SIZE(loft_lut_core1_rev0), 27, 448, 16},
};
const struct phytbl_info mimophytbl_info_rev0[] = {
- {&frame_struct_rev0,
- sizeof(frame_struct_rev0) / sizeof(frame_struct_rev0[0]), 10, 0, 32}
- ,
- {&frame_lut_rev0, sizeof(frame_lut_rev0) / sizeof(frame_lut_rev0[0]),
- 24, 0, 8}
- ,
- {&tmap_tbl_rev0, sizeof(tmap_tbl_rev0) / sizeof(tmap_tbl_rev0[0]), 12,
- 0, 32}
- ,
- {&tdtrn_tbl_rev0, sizeof(tdtrn_tbl_rev0) / sizeof(tdtrn_tbl_rev0[0]),
- 14, 0, 32}
- ,
- {&intlv_tbl_rev0, sizeof(intlv_tbl_rev0) / sizeof(intlv_tbl_rev0[0]),
- 13, 0, 32}
- ,
- {&pilot_tbl_rev0, sizeof(pilot_tbl_rev0) / sizeof(pilot_tbl_rev0[0]),
- 11, 0, 16}
- ,
- {&tdi_tbl20_ant0_rev0,
- sizeof(tdi_tbl20_ant0_rev0) / sizeof(tdi_tbl20_ant0_rev0[0]), 19, 128,
- 32}
- ,
- {&tdi_tbl20_ant1_rev0,
- sizeof(tdi_tbl20_ant1_rev0) / sizeof(tdi_tbl20_ant1_rev0[0]), 19, 256,
- 32}
- ,
- {&tdi_tbl40_ant0_rev0,
- sizeof(tdi_tbl40_ant0_rev0) / sizeof(tdi_tbl40_ant0_rev0[0]), 19, 640,
- 32}
- ,
- {&tdi_tbl40_ant1_rev0,
- sizeof(tdi_tbl40_ant1_rev0) / sizeof(tdi_tbl40_ant1_rev0[0]), 19, 768,
- 32}
- ,
- {&chanest_tbl_rev0,
- sizeof(chanest_tbl_rev0) / sizeof(chanest_tbl_rev0[0]), 22, 0, 32}
- ,
- {&mcs_tbl_rev0, sizeof(mcs_tbl_rev0) / sizeof(mcs_tbl_rev0[0]), 18, 0,
- 8}
- ,
- {&noise_var_tbl0_rev0,
- sizeof(noise_var_tbl0_rev0) / sizeof(noise_var_tbl0_rev0[0]), 16, 0,
- 32}
- ,
- {&noise_var_tbl1_rev0,
- sizeof(noise_var_tbl1_rev0) / sizeof(noise_var_tbl1_rev0[0]), 16, 128,
- 32}
- ,
+ {&frame_struct_rev0, ARRAY_SIZE(frame_struct_rev0), 10, 0, 32},
+ {&frame_lut_rev0, ARRAY_SIZE(frame_lut_rev0), 24, 0, 8},
+ {&tmap_tbl_rev0, ARRAY_SIZE(tmap_tbl_rev0), 12, 0, 32},
+ {&tdtrn_tbl_rev0, ARRAY_SIZE(tdtrn_tbl_rev0), 14, 0, 32},
+ {&intlv_tbl_rev0, ARRAY_SIZE(intlv_tbl_rev0), 13, 0, 32},
+ {&pilot_tbl_rev0, ARRAY_SIZE(pilot_tbl_rev0), 11, 0, 16},
+ {&tdi_tbl20_ant0_rev0, ARRAY_SIZE(tdi_tbl20_ant0_rev0), 19, 128, 32},
+ {&tdi_tbl20_ant1_rev0, ARRAY_SIZE(tdi_tbl20_ant1_rev0), 19, 256, 32},
+ {&tdi_tbl40_ant0_rev0, ARRAY_SIZE(tdi_tbl40_ant0_rev0), 19, 640, 32},
+ {&tdi_tbl40_ant1_rev0, ARRAY_SIZE(tdi_tbl40_ant1_rev0), 19, 768, 32},
+ {&chanest_tbl_rev0, ARRAY_SIZE(chanest_tbl_rev0), 22, 0, 32},
+ {&mcs_tbl_rev0, ARRAY_SIZE(mcs_tbl_rev0), 18, 0, 8},
+ {&noise_var_tbl0_rev0, ARRAY_SIZE(noise_var_tbl0_rev0), 16, 0, 32},
+ {&noise_var_tbl1_rev0, ARRAY_SIZE(noise_var_tbl1_rev0), 16, 128, 32},
};
-const u32 mimophytbl_info_sz_rev0 =
- sizeof(mimophytbl_info_rev0) / sizeof(mimophytbl_info_rev0[0]);
-const u32 mimophytbl_info_sz_rev0_volatile =
- sizeof(mimophytbl_info_rev0_volatile) /
- sizeof(mimophytbl_info_rev0_volatile[0]);
+const u32 mimophytbl_info_sz_rev0 = ARRAY_SIZE(mimophytbl_info_rev0);
+const u32 mimophytbl_info_sz_rev0_volatile = ARRAY_SIZE(mimophytbl_info_rev0_volatile);
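The open-coded sizeof divisions collapse into ARRAY_SIZE(), which also adds a type-safety check: it fails to compile when handed a pointer rather than an actual array, which is why the patch pulls in <linux/kernel.h>. Roughly (shape, not verbatim):

	#define ARRAY_SIZE(arr) \
		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

	/* __must_be_array() evaluates to 0 for a real array but breaks the
	 * build if 'arr' decays to a pointer, catching the classic
	 * sizeof(ptr)/sizeof(ptr[0]) bug.
	 */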
static const u16 ant_swctrl_tbl_rev3[] = {
0x0082,
@@ -9363,132 +9294,53 @@ static const u32 papd_cal_scalars_tbl_core1_rev3[] = {
};
const struct phytbl_info mimophytbl_info_rev3_volatile[] = {
- {&ant_swctrl_tbl_rev3,
- sizeof(ant_swctrl_tbl_rev3) / sizeof(ant_swctrl_tbl_rev3[0]), 9, 0, 16}
- ,
+ {&ant_swctrl_tbl_rev3, ARRAY_SIZE(ant_swctrl_tbl_rev3), 9, 0, 16},
};
const struct phytbl_info mimophytbl_info_rev3_volatile1[] = {
- {&ant_swctrl_tbl_rev3_1,
- sizeof(ant_swctrl_tbl_rev3_1) / sizeof(ant_swctrl_tbl_rev3_1[0]), 9, 0,
- 16}
- ,
+ {&ant_swctrl_tbl_rev3_1, ARRAY_SIZE(ant_swctrl_tbl_rev3_1), 9, 0, 16},
};
const struct phytbl_info mimophytbl_info_rev3_volatile2[] = {
- {&ant_swctrl_tbl_rev3_2,
- sizeof(ant_swctrl_tbl_rev3_2) / sizeof(ant_swctrl_tbl_rev3_2[0]), 9, 0,
- 16}
- ,
+ {&ant_swctrl_tbl_rev3_2, ARRAY_SIZE(ant_swctrl_tbl_rev3_2), 9, 0, 16},
};
const struct phytbl_info mimophytbl_info_rev3_volatile3[] = {
- {&ant_swctrl_tbl_rev3_3,
- sizeof(ant_swctrl_tbl_rev3_3) / sizeof(ant_swctrl_tbl_rev3_3[0]), 9, 0,
- 16}
- ,
+ {&ant_swctrl_tbl_rev3_3, ARRAY_SIZE(ant_swctrl_tbl_rev3_3), 9, 0, 16},
};
const struct phytbl_info mimophytbl_info_rev3[] = {
- {&frame_struct_rev3,
- sizeof(frame_struct_rev3) / sizeof(frame_struct_rev3[0]), 10, 0, 32}
- ,
- {&pilot_tbl_rev3, sizeof(pilot_tbl_rev3) / sizeof(pilot_tbl_rev3[0]),
- 11, 0, 16}
- ,
- {&tmap_tbl_rev3, sizeof(tmap_tbl_rev3) / sizeof(tmap_tbl_rev3[0]), 12,
- 0, 32}
- ,
- {&intlv_tbl_rev3, sizeof(intlv_tbl_rev3) / sizeof(intlv_tbl_rev3[0]),
- 13, 0, 32}
- ,
- {&tdtrn_tbl_rev3, sizeof(tdtrn_tbl_rev3) / sizeof(tdtrn_tbl_rev3[0]),
- 14, 0, 32}
- ,
- {&noise_var_tbl_rev3,
- sizeof(noise_var_tbl_rev3) / sizeof(noise_var_tbl_rev3[0]), 16, 0, 32}
- ,
- {&mcs_tbl_rev3, sizeof(mcs_tbl_rev3) / sizeof(mcs_tbl_rev3[0]), 18, 0,
- 16}
- ,
- {&tdi_tbl20_ant0_rev3,
- sizeof(tdi_tbl20_ant0_rev3) / sizeof(tdi_tbl20_ant0_rev3[0]), 19, 128,
- 32}
- ,
- {&tdi_tbl20_ant1_rev3,
- sizeof(tdi_tbl20_ant1_rev3) / sizeof(tdi_tbl20_ant1_rev3[0]), 19, 256,
- 32}
- ,
- {&tdi_tbl40_ant0_rev3,
- sizeof(tdi_tbl40_ant0_rev3) / sizeof(tdi_tbl40_ant0_rev3[0]), 19, 640,
- 32}
- ,
- {&tdi_tbl40_ant1_rev3,
- sizeof(tdi_tbl40_ant1_rev3) / sizeof(tdi_tbl40_ant1_rev3[0]), 19, 768,
- 32}
- ,
- {&pltlut_tbl_rev3, sizeof(pltlut_tbl_rev3) / sizeof(pltlut_tbl_rev3[0]),
- 20, 0, 32}
- ,
- {&chanest_tbl_rev3,
- sizeof(chanest_tbl_rev3) / sizeof(chanest_tbl_rev3[0]), 22, 0, 32}
- ,
- {&frame_lut_rev3, sizeof(frame_lut_rev3) / sizeof(frame_lut_rev3[0]),
- 24, 0, 8}
- ,
- {&est_pwr_lut_core0_rev3,
- sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
- 0, 8}
- ,
- {&est_pwr_lut_core1_rev3,
- sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
- 0, 8}
- ,
- {&adj_pwr_lut_core0_rev3,
- sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
- 64, 8}
- ,
- {&adj_pwr_lut_core1_rev3,
- sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
- 64, 8}
- ,
- {&gainctrl_lut_core0_rev3,
- sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
- 26, 192, 32}
- ,
- {&gainctrl_lut_core1_rev3,
- sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
- 27, 192, 32}
- ,
- {&iq_lut_core0_rev3,
- sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
- ,
- {&iq_lut_core1_rev3,
- sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
- ,
- {&loft_lut_core0_rev3,
- sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
- 16}
- ,
- {&loft_lut_core1_rev3,
- sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
- 16}
+ {&frame_struct_rev3, ARRAY_SIZE(frame_struct_rev3), 10, 0, 32},
+ {&pilot_tbl_rev3, ARRAY_SIZE(pilot_tbl_rev3), 11, 0, 16},
+ {&tmap_tbl_rev3, ARRAY_SIZE(tmap_tbl_rev3), 12, 0, 32},
+ {&intlv_tbl_rev3, ARRAY_SIZE(intlv_tbl_rev3), 13, 0, 32},
+ {&tdtrn_tbl_rev3, ARRAY_SIZE(tdtrn_tbl_rev3), 14, 0, 32},
+ {&noise_var_tbl_rev3, ARRAY_SIZE(noise_var_tbl_rev3), 16, 0, 32},
+ {&mcs_tbl_rev3, ARRAY_SIZE(mcs_tbl_rev3), 18, 0, 16},
+ {&tdi_tbl20_ant0_rev3, ARRAY_SIZE(tdi_tbl20_ant0_rev3), 19, 128, 32},
+ {&tdi_tbl20_ant1_rev3, ARRAY_SIZE(tdi_tbl20_ant1_rev3), 19, 256, 32},
+ {&tdi_tbl40_ant0_rev3, ARRAY_SIZE(tdi_tbl40_ant0_rev3), 19, 640, 32},
+ {&tdi_tbl40_ant1_rev3, ARRAY_SIZE(tdi_tbl40_ant1_rev3), 19, 768, 32},
+ {&pltlut_tbl_rev3, ARRAY_SIZE(pltlut_tbl_rev3), 20, 0, 32},
+ {&chanest_tbl_rev3, ARRAY_SIZE(chanest_tbl_rev3), 22, 0, 32},
+ {&frame_lut_rev3, ARRAY_SIZE(frame_lut_rev3), 24, 0, 8},
+ {&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+ {&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+ {&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+ {&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+ {&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+ {&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+ {&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+ {&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+ {&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+ {&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16}
};
-const u32 mimophytbl_info_sz_rev3 =
- sizeof(mimophytbl_info_rev3) / sizeof(mimophytbl_info_rev3[0]);
-const u32 mimophytbl_info_sz_rev3_volatile =
- sizeof(mimophytbl_info_rev3_volatile) /
- sizeof(mimophytbl_info_rev3_volatile[0]);
-const u32 mimophytbl_info_sz_rev3_volatile1 =
- sizeof(mimophytbl_info_rev3_volatile1) /
- sizeof(mimophytbl_info_rev3_volatile1[0]);
-const u32 mimophytbl_info_sz_rev3_volatile2 =
- sizeof(mimophytbl_info_rev3_volatile2) /
- sizeof(mimophytbl_info_rev3_volatile2[0]);
-const u32 mimophytbl_info_sz_rev3_volatile3 =
- sizeof(mimophytbl_info_rev3_volatile3) /
- sizeof(mimophytbl_info_rev3_volatile3[0]);
+const u32 mimophytbl_info_sz_rev3 = ARRAY_SIZE(mimophytbl_info_rev3);
+const u32 mimophytbl_info_sz_rev3_volatile = ARRAY_SIZE(mimophytbl_info_rev3_volatile);
+const u32 mimophytbl_info_sz_rev3_volatile1 = ARRAY_SIZE(mimophytbl_info_rev3_volatile1);
+const u32 mimophytbl_info_sz_rev3_volatile2 = ARRAY_SIZE(mimophytbl_info_rev3_volatile2);
+const u32 mimophytbl_info_sz_rev3_volatile3 = ARRAY_SIZE(mimophytbl_info_rev3_volatile3);
static const u32 tmap_tbl_rev7[] = {
0x8a88aa80,
@@ -10469,162 +10321,58 @@ static const u32 papd_cal_scalars_tbl_core1_rev7[] = {
};
const struct phytbl_info mimophytbl_info_rev7[] = {
- {&frame_struct_rev3,
- sizeof(frame_struct_rev3) / sizeof(frame_struct_rev3[0]), 10, 0, 32}
- ,
- {&pilot_tbl_rev3, sizeof(pilot_tbl_rev3) / sizeof(pilot_tbl_rev3[0]),
- 11, 0, 16}
- ,
- {&tmap_tbl_rev7, sizeof(tmap_tbl_rev7) / sizeof(tmap_tbl_rev7[0]), 12,
- 0, 32}
- ,
- {&intlv_tbl_rev3, sizeof(intlv_tbl_rev3) / sizeof(intlv_tbl_rev3[0]),
- 13, 0, 32}
- ,
- {&tdtrn_tbl_rev3, sizeof(tdtrn_tbl_rev3) / sizeof(tdtrn_tbl_rev3[0]),
- 14, 0, 32}
- ,
- {&noise_var_tbl_rev7,
- sizeof(noise_var_tbl_rev7) / sizeof(noise_var_tbl_rev7[0]), 16, 0, 32}
- ,
- {&mcs_tbl_rev3, sizeof(mcs_tbl_rev3) / sizeof(mcs_tbl_rev3[0]), 18, 0,
- 16}
- ,
- {&tdi_tbl20_ant0_rev3,
- sizeof(tdi_tbl20_ant0_rev3) / sizeof(tdi_tbl20_ant0_rev3[0]), 19, 128,
- 32}
- ,
- {&tdi_tbl20_ant1_rev3,
- sizeof(tdi_tbl20_ant1_rev3) / sizeof(tdi_tbl20_ant1_rev3[0]), 19, 256,
- 32}
- ,
- {&tdi_tbl40_ant0_rev3,
- sizeof(tdi_tbl40_ant0_rev3) / sizeof(tdi_tbl40_ant0_rev3[0]), 19, 640,
- 32}
- ,
- {&tdi_tbl40_ant1_rev3,
- sizeof(tdi_tbl40_ant1_rev3) / sizeof(tdi_tbl40_ant1_rev3[0]), 19, 768,
- 32}
- ,
- {&pltlut_tbl_rev3, sizeof(pltlut_tbl_rev3) / sizeof(pltlut_tbl_rev3[0]),
- 20, 0, 32}
- ,
- {&chanest_tbl_rev3,
- sizeof(chanest_tbl_rev3) / sizeof(chanest_tbl_rev3[0]), 22, 0, 32}
- ,
- {&frame_lut_rev3, sizeof(frame_lut_rev3) / sizeof(frame_lut_rev3[0]),
- 24, 0, 8}
- ,
- {&est_pwr_lut_core0_rev3,
- sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
- 0, 8}
- ,
- {&est_pwr_lut_core1_rev3,
- sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
- 0, 8}
- ,
- {&adj_pwr_lut_core0_rev3,
- sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
- 64, 8}
- ,
- {&adj_pwr_lut_core1_rev3,
- sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
- 64, 8}
- ,
- {&gainctrl_lut_core0_rev3,
- sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
- 26, 192, 32}
- ,
- {&gainctrl_lut_core1_rev3,
- sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
- 27, 192, 32}
- ,
- {&iq_lut_core0_rev3,
- sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
- ,
- {&iq_lut_core1_rev3,
- sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
- ,
- {&loft_lut_core0_rev3,
- sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
- 16}
- ,
- {&loft_lut_core1_rev3,
- sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
- 16}
- ,
+ {&frame_struct_rev3, ARRAY_SIZE(frame_struct_rev3), 10, 0, 32},
+ {&pilot_tbl_rev3, ARRAY_SIZE(pilot_tbl_rev3), 11, 0, 16},
+ {&tmap_tbl_rev7, ARRAY_SIZE(tmap_tbl_rev7), 12, 0, 32},
+ {&intlv_tbl_rev3, ARRAY_SIZE(intlv_tbl_rev3), 13, 0, 32},
+ {&tdtrn_tbl_rev3, ARRAY_SIZE(tdtrn_tbl_rev3), 14, 0, 32},
+ {&noise_var_tbl_rev7, ARRAY_SIZE(noise_var_tbl_rev7), 16, 0, 32},
+ {&mcs_tbl_rev3, ARRAY_SIZE(mcs_tbl_rev3), 18, 0, 16},
+ {&tdi_tbl20_ant0_rev3, ARRAY_SIZE(tdi_tbl20_ant0_rev3), 19, 128, 32},
+ {&tdi_tbl20_ant1_rev3, ARRAY_SIZE(tdi_tbl20_ant1_rev3), 19, 256, 32},
+ {&tdi_tbl40_ant0_rev3, ARRAY_SIZE(tdi_tbl40_ant0_rev3), 19, 640, 32},
+ {&tdi_tbl40_ant1_rev3, ARRAY_SIZE(tdi_tbl40_ant1_rev3), 19, 768, 32},
+ {&pltlut_tbl_rev3, ARRAY_SIZE(pltlut_tbl_rev3), 20, 0, 32},
+ {&chanest_tbl_rev3, ARRAY_SIZE(chanest_tbl_rev3), 22, 0, 32},
+ {&frame_lut_rev3, ARRAY_SIZE(frame_lut_rev3), 24, 0, 8},
+ {&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+ {&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+ {&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+ {&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+ {&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+ {&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+ {&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+ {&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+ {&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+ {&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16},
{&papd_comp_rfpwr_tbl_core0_rev3,
- sizeof(papd_comp_rfpwr_tbl_core0_rev3) /
- sizeof(papd_comp_rfpwr_tbl_core0_rev3[0]), 26, 576, 16}
- ,
+ ARRAY_SIZE(papd_comp_rfpwr_tbl_core0_rev3), 26, 576, 16},
{&papd_comp_rfpwr_tbl_core1_rev3,
- sizeof(papd_comp_rfpwr_tbl_core1_rev3) /
- sizeof(papd_comp_rfpwr_tbl_core1_rev3[0]), 27, 576, 16}
- ,
+ ARRAY_SIZE(papd_comp_rfpwr_tbl_core1_rev3), 27, 576, 16},
{&papd_comp_epsilon_tbl_core0_rev7,
- sizeof(papd_comp_epsilon_tbl_core0_rev7) /
- sizeof(papd_comp_epsilon_tbl_core0_rev7[0]), 31, 0, 32}
- ,
+ ARRAY_SIZE(papd_comp_epsilon_tbl_core0_rev7), 31, 0, 32},
{&papd_cal_scalars_tbl_core0_rev7,
- sizeof(papd_cal_scalars_tbl_core0_rev7) /
- sizeof(papd_cal_scalars_tbl_core0_rev7[0]), 32, 0, 32}
- ,
+ ARRAY_SIZE(papd_cal_scalars_tbl_core0_rev7), 32, 0, 32},
{&papd_comp_epsilon_tbl_core1_rev7,
- sizeof(papd_comp_epsilon_tbl_core1_rev7) /
- sizeof(papd_comp_epsilon_tbl_core1_rev7[0]), 33, 0, 32}
- ,
+ ARRAY_SIZE(papd_comp_epsilon_tbl_core1_rev7), 33, 0, 32},
{&papd_cal_scalars_tbl_core1_rev7,
- sizeof(papd_cal_scalars_tbl_core1_rev7) /
- sizeof(papd_cal_scalars_tbl_core1_rev7[0]), 34, 0, 32}
- ,
+ ARRAY_SIZE(papd_cal_scalars_tbl_core1_rev7), 34, 0, 32},
};
-const u32 mimophytbl_info_sz_rev7 =
- sizeof(mimophytbl_info_rev7) / sizeof(mimophytbl_info_rev7[0]);
+const u32 mimophytbl_info_sz_rev7 = ARRAY_SIZE(mimophytbl_info_rev7);
const struct phytbl_info mimophytbl_info_rev16[] = {
- {&noise_var_tbl_rev7,
- sizeof(noise_var_tbl_rev7) / sizeof(noise_var_tbl_rev7[0]), 16, 0, 32}
- ,
- {&est_pwr_lut_core0_rev3,
- sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
- 0, 8}
- ,
- {&est_pwr_lut_core1_rev3,
- sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
- 0, 8}
- ,
- {&adj_pwr_lut_core0_rev3,
- sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
- 64, 8}
- ,
- {&adj_pwr_lut_core1_rev3,
- sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
- 64, 8}
- ,
- {&gainctrl_lut_core0_rev3,
- sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
- 26, 192, 32}
- ,
- {&gainctrl_lut_core1_rev3,
- sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
- 27, 192, 32}
- ,
- {&iq_lut_core0_rev3,
- sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
- ,
- {&iq_lut_core1_rev3,
- sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
- ,
- {&loft_lut_core0_rev3,
- sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
- 16}
- ,
- {&loft_lut_core1_rev3,
- sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
- 16}
- ,
+ {&noise_var_tbl_rev7, ARRAY_SIZE(noise_var_tbl_rev7), 16, 0, 32},
+ {&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+ {&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+ {&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+ {&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+ {&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+ {&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+ {&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+ {&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+ {&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+ {&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16},
};
-const u32 mimophytbl_info_sz_rev16 =
- sizeof(mimophytbl_info_rev16) / sizeof(mimophytbl_info_rev16[0]);
+const u32 mimophytbl_info_sz_rev16 = ARRAY_SIZE(mimophytbl_info_rev16);
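The ARRAY_SIZE() conversion above is more than a cleanup: the kernel macro (from include/linux/kernel.h) adds a compile-time check that its argument really is an array, which the open-coded sizeof division cannot provide. A minimal sketch for reference; example_tbl is made up:

/*
 * For reference, the macro being substituted in is roughly:
 *   #define ARRAY_SIZE(arr) \
 *           (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 */
#include <linux/kernel.h>	/* ARRAY_SIZE() */

static const u32 example_tbl[] = { 0x11, 0x22, 0x33 };

/* Evaluates to 3 at compile time. Passing a pointer such as
 * &example_tbl[0] would now fail the build, where the open-coded
 * division would silently yield a bogus size.
 */
static const u32 example_tbl_sz = ARRAY_SIZE(example_tbl);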
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index ff136f299a0a..e6205eae51fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -9,12 +9,13 @@ iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
-iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/a000.o
+iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o fw/nvm.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
+iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
iwlwifi-objs += $(iwlwifi-m)
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
new file mode 100644
index 000000000000..48f6f80eb24b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -0,0 +1,216 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+
+/* Highest firmware API version supported */
+#define IWL_22000_UCODE_API_MAX 36
+
+/* Lowest firmware API version supported */
+#define IWL_22000_UCODE_API_MIN 24
+
+/* NVM versions */
+#define IWL_22000_NVM_VERSION 0x0a1d
+#define IWL_22000_TX_POWER_VERSION 0xffff /* meaningless */
+
+/* Memory offsets and lengths */
+#define IWL_22000_DCCM_OFFSET 0x800000 /* LMAC1 */
+#define IWL_22000_DCCM_LEN 0x10000 /* LMAC1 */
+#define IWL_22000_DCCM2_OFFSET 0x880000
+#define IWL_22000_DCCM2_LEN 0x8000
+#define IWL_22000_SMEM_OFFSET 0x400000
+#define IWL_22000_SMEM_LEN 0xD0000
+
+#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
+#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
+#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
+#define IWL_22000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
+#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
+#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
+
+#define IWL_22000_HR_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_JF_MODULE_FIRMWARE(api) \
+ IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
+
+#define NVM_HW_SECTION_NUM_FAMILY_22000 10
+
+static const struct iwl_base_params iwl_22000_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+ .num_of_queues = 512,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
+};
+
+static const struct iwl_ht_params iwl_22000_ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_22000 \
+ .ucode_api_max = IWL_22000_UCODE_API_MAX, \
+ .ucode_api_min = IWL_22000_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_22000, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .base_params = &iwl_22000_base_params, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \
+ .non_shared_ant = ANT_A, \
+ .dccm_offset = IWL_22000_DCCM_OFFSET, \
+ .dccm_len = IWL_22000_DCCM_LEN, \
+ .dccm2_offset = IWL_22000_DCCM2_OFFSET, \
+ .dccm2_len = IWL_22000_DCCM2_LEN, \
+ .smem_offset = IWL_22000_SMEM_OFFSET, \
+ .smem_len = IWL_22000_SMEM_LEN, \
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
+ .apmg_not_supported = true, \
+ .mq_rx_supported = true, \
+ .vht_mu_mimo_supported = true, \
+ .mac_addr_from_csr = true, \
+ .use_tfh = true, \
+ .rf_id = true, \
+ .gen2 = true, \
+ .nvm_type = IWL_NVM_EXT, \
+ .dbgc_supported = true, \
+ .tx_cmd_queue_size = 32, \
+ .min_umac_error_event_table = 0x400000
+
+const struct iwl_cfg iwl22000_2ac_cfg_hr = {
+ .name = "Intel(R) Dual Band Wireless AC 22000",
+ .fw_name_pre = IWL_22000_HR_FW_PRE,
+ IWL_DEVICE_22000,
+ .ht_params = &iwl_22000_ht_params,
+ .nvm_ver = IWL_22000_NVM_VERSION,
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
+ .name = "Intel(R) Dual Band Wireless AC 22000",
+ .fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
+ IWL_DEVICE_22000,
+ .ht_params = &iwl_22000_ht_params,
+ .nvm_ver = IWL_22000_NVM_VERSION,
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .cdb = true,
+};
+
+const struct iwl_cfg iwl22000_2ac_cfg_jf = {
+ .name = "Intel(R) Dual Band Wireless AC 22000",
+ .fw_name_pre = IWL_22000_JF_FW_PRE,
+ IWL_DEVICE_22000,
+ .ht_params = &iwl_22000_ht_params,
+ .nvm_ver = IWL_22000_NVM_VERSION,
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_hr = {
+ .name = "Intel(R) Dual Band Wireless AX 22000",
+ .fw_name_pre = IWL_22000_HR_FW_PRE,
+ IWL_DEVICE_22000,
+ .ht_params = &iwl_22000_ht_params,
+ .nvm_ver = IWL_22000_NVM_VERSION,
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = {
+ .name = "Intel(R) Dual Band Wireless AX 22000",
+ .fw_name_pre = IWL_22000_HR_F0_FW_PRE,
+ IWL_DEVICE_22000,
+ .ht_params = &iwl_22000_ht_params,
+ .nvm_ver = IWL_22000_NVM_VERSION,
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
+ .name = "Intel(R) Dual Band Wireless AX 22000",
+ .fw_name_pre = IWL_22000_JF_B0_FW_PRE,
+ IWL_DEVICE_22000,
+ .ht_params = &iwl_22000_ht_params,
+ .nvm_ver = IWL_22000_NVM_VERSION,
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
+ .name = "Intel(R) Dual Band Wireless AX 22000",
+ .fw_name_pre = IWL_22000_HR_A0_FW_PRE,
+ IWL_DEVICE_22000,
+ .ht_params = &iwl_22000_ht_params,
+ .nvm_ver = IWL_22000_NVM_VERSION,
+ .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
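For orientation, the firmware file names requested by the MODULE_FIRMWARE() lines above are simply the *_FW_PRE prefix with the API number and ".ucode" appended. Hand-expanded for illustration:

/*
 * Using the macros defined above:
 *
 *   IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)
 *     -> IWL_22000_HR_FW_PRE __stringify(36) ".ucode"
 *     -> "iwlwifi-Qu-a0-hr-a0-" "36" ".ucode"
 *     -> "iwlwifi-Qu-a0-hr-a0-36.ucode"
 *
 * i.e. the name the driver will hand to request_firmware().
 */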
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 9bb7c19d48eb..3f4d9bac9f73 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -70,8 +70,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 34
-#define IWL8265_UCODE_API_MAX 34
+#define IWL8000_UCODE_API_MAX 36
+#define IWL8265_UCODE_API_MAX 36
/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 22
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e7e75b458005..90a1d14cf7d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 34
+#define IWL9000_UCODE_API_MAX 36
/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN 30
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
deleted file mode 100644
index 705f83b02e13..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015-2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-#include <linux/stringify.h>
-#include "iwl-config.h"
-#include "iwl-agn-hw.h"
-
-/* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX 34
-
-/* Lowest firmware API version supported */
-#define IWL_A000_UCODE_API_MIN 24
-
-/* NVM versions */
-#define IWL_A000_NVM_VERSION 0x0a1d
-#define IWL_A000_TX_POWER_VERSION 0xffff /* meaningless */
-
-/* Memory offsets and lengths */
-#define IWL_A000_DCCM_OFFSET 0x800000 /* LMAC1 */
-#define IWL_A000_DCCM_LEN 0x10000 /* LMAC1 */
-#define IWL_A000_DCCM2_OFFSET 0x880000
-#define IWL_A000_DCCM2_LEN 0x8000
-#define IWL_A000_SMEM_OFFSET 0x400000
-#define IWL_A000_SMEM_LEN 0xD0000
-
-#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
-#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
-#define IWL_A000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
-#define IWL_A000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
-#define IWL_A000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
-#define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
-
-#define IWL_A000_HR_MODULE_FIRMWARE(api) \
- IWL_A000_HR_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_JF_MODULE_FIRMWARE(api) \
- IWL_A000_JF_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode"
-
-#define NVM_HW_SECTION_NUM_FAMILY_A000 10
-
-static const struct iwl_base_params iwl_a000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_A000,
- .num_of_queues = 512,
- .shadow_ram_support = true,
- .led_compensation = 57,
- .wd_timeout = IWL_LONG_WD_TIMEOUT,
- .max_event_log_size = 512,
- .shadow_reg_enable = true,
- .pcie_l1_allowed = true,
-};
-
-static const struct iwl_ht_params iwl_a000_ht_params = {
- .stbc = true,
- .ldpc = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
-};
-
-#define IWL_DEVICE_A000 \
- .ucode_api_max = IWL_A000_UCODE_API_MAX, \
- .ucode_api_min = IWL_A000_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_A000, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
- .base_params = &iwl_a000_base_params, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_A000, \
- .non_shared_ant = ANT_A, \
- .dccm_offset = IWL_A000_DCCM_OFFSET, \
- .dccm_len = IWL_A000_DCCM_LEN, \
- .dccm2_offset = IWL_A000_DCCM2_OFFSET, \
- .dccm2_len = IWL_A000_DCCM2_LEN, \
- .smem_offset = IWL_A000_SMEM_OFFSET, \
- .smem_len = IWL_A000_SMEM_LEN, \
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
- .apmg_not_supported = true, \
- .mq_rx_supported = true, \
- .vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = true, \
- .use_tfh = true, \
- .rf_id = true, \
- .gen2 = true, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .tx_cmd_queue_size = 32, \
- .min_umac_error_event_table = 0x400000
-
-const struct iwl_cfg iwla000_2ac_cfg_hr = {
- .name = "Intel(R) Dual Band Wireless AC a000",
- .fw_name_pre = IWL_A000_HR_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ac_cfg_hr_cdb = {
- .name = "Intel(R) Dual Band Wireless AC a000",
- .fw_name_pre = IWL_A000_HR_CDB_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
- .cdb = true,
-};
-
-const struct iwl_cfg iwla000_2ac_cfg_jf = {
- .name = "Intel(R) Dual Band Wireless AC a000",
- .fw_name_pre = IWL_A000_JF_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_hr = {
- .name = "Intel(R) Dual Band Wireless AX a000",
- .fw_name_pre = IWL_A000_HR_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0 = {
- .name = "Intel(R) Dual Band Wireless AX a000",
- .fw_name_pre = IWL_A000_HR_F0_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0 = {
- .name = "Intel(R) Dual Band Wireless AX a000",
- .fw_name_pre = IWL_A000_JF_B0_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0 = {
- .name = "Intel(R) Dual Band Wireless AX a000",
- .fw_name_pre = IWL_A000_HR_A0_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_JF_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index 3684a3e180e5..007bfe7656a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -95,8 +95,8 @@ enum {
#define IWL_ALIVE_FLG_RFKILL BIT(0)
struct iwl_lmac_alive {
- __le32 ucode_minor;
__le32 ucode_major;
+ __le32 ucode_minor;
u8 ver_subtype;
u8 ver_type;
u8 mac;
@@ -113,8 +113,8 @@ struct iwl_lmac_alive {
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
struct iwl_umac_alive {
- __le32 umac_minor; /* UMAC version: minor */
__le32 umac_major; /* UMAC version: major */
+ __le32 umac_minor; /* UMAC version: minor */
__le32 error_info_addr; /* SRAM address for UMAC error log */
__le32 dbg_print_buff_addr;
} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
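The major/minor swap above is not cosmetic: these structures are copied verbatim out of the firmware's alive notification, so the field order must match what the firmware actually sends. A hypothetical decode helper, shown only to illustrate the corrected layout:

/* Hypothetical helper, for illustration: with the corrected layout the
 * major number now comes first in the notification, so a plain
 * field-by-field read yields the right values.
 */
static void example_read_lmac_ver(const struct iwl_lmac_alive *lmac,
				  u32 *major, u32 *minor)
{
	*major = le32_to_cpu(lmac->ucode_major);
	*minor = le32_to_cpu(lmac->ucode_minor);
}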
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
index d09555afe2c5..87c1ddea75ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -188,11 +188,6 @@ enum iwl_bt_mxbox_dw3 {
BT_MBOX(3, UPDATE_REQUEST, 21, 1),
};
-enum iwl_bt_mxbox_dw4 {
- BT_MBOX(4, ATS_BT_INTERVAL, 0, 7),
- BT_MBOX(4, ATS_BT_ACTIVE_MAX_TH, 7, 7),
-};
-
#define BT_MBOX_MSG(_notif, _num, _field) \
((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
>> BT_MBOX##_num##_##_field##_POS)
@@ -232,31 +227,6 @@ enum iwl_bt_ci_compliance {
* @reserved: reserved
*/
struct iwl_bt_coex_profile_notif {
- __le32 mbox_msg[8];
- __le32 msg_idx;
- __le32 bt_ci_compliance;
-
- __le32 primary_ch_lut;
- __le32 secondary_ch_lut;
- __le32 bt_activity_grading;
- u8 ttc_status;
- u8 rrc_status;
- __le16 reserved;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_5 */
-
-/**
- * struct iwl_bt_coex_profile_notif - notification about BT coex
- * @mbox_msg: message from BT to WiFi
- * @msg_idx: the index of the message
- * @bt_ci_compliance: enum %iwl_bt_ci_compliance
- * @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type
- * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type
- * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading
- * @ttc_status: is TTC enabled - one bit per PHY
- * @rrc_status: is RRC enabled - one bit per PHY
- * @reserved: reserved
- */
-struct iwl_bt_coex_profile_notif_v4 {
__le32 mbox_msg[4];
__le32 msg_idx;
__le32 bt_ci_compliance;
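The BT_MBOX_MSG() accessor kept in context above is how consumers extract individual fields from the mailbox words; a usage sketch with a hypothetical caller:

/* Usage sketch, hypothetical caller: BT_MBOX(3, UPDATE_REQUEST, 21, 1)
 * defines the BT_MBOX3_UPDATE_REQUEST mask/position pair, and
 * BT_MBOX_MSG() applies it to the notification's mailbox words.
 */
static u32 example_bt_update_request(struct iwl_bt_coex_profile_notif *notif)
{
	return BT_MBOX_MSG(notif, 3, UPDATE_REQUEST);
}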
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 7ebbf097488b..f285bacc8726 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -215,7 +215,7 @@ enum iwl_legacy_cmds {
/**
* @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware,
* &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp
- * for newer (A000) hardware.
+ * for newer (22000) hardware.
*/
SCD_QUEUE_CFG = 0x1d,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index aa76dcc148bd..a57c7223df0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -83,6 +83,21 @@ enum iwl_data_path_subcmd_ids {
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
/**
+ * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
+ */
+ TLC_MNG_CONFIG_CMD = 0xF,
+
+ /**
+ * @TLC_MNG_NOTIF_REQ_CMD: &struct iwl_tlc_notif_req_config_cmd
+ */
+ TLC_MNG_NOTIF_REQ_CMD = 0x10,
+
+ /**
+ * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
+ */
+ TLC_MNG_UPDATE_NOTIF = 0xF7,
+
+ /**
* @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification
*/
STA_PM_NOTIF = 0xFD,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 0a81fb1b6ed4..106782341544 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -250,10 +250,12 @@ struct iwl_mfu_assert_dump_notif {
* The ids for different type of markers to insert into the usniffer logs
*
* @MARKER_ID_TX_FRAME_LATENCY: TX latency marker
+ * @MARKER_ID_SYNC_CLOCK: sync FW time and systime
*/
enum iwl_mvm_marker_id {
MARKER_ID_TX_FRAME_LATENCY = 1,
-}; /* MARKER_ID_API_E_VER_1 */
+ MARKER_ID_SYNC_CLOCK = 2,
+}; /* MARKER_ID_API_E_VER_2 */
/**
* struct iwl_mvm_marker - mark info into the usniffer logs
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index ec42c84e5df2..17c7ef1662a9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -68,6 +68,10 @@
*/
enum iwl_mac_conf_subcmd_ids {
/**
+ * @LOW_LATENCY_CMD: &struct iwl_mac_low_latency_cmd
+ */
+ LOW_LATENCY_CMD = 0x3,
+ /**
* @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif
*/
CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
@@ -82,4 +86,19 @@ struct iwl_channel_switch_noa_notif {
__le32 id_and_color;
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
+/**
+ * struct iwl_mac_low_latency_cmd - set/clear mac to 'low-latency mode'
+ *
+ * @mac_id: MAC ID to whom to apply the low-latency configurations
+ * @low_latency_rx: 1/0 to set/clear Rx low latency direction
+ * @low_latency_tx: 1/0 to set/clear Tx low latency direction
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_mac_low_latency_cmd {
+ __le32 mac_id;
+ u8 low_latency_rx;
+ u8 low_latency_tx;
+ __le16 reserved;
+} __packed; /* MAC_LOW_LATENCY_API_S_VER_1 */
+
#endif /* __iwl_fw_api_mac_cfg_h__ */
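A minimal sketch of how the new command might be issued; iwl_mvm_send_cmd_pdu(), iwl_cmd_id() and the MAC_CONF_GROUP group id are assumed from the surrounding mvm code and are not part of this hunk:

/* Minimal sketch, not part of the patch: sending the new low-latency
 * command for one MAC.
 */
static int example_send_low_latency(struct iwl_mvm *mvm, u32 mac_id,
				    bool rx, bool tx)
{
	struct iwl_mac_low_latency_cmd cmd = {
		.mac_id = cpu_to_le32(mac_id),
		.low_latency_rx = rx ? 1 : 0,
		.low_latency_tx = tx ? 1 : 0,
	};

	return iwl_mvm_send_cmd_pdu(mvm,
				    iwl_cmd_id(LOW_LATENCY_CMD,
					       MAC_CONF_GROUP, 0),
				    0, sizeof(cmd), &cmd);
}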
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index a13fd8a1be62..e49a6f7be613 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -62,6 +62,267 @@
#include "mac.h"
+/**
+ * enum iwl_tlc_mng_cfg_flags - options for TLC config flags
+ * @IWL_TLC_MNG_CFG_FLAGS_CCK_MSK: CCK support
+ * @IWL_TLC_MNG_CFG_FLAGS_DD_MSK: enable DD
+ * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC
+ * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
+ * @IWL_TLC_MNG_CFG_FLAGS_BF_MSK: enable BFER
+ * @IWL_TLC_MNG_CFG_FLAGS_DCM_MSK: enable DCM
+ */
+enum iwl_tlc_mng_cfg_flags {
+ IWL_TLC_MNG_CFG_FLAGS_CCK_MSK = BIT(0),
+ IWL_TLC_MNG_CFG_FLAGS_DD_MSK = BIT(1),
+ IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(2),
+ IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(3),
+ IWL_TLC_MNG_CFG_FLAGS_BF_MSK = BIT(4),
+ IWL_TLC_MNG_CFG_FLAGS_DCM_MSK = BIT(5),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_cw - channel width options
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ: 20MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ: 40MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ: 80MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ: 160MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_LAST: maximum value
+ */
+enum iwl_tlc_mng_cfg_cw {
+ IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ,
+ IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ,
+ IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ,
+ IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ,
+ IWL_TLC_MNG_MAX_CH_WIDTH_LAST = IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ,
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_chains - possible chains
+ * @IWL_TLC_MNG_CHAIN_A_MSK: chain A
+ * @IWL_TLC_MNG_CHAIN_B_MSK: chain B
+ * @IWL_TLC_MNG_CHAIN_C_MSK: chain C
+ */
+enum iwl_tlc_mng_cfg_chains {
+ IWL_TLC_MNG_CHAIN_A_MSK = BIT(0),
+ IWL_TLC_MNG_CHAIN_B_MSK = BIT(1),
+ IWL_TLC_MNG_CHAIN_C_MSK = BIT(2),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_gi - guard interval options
+ * @IWL_TLC_MNG_SGI_20MHZ_MSK: enable short GI for 20MHZ
+ * @IWL_TLC_MNG_SGI_40MHZ_MSK: enable short GI for 40MHZ
+ * @IWL_TLC_MNG_SGI_80MHZ_MSK: enable short GI for 80MHZ
+ * @IWL_TLC_MNG_SGI_160MHZ_MSK: enable short GI for 160MHZ
+ */
+enum iwl_tlc_mng_cfg_gi {
+ IWL_TLC_MNG_SGI_20MHZ_MSK = BIT(0),
+ IWL_TLC_MNG_SGI_40MHZ_MSK = BIT(1),
+ IWL_TLC_MNG_SGI_80MHZ_MSK = BIT(2),
+ IWL_TLC_MNG_SGI_160MHZ_MSK = BIT(3),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_mode - supported modes
+ * @IWL_TLC_MNG_MODE_CCK: enable CCK
+ * @IWL_TLC_MNG_MODE_OFDM_NON_HT: enable OFDM (non HT)
+ * @IWL_TLC_MNG_MODE_NON_HT: enable non HT
+ * @IWL_TLC_MNG_MODE_HT: enable HT
+ * @IWL_TLC_MNG_MODE_VHT: enable VHT
+ * @IWL_TLC_MNG_MODE_HE: enable HE
+ * @IWL_TLC_MNG_MODE_INVALID: invalid value
+ * @IWL_TLC_MNG_MODE_NUM: a count of possible modes
+ */
+enum iwl_tlc_mng_cfg_mode {
+ IWL_TLC_MNG_MODE_CCK = 0,
+ IWL_TLC_MNG_MODE_OFDM_NON_HT = IWL_TLC_MNG_MODE_CCK,
+ IWL_TLC_MNG_MODE_NON_HT = IWL_TLC_MNG_MODE_CCK,
+ IWL_TLC_MNG_MODE_HT,
+ IWL_TLC_MNG_MODE_VHT,
+ IWL_TLC_MNG_MODE_HE,
+ IWL_TLC_MNG_MODE_INVALID,
+ IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID,
+};
+
+/**
+ * enum iwl_tlc_mng_vht_he_types - VHT HE types
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU: VHT/HE single user
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT: VHT/HE single user extended
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU: VHT/HE multiple users
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED: trigger based
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM: a count of possible types
+ */
+enum iwl_tlc_mng_vht_he_types {
+ IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU = 0,
+ IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT,
+ IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU,
+ IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED,
+ IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM =
+ IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED,
+};
+
+/**
+ * enum iwl_tlc_mng_ht_rates - HT/VHT rates
+ * @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0
+ * @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1
+ * @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2
+ * @IWL_TLC_MNG_HT_RATE_MCS3: index of MCS3
+ * @IWL_TLC_MNG_HT_RATE_MCS4: index of MCS4
+ * @IWL_TLC_MNG_HT_RATE_MCS5: index of MCS5
+ * @IWL_TLC_MNG_HT_RATE_MCS6: index of MCS6
+ * @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7
+ * @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8
+ * @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9
+ * @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT
+ */
+enum iwl_tlc_mng_ht_rates {
+ IWL_TLC_MNG_HT_RATE_MCS0 = 0,
+ IWL_TLC_MNG_HT_RATE_MCS1,
+ IWL_TLC_MNG_HT_RATE_MCS2,
+ IWL_TLC_MNG_HT_RATE_MCS3,
+ IWL_TLC_MNG_HT_RATE_MCS4,
+ IWL_TLC_MNG_HT_RATE_MCS5,
+ IWL_TLC_MNG_HT_RATE_MCS6,
+ IWL_TLC_MNG_HT_RATE_MCS7,
+ IWL_TLC_MNG_HT_RATE_MCS8,
+ IWL_TLC_MNG_HT_RATE_MCS9,
+ IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS9,
+};
+
+/* Maximum supported tx antennas number */
+#define MAX_RS_ANT_NUM 3
+
+/**
+ * struct iwl_tlc_config_cmd - TLC configuration
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_supp_ch_width: channel width
+ * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
+ * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
+ * @max_supp_ss: valid values are 0-3, 0 - spatial streams are not supported
+ * @valid_vht_he_types: bitmap of &enum iwl_tlc_mng_vht_he_types
+ * @non_ht_supp_rates: bitmap of supported legacy rates
+ * @ht_supp_rates: bitmap of supported HT/VHT rates, valid bits are 0-9
+ * @mode: &enum iwl_tlc_mng_cfg_mode
+ * @reserved2: reserved
+ * @he_supp_rates: bitmap of supported HE rates
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * @he_gi_support: 11ax HE guard interval
+ * @max_ampdu_cnt: max AMPDU size (frames count)
+ */
+struct iwl_tlc_config_cmd {
+ u8 sta_id;
+ u8 reserved1[3];
+ u8 max_supp_ch_width;
+ u8 chains;
+ u8 max_supp_ss;
+ u8 valid_vht_he_types;
+ __le16 flags;
+ __le16 non_ht_supp_rates;
+ __le16 ht_supp_rates[MAX_RS_ANT_NUM];
+ u8 mode;
+ u8 reserved2;
+ __le16 he_supp_rates;
+ u8 sgi_ch_width_supp;
+ u8 he_gi_support;
+ __le32 max_ampdu_cnt;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_1 */
+
+#define IWL_TLC_NOTIF_INIT_RATE_POS 0
+#define IWL_TLC_NOTIF_INIT_RATE_MSK BIT(IWL_TLC_NOTIF_INIT_RATE_POS)
+#define IWL_TLC_NOTIF_REQ_INTERVAL (500)
+
+/**
+ * struct iwl_tlc_notif_req_config_cmd - request notif on specific changes
+ * @sta_id: relevant station
+ * @reserved1: reserved
+ * @flags: bitmap of requested notifications %IWL_TLC_NOTIF_INIT_\*
+ * @interval: minimum time between notifications from TLC to the driver (msec)
+ * @reserved2: reserved
+ */
+struct iwl_tlc_notif_req_config_cmd {
+ u8 sta_id;
+ u8 reserved1;
+ __le16 flags;
+ __le16 interval;
+ __le16 reserved2;
+} __packed; /* TLC_MNG_NOTIF_REQ_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_tlc_update_notif - TLC notification from FW
+ * @sta_id: station id
+ * @reserved: reserved
+ * @flags: bitmap of notifications reported
+ * @values: field per flag in struct iwl_tlc_notif_req_config_cmd
+ */
+struct iwl_tlc_update_notif {
+ u8 sta_id;
+ u8 reserved;
+ __le16 flags;
+ __le32 values[16];
+} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_1 */
+
+/**
+ * enum iwl_tlc_debug_flags - debug options
+ * @IWL_TLC_DEBUG_FIXED_RATE: set fixed rate for rate scaling
+ * @IWL_TLC_DEBUG_STATS_TH: threshold for sending statistics to the driver, in
+ * frames
+ * @IWL_TLC_DEBUG_STATS_TIME_TH: threshold for sending statistics to the
+ * driver, in msec
+ * @IWL_TLC_DEBUG_AGG_TIME_LIM: time limit for a BA session
+ * @IWL_TLC_DEBUG_AGG_DIS_START_TH: frame with try-count greater than this
+ * threshold should not start an aggregation session
+ * @IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM: set max number of frames in an aggregation
+ * @IWL_TLC_DEBUG_RENEW_ADDBA_DELAY: delay between retries of ADD BA
+ * @IWL_TLC_DEBUG_START_AC_RATE_IDX: frames per second to start a BA session
+ * @IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK: disable BW scaling
+ */
+enum iwl_tlc_debug_flags {
+ IWL_TLC_DEBUG_FIXED_RATE,
+ IWL_TLC_DEBUG_STATS_TH,
+ IWL_TLC_DEBUG_STATS_TIME_TH,
+ IWL_TLC_DEBUG_AGG_TIME_LIM,
+ IWL_TLC_DEBUG_AGG_DIS_START_TH,
+ IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM,
+ IWL_TLC_DEBUG_RENEW_ADDBA_DELAY,
+ IWL_TLC_DEBUG_START_AC_RATE_IDX,
+ IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK,
+}; /* TLC_MNG_DEBUG_FLAGS_API_E_VER_1 */
+
+/**
+ * struct iwl_dhc_tlc_cmd - fixed debug config
+ * @sta_id: bit 0 - enable/disable, bits 1 - 7 hold station id
+ * @reserved1: reserved
+ * @flags: bitmap of %IWL_TLC_DEBUG_\*
+ * @fixed_rate: rate value
+ * @stats_threshold: if number of tx-ed frames is greater, send statistics
+ * @time_threshold: statistics threshold in usec
+ * @agg_time_lim: max agg time
+ * @agg_dis_start_threshold: frames with try-count greater than this count will
+ * not be aggregated
+ * @agg_frame_count_lim: agg size
+ * @addba_retry_delay: delay between retries of ADD BA
+ * @start_ac_rate_idx: frames per second to start a BA session
+ * @no_far_range_tweak: disable BW scaling
+ * @reserved2: reserved
+ */
+struct iwl_dhc_tlc_cmd {
+ u8 sta_id;
+ u8 reserved1[3];
+ __le32 flags;
+ __le32 fixed_rate;
+ __le16 stats_threshold;
+ __le16 time_threshold;
+ __le16 agg_time_lim;
+ __le16 agg_dis_start_threshold;
+ __le16 agg_frame_count_lim;
+ __le16 addba_retry_delay;
+ u8 start_ac_rate_idx[IEEE80211_NUM_ACS];
+ u8 no_far_range_tweak;
+ u8 reserved2[3];
+} __packed;
+
/*
* These serve as indexes into
* struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
@@ -253,7 +514,6 @@ enum {
#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | \
RATE_MCS_ANT_C_MSK)
#define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK
-#define RATE_MCS_ANT_NUM 3
/* Bit 17: (0) SS, (1) SS*2 */
#define RATE_MCS_STBC_POS 17
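To make the new API concrete, here is a sketch of filling iwl_tlc_config_cmd for a two-chain HT station on a 40 MHz channel. Every value is illustrative; the real driver derives them from the station's ieee80211 capabilities.

/* Illustrative only: a plausible fill of the new TLC config command. */
static void example_fill_tlc_cmd(struct iwl_tlc_config_cmd *cmd, u8 sta_id)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->sta_id = sta_id;
	cmd->max_supp_ch_width = IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ;
	cmd->chains = IWL_TLC_MNG_CHAIN_A_MSK | IWL_TLC_MNG_CHAIN_B_MSK;
	cmd->max_supp_ss = 2;
	cmd->mode = IWL_TLC_MNG_MODE_HT;
	cmd->flags = cpu_to_le16(IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK |
				 IWL_TLC_MNG_CFG_FLAGS_STBC_MSK);
	cmd->sgi_ch_width_supp = IWL_TLC_MNG_SGI_20MHZ_MSK |
				 IWL_TLC_MNG_SGI_40MHZ_MSK;
	/* MCS0..MCS7 on the first spatial stream */
	cmd->ht_supp_rates[0] =
		cpu_to_le16(GENMASK(IWL_TLC_MNG_HT_RATE_MCS7, 0));
}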
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index f5d5ba7e37ec..a2a40b515a3c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -121,7 +121,7 @@ enum iwl_tx_flags {
}; /* TX_FLAGS_BITS_API_S_VER_1 */
/**
- * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000
+ * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for 22000
* @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command
* @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
* to a secured STA
@@ -301,7 +301,7 @@ struct iwl_dram_sec_info {
} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
/**
- * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices
+ * struct iwl_tx_cmd_gen2 - TX command struct to FW for 22000 devices
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @offload_assist: TX offload configuration
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 8106fd4be996..67aefc8fc9ac 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -964,7 +964,20 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
if (trigger)
delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
- if (WARN(fwrt->trans->state == IWL_TRANS_NO_FW,
+ /*
+ * If the loading of the FW completed successfully, the next step is to
+ * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is
+ * non-zero, the FW was already loaded successfully. If the state is "NO_FW"
+ * in such a case - WARN and exit, since FW may be dead. Otherwise, we
+ * can try to collect the data, since FW might just not be fully
+ * loaded (no "ALIVE" yet), and the debug data is accessible.
+ *
+ * Corner case: got the FW alive but crashed before getting the SMEM
+ * config. In such a case, due to HW access problems, we might
+ * collect garbage.
+ */
+ if (WARN((fwrt->trans->state == IWL_TRANS_NO_FW) &&
+ fwrt->smem_cfg.num_lmacs,
"Can't collect dbg data when FW isn't alive\n"))
return -EIO;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
new file mode 100644
index 000000000000..e2ded29a145d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -0,0 +1,195 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "api/commands.h"
+#include "debugfs.h"
+
+#define FWRT_DEBUGFS_READ_FILE_OPS(name) \
+static ssize_t iwl_dbgfs_##name##_read(struct iwl_fw_runtime *fwrt, \
+ char *buf, size_t count, \
+ loff_t *ppos); \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \
+static ssize_t iwl_dbgfs_##name##_write(struct iwl_fw_runtime *fwrt, \
+ char *buf, size_t count, \
+ loff_t *ppos); \
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct iwl_fw_runtime *fwrt = file->private_data; \
+ char buf[buflen] = {}; \
+ size_t buf_size = min(count, sizeof(buf) - 1); \
+ \
+ if (copy_from_user(buf, user_buf, buf_size)) \
+ return -EFAULT; \
+ \
+ return iwl_dbgfs_##name##_write(fwrt, buf, buf_size, ppos); \
+}
+
+#define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen) \
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen) \
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
+ if (!debugfs_create_file(alias, mode, parent, fwrt, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
+#define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \
+ FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+static int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_mvm_marker marker = {
+ .dw_len = sizeof(struct iwl_mvm_marker) / 4,
+ .marker_id = MARKER_ID_SYNC_CLOCK,
+
+ /* the real timestamp is taken from the ftrace clock
+ * this is for finding the match between fw and kernel logs
+ */
+ .timestamp = cpu_to_le64(fwrt->timestamp.seq++),
+ };
+
+ struct iwl_host_cmd hcmd = {
+ .id = MARKER_CMD,
+ .flags = CMD_ASYNC,
+ .data[0] = &marker,
+ .len[0] = sizeof(marker),
+ };
+
+ return iwl_trans_send_cmd(fwrt->trans, &hcmd);
+}
+
+static void iwl_fw_timestamp_marker_wk(struct work_struct *work)
+{
+ int ret;
+ struct iwl_fw_runtime *fwrt =
+ container_of(work, struct iwl_fw_runtime, timestamp.wk.work);
+ unsigned long delay = fwrt->timestamp.delay;
+
+ ret = iwl_fw_send_timestamp_marker_cmd(fwrt);
+ if (!ret && delay)
+ schedule_delayed_work(&fwrt->timestamp.wk,
+ round_jiffies_relative(delay));
+ else
+ IWL_INFO(fwrt,
+ "stopping timestamp_marker, ret: %d, delay: %u\n",
+ ret, jiffies_to_msecs(delay) / 1000);
+}
+
+static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ int ret;
+ u32 delay;
+
+ ret = kstrtou32(buf, 10, &delay);
+ if (ret < 0)
+ return ret;
+
+ IWL_INFO(fwrt,
+ "starting timestamp_marker trigger with delay: %us\n",
+ delay);
+
+ iwl_fw_cancel_timestamp(fwrt);
+
+ fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000);
+
+ schedule_delayed_work(&fwrt->timestamp.wk,
+ round_jiffies_relative(fwrt->timestamp.delay));
+ return count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(timestamp_marker, 10);
+
+int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+ struct dentry *dbgfs_dir)
+{
+ INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
+ FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, S_IWUSR);
+ return 0;
+err:
+ IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n");
+ return -ENOMEM;
+}
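For reference, the FWRT_DEBUGFS_WRITE_FILE_OPS(timestamp_marker, 10) invocation above generates a bounds-checked write wrapper plus a file_operations instance (it also forward-declares iwl_dbgfs_timestamp_marker_write()). Hand-expanded, it reads:

static ssize_t _iwl_dbgfs_timestamp_marker_write(struct file *file,
						 const char __user *user_buf,
						 size_t count, loff_t *ppos)
{
	struct iwl_fw_runtime *fwrt = file->private_data;
	char buf[10] = {};
	size_t buf_size = min(count, sizeof(buf) - 1);

	/* buf is zero-initialized and at most buflen - 1 bytes are
	 * copied, so the string handed on is always NUL-terminated.
	 */
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	return iwl_dbgfs_timestamp_marker_write(fwrt, buf, buf_size, ppos);
}

static const struct file_operations iwl_dbgfs_timestamp_marker_ops = {
	.write = _iwl_dbgfs_timestamp_marker_write,
	.open = simple_open,
	.llseek = generic_file_llseek,
};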
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
new file mode 100644
index 000000000000..e57ff92a68ae
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
@@ -0,0 +1,87 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "runtime.h"
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+ struct dentry *dbgfs_dir);
+
+static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)
+{
+ fwrt->timestamp.delay = 0;
+ cancel_delayed_work_sync(&fwrt->timestamp.wk);
+}
+
+#else
+static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+ struct dentry *dbgfs_dir)
+{
+ return 0;
+}
+
+static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {}
+
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 37a5c5b4eda6..1a05d506ac9a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -246,10 +246,10 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
 * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
* @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
* @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
- * @IWL_UCODE_TLV_API_ATS_COEX_EXTERNAL: the coex notification is enlared to
- * include information about ACL time sharing.
* @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field
* indicating low latency direction.
+ * @IWL_UCODE_TLV_API_DEPRECATE_TTAK: RX status flag TTAK ok (bit 7) is
+ * deprecated.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -267,8 +267,8 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32,
IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
- IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37,
IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = (__force iwl_ucode_tlv_api_t)38,
+ IWL_UCODE_TLV_API_DEPRECATE_TTAK = (__force iwl_ucode_tlv_api_t)41,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@@ -313,6 +313,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
* @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
* @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
+ * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
+ * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware handles quota allocation on
+ *	its own, so the driver skips its quota updates
* @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
* @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
* @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@@ -367,6 +369,8 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39,
IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40,
IWL_UCODE_TLV_CAPA_D0I3_END_FIRST = (__force iwl_ucode_tlv_capa_t)41,
+ IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43,
+ IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44,
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
@@ -541,7 +545,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
} __packed;
/**
- * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
+ * struct iwl_fw_dbg_dest_tlv_v1 - configures the destination of the debug data
*
* @version: version of the TLV - currently 0
* @monitor_mode: &enum iwl_fw_dbg_monitor_mode
@@ -556,7 +560,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
*
* This parses IWL_UCODE_TLV_FW_DBG_DEST
*/
-struct iwl_fw_dbg_dest_tlv {
+struct iwl_fw_dbg_dest_tlv_v1 {
u8 version;
u8 monitor_mode;
u8 size_power;
@@ -570,6 +574,26 @@ struct iwl_fw_dbg_dest_tlv {
struct iwl_fw_dbg_reg_op reg_ops[0];
} __packed;
+/* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */
+#define IWL_LDBG_M2S_BUF_SIZE_MSK 0x0fff0000
+/* Mask of the register for defining the LDBG MAC2SMEM SMEM base address */
+#define IWL_LDBG_M2S_BUF_BA_MSK 0x00000fff
+/* The smem buffer chunks are in units of 256 bits */
+#define IWL_M2S_UNIT_SIZE 0x100
+
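
Read together, these three constants describe how a single LDBG configuration register packs both fields. A hedged sketch of the decoding, with the helper names and the 16-bit shift inferred from the masks rather than taken from driver code:

    /* Illustrative helpers only; bit positions follow the masks above. */
    static inline u32 iwl_m2s_buf_size(u32 cfg)
    {
            /* bits 16..27: buffer size in IWL_M2S_UNIT_SIZE chunks */
            return ((cfg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> 16) *
                   IWL_M2S_UNIT_SIZE;
    }

    static inline u32 iwl_m2s_buf_base(u32 cfg)
    {
            /* bits 0..11: SMEM base address field */
            return cfg & IWL_LDBG_M2S_BUF_BA_MSK;
    }
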
+struct iwl_fw_dbg_dest_tlv {
+ u8 version;
+ u8 monitor_mode;
+ u8 size_power;
+ u8 reserved;
+ __le32 cfg_reg;
+ __le32 write_ptr_reg;
+ __le32 wrap_count;
+ u8 base_shift;
+ u8 size_shift;
+ struct iwl_fw_dbg_reg_op reg_ops[0];
+} __packed;
+
struct iwl_fw_dbg_conf_hcmd {
u8 id;
u8 reserved;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 985496cc01d0..b23ffe12ad84 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -284,7 +284,7 @@ struct iwl_fw {
struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
- struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index bfe5316bbb6a..c39fe84bb4c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -58,10 +58,12 @@
#include "iwl-drv.h"
#include "runtime.h"
#include "dbg.h"
+#include "debugfs.h"
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
- const struct iwl_fw *fw,
- const struct iwl_fw_runtime_ops *ops, void *ops_ctx)
+ const struct iwl_fw *fw,
+ const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
+ struct dentry *dbgfs_dir)
{
memset(fwrt, 0, sizeof(*fwrt));
fwrt->trans = trans;
@@ -71,5 +73,12 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
fwrt->ops = ops;
fwrt->ops_ctx = ops_ctx;
INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
+ iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
+
+void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt)
+{
+ iwl_fw_cancel_timestamp(fwrt);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_runtime_exit);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 50cfb6d795a5..e25c049f980f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -134,11 +134,21 @@ struct iwl_fw_runtime {
/* ts of the beginning of a non-collect fw dbg data period */
unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1];
} dump;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct {
+ struct delayed_work wk;
+ u32 delay;
+ u64 seq;
+ } timestamp;
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
- const struct iwl_fw *fw,
- const struct iwl_fw_runtime_ops *ops, void *ops_ctx);
+ const struct iwl_fw *fw,
+ const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
+ struct dentry *dbgfs_dir);
+
+void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt);
static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt,
enum iwl_ucode_type cur_fw_img)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index 76675736ba4f..fb4b6442b4d7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -63,8 +63,8 @@
#include "runtime.h"
#include "fw/api/commands.h"
-static void iwl_parse_shared_mem_a000(struct iwl_fw_runtime *fwrt,
- struct iwl_rx_packet *pkt)
+static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt,
+ struct iwl_rx_packet *pkt)
{
struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
int i, lmac;
@@ -143,8 +143,8 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
return;
pkt = cmd.resp_pkt;
- if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
- iwl_parse_shared_mem_a000(fwrt, pkt);
+ if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+ iwl_parse_shared_mem_22000(fwrt, pkt);
else
iwl_parse_shared_mem(fwrt, pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index e21e46cf6f9a..258d439bb0a9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -89,7 +89,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_7000,
IWL_DEVICE_FAMILY_8000,
IWL_DEVICE_FAMILY_9000,
- IWL_DEVICE_FAMILY_A000,
+ IWL_DEVICE_FAMILY_22000,
};
/*
@@ -266,7 +266,7 @@ struct iwl_tt_params {
#define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */
#define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */
#define OTP_LOW_IMAGE_SIZE_FAMILY_9000 OTP_LOW_IMAGE_SIZE_FAMILY_8000
-#define OTP_LOW_IMAGE_SIZE_FAMILY_A000 OTP_LOW_IMAGE_SIZE_FAMILY_9000
+#define OTP_LOW_IMAGE_SIZE_FAMILY_22000 OTP_LOW_IMAGE_SIZE_FAMILY_9000
struct iwl_eeprom_params {
const u8 regulatory_bands[7];
@@ -330,7 +330,7 @@ struct iwl_pwr_tx_backoff {
* @vht_mu_mimo_supported: VHT MU-MIMO support
* @rf_id: need to read rf_id to determine the firmware image
* @integrated: discrete or integrated
- * @gen2: a000 and on transport operation
+ * @gen2: 22000 and on transport operation
* @cdb: CDB support
* @nvm_type: see &enum iwl_nvm_type
* @tx_cmd_queue_size: size of the cmd queue. If zero, use the same value as
@@ -477,13 +477,13 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
-extern const struct iwl_cfg iwla000_2ac_cfg_hr;
-extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
-extern const struct iwl_cfg iwla000_2ac_cfg_jf;
-extern const struct iwl_cfg iwla000_2ax_cfg_hr;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0;
+extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
+extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
+extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
+extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
index 7f16dcce0995..9518a82f44c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -95,7 +95,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, hdr_len),
TP_STRUCT__entry(
DEV_ENTRY
-
+ __field(void *, skbaddr)
__field(size_t, framelen)
__dynamic_array(u8, tfd, tfdlen)
@@ -110,6 +110,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
),
TP_fast_assign(
DEV_ASSIGN;
+ __entry->skbaddr = skb;
__entry->framelen = buf0_len;
if (hdr_len > 0)
__entry->framelen += skb->len - hdr_len;
@@ -120,9 +121,9 @@ TRACE_EVENT(iwlwifi_dev_tx,
__get_dynamic_array(buf1),
skb->len - hdr_len);
),
- TP_printk("[%s] TX %.2x (%zu bytes)",
+ TP_printk("[%s] TX %.2x (%zu bytes) skbaddr=%p",
__get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
- __entry->framelen)
+ __entry->framelen, __entry->skbaddr)
);
TRACE_EVENT(iwlwifi_dev_ucode_error,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 4b224d7d967c..9c4a7f648a44 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -296,7 +296,12 @@ struct iwl_firmware_pieces {
u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
/* FW debug data parsed for driver usage */
- struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ bool dbg_dest_tlv_init;
+ u8 *dbg_dest_ver;
+ union {
+ struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
+ };
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
@@ -919,27 +924,60 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
minor = le32_to_cpup(ptr++);
local_comp = le32_to_cpup(ptr);
- snprintf(drv->fw.fw_version,
- sizeof(drv->fw.fw_version), "%u.%u.%u",
- major, minor, local_comp);
+ if (major >= 35)
+ snprintf(drv->fw.fw_version,
+ sizeof(drv->fw.fw_version),
+ "%u.%08x.%u", major, minor, local_comp);
+ else
+ snprintf(drv->fw.fw_version,
+ sizeof(drv->fw.fw_version),
+ "%u.%u.%u", major, minor, local_comp);
break;
}
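
For clarity, this is how the two branches above render a version string; the sample numbers are made up:

    char buf[FW_VER_HUMAN_READABLE_SZ];

    /* major >= 35: the minor component is printed as 8 hex digits */
    snprintf(buf, sizeof(buf), "%u.%08x.%u", 36u, 0x01a9c0deu, 29u);
    /* -> "36.01a9c0de.29" */

    /* older firmware keeps the plain decimal triplet */
    snprintf(buf, sizeof(buf), "%u.%u.%u", 34u, 3u, 29u);
    /* -> "34.3.29" */
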
case IWL_UCODE_TLV_FW_DBG_DEST: {
- struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data;
+ struct iwl_fw_dbg_dest_tlv *dest = NULL;
+ struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
+ u8 mon_mode;
+
+ pieces->dbg_dest_ver = (u8 *)tlv_data;
+ if (*pieces->dbg_dest_ver == 1) {
+ dest = (void *)tlv_data;
+ } else if (*pieces->dbg_dest_ver == 0) {
+ dest_v1 = (void *)tlv_data;
+ } else {
+ IWL_ERR(drv,
+ "The version is %d, and it is invalid\n",
+ *pieces->dbg_dest_ver);
+ break;
+ }
- if (pieces->dbg_dest_tlv) {
+ if (pieces->dbg_dest_tlv_init) {
IWL_ERR(drv,
"dbg destination ignored, already exists\n");
break;
}
- pieces->dbg_dest_tlv = dest;
+ pieces->dbg_dest_tlv_init = true;
+
+ if (dest_v1) {
+ pieces->dbg_dest_tlv_v1 = dest_v1;
+ mon_mode = dest_v1->monitor_mode;
+ } else {
+ pieces->dbg_dest_tlv = dest;
+ mon_mode = dest->monitor_mode;
+ }
+
IWL_INFO(drv, "Found debug destination: %s\n",
- get_fw_dbg_mode_string(dest->monitor_mode));
+ get_fw_dbg_mode_string(mon_mode));
+
+ drv->fw.dbg_dest_reg_num = (dest_v1) ?
+ tlv_len -
+ offsetof(struct iwl_fw_dbg_dest_tlv_v1,
+ reg_ops) :
+ tlv_len -
+ offsetof(struct iwl_fw_dbg_dest_tlv,
+ reg_ops);
- drv->fw.dbg_dest_reg_num =
- tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv,
- reg_ops);
drv->fw.dbg_dest_reg_num /=
sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]);
@@ -948,7 +986,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_FW_DBG_CONF: {
struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;
- if (!pieces->dbg_dest_tlv) {
+ if (!pieces->dbg_dest_tlv_init) {
IWL_ERR(drv,
"Ignore dbg config %d - no destination configured\n",
conf->id);
@@ -1335,15 +1373,51 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (iwl_alloc_ucode(drv, pieces, i))
goto out_free_fw;
- if (pieces->dbg_dest_tlv) {
- drv->fw.dbg_dest_tlv =
- kmemdup(pieces->dbg_dest_tlv,
- sizeof(*pieces->dbg_dest_tlv) +
- sizeof(pieces->dbg_dest_tlv->reg_ops[0]) *
- drv->fw.dbg_dest_reg_num, GFP_KERNEL);
+ if (pieces->dbg_dest_tlv_init) {
+ size_t dbg_dest_size = sizeof(*drv->fw.dbg_dest_tlv) +
+ sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
+ drv->fw.dbg_dest_reg_num;
+
+ drv->fw.dbg_dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
if (!drv->fw.dbg_dest_tlv)
goto out_free_fw;
+
+ if (*pieces->dbg_dest_ver == 0) {
+ memcpy(drv->fw.dbg_dest_tlv, pieces->dbg_dest_tlv_v1,
+ dbg_dest_size);
+ } else {
+ struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv =
+ drv->fw.dbg_dest_tlv;
+
+ dest_tlv->version = pieces->dbg_dest_tlv->version;
+ dest_tlv->monitor_mode =
+ pieces->dbg_dest_tlv->monitor_mode;
+ dest_tlv->size_power =
+ pieces->dbg_dest_tlv->size_power;
+ dest_tlv->wrap_count =
+ pieces->dbg_dest_tlv->wrap_count;
+ dest_tlv->write_ptr_reg =
+ pieces->dbg_dest_tlv->write_ptr_reg;
+ dest_tlv->base_shift =
+ pieces->dbg_dest_tlv->base_shift;
+ memcpy(dest_tlv->reg_ops,
+ pieces->dbg_dest_tlv->reg_ops,
+ sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
+ drv->fw.dbg_dest_reg_num);
+
+			/* In version 1 of the destination tlv, which is
+			 * relevant for the internal buffer exclusively,
+			 * the base address is given together with the length
+			 * of the buffer, and the size shift is given instead
+			 * of the end shift. We store these values in base_reg
+			 * and end_shift, and when dumping the data we'll
+			 * manipulate them to extract both the length and the
+			 * base address */
+ dest_tlv->base_reg = pieces->dbg_dest_tlv->cfg_reg;
+ dest_tlv->end_shift =
+ pieces->dbg_dest_tlv->size_shift;
+ }
}
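
The conversion above means the rest of the driver always sees a v1-layout TLV, whatever the firmware file carried. As a sketch of the dump-time arithmetic the comment alludes to, assuming the dump path reads the packed register through base_reg and reuses the M2S masks from file.h (iwl_read_prph exists in this driver; the surrounding logic here is illustrative):

    u32 cfg = iwl_read_prph(trans, le32_to_cpu(dest->base_reg));

    /* for a converted v1 TLV, one register carries base and size */
    base = (cfg & IWL_LDBG_M2S_BUF_BA_MSK) << dest->base_shift;
    base *= IWL_M2S_UNIT_SIZE;
    /* end_shift was loaded with the v1 size shift above */
    len = (cfg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> dest->end_shift;
    len *= IWL_M2S_UNIT_SIZE;
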
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 66e5db41e559..11789ffb6512 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -121,7 +121,7 @@
#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
-/* a000 TFD table address, 64 bit */
+/* 22000 TFD table address, 64 bit */
#define TFH_TFDQ_CBB_TABLE (0x1C00)
/* Find TFD CB base pointer for given queue */
@@ -140,7 +140,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
}
-/* a000 configuration registers */
+/* 22000 configuration registers */
/*
* TFH Configuration register.
@@ -697,8 +697,8 @@ struct iwl_tfh_tb {
* Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
* Both driver and device share these circular buffers, each of which must be
* contiguous 256 TFDs.
- * For pre a000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
- * For a000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes
+ * For pre 22000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
+ * For 22000 HW and on it is 256 x 256 bytes-per-TFD = 64 KBytes
*
* Driver must indicate the physical address of the base of each
* circular buffer via the FH_MEM_CBBC_QUEUE registers.
@@ -750,10 +750,10 @@ struct iwl_tfh_tfd {
/**
* struct iwlagn_schedq_bc_tbl scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
- * For devices up to a000:
+ * For devices up to 22000:
* @tfd_offset 0-12 - tx command byte count
* 12-16 - station index
- * For a000 and on:
+ * For 22000 and on:
* @tfd_offset 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 921cab9e2d73..c25ed1a0bbb0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -551,7 +551,7 @@ struct iwl_trans_ops {
unsigned int queue_wdg_timeout);
void (*txq_disable)(struct iwl_trans *trans, int queue,
bool configure_scd);
- /* a000 functions */
+ /* 22000 functions */
int (*txq_alloc)(struct iwl_trans *trans,
struct iwl_tx_queue_cfg_cmd *cmd,
int cmd_id,
@@ -579,6 +579,7 @@ struct iwl_trans_ops {
void (*configure)(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
+ void (*sw_reset)(struct iwl_trans *trans);
bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
void (*release_nic_access)(struct iwl_trans *trans,
unsigned long *flags);
@@ -744,7 +745,7 @@ struct iwl_trans {
struct lockdep_map sync_cmd_lockdep_map;
#endif
- const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
u8 dbg_dest_reg_num;
@@ -1124,6 +1125,12 @@ static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
trans->ops->set_pmi(trans, state);
}
+static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
+{
+ if (trans->ops->sw_reset)
+ trans->ops->sw_reset(trans);
+}
+
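
Because of the NULL check, sw_reset is an optional transport op and callers may invoke the wrapper unconditionally; a trivial sketch (the caller is hypothetical):

    /* Hypothetical recovery path: harmless on transports that do not
     * implement sw_reset, since the wrapper degrades to a no-op. */
    static void example_recover(struct iwl_trans *trans)
    {
            iwl_trans_sw_reset(trans);
    }
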
static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index a47635c32c11..9ffd21918b5a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
-iwlmvm-y += scan.o time-event.o rs.o
+iwlmvm-y += scan.o time-event.o rs.o rs-fw.o
iwlmvm-y += power.o coex.o
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 79c80f181f7d..890dbfff3a06 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -7,7 +7,6 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +33,6 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -514,36 +512,17 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
- if (!iwl_mvm_has_new_ats_coex_api(mvm)) {
- struct iwl_bt_coex_profile_notif_v4 *v4 = (void *)pkt->data;
-
- mvm->last_bt_notif.mbox_msg[0] = v4->mbox_msg[0];
- mvm->last_bt_notif.mbox_msg[1] = v4->mbox_msg[1];
- mvm->last_bt_notif.mbox_msg[2] = v4->mbox_msg[2];
- mvm->last_bt_notif.mbox_msg[3] = v4->mbox_msg[3];
- mvm->last_bt_notif.msg_idx = v4->msg_idx;
- mvm->last_bt_notif.bt_ci_compliance = v4->bt_ci_compliance;
- mvm->last_bt_notif.primary_ch_lut = v4->primary_ch_lut;
- mvm->last_bt_notif.secondary_ch_lut = v4->secondary_ch_lut;
- mvm->last_bt_notif.bt_activity_grading =
- v4->bt_activity_grading;
- mvm->last_bt_notif.ttc_status = v4->ttc_status;
- mvm->last_bt_notif.rrc_status = v4->rrc_status;
- } else {
- /* save this notification for future use: rssi fluctuations */
- memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
- }
-
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
- IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n",
- mvm->last_bt_notif.bt_ci_compliance);
+ IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
- le32_to_cpu(mvm->last_bt_notif.primary_ch_lut));
+ le32_to_cpu(notif->primary_ch_lut));
IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
- le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut));
+ le32_to_cpu(notif->secondary_ch_lut));
IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
- le32_to_cpu(mvm->last_bt_notif.bt_activity_grading));
+ le32_to_cpu(notif->bt_activity_grading));
+ /* remember this notification for future use: rssi fluctuations */
+ memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
iwl_mvm_bt_coex_notif_handle(mvm);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index b1f73dcabd31..0e6cf39285f4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -429,231 +429,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
return err;
}
-enum iwl_mvm_tcp_packet_type {
- MVM_TCP_TX_SYN,
- MVM_TCP_RX_SYNACK,
- MVM_TCP_TX_DATA,
- MVM_TCP_RX_ACK,
- MVM_TCP_RX_WAKE,
- MVM_TCP_TX_FIN,
-};
-
-static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
-{
- __sum16 check = tcp_v4_check(len, saddr, daddr, 0);
- return cpu_to_le16(be16_to_cpu((__force __be16)check));
-}
-
-static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
- struct cfg80211_wowlan_tcp *tcp,
- void *_pkt, u8 *mask,
- __le16 *pseudo_hdr_csum,
- enum iwl_mvm_tcp_packet_type ptype)
-{
- struct {
- struct ethhdr eth;
- struct iphdr ip;
- struct tcphdr tcp;
- u8 data[];
- } __packed *pkt = _pkt;
- u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
- int i;
-
- pkt->eth.h_proto = cpu_to_be16(ETH_P_IP),
- pkt->ip.version = 4;
- pkt->ip.ihl = 5;
- pkt->ip.protocol = IPPROTO_TCP;
-
- switch (ptype) {
- case MVM_TCP_TX_SYN:
- case MVM_TCP_TX_DATA:
- case MVM_TCP_TX_FIN:
- memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
- memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
- pkt->ip.ttl = 128;
- pkt->ip.saddr = tcp->src;
- pkt->ip.daddr = tcp->dst;
- pkt->tcp.source = cpu_to_be16(tcp->src_port);
- pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
- /* overwritten for TX SYN later */
- pkt->tcp.doff = sizeof(struct tcphdr) / 4;
- pkt->tcp.window = cpu_to_be16(65000);
- break;
- case MVM_TCP_RX_SYNACK:
- case MVM_TCP_RX_ACK:
- case MVM_TCP_RX_WAKE:
- memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
- memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
- pkt->ip.saddr = tcp->dst;
- pkt->ip.daddr = tcp->src;
- pkt->tcp.source = cpu_to_be16(tcp->dst_port);
- pkt->tcp.dest = cpu_to_be16(tcp->src_port);
- break;
- default:
- WARN_ON(1);
- return;
- }
-
- switch (ptype) {
- case MVM_TCP_TX_SYN:
- /* firmware assumes 8 option bytes - 8 NOPs for now */
- memset(pkt->data, 0x01, 8);
- ip_tot_len += 8;
- pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
- pkt->tcp.syn = 1;
- break;
- case MVM_TCP_TX_DATA:
- ip_tot_len += tcp->payload_len;
- memcpy(pkt->data, tcp->payload, tcp->payload_len);
- pkt->tcp.psh = 1;
- pkt->tcp.ack = 1;
- break;
- case MVM_TCP_TX_FIN:
- pkt->tcp.fin = 1;
- pkt->tcp.ack = 1;
- break;
- case MVM_TCP_RX_SYNACK:
- pkt->tcp.syn = 1;
- pkt->tcp.ack = 1;
- break;
- case MVM_TCP_RX_ACK:
- pkt->tcp.ack = 1;
- break;
- case MVM_TCP_RX_WAKE:
- ip_tot_len += tcp->wake_len;
- pkt->tcp.psh = 1;
- pkt->tcp.ack = 1;
- memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
- break;
- }
-
- switch (ptype) {
- case MVM_TCP_TX_SYN:
- case MVM_TCP_TX_DATA:
- case MVM_TCP_TX_FIN:
- pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
- pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
- break;
- case MVM_TCP_RX_WAKE:
- for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
- u8 tmp = tcp->wake_mask[i];
- mask[i + 6] |= tmp << 6;
- if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
- mask[i + 7] = tmp >> 2;
- }
- /* fall through for ethernet/IP/TCP headers mask */
- case MVM_TCP_RX_SYNACK:
- case MVM_TCP_RX_ACK:
- mask[0] = 0xff; /* match ethernet */
- /*
- * match ethernet, ip.version, ip.ihl
- * the ip.ihl half byte is really masked out by firmware
- */
- mask[1] = 0x7f;
- mask[2] = 0x80; /* match ip.protocol */
- mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
- mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
- mask[5] = 0x80; /* match tcp flags */
- /* leave rest (0 or set for MVM_TCP_RX_WAKE) */
- break;
- };
-
- *pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
- pkt->ip.saddr, pkt->ip.daddr);
-}
-
-static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_wowlan_tcp *tcp)
-{
- struct iwl_wowlan_remote_wake_config *cfg;
- struct iwl_host_cmd cmd = {
- .id = REMOTE_WAKE_CONFIG_CMD,
- .len = { sizeof(*cfg), },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
- int ret;
-
- if (!tcp)
- return 0;
-
- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return -ENOMEM;
- cmd.data[0] = cfg;
-
- cfg->max_syn_retries = 10;
- cfg->max_data_retries = 10;
- cfg->tcp_syn_ack_timeout = 1; /* seconds */
- cfg->tcp_ack_timeout = 1; /* seconds */
-
- /* SYN (TX) */
- iwl_mvm_build_tcp_packet(
- vif, tcp, cfg->syn_tx.data, NULL,
- &cfg->syn_tx.info.tcp_pseudo_header_checksum,
- MVM_TCP_TX_SYN);
- cfg->syn_tx.info.tcp_payload_length = 0;
-
- /* SYN/ACK (RX) */
- iwl_mvm_build_tcp_packet(
- vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
- &cfg->synack_rx.info.tcp_pseudo_header_checksum,
- MVM_TCP_RX_SYNACK);
- cfg->synack_rx.info.tcp_payload_length = 0;
-
- /* KEEPALIVE/ACK (TX) */
- iwl_mvm_build_tcp_packet(
- vif, tcp, cfg->keepalive_tx.data, NULL,
- &cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
- MVM_TCP_TX_DATA);
- cfg->keepalive_tx.info.tcp_payload_length =
- cpu_to_le16(tcp->payload_len);
- cfg->sequence_number_offset = tcp->payload_seq.offset;
- /* length must be 0..4, the field is little endian */
- cfg->sequence_number_length = tcp->payload_seq.len;
- cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
- cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
- if (tcp->payload_tok.len) {
- cfg->token_offset = tcp->payload_tok.offset;
- cfg->token_length = tcp->payload_tok.len;
- cfg->num_tokens =
- cpu_to_le16(tcp->tokens_size % tcp->payload_tok.len);
- memcpy(cfg->tokens, tcp->payload_tok.token_stream,
- tcp->tokens_size);
- } else {
- /* set tokens to max value to almost never run out */
- cfg->num_tokens = cpu_to_le16(65535);
- }
-
- /* ACK (RX) */
- iwl_mvm_build_tcp_packet(
- vif, tcp, cfg->keepalive_ack_rx.data,
- cfg->keepalive_ack_rx.rx_mask,
- &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
- MVM_TCP_RX_ACK);
- cfg->keepalive_ack_rx.info.tcp_payload_length = 0;
-
- /* WAKEUP (RX) */
- iwl_mvm_build_tcp_packet(
- vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
- &cfg->wake_rx.info.tcp_pseudo_header_checksum,
- MVM_TCP_RX_WAKE);
- cfg->wake_rx.info.tcp_payload_length =
- cpu_to_le16(tcp->wake_len);
-
- /* FIN */
- iwl_mvm_build_tcp_packet(
- vif, tcp, cfg->fin_tx.data, NULL,
- &cfg->fin_tx.info.tcp_pseudo_header_checksum,
- MVM_TCP_TX_FIN);
- cfg->fin_tx.info.tcp_payload_length = 0;
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
- kfree(cfg);
-
- return ret;
-}
-
static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *ap_sta)
{
@@ -1082,12 +857,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (ret)
return ret;
- ret = iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
- if (ret)
- return ret;
-
- ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
- return ret;
+ return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
}
static int
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 2ff594f11259..a7892c1254a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -425,6 +425,50 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+ struct iwl_mvm *mvm = lq_sta->pers.drv;
+ static const size_t bufsz = 2048;
+ char *buff;
+ int desc = 0;
+ ssize_t ret;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+
+ desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n",
+ lq_sta->pers.sta_id);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "fixed rate 0x%X\n",
+ lq_sta->pers.dbg_fixed_rate);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "A-MPDU size limit %d\n",
+ lq_sta->pers.dbg_agg_frame_count_lim);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "valid_tx_ant %s%s%s\n",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "last tx rate=0x%X ",
+ lq_sta->last_rate_n_flags);
+
+ desc += rs_pretty_print_rate(buff + desc, bufsz - desc,
+ lq_sta->last_rate_n_flags);
+ mutex_unlock(&mvm->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -470,8 +514,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
}
static
-int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm,
- struct iwl_bt_coex_profile_notif *notif, char *buf,
+int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
int pos, int bufsz)
{
pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
@@ -525,12 +568,7 @@ int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm,
BT_MBOX_PRINT(3, INBAND_P, false);
BT_MBOX_PRINT(3, MSG_TYPE_2, false);
BT_MBOX_PRINT(3, SSN_2, false);
- BT_MBOX_PRINT(3, UPDATE_REQUEST, !iwl_mvm_has_new_ats_coex_api(mvm));
-
- if (iwl_mvm_has_new_ats_coex_api(mvm)) {
- BT_MBOX_PRINT(4, ATS_BT_INTERVAL, false);
- BT_MBOX_PRINT(4, ATS_BT_ACTIVE_MAX_TH, true);
- }
+ BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
return pos;
}
@@ -549,7 +587,7 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
- pos += iwl_mvm_coex_dump_mbox(mvm, notif, buf, pos, bufsz);
+ pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
notif->bt_ci_compliance);
@@ -721,6 +759,9 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
mutex_lock(&mvm->mutex);
+ if (iwl_mvm_firmware_running(mvm))
+ iwl_mvm_request_statistics(mvm, false);
+
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - OFDM");
if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
@@ -936,7 +977,8 @@ static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
continue;
pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
(int)(ARRAY_SIZE(stats->last_rates) - i));
- pos += rs_pretty_print_rate(pos, stats->last_rates[idx]);
+ pos += rs_pretty_print_rate(pos, endpos - pos,
+ stats->last_rates[idx]);
}
spin_unlock_bh(&mvm->drv_stats_lock);
@@ -1179,7 +1221,7 @@ static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm,
loff_t *ppos)
{
struct iwl_trans *trans = mvm->trans;
- const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
struct iwl_continuous_record_cmd cont_rec = {};
int ret, rec_mode;
@@ -1603,6 +1645,19 @@ static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+#define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+#define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+
+#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do { \
+ if (!debugfs_create_file(alias, mode, parent, sta, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
+ } while (0)
+#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \
+ MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode)
+
static ssize_t
iwl_dbgfs_prph_reg_read(struct file *file,
char __user *user_buf,
@@ -1687,6 +1742,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
MVM_DEBUGFS_READ_FILE_OPS(stations);
+MVM_DEBUGFS_READ_FILE_OPS(rs_data);
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
@@ -1851,6 +1907,21 @@ static const struct file_operations iwl_dbgfs_mem_ops = {
.llseek = default_llseek,
};
+void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (iwl_mvm_has_tlc_offload(mvm))
+ MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, S_IRUSR);
+
+ return;
+err:
+ IWL_ERR(mvm, "Can't create the mvm station debugfs entry\n");
+}
+
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
struct dentry *bcast_dir __maybe_unused;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index c0de7bb86cf7..0920be637b57 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -297,7 +297,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (ret) {
struct iwl_trans *trans = mvm->trans;
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
@@ -923,11 +923,11 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
ret = iwl_run_init_mvm_ucode(mvm, false);
- if (iwlmvm_mod_params.init_dbg)
- return 0;
-
if (ret) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+
+ if (iwlmvm_mod_params.init_dbg)
+ return 0;
return ret;
}
@@ -998,9 +998,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
/* Init RSS configuration */
- /* TODO - remove a000 disablement when we have RXQ config API */
+ /* TODO - remove 22000 disablement when we have RXQ config API */
if (iwl_mvm_has_new_rx_api(mvm) &&
- mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_A000) {
+ mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
ret = iwl_send_rss_cfg_cmd(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
@@ -1111,7 +1111,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
- if (!iwlmvm_mod_params.init_dbg)
+ if (!iwlmvm_mod_params.init_dbg || !ret)
iwl_mvm_stop_device(mvm);
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 3e92a117c0b8..8aed40a8bc38 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -114,29 +114,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
},
};
-#ifdef CONFIG_PM_SLEEP
-static const struct nl80211_wowlan_tcp_data_token_feature
-iwl_mvm_wowlan_tcp_token_feature = {
- .min_len = 0,
- .max_len = 255,
- .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
-};
-
-static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
- .tok = &iwl_mvm_wowlan_tcp_token_feature,
- .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
- sizeof(struct ethhdr) -
- sizeof(struct iphdr) -
- sizeof(struct tcphdr),
- .data_interval_max = 65535, /* __le16 in API */
- .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
- sizeof(struct ethhdr) -
- sizeof(struct iphdr) -
- sizeof(struct tcphdr),
- .seq = true,
-};
-#endif
-
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
/*
* Use the reserved field to indicate magic values.
@@ -443,6 +420,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+
+ if (iwl_mvm_has_tlc_offload(mvm)) {
+ ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ }
+
if (iwl_mvm_has_new_rx_api(mvm))
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
@@ -477,7 +460,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
/* this is the case for CCK frames, it's better (only 8) for OFDM */
hw->radiotap_timestamp.accuracy = 22;
- hw->rate_control_algorithm = "iwl-mvm-rs";
+ if (!iwl_mvm_has_tlc_offload(mvm))
+ hw->rate_control_algorithm = RS_NAME;
+
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
@@ -702,7 +687,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
- mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
hw->wiphy->wowlan = &mvm->wowlan;
}
#endif
@@ -3216,6 +3200,10 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
duration, type);
+ /*
+ * Flush the done work, just in case it's still pending, so that
+ * the work it does can complete and we can accept new frames.
+ */
flush_work(&mvm->roc_done_wk);
mutex_lock(&mvm->mutex);
@@ -3813,7 +3801,7 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
mvm->noa_duration = noa_duration;
mvm->noa_vif = vif;
- return iwl_mvm_update_quotas(mvm, false, NULL);
+ return iwl_mvm_update_quotas(mvm, true, NULL);
case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
/* must be associated client vif - ignore authorized */
if (!vif || vif->type != NL80211_IFTYPE_STATION ||
@@ -4301,7 +4289,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
mvm->trans->num_rx_queues);
/* TODO - remove this when we have RXQ config API */
- if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) {
+ if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) {
qmask = BIT(0);
if (notif->sync)
atomic_set(&mvm->queue_sync_counter, 1);
@@ -4414,4 +4402,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
#endif
.get_survey = iwl_mvm_mac_get_survey,
.sta_statistics = iwl_mvm_mac_sta_statistics,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .sta_add_debugfs = iwl_mvm_sta_add_debugfs,
+#endif
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 55ab5349dd40..2d28e0804218 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1248,7 +1248,7 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm)
{
/* TODO - better define this */
- return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_A000;
+ return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000;
}
static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
@@ -1272,16 +1272,16 @@ static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_NEW_RX_STATS);
}
-static inline bool iwl_mvm_has_new_ats_coex_api(struct iwl_mvm *mvm)
+static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL);
+ IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY);
}
-static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
+static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm)
{
- return fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY);
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TLC_OFFLOAD);
}
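
Callers can branch on this capability to choose between the new firmware-driven rate scaling in rs-fw.c and the classic rs.c implementation. A sketch of such a dispatcher; its name and the rs.c entry point's exact signature are assumptions, while rs_fw_rate_init matches the new file below:

    static void example_rate_init(struct iwl_mvm *mvm,
                                  struct ieee80211_sta *sta,
                                  enum nl80211_band band)
    {
            if (iwl_mvm_has_tlc_offload(mvm))
                    rs_fw_rate_init(mvm, sta, band);        /* firmware TLC */
            else
                    iwl_mvm_rs_rate_init(mvm, sta, band, true);  /* rs.c */
    }
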
static inline struct agg_tx_status *
@@ -1600,9 +1600,9 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* rate scaling */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
-int rs_pretty_print_rate(char *buf, const u32 rate);
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
void rs_update_last_rssi(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
+ struct iwl_mvm_sta *mvmsta,
struct ieee80211_rx_status *rx_status);
/* power management */
@@ -1882,5 +1882,11 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct dentry *dir);
+#endif
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 45470b6b351a..5d525a0023dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -127,11 +127,8 @@ static int __init iwl_mvm_init(void)
}
ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
-
- if (ret) {
+ if (ret)
pr_err("Unable to register MVM op_mode: %d\n", ret);
- iwl_mvm_rate_control_unregister();
- }
return ret;
}
@@ -605,7 +602,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->fw = fw;
mvm->hw = hw;
- iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm);
+ iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
+ dbgfs_dir);
mvm->init_status = 0;
@@ -750,7 +748,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
err = iwl_run_init_mvm_ucode(mvm, true);
- if (!iwlmvm_mod_params.init_dbg)
+ if (!iwlmvm_mod_params.init_dbg || !err)
iwl_mvm_stop_device(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
mutex_unlock(&mvm->mutex);
@@ -804,6 +802,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_leds_exit(mvm);
iwl_mvm_thermal_exit(mvm);
out_free:
+ iwl_fw_runtime_exit(&mvm->fwrt);
iwl_fw_flush_dump(&mvm->fwrt);
if (iwlmvm_mod_params.init_dbg)
@@ -844,7 +843,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
kfree(mvm->d3_resume_sram);
#endif
-
+ iwl_fw_runtime_exit(&mvm->fwrt);
iwl_trans_op_mode_leave(mvm->trans);
iwl_phy_db_free(mvm->phy_db);
@@ -1021,6 +1020,8 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
iwl_mvm_rx_queue_notif(mvm, rxb, 0);
else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
+ else if (cmd == WIDE_ID(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF))
+ iwl_mvm_tlc_update_notif(mvm, pkt);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
index b4a0264329b7..690559bdf421 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -202,6 +202,10 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
+ return 0;
+
/* update all upon completion */
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
new file mode 100644
index 000000000000..55d1274c6092
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -0,0 +1,314 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "rs.h"
+#include "fw-api.h"
+#include "sta.h"
+#include "iwl-op-mode.h"
+#include "mvm.h"
+
+static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta)
+{
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ return IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ;
+ case IEEE80211_STA_RX_BW_80:
+ return IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ;
+ case IEEE80211_STA_RX_BW_40:
+ return IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ;
+ case IEEE80211_STA_RX_BW_20:
+ default:
+ return IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ;
+ }
+}
+
+static u8 rs_fw_set_active_chains(u8 chains)
+{
+ u8 fw_chains = 0;
+
+ if (chains & ANT_A)
+ fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
+ if (chains & ANT_B)
+ fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
+ if (chains & ANT_C)
+ fw_chains |= IWL_TLC_MNG_CHAIN_C_MSK;
+
+ return fw_chains;
+}
+
+static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ u8 supp = 0;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ supp |= IWL_TLC_MNG_SGI_20MHZ_MSK;
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+ supp |= IWL_TLC_MNG_SGI_40MHZ_MSK;
+ if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ supp |= IWL_TLC_MNG_SGI_80MHZ_MSK;
+ if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ supp |= IWL_TLC_MNG_SGI_160MHZ_MSK;
+
+ return supp;
+}
+
+static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ bool vht_ena = vht_cap && vht_cap->vht_supported;
+ u16 flags = IWL_TLC_MNG_CFG_FLAGS_CCK_MSK |
+ IWL_TLC_MNG_CFG_FLAGS_DCM_MSK |
+ IWL_TLC_MNG_CFG_FLAGS_DD_MSK;
+
+ if (mvm->cfg->ht_params->stbc &&
+ (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+ ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
+ (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+
+ if (mvm->cfg->ht_params->ldpc &&
+ ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) ||
+ (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+ (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_BF_MSK;
+
+ return flags;
+}
+
+static
+int rs_fw_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
+ int nss)
+{
+ u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+ (0x3 << (2 * (nss - 1)));
+ rx_mcs >>= (2 * (nss - 1));
+
+ switch (rx_mcs) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ return IWL_TLC_MNG_HT_RATE_MCS7;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ return IWL_TLC_MNG_HT_RATE_MCS8;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ return IWL_TLC_MNG_HT_RATE_MCS9;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return 0;
+}
+
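
A worked example for the helper above: a 2x2 VHT peer supporting MCS 0-9 on both streams advertises rx_mcs_map == 0xfffa, because each per-NSS two-bit field uses the 802.11ac encoding (0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported):

    u16 map = 0xfffa;                       /* two streams, MCS 0-9 each */
    int nss = 2;
    u16 field = (map >> (2 * (nss - 1))) & 0x3;     /* == 2 */
    /* field 2 == IEEE80211_VHT_MCS_SUPPORT_0_9, so the helper returns
     * IWL_TLC_MNG_HT_RATE_MCS9 for both nss == 1 and nss == 2 */
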
+static void rs_fw_vht_set_enabled_rates(struct ieee80211_sta *sta,
+ struct ieee80211_sta_vht_cap *vht_cap,
+ struct iwl_tlc_config_cmd *cmd)
+{
+ u16 supp;
+ int i, highest_mcs;
+
+ for (i = 0; i < sta->rx_nss; i++) {
+ if (i == MAX_RS_ANT_NUM)
+ break;
+
+ highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
+ if (!highest_mcs)
+ continue;
+
+ supp = BIT(highest_mcs + 1) - 1;
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
+
+ cmd->ht_supp_rates[i] = cpu_to_le16(supp);
+ }
+}
+
+static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
+ struct ieee80211_supported_band *sband,
+ struct iwl_tlc_config_cmd *cmd)
+{
+ int i;
+ unsigned long tmp;
+ unsigned long supp; /* must be unsigned long for for_each_set_bit */
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+ /* non HT rates */
+ supp = 0;
+ tmp = sta->supp_rates[sband->band];
+ for_each_set_bit(i, &tmp, BITS_PER_LONG)
+ supp |= BIT(sband->bitrates[i].hw_value);
+
+ cmd->non_ht_supp_rates = cpu_to_le16(supp);
+ cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
+
+ /* HT/VHT rates */
+ if (vht_cap && vht_cap->vht_supported) {
+ cmd->mode = IWL_TLC_MNG_MODE_VHT;
+ rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
+ } else if (ht_cap && ht_cap->ht_supported) {
+ cmd->mode = IWL_TLC_MNG_MODE_HT;
+ cmd->ht_supp_rates[0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+ cmd->ht_supp_rates[1] = cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+ }
+}
+
+static void rs_fw_tlc_mng_notif_req_config(struct iwl_mvm *mvm, u8 sta_id)
+{
+ u32 cmd_id = iwl_cmd_id(TLC_MNG_NOTIF_REQ_CMD, DATA_PATH_GROUP, 0);
+ struct iwl_tlc_notif_req_config_cmd cfg_cmd = {
+ .sta_id = sta_id,
+ .flags = cpu_to_le16(IWL_TLC_NOTIF_INIT_RATE_MSK),
+ .interval = cpu_to_le16(IWL_TLC_NOTIF_REQ_INTERVAL),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send TLC notif request (%d)\n", ret);
+}
+
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
+{
+ struct iwl_tlc_update_notif *notif;
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_lq_sta_rs_fw *lq_sta;
+
+ notif = (void *)pkt->data;
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, notif->sta_id);
+
+ if (!mvmsta) {
+ IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
+ notif->sta_id);
+ return;
+ }
+
+ lq_sta = &mvmsta->lq_sta.rs_fw;
+
+ if (le16_to_cpu(notif->flags) & IWL_TLC_NOTIF_INIT_RATE_MSK) {
+ lq_sta->last_rate_n_flags =
+ le32_to_cpu(notif->values[IWL_TLC_NOTIF_INIT_RATE_POS]);
+ IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n",
+ lq_sta->last_rate_n_flags);
+ }
+}
+
+void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum nl80211_band band)
+{
+ struct ieee80211_hw *hw = mvm->hw;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+ u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
+ struct ieee80211_supported_band *sband;
+ struct iwl_tlc_config_cmd cfg_cmd = {
+ .sta_id = mvmsta->sta_id,
+ .max_supp_ch_width = rs_fw_bw_from_sta_bw(sta),
+ .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
+ .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
+ .max_supp_ss = sta->rx_nss,
+ .max_ampdu_cnt = cpu_to_le32(mvmsta->max_agg_bufsize),
+ .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
+ };
+ int ret;
+
+ memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_mvm_reset_frame_stats(mvm);
+#endif
+ sband = hw->wiphy->bands[band];
+ rs_fw_set_supp_rates(sta, sband, &cfg_cmd);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
+
+ rs_fw_tlc_mng_notif_req_config(mvm, cfg_cmd.sta_id);
+}
+
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable)
+{
+ /* TODO: need to introduce a new FW cmd since LQ cmd is not relevant */
+ IWL_DEBUG_RATE(mvm, "tx protection - not implemented yet.\n");
+ return 0;
+}
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta)
+{
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+ IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
+
+ lq_sta->pers.drv = mvm;
+ lq_sta->pers.sta_id = mvmsta->sta_id;
+ lq_sta->pers.chains = 0;
+ memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+ lq_sta->pers.last_rssi = S8_MIN;
+ lq_sta->last_rate_n_flags = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ lq_sta->pers.dbg_fixed_rate = 0;
+#endif
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index c69515ed72df..60abb0084ee5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -42,8 +42,6 @@
#include "mvm.h"
#include "debugfs.h"
-#define RS_NAME "iwl-mvm-rs"
-
#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
/* Calculations of success ratio are done in fixed point where 12800 is 100%.
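
For reference, "fixed point where 12800 is 100%" means ratios are scaled by
128 (128 * 100 == 12800). A minimal sketch, mirroring the computation this
file applies to its history windows:

    static int rs_success_ratio(int successes, int attempts)
    {
            if (!attempts)
                    return -1;      /* no data collected yet */
            /* scaled by 128: 12800 == 100%, 6400 == 50%, ... */
            return 128 * (100 * successes) / attempts;
    }

For example, 31 successes out of 62 attempts yields 6400, i.e. 50%.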
@@ -809,7 +807,7 @@ static int rs_collect_tlc_data(struct iwl_mvm *mvm,
return -EINVAL;
if (tbl->column != RS_COLUMN_INVALID) {
- struct lq_sta_pers *pers = &mvmsta->lq_sta.pers;
+ struct lq_sta_pers *pers = &mvmsta->lq_sta.rs_drv.pers;
pers->tx_stats[tbl->column][scale_index].total += attempts;
pers->tx_stats[tbl->column][scale_index].success += successes;
@@ -1206,7 +1204,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+ struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
@@ -1416,13 +1414,13 @@ done:
/*
* mac80211 sends us Tx status
*/
-static void rs_mac80211_tx_status(void *mvm_r,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
+static void rs_drv_mac80211_tx_status(void *mvm_r,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r;
+ struct iwl_op_mode *op_mode = mvm_r;
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1877,12 +1875,10 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
struct rs_rate *rate = &search_tbl->rate;
const struct rs_tx_column *column = &rs_tx_columns[col_id];
const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
unsigned long rate_mask = 0;
u32 rate_idx = 0;
- memcpy(search_tbl, tbl, sz);
+ memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win));
rate->sgi = column->sgi;
rate->ant = column->ant;
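
The offsetof() form above is more than cosmetic: assuming the layout in this
driver's rs.h, where the TPC histories (tpc_win[]) follow win[], the removed
size arithmetic over-counted and the copy spilled into the search table's
rate history:

    /*
     * struct iwl_scale_tbl_info {
     *         ...header fields...
     *         struct iwl_rate_scale_data win[IWL_RATE_COUNT];
     *         struct iwl_rate_scale_data tpc_win[...];  // follows win
     * };
     *
     * sizeof(tbl) - sizeof(win) == offsetof(win) + sizeof(tpc_win),
     * so a memcpy() of that size overwrote the start of search_tbl->win.
     * offsetof(..., win) copies exactly the fields preceding the windows.
     */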
@@ -2787,9 +2783,10 @@ out:
/* Save info about RSSI of last Rx */
void rs_update_last_rssi(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
+ struct iwl_mvm_sta *mvmsta,
struct ieee80211_rx_status *rx_status)
{
+ struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
int i;
lq_sta->pers.chains = rx_status->chains;
@@ -2858,15 +2855,15 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init);
}
-static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
- struct ieee80211_tx_rate_control *txrc)
+static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
+ void *mvm_sta,
+ struct ieee80211_tx_rate_control *txrc)
{
- struct sk_buff *skb = txrc->skb;
- struct iwl_op_mode *op_mode __maybe_unused =
- (struct iwl_op_mode *)mvm_r;
+ struct iwl_op_mode *op_mode = mvm_r;
struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+ struct sk_buff *skb = txrc->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl_lq_sta *lq_sta = mvm_sta;
+ struct iwl_lq_sta *lq_sta;
struct rs_rate *optimal_rate;
u32 last_ucode_rate;
@@ -2878,18 +2875,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
mvm_sta = NULL;
}
- /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (lq_sta && !lq_sta->pers.drv) {
- IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
- mvm_sta = NULL;
- }
-
/* Send management frames and NO_ACK data using lowest rate. */
if (rate_control_send_low(sta, mvm_sta, txrc))
return;
+ if (!mvm_sta)
+ return;
+
+ lq_sta = mvm_sta;
iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
info->band, &info->control.rates[0]);
info->control.rates[0].count = 1;
@@ -2906,13 +2899,13 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
}
}
-static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
- gfp_t gfp)
+static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
+ gfp_t gfp)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+ struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
@@ -2926,7 +2919,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
lq_sta->pers.last_rssi = S8_MIN;
- return &mvmsta->lq_sta;
+ return lq_sta;
}
static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
@@ -3043,7 +3036,7 @@ static void rs_vht_init(struct iwl_mvm *mvm,
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
-static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
{
spin_lock_bh(&mvm->drv_stats_lock);
memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats));
@@ -3111,15 +3104,15 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
/*
* Called after adding a new station to initialize rate scaling
*/
-void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum nl80211_band band, bool init)
+static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum nl80211_band band, bool init)
{
int i, j;
struct ieee80211_hw *hw = mvm->hw;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+ struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
struct ieee80211_supported_band *sband;
unsigned long supp; /* must be unsigned long for for_each_set_bit */
@@ -3194,16 +3187,15 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
rs_initialize_lq(mvm, sta, lq_sta, band, init);
}
-static void rs_rate_update(void *mvm_r,
- struct ieee80211_supported_band *sband,
- struct cfg80211_chan_def *chandef,
- struct ieee80211_sta *sta, void *priv_sta,
- u32 changed)
+static void rs_drv_rate_update(void *mvm_r,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta,
+ void *priv_sta, u32 changed)
{
+ struct iwl_op_mode *op_mode = mvm_r;
+ struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
u8 tid;
- struct iwl_op_mode *op_mode =
- (struct iwl_op_mode *)mvm_r;
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
if (!iwl_mvm_sta_from_mac80211(sta)->vif)
return;
@@ -3385,7 +3377,7 @@ static void rs_bfer_active_iter(void *_data,
{
struct rs_bfer_active_iter_data *data = _data;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.lq;
+ struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.rs_drv.lq;
u32 ss_params = le32_to_cpu(lq_cmd->ss_params);
if (sta == data->exclude_sta)
@@ -3497,7 +3489,8 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
/* Disallow BFER on another STA if active and we're a higher priority */
if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) {
- struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.lq;
+ struct iwl_lq_cmd *bfersta_lq_cmd =
+ &bfer_mvmsta->lq_sta.rs_drv.lq;
u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params);
bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
@@ -3569,14 +3562,14 @@ static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
return hw->priv;
}
+
/* rate scale requires free function to be implemented */
static void rs_free(void *mvm_rate)
{
return;
}
-static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
- void *mvm_sta)
+static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta)
{
struct iwl_op_mode *op_mode __maybe_unused = mvm_r;
struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
@@ -3586,7 +3579,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
}
#ifdef CONFIG_MAC80211_DEBUGFS
-int rs_pretty_print_rate(char *buf, const u32 rate)
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
{
char *type, *bw;
@@ -3597,10 +3590,10 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
!(rate & RATE_MCS_VHT_MSK)) {
int index = iwl_hwrate_to_plcp_idx(rate);
- return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n",
- rs_pretty_ant(ant),
- index == IWL_RATE_INVALID ? "BAD" :
- iwl_rate_mcs[index].mbps);
+ return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
+ rs_pretty_ant(ant),
+ index == IWL_RATE_INVALID ? "BAD" :
+ iwl_rate_mcs[index].mbps);
}
if (rate & RATE_MCS_VHT_MSK) {
@@ -3634,12 +3627,13 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
bw = "BAD BW";
}
- return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
- type, rs_pretty_ant(ant), bw, mcs, nss,
- (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
- (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
- (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
- (rate & RATE_MCS_BF_MSK) ? "BF " : "");
+ return scnprintf(buf, bufsz,
+ "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
+ type, rs_pretty_ant(ant), bw, mcs, nss,
+ (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
+ (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+ (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+ (rate & RATE_MCS_BF_MSK) ? "BF " : "");
}
/**
@@ -3696,65 +3690,70 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
int desc = 0;
int i = 0;
ssize_t ret;
+ static const size_t bufsz = 2048;
struct iwl_lq_sta *lq_sta = file->private_data;
struct iwl_mvm_sta *mvmsta =
- container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+ container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
struct iwl_mvm *mvm;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct rs_rate *rate = &tbl->rate;
u32 ss_params;
mvm = lq_sta->pers.drv;
- buff = kmalloc(2048, GFP_KERNEL);
+ buff = kmalloc(bufsz, GFP_KERNEL);
if (!buff)
return -ENOMEM;
- desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
- desc += sprintf(buff+desc, "failed=%d success=%d rate=0%lX\n",
- lq_sta->total_failed, lq_sta->total_success,
- lq_sta->active_legacy_rate);
- desc += sprintf(buff+desc, "fixed rate 0x%X\n",
- lq_sta->pers.dbg_fixed_rate);
- desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "sta_id %d\n", lq_sta->lq.sta_id);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "failed=%d success=%d rate=0%lX\n",
+ lq_sta->total_failed, lq_sta->total_success,
+ lq_sta->active_legacy_rate);
+ desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n",
+ lq_sta->pers.dbg_fixed_rate);
+ desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s%s\n",
(iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
(iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
(iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
- desc += sprintf(buff+desc, "lq type %s\n",
- (is_legacy(rate)) ? "legacy" :
- is_vht(rate) ? "VHT" : "HT");
+ desc += scnprintf(buff + desc, bufsz - desc, "lq type %s\n",
+ (is_legacy(rate)) ? "legacy" :
+ is_vht(rate) ? "VHT" : "HT");
if (!is_legacy(rate)) {
- desc += sprintf(buff + desc, " %s",
+ desc += scnprintf(buff + desc, bufsz - desc, " %s",
(is_siso(rate)) ? "SISO" : "MIMO2");
- desc += sprintf(buff + desc, " %s",
+ desc += scnprintf(buff + desc, bufsz - desc, " %s",
(is_ht20(rate)) ? "20MHz" :
(is_ht40(rate)) ? "40MHz" :
(is_ht80(rate)) ? "80MHz" :
(is_ht160(rate)) ? "160MHz" : "BAD BW");
- desc += sprintf(buff + desc, " %s %s %s %s\n",
+ desc += scnprintf(buff + desc, bufsz - desc, " %s %s %s %s\n",
(rate->sgi) ? "SGI" : "NGI",
(rate->ldpc) ? "LDPC" : "BCC",
(lq_sta->is_agg) ? "AGG on" : "",
(mvmsta->tlc_amsdu) ? "AMSDU on" : "");
}
- desc += sprintf(buff+desc, "last tx rate=0x%X\n",
+ desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X\n",
lq_sta->last_rate_n_flags);
- desc += sprintf(buff+desc,
+ desc += scnprintf(buff + desc, bufsz - desc,
"general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
lq_sta->lq.flags,
lq_sta->lq.mimo_delim,
lq_sta->lq.single_stream_ant_msk,
lq_sta->lq.dual_stream_ant_msk);
- desc += sprintf(buff+desc,
+ desc += scnprintf(buff + desc, bufsz - desc,
"agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
le16_to_cpu(lq_sta->lq.agg_time_limit),
lq_sta->lq.agg_disable_start_th,
lq_sta->lq.agg_frame_cnt_limit);
- desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
+ desc += scnprintf(buff + desc, bufsz - desc, "reduced tpc=%d\n",
+ lq_sta->lq.reduced_tpc);
ss_params = le32_to_cpu(lq_sta->lq.ss_params);
- desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n",
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "single stream params: %s%s%s%s\n",
(ss_params & LQ_SS_PARAMS_VALID) ?
"VALID" : "INVALID",
(ss_params & LQ_SS_BFER_ALLOWED) ?
@@ -3763,7 +3762,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
", STBC" : "",
(ss_params & LQ_SS_FORCE) ?
", FORCE" : "");
- desc += sprintf(buff+desc,
+ desc += scnprintf(buff + desc, bufsz - desc,
"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
lq_sta->lq.initial_rate_index[0],
lq_sta->lq.initial_rate_index[1],
@@ -3773,8 +3772,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);
- desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r);
- desc += rs_pretty_print_rate(buff+desc, r);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ " rate[%d] 0x%X ", i, r);
+ desc += rs_pretty_print_rate(buff + desc, bufsz - desc, r);
}
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
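
The sprintf() to scnprintf() conversion above is what makes bufsz meaningful:
scnprintf() is passed the space remaining and returns the number of bytes it
actually stored, so the desc accumulator can never advance past bufsz even on
truncation. A minimal sketch of the append pattern:

    static size_t append_line(char *buf, size_t bufsz, size_t desc, int val)
    {
            /* bufsz - desc is the space left; return value is bounded by it */
            desc += scnprintf(buf + desc, bufsz - desc, "value=%d\n", val);
            return desc;
    }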
@@ -3987,12 +3987,13 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
-static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
+static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta,
+ struct dentry *dir)
{
struct iwl_lq_sta *lq_sta = priv_sta;
struct iwl_mvm_sta *mvmsta;
- mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+ mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
if (!mvmsta->vif)
return;
@@ -4014,7 +4015,7 @@ err:
IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
}
-static void rs_remove_debugfs(void *mvm, void *mvm_sta)
+void rs_remove_sta_debugfs(void *mvm, void *mvm_sta)
{
}
#endif
@@ -4024,50 +4025,53 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
* the station is added. Since mac80211 calls this function before a
* station is added we ignore it.
*/
-static void rs_rate_init_stub(void *mvm_r,
- struct ieee80211_supported_band *sband,
- struct cfg80211_chan_def *chandef,
- struct ieee80211_sta *sta, void *mvm_sta)
+static void rs_rate_init_ops(void *mvm_r,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *mvm_sta)
{
}
-static const struct rate_control_ops rs_mvm_ops = {
+/* ops for rate scaling implemented in the driver */
+static const struct rate_control_ops rs_mvm_ops_drv = {
.name = RS_NAME,
- .tx_status = rs_mac80211_tx_status,
- .get_rate = rs_get_rate,
- .rate_init = rs_rate_init_stub,
+ .tx_status = rs_drv_mac80211_tx_status,
+ .get_rate = rs_drv_get_rate,
+ .rate_init = rs_rate_init_ops,
.alloc = rs_alloc,
.free = rs_free,
- .alloc_sta = rs_alloc_sta,
+ .alloc_sta = rs_drv_alloc_sta,
.free_sta = rs_free_sta,
- .rate_update = rs_rate_update,
+ .rate_update = rs_drv_rate_update,
#ifdef CONFIG_MAC80211_DEBUGFS
- .add_sta_debugfs = rs_add_debugfs,
- .remove_sta_debugfs = rs_remove_debugfs,
+ .add_sta_debugfs = rs_drv_add_sta_debugfs,
+ .remove_sta_debugfs = rs_remove_sta_debugfs,
#endif
};
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum nl80211_band band, bool init)
+{
+ if (iwl_mvm_has_tlc_offload(mvm))
+ rs_fw_rate_init(mvm, sta, band);
+ else
+ rs_drv_rate_init(mvm, sta, band, init);
+}
+
int iwl_mvm_rate_control_register(void)
{
- return ieee80211_rate_control_register(&rs_mvm_ops);
+ return ieee80211_rate_control_register(&rs_mvm_ops_drv);
}
void iwl_mvm_rate_control_unregister(void)
{
- ieee80211_rate_control_unregister(&rs_mvm_ops);
+ ieee80211_rate_control_unregister(&rs_mvm_ops_drv);
}
-/**
- * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable
- * Tx protection, according to this request and previous requests,
- * and send the LQ command.
- * @mvmsta: The station
- * @enable: Enable Tx protection?
- */
-int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
- bool enable)
+static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable)
{
- struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq;
+ struct iwl_lq_cmd *lq = &mvmsta->lq_sta.rs_drv.lq;
lockdep_assert_held(&mvm->mutex);
@@ -4083,3 +4087,17 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
return iwl_mvm_send_lq_cmd(mvm, lq, false);
}
+
+/**
+ * iwl_mvm_tx_protection - enable/disable RTS/CTS protection for a station
+ * @mvm: The mvm object
+ * @mvmsta: The station
+ * @enable: Enable Tx protection?
+ */
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable)
+{
+ if (iwl_mvm_has_tlc_offload(mvm))
+ return rs_fw_tx_protection(mvm, mvmsta, enable);
+ else
+ return rs_drv_tx_protection(mvm, mvmsta, enable);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 32b4d66debea..fb18cb8c233d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -36,6 +36,8 @@
#include "fw-api.h"
#include "iwl-trans.h"
+#define RS_NAME "iwl-mvm-rs"
+
struct iwl_rs_rate_info {
u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
u8 plcp_ht_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
@@ -218,6 +220,38 @@ struct iwl_rate_mcs_info {
};
/**
+ * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW
+ * @last_rate_n_flags: last rate reported by FW
+ * @sta_id: the id of the station
+ * @dbg_fixed_rate: for debug (CONFIG_MAC80211_DEBUGFS), use fixed rate
+ *	if not 0
+ * @dbg_agg_frame_count_lim: for debug (CONFIG_MAC80211_DEBUGFS), max
+ *	number of frames in A-MPDU
+ * @chains: bitmask of chains reported in %chain_signal
+ * @chain_signal: per chain signal strength
+ * @last_rssi: last rssi reported
+ * @drv: pointer back to the driver data
+ */
+struct iwl_lq_sta_rs_fw {
+ /* last tx rate_n_flags */
+ u32 last_rate_n_flags;
+
+ /* persistent fields - initialized only once - keep last! */
+ struct lq_sta_pers_rs_fw {
+ u32 sta_id;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ u32 dbg_fixed_rate;
+ u16 dbg_agg_frame_count_lim;
+#endif
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+ s8 last_rssi;
+ struct iwl_mvm *drv;
+ } pers;
+};
+
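
The "keep last!" note is load-bearing: rate (re)initialization wipes
everything before the persistent block with a single memset(), as
rs_fw_rate_init() does earlier in this patch. A minimal illustration with a
hypothetical struct:

    struct example_lq {
            u32 last_rate_n_flags;          /* cleared on every rate init */
            struct {
                    u32 sta_id;             /* survives re-initialization */
            } pers;                         /* must remain the last member */
    };

    static void example_reinit(struct example_lq *lq)
    {
            /* zero only the non-persistent prefix */
            memset(lq, 0, offsetof(struct example_lq, pers));
    }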
+/**
* struct iwl_rate_scale_data -- tx success history for one rate
*/
struct iwl_rate_scale_data {
@@ -407,4 +441,18 @@ struct iwl_mvm_sta;
int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm);
+#endif
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
+#endif
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
+void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum nl80211_band band);
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable);
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt);
#endif /* __rs__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index d1a40688d5e1..d26833c5ce1f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -222,7 +222,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
/* Don't drop the frame and decrypt it in SW */
- if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
+ !(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
return 0;
*crypt_len = IEEE80211_TKIP_IV_LEN;
/* fall through if TTAK OK */
@@ -383,7 +385,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
false);
}
- rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+ rs_update_last_rssi(mvm, mvmsta, rx_status);
if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
ieee80211_is_beacon(hdr->frame_control)) {
@@ -439,7 +441,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->bw = RATE_INFO_BW_160;
break;
}
- if (rate_n_flags & RATE_MCS_SGI_MSK)
+ if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+ rate_n_flags & RATE_MCS_SGI_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 3b8d44361380..a3f7c1bf3cc8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -261,7 +261,9 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
return 0;
case IWL_RX_MPDU_STATUS_SEC_TKIP:
/* Don't drop the frame and decrypt it in SW */
- if (!(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
+ !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
return 0;
*crypt_len = IEEE80211_TKIP_IV_LEN;
@@ -943,7 +945,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
false);
}
- rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+ rs_update_last_rssi(mvm, mvmsta, rx_status);
if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
ieee80211_is_beacon(hdr->frame_control)) {
@@ -1020,7 +1022,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->bw = RATE_INFO_BW_160;
break;
}
- if (rate_n_flags & RATE_MCS_SGI_MSK)
+
+ if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+ rate_n_flags & RATE_MCS_SGI_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index e4fd476e9ccb..356b16f40e78 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -664,6 +664,22 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
return newpos;
}
+#define WFA_TPC_IE_LEN 9
+
+static void iwl_mvm_add_tpc_report_ie(u8 *pos)
+{
+ pos[0] = WLAN_EID_VENDOR_SPECIFIC;
+ pos[1] = WFA_TPC_IE_LEN - 2;
+ pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
+ pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
+ pos[4] = WLAN_OUI_MICROSOFT & 0xff;
+ pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
+ pos[6] = 0;
+ /* pos[7] - tx power will be inserted by the FW */
+ pos[7] = 0;
+ pos[8] = 0;
+}
+
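
Assuming WLAN_OUI_MICROSOFT is 0x0050f2 and WLAN_OUI_TYPE_MICROSOFT_TPC is 8
(as in ieee80211.h), the nine bytes built above lay out as follows, with
offset 7 left for the firmware to overwrite:

    offset:  0     1     2  3  4    5     6     7          8
    value:   0xdd  0x07  00 50 f2   0x08  0x00  (by FW)    0x00
             EID   len   OUI        type        tx power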
static void
iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_scan_ies *ies,
@@ -716,7 +732,16 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
memcpy(pos, ies->common_ies, ies->common_ie_len);
params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
- params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
+
+ if (iwl_mvm_rrm_scan_needed(mvm) &&
+ !fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) {
+ iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len);
+ params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
+ WFA_TPC_IE_LEN);
+ } else {
+ params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
+ }
}
static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
@@ -781,7 +806,9 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
- if (iwl_mvm_rrm_scan_needed(mvm))
+ if (iwl_mvm_rrm_scan_needed(mvm) &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
if (params->pass_all)
@@ -1183,7 +1210,9 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
}
- if (iwl_mvm_rrm_scan_needed(mvm))
+ if (iwl_mvm_rrm_scan_needed(mvm) &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
if (params->pass_all)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 1add5615fc3a..6b2674e02606 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1439,6 +1439,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
goto err;
}
+	/*
+	 * If rate scaling is registered with mac80211, "add station" is
+	 * handled via the corresponding ops; otherwise, notify the rate
+	 * scaling code here.
+	 */
+ if (iwl_mvm_has_tlc_offload(mvm))
+ iwl_mvm_rs_add_sta(mvm, mvm_sta);
+
update_fw:
ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
if (ret)
@@ -1762,7 +1769,7 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
}
/*
- * For a000 firmware and on we cannot add queue to a station unknown
+	 * For 22000 firmware and on, we cannot add a queue to a station unknown
* to firmware so enable queue here - after the station was added
*/
if (iwl_mvm_has_new_tx_api(mvm))
@@ -1885,7 +1892,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
/*
- * For a000 firmware and on we cannot add queue to a station unknown
+	 * For 22000 firmware and on, we cannot add a queue to a station unknown
* to firmware so enable queue here - after the station was added
*/
if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -2064,7 +2071,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/*
* Enable cab queue after the ADD_STA command is sent.
- * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
+ * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
* command with unknown station id, and for FW that doesn't support
* station API since the cab queue is not included in the
* tfd_queue_mask.
@@ -2530,7 +2537,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
tid_data->next_reclaimed);
/*
- * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+	 * In 22000 HW, the next_reclaimed index is only 8 bits, so we'll need
* to align the wrap around of ssn so we compare relevant values.
*/
normalized_ssn = tid_data->ssn;
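
Concretely, the alignment reduces the full sequence number into the same
modulo-256 space as the 8-bit next_reclaimed index before comparing; a sketch
following the pattern at the other call sites in this patch:

    u16 normalized_ssn = tid_data->ssn;

    if (mvm->trans->cfg->gen2)          /* 22000: 8-bit next_reclaimed */
            normalized_ssn &= 0xff;

    if (normalized_ssn == tid_data->next_reclaimed)
            /* everything up to ssn has been reclaimed */;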
@@ -2575,6 +2582,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
.aggregate = true,
};
+	/*
+	 * When the FW supports TLC_OFFLOAD, it also implements the Tx
+	 * aggregation manager, so this function should never be called
+	 * in that case.
+	 */
+ if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
+ return -EINVAL;
+
BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
!= IWL_MAX_TID_COUNT);
@@ -2672,12 +2686,12 @@ out:
*/
mvmsta->max_agg_bufsize =
min(mvmsta->max_agg_bufsize, buf_size);
- mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
+ mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
sta->addr, tid);
- return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
+ return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
}
static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
@@ -3615,7 +3629,7 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
/*
- * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+	 * In 22000 HW, the next_reclaimed index is only 8 bits, so we'll need
* to align the wrap around of ssn so we compare relevant values.
*/
if (mvm->trans->cfg->gen2)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index aedabe101cf0..5ffd6adbc383 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -383,6 +383,8 @@ struct iwl_mvm_rxq_dup_data {
* and from Tx response flow, it needs a spinlock.
* @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
* @tid_to_baid: a simple map of TID to baid
+ * @lq_sta: holds rate scaling data: %rs_drv when RS is done in the driver,
+ *	or %rs_fw when it is offloaded to the FW.
* @reserved_queue: the queue reserved for this STA for DQA purposes
* Every STA is given one reserved queue to allow it to operate. If no
* such queue can be guaranteed, the STA addition will fail.
@@ -417,7 +419,10 @@ struct iwl_mvm_sta {
spinlock_t lock;
struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
u8 tid_to_baid[IWL_MAX_TID_COUNT];
- struct iwl_lq_sta lq_sta;
+ union {
+ struct iwl_lq_sta_rs_fw rs_fw;
+ struct iwl_lq_sta rs_drv;
+ } lq_sta;
struct ieee80211_vif *vif;
struct iwl_mvm_key_pn __rcu *ptk_pn[4];
struct iwl_mvm_rxq_dup_data *dup_data;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index e25cda9fbf6c..200ab50ec86b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -101,7 +101,6 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
- u32 queues = 0;
/*
* Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit.
@@ -110,14 +109,10 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
* in the case that the time event actually completed in the firmware
* (which is handled in iwl_mvm_te_handle_notif).
*/
- if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
- queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
- }
- if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
- queues |= BIT(mvm->aux_queue);
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
- }
synchronize_net();
@@ -777,12 +772,6 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
}
- /*
- * Flush the done work, just in case it's still pending, so that
- * the work it does can complete and we can accept new frames.
- */
- flush_work(&mvm->roc_done_wk);
-
time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
time_cmd.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 333bcb75b8af..dda77b327c98 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -888,10 +888,9 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
/*
* The first deferred frame should've stopped the MAC queues, so we
* should never get a second deferred frame for the RA/TID.
+ * In case of GSO the first packet may have been split, so don't warn.
*/
- if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
- "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
- skb_queue_len(deferred_tx_frames))) {
+ if (skb_queue_len(deferred_tx_frames) == 1) {
iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
schedule_work(&mvm->add_stream_wk);
}
@@ -1132,7 +1131,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
}
/*
- * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+ * In 22000 HW, the next_reclaimed index is only 8 bits, so we'll need
* to align the wrap around of ssn so we compare relevant values.
*/
normalized_ssn = tid_data->ssn;
@@ -1624,7 +1623,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
int freed;
if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
- tid >= IWL_MAX_TID_COUNT,
+ tid > IWL_MAX_TID_COUNT,
"sta_id %d tid %d", sta_id, tid))
return;
@@ -1679,7 +1678,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
if (ieee80211_is_data_qos(hdr->frame_control))
freed++;
else
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
@@ -1719,8 +1718,11 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
ba_info->band = chanctx_conf->def.chan->band;
iwl_mvm_hwrate_to_tx_status(rate, ba_info);
- IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
- iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+ if (!iwl_mvm_has_tlc_offload(mvm)) {
+ IWL_DEBUG_TX_REPLY(mvm,
+ "No reclaim. Update rs directly\n");
+ iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+ }
}
out:
@@ -1771,8 +1773,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
struct iwl_mvm_compressed_ba_tfd *ba_tfd =
&ba_res->tfd[i];
+ tid = ba_tfd->tid;
+ if (tid == IWL_MGMT_TID)
+ tid = IWL_MAX_TID_COUNT;
+
mvmsta->tid_data[i].lq_color = lq_color;
- iwl_mvm_tx_reclaim(mvm, sta_id, ba_tfd->tid,
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid,
(int)(le16_to_cpu(ba_tfd->q_num)),
le16_to_cpu(ba_tfd->tfd_index),
&ba_info,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 03ffd84786ca..d65e1db7c097 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -278,8 +278,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
u8 ind = last_idx;
int i;
- for (i = 0; i < RATE_MCS_ANT_NUM; i++) {
- ind = (ind + 1) % RATE_MCS_ANT_NUM;
+ for (i = 0; i < MAX_RS_ANT_NUM; i++) {
+ ind = (ind + 1) % MAX_RS_ANT_NUM;
if (valid & BIT(ind))
return ind;
}
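
A quick trace of the renamed loop, now bounded by MAX_RS_ANT_NUM instead of
the rate-format constant:

    /*
     * valid = BIT(0) | BIT(1) (antennas A and B), last_idx = 1:
     *   i = 0: ind = (1 + 1) % 3 = 2, BIT(2) not valid -> continue
     *   i = 1: ind = (2 + 1) % 3 = 0, BIT(0) valid     -> return 0
     * i.e. a plain round-robin over the set bits.
     */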
@@ -516,8 +516,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
IWL_ERR(trans, "HW error, resetting before reading\n");
/* reset the device */
- iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
- usleep_range(5000, 6000);
+ iwl_trans_sw_reset(trans);
/* set INIT_DONE flag */
iwl_set_bit(trans, CSR_GP_CNTRL,
@@ -595,6 +594,12 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
+ if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
+ IWL_ERR(mvm,
+ "DEVICE_ENABLED bit is not set. Aborting dump.\n");
+ return;
+ }
+
iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
if (mvm->error_event_table[1])
@@ -906,7 +911,8 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
.data = { lq, },
};
- if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA))
+ if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
+ iwl_mvm_has_tlc_offload(mvm)))
return -EINVAL;
return iwl_mvm_send_cmd(mvm, &cmd);
@@ -1025,12 +1031,34 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int res;
+ bool low_latency;
lockdep_assert_held(&mvm->mutex);
- if (iwl_mvm_vif_low_latency(mvmvif) == prev)
+ low_latency = iwl_mvm_vif_low_latency(mvmvif);
+
+ if (low_latency == prev)
return 0;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
+ struct iwl_mac_low_latency_cmd cmd = {
+ .mac_id = cpu_to_le32(mvmvif->id)
+ };
+
+ if (low_latency) {
+ /* currently we don't care about the direction */
+ cmd.low_latency_rx = 1;
+ cmd.low_latency_tx = 1;
+ }
+ res = iwl_mvm_send_cmd_pdu(mvm,
+ iwl_cmd_id(LOW_LATENCY_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd);
+ if (res)
+ IWL_ERR(mvm, "Failed to send low latency command\n");
+ }
+
res = iwl_mvm_update_quotas(mvm, false, NULL);
if (res)
return res;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index ccd7c33c4c28..56fc28750a41 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -652,20 +652,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
-/* a000 Series */
- {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x34F0, 0x0310, iwla000_2ac_cfg_jf)},
- {IWL_PCI_DEVICE(0x2720, 0x0000, iwla000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0070, iwla000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0078, iwla000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x0030, iwla000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0090, iwla000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)},
+/* 22000 Series */
+ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
+ {IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
#endif /* CONFIG_IWLMVM */
@@ -707,7 +707,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans->cfg = cfg_7265d;
}
- if (iwl_trans->cfg->rf_id && cfg == &iwla000_2ac_cfg_hr_cdb &&
+ if (iwl_trans->cfg->rf_id && cfg == &iwl22000_2ac_cfg_hr_cdb &&
iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) {
u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id);
u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF);
@@ -715,14 +715,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rf_id_chp == jf_chp_id) {
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
- cfg = &iwla000_2ax_cfg_qnj_jf_b0;
+ cfg = &iwl22000_2ax_cfg_qnj_jf_b0;
else
- cfg = &iwla000_2ac_cfg_jf;
+ cfg = &iwl22000_2ac_cfg_jf;
} else if (rf_id_chp == hr_chp_id) {
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
- cfg = &iwla000_2ax_cfg_qnj_hr_a0;
+ cfg = &iwl22000_2ax_cfg_qnj_hr_a0;
else
- cfg = &iwla000_2ac_cfg_hr;
+ cfg = &iwl22000_2ac_cfg_hr;
}
iwl_trans->cfg = cfg;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index d749abeca3ae..ca3b64ff4dad 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -658,23 +658,20 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
-static inline void iwl_pcie_sw_reset(struct iwl_trans *trans)
-{
- /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
- iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
- usleep_range(5000, 6000);
-}
-
static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
-static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
struct iwl_txq *txq, int idx)
{
- return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq,
- idx);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans->cfg->use_tfh)
+ idx = iwl_pcie_get_cmd_index(txq, idx);
+
+ return txq->tfds + trans_pcie->tfd_size * idx;
}
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index ac05fd1e74c4..cb4012541f45 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -137,7 +137,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
- iwl_pcie_sw_reset(trans);
+ iwl_trans_sw_reset(trans);
/*
* Clear "initialization complete" bit to move adapter from
@@ -192,7 +192,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);
- iwl_pcie_sw_reset(trans);
+ iwl_trans_sw_reset(trans);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 4541c86881d6..b406b536c850 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -176,6 +176,13 @@ out:
kfree(buf);
}
+static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
+{
+ /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ usleep_range(5000, 6000);
+}
+
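
With the reset helper now private to trans.c and exposed through the ops
table (see the .sw_reset hook added below), callers such as utils.c above
presumably go through a thin wrapper on the trans API; a sketch of what that
wrapper would look like in iwl-trans.h (not shown in this excerpt):

    static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
    {
            if (trans->ops->sw_reset)
                    trans->ops->sw_reset(trans);
    }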
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -446,7 +453,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
- iwl_pcie_sw_reset(trans);
+ iwl_trans_pcie_sw_reset(trans);
/*
* Set "initialization complete" bit to move adapter from
@@ -487,7 +494,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
apmg_xtal_cfg_reg |
SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
- iwl_pcie_sw_reset(trans);
+ iwl_trans_pcie_sw_reset(trans);
/* Enable LP XTAL by indirect access through CSR */
apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
@@ -580,7 +587,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
return;
}
- iwl_pcie_sw_reset(trans);
+ iwl_trans_pcie_sw_reset(trans);
/*
* Clear "initialization complete" bit to move adapter from
@@ -915,14 +922,9 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
int i;
- if (dest->version)
- IWL_ERR(trans,
- "DBG DEST version is %d - expect issues\n",
- dest->version);
-
IWL_INFO(trans, "Applying debug destination %s\n",
get_fw_dbg_mode_string(dest->monitor_mode));
@@ -1270,7 +1272,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
- iwl_pcie_sw_reset(trans);
+ iwl_trans_pcie_sw_reset(trans);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't
@@ -1744,7 +1746,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
return err;
}
- iwl_pcie_sw_reset(trans);
+ iwl_trans_pcie_sw_reset(trans);
err = iwl_pcie_apm_init(trans);
if (err)
@@ -2816,8 +2818,17 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
* Update pointers to reflect actual values after
* shifting
*/
- base = iwl_read_prph(trans, base) <<
- trans->dbg_dest_tlv->base_shift;
+ if (trans->dbg_dest_tlv->version) {
+ base = (iwl_read_prph(trans, base) &
+ IWL_LDBG_M2S_BUF_BA_MSK) <<
+ trans->dbg_dest_tlv->base_shift;
+ base *= IWL_M2S_UNIT_SIZE;
+ base += trans->cfg->smem_offset;
+ } else {
+ base = iwl_read_prph(trans, base) <<
+ trans->dbg_dest_tlv->base_shift;
+ }
+
iwl_trans_read_mem(trans, base, fw_mon_data->data,
monitor_len / sizeof(u32));
} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
@@ -2865,21 +2876,36 @@ static struct iwl_trans_dump_data
trans_pcie->fw_mon_size;
monitor_len = trans_pcie->fw_mon_size;
} else if (trans->dbg_dest_tlv) {
- u32 base, end;
+ u32 base, end, cfg_reg;
- base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
- end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
+ if (trans->dbg_dest_tlv->version == 1) {
+ cfg_reg = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ cfg_reg = iwl_read_prph(trans, cfg_reg);
+ base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
+ trans->dbg_dest_tlv->base_shift;
+ base *= IWL_M2S_UNIT_SIZE;
+ base += trans->cfg->smem_offset;
- base = iwl_read_prph(trans, base) <<
- trans->dbg_dest_tlv->base_shift;
- end = iwl_read_prph(trans, end) <<
- trans->dbg_dest_tlv->end_shift;
+ monitor_len =
+ (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
+ trans->dbg_dest_tlv->end_shift;
+ monitor_len *= IWL_M2S_UNIT_SIZE;
+ } else {
+ base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
- /* Make "end" point to the actual end */
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000 ||
- trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
- end += (1 << trans->dbg_dest_tlv->end_shift);
- monitor_len = end - base;
+ base = iwl_read_prph(trans, base) <<
+ trans->dbg_dest_tlv->base_shift;
+ end = iwl_read_prph(trans, end) <<
+ trans->dbg_dest_tlv->end_shift;
+
+ /* Make "end" point to the actual end */
+ if (trans->cfg->device_family >=
+ IWL_DEVICE_FAMILY_8000 ||
+ trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
+ end += (1 << trans->dbg_dest_tlv->end_shift);
+ monitor_len = end - base;
+ }
len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
monitor_len;
} else {
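
A worked example of the version 0 branch, with invented register values:
base_shift = end_shift = 8, the base register reads 0x10 and the end register
0x1f, so base = 0x1000 and end = 0x1f00; on 8000-family devices (or MARBH
mode) end is bumped by 1 << 8 so it points past the last unit, giving:

    monitor_len = 0x2000 - 0x1000 = 0x1000 bytes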
@@ -3025,6 +3051,7 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
.write_mem = iwl_trans_pcie_write_mem, \
.configure = iwl_trans_pcie_configure, \
.set_pmi = iwl_trans_pcie_set_pmi, \
+ .sw_reset = iwl_trans_pcie_sw_reset, \
.grab_nic_access = iwl_trans_pcie_grab_nic_access, \
.release_nic_access = iwl_trans_pcie_release_nic_access, \
.set_bits_mask = iwl_trans_pcie_set_bits_mask, \
@@ -3250,9 +3277,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
if (hw_status & UMAG_GEN_HW_IS_FPGA)
- trans->cfg = &iwla000_2ax_cfg_qnj_hr_f0;
+ trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0;
else
- trans->cfg = &iwla000_2ac_cfg_hr;
+ trans->cfg = &iwl22000_2ac_cfg_hr;
}
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 16b345f54ff0..6d0a907d5ba5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
* idx is bounded by n_window
*/
@@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
lockdep_assert_held(&txq->lock);
iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
- iwl_pcie_get_tfd(trans_pcie, txq, idx));
+ iwl_pcie_get_tfd(trans, txq, idx));
/* free SKB */
if (txq->entries) {
@@ -364,11 +362,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd =
- iwl_pcie_get_tfd(trans_pcie, txq, idx);
+ struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
dma_addr_t tb_phys;
bool amsdu;
int i, len, tb1_len, tb2_len, hdr_len;
@@ -565,8 +561,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
u8 group_id = iwl_cmd_groupid(cmd->id);
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
- struct iwl_tfh_tfd *tfd =
- iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index fed6d842a5e1..3f85713c41dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i, num_tbs;
- void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);
+ void *tfd = iwl_pcie_get_tfd(trans, txq, index);
/* Sanity check on number of chunks */
num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
@@ -2018,7 +2018,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+ iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
hdr_len);
@@ -2092,7 +2092,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
IEEE80211_CCMP_HDR_LEN : 0;
trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+ iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
@@ -2425,7 +2425,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
IWL_FIRST_TB_SIZE);
- tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+ tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
iwl_pcie_tfd_get_num_tbs(trans, tfd));
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 10b075a46b26..1cf22e62e3dd 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -32,6 +32,7 @@
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <linux/rhashtable.h>
#include "mac80211_hwsim.h"
#define WARN_QUEUE 100
@@ -489,6 +490,8 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
static spinlock_t hwsim_radio_lock;
static LIST_HEAD(hwsim_radios);
+static struct workqueue_struct *hwsim_wq;
+static struct rhashtable hwsim_radios_rht;
static int hwsim_radio_idx;
static struct platform_driver mac80211_hwsim_driver = {
@@ -499,6 +502,7 @@ static struct platform_driver mac80211_hwsim_driver = {
struct mac80211_hwsim_data {
struct list_head list;
+ struct rhash_head rht;
struct ieee80211_hw *hw;
struct device *dev;
struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
@@ -573,6 +577,13 @@ struct mac80211_hwsim_data {
u64 tx_failed;
};
+static const struct rhashtable_params hwsim_rht_params = {
+ .nelem_hint = 2,
+ .automatic_shrinking = true,
+ .key_len = ETH_ALEN,
+ .key_offset = offsetof(struct mac80211_hwsim_data, addresses[1]),
+ .head_offset = offsetof(struct mac80211_hwsim_data, rht),
+};
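
With key_len of ETH_ALEN and key_offset pointing at addresses[1], the table
is keyed directly by a radio's MAC address, so a bare 6-byte pointer can be
handed to lookups, which is exactly what the reworked
get_hwsim_data_ref_from_addr() further down does. A minimal usage sketch:

    struct mac80211_hwsim_data *radio;

    /* addr is a plain u8[ETH_ALEN]; the params supply key length/offset */
    radio = rhashtable_lookup_fast(&hwsim_radios_rht, addr, hwsim_rht_params);
    if (!radio)
            return NULL;    /* no radio registered with this address */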
struct hwsim_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
@@ -684,6 +695,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN);
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_TODS |
(ps ? IEEE80211_FCTL_PM : 0));
hdr->duration_id = cpu_to_le16(0);
memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
@@ -727,16 +739,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
val != PS_MANUAL_POLL)
return -EINVAL;
- old_ps = data->ps;
- data->ps = val;
-
- local_bh_disable();
if (val == PS_MANUAL_POLL) {
+ if (data->ps != PS_ENABLED)
+ return -EINVAL;
+ local_bh_disable();
ieee80211_iterate_active_interfaces_atomic(
data->hw, IEEE80211_IFACE_ITER_NORMAL,
hwsim_send_ps_poll, data);
- data->ps_poll_pending = true;
- } else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
+ local_bh_enable();
+ return 0;
+ }
+ old_ps = data->ps;
+ data->ps = val;
+
+ local_bh_disable();
+ if (old_ps == PS_DISABLED && val != PS_DISABLED) {
ieee80211_iterate_active_interfaces_atomic(
data->hw, IEEE80211_IFACE_ITER_NORMAL,
hwsim_send_nullfunc_ps, data);
@@ -1002,6 +1019,36 @@ static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data,
return res;
}
+static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate)
+{
+ u16 result = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ result |= MAC80211_HWSIM_TX_RC_USE_RTS_CTS;
+ if (rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
+ result |= MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT;
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ result |= MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE;
+ if (rate->flags & IEEE80211_TX_RC_MCS)
+ result |= MAC80211_HWSIM_TX_RC_MCS;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ result |= MAC80211_HWSIM_TX_RC_GREEN_FIELD;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ result |= MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH;
+ if (rate->flags & IEEE80211_TX_RC_DUP_DATA)
+ result |= MAC80211_HWSIM_TX_RC_DUP_DATA;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ result |= MAC80211_HWSIM_TX_RC_SHORT_GI;
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS)
+ result |= MAC80211_HWSIM_TX_RC_VHT_MCS;
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ result |= MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH;
+ if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ result |= MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH;
+
+ return result;
+}
+
static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
struct sk_buff *my_skb,
int dst_portid)
@@ -1014,6 +1061,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
unsigned int hwsim_flags = 0;
int i;
struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES];
+ struct hwsim_tx_rate_flag tx_attempts_flags[IEEE80211_TX_MAX_RATES];
uintptr_t cookie;
if (data->ps != PS_DISABLED)
@@ -1065,7 +1113,11 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
tx_attempts[i].idx = info->status.rates[i].idx;
+ tx_attempts_flags[i].idx = info->status.rates[i].idx;
tx_attempts[i].count = info->status.rates[i].count;
+ tx_attempts_flags[i].flags =
+ trans_tx_rate_flags_ieee2hwsim(
+ &info->status.rates[i]);
}
if (nla_put(skb, HWSIM_ATTR_TX_INFO,
@@ -1073,6 +1125,11 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
tx_attempts))
goto nla_put_failure;
+ if (nla_put(skb, HWSIM_ATTR_TX_INFO_FLAGS,
+ sizeof(struct hwsim_tx_rate_flag) * IEEE80211_TX_MAX_RATES,
+ tx_attempts_flags))
+ goto nla_put_failure;
+
/* We create a cookie to identify this skb */
data->pending_cookie++;
cookie = data->pending_cookie;
@@ -2725,6 +2782,15 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
spin_lock_bh(&hwsim_radio_lock);
+ err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht,
+ hwsim_rht_params);
+ if (err < 0) {
+ pr_debug("mac80211_hwsim: radio index %d already present\n",
+ idx);
+ spin_unlock_bh(&hwsim_radio_lock);
+ goto failed_final_insert;
+ }
+
list_add_tail(&data->list, &hwsim_radios);
spin_unlock_bh(&hwsim_radio_lock);
@@ -2733,6 +2799,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
return idx;
+failed_final_insert:
+ debugfs_remove_recursive(data->debugfs);
+ ieee80211_unregister_hw(data->hw);
failed_hw:
device_release_driver(data->dev);
failed_bind:
@@ -2868,22 +2937,9 @@ static void hwsim_mon_setup(struct net_device *dev)
static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
{
- struct mac80211_hwsim_data *data;
- bool _found = false;
-
- spin_lock_bh(&hwsim_radio_lock);
- list_for_each_entry(data, &hwsim_radios, list) {
- if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
- _found = true;
- break;
- }
- }
- spin_unlock_bh(&hwsim_radio_lock);
-
- if (!_found)
- return NULL;
-
- return data;
+ return rhashtable_lookup_fast(&hwsim_radios_rht,
+ addr,
+ hwsim_rht_params);
}
static void hwsim_register_wmediumd(struct net *net, u32 portid)
@@ -2968,7 +3024,6 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
txi->status.rates[i].idx = tx_attempts[i].idx;
txi->status.rates[i].count = tx_attempts[i].count;
- /*txi->status.rates[i].flags = 0;*/
}
txi->status.ack_signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
@@ -3119,6 +3174,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_CHANNELS])
param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
+ if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
+ GENL_SET_ERR_MSG(info, "too many channels specified");
+ return -EINVAL;
+ }
+
if (info->attrs[HWSIM_ATTR_NO_VIF])
param.no_vif = true;
@@ -3143,8 +3203,10 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
- if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
+ if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) {
+ kfree(hwname);
return -EINVAL;
+ }
param.regd = hwsim_world_regdom_custom[idx];
}
@@ -3185,6 +3247,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
continue;
list_del(&data->list);
+ rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
+ hwsim_rht_params);
spin_unlock_bh(&hwsim_radio_lock);
mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
info);
@@ -3215,7 +3279,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
continue;
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb) {
res = -ENOMEM;
goto out_err;
@@ -3340,8 +3404,10 @@ static void remove_user_radios(u32 portid)
list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
if (entry->destroy_on_close && entry->portid == portid) {
list_del(&entry->list);
+ rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
+ hwsim_rht_params);
INIT_WORK(&entry->destroy_work, destroy_radio);
- schedule_work(&entry->destroy_work);
+ queue_work(hwsim_wq, &entry->destroy_work);
}
}
spin_unlock_bh(&hwsim_radio_lock);
@@ -3415,8 +3481,10 @@ static void __net_exit hwsim_exit_net(struct net *net)
continue;
list_del(&data->list);
+ rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
+ hwsim_rht_params);
INIT_WORK(&data->destroy_work, destroy_radio);
- schedule_work(&data->destroy_work);
+ queue_work(hwsim_wq, &data->destroy_work);
}
spin_unlock_bh(&hwsim_radio_lock);
}
@@ -3448,6 +3516,11 @@ static int __init init_mac80211_hwsim(void)
spin_lock_init(&hwsim_radio_lock);
+ hwsim_wq = alloc_workqueue("hwsim_wq", WQ_MEM_RECLAIM, 0);
+ if (!hwsim_wq)
+ return -ENOMEM;
+ rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
+
err = register_pernet_device(&hwsim_net_ops);
if (err)
return err;
@@ -3586,8 +3659,12 @@ static void __exit exit_mac80211_hwsim(void)
hwsim_exit_netlink();
mac80211_hwsim_free();
+ flush_workqueue(hwsim_wq);
+
+ rhashtable_destroy(&hwsim_radios_rht);
unregister_netdev(hwsim_mon);
platform_driver_unregister(&mac80211_hwsim_driver);
unregister_pernet_device(&hwsim_net_ops);
+ destroy_workqueue(hwsim_wq);
}
module_exit(exit_mac80211_hwsim);
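
The dedicated workqueue threads through several hunks above; the ordering is the point. A compilable sketch of the lifecycle under assumed names (wq stands in for hwsim_wq):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *wq;

	static int __init example_init(void)
	{
		wq = alloc_workqueue("hwsim_wq", WQ_MEM_RECLAIM, 0);
		return wq ? 0 : -ENOMEM;
	}

	static void __exit example_exit(void)
	{
		flush_workqueue(wq);	/* drain queued destroy work first */
		destroy_workqueue(wq);	/* only after nothing can queue more */
	}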
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 3f5eda591dba..a96a79c1eff5 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -64,7 +64,8 @@ enum hwsim_tx_control_flags {
* @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to
* kernel, uses:
* %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS,
- * %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
+ * %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_TX_INFO_FLAGS,
+ * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
* @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters,
* returns the radio ID (>= 0) or negative on errors, if successful
* then multicast the result
@@ -123,6 +124,8 @@ enum {
* @HWSIM_ATTR_RADIO_NAME: Name of radio, e.g. phy666
* @HWSIM_ATTR_NO_VIF: Do not create vif (wlanX) when creating radio.
* @HWSIM_ATTR_FREQ: Frequency at which packet is transmitted or received.
+ * @HWSIM_ATTR_TX_INFO_FLAGS: additional flags for corresponding
+ * rates of %HWSIM_ATTR_TX_INFO
* @__HWSIM_ATTR_MAX: enum limit
*/
@@ -149,6 +152,7 @@ enum {
HWSIM_ATTR_NO_VIF,
HWSIM_ATTR_FREQ,
HWSIM_ATTR_PAD,
+ HWSIM_ATTR_TX_INFO_FLAGS,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
@@ -171,4 +175,66 @@ struct hwsim_tx_rate {
u8 count;
} __packed;
+/**
+ * enum hwsim_tx_rate_flags - per-rate flags set by the rate control algorithm.
+ * Inspired by enum mac80211_rate_control_flags. New flags may be
+ * appended, but old flags must not be deleted, to keep userspace
+ * compatibility.
+ *
+ * These flags are set by the rate control algorithm for each rate during tx,
+ * in the @flags member of struct ieee80211_tx_rate.
+ *
+ * @MAC80211_HWSIM_TX_RC_USE_RTS_CTS: Use RTS/CTS exchange for this rate.
+ * @MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT: CTS-to-self protection is required.
+ * This is set if the current BSS requires ERP protection.
+ * @MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE: Use short preamble.
+ * @MAC80211_HWSIM_TX_RC_MCS: HT rate.
+ * @MAC80211_HWSIM_TX_RC_VHT_MCS: VHT MCS rate; in this case the idx field is
+ * split into the high 4 bits (NSS) and the low 4 bits (MCS number)
+ * @MAC80211_HWSIM_TX_RC_GREEN_FIELD: Indicates whether this rate should be used
+ * in Greenfield mode.
+ * @MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH: Indicates if the Channel Width should be
+ * 40 MHz.
+ * @MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH: Indicates 80 MHz transmission
+ * @MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH: Indicates 160 MHz transmission
+ * (80+80 isn't supported yet)
+ * @MAC80211_HWSIM_TX_RC_DUP_DATA: The frame should be transmitted on both of
+ * the adjacent 20 MHz channels, if the current channel type is
+ * NL80211_CHAN_HT40MINUS or NL80211_CHAN_HT40PLUS.
+ * @MAC80211_HWSIM_TX_RC_SHORT_GI: Short Guard interval should be used for this
+ * rate.
+ */
+enum hwsim_tx_rate_flags {
+ MAC80211_HWSIM_TX_RC_USE_RTS_CTS = BIT(0),
+ MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT = BIT(1),
+ MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE = BIT(2),
+
+ /* rate index is an HT/VHT MCS instead of an index */
+ MAC80211_HWSIM_TX_RC_MCS = BIT(3),
+ MAC80211_HWSIM_TX_RC_GREEN_FIELD = BIT(4),
+ MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH = BIT(5),
+ MAC80211_HWSIM_TX_RC_DUP_DATA = BIT(6),
+ MAC80211_HWSIM_TX_RC_SHORT_GI = BIT(7),
+ MAC80211_HWSIM_TX_RC_VHT_MCS = BIT(8),
+ MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH = BIT(9),
+ MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH = BIT(10),
+};
+
+/**
+ * struct hwsim_tx_rate_flag - per-rate flags for rate selection/status
+ *
+ * @idx: rate index to attempt to send with
+ * @flags: the rate control flags for this rate, from
+ * enum hwsim_tx_rate_flags
+ *
+ * A value of -1 for @idx indicates an invalid rate and, if used
+ * in an array of retry rates, that no more rates should be tried.
+ *
+ * When used for transmit status reporting, the driver should
+ * always report the rate and flags used.
+ */
+struct hwsim_tx_rate_flag {
+ s8 idx;
+ u16 flags;
+} __packed;
#endif /* __MAC80211_HWSIM_H */
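
A minimal userspace-side sketch of consuming the new attribute: the payload is a packed array of struct hwsim_tx_rate_flag, terminated early by idx == -1. The function and its parameters are illustrative; extracting the attribute with a netlink library is assumed and not shown.

	#include <stdio.h>

	static void dump_tx_rate_flags(const struct hwsim_tx_rate_flag *r,
				       int len)
	{
		int i, n = len / sizeof(*r);

		for (i = 0; i < n; i++) {
			if (r[i].idx < 0)
				break;	/* -1 ends the retry chain */
			printf("rate %d: idx=%d flags=0x%04x\n",
			       i, r[i].idx, r[i].flags);
		}
	}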
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 6e0d9a9c5cfb..ce4432c535f0 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1116,6 +1116,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
enum nl80211_iftype curr_iftype = dev->ieee80211_ptr->iftype;
+ if (priv->scan_request) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "change virtual interface: scan in process\n");
+ return -EBUSY;
+ }
+
switch (curr_iftype) {
case NL80211_IFTYPE_ADHOC:
switch (type) {
@@ -1180,7 +1186,6 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_AP:
switch (type) {
case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_STATION:
return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
type, params);
break;
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index dcc529e9c0ef..874660052055 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -290,13 +290,16 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] =
get_unaligned_le16((u8 *)host_cmd + S_DS_GEN);
+ /* Set up the command timeout timer after transmitting the command,
+ * except for the specific commands that have no command response.
+ */
+ if (cmd_code != HostCmd_CMD_FW_DUMP_EVENT)
+ mod_timer(&adapter->cmd_timer,
+ jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+
/* Clear BSS_NO_BITS from HostCmd */
cmd_code &= HostCmd_CMD_ID_MASK;
- /* Setup the timer after transmit command */
- mod_timer(&adapter->cmd_timer,
- jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
-
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 6f4239be609d..db2872daae97 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -168,10 +168,15 @@ mwifiex_device_dump_read(struct file *file, char __user *ubuf,
{
struct mwifiex_private *priv = file->private_data;
- if (!priv->adapter->if_ops.device_dump)
- return -EIO;
-
- priv->adapter->if_ops.device_dump(priv->adapter);
+ /* For command timeouts, USB firmware will automatically emit
+ * firmware dump events, so we don't implement device_dump().
+ * For user-initiated dumps, we trigger it ourselves.
+ */
+ if (priv->adapter->iface_type == MWIFIEX_USB)
+ mwifiex_send_cmd(priv, HostCmd_CMD_FW_DUMP_EVENT,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
+ else
+ priv->adapter->if_ops.device_dump(priv->adapter);
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 13cd58e963b3..9c2cdef54074 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -56,6 +56,15 @@ struct mwifiex_fw_data {
u8 data[1];
} __packed;
+struct mwifiex_fw_dump_header {
+ __le16 seq_num;
+ __le16 reserved;
+ __le16 type;
+ __le16 len;
+} __packed;
+
+#define FW_DUMP_INFO_ENDED 0x0002
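
The header fields are little-endian on the wire, so each one needs le16_to_cpu() before comparison. A one-line helper sketch (illustrative, not part of the patch) matching the end-of-dump check in the event handler further below:

	static bool fw_dump_ended(const struct mwifiex_fw_dump_header *hdr)
	{
		return le16_to_cpu(hdr->type) == FW_DUMP_INFO_ENDED;
	}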
+
#define MWIFIEX_FW_DNLD_CMD_1 0x1
#define MWIFIEX_FW_DNLD_CMD_5 0x5
#define MWIFIEX_FW_DNLD_CMD_6 0x6
@@ -400,6 +409,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_TDLS_CONFIG 0x0100
#define HostCmd_CMD_MC_POLICY 0x0121
#define HostCmd_CMD_TDLS_OPER 0x0122
+#define HostCmd_CMD_FW_DUMP_EVENT 0x0125
#define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223
#define HostCmd_CMD_CHAN_REGION_CFG 0x0242
#define HostCmd_CMD_PACKET_AGGR_CTRL 0x0251
@@ -570,6 +580,7 @@ enum mwifiex_channel_flags {
#define EVENT_BG_SCAN_STOPPED 0x00000065
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
#define EVENT_MULTI_CHAN_INFO 0x0000006a
+#define EVENT_FW_DUMP_INFO 0x00000073
#define EVENT_TX_STATUS_REPORT 0x00000074
#define EVENT_BT_COEX_WLAN_PARA_CHANGE 0X00000076
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index e1aa86042469..d239e9248c05 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -64,6 +64,13 @@ static void wakeup_timer_fn(struct timer_list *t)
adapter->if_ops.card_reset(adapter);
}
+static void fw_dump_timer_fn(struct timer_list *t)
+{
+ struct mwifiex_adapter *adapter = from_timer(adapter, t, devdump_timer);
+
+ mwifiex_upload_device_dump(adapter);
+}
+
/*
* This function initializes the private structure and sets default
* values to the members.
@@ -314,6 +321,8 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
adapter->active_scan_triggered = false;
timer_setup(&adapter->wakeup_timer, wakeup_timer_fn, 0);
+ adapter->devdump_len = 0;
+ timer_setup(&adapter->devdump_timer, fw_dump_timer_fn, 0);
}
/*
@@ -396,6 +405,7 @@ static void
mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
{
del_timer(&adapter->wakeup_timer);
+ del_timer_sync(&adapter->devdump_timer);
mwifiex_cancel_all_pending_cmd(adapter);
wake_up_interruptible(&adapter->cmd_wait_q.wait);
wake_up_interruptible(&adapter->hs_activate_wait_q);
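
The new devdump timer follows the standard timer_list pattern: timer_setup() binds the callback, from_timer() recovers the enclosing adapter inside it, and mod_timer()/del_timer_sync() arm and tear it down. The lifecycle condensed into three calls, using the names from the hunks above (a sketch, not a standalone compilation unit):

	timer_setup(&adapter->devdump_timer, fw_dump_timer_fn, 0); /* init */
	mod_timer(&adapter->devdump_timer,
		  jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S)); /* (re)arm */
	del_timer_sync(&adapter->devdump_timer);                  /* teardown */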
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index a96bd7e653bf..12e739950332 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1051,9 +1051,30 @@ void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter)
}
EXPORT_SYMBOL_GPL(mwifiex_multi_chan_resync);
-int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter)
{
- void *p;
+ /* Dump all the memory data into a single file; a userspace script
+ * can then split it into multiple files.
+ */
+ mwifiex_dbg(adapter, MSG,
+ "== mwifiex dump information to /sys/class/devcoredump start\n");
+ dev_coredumpv(adapter->dev, adapter->devdump_data, adapter->devdump_len,
+ GFP_KERNEL);
+ mwifiex_dbg(adapter, MSG,
+ "== mwifiex dump information to /sys/class/devcoredump end\n");
+
+ /* The device dump data will be freed by the devcoredump release
+ * function after 5 minutes. Reset adapter->devdump_data and
+ * ->devdump_len here so they cannot be accidentally reused.
+ */
+ adapter->devdump_data = NULL;
+ adapter->devdump_len = 0;
+}
+EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
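
Clearing the pointers rather than freeing them relies on dev_coredumpv() taking ownership of the vmalloc'd buffer: the devcoredump core frees it itself once userspace reads the dump or the timeout expires. A sketch of that contract (used_len is a placeholder for the bytes actually written):

	void *buf = vzalloc(MWIFIEX_FW_DUMP_SIZE);

	if (buf) {
		/* ...fill buf with driver info and firmware memory... */
		dev_coredumpv(adapter->dev, buf, used_len, GFP_KERNEL);
		buf = NULL;	/* now owned by the devcoredump core */
	}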
+
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
+{
+ char *p;
char drv_version[64];
struct usb_card_rec *cardp;
struct sdio_mmc_card *sdio_card;
@@ -1061,17 +1082,12 @@ int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
int i, idx;
struct netdev_queue *txq;
struct mwifiex_debug_info *debug_info;
- void *drv_info_dump;
mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n");
- /* memory allocate here should be free in mwifiex_upload_device_dump*/
- drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
-
- if (!drv_info_dump)
- return 0;
-
- p = (char *)(drv_info_dump);
+ p = adapter->devdump_data;
+ strcpy(p, "========Start dump driverinfo========\n");
+ p += strlen("========Start dump driverinfo========\n");
p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
mwifiex_drv_get_driver_version(adapter, drv_version,
@@ -1155,21 +1171,18 @@ int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
kfree(debug_info);
}
+ strcpy(p, "\n========End dump========\n");
+ p += strlen("\n========End dump========\n");
mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n");
- *drv_info = drv_info_dump;
- return p - drv_info_dump;
+ adapter->devdump_len = p - (char *)adapter->devdump_data;
}
EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump);
-void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
- int drv_info_size)
+void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter)
{
- u8 idx, *dump_data, *fw_dump_ptr;
- u32 dump_len;
-
- dump_len = (strlen("========Start dump driverinfo========\n") +
- drv_info_size +
- strlen("\n========End dump========\n"));
+ u8 idx;
+ char *fw_dump_ptr;
+ u32 dump_len = 0;
for (idx = 0; idx < adapter->num_mem_types; idx++) {
struct memory_type_mapping *entry =
@@ -1184,24 +1197,24 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
}
}
- dump_data = vzalloc(dump_len + 1);
- if (!dump_data)
- goto done;
-
- fw_dump_ptr = dump_data;
+ if (dump_len + 1 + adapter->devdump_len > MWIFIEX_FW_DUMP_SIZE) {
+ /* Reallocate so the appended firmware dump cannot overflow */
+ fw_dump_ptr = vzalloc(dump_len + 1 + adapter->devdump_len);
+ mwifiex_dbg(adapter, MSG, "Realloc device dump data.\n");
+ if (!fw_dump_ptr) {
+ vfree(adapter->devdump_data);
+ mwifiex_dbg(adapter, ERROR,
+ "vzalloc devdump data failure!\n");
+ return;
+ }
- /* Dump all the memory data into single file, a userspace script will
- * be used to split all the memory data to multiple files
- */
- mwifiex_dbg(adapter, MSG,
- "== mwifiex dump information to /sys/class/devcoredump start");
+ memmove(fw_dump_ptr, adapter->devdump_data,
+ adapter->devdump_len);
+ vfree(adapter->devdump_data);
+ adapter->devdump_data = fw_dump_ptr;
+ }
- strcpy(fw_dump_ptr, "========Start dump driverinfo========\n");
- fw_dump_ptr += strlen("========Start dump driverinfo========\n");
- memcpy(fw_dump_ptr, drv_info, drv_info_size);
- fw_dump_ptr += drv_info_size;
- strcpy(fw_dump_ptr, "\n========End dump========\n");
- fw_dump_ptr += strlen("\n========End dump========\n");
+ fw_dump_ptr = (char *)adapter->devdump_data + adapter->devdump_len;
for (idx = 0; idx < adapter->num_mem_types; idx++) {
struct memory_type_mapping *entry =
@@ -1225,14 +1238,8 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
}
}
- /* device dump data will be free in device coredump release function
- * after 5 min
- */
- dev_coredumpv(adapter->dev, dump_data, dump_len, GFP_KERNEL);
- mwifiex_dbg(adapter, MSG,
- "== mwifiex dump information to /sys/class/devcoredump end");
+ adapter->devdump_len = fw_dump_ptr - (char *)adapter->devdump_data;
-done:
for (idx = 0; idx < adapter->num_mem_types; idx++) {
struct memory_type_mapping *entry =
&adapter->mem_type_mapping_tbl[idx];
@@ -1241,10 +1248,8 @@ done:
entry->mem_ptr = NULL;
entry->mem_size = 0;
}
-
- vfree(drv_info);
}
-EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
+EXPORT_SYMBOL_GPL(mwifiex_prepare_fw_dump_info);
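
The reallocation above is a plain grow-and-copy: allocate the larger buffer, move the accumulated driver info across, free the old one. A generic sketch of the same pattern (the helper name is hypothetical):

	static void *vrealloc_copy(void *old, size_t old_len, size_t new_len)
	{
		void *n = vzalloc(new_len);

		if (!n) {
			vfree(old);
			return NULL;
		}
		memmove(n, old, old_len);
		vfree(old);
		return n;
	}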
/*
* CFG802.11 network device handler for statistics retrieval.
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 154c0796c0c5..6b5539b1f4d8 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -94,6 +94,8 @@ enum {
#define MAX_EVENT_SIZE 2048
+#define MWIFIEX_FW_DUMP_SIZE (2 * 1024 * 1024)
+
#define ARP_FILTER_MAX_BUF_SIZE 68
#define MWIFIEX_KEY_BUFFER_SIZE 16
@@ -1032,6 +1034,10 @@ struct mwifiex_adapter {
bool wake_by_wifi;
/* Aggregation parameters*/
struct bus_aggr_params bus_aggr;
+ /* Device dump data/length */
+ void *devdump_data;
+ int devdump_len;
+ struct timer_list devdump_timer;
};
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1656,9 +1662,9 @@ void mwifiex_hist_data_add(struct mwifiex_private *priv,
u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
u8 rx_rate, u8 ht_info);
-int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info);
-void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
- int drv_info_size);
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter);
+void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter);
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter);
void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action,
@@ -1677,6 +1683,7 @@ void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter);
int mwifiex_set_mac_address(struct mwifiex_private *priv,
struct net_device *dev);
+void mwifiex_devdump_tmo_func(unsigned long function_context);
#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index cd314946452c..97a6199692ab 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -2769,19 +2769,27 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
{
- int drv_info_size;
- void *drv_info;
+ adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
+ if (!adapter->devdump_data) {
+ mwifiex_dbg(adapter, ERROR,
+ "vzalloc devdump data failure!\n");
+ return;
+ }
- drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info);
+ mwifiex_drv_info_dump(adapter);
mwifiex_pcie_fw_dump(adapter);
- mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);
+ mwifiex_prepare_fw_dump_info(adapter);
+ mwifiex_upload_device_dump(adapter);
}
static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- pci_reset_function(card->dev);
+ /* We can't afford to wait here; remove() might be waiting on us. If we
+ * can't grab the device lock, maybe we'll get another chance later.
+ */
+ pci_try_reset_function(card->dev);
}
static void mwifiex_pcie_work(struct work_struct *work)
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index fd5183c10c4e..a82880132af4 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -2505,15 +2505,21 @@ done:
static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
- int drv_info_size;
- void *drv_info;
- drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info);
+ adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
+ if (!adapter->devdump_data) {
+ mwifiex_dbg(adapter, ERROR,
+ "vzalloc devdump data failure!\n");
+ return;
+ }
+
+ mwifiex_drv_info_dump(adapter);
if (card->fw_dump_enh)
mwifiex_sdio_generic_fw_dump(adapter);
else
mwifiex_sdio_fw_dump(adapter);
- mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);
+ mwifiex_prepare_fw_dump_info(adapter);
+ mwifiex_upload_device_dump(adapter);
}
static void mwifiex_sdio_work(struct work_struct *work)
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index fb090144a6d8..211e47d8b318 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -2206,6 +2206,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_CHAN_REGION_CFG:
ret = mwifiex_cmd_chan_region_cfg(priv, cmd_ptr, cmd_action);
break;
+ case HostCmd_CMD_FW_DUMP_EVENT:
+ cmd_ptr->command = cpu_to_le16(cmd_no);
+ cmd_ptr->size = cpu_to_le16(S_DS_GEN);
+ break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index d8db412b76c6..93dfb76cd8a6 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -584,6 +584,62 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
adapter->coex_rx_win_size);
}
+static void
+mwifiex_fw_dump_info_event(struct mwifiex_private *priv,
+ struct sk_buff *event_skb)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_fw_dump_header *fw_dump_hdr =
+ (void *)adapter->event_body;
+
+ if (adapter->iface_type != MWIFIEX_USB) {
+ mwifiex_dbg(adapter, MSG,
+ "event is not on usb interface, ignore it\n");
+ return;
+ }
+
+ if (!adapter->devdump_data) {
+ /* On receiving the first event, allocate the device dump
+ * buffer and dump the driver info into it.
+ */
+ adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
+ if (!adapter->devdump_data) {
+ mwifiex_dbg(adapter, ERROR,
+ "vzalloc devdump data failure!\n");
+ return;
+ }
+
+ mwifiex_drv_info_dump(adapter);
+
+ /* If no further event arrives within 10s, upload the device
+ * dump data anyway. This helps when the end-of-transmission
+ * event gets lost: in that corner case the user still gets a
+ * partial dump.
+ */
+ mod_timer(&adapter->devdump_timer,
+ jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+ }
+
+ /* Overflow check */
+ if (adapter->devdump_len + event_skb->len >= MWIFIEX_FW_DUMP_SIZE)
+ goto upload_dump;
+
+ memmove(adapter->devdump_data + adapter->devdump_len,
+ adapter->event_skb->data, event_skb->len);
+ adapter->devdump_len += event_skb->len;
+
+ if (le16_to_cpu(fw_dump_hdr->type) == FW_DUMP_INFO_ENDED) {
+ mwifiex_dbg(adapter, MSG,
+ "receive end of transmission flag event!\n");
+ goto upload_dump;
+ }
+ return;
+
+upload_dump:
+ del_timer_sync(&adapter->devdump_timer);
+ mwifiex_upload_device_dump(adapter);
+}
+
/*
* This function handles events generated by firmware.
*
@@ -636,6 +692,7 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
* - EVENT_DELBA
* - EVENT_BA_STREAM_TIEMOUT
* - EVENT_AMSDU_AGGR_CTRL
+ * - EVENT_FW_DUMP_INFO
*/
int mwifiex_process_sta_event(struct mwifiex_private *priv)
{
@@ -1007,6 +1064,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
adapter->event_skb->len -
sizeof(eventcause));
break;
+ case EVENT_FW_DUMP_INFO:
+ mwifiex_dbg(adapter, EVENT, "event: firmware debug info\n");
+ mwifiex_fw_dump_info_event(priv, adapter->event_skb);
+ break;
/* Debugging event; not used, but let's not print an ERROR for it. */
case EVENT_UNKNOWN_DEBUG:
mwifiex_dbg(adapter, EVENT, "event: debug\n");
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index e813b2ca740c..8e4e9b6919e0 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -199,7 +199,7 @@ struct mwl8k_priv {
struct ieee80211_channel channels_24[14];
struct ieee80211_rate rates_24[13];
struct ieee80211_supported_band band_50;
- struct ieee80211_channel channels_50[4];
+ struct ieee80211_channel channels_50[9];
struct ieee80211_rate rates_50[8];
u32 ap_macids_supported;
u32 sta_macids_supported;
@@ -383,6 +383,11 @@ static const struct ieee80211_channel mwl8k_channels_50[] = {
{ .band = NL80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
{ .band = NL80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
{ .band = NL80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5745, .hw_value = 149, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5765, .hw_value = 153, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5785, .hw_value = 157, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5805, .hw_value = 161, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5825, .hw_value = 165, },
};
static const struct ieee80211_rate mwl8k_rates_50[] = {
diff --git a/drivers/net/wireless/mediatek/Kconfig b/drivers/net/wireless/mediatek/Kconfig
index 28843fed750a..92ce4062f307 100644
--- a/drivers/net/wireless/mediatek/Kconfig
+++ b/drivers/net/wireless/mediatek/Kconfig
@@ -11,4 +11,5 @@ config WLAN_VENDOR_MEDIATEK
if WLAN_VENDOR_MEDIATEK
source "drivers/net/wireless/mediatek/mt7601u/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/Kconfig"
endif # WLAN_VENDOR_MEDIATEK
diff --git a/drivers/net/wireless/mediatek/Makefile b/drivers/net/wireless/mediatek/Makefile
index 9d5f182fd7fd..00f945f59b38 100644
--- a/drivers/net/wireless/mediatek/Makefile
+++ b/drivers/net/wireless/mediatek/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_MT7601U) += mt7601u/
+obj-$(CONFIG_MT76_CORE) += mt76/
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
new file mode 100644
index 000000000000..fc05d79c80d0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -0,0 +1,10 @@
+config MT76_CORE
+ tristate
+
+config MT76x2E
+ tristate "MediaTek MT76x2E (PCIe) support"
+ select MT76_CORE
+ depends on MAC80211
+ depends on PCI
+ ---help---
+ This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
new file mode 100644
index 000000000000..a0156bc01dea
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_MT76_CORE) += mt76.o
+obj-$(CONFIG_MT76x2E) += mt76x2e.o
+
+mt76-y := \
+ mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
+
+CFLAGS_trace.o := -I$(src)
+
+mt76x2e-y := \
+ mt76x2_pci.o mt76x2_dma.o \
+ mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \
+ mt76x2_core.o mt76x2_mac.o mt76x2_eeprom.o mt76x2_mcu.o mt76x2_phy.o \
+ mt76x2_dfs.o mt76x2_trace.o
+
+CFLAGS_mt76x2_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
new file mode 100644
index 000000000000..8027bb7c03c2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "mt76.h"
+
+#define REORDER_TIMEOUT (HZ / 10)
+
+static void
+mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
+{
+ struct sk_buff *skb;
+
+ tid->head = ieee80211_sn_inc(tid->head);
+
+ skb = tid->reorder_buf[idx];
+ if (!skb)
+ return;
+
+ tid->reorder_buf[idx] = NULL;
+ tid->nframes--;
+ __skb_queue_tail(frames, skb);
+}
+
+static void
+mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid, struct sk_buff_head *frames,
+ u16 head)
+{
+ int idx;
+
+ while (ieee80211_sn_less(tid->head, head)) {
+ idx = tid->head % tid->size;
+ mt76_aggr_release(tid, frames, idx);
+ }
+}
+
+static void
+mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
+{
+ int idx = tid->head % tid->size;
+
+ while (tid->reorder_buf[idx]) {
+ mt76_aggr_release(tid, frames, idx);
+ idx = tid->head % tid->size;
+ }
+}
+
+static void
+mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
+{
+ struct mt76_rx_status *status;
+ struct sk_buff *skb;
+ int start, idx, nframes;
+
+ if (!tid->nframes)
+ return;
+
+ mt76_rx_aggr_release_head(tid, frames);
+
+ start = tid->head % tid->size;
+ nframes = tid->nframes;
+
+ for (idx = (tid->head + 1) % tid->size;
+ idx != start && nframes;
+ idx = (idx + 1) % tid->size) {
+
+ skb = tid->reorder_buf[idx];
+ if (!skb)
+ continue;
+
+ nframes--;
+ status = (struct mt76_rx_status *) skb->cb;
+ if (!time_after(jiffies, status->reorder_time +
+ REORDER_TIMEOUT))
+ continue;
+
+ mt76_rx_aggr_release_frames(tid, frames, status->seqno);
+ }
+
+ mt76_rx_aggr_release_head(tid, frames);
+}
+
+static void
+mt76_rx_aggr_reorder_work(struct work_struct *work)
+{
+ struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
+ reorder_work.work);
+ struct mt76_dev *dev = tid->dev;
+ struct sk_buff_head frames;
+
+ __skb_queue_head_init(&frames);
+
+ local_bh_disable();
+
+ spin_lock(&tid->lock);
+ mt76_rx_aggr_check_release(tid, &frames);
+ spin_unlock(&tid->lock);
+
+ ieee80211_queue_delayed_work(dev->hw, &tid->reorder_work,
+ REORDER_TIMEOUT);
+ mt76_rx_complete(dev, &frames, -1);
+
+ local_bh_enable();
+}
+
+void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+ struct mt76_wcid *wcid = status->wcid;
+ struct ieee80211_sta *sta;
+ struct mt76_rx_tid *tid;
+ bool sn_less;
+ u16 seqno, head, size;
+ u8 idx;
+
+ __skb_queue_tail(frames, skb);
+
+ sta = wcid_to_sta(wcid);
+ if (!sta || !status->aggr)
+ return;
+
+ tid = rcu_dereference(wcid->aggr[status->tid]);
+ if (!tid)
+ return;
+
+ spin_lock_bh(&tid->lock);
+
+ if (tid->stopped)
+ goto out;
+
+ head = tid->head;
+ seqno = status->seqno;
+ size = tid->size;
+ sn_less = ieee80211_sn_less(seqno, head);
+
+ if (!tid->started) {
+ if (sn_less)
+ goto out;
+
+ tid->started = true;
+ }
+
+ if (sn_less) {
+ __skb_unlink(skb, frames);
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ if (seqno == head) {
+ tid->head = ieee80211_sn_inc(head);
+ if (tid->nframes)
+ mt76_rx_aggr_release_head(tid, frames);
+ goto out;
+ }
+
+ __skb_unlink(skb, frames);
+
+ /*
+ * Frame sequence number exceeds buffering window, free up some space
+ * by releasing previous frames
+ */
+ if (!ieee80211_sn_less(seqno, head + size)) {
+ head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
+ mt76_rx_aggr_release_frames(tid, frames, head);
+ }
+
+ idx = seqno % size;
+
+ /* Discard if the current slot is already in use */
+ if (tid->reorder_buf[idx]) {
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ status->reorder_time = jiffies;
+ tid->reorder_buf[idx] = skb;
+ tid->nframes++;
+ mt76_rx_aggr_release_head(tid, frames);
+
+ ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
+ REORDER_TIMEOUT);
+
+out:
+ spin_unlock_bh(&tid->lock);
+}
+
+int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
+ u16 ssn, u8 size)
+{
+ struct mt76_rx_tid *tid;
+
+ mt76_rx_aggr_stop(dev, wcid, tidno);
+
+ tid = kzalloc(sizeof(*tid) + size * sizeof(tid->reorder_buf[0]),
+ GFP_KERNEL);
+ if (!tid)
+ return -ENOMEM;
+
+ tid->dev = dev;
+ tid->head = ssn;
+ tid->size = size;
+ INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
+ spin_lock_init(&tid->lock);
+
+ rcu_assign_pointer(wcid->aggr[tidno], tid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
+
+static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
+{
+ u8 size = tid->size;
+ int i;
+
+ spin_lock_bh(&tid->lock);
+
+ tid->stopped = true;
+ for (i = 0; tid->nframes && i < size; i++) {
+ struct sk_buff *skb = tid->reorder_buf[i];
+
+ if (!skb)
+ continue;
+
+ tid->nframes--;
+ dev_kfree_skb(skb);
+ }
+
+ spin_unlock_bh(&tid->lock);
+
+ cancel_delayed_work_sync(&tid->reorder_work);
+}
+
+void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
+{
+ struct mt76_rx_tid *tid;
+
+ rcu_read_lock();
+
+ tid = rcu_dereference(wcid->aggr[tidno]);
+ if (tid) {
+ rcu_assign_pointer(wcid->aggr[tidno], NULL);
+ mt76_rx_aggr_shutdown(dev, tid);
+ kfree_rcu(tid, rcu_head);
+ }
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);
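
The reorder logic above leans entirely on 802.11 sequence-number arithmetic: SNs are 12-bit, so increment and ordering wrap modulo 4096, and "a is older than b" means the forward distance from b to a exceeds half the number space. A self-contained sketch mirroring the mac80211 helpers used here:

	#define SN_MODULO	0x1000		/* 12-bit sequence numbers */
	#define SN_MASK		(SN_MODULO - 1)

	static inline u16 sn_inc(u16 sn)
	{
		return (sn + 1) & SN_MASK;
	}

	static inline bool sn_less(u16 a, u16 b)	/* a older than b? */
	{
		return ((a - b) & SN_MASK) > (SN_MODULO >> 1);
	}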
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
new file mode 100644
index 000000000000..c121b502a462
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "mt76.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+ struct mt76_dev *dev = data;
+
+ dev->bus->wr(dev, dev->debugfs_reg, val);
+ return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+ struct mt76_dev *dev = data;
+
+ *val = dev->bus->rr(dev, dev->debugfs_reg);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set,
+ "0x%08llx\n");
+
+static int
+mt76_queues_read(struct seq_file *s, void *data)
+{
+ struct mt76_dev *dev = dev_get_drvdata(s->private);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
+ struct mt76_queue *q = &dev->q_tx[i];
+
+ if (!q->ndesc)
+ continue;
+
+ seq_printf(s,
+ "%d: queued=%d head=%d tail=%d swq_queued=%d\n",
+ i, q->queued, q->head, q->tail, q->swq_queued);
+ }
+
+ return 0;
+}
+
+struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("mt76", dev->hw->wiphy->debugfsdir);
+ if (!dir)
+ return NULL;
+
+ debugfs_create_u8("led_pin", S_IRUSR | S_IWUSR, dir, &dev->led_pin);
+ debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
+ debugfs_create_file_unsafe("regval", S_IRUSR | S_IWUSR, dir, dev,
+ &fops_regval);
+ debugfs_create_blob("eeprom", S_IRUSR, dir, &dev->eeprom);
+ if (dev->otp.data)
+ debugfs_create_blob("otp", S_IRUSR, dir, &dev->otp);
+ debugfs_create_devm_seqfile(dev->dev, "queues", dir, mt76_queues_read);
+
+ return dir;
+}
+EXPORT_SYMBOL_GPL(mt76_register_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
new file mode 100644
index 000000000000..3518703524e7
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -0,0 +1,459 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "mt76.h"
+#include "dma.h"
+
+#define DMA_DUMMY_TXWI ((void *) ~0)
+
+static int
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ int size;
+ int i;
+
+ spin_lock_init(&q->lock);
+ INIT_LIST_HEAD(&q->swq);
+
+ size = q->ndesc * sizeof(struct mt76_desc);
+ q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ size = q->ndesc * sizeof(*q->entry);
+ q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ /* clear descriptors */
+ for (i = 0; i < q->ndesc; i++)
+ q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+ iowrite32(q->desc_dma, &q->regs->desc_base);
+ iowrite32(0, &q->regs->cpu_idx);
+ iowrite32(0, &q->regs->dma_idx);
+ iowrite32(q->ndesc, &q->regs->ring_size);
+
+ return 0;
+}
+
+static int
+mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_buf *buf, int nbufs, u32 info,
+ struct sk_buff *skb, void *txwi)
+{
+ struct mt76_desc *desc;
+ u32 ctrl;
+ int i, idx = -1;
+
+ if (txwi)
+ q->entry[q->head].txwi = DMA_DUMMY_TXWI;
+
+ for (i = 0; i < nbufs; i += 2, buf += 2) {
+ u32 buf0 = buf[0].addr, buf1 = 0;
+
+ ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+ if (i < nbufs - 1) {
+ buf1 = buf[1].addr;
+ ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+ }
+
+ if (i == nbufs - 1)
+ ctrl |= MT_DMA_CTL_LAST_SEC0;
+ else if (i == nbufs - 2)
+ ctrl |= MT_DMA_CTL_LAST_SEC1;
+
+ idx = q->head;
+ q->head = (q->head + 1) % q->ndesc;
+
+ desc = &q->desc[idx];
+
+ WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
+ WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
+ WRITE_ONCE(desc->info, cpu_to_le32(info));
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+ q->queued++;
+ }
+
+ q->entry[idx].txwi = txwi;
+ q->entry[idx].skb = skb;
+
+ return idx;
+}
+
+static void
+mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ struct mt76_queue_entry *prev_e)
+{
+ struct mt76_queue_entry *e = &q->entry[idx];
+ __le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
+ u32 ctrl = le32_to_cpu(__ctrl);
+
+ if (!e->txwi || !e->skb) {
+ __le32 addr = READ_ONCE(q->desc[idx].buf0);
+ u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
+
+ dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+ DMA_TO_DEVICE);
+ }
+
+ if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
+ __le32 addr = READ_ONCE(q->desc[idx].buf1);
+ u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
+
+ dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+ DMA_TO_DEVICE);
+ }
+
+ if (e->txwi == DMA_DUMMY_TXWI)
+ e->txwi = NULL;
+
+ *prev_e = *e;
+ memset(e, 0, sizeof(*e));
+}
+
+static void
+mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ q->head = ioread32(&q->regs->dma_idx);
+ q->tail = q->head;
+ iowrite32(q->head, &q->regs->cpu_idx);
+}
+
+static void
+mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
+{
+ struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_queue_entry entry;
+ bool wake = false;
+ int last;
+
+ if (!q->ndesc)
+ return;
+
+ spin_lock_bh(&q->lock);
+ if (flush)
+ last = -1;
+ else
+ last = ioread32(&q->regs->dma_idx);
+
+ while (q->queued && q->tail != last) {
+ mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
+ if (entry.schedule)
+ q->swq_queued--;
+
+ if (entry.skb)
+ dev->drv->tx_complete_skb(dev, q, &entry, flush);
+
+ if (entry.txwi) {
+ mt76_put_txwi(dev, entry.txwi);
+ wake = true;
+ }
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+ if (!flush && q->tail == last)
+ last = ioread32(&q->regs->dma_idx);
+ }
+
+ if (!flush)
+ mt76_txq_schedule(dev, q);
+ else
+ mt76_dma_sync_idx(dev, q);
+
+ wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+ spin_unlock_bh(&q->lock);
+
+ if (wake)
+ ieee80211_wake_queue(dev->hw, qid);
+}
+
+static void *
+mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+ int *len, u32 *info, bool *more)
+{
+ struct mt76_queue_entry *e = &q->entry[idx];
+ struct mt76_desc *desc = &q->desc[idx];
+ dma_addr_t buf_addr;
+ void *buf = e->buf;
+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
+ if (len) {
+ u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
+ *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
+ *more = !(ctl & MT_DMA_CTL_LAST_SEC0);
+ }
+
+ if (info)
+ *info = le32_to_cpu(desc->info);
+
+ dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
+ e->buf = NULL;
+
+ return buf;
+}
+
+static void *
+mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+ int *len, u32 *info, bool *more)
+{
+ int idx = q->tail;
+
+ *more = false;
+ if (!q->queued)
+ return NULL;
+
+ if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+ return NULL;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+ return mt76_dma_get_buf(dev, q, idx, len, info, more);
+}
+
+static void
+mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ iowrite32(q->head, &q->regs->cpu_idx);
+}
+
+static int
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
+{
+ dma_addr_t addr;
+ void *buf;
+ int frames = 0;
+ int len = SKB_WITH_OVERHEAD(q->buf_size);
+ int offset = q->buf_offset;
+ int idx;
+ void *(*alloc)(unsigned int fragsz);
+
+ if (napi)
+ alloc = napi_alloc_frag;
+ else
+ alloc = netdev_alloc_frag;
+
+ spin_lock_bh(&q->lock);
+
+ while (q->queued < q->ndesc - 1) {
+ struct mt76_queue_buf qbuf;
+
+ buf = alloc(q->buf_size);
+ if (!buf)
+ break;
+
+ addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev->dev, addr)) {
+ skb_free_frag(buf);
+ break;
+ }
+
+ qbuf.addr = addr + offset;
+ qbuf.len = len - offset;
+ idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
+ frames++;
+ }
+
+ if (frames)
+ mt76_dma_kick_queue(dev, q);
+
+ spin_unlock_bh(&q->lock);
+
+ return frames;
+}
+
+static void
+mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ void *buf;
+ bool more;
+
+ spin_lock_bh(&q->lock);
+ do {
+ buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
+ if (!buf)
+ break;
+
+ skb_free_frag(buf);
+ } while (1);
+ spin_unlock_bh(&q->lock);
+}
+
+static void
+mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
+{
+ struct mt76_queue *q = &dev->q_rx[qid];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+ mt76_dma_rx_cleanup(dev, q);
+ mt76_dma_sync_idx(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
+}
+
+static void
+mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+ int len, bool more)
+{
+ struct page *page = virt_to_head_page(data);
+ int offset = data - page_address(page);
+ struct sk_buff *skb = q->rx_head;
+
+ offset += q->buf_offset;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
+ q->buf_size);
+
+ if (more)
+ return;
+
+ q->rx_head = NULL;
+ dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+}
+
+static int
+mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+{
+ struct sk_buff *skb;
+ unsigned char *data;
+ int len;
+ int done = 0;
+ bool more;
+
+ while (done < budget) {
+ u32 info;
+
+ data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
+ if (!data)
+ break;
+
+ if (q->rx_head) {
+ mt76_add_fragment(dev, q, data, len, more);
+ continue;
+ }
+
+ skb = build_skb(data, q->buf_size);
+ if (!skb) {
+ skb_free_frag(data);
+ continue;
+ }
+
+ skb_reserve(skb, q->buf_offset);
+ if (skb->tail + len > skb->end) {
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ if (q == &dev->q_rx[MT_RXQ_MCU]) {
+ u32 *rxfce = (u32 *) skb->cb;
+ *rxfce = info;
+ }
+
+ __skb_put(skb, len);
+ done++;
+
+ if (more) {
+ q->rx_head = skb;
+ continue;
+ }
+
+ dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+ }
+
+ mt76_dma_rx_fill(dev, q, true);
+ return done;
+}
+
+static int
+mt76_dma_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct mt76_dev *dev;
+ int qid, done = 0, cur;
+
+ dev = container_of(napi->dev, struct mt76_dev, napi_dev);
+ qid = napi - dev->napi;
+
+ rcu_read_lock();
+
+ do {
+ cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
+ mt76_rx_poll_complete(dev, qid);
+ done += cur;
+ } while (cur && done < budget);
+
+ rcu_read_unlock();
+
+ if (done < budget) {
+ napi_complete(napi);
+ dev->drv->rx_poll_complete(dev, qid);
+ }
+
+ return done;
+}
+
+static int
+mt76_dma_init(struct mt76_dev *dev)
+{
+ int i;
+
+ init_dummy_netdev(&dev->napi_dev);
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+ netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
+ 64);
+ mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+ skb_queue_head_init(&dev->rx_skb[i]);
+ napi_enable(&dev->napi[i]);
+ }
+
+ return 0;
+}
+
+static const struct mt76_queue_ops mt76_dma_ops = {
+ .init = mt76_dma_init,
+ .alloc = mt76_dma_alloc_queue,
+ .add_buf = mt76_dma_add_buf,
+ .tx_cleanup = mt76_dma_tx_cleanup,
+ .rx_reset = mt76_dma_rx_reset,
+ .kick = mt76_dma_kick_queue,
+};
+
+int mt76_dma_attach(struct mt76_dev *dev)
+{
+ dev->queue_ops = &mt76_dma_ops;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_dma_attach);
+
+void mt76_dma_cleanup(struct mt76_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
+ mt76_dma_tx_cleanup(dev, i, true);
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+ netif_napi_del(&dev->napi[i]);
+ mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
new file mode 100644
index 000000000000..1dad39697929
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __MT76_DMA_H
+#define __MT76_DMA_H
+
+#define MT_RING_SIZE 0x10
+
+#define MT_DMA_CTL_SD_LEN1 GENMASK(13, 0)
+#define MT_DMA_CTL_LAST_SEC1 BIT(14)
+#define MT_DMA_CTL_BURST BIT(15)
+#define MT_DMA_CTL_SD_LEN0 GENMASK(29, 16)
+#define MT_DMA_CTL_LAST_SEC0 BIT(30)
+#define MT_DMA_CTL_DMA_DONE BIT(31)
+
+struct mt76_desc {
+ __le32 buf0;
+ __le32 ctrl;
+ __le32 buf1;
+ __le32 info;
+} __packed __aligned(4);
+
+int mt76_dma_attach(struct mt76_dev *dev);
+void mt76_dma_cleanup(struct mt76_dev *dev);
+
+#endif
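
The SD_LEN0/SD_LEN1 masks are consumed with the <linux/bitfield.h> helpers, as in dma.c above. A minimal sketch of packing and unpacking a descriptor control word; the helper names are hypothetical:

	#include <linux/bitfield.h>

	static u32 pack_ctrl(u32 len0, u32 len1)
	{
		return FIELD_PREP(MT_DMA_CTL_SD_LEN0, len0) |
		       FIELD_PREP(MT_DMA_CTL_SD_LEN1, len1) |
		       MT_DMA_CTL_LAST_SEC0;
	}

	static u32 unpack_len0(u32 ctrl)
	{
		return FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);	/* == len0 */
	}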
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
new file mode 100644
index 000000000000..530e5593765c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include "mt76.h"
+
+static int
+mt76_get_of_eeprom(struct mt76_dev *dev, int len)
+{
+#if defined(CONFIG_OF) && defined(CONFIG_MTD)
+ struct device_node *np = dev->dev->of_node;
+ struct mtd_info *mtd;
+ const __be32 *list;
+ const char *part;
+ phandle phandle;
+ int offset = 0;
+ int size;
+ size_t retlen;
+ int ret;
+
+ if (!np)
+ return -ENOENT;
+
+ list = of_get_property(np, "mediatek,mtd-eeprom", &size);
+ if (!list)
+ return -ENOENT;
+
+ phandle = be32_to_cpup(list++);
+ if (!phandle)
+ return -ENOENT;
+
+ np = of_find_node_by_phandle(phandle);
+ if (!np)
+ return -EINVAL;
+
+ part = of_get_property(np, "label", NULL);
+ if (!part)
+ part = np->name;
+
+ if (size <= sizeof(*list))
+ return -EINVAL;
+
+ mtd = get_mtd_device_nm(part);
+ if (IS_ERR(mtd))
+ return PTR_ERR(mtd);
+
+ offset = be32_to_cpup(list);
+ ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data);
+ put_mtd_device(mtd);
+ if (ret)
+ return ret;
+
+ if (retlen < len)
+ return -EINVAL;
+
+ return 0;
+#else
+ return -ENOENT;
+#endif
+}
+
+void
+mt76_eeprom_override(struct mt76_dev *dev)
+{
+#ifdef CONFIG_OF
+ struct device_node *np = dev->dev->of_node;
+ const u8 *mac;
+
+ if (!np)
+ return;
+
+ mac = of_get_mac_address(np);
+ if (mac)
+ memcpy(dev->macaddr, mac, ETH_ALEN);
+#endif
+
+ if (!is_valid_ether_addr(dev->macaddr)) {
+ eth_random_addr(dev->macaddr);
+ dev_info(dev->dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->macaddr);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_eeprom_override);
+
+int
+mt76_eeprom_init(struct mt76_dev *dev, int len)
+{
+ dev->eeprom.size = len;
+ dev->eeprom.data = devm_kzalloc(dev->dev, len, GFP_KERNEL);
+ if (!dev->eeprom.data)
+ return -ENOMEM;
+
+ return !mt76_get_of_eeprom(dev, len);
+}
+EXPORT_SYMBOL_GPL(mt76_eeprom_init);
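
A sketch of the expected call order from a chip driver's probe path. Note the return convention of mt76_eeprom_init() above: a negative value means allocation failure, and the logical negation turns the OF/MTD lookup result into nonzero when data was actually loaded. The function name and length constant are placeholders:

	static int example_probe_eeprom(struct mt76_dev *dev)
	{
		int ret = mt76_eeprom_init(dev, 512 /* chip-specific len */);

		if (ret < 0)
			return ret;	/* -ENOMEM */
		/* ret != 0: data came from the mediatek,mtd-eeprom
		 * partition; otherwise the buffer is still zeroed and
		 * must be filled from chip-specific storage.
		 */

		/* ...parse dev->eeprom.data, fill dev->macaddr... */
		mt76_eeprom_override(dev);	/* DT / random fallback */
		return 0;
	}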
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
new file mode 100644
index 000000000000..5fcb2deb89a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/of.h>
+#include "mt76.h"
+
+#define CHAN2G(_idx, _freq) { \
+ .band = NL80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_idx, _freq) { \
+ .band = NL80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+ CHAN2G(1, 2412),
+ CHAN2G(2, 2417),
+ CHAN2G(3, 2422),
+ CHAN2G(4, 2427),
+ CHAN2G(5, 2432),
+ CHAN2G(6, 2437),
+ CHAN2G(7, 2442),
+ CHAN2G(8, 2447),
+ CHAN2G(9, 2452),
+ CHAN2G(10, 2457),
+ CHAN2G(11, 2462),
+ CHAN2G(12, 2467),
+ CHAN2G(13, 2472),
+ CHAN2G(14, 2484),
+};
+
+static const struct ieee80211_channel mt76_channels_5ghz[] = {
+ CHAN5G(36, 5180),
+ CHAN5G(40, 5200),
+ CHAN5G(44, 5220),
+ CHAN5G(48, 5240),
+
+ CHAN5G(52, 5260),
+ CHAN5G(56, 5280),
+ CHAN5G(60, 5300),
+ CHAN5G(64, 5320),
+
+ CHAN5G(100, 5500),
+ CHAN5G(104, 5520),
+ CHAN5G(108, 5540),
+ CHAN5G(112, 5560),
+ CHAN5G(116, 5580),
+ CHAN5G(120, 5600),
+ CHAN5G(124, 5620),
+ CHAN5G(128, 5640),
+ CHAN5G(132, 5660),
+ CHAN5G(136, 5680),
+ CHAN5G(140, 5700),
+
+ CHAN5G(149, 5745),
+ CHAN5G(153, 5765),
+ CHAN5G(157, 5785),
+ CHAN5G(161, 5805),
+ CHAN5G(165, 5825),
+};
+
+static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
+ { .throughput = 0 * 1024, .blink_time = 334 },
+ { .throughput = 1 * 1024, .blink_time = 260 },
+ { .throughput = 5 * 1024, .blink_time = 220 },
+ { .throughput = 10 * 1024, .blink_time = 190 },
+ { .throughput = 20 * 1024, .blink_time = 170 },
+ { .throughput = 50 * 1024, .blink_time = 150 },
+ { .throughput = 70 * 1024, .blink_time = 130 },
+ { .throughput = 100 * 1024, .blink_time = 110 },
+ { .throughput = 200 * 1024, .blink_time = 80 },
+ { .throughput = 300 * 1024, .blink_time = 50 },
+};
+
+static int mt76_led_init(struct mt76_dev *dev)
+{
+ struct device_node *np = dev->dev->of_node;
+ struct ieee80211_hw *hw = dev->hw;
+ int led_pin;
+
+ if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+ return 0;
+
+ snprintf(dev->led_name, sizeof(dev->led_name),
+ "mt76-%s", wiphy_name(hw->wiphy));
+
+ dev->led_cdev.name = dev->led_name;
+ dev->led_cdev.default_trigger =
+ ieee80211_create_tpt_led_trigger(hw,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO,
+ mt76_tpt_blink,
+ ARRAY_SIZE(mt76_tpt_blink));
+
+ np = of_get_child_by_name(np, "led");
+ if (np) {
+ if (!of_property_read_u32(np, "led-sources", &led_pin))
+ dev->led_pin = led_pin;
+ dev->led_al = of_property_read_bool(np, "led-active-low");
+ }
+
+ return devm_led_classdev_register(dev->dev, &dev->led_cdev);
+}
+
+static int
+mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
+ const struct ieee80211_channel *chan, int n_chan,
+ struct ieee80211_rate *rates, int n_rates, bool vht)
+{
+ struct ieee80211_supported_band *sband = &msband->sband;
+ struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap;
+ void *chanlist;
+ u16 mcs_map;
+ int size;
+
+ size = n_chan * sizeof(*chan);
+ chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
+ if (!chanlist)
+ return -ENOMEM;
+
+ msband->chan = devm_kzalloc(dev->dev, n_chan * sizeof(*msband->chan),
+ GFP_KERNEL);
+ if (!msband->chan)
+ return -ENOMEM;
+
+ sband->channels = chanlist;
+ sband->n_channels = n_chan;
+ sband->bitrates = rates;
+ sband->n_bitrates = n_rates;
+ dev->chandef.chan = &sband->channels[0];
+
+ ht_cap = &sband->ht_cap;
+ ht_cap->ht_supported = true;
+ ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_TX_STBC |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+ ht_cap->mcs.rx_mask[0] = 0xff;
+ ht_cap->mcs.rx_mask[1] = 0xff;
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
+ if (!vht)
+ return 0;
+
+ vht_cap = &sband->vht_cap;
+ vht_cap->vht_supported = true;
+
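+ /* advertise MCS 0-9 on the first two spatial streams, none above */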
+ mcs_map = (IEEE80211_VHT_MCS_SUPPORT_0_9 << (0 * 2)) |
+ (IEEE80211_VHT_MCS_SUPPORT_0_9 << (1 * 2)) |
+ (IEEE80211_VHT_MCS_NOT_SUPPORTED << (2 * 2)) |
+ (IEEE80211_VHT_MCS_NOT_SUPPORTED << (3 * 2)) |
+ (IEEE80211_VHT_MCS_NOT_SUPPORTED << (4 * 2)) |
+ (IEEE80211_VHT_MCS_NOT_SUPPORTED << (5 * 2)) |
+ (IEEE80211_VHT_MCS_NOT_SUPPORTED << (6 * 2)) |
+ (IEEE80211_VHT_MCS_NOT_SUPPORTED << (7 * 2));
+
+ vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
+ IEEE80211_VHT_CAP_TXSTBC |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_SHORT_GI_80;
+
+ return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates,
+ int n_rates)
+{
+ dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->sband_2g.sband;
+
+ return mt76_init_sband(dev, &dev->sband_2g,
+ mt76_channels_2ghz,
+ ARRAY_SIZE(mt76_channels_2ghz),
+ rates, n_rates, false);
+}
+
+static int
+mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates,
+ int n_rates, bool vht)
+{
+ dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->sband_5g.sband;
+
+ return mt76_init_sband(dev, &dev->sband_5g,
+ mt76_channels_5ghz,
+ ARRAY_SIZE(mt76_channels_5ghz),
+ rates, n_rates, vht);
+}
+
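+/* Drop a band whose channels were all disabled by regulatory/OF limits */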
+static void
+mt76_check_sband(struct mt76_dev *dev, int band)
+{
+ struct ieee80211_supported_band *sband = dev->hw->wiphy->bands[band];
+ bool found = false;
+ int i;
+
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ found = true;
+ break;
+ }
+
+ if (found)
+ return;
+
+ sband->n_channels = 0;
+ dev->hw->wiphy->bands[band] = NULL;
+}
+
+int mt76_register_device(struct mt76_dev *dev, bool vht,
+ struct ieee80211_rate *rates, int n_rates)
+{
+ struct ieee80211_hw *hw = dev->hw;
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+ dev_set_drvdata(dev->dev, dev);
+
+ spin_lock_init(&dev->lock);
+ spin_lock_init(&dev->cc_lock);
+ INIT_LIST_HEAD(&dev->txwi_cache);
+
+ SET_IEEE80211_DEV(hw, dev->dev);
+ SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC);
+
+ wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+
+ hw->txq_data_size = sizeof(struct mt76_txq);
+ hw->max_tx_fragments = 16;
+
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+
+ wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+ if (dev->cap.has_2ghz) {
+ ret = mt76_init_sband_2g(dev, rates, n_rates);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->cap.has_5ghz) {
+ ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht);
+ if (ret)
+ return ret;
+ }
+
+ wiphy_read_of_freq_limits(wiphy);
+ mt76_check_sband(dev, NL80211_BAND_2GHZ);
+ mt76_check_sband(dev, NL80211_BAND_5GHZ);
+
+ ret = mt76_led_init(dev);
+ if (ret)
+ return ret;
+
+ return ieee80211_register_hw(hw);
+}
+EXPORT_SYMBOL_GPL(mt76_register_device);
+
+void mt76_unregister_device(struct mt76_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->hw;
+
+ ieee80211_unregister_hw(hw);
+ mt76_tx_free(dev);
+}
+EXPORT_SYMBOL_GPL(mt76_unregister_device);
+
+void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
+{
+ if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ __skb_queue_tail(&dev->rx_skb[q], skb);
+}
+EXPORT_SYMBOL_GPL(mt76_rx);
+
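+/* Track the new operating channel; off-channel survey state starts fresh */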
+void mt76_set_channel(struct mt76_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->hw;
+ struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+ struct mt76_channel_state *state;
+ bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
+
+ if (dev->drv->update_survey)
+ dev->drv->update_survey(dev);
+
+ dev->chandef = *chandef;
+
+ if (!offchannel)
+ dev->main_chan = chandef->chan;
+
+ if (chandef->chan != dev->main_chan) {
+ state = mt76_channel_state(dev, chandef->chan);
+ memset(state, 0, sizeof(*state));
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_set_channel);
+
+int mt76_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct mt76_dev *dev = hw->priv;
+ struct mt76_sband *sband;
+ struct ieee80211_channel *chan;
+ struct mt76_channel_state *state;
+ int ret = 0;
+
+ if (idx == 0 && dev->drv->update_survey)
+ dev->drv->update_survey(dev);
+
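+ /* the survey index runs over the 2 GHz channels first, then 5 GHz */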
+ sband = &dev->sband_2g;
+ if (idx >= sband->sband.n_channels) {
+ idx -= sband->sband.n_channels;
+ sband = &dev->sband_5g;
+ }
+
+ if (idx >= sband->sband.n_channels)
+ return -ENOENT;
+
+ chan = &sband->sband.channels[idx];
+ state = mt76_channel_state(dev, chan);
+
+ memset(survey, 0, sizeof(*survey));
+ survey->channel = chan;
+ survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
+ if (chan == dev->main_chan)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+ spin_lock_bh(&dev->cc_lock);
+ survey->time = div_u64(state->cc_active, 1000);
+ survey->time_busy = div_u64(state->cc_busy, 1000);
+ spin_unlock_bh(&dev->cc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_get_survey);
+
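+/* Seed the per-TID rx PN from mac80211 so CCMP replay checks start in sync */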
+void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key)
+{
+ struct ieee80211_key_seq seq;
+ int i;
+
+ wcid->rx_check_pn = false;
+
+ if (!key)
+ return;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+ wcid->rx_check_pn = true;
+
+ for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
+ }
+}
+EXPORT_SYMBOL(mt76_wcid_key_setup);
+
+static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct mt76_rx_status mstat;
+
+ mstat = *((struct mt76_rx_status *) skb->cb);
+ memset(status, 0, sizeof(*status));
+
+ status->flag = mstat.flag;
+ status->freq = mstat.freq;
+ status->enc_flags = mstat.enc_flags;
+ status->encoding = mstat.encoding;
+ status->bw = mstat.bw;
+ status->rate_idx = mstat.rate_idx;
+ status->nss = mstat.nss;
+ status->band = mstat.band;
+ status->signal = mstat.signal;
+ status->chains = mstat.chains;
+
+ BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
+ BUILD_BUG_ON(sizeof(status->chain_signal) != sizeof(mstat.chain_signal));
+ memcpy(status->chain_signal, mstat.chain_signal, sizeof(mstat.chain_signal));
+
+ return wcid_to_sta(mstat.wcid);
+}
+
+static int
+mt76_check_ccmp_pn(struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+ struct mt76_wcid *wcid = status->wcid;
+ struct ieee80211_hdr *hdr;
+ int ret;
+
+ if (!(status->flag & RX_FLAG_DECRYPTED))
+ return 0;
+
+ if (!wcid || !wcid->rx_check_pn)
+ return 0;
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ /*
+ * Validate the first fragment both here and in mac80211;
+ * all further fragments will be validated by mac80211 only.
+ */
+ hdr = (struct ieee80211_hdr *) skb->data;
+ if (ieee80211_is_frag(hdr) &&
+ !ieee80211_is_first_frag(hdr->frame_control))
+ return 0;
+ }
+
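+ /* the received PN must be strictly greater than the last valid one */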
+ BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
+ ret = memcmp(status->iv, wcid->rx_key_pn[status->tid],
+ sizeof(status->iv));
+ if (ret <= 0)
+ return -EINVAL; /* replay */
+
+ memcpy(wcid->rx_key_pn[status->tid], status->iv, sizeof(status->iv));
+
+ if (status->flag & RX_FLAG_IV_STRIPPED)
+ status->flag |= RX_FLAG_PN_VALIDATED;
+
+ return 0;
+}
+
+void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
+ int queue)
+{
+ struct napi_struct *napi = NULL;
+ struct ieee80211_sta *sta;
+ struct sk_buff *skb;
+
+ if (queue >= 0)
+ napi = &dev->napi[queue];
+
+ while ((skb = __skb_dequeue(frames)) != NULL) {
+ if (mt76_check_ccmp_pn(skb)) {
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ sta = mt76_rx_convert(skb);
+ ieee80211_rx_napi(dev->hw, sta, skb, napi);
+ }
+}
+
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
+{
+ struct sk_buff_head frames;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&frames);
+
+ while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL)
+ mt76_rx_aggr_reorder(skb, &frames);
+
+ mt76_rx_complete(dev, &frames, q);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
new file mode 100644
index 000000000000..09a14dead6e3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+#include "trace.h"
+
+static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
+{
+ u32 val;
+
+ val = ioread32(dev->regs + offset);
+ trace_reg_rr(dev, offset, val);
+
+ return val;
+}
+
+static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+ trace_reg_wr(dev, offset, val);
+ iowrite32(val, dev->regs + offset);
+}
+
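+/* read-modify-write: clear the bits in mask, then set the bits in val */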
+static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ val |= mt76_mmio_rr(dev, offset) & ~mask;
+ mt76_mmio_wr(dev, offset, val);
+ return val;
+}
+
+static void mt76_mmio_copy(struct mt76_dev *dev, u32 offset, const void *data,
+ int len)
+{
+ __iowrite32_copy(dev->regs + offset, data, len >> 2);
+}
+
+void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
+{
+ static const struct mt76_bus_ops mt76_mmio_ops = {
+ .rr = mt76_mmio_rr,
+ .rmw = mt76_mmio_rmw,
+ .wr = mt76_mmio_wr,
+ .copy = mt76_mmio_copy,
+ };
+
+ dev->bus = &mt76_mmio_ops;
+ dev->regs = regs;
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
new file mode 100644
index 000000000000..129015c9d116
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -0,0 +1,432 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76_H
+#define __MT76_H
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/leds.h>
+#include <net/mac80211.h>
+#include "util.h"
+
+#define MT_TX_RING_SIZE 256
+#define MT_MCU_RING_SIZE 32
+#define MT_RX_BUF_SIZE 2048
+
+struct mt76_dev;
+
+struct mt76_bus_ops {
+ u32 (*rr)(struct mt76_dev *dev, u32 offset);
+ void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
+ u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
+ void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
+ int len);
+};
+
+enum mt76_txq_id {
+ MT_TXQ_VO = IEEE80211_AC_VO,
+ MT_TXQ_VI = IEEE80211_AC_VI,
+ MT_TXQ_BE = IEEE80211_AC_BE,
+ MT_TXQ_BK = IEEE80211_AC_BK,
+ MT_TXQ_PSD,
+ MT_TXQ_MCU,
+ MT_TXQ_BEACON,
+ MT_TXQ_CAB,
+ __MT_TXQ_MAX
+};
+
+enum mt76_rxq_id {
+ MT_RXQ_MAIN,
+ MT_RXQ_MCU,
+ __MT_RXQ_MAX
+};
+
+struct mt76_queue_buf {
+ dma_addr_t addr;
+ int len;
+};
+
+struct mt76_queue_entry {
+ union {
+ void *buf;
+ struct sk_buff *skb;
+ };
+ struct mt76_txwi_cache *txwi;
+ bool schedule;
+};
+
+struct mt76_queue_regs {
+ u32 desc_base;
+ u32 ring_size;
+ u32 cpu_idx;
+ u32 dma_idx;
+} __packed __aligned(4);
+
+struct mt76_queue {
+ struct mt76_queue_regs __iomem *regs;
+
+ spinlock_t lock;
+ struct mt76_queue_entry *entry;
+ struct mt76_desc *desc;
+
+ struct list_head swq;
+ int swq_queued;
+
+ u16 head;
+ u16 tail;
+ int ndesc;
+ int queued;
+ int buf_size;
+
+ u8 buf_offset;
+ u8 hw_idx;
+
+ dma_addr_t desc_dma;
+ struct sk_buff *rx_head;
+};
+
+struct mt76_queue_ops {
+ int (*init)(struct mt76_dev *dev);
+
+ int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);
+
+ int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_buf *buf, int nbufs, u32 info,
+ struct sk_buff *skb, void *txwi);
+
+ void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+ int *len, u32 *info, bool *more);
+
+ void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);
+
+ void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
+ bool flush);
+
+ void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
+};
+
+struct mt76_wcid {
+ struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
+
+ struct work_struct aggr_work;
+
+ u8 idx;
+ u8 hw_key_idx;
+
+ u8 sta:1;
+
+ u8 rx_check_pn;
+ u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
+
+ __le16 tx_rate;
+ bool tx_rate_set;
+ u8 tx_rate_nss;
+ s8 max_txpwr_adj;
+ bool sw_iv;
+};
+
+struct mt76_txq {
+ struct list_head list;
+ struct mt76_queue *hwq;
+ struct mt76_wcid *wcid;
+
+ struct sk_buff_head retry_q;
+
+ u16 agg_ssn;
+ bool send_bar;
+ bool aggr;
+};
+
+struct mt76_txwi_cache {
+ u32 txwi[8];
+ dma_addr_t dma_addr;
+ struct list_head list;
+};
+
+struct mt76_rx_tid {
+ struct rcu_head rcu_head;
+
+ struct mt76_dev *dev;
+
+ spinlock_t lock;
+ struct delayed_work reorder_work;
+
+ u16 head;
+ u8 size;
+ u8 nframes;
+
+ u8 started:1, stopped:1, timer_pending:1;
+
+ struct sk_buff *reorder_buf[];
+};
+
+enum {
+ MT76_STATE_INITIALIZED,
+ MT76_STATE_RUNNING,
+ MT76_SCANNING,
+ MT76_RESET,
+};
+
+struct mt76_hw_cap {
+ bool has_2ghz;
+ bool has_5ghz;
+};
+
+struct mt76_driver_ops {
+ u16 txwi_size;
+
+ void (*update_survey)(struct mt76_dev *dev);
+
+ int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, u32 *tx_info);
+
+ void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush);
+
+ void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+
+ void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
+};
+
+struct mt76_channel_state {
+ u64 cc_active;
+ u64 cc_busy;
+};
+
+struct mt76_sband {
+ struct ieee80211_supported_band sband;
+ struct mt76_channel_state *chan;
+};
+
+struct mt76_dev {
+ struct ieee80211_hw *hw;
+ struct cfg80211_chan_def chandef;
+ struct ieee80211_channel *main_chan;
+
+ spinlock_t lock;
+ spinlock_t cc_lock;
+ const struct mt76_bus_ops *bus;
+ const struct mt76_driver_ops *drv;
+ void __iomem *regs;
+ struct device *dev;
+
+ struct net_device napi_dev;
+ struct napi_struct napi[__MT_RXQ_MAX];
+ struct sk_buff_head rx_skb[__MT_RXQ_MAX];
+
+ struct list_head txwi_cache;
+ struct mt76_queue q_tx[__MT_TXQ_MAX];
+ struct mt76_queue q_rx[__MT_RXQ_MAX];
+ const struct mt76_queue_ops *queue_ops;
+
+ u8 macaddr[ETH_ALEN];
+ u32 rev;
+ unsigned long state;
+
+ struct mt76_sband sband_2g;
+ struct mt76_sband sband_5g;
+ struct debugfs_blob_wrapper eeprom;
+ struct debugfs_blob_wrapper otp;
+ struct mt76_hw_cap cap;
+
+ u32 debugfs_reg;
+
+ struct led_classdev led_cdev;
+ char led_name[32];
+ bool led_al;
+ u8 led_pin;
+};
+
+enum mt76_phy_type {
+ MT_PHY_TYPE_CCK,
+ MT_PHY_TYPE_OFDM,
+ MT_PHY_TYPE_HT,
+ MT_PHY_TYPE_HT_GF,
+ MT_PHY_TYPE_VHT,
+};
+
+struct mt76_rate_power {
+ union {
+ struct {
+ s8 cck[4];
+ s8 ofdm[8];
+ s8 ht[16];
+ s8 vht[10];
+ };
+ s8 all[38];
+ };
+};
+
+struct mt76_rx_status {
+ struct mt76_wcid *wcid;
+
+ unsigned long reorder_time;
+
+ u8 iv[6];
+
+ u8 aggr:1;
+ u8 tid;
+ u16 seqno;
+
+ u16 freq;
+ u32 flag;
+ u8 enc_flags;
+ u8 encoding:2, bw:3;
+ u8 rate_idx;
+ u8 nss;
+ u8 band;
+ u8 signal;
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+};
+
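+/*
+ * Register access is dispatched through the bus ops embedded in struct
+ * mt76_dev, so driver code is independent of the underlying bus.
+ * Illustrative use (register name hypothetical):
+ *   mt76_set(dev, MT_SOME_REG, BIT(0));  sets a bit via read-modify-write
+ */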
+#define mt76_rr(dev, ...) (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr(dev, ...) (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
+#define mt76_rmw(dev, ...) (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr_copy(dev, ...) (dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
+
+#define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
+#define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0)
+
+#define mt76_get_field(_dev, _reg, _field) \
+ FIELD_GET(_field, mt76_rr(_dev, _reg))
+
+#define mt76_rmw_field(_dev, _reg, _field, _val) \
+ mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+#define mt76_hw(dev) (dev)->mt76.hw
+
+bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout);
+
+#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)
+
+bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout);
+
+#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
+
+void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+
+static inline u16 mt76_chip(struct mt76_dev *dev)
+{
+ return dev->rev >> 16;
+}
+
+static inline u16 mt76_rev(struct mt76_dev *dev)
+{
+ return dev->rev & 0xffff;
+}
+
+#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
+#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
+
+#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
+#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_add_buf(dev, ...) (dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
+
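+/* per-channel survey state, indexed by the channel's offset within its band */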
+static inline struct mt76_channel_state *
+mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
+{
+ struct mt76_sband *msband;
+ int idx;
+
+ if (c->band == NL80211_BAND_2GHZ)
+ msband = &dev->sband_2g;
+ else
+ msband = &dev->sband_5g;
+
+ idx = c - &msband->sband.channels[0];
+ return &msband->chan[idx];
+}
+
+int mt76_register_device(struct mt76_dev *dev, bool vht,
+ struct ieee80211_rate *rates, int n_rates);
+void mt76_unregister_device(struct mt76_dev *dev);
+
+struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
+
+int mt76_eeprom_init(struct mt76_dev *dev, int len);
+void mt76_eeprom_override(struct mt76_dev *dev);
+
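+/* recover the mac80211 containers wrapping our driver-private structs */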
+static inline struct ieee80211_txq *
+mtxq_to_txq(struct mt76_txq *mtxq)
+{
+ void *ptr = mtxq;
+
+ return container_of(ptr, struct ieee80211_txq, drv_priv);
+}
+
+static inline struct ieee80211_sta *
+wcid_to_sta(struct mt76_wcid *wcid)
+{
+ void *ptr = wcid;
+
+ if (!wcid || !wcid->sta)
+ return NULL;
+
+ return container_of(ptr, struct ieee80211_sta, drv_priv);
+}
+
+int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
+
+void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
+void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ struct mt76_wcid *wcid, struct sk_buff *skb);
+void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
+void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
+void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ bool send_bar);
+void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
+void mt76_txq_schedule_all(struct mt76_dev *dev);
+void mt76_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u16 tids, int nframes,
+ enum ieee80211_frame_release_type reason,
+ bool more_data);
+void mt76_set_channel(struct mt76_dev *dev);
+int mt76_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey);
+
+int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
+ u16 ssn, u8 size);
+void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
+
+void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key);
+
+/* internal */
+void mt76_tx_free(struct mt76_dev *dev);
+void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
+ int queue);
+void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
+void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
new file mode 100644
index 000000000000..17df17afd9bf
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_H
+#define __MT76x2_H
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/kfifo.h>
+
+#define MT7662_FIRMWARE "mt7662.bin"
+#define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
+#define MT7662_EEPROM_SIZE 512
+
+#define MT76x2_RX_RING_SIZE 256
+#define MT_RX_HEADROOM 32
+
+#define MT_MAX_CHAINS 2
+
+#define MT_CALIBRATE_INTERVAL HZ
+
+#include "mt76.h"
+#include "mt76x2_regs.h"
+#include "mt76x2_mac.h"
+#include "mt76x2_dfs.h"
+
+struct mt76x2_mcu {
+ struct mutex mutex;
+
+ wait_queue_head_t wait;
+ struct sk_buff_head res_q;
+
+ u32 msg_seq;
+};
+
+struct mt76x2_rx_freq_cal {
+ s8 high_gain[MT_MAX_CHAINS];
+ s8 rssi_offset[MT_MAX_CHAINS];
+ s8 lna_gain;
+ u32 mcu_gain;
+};
+
+struct mt76x2_calibration {
+ struct mt76x2_rx_freq_cal rx;
+
+ u8 agc_gain_init[MT_MAX_CHAINS];
+ u8 agc_gain_cur[MT_MAX_CHAINS];
+
+ int avg_rssi[MT_MAX_CHAINS];
+ int avg_rssi_all;
+
+ s8 agc_gain_adjust;
+ s8 low_gain;
+
+ u8 temp;
+
+ bool init_cal_done;
+ bool tssi_cal_done;
+ bool tssi_comp_pending;
+ bool dpd_cal_done;
+ bool channel_cal_done;
+};
+
+struct mt76x2_dev {
+ struct mt76_dev mt76; /* must be first */
+
+ struct mac_address macaddr_list[8];
+
+ struct mutex mutex;
+
+ const u16 *beacon_offsets;
+ unsigned long wcid_mask[128 / BITS_PER_LONG];
+
+ int txpower_conf;
+ int txpower_cur;
+
+ u8 txdone_seq;
+ DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x2_tx_status);
+
+ struct mt76x2_mcu mcu;
+ struct sk_buff *rx_head;
+
+ struct tasklet_struct tx_tasklet;
+ struct tasklet_struct pre_tbtt_tasklet;
+ struct delayed_work cal_work;
+ struct delayed_work mac_work;
+
+ u32 aggr_stats[32];
+
+ struct mt76_wcid global_wcid;
+ struct mt76_wcid __rcu *wcid[128];
+
+ spinlock_t irq_lock;
+ u32 irqmask;
+
+ struct sk_buff *beacons[8];
+ u8 beacon_mask;
+ u8 beacon_data_mask;
+
+ u32 rev;
+ u32 rxfilter;
+
+ u16 chainmask;
+
+ struct mt76x2_calibration cal;
+
+ s8 target_power;
+ s8 target_power_delta[2];
+ struct mt76_rate_power rate_power;
+ bool enable_tpc;
+
+ u8 coverage_class;
+ u8 slottime;
+
+ struct mt76x2_dfs_pattern_detector dfs_pd;
+};
+
+struct mt76x2_vif {
+ u8 idx;
+
+ struct mt76_wcid group_wcid;
+};
+
+struct mt76x2_sta {
+ struct mt76_wcid wcid; /* must be first */
+
+ struct mt76x2_vif *vif;
+ struct mt76x2_tx_status status;
+ int n_frames;
+};
+
+static inline bool is_mt7612(struct mt76x2_dev *dev)
+{
+ return (dev->rev >> 16) == 0x7612;
+}
+
+void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
+
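+/* adjust the cached interrupt mask and write it out to MT_INT_MASK_CSR */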
+static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask)
+{
+ mt76x2_set_irq_mask(dev, 0, mask);
+}
+
+static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask)
+{
+ mt76x2_set_irq_mask(dev, mask, 0);
+}
+
+extern const struct ieee80211_ops mt76x2_ops;
+
+struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
+int mt76x2_register_device(struct mt76x2_dev *dev);
+void mt76x2_init_debugfs(struct mt76x2_dev *dev);
+
+irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
+void mt76x2_phy_power_on(struct mt76x2_dev *dev);
+int mt76x2_init_hardware(struct mt76x2_dev *dev);
+void mt76x2_stop_hardware(struct mt76x2_dev *dev);
+int mt76x2_eeprom_init(struct mt76x2_dev *dev);
+int mt76x2_apply_calibration_data(struct mt76x2_dev *dev, int channel);
+void mt76x2_set_tx_ackto(struct mt76x2_dev *dev);
+
+int mt76x2_phy_start(struct mt76x2_dev *dev);
+int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef);
+int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
+void mt76x2_phy_calibrate(struct work_struct *work);
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
+
+int mt76x2_mcu_init(struct mt76x2_dev *dev);
+int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan);
+int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on);
+int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+ u8 channel);
+int mt76x2_mcu_cleanup(struct mt76x2_dev *dev);
+
+int mt76x2_dma_init(struct mt76x2_dev *dev);
+void mt76x2_dma_cleanup(struct mt76x2_dev *dev);
+
+void mt76x2_cleanup(struct mt76x2_dev *dev);
+
+int mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, int cmd, int seq);
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb);
+int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info);
+void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush);
+
+void mt76x2_pre_tbtt_tasklet(unsigned long arg);
+
+void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+
+void mt76x2_update_channel(struct mt76_dev *mdev);
+
+s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
+ const struct ieee80211_tx_rate *rate);
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
+void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c
new file mode 100644
index 000000000000..2629779e8d3e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_trace.h"
+
+void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ dev->irqmask &= ~clear;
+ dev->irqmask |= set;
+ mt76_wr(dev, MT_INT_MASK_CSR, dev->irqmask);
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+}
+
+void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+ mt76x2_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance)
+{
+ struct mt76x2_dev *dev = dev_instance;
+ u32 intr;
+
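+ /* read the pending interrupts and acknowledge them */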
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return IRQ_NONE;
+
+ trace_dev_irq(dev, intr, dev->irqmask);
+
+ intr &= dev->irqmask;
+
+ if (intr & MT_INT_TX_DONE_ALL) {
+ mt76x2_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ tasklet_schedule(&dev->tx_tasklet);
+ }
+
+ if (intr & MT_INT_RX_DONE(0)) {
+ mt76x2_irq_disable(dev, MT_INT_RX_DONE(0));
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE(1)) {
+ mt76x2_irq_disable(dev, MT_INT_RX_DONE(1));
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+
+ if (intr & MT_INT_PRE_TBTT)
+ tasklet_schedule(&dev->pre_tbtt_tasklet);
+
+ /* send buffered multicast frames now */
+ if (intr & MT_INT_TBTT)
+ mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+
+ if (intr & MT_INT_TX_STAT) {
+ mt76x2_mac_poll_tx_status(dev, true);
+ tasklet_schedule(&dev->tx_tasklet);
+ }
+
+ if (intr & MT_INT_GPTIMER) {
+ mt76x2_irq_disable(dev, MT_INT_GPTIMER);
+ tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
+ }
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
new file mode 100644
index 000000000000..612feb593d7d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include "mt76x2.h"
+
+static int
+mt76x2_ampdu_stat_read(struct seq_file *file, void *data)
+{
+ struct mt76x2_dev *dev = file->private;
+ int i, j;
+
+ for (i = 0; i < 4; i++) {
+ seq_puts(file, "Length: ");
+ for (j = 0; j < 8; j++)
+ seq_printf(file, "%8d | ", i * 8 + j + 1);
+ seq_puts(file, "\n");
+ seq_puts(file, "Count: ");
+ for (j = 0; j < 8; j++)
+ seq_printf(file, "%8d | ", dev->aggr_stats[i * 8 + j]);
+ seq_puts(file, "\n");
+ seq_puts(file, "--------");
+ for (j = 0; j < 8; j++)
+ seq_puts(file, "-----------");
+ seq_puts(file, "\n");
+ }
+
+ return 0;
+}
+
+static int
+mt76x2_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt76x2_ampdu_stat_read, inode->i_private);
+}
+
+static void
+seq_puts_array(struct seq_file *file, const char *str, s8 *val, int len)
+{
+ int i;
+
+ seq_printf(file, "%10s:", str);
+ for (i = 0; i < len; i++)
+ seq_printf(file, " %2d", val[i]);
+ seq_puts(file, "\n");
+}
+
+static int read_txpower(struct seq_file *file, void *data)
+{
+ struct mt76x2_dev *dev = dev_get_drvdata(file->private);
+
+ seq_printf(file, "Target power: %d\n", dev->target_power);
+
+ seq_puts_array(file, "Delta", dev->target_power_delta,
+ ARRAY_SIZE(dev->target_power_delta));
+ seq_puts_array(file, "CCK", dev->rate_power.cck,
+ ARRAY_SIZE(dev->rate_power.cck));
+ seq_puts_array(file, "OFDM", dev->rate_power.ofdm,
+ ARRAY_SIZE(dev->rate_power.ofdm));
+ seq_puts_array(file, "HT", dev->rate_power.ht,
+ ARRAY_SIZE(dev->rate_power.ht));
+ seq_puts_array(file, "VHT", dev->rate_power.vht,
+ ARRAY_SIZE(dev->rate_power.vht));
+ return 0;
+}
+
+static const struct file_operations fops_ampdu_stat = {
+ .open = mt76x2_ampdu_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
+mt76x2_dfs_stat_read(struct seq_file *file, void *data)
+{
+ int i;
+ struct mt76x2_dev *dev = file->private;
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+ seq_printf(file, "engine: %d\n", i);
+ seq_printf(file, " hw pattern detected:\t%d\n",
+ dfs_pd->stats[i].hw_pattern);
+ seq_printf(file, " hw pulse discarded:\t%d\n",
+ dfs_pd->stats[i].hw_pulse_discarded);
+ }
+
+ return 0;
+}
+
+static int
+mt76x2_dfs_stat_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt76x2_dfs_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_dfs_stat = {
+ .open = mt76x2_dfs_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void mt76x2_init_debugfs(struct mt76x2_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = mt76_register_debugfs(&dev->mt76);
+ if (!dir)
+ return;
+
+ debugfs_create_u8("temperature", S_IRUSR, dir, &dev->cal.temp);
+ debugfs_create_bool("tpc", S_IRUSR | S_IWUSR, dir, &dev->enable_tpc);
+
+ debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
+ debugfs_create_file("dfs_stats", S_IRUSR, dir, dev, &fops_dfs_stat);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
+ read_txpower);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
new file mode 100644
index 000000000000..f936dc9a5476
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+#define RADAR_SPEC(m, len, el, eh, wl, wh, \
+ w_tolerance, tl, th, t_tolerance, \
+ bl, bh, event_exp, power_jmp) \
+{ \
+ .mode = m, \
+ .avg_len = len, \
+ .e_low = el, \
+ .e_high = eh, \
+ .w_low = wl, \
+ .w_high = wh, \
+ .w_margin = w_tolerance, \
+ .t_low = tl, \
+ .t_high = th, \
+ .t_margin = t_tolerance, \
+ .b_low = bl, \
+ .b_high = bh, \
+ .event_expiration = event_exp, \
+ .pwr_jmp = power_jmp \
+}
+
+static const struct mt76x2_radar_specs etsi_radar_specs[] = {
+ /* 20MHz */
+ RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+ 0x7fffffff, 0x155cc0, 0x19dd),
+ RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+ 0x7fffffff, 0x2191c0, 0x15cc),
+ /* 40MHz */
+ RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+ 0x7fffffff, 0x155cc0, 0x19dd),
+ RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+ 0x7fffffff, 0x2191c0, 0x15cc),
+ /* 80MHz */
+ RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+ 0x7fffffff, 0x155cc0, 0x19cc),
+ RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+ 0x7fffffff, 0x155cc0, 0x19dd),
+ RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+ 0x7fffffff, 0x2191c0, 0x15cc)
+};
+
+static const struct mt76x2_radar_specs fcc_radar_specs[] = {
+ /* 20MHz */
+ RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+ 0x7fffffff, 0xfe808, 0x13dc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0xfe808, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0xfe808, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289),
+ /* 40MHz */
+ RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+ 0x7fffffff, 0xfe808, 0x13dc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0xfe808, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0xfe808, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289),
+ /* 80MHz */
+ RADAR_SPEC(0, 8, 2, 14, 106, 150, 15, 2900, 80100, 15, 0,
+ 0x7fffffff, 0xfe808, 0x16cc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0xfe808, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0xfe808, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289)
+};
+
+static const struct mt76x2_radar_specs jp_w56_radar_specs[] = {
+ /* 20MHz */
+ RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+ 0x7fffffff, 0x14c080, 0x13dc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0x14c080, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0x14c080, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289),
+ /* 40MHz */
+ RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+ 0x7fffffff, 0x14c080, 0x13dc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0x14c080, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0x14c080, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289),
+ /* 80MHz */
+ RADAR_SPEC(0, 8, 2, 9, 106, 150, 15, 2900, 80100, 15, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+ 0x7fffffff, 0x14c080, 0x19dd),
+ RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+ 0x7fffffff, 0x14c080, 0x12cc),
+ RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+ 0x3938700, 0x57bcf00, 0x1289)
+};
+
+static const struct mt76x2_radar_specs jp_w53_radar_specs[] = {
+ /* 20MHz */
+ RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ /* 40MHz */
+ RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ /* 80MHz */
+ RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 },
+ RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+ 0x7fffffff, 0x14c080, 0x16cc),
+ { 0 }
+};
+
+static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev,
+ u8 enable)
+{
+ u32 data;
+
+ data = (1 << 1) | enable;
+ mt76_wr(dev, MT_BBP(DFS, 36), data);
+}
+
+static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev)
+{
+ bool ret = false;
+ u32 current_ts, delta_ts;
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER);
+ delta_ts = current_ts - dfs_pd->chirp_pulse_ts;
+ dfs_pd->chirp_pulse_ts = current_ts;
+
+ /* count chirp pulses arriving within ~12 s (12 * 2^20 timer ticks) */
+ if (delta_ts <= (12 * (1 << 20))) {
+ if (++dfs_pd->chirp_pulse_cnt > 8)
+ ret = true;
+ } else {
+ dfs_pd->chirp_pulse_cnt = 1;
+ }
+
+ return ret;
+}
+
+static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_hw_pulse *pulse)
+{
+ u32 data;
+
+ /* select channel */
+ data = (MT_DFS_CH_EN << 16) | pulse->engine;
+ mt76_wr(dev, MT_BBP(DFS, 0), data);
+
+ /* reported period */
+ pulse->period = mt76_rr(dev, MT_BBP(DFS, 19));
+
+ /* reported width */
+ pulse->w1 = mt76_rr(dev, MT_BBP(DFS, 20));
+ pulse->w2 = mt76_rr(dev, MT_BBP(DFS, 23));
+
+ /* reported burst number */
+ pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
+}
+
+static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev,
+ struct mt76x2_dfs_hw_pulse *pulse)
+{
+ bool ret = false;
+
+ if (!pulse->period || !pulse->w1)
+ return false;
+
+ switch (dev->dfs_pd.region) {
+ case NL80211_DFS_FCC:
+ if (pulse->engine > 3)
+ break;
+
+ if (pulse->engine == 3) {
+ ret = mt76x2_dfs_check_chirp(dev);
+ break;
+ }
+
+ /* check short pulse */
+ if (pulse->w1 < 120)
+ ret = (pulse->period >= 2900 &&
+ (pulse->period <= 4700 ||
+ pulse->period >= 6400) &&
+ (pulse->period <= 6800 ||
+ pulse->period >= 10200) &&
+ pulse->period <= 61600);
+ else if (pulse->w1 < 130) /* 120 - 130 */
+ ret = (pulse->period >= 2900 &&
+ pulse->period <= 61600);
+ else
+ ret = (pulse->period >= 3500 &&
+ pulse->period <= 10100);
+ break;
+ case NL80211_DFS_ETSI:
+ if (pulse->engine >= 3)
+ break;
+
+ ret = (pulse->period >= 4900 &&
+ (pulse->period <= 10200 ||
+ pulse->period >= 12400) &&
+ pulse->period <= 100100);
+ break;
+ case NL80211_DFS_JP:
+ if (dev->mt76.chandef.chan->center_freq >= 5250 &&
+ dev->mt76.chandef.chan->center_freq <= 5350) {
+ /* JPW53 */
+ if (pulse->w1 <= 130)
+ ret = (pulse->period >= 28360 &&
+ (pulse->period <= 28700 ||
+ pulse->period >= 76900) &&
+ pulse->period <= 76940);
+ break;
+ }
+
+ if (pulse->engine > 3)
+ break;
+
+ if (pulse->engine == 3) {
+ ret = mt76x2_dfs_check_chirp(dev);
+ break;
+ }
+
+ /* check short pulse */
+ if (pulse->w1 < 120)
+ ret = (pulse->period >= 2900 &&
+ (pulse->period <= 4700 ||
+ pulse->period >= 6400) &&
+ (pulse->period <= 6800 ||
+ pulse->period >= 27560) &&
+ (pulse->period <= 27960 ||
+ pulse->period >= 28360) &&
+ (pulse->period <= 28700 ||
+ pulse->period >= 79900) &&
+ pulse->period <= 80100);
+ else if (pulse->w1 < 130) /* 120 - 130 */
+ ret = (pulse->period >= 2900 &&
+ (pulse->period <= 10100 ||
+ pulse->period >= 27560) &&
+ (pulse->period <= 27960 ||
+ pulse->period >= 28360) &&
+ (pulse->period <= 28700 ||
+ pulse->period >= 79900) &&
+ pulse->period <= 80100);
+ else
+ ret = (pulse->period >= 3900 &&
+ pulse->period <= 10100);
+ break;
+ case NL80211_DFS_UNSET:
+ default:
+ return false;
+ }
+
+ return ret;
+}
+
+static void mt76x2_dfs_tasklet(unsigned long arg)
+{
+ struct mt76x2_dev *dev = (struct mt76x2_dev *)arg;
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ u32 engine_mask;
+ int i;
+
+ if (test_bit(MT76_SCANNING, &dev->mt76.state))
+ goto out;
+
+ engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
+ if (!(engine_mask & 0xf))
+ goto out;
+
+ for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+ struct mt76x2_dfs_hw_pulse pulse;
+
+ if (!(engine_mask & (1 << i)))
+ continue;
+
+ pulse.engine = i;
+ mt76x2_dfs_get_hw_pulse(dev, &pulse);
+
+ if (!mt76x2_dfs_check_hw_pulse(dev, &pulse)) {
+ dfs_pd->stats[i].hw_pulse_discarded++;
+ continue;
+ }
+
+ /* hw detector reported a radar pattern */
+ dfs_pd->stats[i].hw_pattern++;
+ ieee80211_radar_detected(dev->mt76.hw);
+
+ /* reset hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+ return;
+ }
+
+ /* reset hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+out:
+ mt76x2_irq_enable(dev, MT_INT_GPTIMER);
+}
+
+static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
+{
+ u32 data;
+ u8 i, shift;
+ const struct mt76x2_radar_specs *radar_specs;
+
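+ /* each spec table packs one MT_DFS_NUM_ENGINES-entry block per bandwidth */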
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_40:
+ shift = MT_DFS_NUM_ENGINES;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ shift = 2 * MT_DFS_NUM_ENGINES;
+ break;
+ default:
+ shift = 0;
+ break;
+ }
+
+ switch (dev->dfs_pd.region) {
+ case NL80211_DFS_FCC:
+ radar_specs = &fcc_radar_specs[shift];
+ break;
+ case NL80211_DFS_ETSI:
+ radar_specs = &etsi_radar_specs[shift];
+ break;
+ case NL80211_DFS_JP:
+ if (dev->mt76.chandef.chan->center_freq >= 5250 &&
+ dev->mt76.chandef.chan->center_freq <= 5350)
+ radar_specs = &jp_w53_radar_specs[shift];
+ else
+ radar_specs = &jp_w56_radar_specs[shift];
+ break;
+ case NL80211_DFS_UNSET:
+ default:
+ return;
+ }
+
+ data = (MT_DFS_VGA_MASK << 16) |
+ (MT_DFS_PWR_GAIN_OFFSET << 12) |
+ (MT_DFS_PWR_DOWN_TIME << 8) |
+ (MT_DFS_SYM_ROUND << 4) |
+ (MT_DFS_DELTA_DELAY & 0xf);
+ mt76_wr(dev, MT_BBP(DFS, 2), data);
+
+ data = (MT_DFS_RX_PE_MASK << 16) | MT_DFS_PKT_END_MASK;
+ mt76_wr(dev, MT_BBP(DFS, 3), data);
+
+ for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+ /* configure engine */
+ mt76_wr(dev, MT_BBP(DFS, 0), i);
+
+ /* detection mode + avg_len */
+ data = ((radar_specs[i].avg_len & 0x1ff) << 16) |
+ (radar_specs[i].mode & 0xf);
+ mt76_wr(dev, MT_BBP(DFS, 4), data);
+
+ /* dfs energy */
+ data = ((radar_specs[i].e_high & 0x0fff) << 16) |
+ (radar_specs[i].e_low & 0x0fff);
+ mt76_wr(dev, MT_BBP(DFS, 5), data);
+
+ /* dfs period */
+ mt76_wr(dev, MT_BBP(DFS, 7), radar_specs[i].t_low);
+ mt76_wr(dev, MT_BBP(DFS, 9), radar_specs[i].t_high);
+
+ /* dfs burst */
+ mt76_wr(dev, MT_BBP(DFS, 11), radar_specs[i].b_low);
+ mt76_wr(dev, MT_BBP(DFS, 13), radar_specs[i].b_high);
+
+ /* dfs width */
+ data = ((radar_specs[i].w_high & 0x0fff) << 16) |
+ (radar_specs[i].w_low & 0x0fff);
+ mt76_wr(dev, MT_BBP(DFS, 14), data);
+
+ /* dfs margins */
+ data = (radar_specs[i].w_margin << 16) |
+ radar_specs[i].t_margin;
+ mt76_wr(dev, MT_BBP(DFS, 15), data);
+
+ /* dfs event expiration */
+ mt76_wr(dev, MT_BBP(DFS, 17), radar_specs[i].event_expiration);
+
+ /* dfs pwr adj */
+ mt76_wr(dev, MT_BBP(DFS, 30), radar_specs[i].pwr_jmp);
+ }
+
+ /* reset status */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+ mt76_wr(dev, MT_BBP(DFS, 36), 0x3);
+
+ /* enable detection */
+ mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
+ mt76_wr(dev, 0x212c, 0x0c350001);
+}
+
+void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev)
+{
+ u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
+
+ agc_r8 = mt76_rr(dev, MT_BBP(AGC, 8));
+ agc_r4 = mt76_rr(dev, MT_BBP(AGC, 4));
+
+ val_r8 = (agc_r8 & 0x00007e00) >> 9;
+ val_r4 = agc_r4 & ~0x1f000000;
+ val_r4 += (((val_r8 + 1) >> 1) << 24);
+ mt76_wr(dev, MT_BBP(AGC, 4), val_r4);
+
+ dfs_r31 = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, val_r4);
+ dfs_r31 += val_r8;
+ dfs_r31 -= (agc_r8 & 0x00000038) >> 3;
+ dfs_r31 = (dfs_r31 << 16) | 0x00000307;
+ mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31);
+
+ mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
+}
+
+void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
+{
+ struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+
+ if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ dev->dfs_pd.region != NL80211_DFS_UNSET) {
+ mt76x2_dfs_set_bbp_params(dev);
+ /* enable debug mode */
+ mt76x2_dfs_set_capture_mode_ctrl(dev, true);
+
+ mt76x2_irq_enable(dev, MT_INT_GPTIMER);
+ mt76_rmw_field(dev, MT_INT_TIMER_EN,
+ MT_INT_TIMER_EN_GP_TIMER_EN, 1);
+ } else {
+ /* disable hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 0), 0);
+ /* clear detector status */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+ mt76_wr(dev, 0x212c, 0);
+
+ mt76x2_irq_disable(dev, MT_INT_GPTIMER);
+ mt76_rmw_field(dev, MT_INT_TIMER_EN,
+ MT_INT_TIMER_EN_GP_TIMER_EN, 0);
+ }
+}
+
+void mt76x2_dfs_init_detector(struct mt76x2_dev *dev)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ dfs_pd->region = NL80211_DFS_UNSET;
+ tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet,
+ (unsigned long)dev);
+}
+
+void mt76x2_dfs_set_domain(struct mt76x2_dev *dev,
+ enum nl80211_dfs_regions region)
+{
+ struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+ if (dfs_pd->region != region) {
+ tasklet_disable(&dfs_pd->dfs_tasklet);
+ dfs_pd->region = region;
+ mt76x2_dfs_init_params(dev);
+ tasklet_enable(&dfs_pd->dfs_tasklet);
+ }
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
new file mode 100644
index 000000000000..8dbc783cc6bc
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_DFS_H
+#define __MT76x2_DFS_H
+
+#include <linux/types.h>
+#include <linux/nl80211.h>
+
+#define MT_DFS_GP_INTERVAL (10 << 4) /* 64 us unit */
+#define MT_DFS_NUM_ENGINES 4
+
+/* bbp params */
+#define MT_DFS_SYM_ROUND 0
+#define MT_DFS_DELTA_DELAY 2
+#define MT_DFS_VGA_MASK 0
+#define MT_DFS_PWR_GAIN_OFFSET 3
+#define MT_DFS_PWR_DOWN_TIME 0xf
+#define MT_DFS_RX_PE_MASK 0xff
+#define MT_DFS_PKT_END_MASK 0
+#define MT_DFS_CH_EN 0xf
+
+struct mt76x2_radar_specs {
+ u8 mode;
+ u16 avg_len;
+ u16 e_low;
+ u16 e_high;
+ u16 w_low;
+ u16 w_high;
+ u16 w_margin;
+ u32 t_low;
+ u32 t_high;
+ u16 t_margin;
+ u32 b_low;
+ u32 b_high;
+ u32 event_expiration;
+ u16 pwr_jmp;
+};
+
+struct mt76x2_dfs_hw_pulse {
+ u8 engine;
+ u32 period;
+ u32 w1;
+ u32 w2;
+ u32 burst;
+};
+
+struct mt76x2_dfs_engine_stats {
+ u32 hw_pattern;
+ u32 hw_pulse_discarded;
+};
+
+struct mt76x2_dfs_pattern_detector {
+ enum nl80211_dfs_regions region;
+
+ u8 chirp_pulse_cnt;
+ u32 chirp_pulse_ts;
+
+ struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
+ struct tasklet_struct dfs_tasklet;
+};
+
+void mt76x2_dfs_init_params(struct mt76x2_dev *dev);
+void mt76x2_dfs_init_detector(struct mt76x2_dev *dev);
+void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev);
+void mt76x2_dfs_set_domain(struct mt76x2_dev *dev,
+ enum nl80211_dfs_regions region);
+
+#endif /* __MT76x2_DFS_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
new file mode 100644
index 000000000000..fd1ec4743e0b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
+int
+mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, int cmd, int seq)
+{
+ struct mt76_queue *q = &dev->mt76.q_tx[qid];
+ struct mt76_queue_buf buf;
+ dma_addr_t addr;
+ u32 tx_info;
+
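+ /* MCU command header: message type, command, sequence, port and length */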
+ tx_info = MT_MCU_MSG_TYPE_CMD |
+ FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+ FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+ FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
+
+ addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->mt76.dev, addr))
+ return -ENOMEM;
+
+ buf.addr = addr;
+ buf.len = skb->len;
+ spin_lock_bh(&q->lock);
+ mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+ mt76_queue_kick(dev, q);
+ spin_unlock_bh(&q->lock);
+
+ return 0;
+}
+
+static int
+mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc)
+{
+ int ret;
+
+ q->regs = dev->mt76.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->hw_idx = idx;
+
+ ret = mt76_queue_alloc(dev, q);
+ if (ret)
+ return ret;
+
+ mt76x2_irq_enable(dev, MT_INT_TX_DONE(idx));
+
+ return 0;
+}
+
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ void *rxwi = skb->data;
+
+ if (q == MT_RXQ_MCU) {
+ skb_queue_tail(&dev->mcu.res_q, skb);
+ wake_up(&dev->mcu.wait);
+ return;
+ }
+
+ skb_pull(skb, sizeof(struct mt76x2_rxwi));
+ if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ mt76_rx(&dev->mt76, q, skb);
+}
+
+static int
+mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize)
+{
+ int ret;
+
+ q->regs = dev->mt76.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->buf_size = bufsize;
+
+ ret = mt76_queue_alloc(dev, q);
+ if (ret)
+ return ret;
+
+ mt76x2_irq_enable(dev, MT_INT_RX_DONE(idx));
+
+ return 0;
+}
+
+static void
+mt76x2_tx_tasklet(unsigned long data)
+{
+ struct mt76x2_dev *dev = (struct mt76x2_dev *) data;
+ int i;
+
+ mt76x2_mac_process_tx_status_fifo(dev);
+
+ for (i = MT_TXQ_MCU; i >= 0; i--)
+ mt76_queue_tx_cleanup(dev, i, false);
+
+ mt76x2_mac_poll_tx_status(dev, false);
+ mt76x2_irq_enable(dev, MT_INT_TX_DONE_ALL);
+}
+
+int mt76x2_dma_init(struct mt76x2_dev *dev)
+{
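+ /* map mac80211 access categories onto hardware tx queue indices */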
+ static const u8 wmm_queue_map[] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+ };
+ int ret;
+ int i;
+ struct mt76_txwi_cache __maybe_unused *t;
+ struct mt76_queue *q;
+
+ BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x2_txwi));
+ BUILD_BUG_ON(sizeof(struct mt76x2_rxwi) > MT_RX_HEADROOM);
+
+ mt76_dma_attach(&dev->mt76);
+
+ init_waitqueue_head(&dev->mcu.wait);
+ skb_queue_head_init(&dev->mcu.res_q);
+
+ tasklet_init(&dev->tx_tasklet, mt76x2_tx_tasklet, (unsigned long) dev);
+
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+ for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
+ ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[i],
+ wmm_queue_map[i], MT_TX_RING_SIZE);
+ if (ret)
+ return ret;
+ }
+
+ ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+ MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt76x2_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+ MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ q = &dev->mt76.q_rx[MT_RXQ_MAIN];
+ q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x2_rxwi);
+ ret = mt76x2_init_rx_queue(dev, q, 0, MT76x2_RX_RING_SIZE, MT_RX_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ return mt76_init_queues(dev);
+}
+
+void mt76x2_dma_cleanup(struct mt76x2_dev *dev)
+{
+ tasklet_kill(&dev->tx_tasklet);
+ mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
new file mode 100644
index 000000000000..47f79d83fcb4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_DMA_H
+#define __MT76x2_DMA_H
+
+#include "dma.h"
+
+#define MT_TXD_INFO_LEN GENMASK(13, 0)
+#define MT_TXD_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_INFO_TX_BURST BIT(17)
+#define MT_TXD_INFO_80211 BIT(19)
+#define MT_TXD_INFO_TSO BIT(20)
+#define MT_TXD_INFO_CSO BIT(21)
+#define MT_TXD_INFO_WIV BIT(24)
+#define MT_TXD_INFO_QSEL GENMASK(26, 25)
+#define MT_TXD_INFO_TCO BIT(29)
+#define MT_TXD_INFO_UCO BIT(30)
+#define MT_TXD_INFO_ICO BIT(31)
+
+#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
+#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
+#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
+#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
+#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
+#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
+#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
+
+/* MCU request message header */
+#define MT_MCU_MSG_LEN GENMASK(15, 0)
+#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
+#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
+#define MT_MCU_MSG_PORT GENMASK(29, 27)
+#define MT_MCU_MSG_TYPE GENMASK(31, 30)
+#define MT_MCU_MSG_TYPE_CMD BIT(30)
+
+enum mt76x2_qsel {
+ MT_QSEL_MGMT,
+ MT_QSEL_HCCA,
+ MT_QSEL_EDCA,
+ MT_QSEL_EDCA_2,
+};
+
+enum dma_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
new file mode 100644
index 000000000000..9c9bf3e785ba
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
@@ -0,0 +1,664 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+#define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1
+
+static int
+mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field,
+ void *dest, int len)
+{
+ if (field + len > dev->mt76.eeprom.size)
+ return -1;
+
+ memcpy(dest, dev->mt76.eeprom.data + field, len);
+ return 0;
+}
+
+static int
+mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
+{
+ void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR;
+
+ memcpy(dev->mt76.macaddr, src, ETH_ALEN);
+ return 0;
+}
+
+static void
+mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
+{
+ u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+ switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
+ case BOARD_TYPE_5GHZ:
+ dev->mt76.cap.has_5ghz = true;
+ break;
+ case BOARD_TYPE_2GHZ:
+ dev->mt76.cap.has_2ghz = true;
+ break;
+ default:
+ dev->mt76.cap.has_2ghz = true;
+ dev->mt76.cap.has_5ghz = true;
+ break;
+ }
+}
+
+static int
+mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
+{
+ u32 val;
+ int i;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN |
+ MT_EFUSE_CTRL_MODE);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+ val |= MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ udelay(2);
+
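+	/* An all-ones address-out field marks a blank block; return 0xff
+	 * data instead of the (invalid) read-back.
+	 */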
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+ memset(data, 0xff, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, MT_EFUSE_DATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+static int
+mt76x2_get_efuse_data(struct mt76x2_dev *dev, void *buf, int len)
+{
+ int ret, i;
+
+ for (i = 0; i + 16 <= len; i += 16) {
+ ret = mt76x2_efuse_read(dev, i, buf + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
+{
+ u16 *efuse_w = (u16 *) efuse;
+
+ if (efuse_w[MT_EE_NIC_CONF_0] != 0)
+ return false;
+
+ if (efuse_w[MT_EE_XTAL_TRIM_1] == 0xffff)
+ return false;
+
+ if (efuse_w[MT_EE_TX_POWER_DELTA_BW40] != 0)
+ return false;
+
+ if (efuse_w[MT_EE_TX_POWER_0_START_2G] == 0xffff)
+ return false;
+
+ if (efuse_w[MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA] != 0)
+ return false;
+
+ if (efuse_w[MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE] == 0xffff)
+ return false;
+
+ return true;
+}
+
+static void
+mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
+{
+#define GROUP_5G(_id) \
+ MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id), \
+ MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1, \
+ MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id), \
+ MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1
+
+ static const u8 cal_free_bytes[] = {
+ MT_EE_XTAL_TRIM_1,
+ MT_EE_TX_POWER_EXT_PA_5G + 1,
+ MT_EE_TX_POWER_0_START_2G,
+ MT_EE_TX_POWER_0_START_2G + 1,
+ MT_EE_TX_POWER_1_START_2G,
+ MT_EE_TX_POWER_1_START_2G + 1,
+ GROUP_5G(0),
+ GROUP_5G(1),
+ GROUP_5G(2),
+ GROUP_5G(3),
+ GROUP_5G(4),
+ GROUP_5G(5),
+ MT_EE_RF_2G_TSSI_OFF_TXPOWER,
+ MT_EE_RF_2G_RX_HIGH_GAIN + 1,
+ MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN,
+ MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN + 1,
+ MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN,
+ MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN + 1,
+ MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN,
+ MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN + 1,
+ };
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 prev_grp0[4] = {
+ eeprom[MT_EE_TX_POWER_0_START_5G],
+ eeprom[MT_EE_TX_POWER_0_START_5G + 1],
+ eeprom[MT_EE_TX_POWER_1_START_5G],
+ eeprom[MT_EE_TX_POWER_1_START_5G + 1]
+ };
+ u16 val;
+ int i;
+
+ if (!mt76x2_has_cal_free_data(dev, efuse))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(cal_free_bytes); i++) {
+ int offset = cal_free_bytes[i];
+
+ eeprom[offset] = efuse[offset];
+ }
+
+ if (!(efuse[MT_EE_TX_POWER_0_START_5G] |
+ efuse[MT_EE_TX_POWER_0_START_5G + 1]))
+ memcpy(eeprom + MT_EE_TX_POWER_0_START_5G, prev_grp0, 2);
+ if (!(efuse[MT_EE_TX_POWER_1_START_5G] |
+ efuse[MT_EE_TX_POWER_1_START_5G + 1]))
+ memcpy(eeprom + MT_EE_TX_POWER_1_START_5G, prev_grp0 + 2, 2);
+
+ val = get_unaligned_le16(efuse + MT_EE_BT_RCAL_RESULT);
+ if (val != 0xffff)
+ eeprom[MT_EE_BT_RCAL_RESULT] = val & 0xff;
+
+ val = get_unaligned_le16(efuse + MT_EE_BT_VCDL_CALIBRATION);
+ if (val != 0xffff)
+ eeprom[MT_EE_BT_VCDL_CALIBRATION + 1] = val >> 8;
+
+ val = get_unaligned_le16(efuse + MT_EE_BT_PMUCFG);
+ if (val != 0xffff)
+ eeprom[MT_EE_BT_PMUCFG] = val & 0xff;
+}
+
+static int mt76x2_check_eeprom(struct mt76x2_dev *dev)
+{
+ u16 val = get_unaligned_le16(dev->mt76.eeprom.data);
+
+ if (!val)
+ val = get_unaligned_le16(dev->mt76.eeprom.data + MT_EE_PCI_ID);
+
+ switch (val) {
+ case 0x7662:
+ case 0x7612:
+ return 0;
+ default:
+ dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n", val);
+ return -EINVAL;
+ }
+}
+
+static int
+mt76x2_eeprom_load(struct mt76x2_dev *dev)
+{
+ void *efuse;
+ int len = MT7662_EEPROM_SIZE;
+ bool found;
+ int ret;
+
+ ret = mt76_eeprom_init(&dev->mt76, len);
+ if (ret < 0)
+ return ret;
+
+ found = ret;
+ if (found)
+ found = !mt76x2_check_eeprom(dev);
+
+ dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
+ dev->mt76.otp.size = len;
+ if (!dev->mt76.otp.data)
+ return -ENOMEM;
+
+ efuse = dev->mt76.otp.data;
+
+ if (mt76x2_get_efuse_data(dev, efuse, len))
+ goto out;
+
+ if (found) {
+ mt76x2_apply_cal_free_data(dev, efuse);
+ } else {
+ /* FIXME: check if efuse data is complete */
+ found = true;
+ memcpy(dev->mt76.eeprom.data, efuse, len);
+ }
+
+out:
+ if (!found)
+ return -ENOENT;
+
+ return 0;
+}
+
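+/* Decode a sign-magnitude EEPROM field. Note the inverted convention here:
+ * a set sign bit selects the positive value.
+ */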
+static inline int
+mt76x2_sign_extend(u32 val, unsigned int size)
+{
+ bool sign = val & BIT(size - 1);
+
+ val &= BIT(size - 1) - 1;
+
+ return sign ? val : -val;
+}
+
+static inline int
+mt76x2_sign_extend_optional(u32 val, unsigned int size)
+{
+ bool enable = val & BIT(size);
+
+ return enable ? mt76x2_sign_extend(val, size) : 0;
+}
+
+static bool
+field_valid(u8 val)
+{
+ return val != 0 && val != 0xff;
+}
+
+static void
+mt76x2_set_rx_gain_group(struct mt76x2_dev *dev, u8 val)
+{
+ s8 *dest = dev->cal.rx.high_gain;
+
+ if (!field_valid(val)) {
+ dest[0] = 0;
+ dest[1] = 0;
+ return;
+ }
+
+ dest[0] = mt76x2_sign_extend(val, 4);
+ dest[1] = mt76x2_sign_extend(val >> 4, 4);
+}
+
+static void
+mt76x2_set_rssi_offset(struct mt76x2_dev *dev, int chain, u8 val)
+{
+ s8 *dest = dev->cal.rx.rssi_offset;
+
+ if (!field_valid(val)) {
+ dest[chain] = 0;
+ return;
+ }
+
+ dest[chain] = mt76x2_sign_extend_optional(val, 7);
+}
+
+static enum mt76x2_cal_channel_group
+mt76x2_get_cal_channel_group(int channel)
+{
+ if (channel >= 184 && channel <= 196)
+ return MT_CH_5G_JAPAN;
+ if (channel <= 48)
+ return MT_CH_5G_UNII_1;
+ if (channel <= 64)
+ return MT_CH_5G_UNII_2;
+ if (channel <= 114)
+ return MT_CH_5G_UNII_2E_1;
+ if (channel <= 144)
+ return MT_CH_5G_UNII_2E_2;
+ return MT_CH_5G_UNII_3;
+}
+
+static u8
+mt76x2_get_5g_rx_gain(struct mt76x2_dev *dev, u8 channel)
+{
+ enum mt76x2_cal_channel_group group;
+
+ group = mt76x2_get_cal_channel_group(channel);
+ switch (group) {
+ case MT_CH_5G_JAPAN:
+ return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
+ case MT_CH_5G_UNII_1:
+ return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
+ case MT_CH_5G_UNII_2:
+ return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
+ case MT_CH_5G_UNII_2E_1:
+ return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
+ case MT_CH_5G_UNII_2E_2:
+ return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
+ default:
+ return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
+ }
+}
+
+void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ int channel = chan->hw_value;
+ s8 lna_5g[3], lna_2g;
+ u8 lna;
+ u16 val;
+
+ if (chan->band == NL80211_BAND_2GHZ)
+ val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
+ else
+ val = mt76x2_get_5g_rx_gain(dev, channel);
+
+ mt76x2_set_rx_gain_group(dev, val);
+
+ if (chan->band == NL80211_BAND_2GHZ) {
+ val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0);
+ mt76x2_set_rssi_offset(dev, 0, val);
+ mt76x2_set_rssi_offset(dev, 1, val >> 8);
+ } else {
+ val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0);
+ mt76x2_set_rssi_offset(dev, 0, val);
+ mt76x2_set_rssi_offset(dev, 1, val >> 8);
+ }
+
+ val = mt76x2_eeprom_get(dev, MT_EE_LNA_GAIN);
+ lna_2g = val & 0xff;
+ lna_5g[0] = val >> 8;
+
+ val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1);
+ lna_5g[1] = val >> 8;
+
+ val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1);
+ lna_5g[2] = val >> 8;
+
+ if (!field_valid(lna_5g[1]))
+ lna_5g[1] = lna_5g[0];
+
+ if (!field_valid(lna_5g[2]))
+ lna_5g[2] = lna_5g[0];
+
+ dev->cal.rx.mcu_gain = (lna_2g & 0xff);
+ dev->cal.rx.mcu_gain |= (lna_5g[0] & 0xff) << 8;
+ dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
+ dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
+
+ val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+ if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
+ lna_2g = 0;
+ if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
+ memset(lna_5g, 0, sizeof(lna_5g));
+
+ if (chan->band == NL80211_BAND_2GHZ)
+ lna = lna_2g;
+ else if (channel <= 64)
+ lna = lna_5g[0];
+ else if (channel <= 128)
+ lna = lna_5g[1];
+ else
+ lna = lna_5g[2];
+
+ if (lna == 0xff)
+ lna = 0;
+
+ dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8);
+}
+
+static s8
+mt76x2_rate_power_val(u8 val)
+{
+ if (!field_valid(val))
+ return 0;
+
+ return mt76x2_sign_extend_optional(val, 7);
+}
+
+void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
+ struct ieee80211_channel *chan)
+{
+ bool is_5ghz;
+ u16 val;
+
+ is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+ memset(t, 0, sizeof(*t));
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_CCK);
+ t->cck[0] = t->cck[1] = mt76x2_rate_power_val(val);
+ t->cck[2] = t->cck[3] = mt76x2_rate_power_val(val >> 8);
+
+ if (is_5ghz)
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
+ else
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
+ t->ofdm[0] = t->ofdm[1] = mt76x2_rate_power_val(val);
+ t->ofdm[2] = t->ofdm[3] = mt76x2_rate_power_val(val >> 8);
+
+ if (is_5ghz)
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
+ else
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
+ t->ofdm[4] = t->ofdm[5] = mt76x2_rate_power_val(val);
+ t->ofdm[6] = t->ofdm[7] = mt76x2_rate_power_val(val >> 8);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
+ t->ht[0] = t->ht[1] = mt76x2_rate_power_val(val);
+ t->ht[2] = t->ht[3] = mt76x2_rate_power_val(val >> 8);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
+ t->ht[4] = t->ht[5] = mt76x2_rate_power_val(val);
+ t->ht[6] = t->ht[7] = mt76x2_rate_power_val(val >> 8);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
+ t->ht[8] = t->ht[9] = mt76x2_rate_power_val(val);
+ t->ht[10] = t->ht[11] = mt76x2_rate_power_val(val >> 8);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
+ t->ht[12] = t->ht[13] = mt76x2_rate_power_val(val);
+ t->ht[14] = t->ht[15] = mt76x2_rate_power_val(val >> 8);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
+ t->vht[0] = t->vht[1] = mt76x2_rate_power_val(val);
+ t->vht[2] = t->vht[3] = mt76x2_rate_power_val(val >> 8);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
+ t->vht[4] = t->vht[5] = mt76x2_rate_power_val(val);
+ t->vht[6] = t->vht[7] = mt76x2_rate_power_val(val >> 8);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
+ if (!is_5ghz)
+ val >>= 8;
+ t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8);
+}
+
+int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
+{
+ int i;
+ s8 ret = 0;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ ret = max(ret, r->all[i]);
+
+ return ret;
+}
+
+static void
+mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan, int chain, int offset)
+{
+ int channel = chan->hw_value;
+ int delta_idx;
+ u8 data[6];
+ u16 val;
+
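+	/* The per-channel power delta is stored at a channel-dependent byte
+	 * offset within the group data.
+	 */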
+ if (channel < 6)
+ delta_idx = 3;
+ else if (channel < 11)
+ delta_idx = 4;
+ else
+ delta_idx = 5;
+
+ mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+
+ t->chain[chain].tssi_slope = data[0];
+ t->chain[chain].tssi_offset = data[1];
+ t->chain[chain].target_power = data[2];
+ t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
+ t->target_power = val >> 8;
+}
+
+static void
+mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan, int chain, int offset)
+{
+ int channel = chan->hw_value;
+ enum mt76x2_cal_channel_group group;
+ int delta_idx;
+ u16 val;
+ u8 data[5];
+
+ group = mt76x2_get_cal_channel_group(channel);
+ offset += group * MT_TX_POWER_GROUP_SIZE_5G;
+
+ if (channel >= 192)
+ delta_idx = 4;
+ else if (channel >= 184)
+ delta_idx = 3;
+ else if (channel < 44)
+ delta_idx = 3;
+ else if (channel < 52)
+ delta_idx = 4;
+ else if (channel < 58)
+ delta_idx = 3;
+ else if (channel < 98)
+ delta_idx = 4;
+ else if (channel < 106)
+ delta_idx = 3;
+ else if (channel < 116)
+ delta_idx = 4;
+ else if (channel < 130)
+ delta_idx = 3;
+ else if (channel < 149)
+ delta_idx = 4;
+ else if (channel < 157)
+ delta_idx = 3;
+ else
+ delta_idx = 4;
+
+ mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+
+ t->chain[chain].tssi_slope = data[0];
+ t->chain[chain].tssi_offset = data[1];
+ t->chain[chain].target_power = data[2];
+ t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
+ t->target_power = val & 0xff;
+}
+
+void mt76x2_get_power_info(struct mt76x2_dev *dev,
+ struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan)
+{
+ u16 bw40, bw80;
+
+ memset(t, 0, sizeof(*t));
+
+ bw40 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
+ bw80 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
+
+ if (chan->band == NL80211_BAND_5GHZ) {
+ bw40 >>= 8;
+ mt76x2_get_power_info_5g(dev, t, chan, 0,
+ MT_EE_TX_POWER_0_START_5G);
+ mt76x2_get_power_info_5g(dev, t, chan, 1,
+ MT_EE_TX_POWER_1_START_5G);
+ } else {
+ mt76x2_get_power_info_2g(dev, t, chan, 0,
+ MT_EE_TX_POWER_0_START_2G);
+ mt76x2_get_power_info_2g(dev, t, chan, 1,
+ MT_EE_TX_POWER_1_START_2G);
+ }
+
+ if (mt76x2_tssi_enabled(dev) || !field_valid(t->target_power))
+ t->target_power = t->chain[0].target_power;
+
+ t->delta_bw40 = mt76x2_rate_power_val(bw40);
+ t->delta_bw80 = mt76x2_rate_power_val(bw80);
+}
+
+int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
+{
+ enum nl80211_band band = dev->mt76.chandef.chan->band;
+ u16 val, slope;
+ u8 bounds;
+
+ memset(t, 0, sizeof(*t));
+
+ val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+ if (!(val & MT_EE_NIC_CONF_1_TEMP_TX_ALC))
+ return -EINVAL;
+
+ if (!mt76x2_ext_pa_enabled(dev, band))
+ return -EINVAL;
+
+ val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
+ if (!(val & BIT(7)))
+ return -EINVAL;
+
+ t->temp_25_ref = val & 0x7f;
+ if (band == NL80211_BAND_5GHZ) {
+ slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
+ bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+ } else {
+ slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
+ bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80) >> 8;
+ }
+
+ t->high_slope = slope & 0xff;
+ t->low_slope = slope >> 8;
+ t->lower_bound = 0 - (bounds & 0xf);
+ t->upper_bound = (bounds >> 4) & 0xf;
+
+ return 0;
+}
+
+bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
+{
+ u16 conf0 = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+ if (band == NL80211_BAND_5GHZ)
+ return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_5G);
+ else
+ return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
+}
+
+int mt76x2_eeprom_init(struct mt76x2_dev *dev)
+{
+ int ret;
+
+ ret = mt76x2_eeprom_load(dev);
+ if (ret)
+ return ret;
+
+ mt76x2_eeprom_parse_hw_cap(dev);
+ mt76x2_eeprom_get_macaddr(dev);
+ mt76_eeprom_override(&dev->mt76);
+ dev->mt76.macaddr[0] &= ~BIT(1);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
new file mode 100644
index 000000000000..d79122728dca
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_EEPROM_H
+#define __MT76x2_EEPROM_H
+
+#include "mt76x2.h"
+
+enum mt76x2_eeprom_field {
+ MT_EE_CHIP_ID = 0x000,
+ MT_EE_VERSION = 0x002,
+ MT_EE_MAC_ADDR = 0x004,
+ MT_EE_PCI_ID = 0x00A,
+ MT_EE_NIC_CONF_0 = 0x034,
+ MT_EE_NIC_CONF_1 = 0x036,
+ MT_EE_NIC_CONF_2 = 0x042,
+
+ MT_EE_XTAL_TRIM_1 = 0x03a,
+ MT_EE_XTAL_TRIM_2 = 0x09e,
+
+ MT_EE_LNA_GAIN = 0x044,
+ MT_EE_RSSI_OFFSET_2G_0 = 0x046,
+ MT_EE_RSSI_OFFSET_2G_1 = 0x048,
+ MT_EE_RSSI_OFFSET_5G_0 = 0x04a,
+ MT_EE_RSSI_OFFSET_5G_1 = 0x04c,
+
+ MT_EE_TX_POWER_DELTA_BW40 = 0x050,
+ MT_EE_TX_POWER_DELTA_BW80 = 0x052,
+
+ MT_EE_TX_POWER_EXT_PA_5G = 0x054,
+
+ MT_EE_TX_POWER_0_START_2G = 0x056,
+ MT_EE_TX_POWER_1_START_2G = 0x05c,
+
+ /* used as byte arrays */
+#define MT_TX_POWER_GROUP_SIZE_5G 5
+#define MT_TX_POWER_GROUPS_5G 6
+ MT_EE_TX_POWER_0_START_5G = 0x062,
+
+ MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA = 0x074,
+ MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE = 0x076,
+
+ MT_EE_TX_POWER_1_START_5G = 0x080,
+
+ MT_EE_TX_POWER_CCK = 0x0a0,
+ MT_EE_TX_POWER_OFDM_2G_6M = 0x0a2,
+ MT_EE_TX_POWER_OFDM_2G_24M = 0x0a4,
+ MT_EE_TX_POWER_OFDM_5G_6M = 0x0b2,
+ MT_EE_TX_POWER_OFDM_5G_24M = 0x0b4,
+ MT_EE_TX_POWER_HT_MCS0 = 0x0a6,
+ MT_EE_TX_POWER_HT_MCS4 = 0x0a8,
+ MT_EE_TX_POWER_HT_MCS8 = 0x0aa,
+ MT_EE_TX_POWER_HT_MCS12 = 0x0ac,
+ MT_EE_TX_POWER_VHT_MCS0 = 0x0ba,
+ MT_EE_TX_POWER_VHT_MCS4 = 0x0bc,
+ MT_EE_TX_POWER_VHT_MCS8 = 0x0be,
+
+ MT_EE_RF_TEMP_COMP_SLOPE_5G = 0x0f2,
+ MT_EE_RF_TEMP_COMP_SLOPE_2G = 0x0f4,
+
+ MT_EE_RF_2G_TSSI_OFF_TXPOWER = 0x0f6,
+ MT_EE_RF_2G_RX_HIGH_GAIN = 0x0f8,
+ MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN = 0x0fa,
+ MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN = 0x0fc,
+ MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN = 0x0fe,
+
+ MT_EE_BT_RCAL_RESULT = 0x138,
+ MT_EE_BT_VCDL_CALIBRATION = 0x13c,
+ MT_EE_BT_PMUCFG = 0x13e,
+
+ __MT_EE_MAX
+};
+
+#define MT_EE_NIC_CONF_0_PA_INT_2G BIT(8)
+#define MT_EE_NIC_CONF_0_PA_INT_5G BIT(9)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
+
+enum mt76x2_board_type {
+ BOARD_TYPE_2GHZ = 1,
+ BOARD_TYPE_5GHZ = 2,
+};
+
+enum mt76x2_cal_channel_group {
+ MT_CH_5G_JAPAN,
+ MT_CH_5G_UNII_1,
+ MT_CH_5G_UNII_2,
+ MT_CH_5G_UNII_2E_1,
+ MT_CH_5G_UNII_2E_2,
+ MT_CH_5G_UNII_3,
+ __MT_CH_MAX
+};
+
+struct mt76x2_tx_power_info {
+ u8 target_power;
+
+ s8 delta_bw40;
+ s8 delta_bw80;
+
+ struct {
+ s8 tssi_slope;
+ s8 tssi_offset;
+ s8 target_power;
+ s8 delta;
+ } chain[MT_MAX_CHAINS];
+};
+
+struct mt76x2_temp_comp {
+ u8 temp_25_ref;
+ int lower_bound; /* J */
+ int upper_bound; /* J */
+ unsigned int high_slope; /* J / dB */
+ unsigned int low_slope; /* J / dB */
+};
+
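+/* EEPROM fields are 16-bit little-endian words, so only even offsets below
+ * __MT_EE_MAX are valid.
+ */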
+static inline int
+mt76x2_eeprom_get(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field)
+{
+ if ((field & 1) || field >= __MT_EE_MAX)
+ return -1;
+
+ return get_unaligned_le16(dev->mt76.eeprom.data + field);
+}
+
+void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
+ struct ieee80211_channel *chan);
+int mt76x2_get_max_rate_power(struct mt76_rate_power *r);
+void mt76x2_get_power_info(struct mt76x2_dev *dev,
+ struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan);
+int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
+bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
+void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
+
+static inline bool
+mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev)
+{
+ return mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TEMP_TX_ALC;
+}
+
+static inline bool
+mt76x2_tssi_enabled(struct mt76x2_dev *dev)
+{
+ return !mt76x2_temp_tx_alc_enabled(dev) &&
+ (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+static inline bool
+mt76x2_has_ext_lna(struct mt76x2_dev *dev)
+{
+ u32 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+
+ if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+ return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
+ else
+ return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
new file mode 100644
index 000000000000..1b00ae4465a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
@@ -0,0 +1,875 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+#include "mt76x2_mcu.h"
+
+struct mt76x2_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
+static bool
+mt76x2_wait_for_mac(struct mt76x2_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 500; i++) {
+ switch (mt76_rr(dev, MT_MAC_CSR0)) {
+ case 0:
+ case ~0:
+ break;
+ default:
+ return true;
+ }
+ usleep_range(5000, 10000);
+ }
+
+ return false;
+}
+
+static bool
+wait_for_wpdma(struct mt76x2_dev *dev)
+{
+ return mt76_poll(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+ 0, 1000);
+}
+
+static void
+mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
+{
+ u32 val;
+
+ val = MT_PBF_SYS_CTRL_MCU_RESET |
+ MT_PBF_SYS_CTRL_DMA_RESET |
+ MT_PBF_SYS_CTRL_MAC_RESET |
+ MT_PBF_SYS_CTRL_PBF_RESET |
+ MT_PBF_SYS_CTRL_ASY_RESET;
+
+ mt76_set(dev, MT_PBF_SYS_CTRL, val);
+ mt76_clear(dev, MT_PBF_SYS_CTRL, val);
+
+ mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+ mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+}
+
+static void
+mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
+ const struct mt76x2_reg_pair *data, int len)
+{
+ while (len > 0) {
+ mt76_wr(dev, data->reg, data->value);
+ len--;
+ data++;
+ }
+}
+
+static void
+mt76_write_mac_initvals(struct mt76x2_dev *dev)
+{
+#define DEFAULT_PROT_CFG \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
+ MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_20 \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
+ FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
+
+#define DEFAULT_PROT_CFG_40 \
+ (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \
+ FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
+ FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
+ FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
+
+ static const struct mt76x2_reg_pair vals[] = {
+ /* Copied from MediaTek reference source */
+ { MT_PBF_SYS_CTRL, 0x00080c00 },
+ { MT_PBF_CFG, 0x1efebcff },
+ { MT_FCE_PSE_CTRL, 0x00000001 },
+ { MT_MAC_SYS_CTRL, 0x0000000c },
+ { MT_MAX_LEN_CFG, 0x003e3f00 },
+ { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
+ { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
+ { MT_XIFS_TIME_CFG, 0x33a40d0a },
+ { MT_BKOFF_SLOT_CFG, 0x00000209 },
+ { MT_TBTT_SYNC_CFG, 0x00422010 },
+ { MT_PWR_PIN_CFG, 0x00000000 },
+ { 0x1238, 0x001700c8 },
+ { MT_TX_SW_CFG0, 0x00101001 },
+ { MT_TX_SW_CFG1, 0x00010000 },
+ { MT_TX_SW_CFG2, 0x00000000 },
+ { MT_TXOP_CTRL_CFG, 0x0400583f },
+ { MT_TX_RTS_CFG, 0x00100020 },
+ { MT_TX_TIMEOUT_CFG, 0x000a2290 },
+ { MT_TX_RETRY_CFG, 0x47f01f0f },
+ { MT_EXP_ACK_TIME, 0x002c00dc },
+ { MT_TX_PROT_CFG6, 0xe3f42004 },
+ { MT_TX_PROT_CFG7, 0xe3f42084 },
+ { MT_TX_PROT_CFG8, 0xe3f42104 },
+ { MT_PIFS_TX_CFG, 0x00060fff },
+ { MT_RX_FILTR_CFG, 0x00015f97 },
+ { MT_LEGACY_BASIC_RATE, 0x0000017f },
+ { MT_HT_BASIC_RATE, 0x00004003 },
+ { MT_PN_PAD_MODE, 0x00000003 },
+ { MT_TXOP_HLDR_ET, 0x00000002 },
+ { 0xa44, 0x00000000 },
+ { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
+ { MT_TSO_CTRL, 0x00000000 },
+ { MT_AUX_CLK_CFG, 0x00000000 },
+ { MT_DACCLK_EN_DLY_CFG, 0x00000000 },
+ { MT_TX_ALC_CFG_4, 0x00000000 },
+ { MT_TX_ALC_VGA3, 0x00000000 },
+ { MT_TX_PWR_CFG_0, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_1, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_2, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_3, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_4, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_7, 0x3a3a3a3a },
+ { MT_TX_PWR_CFG_8, 0x0000003a },
+ { MT_TX_PWR_CFG_9, 0x0000003a },
+ { MT_EFUSE_CTRL, 0x0000d000 },
+ { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a },
+ { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 },
+ { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 },
+ { MT_TX_SW_CFG3, 0x00000004 },
+ { MT_HT_FBK_TO_LEGACY, 0x00001818 },
+ { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
+ { MT_PROT_AUTO_TX_CFG, 0x00830083 },
+ { MT_HT_CTRL_CFG, 0x000001ff },
+ };
+ struct mt76x2_reg_pair prot_vals[] = {
+ { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG },
+ { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG },
+ { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 },
+ { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 },
+ { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 },
+ { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 },
+ };
+
+ mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
+ mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
+}
+
+static void
+mt76x2_fixup_xtal(struct mt76x2_dev *dev)
+{
+ u16 eep_val;
+ s8 offset = 0;
+
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+
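+	/* Low byte: sign-magnitude trim offset, 0xff when unprogrammed */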
+ offset = eep_val & 0x7f;
+ if ((eep_val & 0xff) == 0xff)
+ offset = 0;
+ else if (eep_val & 0x80)
+ offset = 0 - offset;
+
+ eep_val >>= 8;
+ if (eep_val == 0x00 || eep_val == 0xff) {
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+ eep_val &= 0xff;
+
+ if (eep_val == 0x00 || eep_val == 0xff)
+ eep_val = 0x14;
+ }
+
+ eep_val &= 0x7f;
+ mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
+ mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);
+
+ eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+ case 0:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+ break;
+ case 1:
+ mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mt76x2_init_beacon_offsets(struct mt76x2_dev *dev)
+{
+ u16 base = MT_BEACON_BASE;
+ u32 regs[4] = {};
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ u16 addr = dev->beacon_offsets[i];
+
+ regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+ }
+
+ for (i = 0; i < 4; i++)
+ mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
+{
+ static const u8 null_addr[ETH_ALEN] = {};
+ const u8 *macaddr = dev->mt76.macaddr;
+ u32 val;
+ int i, k;
+
+ if (!mt76x2_wait_for_mac(dev))
+ return -ETIMEDOUT;
+
+ val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+
+ val &= ~(MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_DMA_BURST_SIZE);
+ val |= FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3);
+
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+
+ mt76x2_mac_pbf_init(dev);
+ mt76_write_mac_initvals(dev);
+ mt76x2_fixup_xtal(dev);
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
+
+ if (is_mt7612(dev))
+ mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+ mt76_set(dev, MT_EXT_CCA_CFG, 0x0000f000);
+ mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+ mt76_wr(dev, MT_RF_BYPASS_0, 0x06000000);
+ mt76_wr(dev, MT_RF_SETTING_0, 0x08800000);
+ usleep_range(5000, 10000);
+ mt76_wr(dev, MT_RF_BYPASS_0, 0x00000000);
+
+ mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401);
+ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4));
+
+ mt76_wr(dev, MT_MAC_BSSID_DW0, get_unaligned_le32(macaddr));
+ mt76_wr(dev, MT_MAC_BSSID_DW1, get_unaligned_le16(macaddr + 4) |
+ FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
+ MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
+
+ /* Fire a pre-TBTT interrupt 8 ms before TBTT */
+ mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
+ 8 << 4);
+ mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
+ MT_DFS_GP_INTERVAL);
+ mt76_wr(dev, MT_INT_TIMER_EN, 0);
+
+ mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
+ if (!hard)
+ return 0;
+
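+	/* Hard reset: clear all WCID, key, BSSID and beacon state and drain
+	 * the TX status FIFO.
+	 */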
+ for (i = 0; i < 256 / 32; i++)
+ mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
+
+ for (i = 0; i < 256; i++)
+ mt76x2_mac_wcid_setup(dev, i, 0, NULL);
+
+ for (i = 0; i < 16; i++)
+ for (k = 0; k < 4; k++)
+ mt76x2_mac_shared_key_setup(dev, i, k, NULL);
+
+ for (i = 0; i < 8; i++) {
+ mt76x2_mac_set_bssid(dev, i, null_addr);
+ mt76x2_mac_set_beacon(dev, i, NULL);
+ }
+
+ for (i = 0; i < 16; i++)
+ mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ mt76_wr(dev, MT_CH_TIME_CFG,
+ MT_CH_TIME_CFG_TIMER_EN |
+ MT_CH_TIME_CFG_TX_AS_BUSY |
+ MT_CH_TIME_CFG_RX_AS_BUSY |
+ MT_CH_TIME_CFG_NAV_AS_BUSY |
+ MT_CH_TIME_CFG_EIFS_AS_BUSY |
+ FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
+
+ mt76x2_init_beacon_offsets(dev);
+
+ mt76x2_set_tx_ackto(dev);
+
+ return 0;
+}
+
+int mt76x2_mac_start(struct mt76x2_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ for (i = 0; i < 16; i++)
+ mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats));
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ wait_for_wpdma(dev);
+ usleep_range(50, 100);
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN);
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ mt76x2_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ MT_INT_TX_STAT);
+
+ return 0;
+}
+
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
+{
+ bool stopped = false;
+ u32 rts_cfg;
+ int i;
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+ /* Wait for MAC to become idle */
+ for (i = 0; i < 300; i++) {
+ if (mt76_rr(dev, MT_MAC_STATUS) &
+ (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX))
+ continue;
+
+ if (mt76_rr(dev, MT_BBP(IBI, 12)))
+ continue;
+
+ stopped = true;
+ break;
+ }
+
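+	/* If the MAC will not go idle, force it by pulsing what appear to be
+	 * the BBP core reset bits.
+	 */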
+ if (force && !stopped) {
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+ }
+
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+}
+
+void mt76x2_mac_resume(struct mt76x2_dev *dev)
+{
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+}
+
+static void
+mt76x2_power_on_rf_patch(struct mt76x2_dev *dev)
+{
+ mt76_set(dev, 0x10130, BIT(0) | BIT(16));
+ udelay(1);
+
+ mt76_clear(dev, 0x1001c, 0xff);
+ mt76_set(dev, 0x1001c, 0x30);
+
+ mt76_wr(dev, 0x10014, 0x484f);
+ udelay(1);
+
+ mt76_set(dev, 0x10130, BIT(17));
+ udelay(125);
+
+ mt76_clear(dev, 0x10130, BIT(16));
+ udelay(50);
+
+ mt76_set(dev, 0x1014c, BIT(19) | BIT(20));
+}
+
+static void
+mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit)
+{
+ int shift = unit ? 8 : 0;
+
+ /* Enable RF BG */
+ mt76_set(dev, 0x10130, BIT(0) << shift);
+ udelay(10);
+
+ /* Enable RFDIG LDO/AFE/ABB/ADDA */
+ mt76_set(dev, 0x10130, (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift);
+ udelay(10);
+
+ /* Switch RFDIG power to internal LDO */
+ mt76_clear(dev, 0x10130, BIT(2) << shift);
+ udelay(10);
+
+ mt76x2_power_on_rf_patch(dev);
+
+ mt76_set(dev, 0x530, 0xf);
+}
+
+static void
+mt76x2_power_on(struct mt76x2_dev *dev)
+{
+ u32 val;
+
+ /* Turn on WL MTCMOS */
+ mt76_set(dev, MT_WLAN_MTC_CTRL, MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+ val = MT_WLAN_MTC_CTRL_STATE_UP |
+ MT_WLAN_MTC_CTRL_PWR_ACK |
+ MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+ mt76_poll(dev, MT_WLAN_MTC_CTRL, val, val, 1000);
+
+ mt76_clear(dev, MT_WLAN_MTC_CTRL, 0x7f << 16);
+ udelay(10);
+
+ mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+ udelay(10);
+
+ mt76_set(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+ mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xfff);
+
+	/* Take the AD/DA out of power-down */
+ mt76_clear(dev, 0x11204, BIT(3));
+
+ /* WLAN function enable */
+ mt76_set(dev, 0x10080, BIT(0));
+
+ /* Release BBP software reset */
+ mt76_clear(dev, 0x10064, BIT(18));
+
+ mt76x2_power_on_rf(dev, 0);
+ mt76x2_power_on_rf(dev, 1);
+}
+
+void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
+{
+ u8 ackto, sifs, slottime = dev->slottime;
+
+ slottime += 3 * dev->coverage_class;
+
+ sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG,
+ MT_XIFS_TIME_CFG_OFDM_SIFS);
+
+ ackto = slottime + sifs;
+ mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG,
+ MT_TX_TIMEOUT_CFG_ACKTO, ackto);
+}
+
+static void
+mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
+{
+ u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+}
+
+static void
+mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+ }
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ mt76x2_set_wlan_state(dev, enable);
+}
+
+int mt76x2_init_hardware(struct mt76x2_dev *dev)
+{
+ static const u16 beacon_offsets[16] = {
+		/* 1024 bytes per beacon */
+ 0xc000,
+ 0xc400,
+ 0xc800,
+ 0xcc00,
+ 0xd000,
+ 0xd400,
+ 0xd800,
+ 0xdc00,
+
+ /* BSS idx 8-15 not used for beacons */
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ };
+ u32 val;
+ int ret;
+
+ dev->beacon_offsets = beacon_offsets;
+ tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
+ (unsigned long) dev);
+
+ dev->chainmask = 0x202;
+ dev->global_wcid.idx = 255;
+ dev->global_wcid.hw_key_idx = -1;
+ dev->slottime = 9;
+
+ val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+ val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
+ MT_WPDMA_GLO_CFG_BIG_ENDIAN |
+ MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
+ val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+
+ mt76x2_reset_wlan(dev, true);
+ mt76x2_power_on(dev);
+
+ ret = mt76x2_eeprom_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mt76x2_mac_reset(dev, true);
+ if (ret)
+ return ret;
+
+ dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+ ret = mt76x2_dma_init(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ ret = mt76x2_mac_start(dev);
+ if (ret)
+ return ret;
+
+ ret = mt76x2_mcu_init(dev);
+ if (ret)
+ return ret;
+
+ mt76x2_mac_stop(dev, false);
+
+ return 0;
+}
+
+void mt76x2_stop_hardware(struct mt76x2_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mac_work);
+ mt76x2_mcu_set_radio_state(dev, false);
+ mt76x2_mac_stop(dev, false);
+}
+
+void mt76x2_cleanup(struct mt76x2_dev *dev)
+{
+ tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ mt76x2_stop_hardware(dev);
+ mt76x2_dma_cleanup(dev);
+ mt76x2_mcu_cleanup(dev);
+}
+
+struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = sizeof(struct mt76x2_txwi),
+ .update_survey = mt76x2_update_channel,
+ .tx_prepare_skb = mt76x2_tx_prepare_skb,
+ .tx_complete_skb = mt76x2_tx_complete_skb,
+ .rx_skb = mt76x2_queue_rx_skb,
+ .rx_poll_complete = mt76x2_rx_poll_complete,
+ };
+ struct ieee80211_hw *hw;
+ struct mt76x2_dev *dev;
+
+ hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x2_ops);
+ if (!hw)
+ return NULL;
+
+ dev = hw->priv;
+ dev->mt76.dev = pdev;
+ dev->mt76.hw = hw;
+ dev->mt76.drv = &drv_ops;
+ mutex_init(&dev->mutex);
+ spin_lock_init(&dev->irq_lock);
+
+ return dev;
+}
+
+static void mt76x2_regd_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt76x2_dev *dev = hw->priv;
+
+ mt76x2_dfs_set_domain(dev, request->dfs_region);
+}
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+}
+
+static struct ieee80211_rate mt76x2_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC)
+ }, {
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_AP)
+ },
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+ }
+};
+
+static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on,
+ u8 delay_off)
+{
+ struct mt76x2_dev *dev = container_of(mt76, struct mt76x2_dev,
+ mt76);
+ u32 val;
+
+ val = MT_LED_STATUS_DURATION(0xff) |
+ MT_LED_STATUS_OFF(delay_off) |
+ MT_LED_STATUS_ON(delay_on);
+
+ mt76_wr(dev, MT_LED_S0(mt76->led_pin), val);
+ mt76_wr(dev, MT_LED_S1(mt76->led_pin), val);
+
+ val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
+ MT_LED_CTRL_KICK(mt76->led_pin);
+ if (mt76->led_al)
+ val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+ mt76_wr(dev, MT_LED_CTRL, val);
+}
+
+static int mt76x2_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+ led_cdev);
+ u8 delta_on, delta_off;
+
+ delta_off = max_t(u8, *delay_off / 10, 1);
+ delta_on = max_t(u8, *delay_on / 10, 1);
+
+ mt76x2_led_set_config(mt76, delta_on, delta_off);
+ return 0;
+}
+
+static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+ led_cdev);
+
+ if (!brightness)
+ mt76x2_led_set_config(mt76, 0, 0xff);
+ else
+ mt76x2_led_set_config(mt76, 0xff, 0);
+}
+
+static void
+mt76x2_init_txpower(struct mt76x2_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_channel *chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76_rate_power t = {};
+ int target_power;
+ int i;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ target_power = max_t(int, (txp.chain[0].target_power +
+ txp.chain[0].delta),
+ (txp.chain[1].target_power +
+ txp.chain[1].delta));
+
+ mt76x2_get_rate_power(dev, &t, chan);
+
+ chan->max_power = mt76x2_get_max_rate_power(&t) +
+ target_power;
+ chan->max_power /= 2;
+
+ /* convert to combined output power on 2x2 devices */
+ chan->max_power += 3;
+ }
+}
+
+int mt76x2_register_device(struct mt76x2_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ void *status_fifo;
+ int fifo_size;
+ int i, ret;
+
+ fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x2_tx_status));
+ status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
+ if (!status_fifo)
+ return -ENOMEM;
+
+ kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+
+ ret = mt76x2_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ hw->queues = 4;
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+ hw->extra_tx_headroom = 2;
+
+ hw->sta_data_size = sizeof(struct mt76x2_sta);
+ hw->vif_data_size = sizeof(struct mt76x2_vif);
+
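+	/* Derive extra interface addresses from the base MAC by setting the
+	 * locally administered bit and mixing the interface index into the
+	 * following bits.
+	 */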
+ for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
+ u8 *addr = dev->macaddr_list[i].addr;
+
+ memcpy(addr, dev->mt76.macaddr, ETH_ALEN);
+
+ if (!i)
+ continue;
+
+ addr[0] |= BIT(1);
+ addr[0] ^= ((i - 1) << 2);
+ }
+ wiphy->addresses = dev->macaddr_list;
+ wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
+
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+
+ wiphy->reg_notifier = mt76x2_regd_notifier;
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
+ INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+
+ dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ mt76x2_dfs_init_detector(dev);
+
+ /* init led callbacks */
+ dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
+
+ ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
+ ARRAY_SIZE(mt76x2_rates));
+ if (ret)
+ goto fail;
+
+ mt76x2_init_debugfs(dev);
+ mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
+ mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+
+ return 0;
+
+fail:
+ mt76x2_stop_hardware(dev);
+ return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
new file mode 100644
index 000000000000..6c30b5eaa9ca
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
@@ -0,0 +1,839 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_eeprom.h"
+#include "mt76x2_trace.h"
+
+void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
+{
+ idx &= 7;
+ mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
+ mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
+ get_unaligned_le16(addr + 4));
+}
+
+static int
+mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (idx >= 8)
+ idx = 0;
+
+ if (status->band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ status->rate_idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8) {
+ idx -= 8;
+ status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ }
+
+ if (idx >= 4)
+ idx = 0;
+
+ status->rate_idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ status->rate_idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->encoding = RX_ENC_VHT;
+ status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+ status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_LDPC)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ if (rate & MT_RXWI_RATE_SGI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate & MT_RXWI_RATE_STBC)
+ status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case MT_PHY_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static __le16
+mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+ u16 rateval;
+ u8 phy, rate_idx;
+ u8 nss = 1;
+ u8 bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 4);
+ phy = MT_PHY_TYPE_VHT;
+ if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = 2;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mt76.chandef.chan->band;
+ u16 val;
+
+ r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ bw = 0;
+ }
+
+ rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rateval |= MT_RXWI_RATE_SGI;
+
+ *nss_val = nss;
+ return cpu_to_le16(rateval);
+}
+
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
+{
+ u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
+ u32 bit = MT_WCID_DROP_MASK(idx);
+
+ /* prevent unnecessary writes */
+ if ((val & bit) != (bit * drop))
+ mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+}
+
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
+{
+ spin_lock_bh(&dev->mt76.lock);
+ wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+ wcid->tx_rate_set = true;
+ spin_unlock_bh(&dev->mt76.lock);
+}
+
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+ u16 txwi_flags = 0;
+ u8 nss;
+ s8 txpwr_adj, max_txpwr_adj;
+ u8 ccmp_pn[8];
+
+ memset(txwi, 0, sizeof(*txwi));
+
+ if (wcid)
+ txwi->wcid = wcid->idx;
+ else
+ txwi->wcid = 0xff;
+
+ txwi->pktid = 1;
+
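+	/* With software IV generation, build the CCMP header (PN0, PN1,
+	 * reserved, key id | ExtIV, PN2..PN5) for the IV/EIV descriptor
+	 * fields.
+	 */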
+ if (wcid && wcid->sw_iv && key) {
+ u64 pn = atomic64_inc_return(&key->tx_pn);
+ ccmp_pn[0] = pn;
+ ccmp_pn[1] = pn >> 8;
+ ccmp_pn[2] = 0;
+ ccmp_pn[3] = 0x20 | (key->keyidx << 6);
+ ccmp_pn[4] = pn >> 16;
+ ccmp_pn[5] = pn >> 24;
+ ccmp_pn[6] = pn >> 32;
+ ccmp_pn[7] = pn >> 40;
+ txwi->iv = *((u32 *) &ccmp_pn[0]);
+		txwi->eiv = *((u32 *) &ccmp_pn[4]);
+ }
+
+ spin_lock_bh(&dev->mt76.lock);
+ if (wcid && (rate->idx < 0 || !rate->count)) {
+ txwi->rate = wcid->tx_rate;
+ max_txpwr_adj = wcid->max_txpwr_adj;
+ nss = wcid->tx_rate_nss;
+ } else {
+ txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
+ max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
+ }
+ spin_unlock_bh(&dev->mt76.lock);
+
+ txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
+ max_txpwr_adj);
+ txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E4)
+ txwi->txstream = 0x13;
+ else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
+ !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+ txwi->txstream = 0x93;
+
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+ if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+ txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+ if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ txwi_flags |= MT_TXWI_FLAGS_MMPS;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ txwi->pktid |= MT_TXWI_PKTID_PROBE;
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 63, ba_size - 1);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ ba_size = 0;
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+ txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+ FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density);
+ }
+
+ if (ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_beacon(hdr->frame_control))
+ txwi_flags |= MT_TXWI_FLAGS_TS;
+
+ txwi->flags |= cpu_to_le16(txwi_flags);
+ txwi->len_ctl = cpu_to_le16(skb->len);
+}
+
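+/* The hardware inserts alignment padding after the 802.11 header; move the
+ * header forward and drop the pad bytes.
+ */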
+static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
+{
+ int hdrlen;
+
+ if (!len)
+ return;
+
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ memmove(skb->data + len, skb->data, hdrlen);
+ skb_pull(skb, len);
+}
+
+static struct mt76_wcid *
+mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, u8 idx, bool unicast)
+{
+ struct mt76x2_sta *sta;
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->wcid[idx]);
+ if (unicast || !wcid)
+ return wcid;
+
+ sta = container_of(wcid, struct mt76x2_sta, wcid);
+ return &sta->vif->group_wcid;
+}
+
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+ void *rxi)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+ struct mt76x2_rxwi *rxwi = rxi;
+ u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
+ u32 ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
+ bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
+ int pad_len = 0;
+ u8 pn_len;
+ u8 wcid;
+ int len;
+
+ if (rxinfo & MT_RXINFO_L2PAD)
+ pad_len += 2;
+
+ if (rxinfo & MT_RXINFO_DECRYPT) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED;
+ status->flag |= RX_FLAG_MIC_STRIPPED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ }
+
+ wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
+ status->wcid = mt76x2_rx_get_sta_wcid(dev, wcid, unicast);
+
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
+ if (pn_len) {
+ int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
+ u8 *data = skb->data + offset;
+
+ status->iv[0] = data[7];
+ status->iv[1] = data[6];
+ status->iv[2] = data[5];
+ status->iv[3] = data[4];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ /*
+ * Driver CCMP validation can't deal with fragments.
+ * Let mac80211 take care of it.
+ */
+ if (rxinfo & MT_RXINFO_FRAG) {
+ status->flag &= ~RX_FLAG_IV_STRIPPED;
+ } else {
+ pad_len += pn_len << 2;
+ len -= pn_len << 2;
+ }
+ }
+
+ mt76x2_remove_hdr_pad(skb, pad_len);
+
+ if (rxinfo & MT_RXINFO_BA)
+ status->aggr = true;
+
+ if (WARN_ON_ONCE(len > skb->len))
+ return -EINVAL;
+
+ pskb_trim(skb, len);
+ status->chains = BIT(0) | BIT(1);
+ status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0);
+ status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1);
+ status->signal = max(status->chain_signal[0], status->chain_signal[1]);
+ status->freq = dev->mt76.chandef.chan->center_freq;
+ status->band = dev->mt76.chandef.chan->band;
+
+ status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
+ status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
+
+ return mt76x2_mac_process_rate(status, rate);
+}
+
+static int
+mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+ enum nl80211_band band)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case MT_PHY_BW_80:
+ txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+	default:
+		return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ return 0;
+}
+
+static void
+mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
+ struct ieee80211_tx_info *info,
+ struct mt76x2_tx_status *st, int n_frames)
+{
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ int cur_idx, last_rate;
+ int i;
+
+ if (!n_frames)
+ return;
+
+ last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+ mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
+ dev->mt76.chandef.chan->band);
+ if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+ rate[last_rate + 1].idx = -1;
+
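+	/*
+	 * The hardware only reports the final rate and the retry count;
+	 * reconstruct the intermediate attempts by assuming one rate index
+	 * step down per retry.
+	 */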
+ cur_idx = rate[last_rate].idx + st->retry;
+ for (i = 0; i <= last_rate; i++) {
+ rate[i].flags = rate[last_rate].flags;
+ rate[i].idx = max_t(int, 0, cur_idx - i);
+ rate[i].count = 1;
+ }
+
+ if (last_rate > 0)
+ rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+ info->status.ampdu_len = n_frames;
+ info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+ if (st->pktid & MT_TXWI_PKTID_PROBE)
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+static void
+mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
+ u8 *update)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ struct mt76x2_sta *msta = NULL;
+
+ rcu_read_lock();
+ if (stat->wcid < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+ if (wcid) {
+ void *priv;
+
+ priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
+ sta = container_of(priv, struct ieee80211_sta,
+ drv_priv);
+ }
+
+ if (msta && stat->aggr) {
+ u32 stat_val, stat_cache;
+
+ stat_val = stat->rate;
+ stat_val |= ((u32) stat->retry) << 16;
+ stat_cache = msta->status.rate;
+ stat_cache |= ((u32) msta->status.retry) << 16;
+
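+		/*
+		 * Identical consecutive status entries for the same station
+		 * are coalesced (up to 32 frames) and reported as a single
+		 * A-MPDU, presumably to keep per-frame reporting overhead
+		 * down.
+		 */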
+ if (*update == 0 && stat_val == stat_cache &&
+ stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+ msta->n_frames++;
+ goto out;
+ }
+
+ mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
+ msta->n_frames);
+
+ msta->status = *stat;
+ msta->n_frames = 1;
+ *update = 0;
+ } else {
+ mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
+ *update = 1;
+ }
+
+ ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+ rcu_read_unlock();
+}
+
+void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
+{
+ struct mt76x2_tx_status stat = {};
+ unsigned long flags;
+ u8 update = 1;
+
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ return;
+
+ trace_mac_txstat_poll(dev);
+
+ while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
+ u32 stat1, stat2;
+
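+
+		/*
+		 * Read the extension word before MT_TX_STAT_FIFO; reading
+		 * the FIFO register appears to be what pops the entry.
+		 */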
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+ stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+ if (!(stat1 & MT_TX_STAT_FIFO_VALID)) {
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ stat.valid = 1;
+ stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+ stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+ stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+ stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+ stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+ stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+ stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+ trace_mac_txstat_fetch(dev, &stat);
+
+ if (!irq) {
+ mt76x2_send_tx_status(dev, &stat, &update);
+ continue;
+ }
+
+ kfifo_put(&dev->txstatus_fifo, stat);
+ }
+}
+
+static void
+mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb,
+ void *txwi_ptr)
+{
+ struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb);
+ struct mt76x2_txwi *txwi = txwi_ptr;
+
+ mt76x2_mac_poll_tx_status(dev, false);
+
+ txi->tries = 0;
+ txi->jiffies = jiffies;
+ txi->wcid = txwi->wcid;
+ txi->pktid = txwi->pktid;
+ trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
+ mt76x2_tx_complete(dev, skb);
+}
+
+void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev)
+{
+ struct mt76x2_tx_status stat;
+ u8 update = 1;
+
+ while (kfifo_get(&dev->txstatus_fifo, &stat))
+ mt76x2_send_tx_status(dev, &stat, &update);
+}
+
+void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+ if (e->txwi)
+ mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
+ else
+ dev_kfree_skb_any(e->skb);
+}
+
+static enum mt76x2_cipher_type
+mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+ struct mt76_wcid_addr addr = {};
+ u32 attr;
+
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
+ mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+
+ if (idx >= 128)
+ return;
+
+ if (mac)
+ memcpy(addr.macaddr, mac, ETH_ALEN);
+
+ mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+}
+
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x2_cipher_type cipher;
+ u8 key_data[32];
+ u8 iv_data[8];
+
+ cipher = mt76x2_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+ mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+ memset(iv_data, 0, sizeof(iv_data));
+ if (key) {
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+ !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
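+		/*
+		 * IV byte 3 carries the key index in bits 6-7 and, for
+		 * TKIP/CCMP, the Extended IV bit (0x20).
+		 */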
+ iv_data[3] = key->keyidx << 6;
+ if (cipher >= MT_CIPHER_TKIP)
+ iv_data[3] |= 0x20;
+ }
+
+ mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+ return 0;
+}
+
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76x2_cipher_type cipher;
+ u8 key_data[32];
+ u32 val;
+
+ cipher = mt76x2_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+ val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+ mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+ sizeof(key_data));
+
+ return 0;
+}
+
+static int
+mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
+{
+ int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
+ struct mt76x2_txwi txwi;
+
+ if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
+ return -ENOSPC;
+
+ mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL);
+
+ mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
+ offset += sizeof(txwi);
+
+ mt76_wr_copy(dev, offset, skb->data, skb->len);
+ return 0;
+}
+
+static int
+__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
+{
+ int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
+ int beacon_addr = dev->beacon_offsets[bcn_idx];
+ int ret = 0;
+ int i;
+
+ /* Prevent corrupt transmissions during update */
+ mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
+
+ if (skb) {
+ ret = mt76_write_beacon(dev, beacon_addr, skb);
+ if (!ret)
+ dev->beacon_data_mask |= BIT(bcn_idx) &
+ dev->beacon_mask;
+ } else {
+ dev->beacon_data_mask &= ~BIT(bcn_idx);
+ for (i = 0; i < beacon_len; i += 4)
+ mt76_wr(dev, beacon_addr + i, 0);
+ }
+
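+	/*
+	 * Bypass TX for every slot without valid beacon data; the high
+	 * byte appears to be kept set unconditionally.
+	 */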
+ mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
+
+ return ret;
+}
+
+int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
+ struct sk_buff *skb)
+{
+ bool force_update = false;
+ int bcn_idx = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
+ if (vif_idx == i) {
+ force_update = !!dev->beacons[i] ^ !!skb;
+
+ if (dev->beacons[i])
+ dev_kfree_skb(dev->beacons[i]);
+
+ dev->beacons[i] = skb;
+ __mt76x2_mac_set_beacon(dev, bcn_idx, skb);
+ } else if (force_update && dev->beacons[i]) {
+ __mt76x2_mac_set_beacon(dev, bcn_idx, dev->beacons[i]);
+ }
+
+ bcn_idx += !!dev->beacons[i];
+ }
+
+ for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
+ if (!(dev->beacon_data_mask & BIT(i)))
+ break;
+
+ __mt76x2_mac_set_beacon(dev, i, NULL);
+ }
+
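+	/*
+	 * MBEACON_N presumably holds the highest used beacon slot
+	 * (count minus one).
+	 */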
+ mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
+ bcn_idx - 1);
+ return 0;
+}
+
+void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
+{
+ u8 old_mask = dev->beacon_mask;
+ bool en;
+ u32 reg;
+
+ if (val) {
+ dev->beacon_mask |= BIT(vif_idx);
+ } else {
+ dev->beacon_mask &= ~BIT(vif_idx);
+ mt76x2_mac_set_beacon(dev, vif_idx, NULL);
+ }
+
+ if (!!old_mask == !!dev->beacon_mask)
+ return;
+
+ en = dev->beacon_mask;
+
+ mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
+ reg = MT_BEACON_TIME_CFG_BEACON_TX |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_TIMER_EN;
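+	/*
+	 * reg * en is either 0 or the full mask, so a single rmw sets or
+	 * clears all three timer bits at once.
+	 */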
+ mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
+
+ if (en)
+ mt76x2_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+ else
+ mt76x2_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+}
+
+void mt76x2_update_channel(struct mt76_dev *mdev)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76_channel_state *state;
+ u32 active, busy;
+
+ state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
+
+ busy = mt76_rr(dev, MT_CH_BUSY);
+ active = busy + mt76_rr(dev, MT_CH_IDLE);
+
+ spin_lock_bh(&dev->mt76.cc_lock);
+ state->cc_busy += busy;
+ state->cc_active += active;
+ spin_unlock_bh(&dev->mt76.cc_lock);
+}
+
+void mt76x2_mac_work(struct work_struct *work)
+{
+ struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev,
+ mac_work.work);
+ int i, idx;
+
+ mt76x2_update_channel(&dev->mt76);
+ for (i = 0, idx = 0; i < 16; i++) {
+ u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+ dev->aggr_stats[idx++] += val & 0xffff;
+ dev->aggr_stats[idx++] += val >> 16;
+ }
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
new file mode 100644
index 000000000000..8a8a25e32d5f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_MAC_H
+#define __MT76x2_MAC_H
+
+#include "mt76.h"
+
+struct mt76x2_dev;
+struct mt76x2_sta;
+struct mt76x2_vif;
+struct mt76x2_txwi;
+
+struct mt76x2_tx_status {
+ u8 valid:1;
+ u8 success:1;
+ u8 aggr:1;
+ u8 ack_req:1;
+ u8 wcid;
+ u8 pktid;
+ u8 retry;
+ u16 rate;
+} __packed __aligned(2);
+
+struct mt76x2_tx_info {
+ unsigned long jiffies;
+ u8 tries;
+
+ u8 wcid;
+ u8 pktid;
+ u8 retry;
+};
+
+struct mt76x2_rxwi {
+ __le32 rxinfo;
+
+ __le32 ctl;
+
+ __le16 tid_sn;
+ __le16 rate;
+
+ u8 rssi[4];
+
+ __le32 bbp_rxinfo[4];
+};
+
+#define MT_RXINFO_BA BIT(0)
+#define MT_RXINFO_DATA BIT(1)
+#define MT_RXINFO_NULL BIT(2)
+#define MT_RXINFO_FRAG BIT(3)
+#define MT_RXINFO_UNICAST BIT(4)
+#define MT_RXINFO_MULTICAST BIT(5)
+#define MT_RXINFO_BROADCAST BIT(6)
+#define MT_RXINFO_MYBSS BIT(7)
+#define MT_RXINFO_CRCERR BIT(8)
+#define MT_RXINFO_ICVERR BIT(9)
+#define MT_RXINFO_MICERR BIT(10)
+#define MT_RXINFO_AMSDU BIT(11)
+#define MT_RXINFO_HTC BIT(12)
+#define MT_RXINFO_RSSI BIT(13)
+#define MT_RXINFO_L2PAD BIT(14)
+#define MT_RXINFO_AMPDU BIT(15)
+#define MT_RXINFO_DECRYPT BIT(16)
+#define MT_RXINFO_BSSIDX3 BIT(17)
+#define MT_RXINFO_WAPI_KEY BIT(18)
+#define MT_RXINFO_PN_LEN GENMASK(21, 19)
+#define MT_RXINFO_SW_FTYPE0 BIT(22)
+#define MT_RXINFO_SW_FTYPE1 BIT(23)
+#define MT_RXINFO_PROBE_RESP BIT(24)
+#define MT_RXINFO_BEACON BIT(25)
+#define MT_RXINFO_DISASSOC BIT(26)
+#define MT_RXINFO_DEAUTH BIT(27)
+#define MT_RXINFO_ACTION BIT(28)
+#define MT_RXINFO_TCP_SUM_ERR BIT(30)
+#define MT_RXINFO_IP_SUM_ERR BIT(31)
+
+#define MT_RXWI_CTL_WCID GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN GENMASK(29, 16)
+#define MT_RXWI_CTL_EOF BIT(31)
+
+#define MT_RXWI_TID GENMASK(3, 0)
+#define MT_RXWI_SN GENMASK(15, 4)
+
+#define MT_RXWI_RATE_INDEX GENMASK(5, 0)
+#define MT_RXWI_RATE_LDPC BIT(6)
+#define MT_RXWI_RATE_BW GENMASK(8, 7)
+#define MT_RXWI_RATE_SGI BIT(9)
+#define MT_RXWI_RATE_STBC BIT(10)
+#define MT_RXWI_RATE_LDPC_EXSYM BIT(11)
+#define MT_RXWI_RATE_PHY GENMASK(15, 13)
+
+#define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0)
+#define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4)
+
+#define MT_TX_PWR_ADJ GENMASK(3, 0)
+
+enum mt76x2_phy_bandwidth {
+ MT_PHY_BW_20,
+ MT_PHY_BW_40,
+ MT_PHY_BW_80,
+};
+
+#define MT_TXWI_FLAGS_FRAG BIT(0)
+#define MT_TXWI_FLAGS_MMPS BIT(1)
+#define MT_TXWI_FLAGS_CFACK BIT(2)
+#define MT_TXWI_FLAGS_TS BIT(3)
+#define MT_TXWI_FLAGS_AMPDU BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8)
+#define MT_TXWI_FLAGS_NDPS BIT(10)
+#define MT_TXWI_FLAGS_RTSBWSIG BIT(11)
+#define MT_TXWI_FLAGS_NDP_BW GENMASK(13, 12)
+#define MT_TXWI_FLAGS_SOUND BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15)
+
+#define MT_TXWI_ACK_CTL_REQ BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
+
+#define MT_TXWI_PKTID_PROBE BIT(7)
+
+struct mt76x2_txwi {
+ __le16 flags;
+ __le16 rate;
+ u8 ack_ctl;
+ u8 wcid;
+ __le16 len_ctl;
+ __le32 iv;
+ __le32 eiv;
+ u8 aid;
+ u8 txstream;
+ u8 ctl2;
+ u8 pktid;
+} __packed __aligned(4);
+
+static inline struct mt76x2_tx_info *
+mt76x2_skb_tx_info(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ return (void *) info->status.status_driver_data;
+}
+
+int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard);
+int mt76x2_mac_start(struct mt76x2_dev *dev);
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force);
+void mt76x2_mac_resume(struct mt76x2_dev *dev);
+void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr);
+
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+ void *rxi);
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta);
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate);
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop);
+
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key);
+
+int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
+ struct sk_buff *skb);
+void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val);
+
+void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq);
+void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev);
+
+void mt76x2_mac_work(struct work_struct *work);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
new file mode 100644
index 000000000000..bf26284b9989
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
@@ -0,0 +1,577 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+static int
+mt76x2_start(struct ieee80211_hw *hw)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ ret = mt76x2_mac_start(dev);
+ if (ret)
+ goto out;
+
+ ret = mt76x2_phy_start(dev);
+ if (ret)
+ goto out;
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
+
+ set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void
+mt76x2_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+ clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ mt76x2_stop_hardware(dev);
+ mutex_unlock(&dev->mutex);
+}
+
+static void
+mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
+{
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return;
+
+ mtxq = (struct mt76_txq *) txq->drv_priv;
+ if (txq->sta) {
+ struct mt76x2_sta *sta;
+
+ sta = (struct mt76x2_sta *) txq->sta->drv_priv;
+ mtxq->wcid = &sta->wcid;
+ } else {
+ struct mt76x2_vif *mvif;
+
+ mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
+ mtxq->wcid = &mvif->group_wcid;
+ }
+
+ mt76_txq_init(&dev->mt76, txq);
+}
+
+static int
+mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ unsigned int idx = 0;
+
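+	/*
+	 * Locally administered addresses (BIT(1) in the first octet) map
+	 * to BSS slots 1-8, chosen from the address bits that differ from
+	 * the primary MAC address.
+	 */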
+ if (vif->addr[0] & BIT(1))
+ idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
+
+ /*
+ * Client mode typically only has one configurable BSSID register,
+ * which is used for bssidx=0. This is linked to the MAC address.
+ * Since mac80211 allows changing interface types, and we cannot
+ * force the use of the primary MAC address for a station mode
+ * interface, we need some other way of configuring a per-interface
+ * remote BSSID.
+ * The hardware provides an AP-Client feature, where bssidx 0-7 are
+ * used for AP mode and bssidx 8-15 for client mode.
+ * We shift the station interface bss index by 8 to force the
+ * hardware to recognize the BSSID.
+ * The resulting bssidx mismatch for unicast frames is ignored by hw.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ idx += 8;
+
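+	/*
+	 * Allocate group (multicast) WCID entries from the top of the
+	 * table, presumably to keep them clear of station entries.
+	 */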
+ mvif->idx = idx;
+ mvif->group_wcid.idx = 254 - idx;
+ mvif->group_wcid.hw_key_idx = -1;
+ mt76x2_txq_init(dev, vif->txq);
+
+ return 0;
+}
+
+static void
+mt76x2_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mt76_txq_remove(&dev->mt76, vif->txq);
+}
+
+static int
+mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
+{
+ int ret;
+
+ mt76_set_channel(&dev->mt76);
+
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+ cancel_delayed_work_sync(&dev->cal_work);
+
+ mt76x2_mac_stop(dev, true);
+ ret = mt76x2_phy_set_channel(dev, chandef);
+
+ /* channel cycle counters read-and-clear */
+ mt76_rr(dev, MT_CH_IDLE);
+ mt76_rr(dev, MT_CH_BUSY);
+
+ mt76x2_dfs_init_params(dev);
+
+ mt76x2_mac_resume(dev);
+ tasklet_enable(&dev->dfs_pd.dfs_tasklet);
+ tasklet_enable(&dev->pre_tbtt_tasklet);
+
+ return ret;
+}
+
+static int
+mt76x2_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ else
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
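+		/*
+		 * power_level is in dBm; the internal values appear to be
+		 * in 0.5 dB units (cf. mt76x2_get_txpower).
+		 */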
+ dev->txpower_conf = hw->conf.power_level * 2;
+
+ /* convert to per-chain power for 2x2 devices */
+ dev->txpower_conf -= 6;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
+ mt76x2_phy_set_txpower(dev);
+ mt76x2_tx_set_txpwr_auto(dev, dev->txpower_conf);
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ ret = mt76x2_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static void
+mt76x2_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
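+
+/*
+ * MT76_FILTER() accumulates the FIF_* flags we accept and sets the
+ * corresponding drop bits in the hardware filter for every frame class
+ * that was not requested.
+ */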
+
+ mutex_lock(&dev->mutex);
+
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static void
+mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & BSS_CHANGED_BSSID)
+ mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid);
+
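+	/*
+	 * The interval field appears to be in 1/16 TU units, hence the
+	 * shift by four.
+	 */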
+ if (changed & BSS_CHANGED_BEACON_INT)
+ mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_INTVAL,
+ info->beacon_int << 4);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ mt76x2_mac_set_beacon_enable(dev, mvif->idx,
+ info->enable_beacon);
+ tasklet_enable(&dev->pre_tbtt_tasklet);
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ dev->slottime = slottime;
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+ MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+ }
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ int ret = 0;
+ int idx = 0;
+ int i;
+
+ mutex_lock(&dev->mutex);
+
+ idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76x2_mac_wcid_set_drop(dev, idx, false);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76x2_txq_init(dev, sta->txq[i]);
+
+ rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static int
+mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ int idx = msta->wcid.idx;
+ int i;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[idx], NULL);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76_txq_remove(&dev->mt76, sta->txq[i]);
+ mt76x2_mac_wcid_set_drop(dev, idx, true);
+ mt76_wcid_free(dev->wcid_mask, idx);
+ mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
+static void
+mt76x2_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct mt76x2_dev *dev = hw->priv;
+ int idx = msta->wcid.idx;
+
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ mt76x2_mac_wcid_set_drop(dev, idx, true);
+ mt76_stop_tx_queues(&dev->mt76, sta, true);
+ break;
+ case STA_NOTIFY_AWAKE:
+ mt76x2_mac_wcid_set_drop(dev, idx, false);
+ break;
+ }
+}
+
+static int
+mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ struct mt76x2_sta *msta;
+ struct mt76_wcid *wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ /*
+	 * The hardware does not support per-STA RX GTK; fall back
+	 * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
+ wcid = msta ? &msta->wcid : &mvif->group_wcid;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ wcid->sw_iv = true;
+ }
+ } else {
+ if (idx == wcid->hw_key_idx) {
+ wcid->hw_key_idx = -1;
+ wcid->sw_iv = true;
+ }
+
+ key = NULL;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int
+mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, qid;
+ u32 val;
+
+ qid = dev->mt76.q_tx[queue].hw_idx;
+
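+	/*
+	 * The hardware takes contention windows as exponents
+	 * (CW = 2^n - 1), so convert the window values with fls().
+	 */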
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
+
+ val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
+ FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+ mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(qid));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_TXOP(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
+
+static void
+mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ clear_bit(MT76_SCANNING, &dev->mt76.state);
+ tasklet_enable(&dev->pre_tbtt_tasklet);
+ mt76_txq_schedule_all(&dev->mt76);
+}
+
+static void
+mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+}
+
+static int
+mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ *dbm = dev->txpower_cur / 2;
+
+ /* convert from per-chain power to combined output on 2x2 devices */
+ *dbm += 3;
+
+ return 0;
+}
+
+static int
+mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct ieee80211_sta *sta = params->sta;
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+		mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn,
+				   params->buf_size);
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+ BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_START:
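+		/*
+		 * Sequence numbers occupy bits 4-15 of the hardware field
+		 * (cf. MT_RXWI_SN), hence the shift.
+		 */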
+ mtxq->agg_ssn = *ssn << 4;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+
+static void
+mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+ struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
+ struct ieee80211_tx_rate rate = {};
+
+ if (!rates)
+ return;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+ msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
+}
+
+static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
+ s16 coverage_class)
+{
+ struct mt76x2_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+ dev->coverage_class = coverage_class;
+ mt76x2_set_tx_ackto(dev);
+ mutex_unlock(&dev->mutex);
+}
+
+const struct ieee80211_ops mt76x2_ops = {
+ .tx = mt76x2_tx,
+ .start = mt76x2_start,
+ .stop = mt76x2_stop,
+ .add_interface = mt76x2_add_interface,
+ .remove_interface = mt76x2_remove_interface,
+ .config = mt76x2_config,
+ .configure_filter = mt76x2_configure_filter,
+ .bss_info_changed = mt76x2_bss_info_changed,
+ .sta_add = mt76x2_sta_add,
+ .sta_remove = mt76x2_sta_remove,
+ .sta_notify = mt76x2_sta_notify,
+ .set_key = mt76x2_set_key,
+ .conf_tx = mt76x2_conf_tx,
+ .sw_scan_start = mt76x2_sw_scan,
+ .sw_scan_complete = mt76x2_sw_scan_complete,
+ .flush = mt76x2_flush,
+ .ampdu_action = mt76x2_ampdu_action,
+ .get_txpower = mt76x2_get_txpower,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
+ .release_buffered_frames = mt76_release_buffered_frames,
+ .set_coverage_class = mt76x2_set_coverage_class,
+ .get_survey = mt76_get_survey,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
new file mode 100644
index 000000000000..15820b11f9db
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_dma.h"
+#include "mt76x2_eeprom.h"
+
+struct mt76x2_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
+struct mt76x2_patch_header {
+ char build_time[16];
+ char platform[4];
+ char hw_version[4];
+ char patch_version[4];
+ u8 pad[2];
+};
+
+static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+ memcpy(skb_put(skb, len), data, len);
+
+ return skb;
+}
+
+static struct sk_buff *
+mt76x2_mcu_get_response(struct mt76x2_dev *dev, unsigned long expires)
+{
+ unsigned long timeout;
+
+ if (!time_is_after_jiffies(expires))
+ return NULL;
+
+ timeout = expires - jiffies;
+ wait_event_timeout(dev->mcu.wait, !skb_queue_empty(&dev->mcu.res_q),
+ timeout);
+ return skb_dequeue(&dev->mcu.res_q);
+}
+
+static int
+mt76x2_mcu_msg_send(struct mt76x2_dev *dev, struct sk_buff *skb,
+ enum mcu_cmd cmd)
+{
+ unsigned long expires = jiffies + HZ;
+ int ret;
+ u8 seq;
+
+ if (!skb)
+ return -EINVAL;
+
+ mutex_lock(&dev->mcu.mutex);
+
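+	/*
+	 * Message sequence numbers wrap at 16; zero is skipped, presumably
+	 * because the firmware uses it to mean "no sequence check".
+	 */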
+ seq = ++dev->mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++dev->mcu.msg_seq & 0xf;
+
+ ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
+ if (ret)
+ goto out;
+
+ while (1) {
+ u32 *rxfce;
+ bool check_seq = false;
+
+ skb = mt76x2_mcu_get_response(dev, expires);
+ if (!skb) {
+ dev_err(dev->mt76.dev,
+ "MCU message %d (seq %d) timed out\n", cmd,
+ seq);
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ rxfce = (u32 *) skb->cb;
+
+ if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
+ check_seq = true;
+
+ dev_kfree_skb(skb);
+ if (check_seq)
+ break;
+ }
+
+out:
+ mutex_unlock(&dev->mcu.mutex);
+
+ return ret;
+}
+
+static int
+mt76pci_load_rom_patch(struct mt76x2_dev *dev)
+{
+ const struct firmware *fw = NULL;
+ struct mt76x2_patch_header *hdr;
+ bool rom_protect = !is_mt7612(dev);
+ int len, ret = 0;
+ __le32 *cur;
+ u32 patch_mask, patch_reg;
+
+ if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+ dev_err(dev->mt76.dev,
+ "Could not get hardware semaphore for ROM PATCH\n");
+ return -ETIMEDOUT;
+ }
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+ patch_mask = BIT(0);
+ patch_reg = MT_MCU_CLOCK_CTL;
+ } else {
+ patch_mask = BIT(1);
+ patch_reg = MT_MCU_COM_REG0;
+ }
+
+ if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+ dev_info(dev->mt76.dev, "ROM patch already applied\n");
+ goto out;
+ }
+
+ ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
+ if (ret)
+ goto out;
+
+ if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+ ret = -EIO;
+ dev_err(dev->mt76.dev, "Failed to load firmware\n");
+ goto out;
+ }
+
+ hdr = (struct mt76x2_patch_header *) fw->data;
+ dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);
+
+ cur = (__le32 *) (fw->data + sizeof(*hdr));
+ len = fw->size - sizeof(*hdr);
+ mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+ /* Trigger ROM */
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 4);
+
+ if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
+ dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ /* release semaphore */
+ if (rom_protect)
+ mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+ release_firmware(fw);
+ return ret;
+}
+
+static int
+mt76pci_load_firmware(struct mt76x2_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt76x2_fw_header *hdr;
+ int i, len, ret;
+ __le32 *cur;
+ u32 offset, val;
+
+ ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr))
+ goto error;
+
+ hdr = (const struct mt76x2_fw_header *) fw->data;
+
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len)
+ goto error;
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+ val = le16_to_cpu(hdr->build_ver);
+ dev_info(dev->mt76.dev, "Build: %x\n", val);
+ dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+ cur = (__le32 *) (fw->data + sizeof(*hdr));
+ len = le32_to_cpu(hdr->ilm_len);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
+ mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);
+
+ cur += len / sizeof(*cur);
+ len = le32_to_cpu(hdr->dlm_len);
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ offset = MT_MCU_DLM_ADDR_E3;
+ else
+ offset = MT_MCU_DLM_ADDR;
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
+ mt76_wr_copy(dev, offset, cur, len);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+ val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
+ mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
+
+ /* trigger firmware */
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
+ for (i = 200; i > 0; i--) {
+ val = mt76_rr(dev, MT_MCU_COM_REG0);
+
+ if (val & 1)
+ break;
+
+ msleep(10);
+ }
+
+ if (!i) {
+ dev_err(dev->mt76.dev, "Firmware failed to start\n");
+ release_firmware(fw);
+ return -ETIMEDOUT;
+ }
+
+ dev_info(dev->mt76.dev, "Firmware running!\n");
+
+ release_firmware(fw);
+
+ return ret;
+
+error:
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ release_firmware(fw);
+ return -ENOENT;
+}
+
+static int
+mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
+ u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ return mt76x2_mcu_msg_send(dev, skb, CMD_FUN_SET_OP);
+}
+
+int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+ u8 channel)
+{
+ struct sk_buff *skb;
+ struct {
+ u8 cr_mode;
+ u8 temp;
+ u8 ch;
+ u8 _pad0;
+
+ __le32 cfg;
+ } __packed __aligned(4) msg = {
+ .cr_mode = type,
+ .temp = temp_level,
+ .ch = channel,
+ };
+ u32 val;
+
+ val = BIT(31);
+ val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+ val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+ msg.cfg = cpu_to_le32(val);
+
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ return mt76x2_mcu_msg_send(dev, skb, CMD_LOAD_CR);
+}
+
+int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan)
+{
+ struct sk_buff *skb;
+ struct {
+ u8 idx;
+ u8 scan;
+ u8 bw;
+ u8 _pad0;
+
+ __le16 chainmask;
+ u8 ext_chan;
+ u8 _pad1;
+
+ } __packed __aligned(4) msg = {
+ .idx = channel,
+ .scan = scan,
+ .bw = bw,
+ .chainmask = cpu_to_le16(dev->chainmask),
+ };
+
+ /* first set the channel without the extension channel info */
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
+
+ usleep_range(5000, 10000);
+
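+	/*
+	 * 0xe0 appears to be the firmware's base value for the extension
+	 * channel index; bw_index selects the control channel position.
+	 */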
+ msg.ext_chan = 0xe0 + bw_index;
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ return mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
+}
+
+int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 mode;
+ __le32 level;
+ } __packed __aligned(4) msg = {
+ .mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
+ .level = cpu_to_le32(0),
+ };
+
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ return mt76x2_mcu_msg_send(dev, skb, CMD_POWER_SAVING_OP);
+}
+
+int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+ u32 param)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(type),
+ .value = cpu_to_le32(param),
+ };
+ int ret;
+
+ mt76_clear(dev, MT_MCU_COM_REG0, BIT(31));
+
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ ret = mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
+ BIT(31), BIT(31), 100)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
+ struct mt76x2_tssi_comp *tssi_data)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ struct mt76x2_tssi_comp data;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+ .data = *tssi_data,
+ };
+
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ return mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
+}
+
+int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+ bool force)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 channel;
+ __le32 gain_val;
+ } __packed __aligned(4) msg = {
+ .channel = cpu_to_le32(channel),
+ .gain_val = cpu_to_le32(gain),
+ };
+
+ if (force)
+ msg.channel |= cpu_to_le32(BIT(31));
+
+ skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+ return mt76x2_mcu_msg_send(dev, skb, CMD_INIT_GAIN_OP);
+}
+
+int mt76x2_mcu_init(struct mt76x2_dev *dev)
+{
+ int ret;
+
+ mutex_init(&dev->mcu.mutex);
+
+ ret = mt76pci_load_rom_patch(dev);
+ if (ret)
+ return ret;
+
+ ret = mt76pci_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ mt76x2_mcu_function_select(dev, Q_SELECT, 1);
+ return 0;
+}
+
+int mt76x2_mcu_cleanup(struct mt76x2_dev *dev)
+{
+ struct sk_buff *skb;
+
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
+ usleep_range(20000, 30000);
+
+ while ((skb = skb_dequeue(&dev->mcu.res_q)) != NULL)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
new file mode 100644
index 000000000000..d7a7e83262ce
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_MCU_H
+#define __MT76x2_MCU_H
+
+/* Register definitions */
+#define MT_MCU_CPU_CTL 0x0704
+#define MT_MCU_CLOCK_CTL 0x0708
+#define MT_MCU_RESET_CTL 0x070C
+#define MT_MCU_INT_LEVEL 0x0718
+#define MT_MCU_COM_REG0 0x0730
+#define MT_MCU_COM_REG1 0x0734
+#define MT_MCU_COM_REG2 0x0738
+#define MT_MCU_COM_REG3 0x073C
+#define MT_MCU_PCIE_REMAP_BASE1 0x0740
+#define MT_MCU_PCIE_REMAP_BASE2 0x0744
+#define MT_MCU_PCIE_REMAP_BASE3 0x0748
+#define MT_MCU_PCIE_REMAP_BASE4 0x074C
+
+#define MT_LED_CTRL 0x0770
+#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
+
+#define MT_LED_TX_BLINK_0 0x0774
+#define MT_LED_TX_BLINK_1 0x0778
+
+#define MT_LED_S0_BASE 0x077C
+#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n))
+#define MT_LED_S1_BASE 0x0780
+#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n))
+#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24)
+#define MT_LED_STATUS_OFF(_v) (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
+ MT_LED_STATUS_OFF_MASK)
+#define MT_LED_STATUS_ON_MASK GENMASK(23, 16)
+#define MT_LED_STATUS_ON(_v) (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
+ MT_LED_STATUS_ON_MASK)
+#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 8)
+#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
+ MT_LED_STATUS_DURATION_MASK)
+
+#define MT_MCU_SEMAPHORE_00 0x07B0
+#define MT_MCU_SEMAPHORE_01 0x07B4
+#define MT_MCU_SEMAPHORE_02 0x07B8
+#define MT_MCU_SEMAPHORE_03 0x07BC
+
+#define MT_MCU_ROM_PATCH_OFFSET 0x80000
+#define MT_MCU_ROM_PATCH_ADDR 0x90000
+
+#define MT_MCU_ILM_OFFSET 0x80000
+#define MT_MCU_ILM_ADDR 0x80000
+
+#define MT_MCU_DLM_OFFSET 0x100000
+#define MT_MCU_DLM_ADDR 0x90000
+#define MT_MCU_DLM_ADDR_E3 0x90800
+
+enum mcu_cmd {
+ CMD_FUN_SET_OP = 1,
+ CMD_LOAD_CR = 2,
+ CMD_INIT_GAIN_OP = 3,
+ CMD_DYNC_VGA_OP = 6,
+ CMD_TDLS_CH_SW = 7,
+ CMD_BURST_WRITE = 8,
+ CMD_READ_MODIFY_WRITE = 9,
+ CMD_RANDOM_READ = 10,
+ CMD_BURST_READ = 11,
+ CMD_RANDOM_WRITE = 12,
+ CMD_LED_MODE_OP = 16,
+ CMD_POWER_SAVING_OP = 20,
+ CMD_WOW_CONFIG = 21,
+ CMD_WOW_QUERY = 22,
+ CMD_WOW_FEATURE = 24,
+ CMD_CARRIER_DETECT_OP = 28,
+ CMD_RADOR_DETECT_OP = 29,
+ CMD_SWITCH_CHANNEL_OP = 30,
+ CMD_CALIBRATION_OP = 31,
+ CMD_BEACON_OP = 32,
+ CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+ Q_SELECT = 1,
+ BW_SETTING = 2,
+ USB2_SW_DISCONNECT = 2,
+ USB3_SW_DISCONNECT = 3,
+ LOG_FW_DEBUG_MSG = 4,
+ GET_FW_VERSION = 5,
+};
+
+enum mcu_power_mode {
+ RADIO_OFF = 0x30,
+ RADIO_ON = 0x31,
+ RADIO_OFF_AUTO_WAKEUP = 0x32,
+ RADIO_OFF_ADVANCE = 0x33,
+ RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibration {
+ MCU_CAL_R = 1,
+ MCU_CAL_TEMP_SENSOR,
+ MCU_CAL_RXDCOC,
+ MCU_CAL_RC,
+ MCU_CAL_SX_LOGEN,
+ MCU_CAL_LC,
+ MCU_CAL_TX_LOFT,
+ MCU_CAL_TXIQ,
+ MCU_CAL_TSSI,
+ MCU_CAL_TSSI_COMP,
+ MCU_CAL_DPD,
+ MCU_CAL_RXIQC_FI,
+ MCU_CAL_RXIQC_FD,
+ MCU_CAL_PWRON,
+ MCU_CAL_TX_SHAPING,
+};
+
+enum mt76x2_mcu_cr_mode {
+ MT_RF_CR,
+ MT_BBP_CR,
+ MT_RF_BBP_CR,
+ MT_HL_TEMP_CR_UPDATE,
+};
+
+struct mt76x2_tssi_comp {
+ u8 pa_mode;
+ u8 cal_mode;
+ u16 pad;
+
+ u8 slope0;
+ u8 slope1;
+ u8 offset0;
+ u8 offset1;
+} __packed __aligned(4);
+
+int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+ u32 param);
+int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data);
+int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+ bool force);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
new file mode 100644
index 000000000000..e66f047ea448
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt76x2.h"
+#include "mt76x2_trace.h"
+
+static const struct pci_device_id mt76pci_device_table[] = {
+ { PCI_DEVICE(0x14c3, 0x7662) },
+ { PCI_DEVICE(0x14c3, 0x7612) },
+ { PCI_DEVICE(0x14c3, 0x7602) },
+ { },
+};
+
+static int
+mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mt76x2_dev *dev;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ dev = mt76x2_alloc_device(&pdev->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+
+ dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+ dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+
+ ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x2_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ ret = mt76x2_register_device(dev);
+ if (ret)
+ goto error;
+
+ /* Fix up ASPM configuration */
+
+ /* RG_SSUSB_G1_CDR_BIR_LTR = 0x9 */
+ mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
+
+ /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
+ mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+
+ /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
+ mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
+
+ return 0;
+
+error:
+ ieee80211_free_hw(mt76_hw(dev));
+ return ret;
+}
+
+static void
+mt76pci_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+ mt76_unregister_device(mdev);
+ mt76x2_cleanup(dev);
+ ieee80211_free_hw(mdev->hw);
+}
+
+MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
+MODULE_FIRMWARE(MT7662_FIRMWARE);
+MODULE_FIRMWARE(MT7662_ROM_PATCH);
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct pci_driver mt76pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76pci_device_table,
+ .probe = mt76pci_probe,
+ .remove = mt76pci_remove,
+};
+
+module_pci_driver(mt76pci_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
new file mode 100644
index 000000000000..5b742749d5de
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
@@ -0,0 +1,740 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_eeprom.h"
+
+static void
+mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+ s8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+ gain -= offset / 2;
+ mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
+}
+
+static void
+mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+ s8 gain;
+
+ gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+ gain += offset;
+ mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
+}
+
+static void
+mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
+{
+ s8 *gain_adj = dev->cal.rx.high_gain;
+
+ mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
+ mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
+
+ mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
+ mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
+}
+
+static u32
+mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+ u32 val = 0;
+
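+	/* Pack four 6-bit power values, one per byte. */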
+ val |= (v1 & (BIT(6) - 1)) << 0;
+ val |= (v2 & (BIT(6) - 1)) << 8;
+ val |= (v3 & (BIT(6) - 1)) << 16;
+ val |= (v4 & (BIT(6) - 1)) << 24;
+ return val;
+}
+
+int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
+{
+ struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
+
+ rssi += cal->rssi_offset[chain];
+ rssi -= cal->lna_gain;
+
+ return rssi;
+}
+
+static u8
+mt76x2_txpower_check(int value)
+{
+ if (value < 0)
+ return 0;
+ if (value > 0x2f)
+ return 0x2f;
+ return value;
+}
+
+static void
+mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ r->all[i] += offset;
+}
+
+static void
+mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ if (r->all[i] > limit)
+ r->all[i] = limit;
+}
+
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
+{
+ enum nl80211_chan_width width = dev->mt76.chandef.width;
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ int txp_0, txp_1, delta = 0;
+ struct mt76_rate_power t = {};
+
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ if (width == NL80211_CHAN_WIDTH_40)
+ delta = txp.delta_bw40;
+ else if (width == NL80211_CHAN_WIDTH_80)
+ delta = txp.delta_bw80;
+
+ if (txp.target_power > dev->txpower_conf)
+ delta -= txp.target_power - dev->txpower_conf;
+
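+	/*
+	 * Per-rate values are stored as offsets: add the chain 0 base power
+	 * to get absolute numbers, clamp those to the configured limit,
+	 * then convert back to offsets relative to the programmed channel
+	 * power (including the bandwidth delta).
+	 */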
+ mt76x2_get_rate_power(dev, &t, chan);
+ mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power +
+ txp.chain[0].delta);
+ mt76x2_limit_rate_power(&t, dev->txpower_conf);
+ dev->txpower_cur = mt76x2_get_max_rate_power(&t);
+ mt76x2_add_rate_power_offset(&t, -(txp.chain[0].target_power +
+ txp.chain[0].delta + delta));
+ dev->target_power = txp.chain[0].target_power;
+ dev->target_power_delta[0] = txp.chain[0].delta + delta;
+ dev->target_power_delta[1] = txp.chain[1].delta + delta;
+ dev->rate_power = t;
+
+ txp_0 = mt76x2_txpower_check(txp.chain[0].target_power +
+ txp.chain[0].delta + delta);
+
+ txp_1 = mt76x2_txpower_check(txp.chain[1].target_power +
+ txp.chain[1].delta + delta);
+
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+ mt76_wr(dev, MT_TX_PWR_CFG_0,
+ mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_1,
+ mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_2,
+ mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
+ mt76_wr(dev, MT_TX_PWR_CFG_3,
+ mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_4,
+ mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_7,
+ mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
+ mt76_wr(dev, MT_TX_PWR_CFG_8,
+ mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_9,
+ mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
+}
+
+static bool
+mt76x2_channel_silent(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+
+ return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+ chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
+static bool
+mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ u32 flag = 0;
+
+ if (!mt76x2_tssi_enabled(dev))
+ return false;
+
+ if (mt76x2_channel_silent(dev))
+ return false;
+
+ if (chan->band == NL80211_BAND_2GHZ)
+ flag |= BIT(0);
+
+ if (mt76x2_ext_pa_enabled(dev, chan->band))
+ flag |= BIT(8);
+
+ mt76x2_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+ dev->cal.tssi_cal_done = true;
+ return true;
+}
+
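+/*
+ * Run the one-shot per-channel MCU calibrations. The MAC is stopped around
+ * the calibration unless the caller has already stopped it; LC calibration
+ * is only run on 5 GHz channels, and TSSI init calibration is triggered
+ * first if still pending.
+ */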
+static void
+mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+ if (dev->cal.channel_cal_done)
+ return;
+
+ if (mt76x2_channel_silent(dev))
+ return;
+
+ if (!dev->cal.tssi_cal_done)
+ mt76x2_phy_tssi_init_cal(dev);
+
+ if (!mac_stopped)
+ mt76x2_mac_stop(dev, false);
+
+ if (is_5ghz)
+ mt76x2_mcu_calibrate(dev, MCU_CAL_LC, 0);
+
+ mt76x2_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+ mt76x2_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+ mt76x2_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+ mt76x2_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+ mt76x2_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
+
+ if (!mac_stopped)
+ mt76x2_mac_resume(dev);
+
+ mt76x2_apply_gain_adj(dev);
+
+ dev->cal.channel_cal_done = true;
+}
+
+static void
+mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band)
+{
+ u32 pa_mode[2];
+ u32 pa_mode_adj;
+
+ if (band == NL80211_BAND_2GHZ) {
+ pa_mode[0] = 0x010055ff;
+ pa_mode[1] = 0x00550055;
+
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
+ } else {
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
+ }
+ } else {
+ pa_mode[0] = 0x0000ffff;
+ pa_mode[1] = 0x00ff00ff;
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
+ } else {
+ mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
+ mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
+ }
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+
+ if (mt76x2_ext_pa_enabled(dev, band))
+ pa_mode_adj = 0x04000000;
+ else
+ pa_mode_adj = 0;
+
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
+ mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
+ }
+
+ mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
+ mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
+ mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
+ mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ u32 val;
+
+ if (band == NL80211_BAND_2GHZ)
+ val = 0x3c3c023c;
+ else
+ val = 0x363c023c;
+
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
+ } else {
+ if (band == NL80211_BAND_2GHZ) {
+ u32 val = 0x0f3c3c3c;
+
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
+ } else {
+ mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
+ mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
+ mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+ }
+ }
+}
+
+static void
+mt76x2_configure_tx_delay(struct mt76x2_dev *dev, enum nl80211_band band, u8 bw)
+{
+ u32 cfg0, cfg1;
+
+ if (mt76x2_ext_pa_enabled(dev, band)) {
+ cfg0 = bw ? 0x000b0c01 : 0x00101101;
+ cfg1 = 0x00011414;
+ } else {
+ cfg0 = bw ? 0x000b0b01 : 0x00101001;
+ cfg1 = 0x00021414;
+ }
+ mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
+ mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
+
+ mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
+}
+
+static void
+mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
+{
+ int core_val, agc_val;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_80:
+ core_val = 3;
+ agc_val = 7;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ core_val = 2;
+ agc_val = 3;
+ break;
+ default:
+ core_val = 0;
+ agc_val = 1;
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+ mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+
+static void
+mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ case NL80211_BAND_5GHZ:
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+ primary_upper);
+}
+
+static void
+mt76x2_set_rx_chains(struct mt76x2_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+ val &= ~(BIT(3) | BIT(4));
+
+ if (dev->chainmask & BIT(1))
+ val |= BIT(3);
+
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
+}
+
+static void
+mt76x2_set_tx_dac(struct mt76x2_dev *dev)
+{
+ if (dev->chainmask & BIT(1))
+ mt76_set(dev, MT_BBP(TXBE, 5), 3);
+ else
+ mt76_clear(dev, MT_BBP(TXBE, 5), 3);
+}
+
+static void
+mt76x2_get_agc_gain(struct mt76x2_dev *dev, u8 *dest)
+{
+ dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN);
+ dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN);
+}
+
+static int
+mt76x2_get_rssi_gain_thresh(struct mt76x2_dev *dev)
+{
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_80:
+ return -62;
+ case NL80211_CHAN_WIDTH_40:
+ return -65;
+ default:
+ return -68;
+ }
+}
+
+static int
+mt76x2_get_low_rssi_gain_thresh(struct mt76x2_dev *dev)
+{
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_80:
+ return -76;
+ case NL80211_CHAN_WIDTH_40:
+ return -79;
+ default:
+ return -82;
+ }
+}
+
+static void
+mt76x2_phy_set_gain_val(struct mt76x2_dev *dev)
+{
+ u32 val;
+ u8 gain_val[2];
+
+ gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+ gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
+
+ if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+ val = 0x1e42 << 16;
+ else
+ val = 0x1836 << 16;
+
+ val |= 0xf8;
+
+ mt76_wr(dev, MT_BBP(AGC, 8),
+ val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0]));
+ mt76_wr(dev, MT_BBP(AGC, 9),
+ val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
+
+ if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
+ mt76x2_dfs_adjust_agc(dev);
+}
+
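+/*
+ * Trade sensitivity against false CCA events: back the VGA gain off in
+ * steps of 2 while the false CCA count stays above 800, and restore it
+ * once the count drops below 10.
+ */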
+static void
+mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
+{
+ u32 false_cca;
+ u8 limit = dev->cal.low_gain > 1 ? 4 : 16;
+
+ false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
+ mt76_rr(dev, MT_RX_STAT_1));
+ if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
+ dev->cal.agc_gain_adjust += 2;
+ else if (false_cca < 10 && dev->cal.agc_gain_adjust > 0)
+ dev->cal.agc_gain_adjust -= 2;
+ else
+ return;
+
+ mt76x2_phy_set_gain_val(dev);
+}
+
+static void
+mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
+{
+ u32 val = mt76_rr(dev, MT_BBP(AGC, 20));
+ int rssi0 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI0, val);
+ int rssi1 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI1, val);
+ u8 *gain = dev->cal.agc_gain_init;
+ u8 gain_delta;
+ int low_gain;
+
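+ /*
+ * Track per-chain RSSI as an exponential moving average in 1/256 dB
+ * units (avg = avg * 15/16 + rssi << 8); avg_rssi_all converts the
+ * two-chain sum back to dB (/ 2 / 256 == / 512).
+ */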
+ dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 + (rssi0 << 8);
+ dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 + (rssi1 << 8);
+ dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] +
+ dev->cal.avg_rssi[1]) / 512;
+
+ low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
+ (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
+
+ if (dev->cal.low_gain == low_gain) {
+ mt76x2_phy_adjust_vga_gain(dev);
+ return;
+ }
+
+ dev->cal.low_gain = low_gain;
+
+ if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+ mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
+ else
+ mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
+
+ if (low_gain) {
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
+ mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
+ mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
+ if (mt76x2_has_ext_lna(dev))
+ gain_delta = 10;
+ else
+ gain_delta = 14;
+ } else {
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
+ if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+ mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
+ else
+ mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
+ mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
+ gain_delta = 0;
+ }
+
+ dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
+ dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
+ dev->cal.agc_gain_adjust = 0;
+ mt76x2_phy_set_gain_val(dev);
+}
+
+int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_channel *chan = chandef->chan;
+ bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ enum nl80211_band band = chan->band;
+ u8 channel;
+
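+ /*
+ * Extension CCA remap table: entry n appears to map CCA0 to the
+ * primary channel's slot n within the 40/80 MHz group and flags
+ * that group in CCA_MASK.
+ */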
+ u32 ext_cca_chan[4] = {
+ [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+ [1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+ [2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+ [3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+ FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+ };
+ int ch_group_index;
+ u8 bw, bw_index;
+ int freq, freq1;
+ int ret;
+
+ dev->cal.channel_cal_done = false;
+ freq = chandef->chan->center_freq;
+ freq1 = chandef->center_freq1;
+ channel = chan->hw_value;
+
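+ /*
+ * Derive the hardware channel group: for 40 MHz the center sits
+ * +/-2 channels from the primary; for 80 MHz, freq - freq1 is one
+ * of -30/-10/+10/+30 MHz, giving group index 0..3 and a center
+ * channel offset of +6/+2/-2/-6 from the primary.
+ */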
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = 1;
+ if (freq1 > freq) {
+ bw_index = 1;
+ ch_group_index = 0;
+ } else {
+ bw_index = 3;
+ ch_group_index = 1;
+ }
+ channel += 2 - ch_group_index * 4;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ ch_group_index = (freq - freq1 + 30) / 20;
+ if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+ ch_group_index = 0;
+ bw = 2;
+ bw_index = ch_group_index;
+ channel += 6 - ch_group_index * 4;
+ break;
+ default:
+ bw = 0;
+ bw_index = 0;
+ ch_group_index = 0;
+ break;
+ }
+
+ mt76x2_read_rx_gain(dev);
+ mt76x2_phy_set_txpower_regs(dev, band);
+ mt76x2_configure_tx_delay(dev, band, bw);
+ mt76x2_phy_set_txpower(dev);
+
+ mt76x2_set_rx_chains(dev);
+ mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
+ mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+ mt76x2_set_tx_dac(dev);
+
+ mt76_rmw(dev, MT_EXT_CCA_CFG,
+ (MT_EXT_CCA_CFG_CCA0 |
+ MT_EXT_CCA_CFG_CCA1 |
+ MT_EXT_CCA_CFG_CCA2 |
+ MT_EXT_CCA_CFG_CCA3 |
+ MT_EXT_CCA_CFG_CCA_MASK),
+ ext_cca_chan[ch_group_index]);
+
+ ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
+ if (ret)
+ return ret;
+
+ mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+ /* Enable LDPC Rx */
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
+ if (!dev->cal.init_cal_done) {
+ u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+
+ if (val != 0xff)
+ mt76x2_mcu_calibrate(dev, MCU_CAL_R, 0);
+ }
+
+ mt76x2_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+
+ /* Rx LPF calibration */
+ if (!dev->cal.init_cal_done)
+ mt76x2_mcu_calibrate(dev, MCU_CAL_RC, 0);
+
+ dev->cal.init_cal_done = true;
+
+ mt76_wr(dev, MT_BBP(AGC, 61), 0xFF64A4E2);
+ mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+ mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+ mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F);
+
+ if (scan)
+ return 0;
+
+ dev->cal.low_gain = -1;
+ mt76x2_phy_channel_calibrate(dev, true);
+ mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init);
+ memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
+ sizeof(dev->cal.agc_gain_cur));
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+
+ return 0;
+}
+
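+/*
+ * TSSI compensation runs in two passes from the calibration work: the first
+ * pass triggers a TSSI measurement (cal_mode BIT(0)); once the busy flag in
+ * BBP CORE(34) clears, the second pass programs the per-chain slope/offset
+ * values (cal_mode BIT(1)) and, on internal-PA devices, kicks off a
+ * one-time DPD calibration.
+ */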
+static void
+mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76x2_tssi_comp t = {};
+
+ if (!dev->cal.tssi_cal_done)
+ return;
+
+ if (!dev->cal.tssi_comp_pending) {
+ /* TSSI trigger */
+ t.cal_mode = BIT(0);
+ mt76x2_mcu_tssi_comp(dev, &t);
+ dev->cal.tssi_comp_pending = true;
+ } else {
+ if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
+ return;
+
+ dev->cal.tssi_comp_pending = false;
+ mt76x2_get_power_info(dev, &txp, chan);
+
+ if (mt76x2_ext_pa_enabled(dev, chan->band))
+ t.pa_mode = 1;
+
+ t.cal_mode = BIT(1);
+ t.slope0 = txp.chain[0].tssi_slope;
+ t.offset0 = txp.chain[0].tssi_offset;
+ t.slope1 = txp.chain[1].tssi_slope;
+ t.offset1 = txp.chain[1].tssi_offset;
+ mt76x2_mcu_tssi_comp(dev, &t);
+
+ if (t.pa_mode || dev->cal.dpd_cal_done)
+ return;
+
+ usleep_range(10000, 20000);
+ mt76x2_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
+ dev->cal.dpd_cal_done = true;
+ }
+}
+
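+/*
+ * Convert the raw sensor value to degrees Celsius around the 25 C EEPROM
+ * reference (the 1789/1000 factor is presumably the sensor's C-per-LSB
+ * slope), derive a dB correction from the EEPROM slopes, clamp it to the
+ * EEPROM bounds and program it in 0.5 dB units.
+ */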
+static void
+mt76x2_phy_temp_compensate(struct mt76x2_dev *dev)
+{
+ struct mt76x2_temp_comp t;
+ int temp, db_diff;
+
+ if (mt76x2_get_temp_comp(dev, &t))
+ return;
+
+ temp = mt76_get_field(dev, MT_TEMP_SENSOR, MT_TEMP_SENSOR_VAL);
+ temp -= t.temp_25_ref;
+ temp = (temp * 1789) / 1000 + 25;
+ dev->cal.temp = temp;
+
+ if (temp > 25)
+ db_diff = (temp - 25) / t.high_slope;
+ else
+ db_diff = (25 - temp) / t.low_slope;
+
+ db_diff = min(db_diff, t.upper_bound);
+ db_diff = max(db_diff, t.lower_bound);
+
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+ db_diff * 2);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+ db_diff * 2);
+}
+
+void mt76x2_phy_calibrate(struct work_struct *work)
+{
+ struct mt76x2_dev *dev;
+
+ dev = container_of(work, struct mt76x2_dev, cal_work.work);
+ mt76x2_phy_channel_calibrate(dev, false);
+ mt76x2_phy_tssi_compensate(dev);
+ mt76x2_phy_temp_compensate(dev);
+ mt76x2_phy_update_channel_gain(dev);
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2_phy_start(struct mt76x2_dev *dev)
+{
+ int ret;
+
+ ret = mt76x2_mcu_set_radio_state(dev, true);
+ if (ret)
+ return ret;
+
+ mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
new file mode 100644
index 000000000000..ce3ab85c8b0f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
@@ -0,0 +1,587 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_REGS_H
+#define __MT76x2_REGS_H
+
+#define MT_ASIC_VERSION 0x0000
+
+#define MT76XX_REV_E3 0x22
+#define MT76XX_REV_E4 0x33
+
+#define MT_CMB_CTRL 0x0020
+#define MT_CMB_CTRL_XTAL_RDY BIT(22)
+#define MT_CMB_CTRL_PLL_LD BIT(23)
+
+#define MT_EFUSE_CTRL 0x0024
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_DATA_BASE 0x0028
+#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0 0x0040
+#define MT_COEXCFG0_COEX_EN BIT(0)
+
+#define MT_WLAN_FUN_CTRL 0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0 0x0100
+#define MT_XO_CTRL1 0x0104
+#define MT_XO_CTRL2 0x0108
+#define MT_XO_CTRL3 0x010c
+#define MT_XO_CTRL4 0x0110
+
+#define MT_XO_CTRL5 0x0114
+#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8)
+
+#define MT_XO_CTRL6 0x0118
+#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8)
+
+#define MT_XO_CTRL7 0x011c
+
+#define MT_WLAN_MTC_CTRL 0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28)
+
+#define MT_INT_SOURCE_CSR 0x0200
+#define MT_INT_MASK_CSR 0x0204
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+#define MT_INT_RX_COHERENT BIT(16)
+#define MT_INT_TX_COHERENT BIT(17)
+#define MT_INT_ANY_COHERENT BIT(18)
+#define MT_INT_MCU_CMD BIT(19)
+#define MT_INT_TBTT BIT(20)
+#define MT_INT_PRE_TBTT BIT(21)
+#define MT_INT_TX_STAT BIT(22)
+#define MT_INT_AUTO_WAKEUP BIT(23)
+#define MT_INT_GPTIMER BIT(24)
+#define MT_INT_RXDELAYINT BIT(26)
+#define MT_INT_TXDELAYINT BIT(27)
+
+#define MT_WPDMA_GLO_CFG 0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MT_WPDMA_RST_IDX 0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG 0x0210
+
+#define MT_WMM_AIFSN 0x0214
+#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMIN 0x0218
+#define MT_WMM_CWMIN_MASK GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMAX 0x021c
+#define MT_WMM_CWMAX_MASK GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_TXOP_BASE 0x0220
+#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n) (((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK GENMASK(15, 0)
+
+#define MT_TSO_CTRL 0x0250
+#define MT_HEADER_TRANS_CTRL_REG 0x0260
+
+#define MT_TX_RING_BASE 0x0300
+#define MT_RX_RING_BASE 0x03c0
+
+#define MT_TX_HW_QUEUE_MCU 8
+#define MT_TX_HW_QUEUE_MGMT 9
+
+#define MT_PBF_SYS_CTRL 0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4)
+
+#define MT_PBF_CFG 0x0404
+#define MT_PBF_CFG_TX0Q_EN BIT(0)
+#define MT_PBF_CFG_TX1Q_EN BIT(1)
+#define MT_PBF_CFG_TX2Q_EN BIT(2)
+#define MT_PBF_CFG_TX3Q_EN BIT(3)
+#define MT_PBF_CFG_RX0Q_EN BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT 0x0408
+#define MT_PBF_RX_MAX_PCNT 0x040c
+
+#define MT_BCN_OFFSET_BASE 0x041c
+#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RF_BYPASS_0 0x0504
+#define MT_RF_BYPASS_1 0x0508
+#define MT_RF_SETTING_0 0x050c
+
+#define MT_RF_DATA_WRITE 0x0524
+
+#define MT_RF_CTRL 0x0528
+#define MT_RF_CTRL_ADDR GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE BIT(12)
+#define MT_RF_CTRL_BUSY BIT(13)
+#define MT_RF_CTRL_IDX BIT(16)
+
+#define MT_RF_DATA_READ 0x052c
+
+#define MT_FCE_PSE_CTRL 0x0800
+#define MT_FCE_PARAMETERS 0x0804
+#define MT_FCE_CSO 0x0808
+
+#define MT_FCE_L2_STUFF 0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
+
+#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
+
+#define MT_MAC_CSR0 0x1000
+
+#define MT_MAC_SYS_CTRL 0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3)
+
+#define MT_MAC_ADDR_DW0 0x1008
+#define MT_MAC_ADDR_DW1 0x100c
+
+#define MT_MAC_BSSID_DW0 0x1010
+#define MT_MAC_BSSID_DW1 0x1014
+#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG 0x1018
+
+#define MT_AMPDU_MAX_LEN_20M1S 0x1030
+#define MT_AMPDU_MAX_LEN_20M2S 0x1034
+#define MT_AMPDU_MAX_LEN_40M1S 0x1038
+#define MT_AMPDU_MAX_LEN_40M2S 0x103c
+#define MT_AMPDU_MAX_LEN 0x1040
+
+#define MT_WCID_DROP_BASE 0x106c
+#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK 0x108c
+
+#define MT_MAC_APC_BSSID_BASE 0x1090
+#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN BIT(16)
+
+#define MT_XIFS_TIME_CFG 0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29)
+
+#define MT_BKOFF_SLOT_CFG 0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8)
+
+#define MT_CH_TIME_CFG 0x110c
+#define MT_CH_TIME_CFG_TIMER_EN BIT(0)
+#define MT_CH_TIME_CFG_TX_AS_BUSY BIT(1)
+#define MT_CH_TIME_CFG_RX_AS_BUSY BIT(2)
+#define MT_CH_TIME_CFG_NAV_AS_BUSY BIT(3)
+#define MT_CH_TIME_CFG_EIFS_AS_BUSY BIT(4)
+#define MT_CH_TIME_CFG_MDRDY_CNT_EN BIT(5)
+#define MT_CH_TIME_CFG_CH_TIMER_CLR GENMASK(9, 8)
+#define MT_CH_TIME_CFG_MDRDY_CLR GENMASK(11, 10)
+
+#define MT_PBF_LIFE_TIMER 0x1110
+
+#define MT_BEACON_TIME_CFG 0x1114
+#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG 0x1118
+#define MT_TBTT_TIMER_CFG 0x1124
+
+#define MT_INT_TIMER_CFG 0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN 0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1)
+
+#define MT_CH_IDLE 0x1130
+#define MT_CH_BUSY 0x1134
+#define MT_EXT_CH_BUSY 0x1138
+#define MT_ED_CCA_TIMER 0x1140
+
+#define MT_MAC_STATUS 0x1200
+#define MT_MAC_STATUS_TX BIT(0)
+#define MT_MAC_STATUS_RX BIT(1)
+
+#define MT_PWR_PIN_CFG 0x1204
+#define MT_AUX_CLK_CFG 0x120c
+
+#define MT_BB_PA_MODE_CFG0 0x1214
+#define MT_BB_PA_MODE_CFG1 0x1218
+#define MT_RF_PA_MODE_CFG0 0x121c
+#define MT_RF_PA_MODE_CFG1 0x1220
+
+#define MT_RF_PA_MODE_ADJ0 0x1228
+#define MT_RF_PA_MODE_ADJ1 0x122c
+
+#define MT_DACCLK_EN_DLY_CFG 0x1264
+
+#define MT_EDCA_CFG_BASE 0x1300
+#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0 0x1314
+#define MT_TX_PWR_CFG_1 0x1318
+#define MT_TX_PWR_CFG_2 0x131c
+#define MT_TX_PWR_CFG_3 0x1320
+#define MT_TX_PWR_CFG_4 0x1324
+
+#define MT_TX_BAND_CFG 0x132c
+#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
+#define MT_TX_BAND_CFG_5G BIT(1)
+#define MT_TX_BAND_CFG_2G BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY 0x1384
+#define MT_TX_MPDU_ADJ_INT 0x1388
+
+#define MT_TX_PWR_CFG_7 0x13d4
+#define MT_TX_PWR_CFG_8 0x13d8
+#define MT_TX_PWR_CFG_9 0x13dc
+
+#define MT_TX_SW_CFG0 0x1330
+#define MT_TX_SW_CFG1 0x1334
+#define MT_TX_SW_CFG2 0x1338
+
+#define MT_TXOP_CTRL_CFG 0x1340
+
+#define MT_TX_RTS_CFG 0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK BIT(24)
+
+#define MT_TX_TIMEOUT_CFG 0x1348
+#define MT_TX_TIMEOUT_CFG_ACKTO GENMASK(15, 8)
+
+#define MT_TX_RETRY_CFG 0x134c
+#define MT_VHT_HT_FBK_CFG1 0x1358
+
+#define MT_PROT_CFG_RATE GENMASK(15, 0)
+#define MT_PROT_CFG_CTRL GENMASK(17, 16)
+#define MT_PROT_CFG_NAV GENMASK(19, 18)
+#define MT_PROT_CFG_TXOP_ALLOW GENMASK(25, 20)
+#define MT_PROT_CFG_RTS_THRESH BIT(26)
+
+#define MT_CCK_PROT_CFG 0x1364
+#define MT_OFDM_PROT_CFG 0x1368
+#define MT_MM20_PROT_CFG 0x136c
+#define MT_MM40_PROT_CFG 0x1370
+#define MT_GF20_PROT_CFG 0x1374
+#define MT_GF40_PROT_CFG 0x1378
+
+#define MT_EXP_ACK_TIME 0x1380
+
+#define MT_TX_PWR_CFG_0_EXT 0x1390
+#define MT_TX_PWR_CFG_1_EXT 0x1394
+
+#define MT_TX_FBK_LIMIT 0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR 0x13a0
+#define MT_TX1_RF_GAIN_CORR 0x13a4
+
+#define MT_TX_ALC_CFG_0 0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1 0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2 0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_3 0x13ac
+#define MT_TX_ALC_CFG_4 0x13c0
+#define MT_TX_ALC_CFG_4_LOWGAIN_CH_EN BIT(31)
+
+#define MT_TX_ALC_VGA3 0x13c8
+
+#define MT_TX_PROT_CFG6 0x13e0
+#define MT_TX_PROT_CFG7 0x13e4
+#define MT_TX_PROT_CFG8 0x13e8
+
+#define MT_PIFS_TX_CFG 0x13ec
+
+#define MT_RX_FILTR_CFG 0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR BIT(4)
+#define MT_RX_FILTR_CFG_MCAST BIT(5)
+#define MT_RX_FILTR_CFG_BCAST BIT(6)
+#define MT_RX_FILTR_CFG_DUP BIT(7)
+#define MT_RX_FILTR_CFG_CFACK BIT(8)
+#define MT_RX_FILTR_CFG_CFEND BIT(9)
+#define MT_RX_FILTR_CFG_ACK BIT(10)
+#define MT_RX_FILTR_CFG_CTS BIT(11)
+#define MT_RX_FILTR_CFG_RTS BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL BIT(13)
+#define MT_RX_FILTR_CFG_BA BIT(14)
+#define MT_RX_FILTR_CFG_BAR BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
+
+#define MT_LEGACY_BASIC_RATE 0x1408
+#define MT_HT_BASIC_RATE 0x140c
+
+#define MT_HT_CTRL_CFG 0x1410
+
+#define MT_EXT_CCA_CFG 0x141c
+#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3 0x1478
+
+#define MT_PN_PAD_MODE 0x150c
+
+#define MT_TXOP_HLDR_ET 0x1608
+
+#define MT_PROT_AUTO_TX_CFG 0x1648
+#define MT_PROT_AUTO_TX_CFG_PROT_PADJ GENMASK(11, 8)
+#define MT_PROT_AUTO_TX_CFG_AUTO_PADJ GENMASK(27, 24)
+
+#define MT_RX_STAT_0 0x1700
+#define MT_RX_STAT_0_CRC_ERRORS GENMASK(15, 0)
+#define MT_RX_STAT_0_PHY_ERRORS GENMASK(31, 16)
+
+#define MT_RX_STAT_1 0x1704
+#define MT_RX_STAT_1_CCA_ERRORS GENMASK(15, 0)
+#define MT_RX_STAT_1_PLCP_ERRORS GENMASK(31, 16)
+
+#define MT_RX_STAT_2 0x1708
+#define MT_RX_STAT_2_DUP_ERRORS GENMASK(15, 0)
+#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16)
+
+#define MT_TX_STAT_FIFO 0x1718
+#define MT_TX_STAT_FIFO_VALID BIT(0)
+#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
+#define MT_TX_STAT_FIFO_AGGR BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ BIT(7)
+#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16)
+
+#define MT_TX_AGG_CNT_BASE0 0x1720
+#define MT_TX_AGG_CNT_BASE1 0x174c
+
+#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \
+ MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+ MT_TX_AGG_CNT_BASE1 + (((_id) - 8) << 2))
+
+#define MT_TX_STAT_FIFO_EXT 0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0)
+#define MT_TX_STAT_FIFO_EXT_PKTID GENMASK(15, 8)
+
+#define MT_WCID_TX_RATE_BASE 0x1c00
+#define MT_WCID_TX_RATE(_i) (MT_WCID_TX_RATE_BASE + ((_i) << 3))
+
+#define MT_BBP_CORE_BASE 0x2000
+#define MT_BBP_IBI_BASE 0x2100
+#define MT_BBP_AGC_BASE 0x2300
+#define MT_BBP_TXC_BASE 0x2400
+#define MT_BBP_RXC_BASE 0x2500
+#define MT_BBP_TXO_BASE 0x2600
+#define MT_BBP_TXBE_BASE 0x2700
+#define MT_BBP_RXFE_BASE 0x2800
+#define MT_BBP_RXO_BASE 0x2900
+#define MT_BBP_DFS_BASE 0x2a00
+#define MT_BBP_TR_BASE 0x2b00
+#define MT_BBP_CAL_BASE 0x2c00
+#define MT_BBP_DSC_BASE 0x2e00
+#define MT_BBP_PFMU_BASE 0x2f00
+
+#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_HIGH_GAIN GENMASK(21, 16)
+#define MT_BBP_AGC_LNA_MID_GAIN GENMASK(13, 8)
+#define MT_BBP_AGC_LNA_LOW_GAIN GENMASK(5, 0)
+
+/* AGC, R6/R7 */
+#define MT_BBP_AGC_LNA_ULOW_GAIN GENMASK(5, 0)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_LNA_GAIN_MODE GENMASK(7, 6)
+#define MT_BBP_AGC_GAIN GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE 0x1800
+#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE 0x4000
+
+#define MT_WCID_KEY_BASE 0x8000
+#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE 0xa000
+#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE 0xa800
+#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0 0xac00
+#define MT_SKEY_BASE_1 0xb400
+#define MT_SKEY_0(_bss, _idx) (MT_SKEY_BASE_0 + (4 * (_bss) + (_idx)) * 32)
+#define MT_SKEY_1(_bss, _idx) (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + (_idx)) * 32)
+#define MT_SKEY(_bss, _idx) (((_bss) & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0 0xb000
+#define MT_SKEY_MODE_BASE_1 0xb3f0
+#define MT_SKEY_MODE_0(_bss) (MT_SKEY_MODE_BASE_0 + (((_bss) / 2) << 2))
+#define MT_SKEY_MODE_1(_bss) (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss) (((_bss) & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * ((_bss) & 1)))
+
+#define MT_BEACON_BASE 0xc000
+
+#define MT_TEMP_SENSOR 0x1d000
+#define MT_TEMP_SENSOR_VAL GENMASK(6, 0)
+
+struct mt76_wcid_addr {
+ u8 macaddr[6];
+ __le16 ba_mask;
+} __packed __aligned(4);
+
+struct mt76_wcid_key {
+ u8 key[16];
+ u8 tx_mic[8];
+ u8 rx_mic[8];
+} __packed __aligned(4);
+
+enum mt76x2_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_CKIP40,
+ MT_CIPHER_CKIP104,
+ MT_CIPHER_CKIP128,
+ MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c
new file mode 100644
index 000000000000..a09f117848d6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "mt76x2_trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h
new file mode 100644
index 000000000000..4cd424148d4b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76x2_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76x2_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76x2.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76x2
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(mt76_hw(dev)->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define TXID_ENTRY __field(u8, wcid) __field(u8, pktid)
+#define TXID_ASSIGN __entry->wcid = wcid; __entry->pktid = pktid
+#define TXID_PR_FMT " [%d:%d]"
+#define TXID_PR_ARG __entry->wcid, __entry->pktid
+
+DECLARE_EVENT_CLASS(dev_evt,
+ TP_PROTO(struct mt76x2_dev *dev),
+ TP_ARGS(dev),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ ),
+ TP_printk(DEV_PR_FMT, DEV_PR_ARG)
+);
+
+DECLARE_EVENT_CLASS(dev_txid_evt,
+ TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
+ TP_ARGS(dev, wcid, pktid),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ TXID_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ TXID_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT TXID_PR_FMT,
+ DEV_PR_ARG, TXID_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_evt, mac_txstat_poll,
+ TP_PROTO(struct mt76x2_dev *dev),
+ TP_ARGS(dev)
+);
+
+DEFINE_EVENT(dev_txid_evt, mac_txdone_add,
+ TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
+ TP_ARGS(dev, wcid, pktid)
+);
+
+TRACE_EVENT(mac_txstat_fetch,
+ TP_PROTO(struct mt76x2_dev *dev,
+ struct mt76x2_tx_status *stat),
+
+ TP_ARGS(dev, stat),
+
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ TXID_ENTRY
+ __field(bool, success)
+ __field(bool, aggr)
+ __field(bool, ack_req)
+ __field(u16, rate)
+ __field(u8, retry)
+ ),
+
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->success = stat->success;
+ __entry->aggr = stat->aggr;
+ __entry->ack_req = stat->ack_req;
+ __entry->wcid = stat->wcid;
+ __entry->pktid = stat->pktid;
+ __entry->rate = stat->rate;
+ __entry->retry = stat->retry;
+ ),
+
+ TP_printk(
+ DEV_PR_FMT TXID_PR_FMT
+ " success:%d aggr:%d ack_req:%d"
+ " rate:%04x retry:%d",
+ DEV_PR_ARG, TXID_PR_ARG,
+ __entry->success, __entry->aggr, __entry->ack_req,
+ __entry->rate, __entry->retry
+ )
+);
+
+TRACE_EVENT(dev_irq,
+ TP_PROTO(struct mt76x2_dev *dev, u32 val, u32 mask),
+
+ TP_ARGS(dev, val, mask),
+
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, val)
+ __field(u32, mask)
+ ),
+
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->val = val;
+ __entry->mask = mask;
+ ),
+
+ TP_printk(
+ DEV_PR_FMT " %08x & %08x",
+ DEV_PR_ARG, __entry->val, __entry->mask
+ )
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mt76x2_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
new file mode 100644
index 000000000000..534e4bf9a34c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
+struct beacon_bc_data {
+ struct mt76x2_dev *dev;
+ struct sk_buff_head q;
+ struct sk_buff *tail[8];
+};
+
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76x2_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->global_wcid;
+
+ if (control->sta) {
+ struct mt76x2_sta *msta;
+
+ msta = (struct mt76x2_sta *) control->sta->drv_priv;
+ wcid = &msta->wcid;
+ }
+
+ /*
+ * Use the group wcid when no station entry exists, or when the
+ * station's wcid has a key programmed but this frame must go out
+ * unencrypted; guard against a NULL vif (0xff means "no hw key").
+ */
+ if (vif && (!control->sta ||
+ (!info->control.hw_key && wcid->hw_key_idx != 0xff))) {
+ struct mt76x2_vif *mvif;
+
+ mvif = (struct mt76x2_vif *) vif->drv_priv;
+ wcid = &mvif->group_wcid;
+ }
+
+ mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+
+void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ ieee80211_free_txskb(mt76_hw(dev), skb);
+ } else {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status(mt76_hw(dev), skb);
+ }
+}
+
+s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
+ const struct ieee80211_tx_rate *rate)
+{
+ s8 max_txpwr;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+
+ if (mcs == 8 || mcs == 9) {
+ max_txpwr = dev->rate_power.vht[8];
+ } else {
+ u8 nss, idx;
+
+ nss = ieee80211_rate_get_vht_nss(rate);
+ idx = ((nss - 1) << 3) + mcs;
+ max_txpwr = dev->rate_power.ht[idx & 0xf];
+ }
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
+ } else {
+ enum nl80211_band band = dev->mt76.chandef.chan->band;
+
+ if (band == NL80211_BAND_2GHZ) {
+ const struct ieee80211_rate *r;
+ struct wiphy *wiphy = mt76_hw(dev)->wiphy;
+ struct mt76_rate_power *rp = &dev->rate_power;
+
+ r = &wiphy->bands[band]->bitrates[rate->idx];
+ if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
+ max_txpwr = rp->cck[r->hw_value & 0x3];
+ else
+ max_txpwr = rp->ofdm[r->hw_value & 0x7];
+ } else {
+ max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
+ }
+ }
+
+ return max_txpwr;
+}
+
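+/*
+ * Map the requested power delta to the hardware TPC field: 0..7 appears to
+ * encode a 0..+7 dB adjustment directly, while 8..15 encodes -16..-2 dB in
+ * 2 dB steps ((txpwr + 32) / 2); 0 disables the adjustment when TPC is off.
+ */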
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
+{
+ txpwr = min_t(s8, txpwr, dev->txpower_conf);
+ txpwr -= (dev->target_power + dev->target_power_delta[0]);
+ txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
+ if (!dev->enable_tpc)
+ return 0;
+ else if (txpwr >= 0)
+ return min_t(s8, txpwr, 7);
+ else
+ return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+
+void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
+{
+ s8 txpwr_adj;
+
+ txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
+ dev->rate_power.ofdm[4]);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
+}
+
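+/*
+ * The hardware presumably expects the 802.11 header to be 4-byte aligned:
+ * when the header length is not a multiple of four, grow the skb by two
+ * bytes, shift the header forward and leave two zero pad bytes between
+ * header and payload.
+ */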
+static int mt76x2_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ if (len % 4 == 0)
+ return 0;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+ return 2;
+}
+
+int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info)
+{
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int qsel = MT_QSEL_EDCA;
+ int ret;
+
+ if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
+ mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);
+
+ mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta);
+
+ ret = mt76x2_insert_hdr_pad(skb);
+ if (ret < 0)
+ return ret;
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ qsel = MT_QSEL_MGMT;
+
+ *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+ MT_TXD_INFO_80211;
+
+ if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+ *tx_info |= MT_TXD_INFO_WIV;
+
+ return 0;
+}
+
+static void
+mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt76x2_dev *dev = (struct mt76x2_dev *) priv;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ struct sk_buff *skb = NULL;
+
+ if (!(dev->beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ mt76x2_mac_set_beacon(dev, mvif->idx, skb);
+}
+
+static void
+mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct beacon_bc_data *data = priv;
+ struct mt76x2_dev *dev = data->dev;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+
+ if (!(dev->beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+ mt76_skb_set_moredata(skb, true);
+ __skb_queue_tail(&data->q, skb);
+ data->tail[mvif->idx] = skb;
+}
+
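+/*
+ * Pre-TBTT work: refresh each interface's beacon, then drain all buffered
+ * multicast/broadcast frames. The iteration repeats until no new frames
+ * show up, so the "more data" bit can be cleared on the true tail frame of
+ * every interface before the PS queue is flushed.
+ */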
+void mt76x2_pre_tbtt_tasklet(unsigned long arg)
+{
+ struct mt76x2_dev *dev = (struct mt76x2_dev *) arg;
+ struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
+ struct beacon_bc_data data = {};
+ struct sk_buff *skb;
+ int i, nframes;
+
+ data.dev = dev;
+ __skb_queue_head_init(&data.q);
+
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76x2_update_beacon_iter, dev);
+
+ do {
+ nframes = skb_queue_len(&data.q);
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76x2_add_buffered_bc, &data);
+ } while (nframes != skb_queue_len(&data.q));
+
+ if (!nframes)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+ if (!data.tail[i])
+ continue;
+
+ mt76_skb_set_moredata(data.tail[i], false);
+ }
+
+ spin_lock_bh(&q->lock);
+ while ((skb = __skb_dequeue(&data.q)) != NULL) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+
+ mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL);
+ }
+ spin_unlock_bh(&q->lock);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/trace.c b/drivers/net/wireless/mediatek/mt76/trace.c
new file mode 100644
index 000000000000..ea4ab8729ae4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/trace.h b/drivers/net/wireless/mediatek/mt76/trace.h
new file mode 100644
index 000000000000..ea30895933c5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/trace.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT " %04x=%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_rr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_wr,
+ TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
new file mode 100644
index 000000000000..4eef69bd8a9e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+
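+/*
+ * TXWI descriptors are kept on a free list and stay DMA-mapped until
+ * mt76_tx_free(); the allocation is rounded up to a full L1 cache line so
+ * neighbouring allocations do not share a line with the mapped descriptor.
+ */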
+static struct mt76_txwi_cache *
+mt76_alloc_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t;
+ dma_addr_t addr;
+ int size;
+
+ size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
+ t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+ if (!t)
+ return NULL;
+
+ addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+ t->dma_addr = addr;
+
+ return t;
+}
+
+static struct mt76_txwi_cache *
+__mt76_get_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t = NULL;
+
+ spin_lock_bh(&dev->lock);
+ if (!list_empty(&dev->txwi_cache)) {
+ t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
+ list);
+ list_del(&t->list);
+ }
+ spin_unlock_bh(&dev->lock);
+
+ return t;
+}
+
+static struct mt76_txwi_cache *
+mt76_get_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
+
+ if (t)
+ return t;
+
+ return mt76_alloc_txwi(dev);
+}
+
+void
+mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ if (!t)
+ return;
+
+ spin_lock_bh(&dev->lock);
+ list_add(&t->list, &dev->txwi_cache);
+ spin_unlock_bh(&dev->lock);
+}
+
+void mt76_tx_free(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t;
+
+ while ((t = __mt76_get_txwi(dev)) != NULL)
+ dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+}
+
+static int
+mt76_txq_get_qid(struct ieee80211_txq *txq)
+{
+ if (!txq->sta)
+ return MT_TXQ_BE;
+
+ return txq->ac;
+}
+
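+/*
+ * Build the DMA scatter list for one frame: the TXWI descriptor first, then
+ * the linear skb data, then any paged fragments. The (n + 1) / 2 in the
+ * fullness check suggests each hardware descriptor carries two buffers.
+ */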
+int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_queue_entry e;
+ struct mt76_txwi_cache *t;
+ struct mt76_queue_buf buf[32];
+ struct sk_buff *iter;
+ dma_addr_t addr;
+ int len;
+ u32 tx_info = 0;
+ int n, ret;
+
+ t = mt76_get_txwi(dev);
+ if (!t) {
+ ieee80211_free_txskb(dev->hw, skb);
+ return -ENOMEM;
+ }
+
+ dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+ ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
+ &tx_info);
+ dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
+ DMA_TO_DEVICE);
+ if (ret < 0)
+ goto free;
+
+ len = skb->len - skb->data_len;
+ addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, addr)) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ n = 0;
+ buf[n].addr = t->dma_addr;
+ buf[n++].len = dev->drv->txwi_size;
+ buf[n].addr = addr;
+ buf[n++].len = len;
+
+ skb_walk_frags(skb, iter) {
+ if (n == ARRAY_SIZE(buf))
+ goto unmap;
+
+ addr = dma_map_single(dev->dev, iter->data, iter->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, addr))
+ goto unmap;
+
+ buf[n].addr = addr;
+ buf[n++].len = iter->len;
+ }
+
+ if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
+ goto unmap;
+
+ return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
+
+unmap:
+ ret = -ENOMEM;
+ for (n--; n > 0; n--)
+ dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
+ DMA_TO_DEVICE);
+
+free:
+ e.skb = skb;
+ e.txwi = t;
+ dev->drv->tx_complete_skb(dev, q, &e, true);
+ mt76_put_txwi(dev, t);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_tx_queue_skb);
+
+void
+mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ struct mt76_wcid *wcid, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_queue *q;
+ int qid = skb_get_queue_mapping(skb);
+
+ if (WARN_ON(qid >= MT_TXQ_PSD)) {
+ qid = MT_TXQ_BE;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ if (!wcid->tx_rate_set)
+ ieee80211_get_tx_rates(info->control.vif, sta, skb,
+ info->control.rates, 1);
+
+ q = &dev->q_tx[qid];
+
+ spin_lock_bh(&q->lock);
+ mt76_tx_queue_skb(dev, q, skb, wcid, sta);
+ dev->queue_ops->kick(dev, q);
+
+ if (q->queued > q->ndesc - 8)
+ ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+ spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_tx);
+
+static struct sk_buff *
+mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
+{
+ struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&mtxq->retry_q);
+ if (skb) {
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+
+ if (ps && skb_queue_empty(&mtxq->retry_q))
+ ieee80211_sta_set_buffered(txq->sta, tid, false);
+
+ return skb;
+ }
+
+ skb = ieee80211_tx_dequeue(dev->hw, txq);
+ if (!skb)
+ return NULL;
+
+ return skb;
+}
+
+static void
+mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
+ mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
+}
+
+static void
+mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ struct sk_buff *skb, bool last)
+{
+ struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
+
+ info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
+ if (last)
+ info->flags |= IEEE80211_TX_STATUS_EOSP;
+
+ mt76_skb_set_moredata(skb, !last);
+ mt76_tx_queue_skb(dev, hwq, skb, wcid, sta);
+}
+
+void
+mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ u16 tids, int nframes,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct mt76_dev *dev = hw->priv;
+ struct sk_buff *last_skb = NULL;
+ struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
+ int i;
+
+ spin_lock_bh(&hwq->lock);
+ for (i = 0; tids && nframes; i++, tids >>= 1) {
+ struct ieee80211_txq *txq = sta->txq[i];
+ struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+ struct sk_buff *skb;
+
+ if (!(tids & 1))
+ continue;
+
+ do {
+ skb = mt76_txq_dequeue(dev, mtxq, true);
+ if (!skb)
+ break;
+
+ if (mtxq->aggr)
+ mt76_check_agg_ssn(mtxq, skb);
+
+ nframes--;
+ if (last_skb)
+ mt76_queue_ps_skb(dev, sta, last_skb, false);
+
+ last_skb = skb;
+ } while (nframes);
+ }
+
+ if (last_skb) {
+ mt76_queue_ps_skb(dev, sta, last_skb, true);
+ dev->queue_ops->kick(dev, hwq);
+ }
+ spin_unlock_bh(&hwq->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
+
+static int
+mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
+ struct mt76_txq *mtxq, bool *empty)
+{
+ struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+ struct ieee80211_tx_info *info;
+ struct mt76_wcid *wcid = mtxq->wcid;
+ struct sk_buff *skb;
+ int n_frames = 1, limit;
+ struct ieee80211_tx_rate tx_rate;
+ bool ampdu;
+ bool probe;
+ int idx;
+
+ skb = mt76_txq_dequeue(dev, mtxq, false);
+ if (!skb) {
+ *empty = true;
+ return 0;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+ if (!wcid->tx_rate_set)
+ ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
+ info->control.rates, 1);
+ tx_rate = info->control.rates[0];
+
+ probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+ ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
+ limit = ampdu ? 16 : 3;
+
+ if (ampdu)
+ mt76_check_agg_ssn(mtxq, skb);
+
+ idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+
+ if (idx < 0)
+ return idx;
+
+ do {
+ bool cur_ampdu;
+
+ if (probe)
+ break;
+
+ if (test_bit(MT76_SCANNING, &dev->state) ||
+ test_bit(MT76_RESET, &dev->state))
+ return -EBUSY;
+
+ skb = mt76_txq_dequeue(dev, mtxq, false);
+ if (!skb) {
+ *empty = true;
+ break;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+ cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+
+ if (ampdu != cur_ampdu ||
+ (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
+ skb_queue_tail(&mtxq->retry_q, skb);
+ break;
+ }
+
+ info->control.rates[0] = tx_rate;
+
+ if (cur_ampdu)
+ mt76_check_agg_ssn(mtxq, skb);
+
+ idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+ if (idx < 0)
+ return idx;
+
+ n_frames++;
+ } while (n_frames < limit);
+
+ if (!probe) {
+ hwq->swq_queued++;
+ hwq->entry[idx].schedule = true;
+ }
+
+ dev->queue_ops->kick(dev, hwq);
+
+ return n_frames;
+}
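/*
 * Burst sizing above: up to 16 frames are chained when the first frame
 * belongs to an A-MPDU session, otherwise 3.  Rate-probe frames
 * (IEEE80211_TX_CTL_RATE_CTRL_PROBE) are sent alone so the rate-control
 * feedback for the probe is not diluted, and a frame whose A-MPDU state
 * differs from the current burst is pushed back onto retry_q to start
 * the next burst instead.
 */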
+
+static int
+mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
+{
+ struct mt76_txq *mtxq, *mtxq_last;
+ int len = 0;
+
+restart:
+ mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
+ while (!list_empty(&hwq->swq)) {
+ bool empty = false;
+ int cur;
+
+ mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
+ if (mtxq->send_bar && mtxq->aggr) {
+ struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+ struct ieee80211_sta *sta = txq->sta;
+ struct ieee80211_vif *vif = txq->vif;
+ u16 agg_ssn = mtxq->agg_ssn;
+ u8 tid = txq->tid;
+
+ mtxq->send_bar = false;
+ spin_unlock_bh(&hwq->lock);
+ ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
+ spin_lock_bh(&hwq->lock);
+ goto restart;
+ }
+
+ list_del_init(&mtxq->list);
+
+ cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
+ if (!empty)
+ list_add_tail(&mtxq->list, &hwq->swq);
+
+ if (cur < 0)
+ return cur;
+
+ len += cur;
+
+ if (mtxq == mtxq_last)
+ break;
+ }
+
+ return len;
+}
+
+void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
+{
+ int len;
+
+ do {
+ if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
+ break;
+
+ len = mt76_txq_schedule_list(dev, hwq);
+ } while (len > 0);
+}
+EXPORT_SYMBOL_GPL(mt76_txq_schedule);
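/*
 * Scheduling is round-robin: mt76_txq_schedule_list() gives each
 * software queue one burst per pass and re-queues it at the tail if it
 * still holds frames, while the "swq_queued >= 4" check above bounds
 * how many scheduled bursts may be outstanding on a hardware queue
 * before scheduling pauses until completions drain.
 */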
+
+void mt76_txq_schedule_all(struct mt76_dev *dev)
+{
+ int i;
+
+ for (i = 0; i <= MT_TXQ_BK; i++) {
+ struct mt76_queue *q = &dev->q_tx[i];
+
+ spin_lock_bh(&q->lock);
+ mt76_txq_schedule(dev, q);
+ spin_unlock_bh(&q->lock);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
+
+void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ bool send_bar)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct ieee80211_txq *txq = sta->txq[i];
+ struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+
+ spin_lock_bh(&mtxq->hwq->lock);
+ mtxq->send_bar = mtxq->aggr && send_bar;
+ if (!list_empty(&mtxq->list))
+ list_del_init(&mtxq->list);
+ spin_unlock_bh(&mtxq->hwq->lock);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
+
+void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+ struct mt76_dev *dev = hw->priv;
+ struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+ struct mt76_queue *hwq = mtxq->hwq;
+
+ spin_lock_bh(&hwq->lock);
+ if (list_empty(&mtxq->list))
+ list_add_tail(&mtxq->list, &hwq->swq);
+ mt76_txq_schedule(dev, hwq);
+ spin_unlock_bh(&hwq->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
+
+void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
+{
+ struct mt76_txq *mtxq;
+ struct mt76_queue *hwq;
+ struct sk_buff *skb;
+
+ if (!txq)
+ return;
+
+ mtxq = (struct mt76_txq *) txq->drv_priv;
+ hwq = mtxq->hwq;
+
+ spin_lock_bh(&hwq->lock);
+ if (!list_empty(&mtxq->list))
+ list_del(&mtxq->list);
+ spin_unlock_bh(&hwq->lock);
+
+ while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
+ ieee80211_free_txskb(dev->hw, skb);
+}
+EXPORT_SYMBOL_GPL(mt76_txq_remove);
+
+void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
+{
+ struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+
+ INIT_LIST_HEAD(&mtxq->list);
+ skb_queue_head_init(&mtxq->retry_q);
+
+ mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
+}
+EXPORT_SYMBOL_GPL(mt76_txq_init);
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
new file mode 100644
index 000000000000..0c35b8db58cd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include "mt76.h"
+
+bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout)
+{
+ u32 cur;
+
+ timeout /= 10;
+ do {
+ cur = dev->bus->rr(dev, offset) & mask;
+ if (cur == val)
+ return true;
+
+ udelay(10);
+ } while (timeout-- > 0);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(__mt76_poll);
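/*
 * Example use of the polling helper above.  The register offset and
 * busy flag below are hypothetical placeholders rather than real mt76
 * register definitions; the timeout argument is in microseconds.
 */
#define MT_SKETCH_MAC_STATUS	0x1200		/* hypothetical register */
#define MT_SKETCH_MAC_BUSY	BIT(0)		/* hypothetical busy bit */

static inline bool mt76_sketch_wait_mac_idle(struct mt76_dev *dev)
{
	/* poll until the busy bit reads back as 0, for at most 1000us */
	return __mt76_poll(dev, MT_SKETCH_MAC_STATUS, MT_SKETCH_MAC_BUSY,
			   0, 1000);
}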
+
+bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout)
+{
+ u32 cur;
+
+ timeout /= 10;
+ do {
+ cur = dev->bus->rr(dev, offset) & mask;
+ if (cur == val)
+ return true;
+
+ usleep_range(10000, 20000);
+ } while (timeout-- > 0);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(__mt76_poll_msec);
+
+int mt76_wcid_alloc(unsigned long *mask, int size)
+{
+ int i, idx = 0, cur;
+
+ for (i = 0; i < size / BITS_PER_LONG; i++) {
+ idx = ffs(~mask[i]);
+ if (!idx)
+ continue;
+
+ idx--;
+ cur = i * BITS_PER_LONG + idx;
+ if (cur >= size)
+ break;
+
+ mask[i] |= BIT(idx);
+ return cur;
+ }
+
+ return -1;
+}
+EXPORT_SYMBOL_GPL(mt76_wcid_alloc);
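/*
 * Typical use of the WCID slot allocator above, sketched with an
 * assumed table of 128 entries; callers supply their own bitmap and
 * locking, and the helper name here is hypothetical.
 */
static int mt76_sketch_grab_wcid(unsigned long *mask)
{
	int idx = mt76_wcid_alloc(mask, 128);

	if (idx < 0)
		return -ENOSPC;	/* all 128 slots taken */

	/* slot is now marked busy; release with mt76_wcid_free(mask, idx) */
	return idx;
}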
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h
new file mode 100644
index 000000000000..018d475504a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/util.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_UTIL_H
+#define __MT76_UTIL_H
+
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#define MT76_INCR(_var, _size) \
+ _var = (((_var) + 1) % _size)
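/*
 * MT76_INCR is a ring-index increment with wrap-around: with
 * _size == 4 the index walks 0, 1, 2, 3, 0, ...  The modulo form,
 * rather than a power-of-two mask, keeps it correct for arbitrary
 * ring sizes.
 */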
+
+int mt76_wcid_alloc(unsigned long *mask, int size);
+
+static inline void
+mt76_wcid_free(unsigned long *mask, int idx)
+{
+ mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+}
+
+static inline void
+mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
+ if (enable)
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ else
+ hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+}
+
+#endif
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 6711e7fb6926..0398bece5782 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -81,6 +81,41 @@ qtnf_mgmt_stypes[NUM_NL80211_IFTYPES] = {
};
static int
+qtnf_validate_iface_combinations(struct wiphy *wiphy,
+ struct qtnf_vif *change_vif,
+ enum nl80211_iftype new_type)
+{
+ struct qtnf_wmac *mac;
+ struct qtnf_vif *vif;
+ int i;
+ int ret = 0;
+ struct iface_combination_params params = {
+ .num_different_channels = 1,
+ };
+
+ mac = wiphy_priv(wiphy);
+ if (!mac)
+ return -EFAULT;
+
+ for (i = 0; i < QTNF_MAX_INTF; i++) {
+ vif = &mac->iflist[i];
+ if (vif->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
+ params.iftype_num[vif->wdev.iftype]++;
+ }
+
+ if (change_vif) {
+ params.iftype_num[new_type]++;
+ params.iftype_num[change_vif->wdev.iftype]--;
+ } else {
+ params.iftype_num[new_type]++;
+ }
+
+ ret = cfg80211_check_combinations(wiphy, &params);
+
+ return ret;
+}
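/*
 * The helper above builds a census of the active interface types in
 * params.iftype_num[], swaps in the proposed type when an existing VIF
 * is being changed (old type decremented, new type incremented), and
 * lets cfg80211_check_combinations() judge the result against the
 * combinations advertised on the wiphy.  For example, switching a VIF
 * from AP to STA decrements iftype_num[NL80211_IFTYPE_AP] and
 * increments iftype_num[NL80211_IFTYPE_STATION] before the check.
 */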
+
+static int
qtnf_change_virtual_intf(struct wiphy *wiphy,
struct net_device *dev,
enum nl80211_iftype type,
@@ -90,6 +125,13 @@ qtnf_change_virtual_intf(struct wiphy *wiphy,
u8 *mac_addr;
int ret;
+ ret = qtnf_validate_iface_combinations(wiphy, vif, type);
+ if (ret) {
+ pr_err("VIF%u.%u combination check: failed to set type %d\n",
+ vif->mac->macid, vif->vifid, type);
+ return ret;
+ }
+
if (params)
mac_addr = params->macaddr;
else
@@ -120,10 +162,6 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
qtnf_scan_done(vif->mac, true);
- if (qtnf_cmd_send_del_intf(vif))
- pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
- vif->vifid);
-
/* Stop data */
netif_tx_stop_all_queues(netdev);
if (netif_carrier_ok(netdev))
@@ -132,6 +170,10 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdevice(netdev);
+ if (qtnf_cmd_send_del_intf(vif))
+ pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
+ vif->vifid);
+
vif->netdev->ieee80211_ptr = NULL;
vif->netdev = NULL;
vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
@@ -150,12 +192,20 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
struct qtnf_wmac *mac;
struct qtnf_vif *vif;
u8 *mac_addr = NULL;
+ int ret;
mac = wiphy_priv(wiphy);
if (!mac)
return ERR_PTR(-EFAULT);
+ ret = qtnf_validate_iface_combinations(wiphy, NULL, type);
+ if (ret) {
+ pr_err("MAC%u invalid combination: failed to add type %d\n",
+ mac->macid, type);
+ return ERR_PTR(ret);
+ }
+
switch (type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
@@ -190,7 +240,7 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
goto err_mac;
}
- if (qtnf_core_net_attach(mac, vif, name, name_assign_t, type)) {
+ if (qtnf_core_net_attach(mac, vif, name, name_assign_t)) {
pr_err("VIF%u.%u: failed to attach netdev\n", mac->macid,
vif->vifid);
goto err_net;
@@ -381,6 +431,7 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
const struct ieee80211_mgmt *mgmt_frame = (void *)params->buf;
u32 short_cookie = prandom_u32();
u16 flags = 0;
+ u16 freq;
*cookie = short_cookie;
@@ -393,13 +444,21 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
if (params->dont_wait_for_ack)
flags |= QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT;
+	/* If the channel is not specified, pass "freq = 0" to tell the
+	 * device firmware to use the current channel.
+ */
+ if (params->chan)
+ freq = params->chan->center_freq;
+ else
+ freq = 0;
+
pr_debug("%s freq:%u; FC:%.4X; DA:%pM; len:%zu; C:%.8X; FL:%.4X\n",
- wdev->netdev->name, params->chan->center_freq,
+ wdev->netdev->name, freq,
le16_to_cpu(mgmt_frame->frame_control), mgmt_frame->da,
params->len, short_cookie, flags);
return qtnf_cmd_send_mgmt_frame(vif, short_cookie, flags,
- params->chan->center_freq,
+ freq,
params->buf, params->len);
}
@@ -409,6 +468,7 @@ qtnf_get_station(struct wiphy *wiphy, struct net_device *dev,
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ sinfo->generation = vif->generation;
return qtnf_cmd_get_sta_info(vif, mac, sinfo);
}
@@ -430,11 +490,13 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
ret = qtnf_cmd_get_sta_info(vif, sta_node->mac_addr, sinfo);
if (unlikely(ret == -ENOENT)) {
- qtnf_sta_list_del(&vif->sta_list, mac);
+ qtnf_sta_list_del(vif, mac);
cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL);
sinfo->filled = 0;
}
+ sinfo->generation = vif->generation;
+
return ret;
}
@@ -533,19 +595,13 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static void qtnf_scan_timeout(struct timer_list *t)
-{
- struct qtnf_wmac *mac = from_timer(mac, t, scan_timeout);
-
- pr_warn("mac%d scan timed out\n", mac->macid);
- qtnf_scan_done(mac, true);
-}
-
static int
qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ cancel_delayed_work_sync(&mac->scan_timeout);
+
mac->scan_req = request;
if (qtnf_cmd_send_scan(mac)) {
@@ -554,9 +610,8 @@ qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
return -EFAULT;
}
- mac->scan_timeout.function = qtnf_scan_timeout;
- mod_timer(&mac->scan_timeout,
- jiffies + QTNF_SCAN_TIMEOUT_SEC * HZ);
+ queue_delayed_work(mac->bus->workqueue, &mac->scan_timeout,
+ QTNF_SCAN_TIMEOUT_SEC * HZ);
return 0;
}
@@ -617,7 +672,6 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
- vif->sta_state = QTNF_STA_DISCONNECTED;
return 0;
}
@@ -717,7 +771,8 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
}
if (!cfg80211_chandef_valid(chandef)) {
- pr_err("%s: bad chan freq1=%u freq2=%u bw=%u\n", ndev->name,
+ pr_err("%s: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
+ ndev->name, chandef->chan->center_freq,
chandef->center_freq1, chandef->center_freq2,
chandef->width);
ret = -ENODATA;
@@ -750,6 +805,35 @@ static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
+static int qtnf_start_radar_detection(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_chan_def *chandef,
+ u32 cac_time_ms)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ int ret;
+
+ ret = qtnf_cmd_start_cac(vif, chandef, cac_time_ms);
+ if (ret)
+ pr_err("%s: failed to start CAC ret=%d\n", ndev->name, ret);
+
+ return ret;
+}
+
+static int qtnf_set_mac_acl(struct wiphy *wiphy,
+ struct net_device *dev,
+ const struct cfg80211_acl_data *params)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ ret = qtnf_cmd_set_mac_acl(vif, params);
+ if (ret)
+ pr_err("%s: failed to set mac ACL ret=%d\n", dev->name, ret);
+
+ return ret;
+}
+
static struct cfg80211_ops qtn_cfg80211_ops = {
.add_virtual_intf = qtnf_add_virtual_intf,
.change_virtual_intf = qtnf_change_virtual_intf,
@@ -773,7 +857,9 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
.disconnect = qtnf_disconnect,
.dump_survey = qtnf_dump_survey,
.get_channel = qtnf_get_channel,
- .channel_switch = qtnf_channel_switch
+ .channel_switch = qtnf_channel_switch,
+ .start_radar_detection = qtnf_start_radar_detection,
+ .set_mac_acl = qtnf_set_mac_acl,
};
static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
@@ -802,6 +888,9 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
continue;
mac = bus->mac[mac_idx];
+ if (!mac)
+ continue;
+
wiphy = priv_to_wiphy(mac);
for (band = 0; band < NUM_NL80211_BANDS; ++band) {
@@ -829,29 +918,29 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
return wiphy;
}
-static int qtnf_wiphy_setup_if_comb(struct wiphy *wiphy,
- struct ieee80211_iface_combination *if_comb,
- const struct qtnf_mac_info *mac_info)
+static int
+qtnf_wiphy_setup_if_comb(struct wiphy *wiphy, struct qtnf_mac_info *mac_info)
{
- size_t max_interfaces = 0;
+ struct ieee80211_iface_combination *if_comb;
+ size_t n_if_comb;
u16 interface_modes = 0;
- size_t i;
+ size_t i, j;
+
+ if_comb = mac_info->if_comb;
+ n_if_comb = mac_info->n_if_comb;
- if (unlikely(!mac_info->limits || !mac_info->n_limits))
+ if (!if_comb || !n_if_comb)
return -ENOENT;
- if_comb->limits = mac_info->limits;
- if_comb->n_limits = mac_info->n_limits;
+ for (i = 0; i < n_if_comb; i++) {
+ if_comb[i].radar_detect_widths = mac_info->radar_detect_widths;
- for (i = 0; i < mac_info->n_limits; i++) {
- max_interfaces += mac_info->limits[i].max;
- interface_modes |= mac_info->limits[i].types;
+ for (j = 0; j < if_comb[i].n_limits; j++)
+ interface_modes |= if_comb[i].limits[j].types;
}
- if_comb->num_different_channels = 1;
- if_comb->beacon_int_infra_match = true;
- if_comb->max_interfaces = max_interfaces;
- if_comb->radar_detect_widths = mac_info->radar_detect_widths;
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = n_if_comb;
wiphy->interface_modes = interface_modes;
return 0;
@@ -860,7 +949,6 @@ static int qtnf_wiphy_setup_if_comb(struct wiphy *wiphy,
int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
{
struct wiphy *wiphy = priv_to_wiphy(mac);
- struct ieee80211_iface_combination *iface_comb = NULL;
int ret;
if (!wiphy) {
@@ -868,14 +956,6 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
return -EFAULT;
}
- iface_comb = kzalloc(sizeof(*iface_comb), GFP_KERNEL);
- if (!iface_comb)
- return -ENOMEM;
-
- ret = qtnf_wiphy_setup_if_comb(wiphy, iface_comb, &mac->macinfo);
- if (ret)
- goto out;
-
wiphy->frag_threshold = mac->macinfo.frag_thr;
wiphy->rts_threshold = mac->macinfo.rts_thr;
wiphy->retry_short = mac->macinfo.sretry_limit;
@@ -886,11 +966,13 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
wiphy->mgmt_stypes = qtnf_mgmt_stypes;
wiphy->max_remain_on_channel_duration = 5000;
-
- wiphy->iface_combinations = iface_comb;
- wiphy->n_iface_combinations = 1;
+ wiphy->max_acl_mac_addrs = mac->macinfo.max_acl_mac_addrs;
wiphy->max_num_csa_counters = 2;
+ ret = qtnf_wiphy_setup_if_comb(wiphy, &mac->macinfo);
+ if (ret)
+ goto out;
+
/* Initialize cipher suits */
wiphy->cipher_suites = qtnf_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(qtnf_cipher_suites);
@@ -924,6 +1006,10 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
}
+ strlcpy(wiphy->fw_version, hw_info->fw_version,
+ sizeof(wiphy->fw_version));
+ wiphy->hw_version = hw_info->hw_version;
+
ret = wiphy_register(wiphy);
if (ret < 0)
goto out;
@@ -935,12 +1021,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
ret = regulatory_hint(wiphy, hw_info->rd->alpha2);
out:
- if (ret) {
- kfree(iface_comb);
- return ret;
- }
-
- return 0;
+ return ret;
}
void qtnf_netdev_updown(struct net_device *ndev, bool up)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
index 66db26613b1f..b73425122a10 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
@@ -28,23 +28,4 @@ void qtnf_band_init_rates(struct ieee80211_supported_band *band);
void qtnf_band_setup_htvht_caps(struct qtnf_mac_info *macinfo,
struct ieee80211_supported_band *band);
-static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted)
-{
- struct cfg80211_scan_info info = {
- .aborted = aborted,
- };
-
- if (timer_pending(&mac->scan_timeout))
- del_timer_sync(&mac->scan_timeout);
-
- mutex_lock(&mac->mac_lock);
-
- if (mac->scan_req) {
- cfg80211_scan_done(mac->scan_req, &info);
- mac->scan_req = NULL;
- }
-
- mutex_unlock(&mac->mac_lock);
-}
-
#endif /* _QTN_FMAC_CFG80211_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 8bc8dd637315..deca0060eb27 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -162,6 +162,14 @@ static void qtnf_cmd_tlv_ie_set_add(struct sk_buff *cmd_skb, u8 frame_type,
memcpy(tlv->ie_data, buf, len);
}
+static inline size_t qtnf_cmd_acl_data_size(const struct cfg80211_acl_data *acl)
+{
+ size_t size = sizeof(struct qlink_acl_data) +
+ acl->n_acl_entries * sizeof(struct qlink_mac_address);
+
+ return size;
+}
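/*
 * As the computation above implies, qlink_acl_data ends in a flexible
 * array of MAC addresses, so its wire size is the fixed header plus
 * one qlink_mac_address per entry: an ACL with 3 entries occupies
 * sizeof(struct qlink_acl_data) + 3 * sizeof(struct qlink_mac_address)
 * bytes inside the command TLV.
 */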
+
static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
const struct cfg80211_ap_settings *s)
{
@@ -178,6 +186,10 @@ static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
if (cfg80211_chandef_valid(&s->chandef))
len += sizeof(struct qlink_tlv_chandef);
+ if (s->acl)
+ len += sizeof(struct qlink_tlv_hdr) +
+ qtnf_cmd_acl_data_size(s->acl);
+
if (len > (sizeof(struct qlink_cmd) + QTNF_MAX_CMD_BUF_SIZE)) {
pr_err("VIF%u.%u: cannot fit AP settings: %u\n",
vif->mac->macid, vif->vifid, len);
@@ -203,7 +215,7 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_START_AP,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
cmd = (struct qlink_cmd_start_ap *)cmd_skb->data;
@@ -247,7 +259,7 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
chtlv->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANDEF);
chtlv->hdr.len = cpu_to_le16(sizeof(*chtlv) -
sizeof(chtlv->hdr));
- qlink_chandef_cfg2q(&s->chandef, &chtlv->chan);
+ qlink_chandef_cfg2q(&s->chandef, &chtlv->chdef);
}
qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_BEACON_HEAD,
@@ -283,6 +295,16 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
memcpy(tlv->val, s->vht_cap, sizeof(*s->vht_cap));
}
+ if (s->acl) {
+ size_t acl_size = qtnf_cmd_acl_data_size(s->acl);
+ struct qlink_tlv_hdr *tlv =
+ skb_put(cmd_skb, sizeof(*tlv) + acl_size);
+
+ tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA);
+ tlv->len = cpu_to_le16(acl_size);
+ qlink_acl_data_cfg2q(s->acl, (struct qlink_acl_data *)tlv->val);
+ }
+
qtnf_bus_lock(vif->mac->bus);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
@@ -313,7 +335,7 @@ int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_STOP_AP,
sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -347,7 +369,7 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_REGISTER_MGMT,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -390,7 +412,7 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_SEND_MGMT_FRAME,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -436,7 +458,7 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_MGMT_SET_APPIE,
sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_cmd_tlv_ie_set_add(cmd_skb, frame_type, buf, len);
@@ -461,30 +483,8 @@ out:
}
static void
-qtnf_sta_info_parse_basic_counters(struct station_info *sinfo,
- const struct qlink_sta_stat_basic_counters *counters)
-{
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES) |
- BIT(NL80211_STA_INFO_TX_BYTES);
- sinfo->rx_bytes = get_unaligned_le64(&counters->rx_bytes);
- sinfo->tx_bytes = get_unaligned_le64(&counters->tx_bytes);
-
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS) |
- BIT(NL80211_STA_INFO_TX_PACKETS) |
- BIT(NL80211_STA_INFO_BEACON_RX);
- sinfo->rx_packets = get_unaligned_le32(&counters->rx_packets);
- sinfo->tx_packets = get_unaligned_le32(&counters->tx_packets);
- sinfo->rx_beacon = get_unaligned_le64(&counters->rx_beacons);
-
- sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC) |
- BIT(NL80211_STA_INFO_TX_FAILED);
- sinfo->rx_dropped_misc = get_unaligned_le32(&counters->rx_dropped);
- sinfo->tx_failed = get_unaligned_le32(&counters->tx_failed);
-}
-
-static void
qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
- const struct qlink_sta_info_rate *rate_src)
+ const struct qlink_sta_info_rate *rate_src)
{
rate_dst->legacy = get_unaligned_le16(&rate_src->rate) * 10;
@@ -493,22 +493,23 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
rate_dst->flags = 0;
switch (rate_src->bw) {
- case QLINK_STA_INFO_RATE_BW_5:
+ case QLINK_CHAN_WIDTH_5:
rate_dst->bw = RATE_INFO_BW_5;
break;
- case QLINK_STA_INFO_RATE_BW_10:
+ case QLINK_CHAN_WIDTH_10:
rate_dst->bw = RATE_INFO_BW_10;
break;
- case QLINK_STA_INFO_RATE_BW_20:
+ case QLINK_CHAN_WIDTH_20:
+ case QLINK_CHAN_WIDTH_20_NOHT:
rate_dst->bw = RATE_INFO_BW_20;
break;
- case QLINK_STA_INFO_RATE_BW_40:
+ case QLINK_CHAN_WIDTH_40:
rate_dst->bw = RATE_INFO_BW_40;
break;
- case QLINK_STA_INFO_RATE_BW_80:
+ case QLINK_CHAN_WIDTH_80:
rate_dst->bw = RATE_INFO_BW_80;
break;
- case QLINK_STA_INFO_RATE_BW_160:
+ case QLINK_CHAN_WIDTH_160:
rate_dst->bw = RATE_INFO_BW_160;
break;
default:
@@ -578,87 +579,125 @@ qtnf_sta_info_parse_flags(struct nl80211_sta_flag_update *dst,
}
static void
-qtnf_sta_info_parse_generic_info(struct station_info *sinfo,
- const struct qlink_sta_info_generic *info)
+qtnf_cmd_sta_info_parse(struct station_info *sinfo,
+ const struct qlink_tlv_hdr *tlv,
+ size_t resp_size)
{
- sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME) |
- BIT(NL80211_STA_INFO_INACTIVE_TIME);
- sinfo->connected_time = get_unaligned_le32(&info->connected_time);
- sinfo->inactive_time = get_unaligned_le32(&info->inactive_time);
+ const struct qlink_sta_stats *stats = NULL;
+ const u8 *map = NULL;
+ unsigned int map_len = 0;
+ unsigned int stats_len = 0;
+ u16 tlv_len;
+
+#define qtnf_sta_stat_avail(stat_name, bitn) \
+ (qtnf_utils_is_bit_set(map, bitn, map_len) && \
+ (offsetofend(struct qlink_sta_stats, stat_name) <= stats_len))
+
+ while (resp_size >= sizeof(*tlv)) {
+ tlv_len = le16_to_cpu(tlv->len);
+
+ switch (le16_to_cpu(tlv->type)) {
+ case QTN_TLV_ID_STA_STATS_MAP:
+ map_len = tlv_len;
+ map = tlv->val;
+ break;
+ case QTN_TLV_ID_STA_STATS:
+ stats_len = tlv_len;
+ stats = (const struct qlink_sta_stats *)tlv->val;
+ break;
+ default:
+ break;
+ }
- sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL) |
- BIT(NL80211_STA_INFO_SIGNAL_AVG);
- sinfo->signal = info->rssi - 120;
- sinfo->signal_avg = info->rssi_avg - QLINK_RSSI_OFFSET;
+ resp_size -= tlv_len + sizeof(*tlv);
+ tlv = (const struct qlink_tlv_hdr *)(tlv->val + tlv_len);
+ }
- if (info->rx_rate.rate) {
+ if (!map || !stats)
+ return;
+
+ if (qtnf_sta_stat_avail(inactive_time, QLINK_STA_INFO_INACTIVE_TIME)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME);
+ sinfo->inactive_time = le32_to_cpu(stats->inactive_time);
+ }
+
+ if (qtnf_sta_stat_avail(connected_time,
+ QLINK_STA_INFO_CONNECTED_TIME)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
+ sinfo->connected_time = le32_to_cpu(stats->connected_time);
+ }
+
+ if (qtnf_sta_stat_avail(signal, QLINK_STA_INFO_SIGNAL)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->signal = stats->signal - QLINK_RSSI_OFFSET;
+ }
+
+ if (qtnf_sta_stat_avail(signal_avg, QLINK_STA_INFO_SIGNAL_AVG)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ sinfo->signal_avg = stats->signal_avg - QLINK_RSSI_OFFSET;
+ }
+
+ if (qtnf_sta_stat_avail(rxrate, QLINK_STA_INFO_RX_BITRATE)) {
sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
- qtnf_sta_info_parse_rate(&sinfo->rxrate, &info->rx_rate);
+ qtnf_sta_info_parse_rate(&sinfo->rxrate, &stats->rxrate);
}
- if (info->tx_rate.rate) {
+ if (qtnf_sta_stat_avail(txrate, QLINK_STA_INFO_TX_BITRATE)) {
sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
- qtnf_sta_info_parse_rate(&sinfo->txrate, &info->tx_rate);
+ qtnf_sta_info_parse_rate(&sinfo->txrate, &stats->txrate);
}
- sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
- qtnf_sta_info_parse_flags(&sinfo->sta_flags, &info->state);
-}
+ if (qtnf_sta_stat_avail(sta_flags, QLINK_STA_INFO_STA_FLAGS)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
+ qtnf_sta_info_parse_flags(&sinfo->sta_flags, &stats->sta_flags);
+ }
-static int qtnf_cmd_sta_info_parse(struct station_info *sinfo,
- const u8 *payload, size_t payload_size)
-{
- const struct qlink_sta_stat_basic_counters *counters;
- const struct qlink_sta_info_generic *sta_info;
- u16 tlv_type;
- u16 tlv_value_len;
- size_t tlv_full_len;
- const struct qlink_tlv_hdr *tlv;
+ if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
+ sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes);
+ }
- sinfo->filled = 0;
+ if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
+ sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes);
+ }
- tlv = (const struct qlink_tlv_hdr *)payload;
- while (payload_size >= sizeof(struct qlink_tlv_hdr)) {
- tlv_type = le16_to_cpu(tlv->type);
- tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
- if (tlv_full_len > payload_size) {
- pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
- tlv_type, tlv_value_len);
- return -EINVAL;
- }
- switch (tlv_type) {
- case QTN_TLV_ID_STA_BASIC_COUNTERS:
- if (unlikely(tlv_value_len < sizeof(*counters))) {
- pr_err("invalid TLV size %.4X: %u\n",
- tlv_type, tlv_value_len);
- break;
- }
+ if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES64)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
+ sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes);
+ }
- counters = (void *)tlv->val;
- qtnf_sta_info_parse_basic_counters(sinfo, counters);
- break;
- case QTN_TLV_ID_STA_GENERIC_INFO:
- if (unlikely(tlv_value_len < sizeof(*sta_info)))
- break;
+ if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES64)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
+ sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes);
+ }
- sta_info = (void *)tlv->val;
- qtnf_sta_info_parse_generic_info(sinfo, sta_info);
- break;
- default:
- pr_warn("unexpected TLV type: %.4X\n", tlv_type);
- break;
- }
- payload_size -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
+ if (qtnf_sta_stat_avail(rx_packets, QLINK_STA_INFO_RX_PACKETS)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->rx_packets = le32_to_cpu(stats->rx_packets);
}
- if (payload_size) {
- pr_warn("malformed TLV buf; bytes left: %zu\n", payload_size);
- return -EINVAL;
+ if (qtnf_sta_stat_avail(tx_packets, QLINK_STA_INFO_TX_PACKETS)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ sinfo->tx_packets = le32_to_cpu(stats->tx_packets);
}
- return 0;
+ if (qtnf_sta_stat_avail(rx_beacon, QLINK_STA_INFO_BEACON_RX)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
+ sinfo->rx_beacon = le64_to_cpu(stats->rx_beacon);
+ }
+
+ if (qtnf_sta_stat_avail(rx_dropped_misc, QLINK_STA_INFO_RX_DROP_MISC)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC);
+ sinfo->rx_dropped_misc = le32_to_cpu(stats->rx_dropped_misc);
+ }
+
+ if (qtnf_sta_stat_avail(tx_failed, QLINK_STA_INFO_TX_FAILED)) {
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+ sinfo->tx_failed = le32_to_cpu(stats->tx_failed);
+ }
+
+#undef qtnf_sta_stat_avail
}
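/*
 * The qtnf_sta_stat_avail() macro above is a forward/backward
 * compatibility guard: a field is consumed only if (a) firmware set
 * its bit in the STA_STATS_MAP bitmap and (b) the reported stats blob
 * is long enough to contain it, checked via
 * offsetofend(struct qlink_sta_stats, field) <= stats_len.  An older
 * firmware that sends a shorter structure is therefore parsed safely,
 * with the missing fields simply left unreported.
 */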
int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
@@ -674,8 +713,7 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_GET_STA_INFO,
sizeof(*cmd));
-
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -715,7 +753,9 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
goto out;
}
- ret = qtnf_cmd_sta_info_parse(sinfo, resp->info, var_resp_len);
+ qtnf_cmd_sta_info_parse(sinfo,
+ (const struct qlink_tlv_hdr *)resp->info,
+ var_resp_len);
out:
qtnf_bus_unlock(vif->mac->bus);
@@ -738,7 +778,7 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
cmd_type,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -811,7 +851,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_DEL_INTF,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -908,6 +948,16 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
struct qtnf_hw_info *hwinfo = &bus->hw_info;
const struct qlink_tlv_hdr *tlv;
const struct qlink_tlv_reg_rule *tlv_rule;
+ const char *bld_name = NULL;
+ const char *bld_rev = NULL;
+ const char *bld_type = NULL;
+ const char *bld_label = NULL;
+ u32 bld_tmstamp = 0;
+ u32 plat_id = 0;
+ const char *hw_id = NULL;
+ const char *calibration_ver = NULL;
+ const char *uboot_ver = NULL;
+ u32 hw_ver = 0;
struct ieee80211_reg_rule *rule;
u16 tlv_type;
u16 tlv_value_len;
@@ -934,6 +984,10 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
hwinfo->rd->alpha2[0] = resp->alpha2[0];
hwinfo->rd->alpha2[1] = resp->alpha2[1];
+ bld_tmstamp = le32_to_cpu(resp->bld_tmstamp);
+ plat_id = le32_to_cpu(resp->plat_id);
+ hw_ver = le32_to_cpu(resp->hw_ver);
+
switch (resp->dfs_region) {
case QLINK_DFS_FCC:
hwinfo->rd->dfs_region = NL80211_DFS_FCC;
@@ -994,6 +1048,27 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
rule->flags = qtnf_cmd_resp_reg_rule_flags_parse(
le32_to_cpu(tlv_rule->flags));
break;
+ case QTN_TLV_ID_BUILD_NAME:
+ bld_name = (const void *)tlv->val;
+ break;
+ case QTN_TLV_ID_BUILD_REV:
+ bld_rev = (const void *)tlv->val;
+ break;
+ case QTN_TLV_ID_BUILD_TYPE:
+ bld_type = (const void *)tlv->val;
+ break;
+ case QTN_TLV_ID_BUILD_LABEL:
+ bld_label = (const void *)tlv->val;
+ break;
+ case QTN_TLV_ID_HW_ID:
+ hw_id = (const void *)tlv->val;
+ break;
+ case QTN_TLV_ID_CALIBRATION_VER:
+ calibration_ver = (const void *)tlv->val;
+ break;
+ case QTN_TLV_ID_UBOOT_VER:
+ uboot_ver = (const void *)tlv->val;
+ break;
default:
break;
}
@@ -1016,21 +1091,46 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
hwinfo->total_tx_chain, hwinfo->total_rx_chain,
hwinfo->hw_capab);
+ pr_info("\nBuild name: %s" \
+ "\nBuild revision: %s" \
+ "\nBuild type: %s" \
+ "\nBuild label: %s" \
+ "\nBuild timestamp: %lu" \
+ "\nPlatform ID: %lu" \
+ "\nHardware ID: %s" \
+ "\nCalibration version: %s" \
+ "\nU-Boot version: %s" \
+ "\nHardware version: 0x%08x",
+ bld_name, bld_rev, bld_type, bld_label,
+ (unsigned long)bld_tmstamp,
+ (unsigned long)plat_id,
+ hw_id, calibration_ver, uboot_ver, hw_ver);
+
+ strlcpy(hwinfo->fw_version, bld_label, sizeof(hwinfo->fw_version));
+ hwinfo->hw_version = hw_ver;
+
return 0;
}
static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
const u8 *tlv_buf, size_t tlv_buf_size)
{
- struct ieee80211_iface_limit *limits = NULL;
- const struct qlink_iface_limit *limit_record;
- size_t record_count = 0, rec = 0;
- u16 tlv_type, tlv_value_len;
- struct qlink_iface_comb_num *comb;
+ struct ieee80211_iface_combination *comb = NULL;
+ size_t n_comb = 0;
+ struct ieee80211_iface_limit *limits;
+ const struct qlink_iface_comb_num *comb_num;
+ const struct qlink_iface_limit_record *rec;
+ const struct qlink_iface_limit *lim;
+ u16 rec_len;
+ u16 tlv_type;
+ u16 tlv_value_len;
size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
-
- mac->macinfo.n_limits = 0;
+ u8 *ext_capa = NULL;
+ u8 *ext_capa_mask = NULL;
+ u8 ext_capa_len = 0;
+ u8 ext_capa_mask_len = 0;
+ int i = 0;
tlv = (const struct qlink_tlv_hdr *)tlv_buf;
while (tlv_buf_size >= sizeof(struct qlink_tlv_hdr)) {
@@ -1045,52 +1145,89 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
switch (tlv_type) {
case QTN_TLV_ID_NUM_IFACE_COMB:
- if (unlikely(tlv_value_len != sizeof(*comb)))
+ if (tlv_value_len != sizeof(*comb_num))
return -EINVAL;
- comb = (void *)tlv->val;
- record_count = le16_to_cpu(comb->iface_comb_num);
+ comb_num = (void *)tlv->val;
+
+ /* free earlier iface comb memory */
+ qtnf_mac_iface_comb_free(mac);
- mac->macinfo.n_limits = record_count;
- /* free earlier iface limits memory */
- kfree(mac->macinfo.limits);
- mac->macinfo.limits =
- kzalloc(sizeof(*mac->macinfo.limits) *
- record_count, GFP_KERNEL);
+ mac->macinfo.n_if_comb =
+ le32_to_cpu(comb_num->iface_comb_num);
- if (unlikely(!mac->macinfo.limits))
+ mac->macinfo.if_comb =
+ kcalloc(mac->macinfo.n_if_comb,
+ sizeof(*mac->macinfo.if_comb),
+ GFP_KERNEL);
+
+ if (!mac->macinfo.if_comb)
return -ENOMEM;
- limits = mac->macinfo.limits;
+ comb = mac->macinfo.if_comb;
+
+ pr_debug("MAC%u: %zu iface combinations\n",
+ mac->macid, mac->macinfo.n_if_comb);
+
break;
case QTN_TLV_ID_IFACE_LIMIT:
- if (unlikely(!limits)) {
- pr_warn("MAC%u: limits are not inited\n",
+ if (unlikely(!comb)) {
+ pr_warn("MAC%u: no combinations advertised\n",
mac->macid);
return -EINVAL;
}
- if (unlikely(tlv_value_len != sizeof(*limit_record))) {
- pr_warn("MAC%u: record size mismatch\n",
+ if (n_comb >= mac->macinfo.n_if_comb) {
+ pr_warn("MAC%u: combinations count exceeded\n",
mac->macid);
- return -EINVAL;
+ n_comb++;
+ break;
}
- limit_record = (void *)tlv->val;
- limits[rec].max = le16_to_cpu(limit_record->max_num);
- limits[rec].types = qlink_iface_type_to_nl_mask(
- le16_to_cpu(limit_record->type));
+ rec = (void *)tlv->val;
+ rec_len = sizeof(*rec) + rec->n_limits * sizeof(*lim);
+
+ if (unlikely(tlv_value_len != rec_len)) {
+ pr_warn("MAC%u: record %zu size mismatch\n",
+ mac->macid, n_comb);
+ return -EINVAL;
+ }
- /* supported modes: STA, AP */
- limits[rec].types &= BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_AP_VLAN) |
- BIT(NL80211_IFTYPE_STATION);
+ limits = kzalloc(sizeof(*limits) * rec->n_limits,
+ GFP_KERNEL);
+ if (!limits)
+ return -ENOMEM;
- pr_debug("MAC%u: MAX: %u; TYPES: %.4X\n", mac->macid,
- limits[rec].max, limits[rec].types);
+ comb[n_comb].num_different_channels =
+ rec->num_different_channels;
+ comb[n_comb].max_interfaces =
+ le16_to_cpu(rec->max_interfaces);
+ comb[n_comb].n_limits = rec->n_limits;
+ comb[n_comb].limits = limits;
+
+ for (i = 0; i < rec->n_limits; i++) {
+ lim = &rec->limits[i];
+ limits[i].max = le16_to_cpu(lim->max_num);
+ limits[i].types =
+ qlink_iface_type_to_nl_mask(le16_to_cpu(lim->type));
+ pr_debug("MAC%u: comb[%zu]: MAX:%u TYPES:%.4X\n",
+ mac->macid, n_comb,
+ limits[i].max, limits[i].types);
+ }
- if (limits[rec].types)
- rec++;
+ n_comb++;
+ break;
+ case WLAN_EID_EXT_CAPABILITY:
+ if (unlikely(tlv_value_len > U8_MAX))
+ return -EINVAL;
+ ext_capa = (u8 *)tlv->val;
+ ext_capa_len = tlv_value_len;
+ break;
+ case QTN_TLV_ID_EXT_CAPABILITY_MASK:
+ if (unlikely(tlv_value_len > U8_MAX))
+ return -EINVAL;
+ ext_capa_mask = (u8 *)tlv->val;
+ ext_capa_mask_len = tlv_value_len;
break;
default:
break;
@@ -1106,12 +1243,40 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
return -EINVAL;
}
- if (mac->macinfo.n_limits != rec) {
+ if (mac->macinfo.n_if_comb != n_comb) {
pr_err("MAC%u: combination mismatch: reported=%zu parsed=%zu\n",
- mac->macid, mac->macinfo.n_limits, rec);
+ mac->macid, mac->macinfo.n_if_comb, n_comb);
+ return -EINVAL;
+ }
+
+ if (ext_capa_len != ext_capa_mask_len) {
+ pr_err("MAC%u: ext_capa/_mask lengths mismatch: %u != %u\n",
+ mac->macid, ext_capa_len, ext_capa_mask_len);
return -EINVAL;
}
+ if (ext_capa_len > 0) {
+ ext_capa = kmemdup(ext_capa, ext_capa_len, GFP_KERNEL);
+ if (!ext_capa)
+ return -ENOMEM;
+
+ ext_capa_mask =
+ kmemdup(ext_capa_mask, ext_capa_mask_len, GFP_KERNEL);
+ if (!ext_capa_mask) {
+ kfree(ext_capa);
+ return -ENOMEM;
+ }
+ } else {
+ ext_capa = NULL;
+ ext_capa_mask = NULL;
+ }
+
+ kfree(mac->macinfo.extended_capabilities);
+ kfree(mac->macinfo.extended_capabilities_mask);
+ mac->macinfo.extended_capabilities = ext_capa;
+ mac->macinfo.extended_capabilities_mask = ext_capa_mask;
+ mac->macinfo.extended_capabilities_len = ext_capa_len;
+
return 0;
}
@@ -1143,6 +1308,7 @@ qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
mac_info->radar_detect_widths =
qlink_chan_width_mask_to_nl(le16_to_cpu(
resp_info->radar_detect_widths));
+ mac_info->max_acl_mac_addrs = le32_to_cpu(resp_info->max_acl_mac_addrs);
memcpy(&mac_info->ht_cap_mod_mask, &resp_info->ht_cap_mod_mask,
sizeof(mac_info->ht_cap_mod_mask));
@@ -1186,7 +1352,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
size_t tlv_len;
size_t tlv_dlen;
const struct qlink_tlv_hdr *tlv;
- const struct qlink_tlv_channel *qchan;
+ const struct qlink_channel *qchan;
struct ieee80211_channel *chan;
unsigned int chidx = 0;
u32 qflags;
@@ -1232,7 +1398,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
switch (tlv_type) {
case QTN_TLV_ID_CHANNEL:
- if (unlikely(tlv_len != sizeof(*qchan))) {
+ if (unlikely(tlv_dlen != sizeof(*qchan))) {
pr_err("invalid channel TLV len %zu\n",
tlv_len);
goto error_ret;
@@ -1243,7 +1409,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
goto error_ret;
}
- qchan = (const struct qlink_tlv_channel *)tlv;
+ qchan = (const struct qlink_channel *)tlv->val;
chan = &band->channels[chidx++];
qflags = le32_to_cpu(qchan->flags);
@@ -1490,7 +1656,7 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
QLINK_CMD_MAC_INFO,
sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(mac->bus);
@@ -1528,7 +1694,7 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
QLINK_CMD_GET_HW_INFO,
sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(bus);
@@ -1708,7 +1874,7 @@ int qtnf_cmd_send_init_fw(struct qtnf_bus *bus)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
QLINK_CMD_FW_INIT,
sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(bus);
@@ -1757,7 +1923,7 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_ADD_KEY,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -1810,7 +1976,7 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_DEL_KEY,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -1851,7 +2017,7 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_SET_DEFAULT_KEY,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -1886,7 +2052,7 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_SET_DEFAULT_MGMT_KEY,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -1941,28 +2107,24 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_CHANGE_STA,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
cmd = (struct qlink_cmd_change_sta *)cmd_skb->data;
ether_addr_copy(cmd->sta_addr, mac);
+ cmd->flag_update.mask =
+ cpu_to_le32(qtnf_encode_sta_flags(params->sta_flags_mask));
+ cmd->flag_update.value =
+ cpu_to_le32(qtnf_encode_sta_flags(params->sta_flags_set));
switch (vif->wdev.iftype) {
case NL80211_IFTYPE_AP:
cmd->if_type = cpu_to_le16(QLINK_IFTYPE_AP);
- cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags(
- params->sta_flags_mask));
- cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags(
- params->sta_flags_set));
break;
case NL80211_IFTYPE_STATION:
cmd->if_type = cpu_to_le16(QLINK_IFTYPE_STATION);
- cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags(
- params->sta_flags_mask));
- cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags(
- params->sta_flags_set));
break;
default:
pr_err("unsupported iftype %d\n", vif->wdev.iftype);
@@ -1997,7 +2159,7 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_DEL_STA,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -2037,8 +2199,8 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
qchan = skb_put_zero(cmd_skb, sizeof(*qchan));
qchan->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
qchan->hdr.len = cpu_to_le16(sizeof(*qchan) - sizeof(qchan->hdr));
- qchan->center_freq = cpu_to_le16(sc->center_freq);
- qchan->hw_value = cpu_to_le16(sc->hw_value);
+ qchan->chan.center_freq = cpu_to_le16(sc->center_freq);
+ qchan->chan.hw_value = cpu_to_le16(sc->hw_value);
if (sc->flags & IEEE80211_CHAN_NO_IR)
flags |= QLINK_CHAN_NO_IR;
@@ -2046,7 +2208,7 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
if (sc->flags & IEEE80211_CHAN_RADAR)
flags |= QLINK_CHAN_RADAR;
- qchan->flags = cpu_to_le32(flags);
+ qchan->chan.flags = cpu_to_le32(flags);
}
int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
@@ -2067,7 +2229,7 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
QLINK_CMD_SCAN,
sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(mac->bus);
@@ -2137,7 +2299,7 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_CONNECT,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
cmd = (struct qlink_cmd_connect *)cmd_skb->data;
@@ -2239,7 +2401,7 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_DISCONNECT,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
@@ -2273,7 +2435,7 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_UPDOWN_INTF,
sizeof(*cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
cmd = (struct qlink_cmd_updown *)cmd_skb->data;
@@ -2434,8 +2596,7 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, vif->vifid,
QLINK_CMD_CHAN_SWITCH,
sizeof(*cmd));
-
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(mac->bus);
@@ -2487,7 +2648,7 @@ int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef)
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_CHAN_GET,
sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
+ if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(bus);
@@ -2512,3 +2673,83 @@ out:
consume_skb(resp_skb);
return ret;
}
+
+int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
+ const struct cfg80211_chan_def *chdef,
+ u32 cac_time_ms)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ struct sk_buff *cmd_skb;
+ struct qlink_cmd_start_cac *cmd;
+ int ret;
+ u16 res_code;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_START_CAC,
+ sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_start_cac *)cmd_skb->data;
+ cmd->cac_time_ms = cpu_to_le32(cac_time_ms);
+ qlink_chandef_cfg2q(chdef, &cmd->chan);
+
+ qtnf_bus_lock(bus);
+ ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+ qtnf_bus_unlock(bus);
+
+ if (ret)
+ return ret;
+
+ switch (res_code) {
+ case QLINK_CMD_RESULT_OK:
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
+ const struct cfg80211_acl_data *params)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ struct sk_buff *cmd_skb;
+ struct qlink_tlv_hdr *tlv;
+ size_t acl_size = qtnf_cmd_acl_data_size(params);
+ u16 res_code;
+ int ret;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_SET_MAC_ACL,
+ sizeof(struct qlink_cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ tlv = skb_put(cmd_skb, sizeof(*tlv) + acl_size);
+ tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA);
+ tlv->len = cpu_to_le16(acl_size);
+ qlink_acl_data_cfg2q(params, (struct qlink_acl_data *)tlv->val);
+
+ qtnf_bus_lock(bus);
+ ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+ qtnf_bus_unlock(bus);
+
+ if (unlikely(ret))
+ return ret;
+
+ switch (res_code) {
+ case QLINK_CMD_RESULT_OK:
+ break;
+ case QLINK_CMD_RESULT_INVALID:
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index d981a76e5835..69a7d56f7e58 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -76,5 +76,10 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
struct cfg80211_csa_settings *params);
int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef);
+int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
+ const struct cfg80211_chan_def *chdef,
+ u32 cac_time_ms);
+int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
+ const struct cfg80211_acl_data *params);
#endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 3423dc51198b..cf26c15a84f8 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -119,9 +119,38 @@ qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Netdev handler for getting stats.
*/
-static struct net_device_stats *qtnf_netdev_get_stats(struct net_device *dev)
+static void qtnf_netdev_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
{
- return &dev->stats;
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ unsigned int start;
+ int cpu;
+
+ netdev_stats_to_stats64(stats, &ndev->stats);
+
+ if (!vif->stats64)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ struct pcpu_sw_netstats *stats64;
+ u64 rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes;
+
+ stats64 = per_cpu_ptr(vif->stats64, cpu);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats64->syncp);
+ rx_packets = stats64->rx_packets;
+ rx_bytes = stats64->rx_bytes;
+ tx_packets = stats64->tx_packets;
+ tx_bytes = stats64->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
}
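/*
 * The fetch_begin/fetch_retry pair above is the standard seqcount
 * pattern for tearing-free reads of 64-bit per-cpu counters on 32-bit
 * hosts: the read is retried whenever a writer updated the counters in
 * between, so the summed totals stay internally consistent without a
 * lock.  On 64-bit builds the begin/retry calls compile away.
 */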
/* Netdev handler for transmission timeout.
@@ -156,7 +185,7 @@ const struct net_device_ops qtnf_netdev_ops = {
.ndo_stop = qtnf_netdev_close,
.ndo_start_xmit = qtnf_netdev_hard_start_xmit,
.ndo_tx_timeout = qtnf_netdev_tx_timeout,
- .ndo_get_stats = qtnf_netdev_get_stats,
+ .ndo_get_stats64 = qtnf_netdev_get_stats64,
};
static int qtnf_mac_init_single_band(struct wiphy *wiphy,
@@ -233,6 +262,23 @@ struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac)
return vif;
}
+void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac)
+{
+ struct ieee80211_iface_combination *comb;
+ int i;
+
+ if (mac->macinfo.if_comb) {
+ for (i = 0; i < mac->macinfo.n_if_comb; i++) {
+ comb = &mac->macinfo.if_comb[i];
+ kfree(comb->limits);
+ comb->limits = NULL;
+ }
+
+ kfree(mac->macinfo.if_comb);
+ mac->macinfo.if_comb = NULL;
+ }
+}
+
static void qtnf_vif_reset_handler(struct work_struct *work)
{
struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work);
@@ -258,13 +304,44 @@ static void qtnf_mac_init_primary_intf(struct qtnf_wmac *mac)
{
struct qtnf_vif *vif = &mac->iflist[QTNF_PRIMARY_VIF_IDX];
- vif->wdev.iftype = NL80211_IFTYPE_AP;
+ vif->wdev.iftype = NL80211_IFTYPE_STATION;
vif->bss_priority = QTNF_DEF_BSS_PRIORITY;
vif->wdev.wiphy = priv_to_wiphy(mac);
INIT_WORK(&vif->reset_work, qtnf_vif_reset_handler);
vif->cons_tx_timeout_cnt = 0;
}
+static void qtnf_mac_scan_finish(struct qtnf_wmac *mac, bool aborted)
+{
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+
+ mutex_lock(&mac->mac_lock);
+
+ if (mac->scan_req) {
+ cfg80211_scan_done(mac->scan_req, &info);
+ mac->scan_req = NULL;
+ }
+
+ mutex_unlock(&mac->mac_lock);
+}
+
+void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted)
+{
+ cancel_delayed_work_sync(&mac->scan_timeout);
+ qtnf_mac_scan_finish(mac, aborted);
+}
+
+static void qtnf_mac_scan_timeout(struct work_struct *work)
+{
+ struct qtnf_wmac *mac =
+ container_of(work, struct qtnf_wmac, scan_timeout.work);
+
+ pr_warn("MAC%d: scan timed out\n", mac->macid);
+ qtnf_mac_scan_finish(mac, true);
+}
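/*
 * The scan timeout is handled via delayed work rather than a timer
 * because the completion path (qtnf_mac_scan_finish) takes mac_lock, a
 * mutex that may sleep: timer callbacks run in atomic context where
 * sleeping is forbidden, while delayed work runs in process context.
 */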
+
static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
unsigned int macid)
{
@@ -288,7 +365,12 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
mac->iflist[i].vifid = i;
qtnf_sta_list_init(&mac->iflist[i].sta_list);
mutex_init(&mac->mac_lock);
- timer_setup(&mac->scan_timeout, NULL, 0);
+ INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout);
+ mac->iflist[i].stats64 =
+ netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!mac->iflist[i].stats64)
+			pr_warn("VIF%u.%u: per-cpu stats allocation failed\n",
+ macid, i);
}
qtnf_mac_init_primary_intf(mac);
@@ -297,9 +379,12 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
return mac;
}
+static const struct ethtool_ops qtnf_ethtool_ops = {
+ .get_drvinfo = cfg80211_get_drvinfo,
+};
+
int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
- const char *name, unsigned char name_assign_type,
- enum nl80211_iftype iftype)
+ const char *name, unsigned char name_assign_type)
{
struct wiphy *wiphy = priv_to_wiphy(mac);
struct net_device *dev;
@@ -320,12 +405,12 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
dev->needs_free_netdev = true;
dev_net_set(dev, wiphy_net(wiphy));
dev->ieee80211_ptr = &vif->wdev;
- dev->ieee80211_ptr->iftype = iftype;
ether_addr_copy(dev->dev_addr, vif->mac_addr);
SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = QTNF_DEF_WDOG_TIMEOUT;
dev->tx_queue_len = 100;
+ dev->ethtool_ops = &qtnf_ethtool_ops;
qdev_vif = netdev_priv(dev);
*((void **)qdev_vif) = vif;
@@ -366,6 +451,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
}
rtnl_unlock();
qtnf_sta_list_free(&vif->sta_list);
+ free_percpu(vif->stats64);
}
if (mac->wiphy_registered)
@@ -382,8 +468,9 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
wiphy->bands[band] = NULL;
}
- kfree(mac->macinfo.limits);
- kfree(wiphy->iface_combinations);
+ qtnf_mac_iface_comb_free(mac);
+ kfree(mac->macinfo.extended_capabilities);
+ kfree(mac->macinfo.extended_capabilities_mask);
wiphy_free(wiphy);
bus->mac[macid] = NULL;
}
@@ -418,7 +505,7 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
goto error;
}
- ret = qtnf_cmd_send_add_intf(vif, NL80211_IFTYPE_AP, vif->mac_addr);
+ ret = qtnf_cmd_send_add_intf(vif, vif->wdev.iftype, vif->mac_addr);
if (ret) {
pr_err("MAC%u: failed to add VIF\n", macid);
goto error;
@@ -446,8 +533,7 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
rtnl_lock();
- ret = qtnf_core_net_attach(mac, vif, "wlan%d", NET_NAME_ENUM,
- NL80211_IFTYPE_AP);
+ ret = qtnf_core_net_attach(mac, vif, "wlan%d", NET_NAME_ENUM);
rtnl_unlock();
if (ret) {
@@ -644,6 +730,46 @@ void qtnf_wake_all_queues(struct net_device *ndev)
}
EXPORT_SYMBOL_GPL(qtnf_wake_all_queues);
+void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ struct pcpu_sw_netstats *stats64;
+
+ if (unlikely(!vif || !vif->stats64)) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+ return;
+ }
+
+ stats64 = this_cpu_ptr(vif->stats64);
+
+ u64_stats_update_begin(&stats64->syncp);
+ stats64->rx_packets++;
+ stats64->rx_bytes += skb->len;
+ u64_stats_update_end(&stats64->syncp);
+}
+EXPORT_SYMBOL_GPL(qtnf_update_rx_stats);
+
+void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ struct pcpu_sw_netstats *stats64;
+
+ if (unlikely(!vif || !vif->stats64)) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ return;
+ }
+
+ stats64 = this_cpu_ptr(vif->stats64);
+
+ u64_stats_update_begin(&stats64->syncp);
+ stats64->tx_packets++;
+ stats64->tx_bytes += skb->len;
+ u64_stats_update_end(&stats64->syncp);
+}
+EXPORT_SYMBOL_GPL(qtnf_update_tx_stats);
+
MODULE_AUTHOR("Quantenna Communications");
MODULE_DESCRIPTION("Quantenna 802.11 wireless LAN FullMAC driver.");
MODULE_LICENSE("GPL");
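These update helpers only write the per-CPU counters; a hedged sketch of the matching read side, where an ndo_get_stats64 handler folds the counters under the u64_stats seqcount so 64-bit values stay consistent on 32-bit hosts (the helper name is illustrative, not part of this patch):

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

static void qtnf_fold_vif_stats64(struct rtnl_link_stats64 *stats,
				  struct pcpu_sw_netstats __percpu *pcpu)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_sw_netstats *p = per_cpu_ptr(pcpu, cpu);
		u64 rxp, rxb, txp, txb;
		unsigned int start;

		do {
			/* retry if an updater raced with this read */
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rxp = p->rx_packets;
			rxb = p->rx_bytes;
			txp = p->tx_packets;
			txb = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rxp;
		stats->rx_bytes += rxb;
		stats->tx_packets += txp;
		stats->tx_bytes += txb;
	}
}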
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 1b7bc0318f3e..3b884c80b6ab 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -88,6 +88,9 @@ struct qtnf_vif {
struct work_struct reset_work;
struct qtnf_sta_list sta_list;
unsigned long cons_tx_timeout_cnt;
+ int generation;
+
+ struct pcpu_sw_netstats __percpu *stats64;
};
struct qtnf_mac_info {
@@ -102,10 +105,14 @@ struct qtnf_mac_info {
u8 sretry_limit;
u8 coverage_class;
u8 radar_detect_widths;
+ u32 max_acl_mac_addrs;
struct ieee80211_ht_cap ht_cap_mod_mask;
struct ieee80211_vht_cap vht_cap_mod_mask;
- struct ieee80211_iface_limit *limits;
- size_t n_limits;
+ struct ieee80211_iface_combination *if_comb;
+ size_t n_if_comb;
+ u8 *extended_capabilities;
+ u8 *extended_capabilities_mask;
+ u8 extended_capabilities_len;
};
struct qtnf_chan_stats {
@@ -126,7 +133,7 @@ struct qtnf_wmac {
struct qtnf_vif iflist[QTNF_MAX_INTF];
struct cfg80211_scan_request *scan_req;
struct mutex mac_lock; /* lock during wmac specific ops */
- struct timer_list scan_timeout;
+ struct delayed_work scan_timeout;
};
struct qtnf_hw_info {
@@ -138,14 +145,16 @@ struct qtnf_hw_info {
struct ieee80211_regdomain *rd;
u8 total_tx_chain;
u8 total_rx_chain;
+ char fw_version[ETHTOOL_FWVERS_LEN];
+ u32 hw_version;
};
struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac);
+void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac);
struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
- const char *name, unsigned char name_assign_type,
- enum nl80211_iftype iftype);
+ const char *name, unsigned char name_assign_type);
void qtnf_main_work_queue(struct work_struct *work);
int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed);
int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac);
@@ -153,9 +162,13 @@ int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac);
struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid);
struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb);
void qtnf_wake_all_queues(struct net_device *ndev);
+void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb);
+void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb);
+
void qtnf_virtual_intf_cleanup(struct net_device *ndev);
void qtnf_netdev_updown(struct net_device *ndev, bool up);
+void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted);
static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)
{
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 4abc6d9ed560..bcd415f96412 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -59,10 +59,11 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
pr_debug("VIF%u.%u: MAC:%pM FC:%x\n", mac->macid, vif->vifid, sta_addr,
frame_control);
- qtnf_sta_list_add(&vif->sta_list, sta_addr);
+ qtnf_sta_list_add(vif, sta_addr);
sinfo.assoc_req_ies = NULL;
sinfo.assoc_req_ies_len = 0;
+ sinfo.generation = vif->generation;
payload_len = len - sizeof(*sta_assoc);
tlv = (const struct qlink_tlv_hdr *)sta_assoc->ies;
@@ -132,7 +133,7 @@ qtnf_event_handle_sta_deauth(struct qtnf_wmac *mac, struct qtnf_vif *vif,
pr_debug("VIF%u.%u: MAC:%pM reason:%x\n", mac->macid, vif->vifid,
sta_addr, reason);
- if (qtnf_sta_list_del(&vif->sta_list, sta_addr))
+ if (qtnf_sta_list_del(vif, sta_addr))
cfg80211_del_sta(vif->netdev, sta_deauth->sta_addr,
GFP_KERNEL);
@@ -237,9 +238,8 @@ qtnf_event_handle_mgmt_received(struct qtnf_vif *vif,
pr_debug("%s LEN:%u FC:%.4X SA:%pM\n", vif->netdev->name, frame_len,
le16_to_cpu(frame->frame_control), frame->addr2);
- cfg80211_rx_mgmt(&vif->wdev, le32_to_cpu(rxmgmt->freq),
- le32_to_cpu(rxmgmt->sig_dbm), rxmgmt->frame_data,
- frame_len, flags);
+ cfg80211_rx_mgmt(&vif->wdev, le32_to_cpu(rxmgmt->freq), rxmgmt->sig_dbm,
+ rxmgmt->frame_data, frame_len, flags);
return 0;
}
@@ -324,7 +324,7 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
sr->bssid, get_unaligned_le64(&sr->tsf),
le16_to_cpu(sr->capab),
le16_to_cpu(sr->bintval), ies, ies_len,
- sr->signal, GFP_KERNEL);
+ DBM_TO_MBM(sr->sig_dbm), GFP_KERNEL);
if (!bss)
return -ENOMEM;
@@ -369,7 +369,8 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
qlink_chandef_q2cfg(wiphy, &data->chan, &chandef);
if (!cfg80211_chandef_valid(&chandef)) {
- pr_err("MAC%u: bad channel f1=%u f2=%u bw=%u\n", mac->macid,
+ pr_err("MAC%u: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
+ mac->macid, chandef.chan->center_freq,
chandef.center_freq1, chandef.center_freq2,
chandef.width);
return -EINVAL;
@@ -394,6 +395,63 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
return 0;
}
+static int qtnf_event_handle_radar(struct qtnf_vif *vif,
+ const struct qlink_event_radar *ev,
+ u16 len)
+{
+ struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+ struct cfg80211_chan_def chandef;
+
+ if (len < sizeof(*ev)) {
+ pr_err("MAC%u: payload is too short\n", vif->mac->macid);
+ return -EINVAL;
+ }
+
+ if (!wiphy->registered || !vif->netdev)
+ return 0;
+
+ qlink_chandef_q2cfg(wiphy, &ev->chan, &chandef);
+
+ if (!cfg80211_chandef_valid(&chandef)) {
+ pr_err("MAC%u: bad channel f1=%u f2=%u bw=%u\n",
+ vif->mac->macid,
+ chandef.center_freq1, chandef.center_freq2,
+ chandef.width);
+ return -EINVAL;
+ }
+
+ pr_info("%s: radar event=%u f1=%u f2=%u bw=%u\n",
+ vif->netdev->name, ev->event,
+ chandef.center_freq1, chandef.center_freq2,
+ chandef.width);
+
+ switch (ev->event) {
+ case QLINK_RADAR_DETECTED:
+ cfg80211_radar_event(wiphy, &chandef, GFP_KERNEL);
+ break;
+ case QLINK_RADAR_CAC_FINISHED:
+ if (!vif->wdev.cac_started)
+ break;
+
+ cfg80211_cac_event(vif->netdev, &chandef,
+ NL80211_RADAR_CAC_FINISHED, GFP_KERNEL);
+ break;
+ case QLINK_RADAR_CAC_ABORTED:
+ if (!vif->wdev.cac_started)
+ break;
+
+ cfg80211_cac_event(vif->netdev, &chandef,
+ NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
+ break;
+ default:
+ pr_warn("%s: unhandled radar event %u\n",
+ vif->netdev->name, ev->event);
+ break;
+ }
+
+ return 0;
+}
+
static int qtnf_event_parse(struct qtnf_wmac *mac,
const struct sk_buff *event_skb)
{
@@ -448,6 +506,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac,
ret = qtnf_event_handle_freq_change(mac, (const void *)event,
event_len);
break;
+ case QLINK_EVENT_RADAR:
+ ret = qtnf_event_handle_radar(vif, (const void *)event,
+ event_len);
+ break;
default:
pr_warn("unknown event type: %x\n", event_id);
break;
@@ -479,9 +541,9 @@ static int qtnf_event_process_skb(struct qtnf_bus *bus,
if (unlikely(!mac))
return -ENXIO;
- qtnf_bus_lock(bus);
+ rtnl_lock();
res = qtnf_event_parse(mac, skb);
- qtnf_bus_unlock(bus);
+ rtnl_unlock();
return res;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
index 7e487622d87d..6f6190964320 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
@@ -615,8 +615,7 @@ static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv)
PCI_DMA_TODEVICE);
if (skb->dev) {
- skb->dev->stats.tx_packets++;
- skb->dev->stats.tx_bytes += skb->len;
+ qtnf_update_tx_stats(skb->dev, skb);
if (unlikely(priv->tx_stopped)) {
qtnf_wake_all_queues(skb->dev);
priv->tx_stopped = 0;
@@ -855,9 +854,7 @@ static int qtnf_rx_poll(struct napi_struct *napi, int budget)
skb_put(skb, psize);
ndev = qtnf_classify_skb(bus, skb);
if (likely(ndev)) {
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += skb->len;
-
+ qtnf_update_rx_stats(ndev, skb);
skb->protocol = eth_type_trans(skb, ndev);
napi_gro_receive(napi, skb);
} else {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index a432fb001c41..9bf3ae4d1b3b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -19,7 +19,7 @@
#include <linux/ieee80211.h>
-#define QLINK_PROTO_VER 6
+#define QLINK_PROTO_VER 11
#define QLINK_MACID_RSVD 0xFF
#define QLINK_VIFID_RSVD 0xFF
@@ -122,17 +122,49 @@ enum qlink_channel_width {
};
/**
+ * struct qlink_channel - qlink control channel definition
+ *
+ * @hw_value: hardware-specific value for the channel
+ * @center_freq: center frequency in MHz
+ * @flags: channel flags from &enum qlink_channel_flags
+ * @band: band this channel belongs to
+ * @max_antenna_gain: maximum antenna gain in dBi
+ * @max_power: maximum transmission power (in dBm)
+ * @max_reg_power: maximum regulatory transmission power (in dBm)
+ * @dfs_cac_ms: DFS CAC time in milliseconds
+ * @dfs_state: current state of this channel.
+ * Only relevant if radar is required on this channel.
+ * @beacon_found: helper to regulatory code to indicate when a beacon
+ * has been found on this channel. Use regulatory_hint_found_beacon()
+ *   to enable this; it is useful only on the 5 GHz band.
+ */
+struct qlink_channel {
+ __le16 hw_value;
+ __le16 center_freq;
+ __le32 flags;
+ u8 band;
+ u8 max_antenna_gain;
+ u8 max_power;
+ u8 max_reg_power;
+ __le32 dfs_cac_ms;
+ u8 dfs_state;
+ u8 beacon_found;
+ u8 rsvd[2];
+} __packed;
+
+/**
* struct qlink_chandef - qlink channel definition
*
+ * @chan: primary channel definition
* @center_freq1: center frequency of first segment
* @center_freq2: center frequency of second segment (80+80 only)
* @width: channel width, one of @enum qlink_channel_width
*/
struct qlink_chandef {
+ struct qlink_channel chan;
__le16 center_freq1;
__le16 center_freq2;
u8 width;
- u8 rsvd[3];
+ u8 rsvd;
} __packed;
#define QLINK_MAX_NR_CIPHER_SUITES 5
@@ -153,6 +185,17 @@ struct qlink_auth_encr {
u8 rsvd[2];
} __packed;
+/**
+ * struct qlink_sta_info_state - station flags mask/value
+ *
+ * @mask: STA flags mask, bitmap of &enum qlink_sta_flags
+ * @value: STA flags values, bitmap of &enum qlink_sta_flags
+ */
+struct qlink_sta_info_state {
+ __le32 mask;
+ __le32 value;
+} __packed;
+
/* QLINK Command messages related definitions
*/
@@ -173,6 +216,7 @@ struct qlink_auth_encr {
* @QLINK_CMD_REG_NOTIFY: notify device about regulatory domain change. This
* command is supported only if device reports QLINK_HW_SUPPORTS_REG_UPDATE
* capability.
+ * @QLINK_CMD_START_CAC: start radar detection procedure on a specified channel.
*/
enum qlink_cmd_type {
QLINK_CMD_FW_INIT = 0x0001,
@@ -192,8 +236,10 @@ enum qlink_cmd_type {
QLINK_CMD_BAND_INFO_GET = 0x001A,
QLINK_CMD_CHAN_SWITCH = 0x001B,
QLINK_CMD_CHAN_GET = 0x001C,
+ QLINK_CMD_START_CAC = 0x001D,
QLINK_CMD_START_AP = 0x0021,
QLINK_CMD_STOP_AP = 0x0022,
+ QLINK_CMD_SET_MAC_ACL = 0x0023,
QLINK_CMD_GET_STA_INFO = 0x0030,
QLINK_CMD_ADD_KEY = 0x0040,
QLINK_CMD_DEL_KEY = 0x0041,
@@ -368,16 +414,14 @@ struct qlink_cmd_set_def_mgmt_key {
/**
* struct qlink_cmd_change_sta - data for QLINK_CMD_CHANGE_STA command
*
- * @sta_flags_mask: STA flags mask, bitmap of &enum qlink_sta_flags
- * @sta_flags_set: STA flags values, bitmap of &enum qlink_sta_flags
+ * @flag_update: STA flags to update
* @if_type: Mode of interface operation, one of &enum qlink_iface_type
* @vlanid: VLAN ID to assign to specific STA
* @sta_addr: address of the STA for which parameters are set.
*/
struct qlink_cmd_change_sta {
struct qlink_cmd chdr;
- __le32 sta_flags_mask;
- __le32 sta_flags_set;
+ struct qlink_sta_info_state flag_update;
__le16 if_type;
__le16 vlanid;
u8 sta_addr[ETH_ALEN];
@@ -585,6 +629,40 @@ struct qlink_cmd_start_ap {
u8 info[0];
} __packed;
+/**
+ * struct qlink_cmd_start_cac - data for QLINK_CMD_START_CAC command
+ *
+ * @chan: a channel to start a radar detection procedure on.
+ * @cac_time_ms: CAC time in milliseconds.
+ */
+struct qlink_cmd_start_cac {
+ struct qlink_cmd chdr;
+ struct qlink_chandef chan;
+ __le32 cac_time_ms;
+} __packed;
+
+enum qlink_acl_policy {
+ QLINK_ACL_POLICY_ACCEPT_UNLESS_LISTED,
+ QLINK_ACL_POLICY_DENY_UNLESS_LISTED,
+};
+
+struct qlink_mac_address {
+ u8 addr[ETH_ALEN];
+} __packed;
+
+/**
+ * struct qlink_acl_data - ACL data
+ *
+ * @policy: filter policy, one of &enum qlink_acl_policy.
+ * @num_entries: number of MAC addresses in array.
+ * @mac_address: MAC addresses array.
+ */
+struct qlink_acl_data {
+ __le32 policy;
+ __le32 num_entries;
+ struct qlink_mac_address mac_addrs[0];
+} __packed;
+
/* QLINK Command Responses messages related definitions
*/
@@ -646,6 +724,7 @@ struct qlink_resp_get_mac_info {
struct ieee80211_ht_cap ht_cap_mod_mask;
__le16 max_ap_assoc_sta;
__le16 radar_detect_widths;
+ __le32 max_acl_mac_addrs;
u8 bands_cap;
u8 rsvd[1];
u8 var_info[0];
@@ -685,6 +764,9 @@ struct qlink_resp_get_hw_info {
struct qlink_resp rhdr;
__le32 fw_ver;
__le32 hw_capab;
+ __le32 bld_tmstamp;
+ __le32 plat_id;
+ __le32 hw_ver;
__le16 ql_proto_ver;
u8 num_mac;
u8 mac_bitmap;
@@ -709,17 +791,27 @@ struct qlink_resp_manage_intf {
struct qlink_intf_info intf_info;
} __packed;
+enum qlink_sta_info_rate_flags {
+ QLINK_STA_INFO_RATE_FLAG_HT_MCS = BIT(0),
+ QLINK_STA_INFO_RATE_FLAG_VHT_MCS = BIT(1),
+ QLINK_STA_INFO_RATE_FLAG_SHORT_GI = BIT(2),
+ QLINK_STA_INFO_RATE_FLAG_60G = BIT(3),
+};
+
/**
* struct qlink_resp_get_sta_info - response for QLINK_CMD_GET_STA_INFO command
*
* Response data containing statistics for specified STA.
*
* @sta_addr: MAC address of STA the response carries statistic for.
- * @info: statistics for specified STA.
+ * @info: variable-length TLV payload carrying statistics for the STA.
*/
struct qlink_resp_get_sta_info {
struct qlink_resp rhdr;
u8 sta_addr[ETH_ALEN];
+ u8 rsvd[2];
u8 info[0];
} __packed;
@@ -782,6 +874,7 @@ enum qlink_event_type {
QLINK_EVENT_BSS_JOIN = 0x0026,
QLINK_EVENT_BSS_LEAVE = 0x0027,
QLINK_EVENT_FREQ_CHANGE = 0x0028,
+ QLINK_EVENT_RADAR = 0x0029,
};
/**
@@ -869,15 +962,16 @@ enum qlink_rxmgmt_flags {
* struct qlink_event_rxmgmt - data for QLINK_EVENT_MGMT_RECEIVED event
*
* @freq: Frequency on which the frame was received in MHz.
- * @sig_dbm: signal strength in dBm.
* @flags: bitmap of &enum qlink_rxmgmt_flags.
+ * @sig_dbm: signal strength in dBm.
* @frame_data: data of Rx'd frame itself.
*/
struct qlink_event_rxmgmt {
struct qlink_event ehdr;
__le32 freq;
- __le32 sig_dbm;
__le32 flags;
+ s8 sig_dbm;
+ u8 rsvd[3];
u8 frame_data[0];
} __packed;
@@ -889,7 +983,7 @@ struct qlink_event_rxmgmt {
* event was generated was discovered.
* @capab: capabilities field.
* @bintval: beacon interval announced by discovered BSS.
- * @signal: signal strength.
+ * @sig_dbm: signal strength in dBm.
* @bssid: BSSID announced by discovered BSS.
* @ssid_len: length of SSID announced by BSS.
* @ssid: SSID announced by discovered BSS.
@@ -901,7 +995,7 @@ struct qlink_event_scan_result {
__le16 freq;
__le16 capab;
__le16 bintval;
- s8 signal;
+ s8 sig_dbm;
u8 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 bssid[ETH_ALEN];
@@ -931,9 +1025,39 @@ struct qlink_event_scan_complete {
__le32 flags;
} __packed;
+enum qlink_radar_event {
+ QLINK_RADAR_DETECTED,
+ QLINK_RADAR_CAC_FINISHED,
+ QLINK_RADAR_CAC_ABORTED,
+ QLINK_RADAR_NOP_FINISHED,
+ QLINK_RADAR_PRE_CAC_EXPIRED,
+};
+
+/**
+ * struct qlink_event_radar - data for QLINK_EVENT_RADAR event
+ *
+ * @chan: channel on which radar event happened.
+ * @event: radar event type, one of &enum qlink_radar_event.
+ */
+struct qlink_event_radar {
+ struct qlink_event ehdr;
+ struct qlink_chandef chan;
+ u8 event;
+ u8 rsvd[3];
+} __packed;
+
/* QLINK TLVs (Type-Length Values) definitions
*/
+/**
+ * enum qlink_tlv_id - list of TLVs that Qlink messages can carry
+ *
+ * @QTN_TLV_ID_STA_STATS_MAP: a bitmap of &enum qlink_sta_info, used to
+ * indicate which statistic carried in QTN_TLV_ID_STA_STATS is valid.
+ * @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by
+ * &struct qlink_sta_stats. Valid values are marked as such in a bitmap
+ * carried by QTN_TLV_ID_STA_STATS_MAP.
+ */
enum qlink_tlv_id {
QTN_TLV_ID_FRAG_THRESH = 0x0201,
QTN_TLV_ID_RTS_THRESH = 0x0202,
@@ -942,15 +1066,24 @@ enum qlink_tlv_id {
QTN_TLV_ID_REG_RULE = 0x0207,
QTN_TLV_ID_CHANNEL = 0x020F,
QTN_TLV_ID_CHANDEF = 0x0210,
+ QTN_TLV_ID_STA_STATS_MAP = 0x0211,
+ QTN_TLV_ID_STA_STATS = 0x0212,
QTN_TLV_ID_COVERAGE_CLASS = 0x0213,
QTN_TLV_ID_IFACE_LIMIT = 0x0214,
QTN_TLV_ID_NUM_IFACE_COMB = 0x0215,
QTN_TLV_ID_CHANNEL_STATS = 0x0216,
- QTN_TLV_ID_STA_BASIC_COUNTERS = 0x0300,
- QTN_TLV_ID_STA_GENERIC_INFO = 0x0301,
QTN_TLV_ID_KEY = 0x0302,
QTN_TLV_ID_SEQ = 0x0303,
QTN_TLV_ID_IE_SET = 0x0305,
+ QTN_TLV_ID_EXT_CAPABILITY_MASK = 0x0306,
+ QTN_TLV_ID_ACL_DATA = 0x0307,
+ QTN_TLV_ID_BUILD_NAME = 0x0401,
+ QTN_TLV_ID_BUILD_REV = 0x0402,
+ QTN_TLV_ID_BUILD_TYPE = 0x0403,
+ QTN_TLV_ID_BUILD_LABEL = 0x0404,
+ QTN_TLV_ID_HW_ID = 0x0405,
+ QTN_TLV_ID_CALIBRATION_VER = 0x0406,
+ QTN_TLV_ID_UBOOT_VER = 0x0407,
};
struct qlink_tlv_hdr {
@@ -959,76 +1092,24 @@ struct qlink_tlv_hdr {
u8 val[0];
} __packed;
-struct qlink_iface_limit {
- __le16 max_num;
- __le16 type;
-} __packed;
-
struct qlink_iface_comb_num {
- __le16 iface_comb_num;
+ __le32 iface_comb_num;
} __packed;
-struct qlink_sta_stat_basic_counters {
- __le64 rx_bytes;
- __le64 tx_bytes;
- __le64 rx_beacons;
- __le32 rx_packets;
- __le32 tx_packets;
- __le32 rx_dropped;
- __le32 tx_failed;
-} __packed;
-
-enum qlink_sta_info_rate_flags {
- QLINK_STA_INFO_RATE_FLAG_INVALID = 0,
- QLINK_STA_INFO_RATE_FLAG_HT_MCS = BIT(0),
- QLINK_STA_INFO_RATE_FLAG_VHT_MCS = BIT(1),
- QLINK_STA_INFO_RATE_FLAG_SHORT_GI = BIT(2),
- QLINK_STA_INFO_RATE_FLAG_60G = BIT(3),
-};
-
-enum qlink_sta_info_rate_bw {
- QLINK_STA_INFO_RATE_BW_5 = 0,
- QLINK_STA_INFO_RATE_BW_10 = 1,
- QLINK_STA_INFO_RATE_BW_20 = 2,
- QLINK_STA_INFO_RATE_BW_40 = 3,
- QLINK_STA_INFO_RATE_BW_80 = 4,
- QLINK_STA_INFO_RATE_BW_160 = 5,
-};
-
-/**
- * struct qlink_sta_info_rate - STA rate statistics
- *
- * @rate: data rate in Mbps.
- * @flags: bitmap of &enum qlink_sta_flags.
- * @mcs: 802.11-defined MCS index.
- * nss: Number of Spatial Streams.
- * @bw: bandwidth, one of &enum qlink_sta_info_rate_bw.
- */
-struct qlink_sta_info_rate {
- __le16 rate;
- u8 flags;
- u8 mcs;
- u8 nss;
- u8 bw;
+struct qlink_iface_limit {
+ __le16 max_num;
+ __le16 type;
} __packed;
-struct qlink_sta_info_state {
- __le32 mask;
- __le32 value;
+struct qlink_iface_limit_record {
+ __le16 max_interfaces;
+ u8 num_different_channels;
+ u8 n_limits;
+ struct qlink_iface_limit limits[0];
} __packed;
#define QLINK_RSSI_OFFSET 120
-struct qlink_sta_info_generic {
- struct qlink_sta_info_state state;
- __le32 connected_time;
- __le32 inactive_time;
- struct qlink_sta_info_rate rx_rate;
- struct qlink_sta_info_rate tx_rate;
- u8 rssi;
- u8 rssi_avg;
-} __packed;
-
struct qlink_tlv_frag_rts_thr {
struct qlink_tlv_hdr hdr;
__le16 thr;
@@ -1113,19 +1194,16 @@ enum qlink_dfs_state {
QLINK_DFS_AVAILABLE,
};
+/**
+ * struct qlink_tlv_channel - data for QTN_TLV_ID_CHANNEL TLV
+ *
+ * Channel settings.
+ *
+ * @channel: ieee80211 channel settings.
+ */
struct qlink_tlv_channel {
struct qlink_tlv_hdr hdr;
- __le16 hw_value;
- __le16 center_freq;
- __le32 flags;
- u8 band;
- u8 max_antenna_gain;
- u8 max_power;
- u8 max_reg_power;
- __le32 dfs_cac_ms;
- u8 dfs_state;
- u8 beacon_found;
- u8 rsvd[2];
+ struct qlink_channel chan;
} __packed;
/**
@@ -1137,7 +1215,7 @@ struct qlink_tlv_channel {
*/
struct qlink_tlv_chandef {
struct qlink_tlv_hdr hdr;
- struct qlink_chandef chan;
+ struct qlink_chandef chdef;
} __packed;
enum qlink_ie_set_type {
@@ -1176,4 +1254,105 @@ struct qlink_chan_stats {
s8 chan_noise;
} __packed;
+/**
+ * enum qlink_sta_info - station information bitmap
+ *
+ * Used to indicate which statistics values in &struct qlink_sta_stats
+ * are valid. Individual values are used to fill a bitmap carried in a
+ * payload of QTN_TLV_ID_STA_STATS_MAP.
+ *
+ * @QLINK_STA_INFO_CONNECTED_TIME: connected_time value is valid.
+ * @QLINK_STA_INFO_INACTIVE_TIME: inactive_time value is valid.
+ * @QLINK_STA_INFO_RX_BYTES: lower 32 bits of rx_bytes value are valid.
+ * @QLINK_STA_INFO_TX_BYTES: lower 32 bits of tx_bytes value are valid.
+ * @QLINK_STA_INFO_RX_BYTES64: rx_bytes value is valid.
+ * @QLINK_STA_INFO_TX_BYTES64: tx_bytes value is valid.
+ * @QLINK_STA_INFO_RX_DROP_MISC: rx_dropped_misc value is valid.
+ * @QLINK_STA_INFO_BEACON_RX: rx_beacon value is valid.
+ * @QLINK_STA_INFO_SIGNAL: signal value is valid.
+ * @QLINK_STA_INFO_SIGNAL_AVG: signal_avg value is valid.
+ * @QLINK_STA_INFO_RX_BITRATE: rxrate value is valid.
+ * @QLINK_STA_INFO_TX_BITRATE: txrate value is valid.
+ * @QLINK_STA_INFO_RX_PACKETS: rx_packets value is valid.
+ * @QLINK_STA_INFO_TX_PACKETS: tx_packets value is valid.
+ * @QLINK_STA_INFO_TX_RETRIES: tx_retries value is valid.
+ * @QLINK_STA_INFO_TX_FAILED: tx_failed value is valid.
+ * @QLINK_STA_INFO_STA_FLAGS: sta_flags value is valid.
+ */
+enum qlink_sta_info {
+ QLINK_STA_INFO_CONNECTED_TIME,
+ QLINK_STA_INFO_INACTIVE_TIME,
+ QLINK_STA_INFO_RX_BYTES,
+ QLINK_STA_INFO_TX_BYTES,
+ QLINK_STA_INFO_RX_BYTES64,
+ QLINK_STA_INFO_TX_BYTES64,
+ QLINK_STA_INFO_RX_DROP_MISC,
+ QLINK_STA_INFO_BEACON_RX,
+ QLINK_STA_INFO_SIGNAL,
+ QLINK_STA_INFO_SIGNAL_AVG,
+ QLINK_STA_INFO_RX_BITRATE,
+ QLINK_STA_INFO_TX_BITRATE,
+ QLINK_STA_INFO_RX_PACKETS,
+ QLINK_STA_INFO_TX_PACKETS,
+ QLINK_STA_INFO_TX_RETRIES,
+ QLINK_STA_INFO_TX_FAILED,
+ QLINK_STA_INFO_STA_FLAGS,
+ QLINK_STA_INFO_NUM,
+};
+
+/**
+ * struct qlink_sta_info_rate - STA rate statistics
+ *
+ * @rate: data rate in Mbps.
+ * @flags: bitmap of &enum qlink_sta_info_rate_flags.
+ * @mcs: 802.11-defined MCS index.
+ * @nss: Number of Spatial Streams.
+ * @bw: bandwidth, one of &enum qlink_channel_width.
+ */
+struct qlink_sta_info_rate {
+ __le16 rate;
+ u8 flags;
+ u8 mcs;
+ u8 nss;
+ u8 bw;
+} __packed;
+
+/**
+ * struct qlink_sta_stats - data for QTN_TLV_ID_STA_STATS
+ *
+ * Carries statistics of a STA. Not all fields may be filled with
+ * valid values. Valid fields should be indicated as such using a bitmap of
+ * &enum qlink_sta_info. Bitmap is carried separately in a payload of
+ * QTN_TLV_ID_STA_STATS_MAP.
+ */
+struct qlink_sta_stats {
+ __le64 rx_bytes;
+ __le64 tx_bytes;
+ __le64 rx_beacon;
+ __le64 rx_duration;
+ __le64 t_offset;
+ __le32 connected_time;
+ __le32 inactive_time;
+ __le32 rx_packets;
+ __le32 tx_packets;
+ __le32 tx_retries;
+ __le32 tx_failed;
+ __le32 rx_dropped_misc;
+ __le32 beacon_loss_count;
+ __le32 expected_throughput;
+ struct qlink_sta_info_state sta_flags;
+ struct qlink_sta_info_rate txrate;
+ struct qlink_sta_info_rate rxrate;
+ __le16 llid;
+ __le16 plid;
+ u8 local_pm;
+ u8 peer_pm;
+ u8 nonpeer_pm;
+ u8 rx_beacon_signal_avg;
+ u8 plink_state;
+ u8 signal;
+ u8 signal_avg;
+ u8 rsvd[1];
+} __packed;
+
#endif /* _QTN_QLINK_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
index 61d999affb09..aeeda81b09ea 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
@@ -100,34 +100,6 @@ static enum nl80211_chan_width qlink_chanwidth_to_nl(u8 qlw)
}
}
-void qlink_chandef_q2cfg(struct wiphy *wiphy,
- const struct qlink_chandef *qch,
- struct cfg80211_chan_def *chdef)
-{
- chdef->center_freq1 = le16_to_cpu(qch->center_freq1);
- chdef->center_freq2 = le16_to_cpu(qch->center_freq2);
- chdef->width = qlink_chanwidth_to_nl(qch->width);
-
- switch (chdef->width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- case NL80211_CHAN_WIDTH_20:
- case NL80211_CHAN_WIDTH_5:
- case NL80211_CHAN_WIDTH_10:
- chdef->chan = ieee80211_get_channel(wiphy, chdef->center_freq1);
- break;
- case NL80211_CHAN_WIDTH_40:
- case NL80211_CHAN_WIDTH_80:
- case NL80211_CHAN_WIDTH_80P80:
- case NL80211_CHAN_WIDTH_160:
- chdef->chan = ieee80211_get_channel(wiphy,
- chdef->center_freq1 - 10);
- break;
- default:
- chdef->chan = NULL;
- break;
- }
-}
-
static u8 qlink_chanwidth_nl_to_qlink(enum nl80211_chan_width nlwidth)
{
switch (nlwidth) {
@@ -152,9 +124,29 @@ static u8 qlink_chanwidth_nl_to_qlink(enum nl80211_chan_width nlwidth)
}
}
+void qlink_chandef_q2cfg(struct wiphy *wiphy,
+ const struct qlink_chandef *qch,
+ struct cfg80211_chan_def *chdef)
+{
+ struct ieee80211_channel *chan;
+
+ chan = ieee80211_get_channel(wiphy, le16_to_cpu(qch->chan.center_freq));
+
+ chdef->chan = chan;
+ chdef->center_freq1 = le16_to_cpu(qch->center_freq1);
+ chdef->center_freq2 = le16_to_cpu(qch->center_freq2);
+ chdef->width = qlink_chanwidth_to_nl(qch->width);
+}
+
void qlink_chandef_cfg2q(const struct cfg80211_chan_def *chdef,
struct qlink_chandef *qch)
{
+ struct ieee80211_channel *chan = chdef->chan;
+
+ qch->chan.hw_value = cpu_to_le16(chan->hw_value);
+ qch->chan.center_freq = cpu_to_le16(chan->center_freq);
+ qch->chan.flags = cpu_to_le32(chan->flags);
+
qch->center_freq1 = cpu_to_le16(chdef->center_freq1);
qch->center_freq2 = cpu_to_le16(chdef->center_freq2);
qch->width = qlink_chanwidth_nl_to_qlink(chdef->width);
@@ -172,3 +164,33 @@ enum qlink_hidden_ssid qlink_hidden_ssid_nl2q(enum nl80211_hidden_ssid nl_val)
return QLINK_HIDDEN_SSID_NOT_IN_USE;
}
}
+
+bool qtnf_utils_is_bit_set(const u8 *arr, unsigned int bit,
+ unsigned int arr_max_len)
+{
+ unsigned int idx = bit / BITS_PER_BYTE;
+ u8 mask = 1 << (bit - (idx * BITS_PER_BYTE));
+
+ if (idx >= arr_max_len)
+ return false;
+
+ return arr[idx] & mask;
+}
+
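A hedged usage sketch for qtnf_utils_is_bit_set(): a consumer of a QTN_TLV_ID_STA_STATS payload would gate each field on the validity bitmap from QTN_TLV_ID_STA_STATS_MAP before copying it into cfg80211's station_info. The parse helper and its parameter names are illustrative; real code would also set the matching NL80211_STA_INFO_* bits in sinfo->filled.

static void qtnf_sta_stats_fill(struct station_info *sinfo,
				const struct qlink_sta_stats *stats,
				const u8 *map, unsigned int map_len)
{
	/* a field is meaningful only if flagged valid in the bitmap */
	if (qtnf_utils_is_bit_set(map, QLINK_STA_INFO_CONNECTED_TIME, map_len))
		sinfo->connected_time = le32_to_cpu(stats->connected_time);

	if (qtnf_utils_is_bit_set(map, QLINK_STA_INFO_TX_FAILED, map_len))
		sinfo->tx_failed = le32_to_cpu(stats->tx_failed);
}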
+void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl,
+ struct qlink_acl_data *qacl)
+{
+ switch (acl->acl_policy) {
+ case NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED:
+ qacl->policy =
+ cpu_to_le32(QLINK_ACL_POLICY_ACCEPT_UNLESS_LISTED);
+ break;
+ case NL80211_ACL_POLICY_DENY_UNLESS_LISTED:
+ qacl->policy = cpu_to_le32(QLINK_ACL_POLICY_DENY_UNLESS_LISTED);
+ break;
+ }
+
+ qacl->num_entries = cpu_to_le32(acl->n_acl_entries);
+ memcpy(qacl->mac_addrs, acl->mac_addrs,
+ acl->n_acl_entries * sizeof(*qacl->mac_addrs));
+}
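Because qlink_acl_data ends in a flexible mac_addrs[] array, callers must size the buffer from n_acl_entries before converting. A minimal sketch under that assumption (the allocating helper is hypothetical; this patch adds only the converter):

#include <linux/slab.h>

static struct qlink_acl_data *
qtnf_acl_data_alloc_q(const struct cfg80211_acl_data *acl)
{
	size_t size = sizeof(struct qlink_acl_data) +
		      acl->n_acl_entries * sizeof(struct qlink_mac_address);
	struct qlink_acl_data *qacl = kzalloc(size, GFP_KERNEL);

	if (qacl)
		qlink_acl_data_cfg2q(acl, qacl);

	return qacl;	/* caller kfree()s after sending the command */
}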
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
index 260383d6d109..54caeb38917c 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -69,5 +69,9 @@ void qlink_chandef_q2cfg(struct wiphy *wiphy,
void qlink_chandef_cfg2q(const struct cfg80211_chan_def *chdef,
struct qlink_chandef *qch);
enum qlink_hidden_ssid qlink_hidden_ssid_nl2q(enum nl80211_hidden_ssid nl_val);
+bool qtnf_utils_is_bit_set(const u8 *arr, unsigned int bit,
+ unsigned int arr_max_len);
+void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl,
+ struct qlink_acl_data *qacl);
#endif /* _QTN_FMAC_QLINK_UTIL_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.c b/drivers/net/wireless/quantenna/qtnfmac/util.c
index ed38e87471bf..e745733ba417 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.c
@@ -57,9 +57,10 @@ struct qtnf_sta_node *qtnf_sta_list_lookup_index(struct qtnf_sta_list *list,
return NULL;
}
-struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list,
+struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_vif *vif,
const u8 *mac)
{
+ struct qtnf_sta_list *list = &vif->sta_list;
struct qtnf_sta_node *node;
if (unlikely(!mac))
@@ -77,13 +78,15 @@ struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list,
ether_addr_copy(node->mac_addr, mac);
list_add_tail(&node->list, &list->head);
atomic_inc(&list->size);
+ ++vif->generation;
done:
return node;
}
-bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac)
+bool qtnf_sta_list_del(struct qtnf_vif *vif, const u8 *mac)
{
+ struct qtnf_sta_list *list = &vif->sta_list;
struct qtnf_sta_node *node;
bool ret = false;
@@ -93,6 +96,7 @@ bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac)
list_del(&node->list);
atomic_dec(&list->size);
kfree(node);
+ ++vif->generation;
ret = true;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.h b/drivers/net/wireless/quantenna/qtnfmac/util.h
index 0359eae8c24b..0d4d92b11540 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.h
@@ -26,9 +26,9 @@ struct qtnf_sta_node *qtnf_sta_list_lookup(struct qtnf_sta_list *list,
const u8 *mac);
struct qtnf_sta_node *qtnf_sta_list_lookup_index(struct qtnf_sta_list *list,
size_t index);
-struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list,
+struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_vif *vif,
const u8 *mac);
-bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac);
+bool qtnf_sta_list_del(struct qtnf_vif *vif, const u8 *mac);
void qtnf_sta_list_free(struct qtnf_sta_list *list);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index d2c289446c00..429d07b651dd 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -4903,7 +4903,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
min_sleep = 2000;
break;
default:
- WARN_ONCE(1, "Not supported RF chipet %x for VCO recalibration",
+ WARN_ONCE(1, "Not supported RF chipset %x for VCO recalibration",
rt2x00dev->chip.rf);
return;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index f4fdad2ed319..72c55d1f8903 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -301,7 +301,7 @@ exit:
return status;
}
-static unsigned int rt2x00debug_poll_queue_dump(struct file *file,
+static __poll_t rt2x00debug_poll_queue_dump(struct file *file,
poll_table *wait)
{
struct rt2x00debug_intf *intf = file->private_data;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index ecc96312a370..a971bc7a6b63 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -142,32 +142,28 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
if (!rt2x00dev->ops->hw->set_rts_threshold &&
(tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
IEEE80211_TX_RC_USE_CTS_PROTECT))) {
- if (rt2x00queue_available(queue) <= 1)
- goto exit_fail;
+ if (rt2x00queue_available(queue) <= 1) {
+ /*
+ * Recheck for full queue under lock to avoid race
+ * conditions with rt2x00lib_txdone().
+ */
+ spin_lock(&queue->tx_lock);
+ if (rt2x00queue_threshold(queue))
+ rt2x00queue_pause_queue(queue);
+ spin_unlock(&queue->tx_lock);
+
+ goto exit_free_skb;
+ }
if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
- goto exit_fail;
+ goto exit_free_skb;
}
if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
- goto exit_fail;
-
- /*
- * Pausing queue has to be serialized with rt2x00lib_txdone(). Note
- * we should not use spin_lock_bh variant as bottom halve was already
- * disabled before ieee80211_xmit() call.
- */
- spin_lock(&queue->tx_lock);
- if (rt2x00queue_threshold(queue))
- rt2x00queue_pause_queue(queue);
- spin_unlock(&queue->tx_lock);
+ goto exit_free_skb;
return;
- exit_fail:
- spin_lock(&queue->tx_lock);
- rt2x00queue_pause_queue(queue);
- spin_unlock(&queue->tx_lock);
exit_free_skb:
ieee80211_free_txskb(hw, skb);
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index a2c1ca5c76d1..a6884e73d2ab 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -715,6 +715,14 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
rt2x00queue_kick_tx_queue(queue, &txdesc);
out:
+ /*
+ * Pausing queue has to be serialized with rt2x00lib_txdone(), so we
+ * do this under queue->tx_lock. Bottom halves were already disabled
+ * before the ieee80211_xmit() call.
+ */
+ if (rt2x00queue_threshold(queue))
+ rt2x00queue_pause_queue(queue);
+
spin_unlock(&queue->tx_lock);
return ret;
}
@@ -1244,10 +1252,8 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
- if (!queue) {
- rt2x00_err(rt2x00dev, "Queue allocation failed\n");
+ if (!queue)
return -ENOMEM;
- }
/*
* Initialize pointers
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
index e6668ffb77e6..ff0971f1e2c8 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
@@ -688,10 +688,7 @@ static void rtl8225z2_b_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
cck_power = priv->channels[channel - 1].hw_value & 0xF;
ofdm_power = priv->channels[channel - 1].hw_value >> 4;
- if (cck_power > 15)
- cck_power = (priv->hw_rev == RTL8187BvB) ? 15 : 22;
- else
- cck_power += (priv->hw_rev == RTL8187BvB) ? 0 : 7;
+ cck_power += (priv->hw_rev == RTL8187BvB) ? 0 : 7;
cck_power += priv->txpwr_base & 0xF;
cck_power = min(cck_power, (u8)35);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 95e3993d8a33..8828baf26e7b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1172,7 +1172,7 @@ struct rtl8723bu_c2h {
u8 basic_rate:1;
u8 bt_has_reset:1;
- u8 dummy4_1:1;;
+ u8 dummy4_1:1;
u8 ignore_wlan:1;
u8 auto_report:1;
u8 dummy4_2:3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index cad2272ae21b..d6c03bd5cc65 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -244,7 +244,8 @@ static void _rtl_init_hw_vht_capab(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE ||
+ rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
u16 mcs_map;
vht_cap->vht_supported = true;
@@ -395,6 +396,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, CONNECTION_MONITOR);
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
/* swlps or hwlps has been set in diff chip in init_sw_vars */
if (rtlpriv->psc.swctrl_lps) {
@@ -551,7 +553,8 @@ int rtl_init_core(struct ieee80211_hw *hw)
/* <4> locks */
mutex_init(&rtlpriv->locks.conf_mutex);
- spin_lock_init(&rtlpriv->locks.ips_lock);
+ mutex_init(&rtlpriv->locks.ips_mutex);
+ mutex_init(&rtlpriv->locks.lps_mutex);
spin_lock_init(&rtlpriv->locks.irq_th_lock);
spin_lock_init(&rtlpriv->locks.h2c_lock);
spin_lock_init(&rtlpriv->locks.rf_ps_lock);
@@ -561,9 +564,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
spin_lock_init(&rtlpriv->locks.c2hcmd_lock);
spin_lock_init(&rtlpriv->locks.scan_list_lock);
spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
- spin_lock_init(&rtlpriv->locks.check_sendpkt_lock);
spin_lock_init(&rtlpriv->locks.fw_ps_lock);
- spin_lock_init(&rtlpriv->locks.lps_lock);
spin_lock_init(&rtlpriv->locks.iqk_lock);
/* <5> init list */
INIT_LIST_HEAD(&rtlpriv->entry_list);
@@ -697,14 +698,94 @@ static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
}
}
+u8 rtl_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index,
+ enum wireless_mode wirelessmode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 ret = 0;
+
+ switch (rate_index) {
+ case RATR_INX_WIRELESS_NGB:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = RATEID_IDX_BGN_40M_1SS;
+ else
+ ret = RATEID_IDX_BGN_40M_2SS;
+ break;
+ case RATR_INX_WIRELESS_N:
+ case RATR_INX_WIRELESS_NG:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = RATEID_IDX_GN_N1SS;
+ else
+ ret = RATEID_IDX_GN_N2SS;
+ break;
+ case RATR_INX_WIRELESS_NB:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = RATEID_IDX_BGN_20M_1SS_BN;
+ else
+ ret = RATEID_IDX_BGN_20M_2SS_BN;
+ break;
+ case RATR_INX_WIRELESS_GB:
+ ret = RATEID_IDX_BG;
+ break;
+ case RATR_INX_WIRELESS_G:
+ ret = RATEID_IDX_G;
+ break;
+ case RATR_INX_WIRELESS_B:
+ ret = RATEID_IDX_B;
+ break;
+ case RATR_INX_WIRELESS_MC:
+ if (wirelessmode == WIRELESS_MODE_B ||
+ wirelessmode == WIRELESS_MODE_G ||
+ wirelessmode == WIRELESS_MODE_N_24G ||
+ wirelessmode == WIRELESS_MODE_AC_24G)
+ ret = RATEID_IDX_BG;
+ else
+ ret = RATEID_IDX_G;
+ break;
+ case RATR_INX_WIRELESS_AC_5N:
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = RATEID_IDX_VHT_1SS;
+ else
+ ret = RATEID_IDX_VHT_2SS;
+ break;
+ case RATR_INX_WIRELESS_AC_24N:
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = RATEID_IDX_VHT_1SS;
+ else
+ ret = RATEID_IDX_VHT_2SS;
+ } else {
+ if (rtlphy->rf_type == RF_1T1R)
+ ret = RATEID_IDX_MIX1;
+ else
+ ret = RATEID_IDX_MIX2;
+ }
+ break;
+ default:
+ ret = RATEID_IDX_BGN_40M_2SS;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(rtl_mrate_idx_to_arfr_id);
+
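A hedged sketch of the check that the SET_RATE_ID macro below open-codes: only chips advertising RTL_SPEC_NEW_RATEID in cfg->spec_ver translate the legacy ratr index via rtl_mrate_idx_to_arfr_id(); older chips keep the index as-is. The wrapper is illustrative only:

static u8 rtl_pick_rate_id(struct ieee80211_hw *hw, u8 ratr_index,
			   enum wireless_mode mode)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if (rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID)
		return rtl_mrate_idx_to_arfr_id(hw, ratr_index, mode);

	return ratr_index;	/* legacy chips use the ratr index as-is */
}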
static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct rtl_tcb_desc *tcb_desc)
{
+#define SET_RATE_ID(rate_id) \
+ ({typeof(rate_id) _id = rate_id; \
+ ((rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID) ? \
+ rtl_mrate_idx_to_arfr_id(hw, _id, \
+ (sta_entry ? sta_entry->wireless_mode : \
+ WIRELESS_MODE_G)) : \
+ _id); })
+
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_sta_info *sta_entry = NULL;
- u8 ratr_index = 7;
+ u8 ratr_index = SET_RATE_ID(RATR_INX_WIRELESS_MC);
if (sta) {
sta_entry = (struct rtl_sta_info *) sta->drv_priv;
@@ -719,7 +800,8 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
tcb_desc->hw_rate =
rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
tcb_desc->use_driver_rate = 1;
- tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+ tcb_desc->ratr_index =
+ SET_RATE_ID(RATR_INX_WIRELESS_MC);
} else {
tcb_desc->ratr_index = ratr_index;
}
@@ -735,22 +817,30 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
tcb_desc->mac_id = 0;
- if (mac->mode == WIRELESS_MODE_AC_5G)
+ if (sta &&
+ (rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID))
+ ; /* use sta_entry->ratr_index */
+ else if (mac->mode == WIRELESS_MODE_AC_5G)
tcb_desc->ratr_index =
- RATR_INX_WIRELESS_AC_5N;
+ SET_RATE_ID(RATR_INX_WIRELESS_AC_5N);
else if (mac->mode == WIRELESS_MODE_AC_24G)
tcb_desc->ratr_index =
- RATR_INX_WIRELESS_AC_24N;
+ SET_RATE_ID(RATR_INX_WIRELESS_AC_24N);
else if (mac->mode == WIRELESS_MODE_N_24G)
- tcb_desc->ratr_index = RATR_INX_WIRELESS_NGB;
+ tcb_desc->ratr_index =
+ SET_RATE_ID(RATR_INX_WIRELESS_NGB);
else if (mac->mode == WIRELESS_MODE_N_5G)
- tcb_desc->ratr_index = RATR_INX_WIRELESS_NG;
+ tcb_desc->ratr_index =
+ SET_RATE_ID(RATR_INX_WIRELESS_NG);
else if (mac->mode & WIRELESS_MODE_G)
- tcb_desc->ratr_index = RATR_INX_WIRELESS_GB;
+ tcb_desc->ratr_index =
+ SET_RATE_ID(RATR_INX_WIRELESS_GB);
else if (mac->mode & WIRELESS_MODE_B)
- tcb_desc->ratr_index = RATR_INX_WIRELESS_B;
+ tcb_desc->ratr_index =
+ SET_RATE_ID(RATR_INX_WIRELESS_B);
else if (mac->mode & WIRELESS_MODE_A)
- tcb_desc->ratr_index = RATR_INX_WIRELESS_G;
+ tcb_desc->ratr_index =
+ SET_RATE_ID(RATR_INX_WIRELESS_G);
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
@@ -764,6 +854,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
}
}
}
+#undef SET_RATE_ID
}
static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
@@ -859,8 +950,8 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 hw_rate;
- if ((get_rf_type(rtlphy) == RF_2T2R) &&
- (sta->ht_cap.mcs.rx_mask[1] != 0))
+ if (get_rf_type(rtlphy) == RF_2T2R &&
+ sta->ht_cap.mcs.rx_mask[1] != 0)
hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
else
hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
@@ -1140,9 +1231,19 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc)
{
+#define SET_RATE_ID(rate_id) \
+ ({typeof(rate_id) _id = rate_id; \
+ ((rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID) ? \
+ rtl_mrate_idx_to_arfr_id(hw, _id, \
+ (sta_entry ? sta_entry->wireless_mode : \
+ WIRELESS_MODE_G)) : \
+ _id); })
+
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+ struct rtl_sta_info *sta_entry =
+ (sta ? (struct rtl_sta_info *)sta->drv_priv : NULL);
__le16 fc = rtl_get_fc(skb);
@@ -1165,7 +1266,8 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
if (info->control.rates[0].idx == 0 ||
ieee80211_is_nullfunc(fc)) {
tcb_desc->use_driver_rate = true;
- tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+ tcb_desc->ratr_index =
+ SET_RATE_ID(RATR_INX_WIRELESS_MC);
tcb_desc->disable_ratefallback = 1;
} else {
@@ -1180,7 +1282,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
tcb_desc->hw_rate =
_rtl_get_vht_highest_n_rate(hw, sta);
} else {
- if (sta && (sta->ht_cap.ht_supported)) {
+ if (sta && sta->ht_cap.ht_supported) {
tcb_desc->hw_rate =
_rtl_get_highest_n_rate(hw, sta);
} else {
@@ -1207,11 +1309,12 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
_rtl_query_protection_mode(hw, tcb_desc, info);
} else {
tcb_desc->use_driver_rate = true;
- tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
+ tcb_desc->ratr_index = SET_RATE_ID(RATR_INX_WIRELESS_MC);
tcb_desc->disable_ratefallback = 1;
tcb_desc->mac_id = 0;
tcb_desc->packet_bw = false;
}
+#undef SET_RATE_ID
}
EXPORT_SYMBOL(rtl_get_tcb_desc);
@@ -1229,7 +1332,6 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
}
if (ieee80211_is_auth(fc)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
- rtl_ips_nic_on(hw);
mac->link_state = MAC80211_LINKING;
/* Dual mac */
@@ -1321,6 +1423,10 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
le16_to_cpu(mgmt->u.action.u.addba_req.capab);
tid = (capab &
IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+ if (tid >= MAX_TID_COUNT) {
+ rcu_read_unlock();
+ return true;
+ }
tid_data = &sta_entry->tids[tid];
if (tid_data->agg.rx_agg_state ==
RTL_RX_AGG_START)
@@ -1726,7 +1832,7 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
{
struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
- u8 reject_agg, ctrl_agg_size = 0, agg_size;
+ u8 reject_agg = 0, ctrl_agg_size = 0, agg_size = 0;
if (rtlpriv->cfg->ops->get_btc_status())
btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg,
@@ -1972,9 +2078,9 @@ void rtl_watchdog_wq_callback(void *data)
rtlpriv->btcoexist.btc_ops->btc_is_bt_ctrl_lps(rtlpriv))
goto label_lps_done;
- if (((rtlpriv->link_info.num_rx_inperiod +
- rtlpriv->link_info.num_tx_inperiod) > 8) ||
- (rtlpriv->link_info.num_rx_inperiod > 2))
+ if (rtlpriv->link_info.num_rx_inperiod +
+ rtlpriv->link_info.num_tx_inperiod > 8 ||
+ rtlpriv->link_info.num_rx_inperiod > 2)
rtl_lps_leave(hw);
else
rtl_lps_enter(hw);
@@ -2225,9 +2331,7 @@ static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw,
case IEEE80211_SMPS_AUTOMATIC:/* 0 */
case IEEE80211_SMPS_NUM_MODES:/* 4 */
WARN_ON(1);
- /* Here will get a 'MISSING_BREAK' in Coverity Test, just ignore it.
- * According to Kernel Code, here is right.
- */
+ /* fall through */
case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/
action_frame->u.action.u.ht_smps.smps_control =
WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */
@@ -2528,6 +2632,9 @@ static int __init rtl_core_module_init(void)
if (rtl_rate_control_register())
pr_err("rtl: Unable to register rtl_rc, use default RC !!\n");
+ /* add debugfs */
+ rtl_debugfs_add_topdir();
+
/* init some global vars */
INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
spin_lock_init(&rtl_global_var.glb_list_lock);
@@ -2539,6 +2646,9 @@ static void __exit rtl_core_module_exit(void)
{
/*RC*/
rtl_rate_control_unregister();
+
+ /* remove debugfs */
+ rtl_debugfs_remove_topdir();
}
module_init(rtl_core_module_init);
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index 26735319b38f..acc924635818 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -162,6 +162,8 @@ void rtl_c2hcmd_wq_callback(void *data);
void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec);
void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val);
+u8 rtl_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index,
+ enum wireless_mode wirelessmode);
void rtl_get_tcb_desc(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 44c25724529e..8fce371749d3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -2704,11 +2704,11 @@ void ex_btc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
btc8192e2ant_init_coex_dm(btcoexist);
}
-void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m)
{
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
u16 u16tmp[4];
u32 u32tmp[4];
@@ -2719,75 +2719,64 @@ void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
u8 wifi_dot11_chnl, wifi_hs_chnl;
u32 fw_ver = 0, bt_patch_ver = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
+ seq_puts(m, "\n ============[BT Coexist info]============");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ===========[Under Manual Control]===========");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
- }
-
- if (!board_info->bt_exist) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
- return;
+ seq_puts(m, "\n ===========[Under Manual Control]===========");
+ seq_puts(m, "\n ==========================================");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+ seq_printf(m, "\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
board_info->pg_ant_num, board_info->btdm_ant_num);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
- "BT stack/ hci ext ver",
+ seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver",
((stack_info->profile_notified) ? "Yes" : "No"),
stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
- "CoexVer/ FwVer/ PatchVer",
- glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
+ seq_printf(m, "\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
&wifi_dot11_chnl);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
- "Dot11 channel / HsMode(HsChnl)",
- wifi_dot11_chnl, bt_hs_on, wifi_hs_chnl);
+ seq_printf(m, "\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsMode(HsChnl)",
+ wifi_dot11_chnl, bt_hs_on, wifi_hs_chnl);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
- "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
+ seq_printf(m, "\n %-35s = %3ph ",
+ "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "Wifi link/ roam/ scan", link, roam, scan);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan", link, roam, scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
&wifi_traffic_dir);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
- "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+ seq_printf(m, "\n %-35s = %s / %s/ %s ",
+ "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
(((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
((!wifi_busy) ? "idle" :
((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
"uplink" : "downlink")));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ",
- "BT [status/ rssi/ retryCnt]",
+ seq_printf(m, "\n %-35s = [%s/ %d/ %d] ",
+ "BT [status/ rssi/ retryCnt]",
((btcoexist->bt_info.bt_disabled) ? ("disabled") :
((coex_sta->c2h_bt_inquiry_page) ?
("inquiry/page scan") :
@@ -2797,131 +2786,129 @@ void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
coex_dm->bt_status) ? "connected-idle" : "busy")))),
coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d",
- "SCO/HID/PAN/A2DP", stack_info->sco_exist,
+ seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP", stack_info->sco_exist,
stack_info->hid_exist, stack_info->pan_exist,
stack_info->a2dp_exist);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
bt_info_ext = coex_sta->bt_info_ext;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
- "BT Info A2DP rate",
+ seq_printf(m, "\n %-35s = %s",
+ "BT Info A2DP rate",
(bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8192E_2ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- glbt_info_src_8192e_2ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ seq_printf(m, "\n %-35s = %7ph(%d)",
+ glbt_info_src_8192e_2ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
- "PS state, IPS/LPS",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+ seq_printf(m, "\n %-35s = %s/%s",
+ "PS state, IPS/LPS",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "SS Type",
- coex_dm->cur_ss_type);
+ seq_printf(m, "\n %-35s = 0x%x ", "SS Type",
+ coex_dm->cur_ss_type);
/* Sw mechanism */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Sw mechanism]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
- coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
- "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
- coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
- coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "Rate Mask",
- btcoexist->bt_info.ra_mask);
+ seq_printf(m, "\n %-35s",
+ "============[Sw mechanism]============");
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+ "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+ coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+
+ seq_printf(m, "\n %-35s = 0x%x ", "Rate Mask",
+ btcoexist->bt_info.ra_mask);
/* Fw mechanism */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ seq_printf(m, "\n %-35s",
+ "============[Fw mechanism]============");
ps_tdma_case = coex_dm->cur_ps_tdma;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d (auto:%d)",
- "PS TDMA", coex_dm->ps_tdma_para,
- ps_tdma_case, coex_dm->auto_tdma_adjust);
+ seq_printf(m,
+ "\n %-35s = %5ph case-%d (auto:%d)",
+ "PS TDMA", coex_dm->ps_tdma_para,
+ ps_tdma_case, coex_dm->auto_tdma_adjust);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
- "DecBtPwr/ IgnWlanAct",
- coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
+ seq_printf(m, "\n %-35s = %d/ %d ",
+ "DecBtPwr/ IgnWlanAct",
+ coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
/* Hw setting */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Hw setting]============");
+ seq_printf(m, "\n %-35s",
+ "============[Hw setting]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
- "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+ seq_printf(m, "\n %-35s = 0x%x",
+ "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
- coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
- coex_dm->backup_ampdu_maxtime);
+ seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+ coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
+ coex_dm->backup_ampdu_maxtime);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "0x430/0x434/0x42a/0x456",
- u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "0x430/0x434/0x42a/0x456",
+ u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc04);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xd04);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x90c);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0x778",
- u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x", "0x778", u8tmp[0]);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0xc50(dig)",
- u32tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x", "0xc50(dig)",
+ u32tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
- u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x770(hp rx[31:16]/tx[15:0])",
- coex_sta->high_priority_rx, coex_sta->high_priority_tx);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x774(lp rx[31:16]/tx[15:0])",
- coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+ seq_printf(m,
+ "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "0x770(hp rx[31:16]/tx[15:0])",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "0x774(lp rx[31:16]/tx[15:0])",
+ coex_sta->low_priority_rx, coex_sta->low_priority_tx);
if (btcoexist->auto_report_2ant)
btc8192e2ant_monitor_bt_ctr(btcoexist);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
}
void ex_btc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
index 65502acee52c..b8c95c7afc06 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
@@ -180,4 +180,5 @@ void ex_btc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
u8 type);
void ex_btc8192e2ant_halt_notify(struct btc_coexist *btcoexist);
void ex_btc8192e2ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 5f726f6d3567..fd3b1fb35dff 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -2474,12 +2474,12 @@ void ex_btc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
halbtc8723b1ant_query_bt_info(btcoexist);
}
-void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m)
{
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u8tmp[4], i, bt_info_ext, pstdmacase = 0;
u16 u16tmp[4];
u32 u32tmp[4];
@@ -2491,62 +2491,56 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
u8 wifi_dot11_chnl, wifi_hs_chnl;
u32 fw_ver = 0, bt_patch_ver = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
+ seq_puts(m, "\n ============[BT Coexist info]============");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[Under Manual Control]==========");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
+ seq_puts(m, "\n ============[Under Manual Control]==========");
+ seq_puts(m, "\n ==========================================");
}
if (btcoexist->stop_coex_dm) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[Coex is STOPPED]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
+ seq_puts(m, "\n ============[Coex is STOPPED]============");
+ seq_puts(m, "\n ==========================================");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d",
- "Ant PG Num/ Ant Mech/ Ant Pos:",
- board_info->pg_ant_num, board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d",
+ "Ant PG Num/ Ant Mech/ Ant Pos:",
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
- "BT stack/ hci ext ver",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ seq_printf(m, "\n %-35s = %s / %d",
+ "BT stack/ hci ext ver",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
- "CoexVer/ FwVer/ PatchVer",
- glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
+ seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
&wifi_dot11_chnl);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
- "Dot11 channel / HsChnl(HsMode)",
- wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ seq_printf(m, "\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(HsMode)",
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
- "H2C Wifi inform bt chnl Info",
- coex_dm->wifi_chnl_info);
+ seq_printf(m, "\n %-35s = %3ph ",
+ "H2C Wifi inform bt chnl Info",
+ coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "Wifi link/ roam/ scan", link, roam, scan);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan", link, roam, scan);
btcoexist->btc_get(btcoexist , BTC_GET_BL_WIFI_UNDER_5G,
&wifi_under_5g);
@@ -2555,110 +2549,106 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
&wifi_traffic_dir);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
- "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
- ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
- ((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20")),
- ((!wifi_busy) ? "idle" :
- ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
- "uplink" : "downlink")));
+ seq_printf(m, "\n %-35s = %s / %s/ %s ",
+ "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+ ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
+ ((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20")),
+ ((!wifi_busy) ? "idle" :
+ ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+ "uplink" : "downlink")));
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
&wifi_link_status);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d/ %d/ %d",
- "sta/vwifi/hs/p2pGo/p2pGc",
- ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
- ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ",
- "BT [status/ rssi/ retryCnt]",
- ((coex_sta->bt_disabled) ? ("disabled") :
- ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
- ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
- coex_dm->bt_status) ?
- "non-connected idle" :
- ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
- coex_dm->bt_status) ?
- "connected-idle" : "busy")))),
- coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d / %d / %d",
- "SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
- bt_link_info->hid_exist, bt_link_info->pan_exist,
- bt_link_info->a2dp_exist);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d/ %d/ %d",
+ "sta/vwifi/hs/p2pGo/p2pGc",
+ ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
+
+ seq_printf(m, "\n %-35s = [%s/ %d/ %d] ",
+ "BT [status/ rssi/ retryCnt]",
+ ((coex_sta->bt_disabled) ? ("disabled") :
+ ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
+ ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status) ?
+ "non-connected idle" :
+ ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status) ?
+ "connected-idle" : "busy")))),
+ coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
+
+ seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
+ bt_link_info->hid_exist, bt_link_info->pan_exist,
+ bt_link_info->a2dp_exist);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
bt_info_ext = coex_sta->bt_info_ext;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
- "BT Info A2DP rate",
- (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+ seq_printf(m, "\n %-35s = %s",
+ "BT Info A2DP rate",
+ (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- glbt_info_src_8723b_1ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ seq_printf(m, "\n %-35s = %7ph(%d)",
+ glbt_info_src_8723b_1ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s/%s, (0x%x/0x%x)",
- "PS state, IPS/LPS, (lps/rpwm)",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
- btcoexist->bt_info.lps_val,
- btcoexist->bt_info.rpwm_val);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+ seq_printf(m, "\n %-35s = %s/%s, (0x%x/0x%x)",
+ "PS state, IPS/LPS, (lps/rpwm)",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
+ btcoexist->bt_info.lps_val,
+ btcoexist->bt_info.rpwm_val);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
if (!btcoexist->manual_control) {
/* Sw mechanism */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Sw mechanism]============");
+ seq_printf(m, "\n %-35s",
+ "============[Sw mechanism]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/",
- "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra);
+ seq_printf(m, "\n %-35s = %d/",
+ "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/ %s/ %d ",
- "DelBA/ BtCtrlAgg/ AggSize",
+ seq_printf(m, "\n %-35s = %s/ %s/ %d ",
+ "DelBA/ BtCtrlAgg/ AggSize",
(btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
(btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
btcoexist->bt_info.agg_buf_size);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ",
- "Rate Mask", btcoexist->bt_info.ra_mask);
+ seq_printf(m, "\n %-35s = 0x%x ",
+ "Rate Mask", btcoexist->bt_info.ra_mask);
/* Fw mechanism */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ seq_printf(m, "\n %-35s",
+ "============[Fw mechanism]============");
pstdmacase = coex_dm->cur_ps_tdma;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d (auto:%d)",
+ seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)",
"PS TDMA", coex_dm->ps_tdma_para,
pstdmacase, coex_dm->auto_tdma_adjust);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d ",
- "IgnWlanAct", coex_dm->cur_ignore_wlan_act);
+ seq_printf(m, "\n %-35s = %d ",
+ "IgnWlanAct", coex_dm->cur_ignore_wlan_act);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ",
- "Latest error condition(should be 0)",
+ seq_printf(m, "\n %-35s = 0x%x ",
+ "Latest error condition(should be 0)",
coex_dm->error_condition);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d",
- "Coex Table Type", coex_sta->coex_table_type);
+ seq_printf(m, "\n %-35s = %d",
+ "Coex Table Type", coex_sta->coex_table_type);
/* Hw setting */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Hw setting]============");
+ seq_printf(m, "\n %-35s",
+ "============[Hw setting]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+ seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
coex_dm->backup_ampdu_max_time);
@@ -2666,50 +2656,49 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "0x430/0x434/0x42a/0x456",
- u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "0x430/0x434/0x42a/0x456",
+ u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x880);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
- (u32tmp[1] & 0x3e000000) >> 25);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
+ (u32tmp[1] & 0x3e000000) >> 25);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x948/ 0x67[5] / 0x765",
- u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x948/ 0x67[5] / 0x765",
+ u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
- u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+ u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
- ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
- ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+ ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
+ ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
@@ -2727,26 +2716,26 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
(u32tmp[3] & 0xffff);
fa_cck = (u8tmp[0] << 8) + u8tmp[1];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "OFDM-CCA/OFDM-FA/CCK-FA",
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "OFDM-CCA/OFDM-FA/CCK-FA",
u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8(coexTable)",
- u32tmp[0], u32tmp[1], u32tmp[2]);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
- coex_sta->high_priority_tx);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
- coex_sta->low_priority_tx);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2]);
+
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
+ coex_sta->high_priority_tx);
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+ coex_sta->low_priority_tx);
if (btcoexist->auto_report_1ant)
halbtc8723b1ant_monitor_bt_ctr(btcoexist);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
}
void ex_btc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
index 8d4fde235e11..934f27893c16 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
@@ -220,5 +220,6 @@ void ex_btc8723b1ant_halt_notify(struct btc_coexist *btcoexist);
void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate);
void ex_btc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist);
void ex_btc8723b1ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m);
void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index e8f07573aed9..4907c2ffadfe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -3780,12 +3780,12 @@ void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
btc8723b2ant_init_coex_dm(btcoexist);
}
-void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m)
{
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
u32 u32tmp[4];
bool roam = false, scan = false;
@@ -3797,173 +3797,161 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
u32 fw_ver = 0, bt_patch_ver = 0;
u8 ap_num = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
+ seq_puts(m, "\n ============[BT Coexist info]============");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========[Under Manual Control]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
- }
-
- if (!board_info->bt_exist) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
- return;
+ seq_puts(m, "\n ==========[Under Manual Control]============");
+ seq_puts(m, "\n ==========================================");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
- "Ant PG number/ Ant mechanism:",
- board_info->pg_ant_num, board_info->btdm_ant_num);
+ seq_printf(m, "\n %-35s = %d/ %d ",
+ "Ant PG number/ Ant mechanism:",
+ board_info->pg_ant_num, board_info->btdm_ant_num);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
- "BT stack/ hci ext ver",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ seq_printf(m, "\n %-35s = %s / %d",
+ "BT stack/ hci ext ver",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
- "CoexVer/ FwVer/ PatchVer",
- glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
+ seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
&wifi_dot11_chnl);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
- "Dot11 channel / HsChnl(HsMode)",
- wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ seq_printf(m, "\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(HsMode)",
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
- "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
+ seq_printf(m, "\n %-35s = %3ph ",
+ "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d",
- "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d",
+ "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "Wifi link/ roam/ scan", link, roam, scan);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan", link, roam, scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
&wifi_traffic_dir);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
- "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+ seq_printf(m, "\n %-35s = %s / %s/ %s ",
+ "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
(((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
((!wifi_busy) ? "idle" :
((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
"uplink" : "downlink")));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d",
- "SCO/HID/PAN/A2DP",
- bt_link_info->sco_exist, bt_link_info->hid_exist,
- bt_link_info->pan_exist, bt_link_info->a2dp_exist);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP",
+ bt_link_info->sco_exist, bt_link_info->hid_exist,
+ bt_link_info->pan_exist, bt_link_info->a2dp_exist);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
bt_info_ext = coex_sta->bt_info_ext;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
- "BT Info A2DP rate",
- (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+ seq_printf(m, "\n %-35s = %s",
+ "BT Info A2DP rate",
+ (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- glbt_info_src_8723b_2ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ seq_printf(m, "\n %-35s = %7ph(%d)",
+ glbt_info_src_8723b_2ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
- "PS state, IPS/LPS",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+ seq_printf(m, "\n %-35s = %s/%s",
+ "PS state, IPS/LPS",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
/* Sw mechanism */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s", "============[Sw mechanism]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
- "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
- coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
- "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
- coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
- coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ seq_printf(m,
+ "\n %-35s", "============[Sw mechanism]============");
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+ "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+ coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
/* Fw mechanism */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ seq_printf(m, "\n %-35s",
+ "============[Fw mechanism]============");
ps_tdma_case = coex_dm->cur_ps_tdma;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d (auto:%d)",
- "PS TDMA", coex_dm->ps_tdma_para,
- ps_tdma_case, coex_dm->auto_tdma_adjust);
+ seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)",
+ "PS TDMA", coex_dm->ps_tdma_para,
+ ps_tdma_case, coex_dm->auto_tdma_adjust);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
- "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr_lvl,
- coex_dm->cur_ignore_wlan_act);
+ seq_printf(m, "\n %-35s = %d/ %d ",
+ "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr_lvl,
+ coex_dm->cur_ignore_wlan_act);
/* Hw setting */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Hw setting]============");
+ seq_printf(m, "\n %-35s",
+ "============[Hw setting]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
- "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+ seq_printf(m, "\n %-35s = 0x%x",
+ "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x778/0x880[29:25]", u8tmp[0],
- (u32tmp[0] & 0x3e000000) >> 25);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x778/0x880[29:25]", u8tmp[0],
+ (u32tmp[0] & 0x3e000000) >> 25);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x948/ 0x67[5] / 0x765",
- u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x948/ 0x67[5] / 0x765",
+ u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
- u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+ u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
- ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
- ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+ ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
+ ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
@@ -3981,29 +3969,27 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
(u32tmp[3] & 0xffff);
fa_cck = (u8tmp[0] << 8) + u8tmp[1];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "OFDM-CCA/OFDM-FA/CCK-FA",
- u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "OFDM-CCA/OFDM-FA/CCK-FA",
+ u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
- u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x770(high-pri rx/tx)",
- coex_sta->high_priority_rx, coex_sta->high_priority_tx);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
- coex_sta->low_priority_tx);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "0x770(high-pri rx/tx)",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+ coex_sta->low_priority_tx);
if (btcoexist->auto_report_2ant)
btc8723b2ant_monitor_bt_ctr(btcoexist);
- btcoexist->btc_disp_dbg_msg(btcoexist,
- BTC_DBG_DISP_COEX_STATISTICS);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
}
void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
index bc1e3042e271..aa24da402fb9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
@@ -195,7 +195,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
u8 *tmpbuf, u8 length);
void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist);
void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m);
void ex_btc8723b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
void ex_btc8723b2ant_pre_load_firmware(struct btc_coexist *btcoexist);
void ex_btc8723b2ant_power_on_setting(struct btc_coexist *btcoexist);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index 4efac5fe9982..0b26419881c0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -2172,12 +2172,12 @@ void ex_btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
btc8821a1ant_query_bt_info(btcoexist);
}
-void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m)
{
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u1_tmp[4], i, bt_info_ext, ps_tdma_case = 0;
u16 u2_tmp[4];
u32 u4_tmp[4];
@@ -2188,49 +2188,36 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
u8 wifi_dot11_chnl, wifi_hs_chnl;
u32 fw_ver = 0, bt_patch_ver = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
+ seq_puts(m, "\n ============[BT Coexist info]============");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[Under Manual Control]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
+ seq_puts(m, "\n ============[Under Manual Control]============");
+ seq_puts(m, "\n ==========================================");
}
if (btcoexist->stop_coex_dm) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[Coex is STOPPED]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ==========================================");
- }
-
- if (!board_info->bt_exist) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
- return;
+ seq_puts(m, "\n ============[Coex is STOPPED]============");
+ seq_puts(m, "\n ==========================================");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d/ %d",
- "Ant PG Num/ Ant Mech/ Ant Pos:",
- board_info->pg_ant_num,
- board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d",
+ "Ant PG Num/ Ant Mech/ Ant Pos:",
+ board_info->pg_ant_num,
+ board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
- "CoexVer/ FwVer/ PatchVer",
- glcoex_ver_date_8821a_1ant,
- glcoex_ver_8821a_1ant,
- fw_ver, bt_patch_ver,
- bt_patch_ver);
+ seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8821a_1ant,
+ glcoex_ver_8821a_1ant,
+ fw_ver, bt_patch_ver,
+ bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION,
&bt_hs_on);
@@ -2238,28 +2225,24 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
&wifi_dot11_chnl);
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL,
&wifi_hs_chnl);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d(%d)",
- "Dot11 channel / HsChnl(HsMode)",
- wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ seq_printf(m, "\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(HsMode)",
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %3ph ",
- "H2C Wifi inform bt chnl Info",
- coex_dm->wifi_chnl_info);
+ seq_printf(m, "\n %-35s = %3ph ",
+ "H2C Wifi inform bt chnl Info",
+ coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi",
- (int)wifi_rssi, (int)bt_hs_rssi);
+ seq_printf(m, "\n %-35s = %d/ %d", "Wifi rssi/ HS rssi",
+ (int)wifi_rssi, (int)bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
- link, roam, scan);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
+ link, roam, scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G,
&wifi_under_5g);
@@ -2269,16 +2252,15 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
&wifi_busy);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
&wifi_traffic_dir);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s / %s/ %s ", "Wifi status",
- (wifi_under_5g ? "5G" : "2.4G"),
- ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
- (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
- ((!wifi_busy) ? "idle" :
- ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
- "uplink" : "downlink")));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]",
+ seq_printf(m, "\n %-35s = %s / %s/ %s ", "Wifi status",
+ (wifi_under_5g ? "5G" : "2.4G"),
+ ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
+ (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
+ ((!wifi_busy) ? "idle" :
+ ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+ "uplink" : "downlink")));
+ seq_printf(m, "\n %-35s = [%s/ %d/ %d] ",
+ "BT [status/ rssi/ retryCnt]",
((coex_sta->bt_disabled) ? ("disabled") :
((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
((BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
@@ -2289,166 +2271,143 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
"connected-idle" : "busy")))),
coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
- bt_link_info->sco_exist,
- bt_link_info->hid_exist,
- bt_link_info->pan_exist,
- bt_link_info->a2dp_exist);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ seq_printf(m, "\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
+ bt_link_info->sco_exist,
+ bt_link_info->hid_exist,
+ bt_link_info->pan_exist,
+ bt_link_info->a2dp_exist);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
bt_info_ext = coex_sta->bt_info_ext;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s",
- "BT Info A2DP rate",
- (bt_info_ext & BIT0) ?
- "Basic rate" : "EDR rate");
+ seq_printf(m, "\n %-35s = %s",
+ "BT Info A2DP rate",
+ (bt_info_ext & BIT0) ?
+ "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8821A_1ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- glbt_info_src_8821a_1ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ seq_printf(m, "\n %-35s = %7ph(%d)",
+ glbt_info_src_8821a_1ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s/%s, (0x%x/0x%x)",
- "PS state, IPS/LPS, (lps/rpwm)",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
- btcoexist->bt_info.lps_val,
- btcoexist->bt_info.rpwm_val);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+ seq_printf(m, "\n %-35s = %s/%s, (0x%x/0x%x)",
+ "PS state, IPS/LPS, (lps/rpwm)",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
+ btcoexist->bt_info.lps_val,
+ btcoexist->bt_info.rpwm_val);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
if (!btcoexist->manual_control) {
/* Sw mechanism*/
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s",
- "============[Sw mechanism]============");
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d", "SM[LowPenaltyRA]",
- coex_dm->cur_low_penalty_ra);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s/ %s/ %d ",
- "DelBA/ BtCtrlAgg/ AggSize",
- (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
- (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
- btcoexist->bt_info.agg_buf_size);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x ", "Rate Mask",
- btcoexist->bt_info.ra_mask);
+ seq_printf(m, "\n %-35s",
+ "============[Sw mechanism]============");
+
+ seq_printf(m, "\n %-35s = %d", "SM[LowPenaltyRA]",
+ coex_dm->cur_low_penalty_ra);
+
+ seq_printf(m, "\n %-35s = %s/ %s/ %d ",
+ "DelBA/ BtCtrlAgg/ AggSize",
+ (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
+ (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
+ btcoexist->bt_info.agg_buf_size);
+ seq_printf(m, "\n %-35s = 0x%x ", "Rate Mask",
+ btcoexist->bt_info.ra_mask);
/* Fw mechanism */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ seq_printf(m, "\n %-35s",
+ "============[Fw mechanism]============");
ps_tdma_case = coex_dm->cur_ps_tdma;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d (auto:%d)",
- "PS TDMA",
- coex_dm->ps_tdma_para,
- ps_tdma_case,
- coex_dm->auto_tdma_adjust);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x ",
- "Latest error condition(should be 0)",
+ seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)",
+ "PS TDMA",
+ coex_dm->ps_tdma_para,
+ ps_tdma_case,
+ coex_dm->auto_tdma_adjust);
+
+ seq_printf(m, "\n %-35s = 0x%x ",
+ "Latest error condition(should be 0)",
coex_dm->error_condition);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d ", "IgnWlanAct",
- coex_dm->cur_ignore_wlan_act);
+ seq_printf(m, "\n %-35s = %d ", "IgnWlanAct",
+ coex_dm->cur_ignore_wlan_act);
}
/* Hw setting */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s", "============[Hw setting]============");
+ seq_printf(m, "\n %-35s", "============[Hw setting]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "backup ARFR1/ARFR2/RL/AMaxTime",
- coex_dm->backup_arfr_cnt1,
- coex_dm->backup_arfr_cnt2,
- coex_dm->backup_retry_limit,
- coex_dm->backup_ampdu_max_time);
+ seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "backup ARFR1/ARFR2/RL/AMaxTime",
+ coex_dm->backup_arfr_cnt1,
+ coex_dm->backup_arfr_cnt2,
+ coex_dm->backup_retry_limit,
+ coex_dm->backup_ampdu_max_time);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
u2_tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
- "0x430/0x434/0x42a/0x456",
- u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+ "0x430/0x434/0x42a/0x456",
+ u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc58);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]",
- u1_tmp[0], (u4_tmp[0] & 0x3e000000) >> 25);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]",
+ u1_tmp[0], (u4_tmp[0] & 0x3e000000) >> 25);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x", "0x8db[6:5]",
- ((u1_tmp[0] & 0x60) >> 5));
+ seq_printf(m, "\n %-35s = 0x%x", "0x8db[6:5]",
+ ((u1_tmp[0] & 0x60) >> 5));
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x975);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]",
- (u4_tmp[0] & 0x30000000) >> 28,
- u4_tmp[0] & 0xff,
- u1_tmp[0] & 0x3);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]",
+ (u4_tmp[0] & 0x30000000) >> 28,
+ u4_tmp[0] & 0xff,
+ u1_tmp[0] & 0x3);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x64);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x40/0x4c[24:23]/0x64[0]",
- u1_tmp[0], ((u4_tmp[0] & 0x01800000) >> 23), u1_tmp[1] & 0x1);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x40/0x4c[24:23]/0x64[0]",
+ u1_tmp[0], ((u4_tmp[0] & 0x01800000) >> 23),
+ u1_tmp[1] & 0x1);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522",
- u4_tmp[0], u1_tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522",
+ u4_tmp[0], u1_tmp[0]);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x", "0xc50(dig)",
- u4_tmp[0] & 0xff);
+ seq_printf(m, "\n %-35s = 0x%x", "0xc50(dig)",
+ u4_tmp[0] & 0xff);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5d);
u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA",
- u4_tmp[0], (u1_tmp[0] << 8) + u1_tmp[1]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA",
+ u4_tmp[0], (u1_tmp[0] << 8) + u1_tmp[1]);
u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u4_tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
u4_tmp[0], u4_tmp[1], u4_tmp[2], u1_tmp[0]);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)",
- coex_sta->high_priority_rx, coex_sta->high_priority_tx);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
- coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+ seq_printf(m, "\n %-35s = %d/ %d", "0x770(high-pri rx/tx)",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ seq_printf(m, "\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
+ coex_sta->low_priority_rx, coex_sta->low_priority_tx);
if (btcoexist->auto_report_1ant)
btc8821a1ant_monitor_bt_ctr(btcoexist);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
}
void ex_btc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
index b0a6626fbb66..a498ff5b1b9c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
@@ -186,7 +186,8 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_btc8821a1ant_halt_notify(struct btc_coexist *btcoexist);
void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate);
void ex_btc8821a1ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m);
void ex_btc8821a1ant_dbg_control(struct btc_coexist *btcoexist, u8 op_code,
u8 op_len, u8 *data);
void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index 41943c34edff..d5f282cb9d24 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -3657,11 +3657,11 @@ void ex_btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
btc8821a2ant_init_coex_dm(btcoexist);
}
-void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m)
{
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u1tmp[4], i, bt_info_ext, ps_tdma_case = 0;
u32 u4tmp[4];
bool roam = false, scan = false, link = false, wifi_under_5g = false;
@@ -3671,32 +3671,22 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
u8 wifi_dot_11_chnl, wifi_hs_chnl;
u32 fw_ver = 0, bt_patch_ver = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n ============[BT Coexist info]============");
-
- if (!board_info->bt_exist) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
- return;
- }
+ seq_puts(m, "\n ============[BT Coexist info]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
- board_info->pg_ant_num, board_info->btdm_ant_num);
+ seq_printf(m, "\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+ board_info->pg_ant_num, board_info->btdm_ant_num);
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s", "[Action Manual control]!!");
+ seq_printf(m, "\n %-35s", "[Action Manual control]!!");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
+ seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver",
((stack_info->profile_notified) ? "Yes" : "No"),
stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+ seq_printf(m, "\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
"CoexVer/ FwVer/ PatchVer",
glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
fw_ver, bt_patch_ver, bt_patch_ver);
@@ -3707,27 +3697,23 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_dot_11_chnl);
btcoexist->btc_get(btcoexist,
BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d(%d)",
+ seq_printf(m, "\n %-35s = %d / %d(%d)",
"Dot11 channel / HsMode(HsChnl)",
wifi_dot_11_chnl, bt_hs_on, wifi_hs_chnl);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %3ph ",
+ seq_printf(m, "\n %-35s = %3ph ",
"H2C Wifi inform bt chnl Info",
coex_dm->wifi_chnl_info);
btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi",
+ seq_printf(m, "\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi",
wifi_rssi, bt_hs_rssi);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
link, roam, scan);
btcoexist->btc_get(btcoexist,
@@ -3738,8 +3724,7 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
BTC_GET_BL_WIFI_BUSY, &wifi_busy);
btcoexist->btc_get(btcoexist,
BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %s / %s/ %s ", "Wifi status",
+ seq_printf(m, "\n %-35s = %s / %s/ %s ", "Wifi status",
(wifi_under_5g ? "5G" : "2.4G"),
((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
(((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
@@ -3748,134 +3733,128 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
"uplink" : "downlink")));
if (stack_info->profile_notified) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
+ seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP",
stack_info->sco_exist, stack_info->hid_exist,
stack_info->pan_exist, stack_info->a2dp_exist);
btcoexist->btc_disp_dbg_msg(btcoexist,
- BTC_DBG_DISP_BT_LINK_INFO);
+ BTC_DBG_DISP_BT_LINK_INFO,
+ m);
}
bt_info_ext = coex_sta->bt_info_ext;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
- "BT Info A2DP rate",
+ seq_printf(m, "\n %-35s = %s", "BT Info A2DP rate",
(bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
for (i = 0; i < BT_INFO_SRC_8821A_2ANT_MAX; i++) {
if (coex_sta->bt_info_c2h_cnt[i]) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %7ph(%d)",
- glbt_info_src_8821a_2ant[i],
- coex_sta->bt_info_c2h[i],
- coex_sta->bt_info_c2h_cnt[i]);
+ seq_printf(m, "\n %-35s = %7ph(%d)",
+ glbt_info_src_8821a_2ant[i],
+ coex_sta->bt_info_c2h[i],
+ coex_sta->bt_info_c2h_cnt[i]);
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
- "PS state, IPS/LPS",
- ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
- ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+ seq_printf(m, "\n %-35s = %s/%s",
+ "PS state, IPS/LPS",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
/* Sw mechanism*/
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Sw mechanism]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d/ %d(0x%x) ",
- "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
- coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
- coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ seq_printf(m, "\n %-35s",
+ "============[Sw mechanism]============");
+ seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
/* Fw mechanism*/
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
- "============[Fw mechanism]============");
+ seq_printf(m, "\n %-35s",
+ "============[Fw mechanism]============");
if (!btcoexist->manual_control) {
ps_tdma_case = coex_dm->cur_ps_tdma;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %5ph case-%d",
- "PS TDMA",
- coex_dm->ps_tdma_para, ps_tdma_case);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
- coex_dm->cur_dec_bt_pwr_lvl,
- coex_dm->cur_ignore_wlan_act);
+ seq_printf(m, "\n %-35s = %5ph case-%d",
+ "PS TDMA",
+ coex_dm->ps_tdma_para, ps_tdma_case);
+
+ seq_printf(m, "\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
+ coex_dm->cur_dec_bt_pwr_lvl,
+ coex_dm->cur_ignore_wlan_act);
}
/* Hw setting*/
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s", "============[Hw setting]============");
+ seq_printf(m, "\n %-35s", "============[Hw setting]============");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal",
- coex_dm->bt_rf0x1e_backup);
+ seq_printf(m, "\n %-35s = 0x%x", "RF-A, 0x1e initVal",
+ coex_dm->bt_rf0x1e_backup);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x ",
- "0x778 (W_Act)/ 0x6cc (CoTab Sel)",
- u1tmp[0], u1tmp[1]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x ",
+ "0x778 (W_Act)/ 0x6cc (CoTab Sel)",
+ u1tmp[0], u1tmp[1]);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db);
u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xc5b);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x8db(ADC)/0xc5b[29:25](DAC)",
- ((u1tmp[0] & 0x60) >> 5), ((u1tmp[1] & 0x3e) >> 1));
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x8db(ADC)/0xc5b[29:25](DAC)",
+ ((u1tmp[0] & 0x60) >> 5), ((u1tmp[1] & 0x3e) >> 1));
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)",
- u4tmp[0] & 0xff, ((u4tmp[0] & 0x30000000) >> 28));
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)",
+ u4tmp[0] & 0xff, ((u4tmp[0] & 0x30000000) >> 28));
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x974);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x40/ 0x4c[24:23]/ 0x974",
- u1tmp[0], ((u4tmp[0] & 0x01800000) >> 23), u4tmp[1]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x40/ 0x4c[24:23]/ 0x974",
+ u1tmp[0], ((u4tmp[0] & 0x01800000) >> 23), u4tmp[1]);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0x550(bcn ctrl)/0x522",
- u4tmp[0], u1tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522",
+ u4tmp[0], u1tmp[0]);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa0a);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "0xc50(DIG)/0xa0a(CCK-TH)",
- u4tmp[0], u1tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "0xc50(DIG)/0xa0a(CCK-TH)",
+ u4tmp[0], u1tmp[0]);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
- "OFDM-FA/ CCK-FA",
- u4tmp[0], (u1tmp[0] << 8) + u1tmp[1]);
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+ "OFDM-FA/ CCK-FA",
+ u4tmp[0], (u1tmp[0] << 8) + u1tmp[1]);
u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
u4tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
- "0x6c0/0x6c4/0x6c8",
- u4tmp[0], u4tmp[1], u4tmp[2]);
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
- "0x770 (hi-pri Rx/Tx)",
- coex_sta->high_priority_rx, coex_sta->high_priority_tx);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
+ seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8",
+ u4tmp[0], u4tmp[1], u4tmp[2]);
+
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "0x770 (hi-pri Rx/Tx)",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ seq_printf(m, "\n %-35s = %d/ %d",
"0x774(low-pri Rx/Tx)",
coex_sta->low_priority_rx, coex_sta->low_priority_tx);
/* Tx mgnt queue hang or not, 0x41b should be 0xf; e.g. 0xd => hang */
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x41b);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
- "0x41b (mgntQ hang chk == 0xf)",
- u1tmp[0]);
+ seq_printf(m, "\n %-35s = 0x%x",
+ "0x41b (mgntQ hang chk == 0xf)",
+ u1tmp[0]);
- btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
}
void ex_btc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
index a839d5574422..ce3e58c4e660 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
@@ -226,7 +226,8 @@ ex_btc8821a2ant_periodical(
);
void
ex_btc8821a2ant_display_coex_info(
- struct btc_coexist *btcoexist
+ struct btc_coexist *btcoexist,
+ struct seq_file *m
);
void ex_btc8821a2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
void ex_btc8821a2ant_pre_load_firmware(struct btc_coexist *btcoexist);
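Every per-chip display handler now takes a struct seq_file, so the coex dump is written into a buffer that a debugfs reader can collect instead of being forced through RT_TRACE. A minimal sketch of how a debugfs show routine could drive this path (the function names and wiring here are illustrative, not part of this patch; only rtl_btc_display_bt_coex_info() is real):

static int rtl_btcoex_show(struct seq_file *m, void *v)
{
	struct rtl_priv *rtlpriv = m->private;

	/* fans out to ex_btc8821a2ant_display_coex_info() and friends */
	rtl_btc_display_bt_coex_info(rtlpriv, m);
	return 0;
}

static int rtl_btcoex_open(struct inode *inode, struct file *file)
{
	/* single_open() allocates the seq_file handed to the show routine */
	return single_open(file, rtl_btcoex_show, inode->i_private);
}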
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index b5e9877d935c..1404729441a2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -25,19 +25,11 @@
#include "halbt_precomp.h"
-/***********************************************
- * Global variables
- ***********************************************/
-
-struct btc_coexist gl_bt_coexist;
-
-u32 btc_dbg_type[BTC_MSG_MAX];
-
/***************************************************
* Debug related function
***************************************************/
-const char *const gl_btc_wifi_bw_string[] = {
+static const char *const gl_btc_wifi_bw_string[] = {
"11bg",
"HT20",
"HT40",
@@ -45,7 +37,7 @@ const char *const gl_btc_wifi_bw_string[] = {
"HT160"
};
-const char *const gl_btc_wifi_freq_string[] = {
+static const char *const gl_btc_wifi_freq_string[] = {
"2.4G",
"5G"
};
@@ -103,21 +95,6 @@ static bool is_any_client_connect_to_ap(struct btc_coexist *btcoexist)
return false;
}
-static bool halbtc_is_bt40(struct rtl_priv *adapter)
-{
- struct rtl_priv *rtlpriv = adapter;
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- bool is_ht40 = true;
- enum ht_channel_width bw = rtlphy->current_chan_bw;
-
- if (bw == HT_CHANNEL_WIDTH_20)
- is_ht40 = false;
- else if (bw == HT_CHANNEL_WIDTH_20_40)
- is_ht40 = true;
-
- return is_ht40;
-}
-
static bool halbtc_legacy(struct rtl_priv *adapter)
{
struct rtl_priv *rtlpriv = adapter;
@@ -143,18 +120,26 @@ bool halbtc_is_wifi_uplink(struct rtl_priv *adapter)
static u32 halbtc_get_wifi_bw(struct btc_coexist *btcoexist)
{
- struct rtl_priv *rtlpriv =
- (struct rtl_priv *)btcoexist->adapter;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
u32 wifi_bw = BTC_WIFI_BW_HT20;
- if (halbtc_is_bt40(rtlpriv)) {
- wifi_bw = BTC_WIFI_BW_HT40;
+ if (halbtc_legacy(rtlpriv)) {
+ wifi_bw = BTC_WIFI_BW_LEGACY;
} else {
- if (halbtc_legacy(rtlpriv))
- wifi_bw = BTC_WIFI_BW_LEGACY;
- else
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
wifi_bw = BTC_WIFI_BW_HT20;
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ wifi_bw = BTC_WIFI_BW_HT40;
+ break;
+ case HT_CHANNEL_WIDTH_80:
+ wifi_bw = BTC_WIFI_BW_HT80;
+ break;
+ }
}
+
return wifi_bw;
}
@@ -171,7 +156,7 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
return chnl;
}
-u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
+static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
{
struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
@@ -186,12 +171,12 @@ u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
return rtlpriv->btcoexist.btc_info.single_ant_path;
}
-u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
+static u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
{
return rtlpriv->btcoexist.btc_info.bt_type;
}
-u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
{
struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
u8 num;
@@ -208,13 +193,117 @@ u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
return num;
}
-u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv)
+static u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv)
{
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
return rtlhal->package_type;
}
+static
+u8 rtl_get_hwpg_rfe_type(struct rtl_priv *rtlpriv)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ return rtlhal->rfe_type;
+}
+
+static
+bool halbtc_is_hw_mailbox_exist(struct btc_coexist *btcoexist)
+{
+	return !IS_HARDWARE_TYPE_8812(btcoexist->adapter);
+}
+
+static
+bool halbtc_send_bt_mp_operation(struct btc_coexist *btcoexist, u8 op_code,
+ u8 *cmd, u32 len, unsigned long wait_ms)
+{
+ struct rtl_priv *rtlpriv;
+ const u8 oper_ver = 0;
+ u8 req_num;
+
+ if (!halbtc_is_hw_mailbox_exist(btcoexist))
+ return false;
+
+ if (wait_ms) /* before h2c to avoid race condition */
+ reinit_completion(&btcoexist->bt_mp_comp);
+
+ rtlpriv = btcoexist->adapter;
+
+	/* fill req_num according to op_code; rtl_btc_btmpinfo_notify() uses
+	 * it to identify the message type
+	 */
+ switch (op_code) {
+ case BT_OP_GET_BT_VERSION:
+ req_num = BT_SEQ_GET_BT_VERSION;
+ break;
+ case BT_OP_GET_AFH_MAP_L:
+ req_num = BT_SEQ_GET_AFH_MAP_L;
+ break;
+ case BT_OP_GET_AFH_MAP_M:
+ req_num = BT_SEQ_GET_AFH_MAP_M;
+ break;
+ case BT_OP_GET_AFH_MAP_H:
+ req_num = BT_SEQ_GET_AFH_MAP_H;
+ break;
+ case BT_OP_GET_BT_COEX_SUPPORTED_FEATURE:
+ req_num = BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE;
+ break;
+ case BT_OP_GET_BT_COEX_SUPPORTED_VERSION:
+ req_num = BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION;
+ break;
+ case BT_OP_GET_BT_ANT_DET_VAL:
+ req_num = BT_SEQ_GET_BT_ANT_DET_VAL;
+ break;
+ case BT_OP_GET_BT_BLE_SCAN_PARA:
+ req_num = BT_SEQ_GET_BT_BLE_SCAN_PARA;
+ break;
+ case BT_OP_GET_BT_BLE_SCAN_TYPE:
+ req_num = BT_SEQ_GET_BT_BLE_SCAN_TYPE;
+ break;
+ case BT_OP_GET_BT_DEVICE_INFO:
+ req_num = BT_SEQ_GET_BT_DEVICE_INFO;
+ break;
+ case BT_OP_GET_BT_FORBIDDEN_SLOT_VAL:
+ req_num = BT_SEQ_GET_BT_FORB_SLOT_VAL;
+ break;
+ case BT_OP_WRITE_REG_ADDR:
+ case BT_OP_WRITE_REG_VALUE:
+ case BT_OP_READ_REG:
+ default:
+ req_num = BT_SEQ_DONT_CARE;
+ break;
+ }
+
+ cmd[0] |= (oper_ver & 0x0f); /* Set OperVer */
+ cmd[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */
+ cmd[1] = op_code;
+ rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, len, cmd);
+
+	/* if no wait was requested, we are done */
+ if (!wait_ms)
+ return true;
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "btmpinfo wait req_num=%d wait=%ld\n", req_num, wait_ms);
+
+ if (in_interrupt())
+ return false;
+
+ if (wait_for_completion_timeout(&btcoexist->bt_mp_comp,
+ msecs_to_jiffies(wait_ms)) == 0) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "btmpinfo wait (req_num=%d) timeout\n", req_num);
+
+ return false; /* timeout */
+ }
+
+ return true;
+}
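For reference, the header packing above places OperVer in the low nibble of cmd[0] and ReqNum in the high nibble; the BT firmware echoes ReqNum in its C2H reply, which is how rtl_btc_btmpinfo_notify() tells responses apart. A standalone sketch of that packing (hypothetical helper, shown only to make the bit layout explicit):

static void btc_pack_mp_hdr(u8 *cmd, u8 oper_ver, u8 req_num, u8 op_code)
{
	cmd[0] = (oper_ver & 0x0f) | ((req_num << 4) & 0xf0);
	cmd[1] = op_code;
	/* cmd[2..] carries the per-opcode payload, if any */
}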
+
static void halbtc_leave_lps(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv;
@@ -342,25 +431,80 @@ static void halbtc_aggregation_check(struct btc_coexist *btcoexist)
static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist)
{
- struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 cmd_buffer[4] = {0};
- u8 oper_ver = 0;
- u8 req_num = 0x0E;
if (btcoexist->bt_info.bt_real_fw_ver)
goto label_done;
- cmd_buffer[0] |= (oper_ver & 0x0f); /* Set OperVer */
- cmd_buffer[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */
- cmd_buffer[1] = 0; /* BT_OP_GET_BT_VERSION = 0 */
- rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4,
- &cmd_buffer[0]);
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_VERSION,
+ cmd_buffer, 4, 200);
label_done:
return btcoexist->bt_info.bt_real_fw_ver;
}
-u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist)
+static u32 halbtc_get_bt_coex_supported_feature(void *btc_context)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[4] = {0};
+
+ if (btcoexist->bt_info.bt_supported_feature)
+ goto label_done;
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist,
+ BT_OP_GET_BT_COEX_SUPPORTED_FEATURE,
+ cmd_buffer, 4, 200);
+
+label_done:
+ return btcoexist->bt_info.bt_supported_feature;
+}
+
+static u32 halbtc_get_bt_coex_supported_version(void *btc_context)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[4] = {0};
+
+ if (btcoexist->bt_info.bt_supported_version)
+ goto label_done;
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist,
+ BT_OP_GET_BT_COEX_SUPPORTED_VERSION,
+ cmd_buffer, 4, 200);
+
+label_done:
+ return btcoexist->bt_info.bt_supported_version;
+}
+
+static u32 halbtc_get_bt_device_info(void *btc_context)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[4] = {0};
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist,
+ BT_OP_GET_BT_DEVICE_INFO,
+ cmd_buffer, 4, 200);
+
+ return btcoexist->bt_info.bt_device_info;
+}
+
+static u32 halbtc_get_bt_forbidden_slot_val(void *btc_context)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[4] = {0};
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist,
+ BT_OP_GET_BT_FORBIDDEN_SLOT_VAL,
+ cmd_buffer, 4, 200);
+
+ return btcoexist->bt_info.bt_forb_slot_val;
+}
+
+static u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist)
{
/* return value:
* [31:16] => connected port number
@@ -521,6 +665,18 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
case BTC_GET_U4_VENDOR:
*u32_tmp = BTC_VENDOR_OTHER;
break;
+ case BTC_GET_U4_SUPPORTED_VERSION:
+ *u32_tmp = halbtc_get_bt_coex_supported_version(btcoexist);
+ break;
+ case BTC_GET_U4_SUPPORTED_FEATURE:
+ *u32_tmp = halbtc_get_bt_coex_supported_feature(btcoexist);
+ break;
+ case BTC_GET_U4_BT_DEVICE_INFO:
+ *u32_tmp = halbtc_get_bt_device_info(btcoexist);
+ break;
+ case BTC_GET_U4_BT_FORBIDDEN_SLOT_VAL:
+ *u32_tmp = halbtc_get_bt_forbidden_slot_val(btcoexist);
+ break;
case BTC_GET_U1_WIFI_DOT11_CHNL:
*u8_tmp = rtlphy->current_channel;
break;
@@ -653,6 +809,104 @@ static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
return ret;
}
+static void halbtc_display_coex_statistics(struct btc_coexist *btcoexist,
+ struct seq_file *m)
+{
+}
+
+static void halbtc_display_bt_link_info(struct btc_coexist *btcoexist,
+ struct seq_file *m)
+{
+}
+
+static void halbtc_display_wifi_status(struct btc_coexist *btcoexist,
+ struct seq_file *m)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ s32 wifi_rssi = 0, bt_hs_rssi = 0;
+ bool scan = false, link = false, roam = false, wifi_busy = false;
+ bool wifi_under_b_mode = false;
+ bool wifi_under_5g = false;
+ u32 wifi_bw = BTC_WIFI_BW_HT20;
+ u32 wifi_traffic_dir = BTC_WIFI_TRAFFIC_TX;
+ u32 wifi_freq = BTC_FREQ_2_4G;
+ u32 wifi_link_status = 0x0;
+ bool bt_hs_on = false, under_ips = false, under_lps = false;
+ bool low_power = false, dc_mode = false;
+ u8 wifi_chnl = 0, wifi_hs_chnl = 0;
+ u8 ap_num = 0;
+
+ wifi_link_status = halbtc_get_wifi_link_status(btcoexist);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d/ %d/ %d",
+ "STA/vWifi/HS/p2pGo/p2pGc",
+ ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
+ ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_chnl);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+ seq_printf(m, "\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(High Speed)",
+ wifi_chnl, wifi_hs_chnl, bt_hs_on);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+ seq_printf(m, "\n %-35s = %d/ %d",
+ "Wifi rssi/ HS rssi",
+ wifi_rssi - 100, bt_hs_rssi - 100);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+ seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan",
+ link, roam, scan);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ &wifi_traffic_dir);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
+ wifi_freq = (wifi_under_5g ? BTC_FREQ_5G : BTC_FREQ_2_4G);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_B_MODE,
+ &wifi_under_b_mode);
+
+ seq_printf(m, "\n %-35s = %s / %s/ %s/ AP=%d ",
+ "Wifi freq/ bw/ traffic",
+ gl_btc_wifi_freq_string[wifi_freq],
+ ((wifi_under_b_mode) ? "11b" :
+ gl_btc_wifi_bw_string[wifi_bw]),
+ ((!wifi_busy) ? "idle" : ((BTC_WIFI_TRAFFIC_TX ==
+ wifi_traffic_dir) ? "uplink" :
+ "downlink")),
+ ap_num);
+
+ /* power status */
+	dc_mode = true; /* TODO */
+	under_ips = rtlpriv->psc.inactive_pwrstate == ERFOFF;
+	under_lps = rtlpriv->psc.dot11_psmode != EACTIVE;
+	low_power = false; /* TODO */
+ seq_printf(m, "\n %-35s = %s%s%s%s",
+ "Power Status",
+ (dc_mode ? "DC mode" : "AC mode"),
+ (under_ips ? ", IPS ON" : ""),
+ (under_lps ? ", LPS ON" : ""),
+ (low_power ? ", 32k" : ""));
+
+ seq_printf(m,
+ "\n %-35s = %02x %02x %02x %02x %02x %02x (0x%x/0x%x)",
+ "Power mode cmd(lps/rpwm)",
+ btcoexist->pwr_mode_val[0], btcoexist->pwr_mode_val[1],
+ btcoexist->pwr_mode_val[2], btcoexist->pwr_mode_val[3],
+ btcoexist->pwr_mode_val[4], btcoexist->pwr_mode_val[5],
+ btcoexist->bt_info.lps_val,
+ btcoexist->bt_info.rpwm_val);
+}
+
/************************************************************
* IO related function
************************************************************/
@@ -726,7 +980,8 @@ static void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data)
rtl_write_dword(rtlpriv, reg_addr, data);
}
-void halbtc_write_local_reg_1byte(void *btc_context, u32 reg_addr, u8 data)
+static void halbtc_write_local_reg_1byte(void *btc_context, u32 reg_addr,
+ u8 data)
{
struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -739,22 +994,6 @@ void halbtc_write_local_reg_1byte(void *btc_context, u32 reg_addr, u8 data)
rtl_write_byte(rtlpriv, reg_addr, data);
}
-void halbtc_set_macreg(void *btc_context, u32 reg_addr, u32 bit_mask, u32 data)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
-}
-
-u32 halbtc_get_macreg(void *btc_context, u32 reg_addr, u32 bit_mask)
-{
- struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
-
- return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
-}
-
static void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask,
u32 data)
{
@@ -800,38 +1039,47 @@ static void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id,
cmd_len, cmd_buf);
}
+static
void halbtc_set_bt_reg(void *btc_context, u8 reg_type, u32 offset, u32 set_val)
{
struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
- struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 cmd_buffer1[4] = {0};
u8 cmd_buffer2[4] = {0};
- u8 *addr_to_set = (u8 *)&offset;
- u8 *value_to_set = (u8 *)&set_val;
- u8 oper_ver = 0;
- u8 req_num = 0;
- if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
- cmd_buffer1[0] |= (oper_ver & 0x0f); /* Set OperVer */
- cmd_buffer1[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */
- cmd_buffer1[1] = 0x0d; /* OpCode: BT_LO_OP_WRITE_REG_VALUE */
- cmd_buffer1[2] = value_to_set[0]; /* Set WriteRegValue */
- rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4,
- &cmd_buffer1[0]);
-
- msleep(200);
- req_num++;
-
- cmd_buffer2[0] |= (oper_ver & 0x0f); /* Set OperVer */
- cmd_buffer2[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */
- cmd_buffer2[1] = 0x0c; /* OpCode: BT_LO_OP_WRITE_REG_ADDR */
- cmd_buffer2[3] = addr_to_set[0]; /* Set WriteRegAddr */
- rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4,
- &cmd_buffer2[0]);
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ *((__le16 *)&cmd_buffer1[2]) = cpu_to_le16((u16)set_val);
+ if (!halbtc_send_bt_mp_operation(btcoexist, BT_OP_WRITE_REG_VALUE,
+ cmd_buffer1, 4, 200))
+ return;
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ cmd_buffer2[2] = reg_type;
+	cmd_buffer2[3] = (u8)offset;
+ halbtc_send_bt_mp_operation(btcoexist, BT_OP_WRITE_REG_ADDR,
+ cmd_buffer2, 4, 200);
+}
+
+static void halbtc_display_dbg_msg(void *bt_context, u8 disp_type,
+ struct seq_file *m)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+
+ switch (disp_type) {
+ case BTC_DBG_DISP_COEX_STATISTICS:
+ halbtc_display_coex_statistics(btcoexist, m);
+ break;
+ case BTC_DBG_DISP_BT_LINK_INFO:
+ halbtc_display_bt_link_info(btcoexist, m);
+ break;
+ case BTC_DBG_DISP_WIFI_STATUS:
+ halbtc_display_wifi_status(btcoexist, m);
+ break;
+ default:
+ break;
}
}
-bool halbtc_under_ips(struct btc_coexist *btcoexist)
+static bool halbtc_under_ips(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
@@ -849,12 +1097,95 @@ bool halbtc_under_ips(struct btc_coexist *btcoexist)
return false;
}
+static u8 halbtc_get_ant_det_val_from_bt(void *btc_context)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[4] = {0};
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_ANT_DET_VAL,
+ cmd_buffer, 4, 200);
+
+	/* need to wait for the completion to return the correct value */
+
+ return btcoexist->bt_info.bt_ant_det_val;
+}
+
+static u8 halbtc_get_ble_scan_type_from_bt(void *btc_context)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[4] = {0};
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_BLE_SCAN_TYPE,
+ cmd_buffer, 4, 200);
+
+	/* need to wait for the completion to return the correct value */
+
+ return btcoexist->bt_info.bt_ble_scan_type;
+}
+
+static u32 halbtc_get_ble_scan_para_from_bt(void *btc_context, u8 scan_type)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[4] = {0};
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_BT_BLE_SCAN_PARA,
+ cmd_buffer, 4, 200);
+
+	/* need to wait for the completion to return the correct value */
+
+ return btcoexist->bt_info.bt_ble_scan_para;
+}
+
+static bool halbtc_get_bt_afh_map_from_bt(void *btc_context, u8 map_type,
+ u8 *afh_map)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)btc_context;
+ u8 cmd_buffer[2] = {0};
+ bool ret;
+ u32 *afh_map_l = (u32 *)afh_map;
+ u32 *afh_map_m = (u32 *)(afh_map + 4);
+ u16 *afh_map_h = (u16 *)(afh_map + 8);
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_L,
+ cmd_buffer, 2, 200);
+ if (!ret)
+ goto exit;
+
+ *afh_map_l = btcoexist->bt_info.afh_map_l;
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_M,
+ cmd_buffer, 2, 200);
+ if (!ret)
+ goto exit;
+
+ *afh_map_m = btcoexist->bt_info.afh_map_m;
+
+	/* cmd_buffer[0] and [1] are filled by halbtc_send_bt_mp_operation() */
+ ret = halbtc_send_bt_mp_operation(btcoexist, BT_OP_GET_AFH_MAP_H,
+ cmd_buffer, 2, 200);
+ if (!ret)
+ goto exit;
+
+ *afh_map_h = btcoexist->bt_info.afh_map_h;
+
+exit:
+ return ret;
+}
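The three requests above assemble a 10-byte AFH map: 4 low bytes, 4 middle bytes and 2 high bytes, enough to cover the 79 Bluetooth channels. A hedged usage sketch (the map_type value is a placeholder; the helper only requires a buffer of at least 10 bytes):

u8 afh_map[10] = {0};	/* 4 (L) + 4 (M) + 2 (H) bytes */

if (btcoexist->btc_get_bt_afh_map_from_bt(btcoexist, 0, afh_map))
	pr_debug("AFH map: %10ph\n", afh_map);	/* 79-channel bitmap */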
+
/*****************************************************************
* Extern functions called by other module
*****************************************************************/
-bool exhalbtc_initlize_variables(void)
+bool exhalbtc_initlize_variables(struct rtl_priv *rtlpriv)
{
- struct btc_coexist *btcoexist = &gl_bt_coexist;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return false;
halbtc_dbg_init();
@@ -874,24 +1205,76 @@ bool exhalbtc_initlize_variables(void)
btcoexist->btc_get_rf_reg = halbtc_get_rfreg;
btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd;
+ btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg;
btcoexist->btc_get = halbtc_get;
btcoexist->btc_set = halbtc_set;
btcoexist->btc_set_bt_reg = halbtc_set_bt_reg;
-
btcoexist->bt_info.bt_ctrl_buf_size = false;
btcoexist->bt_info.agg_buf_size = 5;
btcoexist->bt_info.increase_scan_dev_num = false;
+
+ btcoexist->btc_get_bt_coex_supported_feature =
+ halbtc_get_bt_coex_supported_feature;
+ btcoexist->btc_get_bt_coex_supported_version =
+ halbtc_get_bt_coex_supported_version;
+ btcoexist->btc_get_ant_det_val_from_bt = halbtc_get_ant_det_val_from_bt;
+ btcoexist->btc_get_ble_scan_type_from_bt =
+ halbtc_get_ble_scan_type_from_bt;
+ btcoexist->btc_get_ble_scan_para_from_bt =
+ halbtc_get_ble_scan_para_from_bt;
+ btcoexist->btc_get_bt_afh_map_from_bt =
+ halbtc_get_bt_afh_map_from_bt;
+
+ init_completion(&btcoexist->bt_mp_comp);
+
+ return true;
+}
+
+bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv)
+{
+ struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
+ struct wifi_only_haldata *wifionly_haldata;
+
+ if (!wifionly_cfg)
+ return false;
+
+ wifionly_cfg->adapter = rtlpriv;
+
+ switch (rtlpriv->rtlhal.interface) {
+ case INTF_PCI:
+		wifionly_cfg->chip_interface = WIFIONLY_INTF_PCI;
+		break;
+	case INTF_USB:
+		wifionly_cfg->chip_interface = WIFIONLY_INTF_USB;
+		break;
+	default:
+		wifionly_cfg->chip_interface = WIFIONLY_INTF_UNKNOWN;
+ break;
+ }
+
+ wifionly_haldata = &wifionly_cfg->haldata_info;
+
+ wifionly_haldata->customer_id = CUSTOMER_NORMAL;
+ wifionly_haldata->efuse_pg_antnum = rtl_get_hwpg_ant_num(rtlpriv);
+ wifionly_haldata->efuse_pg_antpath =
+ rtl_get_hwpg_single_ant_path(rtlpriv);
+ wifionly_haldata->rfe_type = rtl_get_hwpg_rfe_type(rtlpriv);
+ wifionly_haldata->ant_div_cfg = 0;
+
return true;
}
bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
{
- struct btc_coexist *btcoexist = &gl_bt_coexist;
struct rtl_priv *rtlpriv = adapter;
- u8 ant_num = 2, chip_type;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+ u8 ant_num = 2, chip_type, single_ant_path = 0;
+
+ if (!btcoexist)
+ return false;
if (btcoexist->binded)
return false;
@@ -922,10 +1305,16 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
btcoexist->bt_info.miracast_plus_bt = false;
chip_type = rtl_get_hwpg_bt_type(rtlpriv);
- exhalbtc_set_chip_type(chip_type);
+ exhalbtc_set_chip_type(btcoexist, chip_type);
ant_num = rtl_get_hwpg_ant_num(rtlpriv);
exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num);
+ /* set default antenna position to main port */
+ btcoexist->board_info.btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT;
+
+ single_ant_path = rtl_get_hwpg_single_ant_path(rtlpriv);
+ exhalbtc_set_single_ant_path(btcoexist, single_ant_path);
+
if (rtl_get_hwpg_package_type(rtlpriv) == 0)
btcoexist->board_info.tfbga_package = false;
else if (rtl_get_hwpg_package_type(rtlpriv) == 1)
@@ -940,6 +1329,9 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], Package Type = Non-TFBGA\n");
+ btcoexist->board_info.rfe_type = rtl_get_hwpg_rfe_type(rtlpriv);
+ btcoexist->board_info.ant_div_cfg = 0;
+
return true;
}
@@ -996,6 +1388,10 @@ void exhalbtc_init_hw_config(struct btc_coexist *btcoexist, bool wifi_only)
}
}
+void exhalbtc_init_hw_config_wifi_only(struct wifi_only_cfg *wifionly_cfg)
+{
+}
+
void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
{
if (!halbtc_is_bt_coexist_available(btcoexist))
@@ -1122,6 +1518,11 @@ void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type)
halbtc_normal_low_power(btcoexist);
}
+void exhalbtc_scan_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
+ u8 is_5g)
+{
+}
+
void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
{
u8 asso_type;
@@ -1430,30 +1831,25 @@ void exhalbtc_stack_update_profile_info(void)
{
}
-void exhalbtc_update_min_bt_rssi(s8 bt_rssi)
+void exhalbtc_update_min_bt_rssi(struct btc_coexist *btcoexist, s8 bt_rssi)
{
- struct btc_coexist *btcoexist = &gl_bt_coexist;
-
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
btcoexist->stack_info.min_bt_rssi = bt_rssi;
}
-void exhalbtc_set_hci_version(u16 hci_version)
+void exhalbtc_set_hci_version(struct btc_coexist *btcoexist, u16 hci_version)
{
- struct btc_coexist *btcoexist = &gl_bt_coexist;
-
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
btcoexist->stack_info.hci_version = hci_version;
}
-void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version)
+void exhalbtc_set_bt_patch_version(struct btc_coexist *btcoexist,
+ u16 bt_hci_version, u16 bt_patch_version)
{
- struct btc_coexist *btcoexist = &gl_bt_coexist;
-
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
@@ -1461,7 +1857,7 @@ void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version)
btcoexist->bt_info.bt_hci_ver = bt_hci_version;
}
-void exhalbtc_set_chip_type(u8 chip_type)
+void exhalbtc_set_chip_type(struct btc_coexist *btcoexist, u8 chip_type)
{
switch (chip_type) {
default:
@@ -1469,51 +1865,58 @@ void exhalbtc_set_chip_type(u8 chip_type)
case BT_ISSC_3WIRE:
case BT_ACCEL:
case BT_RTL8756:
- gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_UNDEF;
+ btcoexist->board_info.bt_chip_type = BTC_CHIP_UNDEF;
break;
case BT_CSR_BC4:
- gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC4;
+ btcoexist->board_info.bt_chip_type = BTC_CHIP_CSR_BC4;
break;
case BT_CSR_BC8:
- gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC8;
+ btcoexist->board_info.bt_chip_type = BTC_CHIP_CSR_BC8;
break;
case BT_RTL8723A:
- gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723A;
+ btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8723A;
break;
case BT_RTL8821A:
- gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8821;
+ btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8821;
break;
case BT_RTL8723B:
- gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723B;
+ btcoexist->board_info.bt_chip_type = BTC_CHIP_RTL8723B;
break;
}
}
void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num)
{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
if (BT_COEX_ANT_TYPE_PG == type) {
- gl_bt_coexist.board_info.pg_ant_num = ant_num;
- gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+ btcoexist->board_info.pg_ant_num = ant_num;
+ btcoexist->board_info.btdm_ant_num = ant_num;
} else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
- gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+ btcoexist->board_info.btdm_ant_num = ant_num;
} else if (type == BT_COEX_ANT_TYPE_DETECTED) {
- gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+ btcoexist->board_info.btdm_ant_num = ant_num;
if (rtlpriv->cfg->mod_params->ant_sel == 1)
- gl_bt_coexist.board_info.btdm_ant_pos =
+ btcoexist->board_info.btdm_ant_pos =
BTC_ANTENNA_AT_AUX_PORT;
else
- gl_bt_coexist.board_info.btdm_ant_pos =
+ btcoexist->board_info.btdm_ant_pos =
BTC_ANTENNA_AT_MAIN_PORT;
}
}
/* Currently used by 8723b only, S0 or S1 */
-void exhalbtc_set_single_ant_path(u8 single_ant_path)
+void exhalbtc_set_single_ant_path(struct btc_coexist *btcoexist,
+ u8 single_ant_path)
{
- gl_bt_coexist.board_info.single_ant_path = single_ant_path;
+ btcoexist->board_info.single_ant_path = single_ant_path;
}
-void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m)
{
if (!halbtc_is_bt_coexist_available(btcoexist))
return;
@@ -1522,18 +1925,36 @@ void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8821a2ant_display_coex_info(btcoexist);
+ ex_btc8821a2ant_display_coex_info(btcoexist, m);
else if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8821a1ant_display_coex_info(btcoexist);
+ ex_btc8821a1ant_display_coex_info(btcoexist, m);
} else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8723b2ant_display_coex_info(btcoexist);
+ ex_btc8723b2ant_display_coex_info(btcoexist, m);
else if (btcoexist->board_info.btdm_ant_num == 1)
- ex_btc8723b1ant_display_coex_info(btcoexist);
+ ex_btc8723b1ant_display_coex_info(btcoexist, m);
} else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
if (btcoexist->board_info.btdm_ant_num == 2)
- ex_btc8192e2ant_display_coex_info(btcoexist);
+ ex_btc8192e2ant_display_coex_info(btcoexist, m);
}
halbtc_normal_low_power(btcoexist);
}
+
+void exhalbtc_switch_band_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ if (btcoexist->manual_control)
+ return;
+
+ halbtc_leave_low_power(btcoexist);
+
+ halbtc_normal_low_power(btcoexist);
+}
+
+void exhalbtc_switch_band_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
+ u8 is_5g)
+{
+}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index f9b87c12db09..8ed217656539 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -103,8 +103,6 @@ enum btc_msg_type {
BTC_MSG_MAX
};
-extern u32 btc_dbg_type[];
-
/* following is for BTC_MSG_INTERFACE */
#define INTF_INIT BIT0
#define INTF_NOTIFY BIT2
@@ -152,8 +150,10 @@ struct btc_board_info {
u8 btdm_ant_num; /* ant number for btdm */
u8 btdm_ant_pos;
u8 single_ant_path; /* currently used for 8723b only, 1=>s0, 0=>s1 */
- bool bt_exist;
bool tfbga_package;
+
+ u8 rfe_type;
+ u8 ant_div_cfg;
};
enum btc_dbg_opcode {
@@ -181,10 +181,17 @@ enum btc_wifi_role {
BTC_ROLE_MAX
};
+enum btc_wireless_freq {
+ BTC_FREQ_2_4G = 0x0,
+ BTC_FREQ_5G = 0x1,
+ BTC_FREQ_MAX
+};
+
enum btc_wifi_bw_mode {
BTC_WIFI_BW_LEGACY = 0x0,
BTC_WIFI_BW_HT20 = 0x1,
BTC_WIFI_BW_HT40 = 0x2,
+ BTC_WIFI_BW_HT80 = 0x3,
BTC_WIFI_BW_MAX
};
@@ -275,6 +282,8 @@ enum btc_get_type {
BTC_GET_U4_VENDOR,
BTC_GET_U4_SUPPORTED_VERSION,
BTC_GET_U4_SUPPORTED_FEATURE,
+ BTC_GET_U4_BT_DEVICE_INFO,
+ BTC_GET_U4_BT_FORBIDDEN_SLOT_VAL,
BTC_GET_U4_WIFI_IQK_TOTAL,
BTC_GET_U4_WIFI_IQK_OK,
BTC_GET_U4_WIFI_IQK_FAIL,
@@ -355,6 +364,7 @@ enum btc_dbg_disp_type {
BTC_DBG_DISP_BT_LINK_INFO = 0x1,
BTC_DBG_DISP_BT_FW_VER = 0x2,
BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3,
+ BTC_DBG_DISP_WIFI_STATUS = 0x04,
BTC_DBG_DISP_MAX
};
@@ -376,6 +386,14 @@ enum btc_notify_type_scan {
BTC_SCAN_MAX
};
+enum btc_notify_type_switchband {
+ BTC_NOT_SWITCH = 0x0,
+ BTC_SWITCH_TO_24G = 0x1,
+ BTC_SWITCH_TO_5G = 0x2,
+ BTC_SWITCH_TO_24G_NOFORSCAN = 0x3,
+ BTC_SWITCH_MAX
+};
+
enum btc_notify_type_associate {
BTC_ASSOCIATE_FINISH = 0x0,
BTC_ASSOCIATE_START = 0x1,
@@ -417,49 +435,6 @@ enum btc_notify_type_stack_operation {
BTC_STACK_OP_MAX
};
-typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr);
-
-typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr);
-
-typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr);
-
-typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u32 data);
-
-typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr,
- u32 bit_mask, u8 data1b);
-
-typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data);
-
-typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data);
-
-typedef void (*bfp_btc_local_reg_w1)(void *btc_context, u32 reg_addr, u8 data);
-typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr,
- u8 bit_mask, u8 data);
-
-typedef void (*bfp_btc_set_bb_reg)(void *btc_context, u32 reg_addr,
- u32 bit_mask, u32 data);
-
-typedef u32 (*bfp_btc_get_bb_reg)(void *btc_context, u32 reg_addr,
- u32 bit_mask);
-
-typedef void (*bfp_btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr,
- u32 bit_mask, u32 data);
-
-typedef u32 (*bfp_btc_get_rf_reg)(void *btc_context, u8 rf_path,
- u32 reg_addr, u32 bit_mask);
-
-typedef void (*bfp_btc_fill_h2c)(void *btc_context, u8 element_id,
- u32 cmd_len, u8 *cmd_buffer);
-
-typedef bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf);
-
-typedef bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf);
-
-typedef void (*bfp_btc_set_bt_reg)(void *btc_context, u8 reg_type, u32 offset,
- u32 value);
-
-typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type);
-
struct btc_bt_info {
bool bt_disabled;
u8 rssi_adjust_for_agc_table_on;
@@ -491,6 +466,17 @@ struct btc_bt_info {
u8 lps_val;
u8 rpwm_val;
u32 ra_mask;
+
+ u32 afh_map_l;
+ u32 afh_map_m;
+ u16 afh_map_h;
+ u32 bt_supported_feature;
+ u32 bt_supported_version;
+ u32 bt_device_info;
+ u32 bt_forb_slot_val;
+ u8 bt_ant_det_val;
+ u8 bt_ble_scan_type;
+ u32 bt_ble_scan_para;
};
struct btc_stack_info {
@@ -546,6 +532,40 @@ enum btc_antenna_pos {
BTC_ANTENNA_AT_AUX_PORT = 0x2,
};
+enum btc_mp_h2c_op_code {
+ BT_OP_GET_BT_VERSION = 0,
+ BT_OP_WRITE_REG_ADDR = 12,
+ BT_OP_WRITE_REG_VALUE = 13,
+ BT_OP_READ_REG = 17,
+ BT_OP_GET_AFH_MAP_L = 30,
+ BT_OP_GET_AFH_MAP_M = 31,
+ BT_OP_GET_AFH_MAP_H = 32,
+ BT_OP_GET_BT_COEX_SUPPORTED_FEATURE = 42,
+ BT_OP_GET_BT_COEX_SUPPORTED_VERSION = 43,
+ BT_OP_GET_BT_ANT_DET_VAL = 44,
+ BT_OP_GET_BT_BLE_SCAN_PARA = 45,
+ BT_OP_GET_BT_BLE_SCAN_TYPE = 46,
+ BT_OP_GET_BT_DEVICE_INFO = 48,
+ BT_OP_GET_BT_FORBIDDEN_SLOT_VAL = 49,
+ BT_OP_MAX
+};
+
+enum btc_mp_h2c_req_num {
+ /* 4 bits only */
+ BT_SEQ_DONT_CARE = 0,
+ BT_SEQ_GET_BT_VERSION = 0xE,
+ BT_SEQ_GET_AFH_MAP_L = 0x5,
+ BT_SEQ_GET_AFH_MAP_M = 0x6,
+ BT_SEQ_GET_AFH_MAP_H = 0x9,
+ BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE = 0x7,
+ BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION = 0x8,
+ BT_SEQ_GET_BT_ANT_DET_VAL = 0x2,
+ BT_SEQ_GET_BT_BLE_SCAN_PARA = 0x3,
+ BT_SEQ_GET_BT_BLE_SCAN_TYPE = 0x4,
+ BT_SEQ_GET_BT_DEVICE_INFO = 0xA,
+ BT_SEQ_GET_BT_FORB_SLOT_VAL = 0xB,
+};
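The two enums pair up: the op code selects the MP operation carried in the H2C command, while the 4-bit sequence number is echoed back in the C2H reply so it can be demultiplexed. On the receive side the header is unpacked as in rtl_btc_btmpinfo_notify() further below (the extid assignment sits in the unchanged part of that function):

extid = tmp_buf[0];		/* C2H extension id */
len   = tmp_buf[1] >> 4;	/* payload length, high nibble */
seq   = tmp_buf[2] >> 4;	/* echoed ReqNum, e.g. BT_SEQ_GET_BT_VERSION */
data  = &tmp_buf[3];		/* payload starts here */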
+
struct btc_coexist {
/* make sure only one adapter can bind the data context */
bool binded;
@@ -563,55 +583,86 @@ struct btc_coexist {
*/
bool auto_report_1ant;
bool auto_report_2ant;
+ bool dbg_mode_1ant;
+ bool dbg_mode_2ant;
bool initilized;
bool stop_coex_dm;
bool manual_control;
struct btc_statistics statistics;
u8 pwr_mode_val[10];
- /* function pointers - io related */
- bfp_btc_r1 btc_read_1byte;
- bfp_btc_w1 btc_write_1byte;
- bfp_btc_w1_bit_mak btc_write_1byte_bitmask;
- bfp_btc_r2 btc_read_2byte;
- bfp_btc_w2 btc_write_2byte;
- bfp_btc_r4 btc_read_4byte;
- bfp_btc_w4 btc_write_4byte;
- bfp_btc_local_reg_w1 btc_write_local_reg_1byte;
-
- bfp_btc_set_bb_reg btc_set_bb_reg;
- bfp_btc_get_bb_reg btc_get_bb_reg;
+ struct completion bt_mp_comp;
- bfp_btc_set_rf_reg btc_set_rf_reg;
- bfp_btc_get_rf_reg btc_get_rf_reg;
-
- bfp_btc_fill_h2c btc_fill_h2c;
-
- bfp_btc_disp_dbg_msg btc_disp_dbg_msg;
-
- bfp_btc_get btc_get;
- bfp_btc_set btc_set;
-
- bfp_btc_set_bt_reg btc_set_bt_reg;
+ /* function pointers - io related */
+ u8 (*btc_read_1byte)(void *btc_context, u32 reg_addr);
+ void (*btc_write_1byte)(void *btc_context, u32 reg_addr, u32 data);
+ void (*btc_write_1byte_bitmask)(void *btc_context, u32 reg_addr,
+ u32 bit_mask, u8 data1b);
+ u16 (*btc_read_2byte)(void *btc_context, u32 reg_addr);
+ void (*btc_write_2byte)(void *btc_context, u32 reg_addr, u16 data);
+ u32 (*btc_read_4byte)(void *btc_context, u32 reg_addr);
+ void (*btc_write_4byte)(void *btc_context, u32 reg_addr, u32 data);
+
+ void (*btc_write_local_reg_1byte)(void *btc_context, u32 reg_addr,
+ u8 data);
+ void (*btc_set_bb_reg)(void *btc_context, u32 reg_addr,
+ u32 bit_mask, u32 data);
+ u32 (*btc_get_bb_reg)(void *btc_context, u32 reg_addr,
+ u32 bit_mask);
+ void (*btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr,
+ u32 bit_mask, u32 data);
+ u32 (*btc_get_rf_reg)(void *btc_context, u8 rf_path,
+ u32 reg_addr, u32 bit_mask);
+
+ void (*btc_fill_h2c)(void *btc_context, u8 element_id,
+ u32 cmd_len, u8 *cmd_buffer);
+
+ void (*btc_disp_dbg_msg)(void *btcoexist, u8 disp_type,
+ struct seq_file *m);
+
+ bool (*btc_get)(void *btcoexist, u8 get_type, void *out_buf);
+ bool (*btc_set)(void *btcoexist, u8 set_type, void *in_buf);
+
+ void (*btc_set_bt_reg)(void *btc_context, u8 reg_type, u32 offset,
+ u32 value);
+ u32 (*btc_get_bt_coex_supported_feature)(void *btcoexist);
+ u32 (*btc_get_bt_coex_supported_version)(void *btcoexist);
+ u8 (*btc_get_ant_det_val_from_bt)(void *btcoexist);
+ u8 (*btc_get_ble_scan_type_from_bt)(void *btcoexist);
+ u32 (*btc_get_ble_scan_para_from_bt)(void *btcoexist, u8 scan_type);
+ bool (*btc_get_bt_afh_map_from_bt)(void *btcoexist, u8 map_type,
+ u8 *afh_map);
};
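With the bfp_* typedefs gone, the ops are ordinary function-pointer members, so assignment and call sites read like any other struct member access; both lines below appear verbatim elsewhere in this patch:

btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg;	/* wiring */
u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);	/* call */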
bool halbtc_is_wifi_uplink(struct rtl_priv *adapter);
-extern struct btc_coexist gl_bt_coexist;
+#define rtl_btc_coexist(rtlpriv) \
+ ((struct btc_coexist *)((rtlpriv)->btcoexist.btc_context))
+#define rtl_btc_wifi_only(rtlpriv) \
+ ((struct wifi_only_cfg *)((rtlpriv)->btcoexist.wifi_only_context))
-bool exhalbtc_initlize_variables(void);
+struct wifi_only_cfg;
+
+bool exhalbtc_initlize_variables(struct rtl_priv *rtlpriv);
+bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv);
bool exhalbtc_bind_bt_coex_withadapter(void *adapter);
+void exhalbtc_power_on_setting(struct btc_coexist *btcoexist);
+void exhalbtc_pre_load_firmware(struct btc_coexist *btcoexist);
void exhalbtc_init_hw_config(struct btc_coexist *btcoexist, bool wifi_only);
+void exhalbtc_init_hw_config_wifi_only(struct wifi_only_cfg *wifionly_cfg);
void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist);
void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type);
void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type);
void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_scan_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
+ u8 is_5g);
void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action);
void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
enum rt_media_status media_status);
void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type);
void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
u8 length);
+void exhalbtc_rf_status_notify(struct btc_coexist *btcoexist, u8 type);
void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type);
void exhalbtc_halt_notify(struct btc_coexist *btcoexist);
void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
@@ -619,18 +670,63 @@ void exhalbtc_coex_dm_switch(struct btc_coexist *btcoexist);
void exhalbtc_periodical(struct btc_coexist *btcoexist);
void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len,
u8 *data);
+void exhalbtc_antenna_detection(struct btc_coexist *btcoexist, u32 cent_freq,
+ u32 offset, u32 span, u32 seconds);
void exhalbtc_stack_update_profile_info(void);
-void exhalbtc_set_hci_version(u16 hci_version);
-void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
-void exhalbtc_update_min_bt_rssi(s8 bt_rssi);
-void exhalbtc_set_bt_exist(bool bt_exist);
-void exhalbtc_set_chip_type(u8 chip_type);
+void exhalbtc_set_hci_version(struct btc_coexist *btcoexist, u16 hci_version);
+void exhalbtc_set_bt_patch_version(struct btc_coexist *btcoexist,
+ u16 bt_hci_version, u16 bt_patch_version);
+void exhalbtc_update_min_bt_rssi(struct btc_coexist *btcoexist, s8 bt_rssi);
+void exhalbtc_set_bt_exist(struct btc_coexist *btcoexist, bool bt_exist);
+void exhalbtc_set_chip_type(struct btc_coexist *btcoexist, u8 chip_type);
void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num);
-void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist,
+ struct seq_file *m);
+void exhalbtc_switch_band_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_switch_band_notify_wifi_only(struct wifi_only_cfg *wifionly_cfg,
+ u8 is_5g);
void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
u8 *rssi_wifi, u8 *rssi_bt);
void exhalbtc_lps_leave(struct btc_coexist *btcoexist);
void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist);
-void exhalbtc_set_single_ant_path(u8 single_ant_path);
+void exhalbtc_set_single_ant_path(struct btc_coexist *btcoexist,
+ u8 single_ant_path);
+
+/* The following are used by wifi_only case */
+enum wifionly_chip_interface {
+ WIFIONLY_INTF_UNKNOWN = 0,
+ WIFIONLY_INTF_PCI = 1,
+ WIFIONLY_INTF_USB = 2,
+ WIFIONLY_INTF_SDIO = 3,
+ WIFIONLY_INTF_MAX
+};
+
+enum wifionly_customer_id {
+ CUSTOMER_NORMAL = 0,
+ CUSTOMER_HP_1 = 1,
+};
+
+struct wifi_only_haldata {
+ u16 customer_id;
+ u8 efuse_pg_antnum;
+ u8 efuse_pg_antpath;
+ u8 rfe_type;
+ u8 ant_div_cfg;
+};
+
+struct wifi_only_cfg {
+ void *adapter;
+ struct wifi_only_haldata haldata_info;
+ enum wifionly_chip_interface chip_interface;
+};
+
+static inline
+void halwifionly_phy_set_bb_reg(struct wifi_only_cfg *wifionly_cfg,
+				u32 regaddr, u32 bitmask, u32 data)
+{
+	struct rtl_priv *rtlpriv = (struct rtl_priv *)wifionly_cfg->adapter;
+
+ rtl_set_bbreg(rtlpriv->hw, regaddr, bitmask, data);
+}
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
index 7d296a401b6f..cce4a37ea408 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
@@ -31,11 +31,16 @@
static struct rtl_btc_ops rtl_btc_operation = {
.btc_init_variables = rtl_btc_init_variables,
+ .btc_init_variables_wifi_only = rtl_btc_init_variables_wifi_only,
+ .btc_deinit_variables = rtl_btc_deinit_variables,
.btc_init_hal_vars = rtl_btc_init_hal_vars,
+ .btc_power_on_setting = rtl_btc_power_on_setting,
.btc_init_hw_config = rtl_btc_init_hw_config,
+ .btc_init_hw_config_wifi_only = rtl_btc_init_hw_config_wifi_only,
.btc_ips_notify = rtl_btc_ips_notify,
.btc_lps_notify = rtl_btc_lps_notify,
.btc_scan_notify = rtl_btc_scan_notify,
+ .btc_scan_notify_wifi_only = rtl_btc_scan_notify_wifi_only,
.btc_connect_notify = rtl_btc_connect_notify,
.btc_mediastatus_notify = rtl_btc_mediastatus_notify,
.btc_periodical = rtl_btc_periodical,
@@ -46,63 +51,143 @@ static struct rtl_btc_ops rtl_btc_operation = {
.btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo,
.btc_is_bt_disabled = rtl_btc_is_bt_disabled,
.btc_special_packet_notify = rtl_btc_special_packet_notify,
+ .btc_switch_band_notify = rtl_btc_switch_band_notify,
+ .btc_switch_band_notify_wifi_only = rtl_btc_switch_band_notify_wifionly,
.btc_record_pwr_mode = rtl_btc_record_pwr_mode,
.btc_get_lps_val = rtl_btc_get_lps_val,
.btc_get_rpwm_val = rtl_btc_get_rpwm_val,
.btc_is_bt_ctrl_lps = rtl_btc_is_bt_ctrl_lps,
.btc_is_bt_lps_on = rtl_btc_is_bt_lps_on,
.btc_get_ampdu_cfg = rtl_btc_get_ampdu_cfg,
+ .btc_display_bt_coex_info = rtl_btc_display_bt_coex_info,
};
+void rtl_btc_display_bt_coex_info(struct rtl_priv *rtlpriv, struct seq_file *m)
+{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist) {
+ seq_puts(m, "btc_coexist context is NULL!\n");
+ return;
+ }
+
+ exhalbtc_display_bt_coex_info(btcoexist, m);
+}
+
void rtl_btc_record_pwr_mode(struct rtl_priv *rtlpriv, u8 *buf, u8 len)
{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
u8 safe_len;
- safe_len = sizeof(gl_bt_coexist.pwr_mode_val);
+ if (!btcoexist)
+ return;
+
+ safe_len = sizeof(btcoexist->pwr_mode_val);
if (safe_len > len)
safe_len = len;
- memcpy(gl_bt_coexist.pwr_mode_val, buf, safe_len);
+ memcpy(btcoexist->pwr_mode_val, buf, safe_len);
}
u8 rtl_btc_get_lps_val(struct rtl_priv *rtlpriv)
{
- return gl_bt_coexist.bt_info.lps_val;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return 0;
+
+ return btcoexist->bt_info.lps_val;
}
u8 rtl_btc_get_rpwm_val(struct rtl_priv *rtlpriv)
{
- return gl_bt_coexist.bt_info.rpwm_val;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return 0;
+
+ return btcoexist->bt_info.rpwm_val;
}
bool rtl_btc_is_bt_ctrl_lps(struct rtl_priv *rtlpriv)
{
- return gl_bt_coexist.bt_info.bt_ctrl_lps;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return false;
+
+ return btcoexist->bt_info.bt_ctrl_lps;
}
bool rtl_btc_is_bt_lps_on(struct rtl_priv *rtlpriv)
{
- return gl_bt_coexist.bt_info.bt_lps_on;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return false;
+
+ return btcoexist->bt_info.bt_lps_on;
}
void rtl_btc_get_ampdu_cfg(struct rtl_priv *rtlpriv, u8 *reject_agg,
u8 *ctrl_agg_size, u8 *agg_size)
{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist) {
+		if (reject_agg)
+			*reject_agg = false;
+		if (ctrl_agg_size)
+			*ctrl_agg_size = false;
+ return;
+ }
+
if (reject_agg)
- *reject_agg = gl_bt_coexist.bt_info.reject_agg_pkt;
+ *reject_agg = btcoexist->bt_info.reject_agg_pkt;
if (ctrl_agg_size)
- *ctrl_agg_size = gl_bt_coexist.bt_info.bt_ctrl_agg_buf_size;
+ *ctrl_agg_size = btcoexist->bt_info.bt_ctrl_agg_buf_size;
if (agg_size)
- *agg_size = gl_bt_coexist.bt_info.agg_buf_size;
+ *agg_size = btcoexist->bt_info.agg_buf_size;
+}
+
+static void rtl_btc_alloc_variable(struct rtl_priv *rtlpriv, bool wifi_only)
+{
+ if (wifi_only)
+ rtlpriv->btcoexist.wifi_only_context =
+ kzalloc(sizeof(struct wifi_only_cfg), GFP_KERNEL);
+ else
+ rtlpriv->btcoexist.btc_context =
+ kzalloc(sizeof(struct btc_coexist), GFP_KERNEL);
+}
+
+static void rtl_btc_free_variable(struct rtl_priv *rtlpriv)
+{
+ kfree(rtlpriv->btcoexist.btc_context);
+ rtlpriv->btcoexist.btc_context = NULL;
+
+ kfree(rtlpriv->btcoexist.wifi_only_context);
+ rtlpriv->btcoexist.wifi_only_context = NULL;
}
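Note that rtl_btc_alloc_variable() does not check the kzalloc() result; instead, every exported entry point bails out on a NULL context, so an allocation failure (or a wifi-only configuration) degrades to btcoex being disabled rather than an oops. The guard repeated throughout this file:

struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);

if (!btcoexist)
	return;	/* no coex context: btcoex features silently disabled */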
void rtl_btc_init_variables(struct rtl_priv *rtlpriv)
{
- exhalbtc_initlize_variables();
+ rtl_btc_alloc_variable(rtlpriv, false);
+
+ exhalbtc_initlize_variables(rtlpriv);
exhalbtc_bind_bt_coex_withadapter(rtlpriv);
}
+void rtl_btc_init_variables_wifi_only(struct rtl_priv *rtlpriv)
+{
+ rtl_btc_alloc_variable(rtlpriv, true);
+
+ exhalbtc_initlize_variables_wifi_only(rtlpriv);
+}
+
+void rtl_btc_deinit_variables(struct rtl_priv *rtlpriv)
+{
+ rtl_btc_free_variable(rtlpriv);
+}
+
void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
{
/* move ant_num, bt_type and single_ant_path to
@@ -110,67 +195,155 @@ void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
*/
}
+void rtl_btc_power_on_setting(struct rtl_priv *rtlpriv)
+{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_power_on_setting(btcoexist);
+}
+
void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
u8 bt_exist;
bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"%s, bt_exist is %d\n", __func__, bt_exist);
- exhalbtc_init_hw_config(&gl_bt_coexist, !bt_exist);
- exhalbtc_init_coex_dm(&gl_bt_coexist);
+ if (!btcoexist)
+ return;
+
+ exhalbtc_init_hw_config(btcoexist, !bt_exist);
+ exhalbtc_init_coex_dm(btcoexist);
+}
+
+void rtl_btc_init_hw_config_wifi_only(struct rtl_priv *rtlpriv)
+{
+ struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
+
+ if (!wifionly_cfg)
+ return;
+
+ exhalbtc_init_hw_config_wifi_only(wifionly_cfg);
}
void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type)
{
- exhalbtc_ips_notify(&gl_bt_coexist, type);
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_ips_notify(btcoexist, type);
+
+ if (type == ERFON) {
+		/* In some situations, no scan follows leaving IPS, which
+		 * would leave btcoex in the wrong state.
+		 */
+ exhalbtc_scan_notify(btcoexist, 1);
+ exhalbtc_scan_notify(btcoexist, 0);
+ }
}
void rtl_btc_lps_notify(struct rtl_priv *rtlpriv, u8 type)
{
- exhalbtc_lps_notify(&gl_bt_coexist, type);
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_lps_notify(btcoexist, type);
}
void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype)
{
- exhalbtc_scan_notify(&gl_bt_coexist, scantype);
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_scan_notify(btcoexist, scantype);
+}
+
+void rtl_btc_scan_notify_wifi_only(struct rtl_priv *rtlpriv, u8 scantype)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
+ u8 is_5g = (rtlhal->current_bandtype == BAND_ON_5G);
+
+ if (!wifionly_cfg)
+ return;
+
+ exhalbtc_scan_notify_wifi_only(wifionly_cfg, is_5g);
}
void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action)
{
- exhalbtc_connect_notify(&gl_bt_coexist, action);
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_connect_notify(btcoexist, action);
}
void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
enum rt_media_status mstatus)
{
- exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus);
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_mediastatus_notify(btcoexist, mstatus);
}
void rtl_btc_periodical(struct rtl_priv *rtlpriv)
{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
/*rtl_bt_dm_monitor();*/
- exhalbtc_periodical(&gl_bt_coexist);
+ exhalbtc_periodical(btcoexist);
}
-void rtl_btc_halt_notify(void)
+void rtl_btc_halt_notify(struct rtl_priv *rtlpriv)
{
- struct btc_coexist *btcoexist = &gl_bt_coexist;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
exhalbtc_halt_notify(btcoexist);
}
void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
{
- exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length);
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_bt_info_notify(btcoexist, tmp_buf, length);
}
void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
u8 extid, seq, len;
u16 bt_real_fw_ver;
u8 bt_fw_ver;
+ u8 *data;
+
+ if (!btcoexist)
+ return;
if ((length < 4) || (!tmp_buf))
return;
@@ -182,20 +355,70 @@ void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
len = tmp_buf[1] >> 4;
seq = tmp_buf[2] >> 4;
+ data = &tmp_buf[3];
/* BT Firmware version response */
- if (seq == 0x0E) {
+ switch (seq) {
+ case BT_SEQ_GET_BT_VERSION:
bt_real_fw_ver = tmp_buf[3] | (tmp_buf[4] << 8);
bt_fw_ver = tmp_buf[5];
- gl_bt_coexist.bt_info.bt_real_fw_ver = bt_real_fw_ver;
- gl_bt_coexist.bt_info.bt_fw_ver = bt_fw_ver;
+ btcoexist->bt_info.bt_real_fw_ver = bt_real_fw_ver;
+ btcoexist->bt_info.bt_fw_ver = bt_fw_ver;
+ break;
+ case BT_SEQ_GET_AFH_MAP_L:
+ btcoexist->bt_info.afh_map_l = le32_to_cpu(*(__le32 *)data);
+ break;
+ case BT_SEQ_GET_AFH_MAP_M:
+ btcoexist->bt_info.afh_map_m = le32_to_cpu(*(__le32 *)data);
+ break;
+ case BT_SEQ_GET_AFH_MAP_H:
+ btcoexist->bt_info.afh_map_h = le16_to_cpu(*(__le16 *)data);
+ break;
+ case BT_SEQ_GET_BT_COEX_SUPPORTED_FEATURE:
+ btcoexist->bt_info.bt_supported_feature = tmp_buf[3] |
+ (tmp_buf[4] << 8);
+ break;
+ case BT_SEQ_GET_BT_COEX_SUPPORTED_VERSION:
+ btcoexist->bt_info.bt_supported_version = tmp_buf[3] |
+ (tmp_buf[4] << 8);
+ break;
+ case BT_SEQ_GET_BT_ANT_DET_VAL:
+ btcoexist->bt_info.bt_ant_det_val = tmp_buf[3];
+ break;
+ case BT_SEQ_GET_BT_BLE_SCAN_PARA:
+ btcoexist->bt_info.bt_ble_scan_para = tmp_buf[3] |
+ (tmp_buf[4] << 8) |
+ (tmp_buf[5] << 16) |
+ (tmp_buf[6] << 24);
+ break;
+ case BT_SEQ_GET_BT_BLE_SCAN_TYPE:
+ btcoexist->bt_info.bt_ble_scan_type = tmp_buf[3];
+ break;
+ case BT_SEQ_GET_BT_DEVICE_INFO:
+ btcoexist->bt_info.bt_device_info =
+ le32_to_cpu(*(__le32 *)data);
+ break;
+	case BT_SEQ_GET_BT_FORB_SLOT_VAL:
+ btcoexist->bt_info.bt_forb_slot_val =
+ le32_to_cpu(*(__le32 *)data);
+ break;
}
+
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "btmpinfo complete req_num=%d\n", seq);
+
+ complete(&btcoexist->bt_mp_comp);
}
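
The switch above caches each decoded C2H BT-MP response and then signals
bt_mp_comp, so the query side can sleep until its answer arrives. A minimal
wait-side sketch (hypothetical helper name and timeout; the actual sender is
not part of this hunk, and the code assumes kernel context with
linux/completion.h):

static int rtl_btc_wait_btmpinfo(struct btc_coexist *btcoexist)
{
	/* block until rtl_btc_btmpinfo_notify() calls complete(),
	 * or give up so a dead BT firmware cannot hang the caller
	 */
	if (!wait_for_completion_timeout(&btcoexist->bt_mp_comp,
					 msecs_to_jiffies(500)))
		return -ETIMEDOUT;
	return 0;
}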
bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv)
{
- return gl_bt_coexist.bt_info.limited_dig;
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return false;
+
+ return btcoexist->bt_info.limited_dig;
}
bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv)
@@ -227,8 +450,13 @@ bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv)
bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv)
{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return true;
+
/* It seems 'bt_disabled' is never initialized or set. */
- if (gl_bt_coexist.bt_info.bt_disabled)
+ if (btcoexist->bt_info.bt_disabled)
return true;
else
return false;
@@ -236,7 +464,50 @@ bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv)
void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type)
{
- return exhalbtc_special_packet_notify(&gl_bt_coexist, pkt_type);
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+
+ if (!btcoexist)
+ return;
+
+ exhalbtc_special_packet_notify(btcoexist, pkt_type);
+}
+
+void rtl_btc_switch_band_notify(struct rtl_priv *rtlpriv, u8 band_type,
+ bool scanning)
+{
+ struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
+ u8 type = BTC_NOT_SWITCH;
+
+ if (!btcoexist)
+ return;
+
+ switch (band_type) {
+ case BAND_ON_2_4G:
+ if (scanning)
+ type = BTC_SWITCH_TO_24G;
+ else
+ type = BTC_SWITCH_TO_24G_NOFORSCAN;
+ break;
+
+ case BAND_ON_5G:
+ type = BTC_SWITCH_TO_5G;
+ break;
+ }
+
+ if (type != BTC_NOT_SWITCH)
+ exhalbtc_switch_band_notify(btcoexist, type);
+}
+
+void rtl_btc_switch_band_notify_wifionly(struct rtl_priv *rtlpriv, u8 band_type,
+ bool scanning)
+{
+ struct wifi_only_cfg *wifionly_cfg = rtl_btc_wifi_only(rtlpriv);
+ u8 is_5g = (band_type == BAND_ON_5G);
+
+ if (!wifionly_cfg)
+ return;
+
+ exhalbtc_switch_band_notify_wifi_only(wifionly_cfg, is_5g);
}
struct rtl_btc_ops *rtl_btc_get_ops_pointer(void)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
index ac1253c46f44..8c996055de71 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
@@ -28,22 +28,32 @@
#include "halbt_precomp.h"
void rtl_btc_init_variables(struct rtl_priv *rtlpriv);
+void rtl_btc_init_variables_wifi_only(struct rtl_priv *rtlpriv);
+void rtl_btc_deinit_variables(struct rtl_priv *rtlpriv);
void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv);
+void rtl_btc_power_on_setting(struct rtl_priv *rtlpriv);
void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hw_config_wifi_only(struct rtl_priv *rtlpriv);
void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type);
void rtl_btc_lps_notify(struct rtl_priv *rtlpriv, u8 type);
void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype);
+void rtl_btc_scan_notify_wifi_only(struct rtl_priv *rtlpriv, u8 scantype);
void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action);
void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
enum rt_media_status mstatus);
void rtl_btc_periodical(struct rtl_priv *rtlpriv);
-void rtl_btc_halt_notify(void);
+void rtl_btc_halt_notify(struct rtl_priv *rtlpriv);
void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmpbuf, u8 length);
void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length);
bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv);
bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv);
bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv);
void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type);
+void rtl_btc_switch_band_notify(struct rtl_priv *rtlpriv, u8 band_type,
+ bool scanning);
+void rtl_btc_switch_band_notify_wifionly(struct rtl_priv *rtlpriv, u8 band_type,
+ bool scanning);
+void rtl_btc_display_bt_coex_info(struct rtl_priv *rtlpriv, struct seq_file *m);
void rtl_btc_record_pwr_mode(struct rtl_priv *rtlpriv, u8 *buf, u8 len);
u8 rtl_btc_get_lps_val(struct rtl_priv *rtlpriv);
u8 rtl_btc_get_rpwm_val(struct rtl_priv *rtlpriv);
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 3cb88825473e..cfea57efa7f4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -345,9 +345,9 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&rtlpriv->locks.conf_mutex);
/* Free beacon resources */
- if ((vif->type == NL80211_IFTYPE_AP) ||
- (vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
if (mac->beacon_enabled == 1) {
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
@@ -858,8 +858,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
* here it is just used for linked scanning; for both the linked
* and nolink cases the bssid is set in set_network_type
*/
- if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
- (mac->link_state >= MAC80211_LINKED)) {
+ if (changed_flags & FIF_BCN_PRBRESP_PROMISC &&
+ mac->link_state >= MAC80211_LINKED) {
if (mac->opmode != NL80211_IFTYPE_AP &&
mac->opmode != NL80211_IFTYPE_MESH_POINT) {
if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
@@ -1044,10 +1044,10 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
mutex_lock(&rtlpriv->locks.conf_mutex);
- if ((vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_AP) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) {
- if ((changed & BSS_CHANGED_BEACON) ||
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ if (changed & BSS_CHANGED_BEACON ||
(changed & BSS_CHANGED_BEACON_ENABLED &&
bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 0) {
@@ -1162,6 +1162,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
"BSS_CHANGED_ASSOC\n");
} else {
+ struct cfg80211_bss *bss = NULL;
+
mstatus = RT_MEDIA_DISCONNECT;
if (mac->link_state == MAC80211_LINKED)
@@ -1169,6 +1171,22 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
mac->link_state = MAC80211_NOLINK;
+
+ bss = cfg80211_get_bss(hw->wiphy, NULL,
+ (u8 *)mac->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_PRIVACY_OFF);
+
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "bssid = %pMF\n", mac->bssid);
+
+ if (bss) {
+ cfg80211_unlink_bss(hw->wiphy, bss);
+ cfg80211_put_bss(hw->wiphy, bss);
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "cfg80211_unlink !!\n");
+ }
+
eth_zero_addr(mac->bssid);
mac->vendor = PEER_UNKNOWN;
mac->mode = 0;
@@ -1435,6 +1453,9 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
if (rtlpriv->cfg->ops->get_btc_status())
rtlpriv->btcoexist.btc_ops->btc_scan_notify(rtlpriv, 1);
+ else if (rtlpriv->btcoexist.btc_ops)
+ rtlpriv->btcoexist.btc_ops->btc_scan_notify_wifi_only(rtlpriv,
+ 1);
if (rtlpriv->dm.supp_phymode_switch) {
if (rtlpriv->cfg->ops->chk_switch_dmdp)
@@ -1490,6 +1511,9 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE);
if (rtlpriv->cfg->ops->get_btc_status())
rtlpriv->btcoexist.btc_ops->btc_scan_notify(rtlpriv, 0);
+ else if (rtlpriv->btcoexist.btc_ops)
+ rtlpriv->btcoexist.btc_ops->btc_scan_notify_wifi_only(rtlpriv,
+ 0);
}
static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -1513,9 +1537,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -ENOSPC; /*User disabled HW-crypto */
}
/* To support IBSS, use sw-crypto for GTK */
- if (((vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -ENOSPC;
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"%s hardware based encryption for keyidx: %d, mac: %pM\n",
@@ -1588,7 +1612,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->cfg->ops->enable_hw_sec(hw);
}
} else {
- if ((!group_key) || (vif->type == NL80211_IFTYPE_ADHOC) ||
+ if (!group_key || vif->type == NL80211_IFTYPE_ADHOC ||
rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) {
if (rtlpriv->sec.pairwise_enc_algorithm ==
NO_ENCRYPTION &&
@@ -1775,7 +1799,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
break;
case PWR_CMD_WRITE:
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n");
+ "%s(): PWR_CMD_WRITE\n", __func__);
offset = GET_PWR_CFG_OFFSET(cfg_cmd);
/*Read the value from system register*/
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index 38fef6dbb44b..d70385be9976 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -23,15 +23,17 @@
*****************************************************************************/
#include "wifi.h"
+#include "cam.h"
#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
#ifdef CONFIG_RTLWIFI_DEBUG
void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *fmt, ...)
{
if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
- (level <= rtlpriv->cfg->mod_params->debug_level))) {
+ level <= rtlpriv->cfg->mod_params->debug_level)) {
struct va_format vaf;
va_list args;
@@ -51,7 +53,7 @@ void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *fmt, ...)
{
if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
- (level <= rtlpriv->cfg->mod_params->debug_level))) {
+ level <= rtlpriv->cfg->mod_params->debug_level)) {
struct va_format vaf;
va_list args;
@@ -81,4 +83,481 @@ void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
}
EXPORT_SYMBOL_GPL(_rtl_dbg_print_data);
+struct rtl_debugfs_priv {
+ struct rtl_priv *rtlpriv;
+ int (*cb_read)(struct seq_file *m, void *v);
+ ssize_t (*cb_write)(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *loff);
+ u32 cb_data;
+};
+
+static struct dentry *debugfs_topdir;
+
+static int rtl_debug_get_common(struct seq_file *m, void *v)
+{
+ struct rtl_debugfs_priv *debugfs_priv = m->private;
+
+ return debugfs_priv->cb_read(m, v);
+}
+
+static int dl_debug_open_common(struct inode *inode, struct file *file)
+{
+ return single_open(file, rtl_debug_get_common, inode->i_private);
+}
+
+static const struct file_operations file_ops_common = {
+ .open = dl_debug_open_common,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int rtl_debug_get_mac_page(struct seq_file *m, void *v)
+{
+ struct rtl_debugfs_priv *debugfs_priv = m->private;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+ u32 page = debugfs_priv->cb_data;
+ int i, n;
+ int max = 0xff;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_read_dword(rtlpriv, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
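
Each mac_N file therefore renders 16 bytes per row: the absolute register
address followed by four dword reads. Illustrative output shape for mac_1
(register values invented):

00000100 xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
00000110 xxxxxxxx ...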
+
+#define RTL_DEBUG_IMPL_MAC_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_mac_ ##page = { \
+ .cb_read = rtl_debug_get_mac_page, \
+ .cb_data = addr, \
+}
+
+RTL_DEBUG_IMPL_MAC_SERIES(0, 0x0000);
+RTL_DEBUG_IMPL_MAC_SERIES(1, 0x0100);
+RTL_DEBUG_IMPL_MAC_SERIES(2, 0x0200);
+RTL_DEBUG_IMPL_MAC_SERIES(3, 0x0300);
+RTL_DEBUG_IMPL_MAC_SERIES(4, 0x0400);
+RTL_DEBUG_IMPL_MAC_SERIES(5, 0x0500);
+RTL_DEBUG_IMPL_MAC_SERIES(6, 0x0600);
+RTL_DEBUG_IMPL_MAC_SERIES(7, 0x0700);
+RTL_DEBUG_IMPL_MAC_SERIES(10, 0x1000);
+RTL_DEBUG_IMPL_MAC_SERIES(11, 0x1100);
+RTL_DEBUG_IMPL_MAC_SERIES(12, 0x1200);
+RTL_DEBUG_IMPL_MAC_SERIES(13, 0x1300);
+RTL_DEBUG_IMPL_MAC_SERIES(14, 0x1400);
+RTL_DEBUG_IMPL_MAC_SERIES(15, 0x1500);
+RTL_DEBUG_IMPL_MAC_SERIES(16, 0x1600);
+RTL_DEBUG_IMPL_MAC_SERIES(17, 0x1700);
+
+static int rtl_debug_get_bb_page(struct seq_file *m, void *v)
+{
+ struct rtl_debugfs_priv *debugfs_priv = m->private;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+ struct ieee80211_hw *hw = rtlpriv->hw;
+ u32 page = debugfs_priv->cb_data;
+ int i, n;
+ int max = 0xff;
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtl_get_bbreg(hw, (page | n), 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+#define RTL_DEBUG_IMPL_BB_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_bb_ ##page = { \
+ .cb_read = rtl_debug_get_bb_page, \
+ .cb_data = addr, \
+}
+
+RTL_DEBUG_IMPL_BB_SERIES(8, 0x0800);
+RTL_DEBUG_IMPL_BB_SERIES(9, 0x0900);
+RTL_DEBUG_IMPL_BB_SERIES(a, 0x0a00);
+RTL_DEBUG_IMPL_BB_SERIES(b, 0x0b00);
+RTL_DEBUG_IMPL_BB_SERIES(c, 0x0c00);
+RTL_DEBUG_IMPL_BB_SERIES(d, 0x0d00);
+RTL_DEBUG_IMPL_BB_SERIES(e, 0x0e00);
+RTL_DEBUG_IMPL_BB_SERIES(f, 0x0f00);
+RTL_DEBUG_IMPL_BB_SERIES(18, 0x1800);
+RTL_DEBUG_IMPL_BB_SERIES(19, 0x1900);
+RTL_DEBUG_IMPL_BB_SERIES(1a, 0x1a00);
+RTL_DEBUG_IMPL_BB_SERIES(1b, 0x1b00);
+RTL_DEBUG_IMPL_BB_SERIES(1c, 0x1c00);
+RTL_DEBUG_IMPL_BB_SERIES(1d, 0x1d00);
+RTL_DEBUG_IMPL_BB_SERIES(1e, 0x1e00);
+RTL_DEBUG_IMPL_BB_SERIES(1f, 0x1f00);
+
+static int rtl_debug_get_reg_rf(struct seq_file *m, void *v)
+{
+ struct rtl_debugfs_priv *debugfs_priv = m->private;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+ struct ieee80211_hw *hw = rtlpriv->hw;
+ enum radio_path rfpath = debugfs_priv->cb_data;
+ int i, n;
+ int max = 0x40;
+
+ if (IS_HARDWARE_TYPE_8822B(rtlpriv))
+ max = 0xff;
+
+ seq_printf(m, "\nPATH(%d)", rfpath);
+
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n);
+ for (i = 0; i < 4 && n <= max; n += 1, i++)
+ seq_printf(m, "%8.8x ",
+ rtl_get_rfreg(hw, rfpath, n, 0xffffffff));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+#define RTL_DEBUG_IMPL_RF_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_rf_ ##page = { \
+ .cb_read = rtl_debug_get_reg_rf, \
+ .cb_data = addr, \
+}
+
+RTL_DEBUG_IMPL_RF_SERIES(a, RF90_PATH_A);
+RTL_DEBUG_IMPL_RF_SERIES(b, RF90_PATH_B);
+
+static int rtl_debug_get_cam_register(struct seq_file *m, void *v)
+{
+ struct rtl_debugfs_priv *debugfs_priv = m->private;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+ int start = debugfs_priv->cb_data;
+ u32 target_cmd = 0;
+ u32 target_val = 0;
+ u8 entry_i = 0;
+ u32 ulstatus;
+ int i = 100, j = 0;
+ int end = (start + 11 > TOTAL_CAM_ENTRY ? TOTAL_CAM_ENTRY : start + 11);
+
+ /* This dumps the current register page */
+ seq_printf(m,
+ "\n#################### SECURITY CAM (%d-%d) ##################\n",
+ start, end - 1);
+
+ for (j = start; j < end; j++) {
+ seq_printf(m, "\nD: %2x > ", j);
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ /* polling bit, no write-enable, and address */
+ target_cmd = entry_i + CAM_CONTENT_COUNT * j;
+ target_cmd = target_cmd | BIT(31);
+
+ /* Wait for the polling bit to clear */
+ while ((i--) >= 0) {
+ ulstatus =
+ rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RWCAM]);
+ if (ulstatus & BIT(31))
+ continue;
+ else
+ break;
+ }
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_cmd);
+ target_val = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[RCAMO]);
+ seq_printf(m, "%8.8x ", target_val);
+ }
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+#define RTL_DEBUG_IMPL_CAM_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_cam_ ##page = { \
+ .cb_read = rtl_debug_get_cam_register, \
+ .cb_data = addr, \
+}
+
+RTL_DEBUG_IMPL_CAM_SERIES(1, 0);
+RTL_DEBUG_IMPL_CAM_SERIES(2, 11);
+RTL_DEBUG_IMPL_CAM_SERIES(3, 22);
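
Each cam_N file thus dumps an 11-entry window of the security CAM: cam_1
covers entries 0-10, cam_2 entries 11-21, and cam_3 the remainder, with the
end clamped to TOTAL_CAM_ENTRY by the helper above.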
+
+static int rtl_debug_get_btcoex(struct seq_file *m, void *v)
+{
+ struct rtl_debugfs_priv *debugfs_priv = m->private;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+
+ if (rtlpriv->cfg->ops->get_btc_status())
+ rtlpriv->btcoexist.btc_ops->btc_display_bt_coex_info(rtlpriv,
+ m);
+
+ seq_puts(m, "\n");
+
+ return 0;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_btcoex = {
+ .cb_read = rtl_debug_get_btcoex,
+ .cb_data = 0,
+};
+
+static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+ char tmp[32 + 1];
+ int tmp_len;
+ u32 addr, val, len;
+ int num;
+
+ if (count < 3)
+ return -EFAULT;
+
+ tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+
+ if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+ return count;
+
+ tmp[tmp_len] = '\0';
+
+ /* write BB/MAC register */
+ num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
+
+ if (num != 3)
+ return count;
+
+ switch (len) {
+ case 1:
+ rtl_write_byte(rtlpriv, addr, (u8)val);
+ break;
+ case 2:
+ rtl_write_word(rtlpriv, addr, (u16)val);
+ break;
+ case 4:
+ rtl_write_dword(rtlpriv, addr, val);
+ break;
+ default:
+ /*printk("error write length=%d", len);*/
+ break;
+ }
+
+ return count;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_write_reg = {
+ .cb_write = rtl_debugfs_set_write_reg,
+};
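
The handler parses "<addr> <val> <len>" as hex, with len selecting a 1-, 2-
or 4-byte register write. A usage sketch against the per-device directory
created in rtl_debug_add_one() below (register and value invented):

  # write the dword 0x12345678 to MAC register 0x4c
  echo '4c 12345678 4' > /sys/kernel/debug/rtlwifi/<dev>/write_reg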
+
+static ssize_t rtl_debugfs_set_write_h2c(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+ struct ieee80211_hw *hw = rtlpriv->hw;
+ char tmp[32 + 1];
+ int tmp_len;
+ u8 h2c_len, h2c_data_packed[8];
+ int h2c_data[8]; /* idx 0: cmd */
+ int i;
+
+ if (count < 3)
+ return -EFAULT;
+
+ tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+
+ if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+ return count;
+
+ tmp[tmp_len] = '\0';
+
+ h2c_len = sscanf(tmp, "%X %X %X %X %X %X %X %X",
+ &h2c_data[0], &h2c_data[1],
+ &h2c_data[2], &h2c_data[3],
+ &h2c_data[4], &h2c_data[5],
+ &h2c_data[6], &h2c_data[7]);
+
+ if (h2c_len <= 0)
+ return count;
+
+ for (i = 0; i < h2c_len; i++)
+ h2c_data_packed[i] = (u8)h2c_data[i];
+
+ rtlpriv->cfg->ops->fill_h2c_cmd(hw, h2c_data_packed[0],
+ h2c_len - 1,
+ &h2c_data_packed[1]);
+
+ return count;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_write_h2c = {
+ .cb_write = rtl_debugfs_set_write_h2c,
+};
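
Here the first hex byte is the H2C command id and up to seven more form the
payload handed to the HAL's fill_h2c_cmd(). For example (command id and
payload bytes invented):

  echo '42 01 02' > /sys/kernel/debug/rtlwifi/<dev>/write_h2c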
+
+static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+ struct ieee80211_hw *hw = rtlpriv->hw;
+ char tmp[32 + 1];
+ int tmp_len;
+ int num;
+ int path;
+ u32 addr, bitmask, data;
+
+ if (count < 3)
+ return -EFAULT;
+
+ tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+
+ if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+ return count;
+
+ tmp[tmp_len] = '\0';
+
+ num = sscanf(tmp, "%X %X %X %X",
+ &path, &addr, &bitmask, &data);
+
+ if (num != 4) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
+ "Format is <path> <addr> <mask> <data>\n");
+ return count;
+ }
+
+ rtl_set_rfreg(hw, path, addr, bitmask, data);
+
+ return count;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_write_rfreg = {
+ .cb_write = rtl_debugfs_set_write_rfreg,
+};
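
As the error message documents, the expected input is
"<path> <addr> <mask> <data>", all in hex; e.g. (values invented):

  # RF path A (0), register 0x18, full bitmask
  echo '0 18 ffffffff 12345678' > /sys/kernel/debug/rtlwifi/<dev>/write_rfreg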
+
+static int rtl_debugfs_close(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static ssize_t rtl_debugfs_common_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+
+ return debugfs_priv->cb_write(filp, buffer, count, loff);
+}
+
+static const struct file_operations file_ops_common_write = {
+ .owner = THIS_MODULE,
+ .write = rtl_debugfs_common_write,
+ .open = simple_open,
+ .release = rtl_debugfs_close,
+};
+
+#define RTL_DEBUGFS_ADD_CORE(name, mode, fopname) \
+ do { \
+ rtl_debug_priv_ ##name.rtlpriv = rtlpriv; \
+ if (!debugfs_create_file(#name, mode, \
+ parent, &rtl_debug_priv_ ##name, \
+ &file_ops_ ##fopname)) \
+ pr_err("Unable to initialize debugfs:%s/%s\n", \
+ rtlpriv->dbg.debugfs_name, \
+ #name); \
+ } while (0)
+
+#define RTL_DEBUGFS_ADD(name) \
+ RTL_DEBUGFS_ADD_CORE(name, S_IFREG | 0444, common)
+#define RTL_DEBUGFS_ADD_W(name) \
+ RTL_DEBUGFS_ADD_CORE(name, S_IFREG | 0222, common_write)
+
+void rtl_debug_add_one(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct dentry *parent;
+
+ snprintf(rtlpriv->dbg.debugfs_name, 18, "%pMF", rtlefuse->dev_addr);
+
+ rtlpriv->dbg.debugfs_dir =
+ debugfs_create_dir(rtlpriv->dbg.debugfs_name, debugfs_topdir);
+ if (!rtlpriv->dbg.debugfs_dir) {
+ pr_err("Unable to init debugfs:/%s/%s\n", rtlpriv->cfg->name,
+ rtlpriv->dbg.debugfs_name);
+ return;
+ }
+
+ parent = rtlpriv->dbg.debugfs_dir;
+
+ RTL_DEBUGFS_ADD(mac_0);
+ RTL_DEBUGFS_ADD(mac_1);
+ RTL_DEBUGFS_ADD(mac_2);
+ RTL_DEBUGFS_ADD(mac_3);
+ RTL_DEBUGFS_ADD(mac_4);
+ RTL_DEBUGFS_ADD(mac_5);
+ RTL_DEBUGFS_ADD(mac_6);
+ RTL_DEBUGFS_ADD(mac_7);
+ RTL_DEBUGFS_ADD(bb_8);
+ RTL_DEBUGFS_ADD(bb_9);
+ RTL_DEBUGFS_ADD(bb_a);
+ RTL_DEBUGFS_ADD(bb_b);
+ RTL_DEBUGFS_ADD(bb_c);
+ RTL_DEBUGFS_ADD(bb_d);
+ RTL_DEBUGFS_ADD(bb_e);
+ RTL_DEBUGFS_ADD(bb_f);
+ RTL_DEBUGFS_ADD(mac_10);
+ RTL_DEBUGFS_ADD(mac_11);
+ RTL_DEBUGFS_ADD(mac_12);
+ RTL_DEBUGFS_ADD(mac_13);
+ RTL_DEBUGFS_ADD(mac_14);
+ RTL_DEBUGFS_ADD(mac_15);
+ RTL_DEBUGFS_ADD(mac_16);
+ RTL_DEBUGFS_ADD(mac_17);
+ RTL_DEBUGFS_ADD(bb_18);
+ RTL_DEBUGFS_ADD(bb_19);
+ RTL_DEBUGFS_ADD(bb_1a);
+ RTL_DEBUGFS_ADD(bb_1b);
+ RTL_DEBUGFS_ADD(bb_1c);
+ RTL_DEBUGFS_ADD(bb_1d);
+ RTL_DEBUGFS_ADD(bb_1e);
+ RTL_DEBUGFS_ADD(bb_1f);
+ RTL_DEBUGFS_ADD(rf_a);
+ RTL_DEBUGFS_ADD(rf_b);
+
+ RTL_DEBUGFS_ADD(cam_1);
+ RTL_DEBUGFS_ADD(cam_2);
+ RTL_DEBUGFS_ADD(cam_3);
+
+ RTL_DEBUGFS_ADD(btcoex);
+
+ RTL_DEBUGFS_ADD_W(write_reg);
+ RTL_DEBUGFS_ADD_W(write_h2c);
+ RTL_DEBUGFS_ADD_W(write_rfreg);
+}
+EXPORT_SYMBOL_GPL(rtl_debug_add_one);
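
With the "rtlwifi" top directory created at module load (see
rtl_debugfs_add_topdir() below), each adapter gets a subdirectory named after
its dash-separated MAC address (the %pMF format), e.g.
/sys/kernel/debug/rtlwifi/00-e0-4c-00-00-00/ (address invented), holding the
mac_*, bb_*, rf_*, cam_*, btcoex and write_* files registered above.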
+
+void rtl_debug_remove_one(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ debugfs_remove_recursive(rtlpriv->dbg.debugfs_dir);
+ rtlpriv->dbg.debugfs_dir = NULL;
+}
+EXPORT_SYMBOL_GPL(rtl_debug_remove_one);
+
+void rtl_debugfs_add_topdir(void)
+{
+ debugfs_topdir = debugfs_create_dir("rtlwifi", NULL);
+}
+
+void rtl_debugfs_remove_topdir(void)
+{
+ debugfs_remove_recursive(debugfs_topdir);
+}
+
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h
index 947718001457..ad6834af618b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.h
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.h
@@ -219,4 +219,16 @@ static inline void RT_PRINT_DATA(struct rtl_priv *rtlpriv,
}
#endif
+
+#ifdef CONFIG_RTLWIFI_DEBUG
+void rtl_debug_add_one(struct ieee80211_hw *hw);
+void rtl_debug_remove_one(struct ieee80211_hw *hw);
+void rtl_debugfs_add_topdir(void);
+void rtl_debugfs_remove_topdir(void);
+#else
+#define rtl_debug_add_one(hw)
+#define rtl_debug_remove_one(hw)
+#define rtl_debugfs_add_topdir()
+#define rtl_debugfs_remove_topdir()
+#endif
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index ef9acd466cca..35b50be633f1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -257,11 +257,11 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
sizeof(u8), GFP_ATOMIC);
if (!efuse_tbl)
return;
- efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC);
+ efuse_word = kcalloc(EFUSE_MAX_WORD_UNIT, sizeof(u16 *), GFP_ATOMIC);
if (!efuse_word)
goto out;
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
- efuse_word[i] = kzalloc(efuse_max_section * sizeof(u16),
+ efuse_word[i] = kcalloc(efuse_max_section, sizeof(u16),
GFP_ATOMIC);
if (!efuse_word[i])
goto done;
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index c2575b0b9440..01ccf8884831 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -59,6 +59,7 @@ static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
__le16 fc = rtl_get_fc(skb);
u8 queue_index = skb_get_queue_mapping(skb);
+ struct ieee80211_hdr *hdr;
if (unlikely(ieee80211_is_beacon(fc)))
return BEACON_QUEUE;
@@ -67,6 +68,13 @@ static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
if (ieee80211_is_nullfunc(fc))
return HIGH_QUEUE;
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
+ hdr = rtl_get_hdr(skb);
+
+ if (is_multicast_ether_addr(hdr->addr1) ||
+ is_broadcast_ether_addr(hdr->addr1))
+ return HIGH_QUEUE;
+ }
return ac_to_hwq[queue_index];
}
@@ -557,13 +565,6 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
else
entry = (u8 *)(&ring->desc[ring->idx]);
- if (rtlpriv->cfg->ops->get_available_desc &&
- rtlpriv->cfg->ops->get_available_desc(hw, prio) <= 1) {
- RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_DMESG,
- "no available desc!\n");
- return;
- }
-
if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
return;
ring->idx = (ring->idx + 1) % ring->entries;
@@ -747,7 +748,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
u8 tmp_one;
bool unicast = false;
u8 hw_queue = 0;
- unsigned int rx_remained_cnt;
+ unsigned int rx_remained_cnt = 0;
struct rtl_stats stats = {
.signal = 0,
.rate = 0,
@@ -768,7 +769,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
struct sk_buff *new_skb;
if (rtlpriv->use_new_trx_flow) {
- rx_remained_cnt =
+ if (rx_remained_cnt == 0)
+ rx_remained_cnt =
rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
hw_queue);
if (rx_remained_cnt == 0)
@@ -924,10 +926,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
unsigned long flags;
- u32 inta = 0;
- u32 intb = 0;
- u32 intc = 0;
- u32 intd = 0;
+ struct rtl_int intvec = {0};
+
irqreturn_t ret = IRQ_HANDLED;
if (rtlpci->irq_enabled == 0)
@@ -937,47 +937,47 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlpriv->cfg->ops->disable_interrupt(hw);
/*read ISR: 4/8bytes */
- rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb, &intc, &intd);
+ rtlpriv->cfg->ops->interrupt_recognized(hw, &intvec);
/*Shared IRQ or HW disappeared */
- if (!inta || inta == 0xffff)
+ if (!intvec.inta || intvec.inta == 0xffff)
goto done;
/*<1> beacon related */
- if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"beacon ok interrupt!\n");
- if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
+ if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"beacon err interrupt!\n");
- if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
- if (inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"prepare beacon for interrupt!\n");
tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
}
/*<2> Tx related */
- if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
+ if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
- if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"Manage ok interrupt!\n");
_rtl_pci_tx_isr(hw, MGNT_QUEUE);
}
- if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"HIGH_QUEUE ok interrupt!\n");
_rtl_pci_tx_isr(hw, HIGH_QUEUE);
}
- if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -985,7 +985,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
_rtl_pci_tx_isr(hw, BK_QUEUE);
}
- if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -993,7 +993,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
_rtl_pci_tx_isr(hw, BE_QUEUE);
}
- if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1001,7 +1001,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
_rtl_pci_tx_isr(hw, VI_QUEUE);
}
- if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1010,7 +1010,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
}
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
- if (intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
+ if (intvec.intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1020,7 +1020,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
}
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
- if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1030,25 +1030,25 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
}
/*<3> Rx related */
- if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
_rtl_pci_rx_interrupt(hw);
}
- if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
+ if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"rx descriptor unavailable!\n");
_rtl_pci_rx_interrupt(hw);
}
- if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
+ if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
_rtl_pci_rx_interrupt(hw);
}
/*<4> fw related*/
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
- if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
+ if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"firmware interrupt!\n");
queue_delayed_work(rtlpriv->works.rtl_wq,
@@ -1064,7 +1064,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
*/
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE ||
rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
- if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
+ if (unlikely(intvec.inta &
+ rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"hsisr interrupt!\n");
_rtl_pci_hs_interrupt(hw);
@@ -1250,7 +1251,6 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
rtlpci->tx_ring[prio].cur_tx_rp = 0;
rtlpci->tx_ring[prio].cur_tx_wp = 0;
- rtlpci->tx_ring[prio].avl_desc = entries;
}
/* alloc dma for this ring */
@@ -1555,7 +1555,14 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
dev_kfree_skb_irq(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
+
+ if (rtlpriv->use_new_trx_flow) {
+ rtlpci->tx_ring[i].cur_tx_rp = 0;
+ rtlpci->tx_ring[i].cur_tx_wp = 0;
+ }
+
ring->idx = 0;
+ ring->entries = rtlpci->txringcount[i];
}
}
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
@@ -1787,6 +1794,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+ struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
int err;
@@ -1796,9 +1804,12 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
if (rtlpriv->cfg->ops->get_btc_status &&
rtlpriv->cfg->ops->get_btc_status()) {
rtlpriv->btcoexist.btc_info.ap_num = 36;
- rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
- rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
+ btc_ops->btc_init_variables(rtlpriv);
+ btc_ops->btc_init_hal_vars(rtlpriv);
+ } else if (btc_ops) {
+ btc_ops->btc_init_variables_wifi_only(rtlpriv);
}
+
err = rtlpriv->cfg->ops->hw_init(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
@@ -1834,7 +1845,10 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
u8 rf_timeout = 0;
if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_halt_notify();
+ rtlpriv->btcoexist.btc_ops->btc_halt_notify(rtlpriv);
+
+ if (rtlpriv->btcoexist.btc_ops)
+ rtlpriv->btcoexist.btc_ops->btc_deinit_variables(rtlpriv);
/* should be done before disabling interrupt & adapter,
 * and will do it immediately.
@@ -2302,6 +2316,9 @@ int rtl_pci_probe(struct pci_dev *pdev,
}
rtlpriv->mac80211.mac80211_registered = 1;
+ /* add debugfs entries */
+ rtl_debug_add_one(hw);
+
/*init rfkill */
rtl_init_rfkill(hw); /* Init PCI sw */
@@ -2350,6 +2367,9 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
wait_for_completion(&rtlpriv->firmware_loading_complete);
clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+ /* remove debugfs entries */
+ rtl_debug_remove_one(hw);
+
/*ieee80211_unregister_hw will call ops_stop */
if (rtlmac->mac80211_registered == 1) {
ieee80211_unregister_hw(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
index e7d070e8da2d..3fb56c845a61 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
@@ -173,7 +173,6 @@ struct rtl8192_tx_ring {
/*add for new trx flow*/
struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/
dma_addr_t buffer_desc_dma; /*tx bufferd desc dma memory*/
- u16 avl_desc; /* available_desc_to_write */
u16 cur_tx_wp; /* current_tx_write_point */
u16 cur_tx_rp; /* current_tx_read_point */
};
@@ -320,10 +319,10 @@ static inline void pci_write32_async(struct rtl_priv *rtlpriv,
writel(val, (u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
}
-static inline u16 calc_fifo_space(u16 rp, u16 wp)
+static inline u16 calc_fifo_space(u16 rp, u16 wp, u16 size)
{
if (rp <= wp)
- return RTL_PCI_MAX_RX_COUNT - 1 + rp - wp;
+ return size - 1 + rp - wp;
return rp - wp - 1;
}
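
Parameterizing the size lets the same helper serve both the RX ring
(RTL_PCI_MAX_RX_COUNT) and the 92EE TX rings (TX_DESC_NUM_92E, see below).
One slot is deliberately kept unused so rp == wp always means "empty": with
size = 256, rp = 10, wp = 250 the free space is 256 - 1 + 10 - 250 = 15
descriptors; with rp = 250, wp = 10 it is 250 - 10 - 1 = 239.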
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 24c87fae5382..71af24e2e051 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -51,6 +51,11 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
&rtlmac->retry_long);
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ rtlpriv->cfg->ops->switch_channel(hw);
+ rtlpriv->cfg->ops->set_channel_access(hw);
+ rtlpriv->cfg->ops->set_bw_mode(hw,
+ cfg80211_get_chandef_type(&hw->conf.chandef));
+
/*<3> Enable Interrupt */
rtlpriv->cfg->ops->enable_interrupt(hw);
@@ -289,7 +294,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
- spin_lock(&rtlpriv->locks.ips_lock);
+ mutex_lock(&rtlpriv->locks.ips_mutex);
if (ppsc->inactiveps) {
rtstate = ppsc->rfpwr_state;
@@ -306,7 +311,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
ppsc->inactive_pwrstate);
}
}
- spin_unlock(&rtlpriv->locks.ips_lock);
+ mutex_unlock(&rtlpriv->locks.ips_mutex);
}
EXPORT_SYMBOL_GPL(rtl_ips_nic_on);
@@ -415,7 +420,6 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
- unsigned long flag;
if (!ppsc->fwctrl_lps)
return;
@@ -436,7 +440,7 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw)
if (mac->link_state != MAC80211_LINKED)
return;
- spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+ mutex_lock(&rtlpriv->locks.lps_mutex);
/* Don't need to check (ppsc->dot11_psmode == EACTIVE), because
* bt_ccoexist may ask to enter lps.
@@ -446,7 +450,7 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw)
"Enter 802.11 power save mode...\n");
rtl_lps_set_psmode(hw, EAUTOPS);
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+ mutex_unlock(&rtlpriv->locks.lps_mutex);
}
/* Interrupt safe routine to leave the leisure power save mode.*/
@@ -455,9 +459,8 @@ static void rtl_lps_leave_core(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- unsigned long flag;
- spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+ mutex_lock(&rtlpriv->locks.lps_mutex);
if (ppsc->fwctrl_lps) {
if (ppsc->dot11_psmode != EACTIVE) {
@@ -478,7 +481,7 @@ static void rtl_lps_leave_core(struct ieee80211_hw *hw)
rtl_lps_set_psmode(hw, EACTIVE);
}
}
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+ mutex_unlock(&rtlpriv->locks.lps_mutex);
}
/* For sw LPS*/
@@ -568,7 +571,6 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- unsigned long flag;
if (!rtlpriv->psc.swctrl_lps)
return;
@@ -581,9 +583,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+ mutex_lock(&rtlpriv->locks.lps_mutex);
rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+ mutex_unlock(&rtlpriv->locks.lps_mutex);
}
void rtl_swlps_rfon_wq_callback(void *data)
@@ -600,7 +602,6 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- unsigned long flag;
u8 sleep_intv;
if (!rtlpriv->psc.sw_ps_enabled)
@@ -624,9 +625,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
}
spin_unlock(&rtlpriv->locks.rf_ps_lock);
- spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+ mutex_lock(&rtlpriv->locks.lps_mutex);
rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+ mutex_unlock(&rtlpriv->locks.lps_mutex);
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
!RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 02811eda57cd..d1cb7d405618 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -123,7 +123,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
if (sta && (sta->ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40))
rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (sta && (sta->vht_cap.vht_supported))
+ if (sta && sta->vht_cap.vht_supported)
rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
} else {
if (mac->bw_40)
@@ -135,8 +135,8 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
if (sgi_20 || sgi_40 || sgi_80)
rate->flags |= IEEE80211_TX_RC_SHORT_GI;
if (sta && sta->ht_cap.ht_supported &&
- ((wireless_mode == WIRELESS_MODE_N_5G) ||
- (wireless_mode == WIRELESS_MODE_N_24G)))
+ (wireless_mode == WIRELESS_MODE_N_5G ||
+ wireless_mode == WIRELESS_MODE_N_24G))
rate->flags |= IEEE80211_TX_RC_MCS;
if (sta && sta->vht_cap.vht_supported &&
(wireless_mode == WIRELESS_MODE_AC_5G ||
@@ -216,8 +216,8 @@ static void rtl_tx_status(void *ppriv,
if (sta) {
/* Check if aggregation has to be enabled for this tid */
- sta_entry = (struct rtl_sta_info *) sta->drv_priv;
- if ((sta->ht_cap.ht_supported) &&
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ if (sta->ht_cap.ht_supported &&
!(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
if (ieee80211_is_data_qos(fc)) {
u8 tid = rtl_get_tid(skb);
@@ -265,11 +265,9 @@ static void *rtl_rate_alloc_sta(void *ppriv,
struct rtl_priv *rtlpriv = ppriv;
struct rtl_rate_priv *rate_priv;
- rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp);
- if (!rate_priv) {
- pr_err("Unable to allocate private rc structure\n");
+ rate_priv = kzalloc(sizeof(*rate_priv), gfp);
+ if (!rate_priv)
return NULL;
- }
rtlpriv->rate_priv = rate_priv;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index a2eca669873b..63874512598b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -141,6 +141,8 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
return 1;
pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+ rtlhal->fw_version = le16_to_cpu(pfwheader->version);
+ rtlhal->fw_subversion = pfwheader->subversion;
pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index e30a18e64ff5..988d5ac57d02 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -1472,17 +1472,16 @@ void rtl88ee_card_disable(struct ieee80211_hw *hw)
}
void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd)
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, ISR, *p_inta);
+ intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, intvec->inta);
- *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
- rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+ intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+ rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
}
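
All interrupt_recognized() implementations now fill a single aggregate
instead of four out-parameters. The struct itself is defined elsewhere in
the patch (wifi.h, not shown in this hunk); from its use here it presumably
looks like:

struct rtl_int {
	u32 inta;	/* main ISR bank */
	u32 intb;	/* REG_HISRE bank */
	u32 intc;
	u32 intd;	/* used by 8822BE (H2CDOK) */
};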
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
index cdf49de1e6ed..214cd2a32018 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
@@ -29,8 +29,7 @@
void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw);
void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl88ee_hw_init(struct ieee80211_hw *hw);
void rtl88ee_card_disable(struct ieee80211_hw *hw);
void rtl88ee_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 0f4c86a28716..4a81e0ef4b8e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -1375,19 +1375,13 @@ void rtl92ce_card_disable(struct ieee80211_hw *hw)
}
void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd)
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, ISR, *p_inta);
-
- /*
- * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
- * rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
- */
+ intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, intvec->inta);
}
void rtl92ce_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
index b5c8e2fc1ba2..6711ea1a75d9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
@@ -42,8 +42,7 @@ static inline u8 rtl92c_get_chnl_group(u8 chnl)
void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl92ce_hw_init(struct ieee80211_hw *hw);
void rtl92ce_card_disable(struct ieee80211_hw *hw);
void rtl92ce_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index 0da6c0136857..80123fd97221 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1356,19 +1356,13 @@ void rtl92de_card_disable(struct ieee80211_hw *hw)
}
void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd)
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, ISR, *p_inta);
-
- /*
- * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
- * rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
- */
+ intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, intvec->inta);
}
void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
index 9236aa91273d..e6c702e69ecf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
@@ -29,8 +29,7 @@
void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92de_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl92de_hw_init(struct ieee80211_hw *hw);
void rtl92de_card_disable(struct ieee80211_hw *hw);
void rtl92de_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index fe5da637e77a..fd7928fdbd1a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -1694,17 +1694,16 @@ void rtl92ee_card_disable(struct ieee80211_hw *hw)
}
void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd)
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, ISR, *p_inta);
+ intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, intvec->inta);
- *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
- rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+ intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+ rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
}
void rtl92ee_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
index cd6d3322f033..3a63bec9b0cc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
@@ -29,8 +29,7 @@
void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl92ee_hw_init(struct ieee80211_hw *hw);
void rtl92ee_card_disable(struct ieee80211_hw *hw);
void rtl92ee_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 12255682e890..4f7444331b07 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -498,7 +498,8 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index)
if (!start_rx)
return 0;
- remind_cnt = calc_fifo_space(read_point, write_point);
+ remind_cnt = calc_fifo_space(read_point, write_point,
+ RTL_PCI_MAX_RX_COUNT);
if (remind_cnt == 0)
return 0;
@@ -548,7 +549,6 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index)
u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
{
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
u16 point_diff = 0;
u16 current_tx_read_point = 0, current_tx_write_point = 0;
@@ -560,9 +560,9 @@ u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
current_tx_write_point = (u16)((tmp_4byte) & 0x0fff);
point_diff = calc_fifo_space(current_tx_read_point,
- current_tx_write_point);
+ current_tx_write_point,
+ TX_DESC_NUM_92E);
- rtlpci->tx_ring[q_idx].avl_desc = point_diff;
return point_diff;
}
@@ -907,10 +907,6 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
u8 desc_name, u8 *val)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u16 cur_tx_rp = 0;
- u16 cur_tx_wp = 0;
- static bool over_run;
- u32 tmp = 0;
u8 q_idx = *val;
bool dma64 = rtlpriv->cfg->mod_params->dma64;
@@ -931,38 +927,12 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
return;
}
+ /* the caller must make sure a tx desc is available */
ring->cur_tx_wp = ((ring->cur_tx_wp + 1) % max_tx_desc);
- if (over_run) {
- ring->cur_tx_wp = 0;
- over_run = false;
- }
- if (ring->avl_desc > 1) {
- ring->avl_desc--;
-
- rtl_write_word(rtlpriv,
- get_desc_addr_fr_q_idx(q_idx),
- ring->cur_tx_wp);
- }
-
- if (ring->avl_desc < (max_tx_desc - 15)) {
- u16 point_diff = 0;
-
- tmp =
- rtl_read_dword(rtlpriv,
- get_desc_addr_fr_q_idx(q_idx));
- cur_tx_rp = (u16)((tmp >> 16) & 0x0fff);
- cur_tx_wp = (u16)(tmp & 0x0fff);
-
- ring->cur_tx_wp = cur_tx_wp;
- ring->cur_tx_rp = cur_tx_rp;
- point_diff = ((cur_tx_rp > cur_tx_wp) ?
- (cur_tx_rp - cur_tx_wp) :
- (TX_DESC_NUM_92E - 1 -
- cur_tx_wp + cur_tx_rp));
-
- ring->avl_desc = point_diff;
- }
+ rtl_write_word(rtlpriv,
+ get_desc_addr_fr_q_idx(q_idx),
+ ring->cur_tx_wp);
}
break;
}
@@ -1044,13 +1014,12 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u16 read_point, write_point, available_desc_num;
+ u16 read_point, write_point;
bool ret = false;
static u8 stop_report_cnt;
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
{
- u16 point_diff = 0;
u16 cur_tx_rp, cur_tx_wp;
u32 tmpu32 = 0;
@@ -1060,18 +1029,12 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
cur_tx_rp = (u16)((tmpu32 >> 16) & 0x0fff);
cur_tx_wp = (u16)(tmpu32 & 0x0fff);
- ring->cur_tx_wp = cur_tx_wp;
+ /* no need to update ring->cur_tx_wp here */
ring->cur_tx_rp = cur_tx_rp;
- point_diff = ((cur_tx_rp > cur_tx_wp) ?
- (cur_tx_rp - cur_tx_wp) :
- (TX_DESC_NUM_92E - cur_tx_wp + cur_tx_rp));
-
- ring->avl_desc = point_diff;
}
read_point = ring->cur_tx_rp;
write_point = ring->cur_tx_wp;
- available_desc_num = ring->avl_desc;
if (write_point > read_point) {
if (index < write_point && index >= read_point)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 76bf089cced4..30dea7b9bc17 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -1558,17 +1558,17 @@ void rtl92se_card_disable(struct ieee80211_hw *hw)
udelay(100);
}
-void rtl92se_interrupt_recognized(struct ieee80211_hw *hw, u32 *p_inta,
- u32 *p_intb, u32 *p_intc, u32 *p_intd)
+void rtl92se_interrupt_recognized(struct ieee80211_hw *hw,
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, ISR, *p_inta);
+ intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, intvec->inta);
- *p_intb = rtl_read_dword(rtlpriv, ISR + 4) & rtlpci->irq_mask[1];
- rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
+ intvec->intb = rtl_read_dword(rtlpriv, ISR + 4) & rtlpci->irq_mask[1];
+ rtl_write_dword(rtlpriv, ISR + 4, intvec->intb);
}
void rtl92se_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
index 607056010974..fa836ceefc8f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
@@ -42,8 +42,7 @@ void rtl92se_get_hw_reg(struct ieee80211_hw *hw,
u8 variable, u8 *val);
void rtl92se_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92se_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl92se_hw_init(struct ieee80211_hw *hw);
void rtl92se_card_disable(struct ieee80211_hw *hw);
void rtl92se_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index c3f98d58124c..545115db507e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1340,14 +1340,13 @@ void rtl8723e_card_disable(struct ieee80211_hw *hw)
}
void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd)
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, 0x3a0) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, 0x3a0, *p_inta);
+ intvec->inta = rtl_read_dword(rtlpriv, 0x3a0) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, 0x3a0, intvec->inta);
}
void rtl8723e_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
index 19e467a37c72..c76e453f4f43 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
@@ -34,8 +34,7 @@ void rtl8723e_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw);
void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl8723e_hw_init(struct ieee80211_hw *hw);
void rtl8723e_card_disable(struct ieee80211_hw *hw);
void rtl8723e_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 7cd1ffa7d4a7..f9ccd13c79f9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -43,6 +43,7 @@
#include "../pwrseqcmd.h"
#include "pwrseq.h"
#include "../btcoexist/rtl_btc.h"
+#include <linux/kernel.h>
#define LLT_CONFIG 5
@@ -1682,18 +1683,17 @@ void rtl8723be_card_disable(struct ieee80211_hw *hw)
}
void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd)
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, ISR, *p_inta);
+ intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, intvec->inta);
- *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) &
- rtlpci->irq_mask[1];
- rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+ intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) &
+ rtlpci->irq_mask[1];
+ rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
}
void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw)
@@ -2127,28 +2127,28 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
if (rtlhal->oem_id == RT_CID_DEFAULT) {
/* Does this one have a Toshiba SMID from group 1? */
- for (i = 0; i < sizeof(toshiba_smid1) / sizeof(u16); i++) {
+ for (i = 0; i < ARRAY_SIZE(toshiba_smid1); i++) {
if (rtlefuse->eeprom_smid == toshiba_smid1[i]) {
is_toshiba_smid1 = true;
break;
}
}
/* Does this one have a Toshiba SMID from group 2? */
- for (i = 0; i < sizeof(toshiba_smid2) / sizeof(u16); i++) {
+ for (i = 0; i < ARRAY_SIZE(toshiba_smid2); i++) {
if (rtlefuse->eeprom_smid == toshiba_smid2[i]) {
is_toshiba_smid2 = true;
break;
}
}
/* Does this one have a Samsung SMID? */
- for (i = 0; i < sizeof(samsung_smid) / sizeof(u16); i++) {
+ for (i = 0; i < ARRAY_SIZE(samsung_smid); i++) {
if (rtlefuse->eeprom_smid == samsung_smid[i]) {
is_samsung_smid = true;
break;
}
}
/* Does this one have a Lenovo SMID? */
- for (i = 0; i < sizeof(lenovo_smid) / sizeof(u16); i++) {
+ for (i = 0; i < ARRAY_SIZE(lenovo_smid); i++) {
if (rtlefuse->eeprom_smid == lenovo_smid[i]) {
is_lenovo_smid = true;
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
index 2215a792f6bf..ae856a19e81a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
@@ -30,8 +30,7 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw);
void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl8723be_hw_init(struct ieee80211_hw *hw);
void rtl8723be_card_disable(struct ieee80211_hw *hw);
void rtl8723be_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 9606641519e7..1263b12db5dc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -35,6 +35,7 @@
#include "../rtl8723com/dm_common.h"
#include "table.h"
#include "trx.h"
+#include <linux/kernel.h>
static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw);
static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
@@ -1143,14 +1144,13 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
DESC92C_RATEMCS2, DESC92C_RATEMCS3,
DESC92C_RATEMCS4, DESC92C_RATEMCS5,
DESC92C_RATEMCS6, DESC92C_RATEMCS7};
- u8 i, size;
+ u8 i;
u8 power_index;
if (!rtlefuse->txpwr_fromeprom)
return;
- size = sizeof(cck_rates) / sizeof(u8);
- for (i = 0; i < size; i++) {
+ for (i = 0; i < ARRAY_SIZE(cck_rates); i++) {
power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
cck_rates[i],
rtl_priv(hw)->phy.current_chan_bw,
@@ -1158,8 +1158,7 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
_rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
cck_rates[i]);
}
- size = sizeof(ofdm_rates) / sizeof(u8);
- for (i = 0; i < size; i++) {
+ for (i = 0; i < ARRAY_SIZE(ofdm_rates); i++) {
power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
ofdm_rates[i],
rtl_priv(hw)->phy.current_chan_bw,
@@ -1167,8 +1166,7 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
_rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
ofdm_rates[i]);
}
- size = sizeof(ht_rates_1t) / sizeof(u8);
- for (i = 0; i < size; i++) {
+ for (i = 0; i < ARRAY_SIZE(ht_rates_1t); i++) {
power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
ht_rates_1t[i],
rtl_priv(hw)->phy.current_chan_bw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
index 381c16b9b3a9..160fee8333ae 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
@@ -25,6 +25,7 @@
*
*****************************************************************************/
+#include <linux/kernel.h>
#include "table.h"
u32 RTL8723BEPHY_REG_1TARRAY[] = {
@@ -224,8 +225,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = {
};
-u32 RTL8723BEPHY_REG_1TARRAYLEN =
- sizeof(RTL8723BEPHY_REG_1TARRAY) / sizeof(u32);
+u32 RTL8723BEPHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8723BEPHY_REG_1TARRAY);
u32 RTL8723BEPHY_REG_ARRAY_PG[] = {
0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003800,
@@ -236,8 +236,7 @@ u32 RTL8723BEPHY_REG_ARRAY_PG[] = {
0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436
};
-u32 RTL8723BEPHY_REG_ARRAY_PGLEN =
- sizeof(RTL8723BEPHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8723BEPHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8723BEPHY_REG_ARRAY_PG);
u32 RTL8723BE_RADIOA_1TARRAY[] = {
0x000, 0x00010000,
@@ -373,8 +372,7 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = {
};
-u32 RTL8723BE_RADIOA_1TARRAYLEN =
- sizeof(RTL8723BE_RADIOA_1TARRAY) / sizeof(u32);
+u32 RTL8723BE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8723BE_RADIOA_1TARRAY);
u32 RTL8723BEMAC_1T_ARRAY[] = {
0x02F, 0x00000030,
@@ -483,7 +481,7 @@ u32 RTL8723BEMAC_1T_ARRAY[] = {
};
-u32 RTL8723BEMAC_1T_ARRAYLEN = sizeof(RTL8723BEMAC_1T_ARRAY) / sizeof(u32);
+u32 RTL8723BEMAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8723BEMAC_1T_ARRAY);
u32 RTL8723BEAGCTAB_1TARRAY[] = {
0xC78, 0xFD000001,
@@ -620,4 +618,4 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = {
};
-u32 RTL8723BEAGCTAB_1TARRAYLEN = sizeof(RTL8723BEAGCTAB_1TARRAY) / sizeof(u32);
+u32 RTL8723BEAGCTAB_1TARRAYLEN = ARRAY_SIZE(RTL8723BEAGCTAB_1TARRAY);
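[Editor's note] The table.c and SMID-matching hunks above all replace the open-coded sizeof(array) / sizeof(element) division with ARRAY_SIZE() from <linux/kernel.h>, which is why the new include appears in each file. A minimal sketch of the idiom; the example_smid table and its values are hypothetical, only ARRAY_SIZE() itself comes from the kernel:

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>

/* Hypothetical ID table, standing in for toshiba_smid1 and friends. */
static const u16 example_smid[] = { 0x6151, 0x6152, 0x6153 };

static bool example_match(u16 smid)
{
	size_t i;

	/* Element count, not byte count; the kernel macro breaks the
	 * build if handed a decayed pointer instead of an array.
	 */
	for (i = 0; i < ARRAY_SIZE(example_smid); i++)
		if (example_smid[i] == smid)
			return true;
	return false;
}

Unlike the manual division, ARRAY_SIZE() stays correct if the element type later changes.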
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
index efa7e1262461..521039c5d4ce 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
@@ -196,10 +196,12 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
enum version_8723e version = rtlhal->version;
int max_page;
- if (!rtlhal->pfirmware)
+ if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
return 1;
pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+ rtlhal->fw_version = le16_to_cpu(pfwheader->version);
+ rtlhal->fw_subversion = pfwheader->subversion;
pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 43e18c4c1e68..f20e77b4bb65 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2483,17 +2483,16 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
}
void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd)
+ struct rtl_int *intvec)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
- rtl_write_dword(rtlpriv, ISR, *p_inta);
+ intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, intvec->inta);
- *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
- rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+ intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+ rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
}
void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
index 284d259fe557..e2ab783a2ad9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
@@ -30,8 +30,7 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw);
void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *int_vec);
int rtl8821ae_hw_init(struct ieee80211_hw *hw);
void rtl8821ae_card_disable(struct ieee80211_hw *hw);
void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw);
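[Editor's note] Across the hw.c/hw.h hunks above, the four u32 * out-parameters of interrupt_recognized collapse into one struct rtl_int (added to wifi.h further down in this patch). A hedged sketch of how the shared PCI interrupt path might consume it; everything except struct rtl_int and the interrupt_recognized op is illustrative:

#include <linux/interrupt.h>
#include "wifi.h"	/* struct rtl_int, rtl_priv() */

static irqreturn_t example_rtl_pci_interrupt(int irq, void *dev_id)
{
	struct ieee80211_hw *hw = dev_id;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_int intvec = { 0 };	/* chips without intc/intd leave them zero */

	/* One call fills every status word the chip actually has. */
	rtlpriv->cfg->ops->interrupt_recognized(hw, &intvec);

	if (!intvec.inta && !intvec.intb && !intvec.intc && !intvec.intd)
		return IRQ_NONE;

	/* ... dispatch on the individual status words as before ... */
	return IRQ_HANDLED;
}

Note the zero initializer: drivers such as rtl8723ae write only intvec.inta, so the caller must not depend on the callee clearing the other fields.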
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
index 408c4611e5de..f87f9d03b9fa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
@@ -24,7 +24,7 @@
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
-
+#include <linux/kernel.h>
#include "table.h"
u32 RTL8812AE_PHY_REG_ARRAY[] = {
0x800, 0x8020D010,
@@ -258,8 +258,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
0xEB8, 0x00508242,
};
-u32 RTL8812AE_PHY_REG_1TARRAYLEN =
- sizeof(RTL8812AE_PHY_REG_ARRAY) / sizeof(u32);
+u32 RTL8812AE_PHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_PHY_REG_ARRAY);
u32 RTL8821AE_PHY_REG_ARRAY[] = {
0x800, 0x0020D090,
@@ -436,8 +435,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
0xCB8, 0x00508240,
};
-u32 RTL8821AE_PHY_REG_1TARRAYLEN =
- sizeof(RTL8821AE_PHY_REG_ARRAY) / sizeof(u32);
+u32 RTL8821AE_PHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_PHY_REG_ARRAY);
u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840,
@@ -488,8 +486,7 @@ u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628
};
-u32 RTL8812AE_PHY_REG_ARRAY_PGLEN =
- sizeof(RTL8812AE_PHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8812AE_PHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8812AE_PHY_REG_ARRAY_PG);
u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
@@ -509,8 +506,7 @@ u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022
};
-u32 RTL8821AE_PHY_REG_ARRAY_PGLEN =
- sizeof(RTL8821AE_PHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8821AE_PHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8821AE_PHY_REG_ARRAY_PG);
u32 RTL8812AE_RADIOA_ARRAY[] = {
0x000, 0x00010000,
@@ -927,7 +923,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
0x018, 0x0001712A,
};
-u32 RTL8812AE_RADIOA_1TARRAYLEN = sizeof(RTL8812AE_RADIOA_ARRAY) / sizeof(u32);
+u32 RTL8812AE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_RADIOA_ARRAY);
u32 RTL8812AE_RADIOB_ARRAY[] = {
0x056, 0x00051CF2,
@@ -1335,7 +1331,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
0x008, 0x00008400,
};
-u32 RTL8812AE_RADIOB_1TARRAYLEN = sizeof(RTL8812AE_RADIOB_ARRAY) / sizeof(u32);
+u32 RTL8812AE_RADIOB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_RADIOB_ARRAY);
u32 RTL8821AE_RADIOA_ARRAY[] = {
0x018, 0x0001712A,
@@ -1929,7 +1925,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
};
-u32 RTL8821AE_RADIOA_1TARRAYLEN = sizeof(RTL8821AE_RADIOA_ARRAY) / sizeof(u32);
+u32 RTL8821AE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_RADIOA_ARRAY);
u32 RTL8812AE_MAC_REG_ARRAY[] = {
0x010, 0x0000000C,
@@ -2041,7 +2037,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
0x718, 0x00000040,
};
-u32 RTL8812AE_MAC_1T_ARRAYLEN = sizeof(RTL8812AE_MAC_REG_ARRAY) / sizeof(u32);
+u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY);
u32 RTL8821AE_MAC_REG_ARRAY[] = {
0x428, 0x0000000A,
@@ -2143,7 +2139,7 @@ u32 RTL8821AE_MAC_REG_ARRAY[] = {
0x718, 0x00000040,
};
-u32 RTL8821AE_MAC_1T_ARRAYLEN = sizeof(RTL8821AE_MAC_REG_ARRAY) / sizeof(u32);
+u32 RTL8821AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8821AE_MAC_REG_ARRAY);
u32 RTL8812AE_AGC_TAB_ARRAY[] = {
0x80000001, 0x00000000, 0x40000000, 0x00000000,
@@ -2479,8 +2475,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
0xE50, 0x00000020,
};
-u32 RTL8812AE_AGC_TAB_1TARRAYLEN =
- sizeof(RTL8812AE_AGC_TAB_ARRAY) / sizeof(u32);
+u32 RTL8812AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_AGC_TAB_ARRAY);
u32 RTL8821AE_AGC_TAB_ARRAY[] = {
0x81C, 0xBF000001,
@@ -2676,8 +2671,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
0xC50, 0x00000020,
};
-u32 RTL8821AE_AGC_TAB_1TARRAYLEN =
- sizeof(RTL8821AE_AGC_TAB_ARRAY) / sizeof(u32);
+u32 RTL8821AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_AGC_TAB_ARRAY);
/******************************************************************************
* TXPWR_LMT.TXT
@@ -3250,7 +3244,7 @@ u8 *RTL8812AE_TXPWR_LMT[] = {
"MKK", "5G", "80M", "VHT", "2T", "155", "63"
};
-u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8812AE_TXPWR_LMT) / sizeof(u8 *);
+u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8812AE_TXPWR_LMT);
u8 *RTL8821AE_TXPWR_LMT[] = {
"FCC", "2.4G", "20M", "CCK", "1T", "01", "32",
@@ -3819,4 +3813,4 @@ u8 *RTL8821AE_TXPWR_LMT[] = {
"MKK", "5G", "80M", "VHT", "2T", "155", "63"
};
-u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8821AE_TXPWR_LMT) / sizeof(u8 *);
+u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8821AE_TXPWR_LMT);
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 39b033b3b53a..ce3103bb8ebb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -962,7 +962,6 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
if (ieee80211_is_auth(fc)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
- rtl_ips_nic_on(hw);
}
if (rtlpriv->psc.sw_ps_enabled) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 92d4859ec906..a7aacbc3984e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -873,6 +873,24 @@ enum ratr_table_mode {
RATR_INX_WIRELESS_AC_24N = 9,
};
+enum ratr_table_mode_new {
+ RATEID_IDX_BGN_40M_2SS = 0,
+ RATEID_IDX_BGN_40M_1SS = 1,
+ RATEID_IDX_BGN_20M_2SS_BN = 2,
+ RATEID_IDX_BGN_20M_1SS_BN = 3,
+ RATEID_IDX_GN_N2SS = 4,
+ RATEID_IDX_GN_N1SS = 5,
+ RATEID_IDX_BG = 6,
+ RATEID_IDX_G = 7,
+ RATEID_IDX_B = 8,
+ RATEID_IDX_VHT_2SS = 9,
+ RATEID_IDX_VHT_1SS = 10,
+ RATEID_IDX_MIX1 = 11,
+ RATEID_IDX_MIX2 = 12,
+ RATEID_IDX_VHT_3SS = 13,
+ RATEID_IDX_BGN_3SS = 14,
+};
+
enum rtl_link_state {
MAC80211_NOLINK = 0,
MAC80211_LINKING = 1,
@@ -931,6 +949,10 @@ enum package_type {
PACKAGE_TFBGA79
};
+enum rtl_spec_ver {
+ RTL_SPEC_NEW_RATEID = BIT(0), /* use ratr_table_mode_new */
+};
+
struct octet_string {
u8 *octet;
u16 length;
@@ -2093,14 +2115,21 @@ struct rtl_wow_pattern {
u32 mask[4];
};
+/* struct to store contents of interrupt vectors */
+struct rtl_int {
+ u32 inta;
+ u32 intb;
+ u32 intc;
+ u32 intd;
+};
+
struct rtl_hal_ops {
int (*init_sw_vars) (struct ieee80211_hw *hw);
void (*deinit_sw_vars) (struct ieee80211_hw *hw);
void (*read_chip_version)(struct ieee80211_hw *hw);
void (*read_eeprom_info) (struct ieee80211_hw *hw);
void (*interrupt_recognized) (struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb,
- u32 *p_intc, u32 *p_intd);
+ struct rtl_int *intvec);
int (*hw_init) (struct ieee80211_hw *hw);
void (*hw_disable) (struct ieee80211_hw *hw);
void (*hw_suspend) (struct ieee80211_hw *hw);
@@ -2308,6 +2337,7 @@ struct rtl_hal_cfg {
struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
struct rtl_hal_usbint_cfg *usb_interface_cfg;
+ enum rtl_spec_ver spec_ver;
/*this map used for some registers or vars
defined int HAL but used in MAIN */
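[Editor's note] The new spec_ver field, together with RTL_SPEC_NEW_RATEID from the earlier wifi.h hunk, gives common code a capability bit for choosing between ratr_table_mode and ratr_table_mode_new. A sketch of the intended test; the helper name is illustrative and assumes only the field and flag added by this patch:

#include "wifi.h"	/* struct rtl_priv, enum rtl_spec_ver */

/* Hypothetical helper: true when the chip uses the new rate-ID table. */
static inline bool example_use_new_rateid(struct rtl_priv *rtlpriv)
{
	return !!(rtlpriv->cfg->spec_ver & RTL_SPEC_NEW_RATEID);
}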
@@ -2318,17 +2348,14 @@ struct rtl_hal_cfg {
struct rtl_locks {
/* mutex */
struct mutex conf_mutex;
- struct mutex ps_mutex;
+ struct mutex ips_mutex; /* mutex for enter/leave IPS */
+ struct mutex lps_mutex; /* mutex for enter/leave LPS */
/*spin lock */
- spinlock_t ips_lock;
spinlock_t irq_th_lock;
- spinlock_t irq_pci_lock;
- spinlock_t tx_lock;
spinlock_t h2c_lock;
spinlock_t rf_ps_lock;
spinlock_t rf_lock;
- spinlock_t lps_lock;
spinlock_t waitq_lock;
spinlock_t entry_list_lock;
spinlock_t usb_lock;
@@ -2341,9 +2368,6 @@ struct rtl_locks {
/*Dual mac*/
spinlock_t cck_and_rw_pagea_lock;
- /*Easy concurrent*/
- spinlock_t check_sendpkt_lock;
-
spinlock_t iqk_lock;
};
@@ -2374,6 +2398,12 @@ struct rtl_works {
struct work_struct fill_h2c_cmd;
};
+struct rtl_debug {
+ /* add for debug */
+ struct dentry *debugfs_dir;
+ char debugfs_name[20];
+};
+
#define MIMO_PS_STATIC 0
#define MIMO_PS_DYNAMIC 1
#define MIMO_PS_NOLIMIT 3
@@ -2493,6 +2523,9 @@ struct rtl_btc_info {
struct bt_coexist_info {
struct rtl_btc_ops *btc_ops;
struct rtl_btc_info btc_info;
+ /* btc context */
+ void *btc_context;
+ void *wifi_only_context;
/* EEPROM BT info. */
u8 eeprom_bt_coexist;
u8 eeprom_bt_type;
@@ -2549,16 +2582,22 @@ struct bt_coexist_info {
struct rtl_btc_ops {
void (*btc_init_variables) (struct rtl_priv *rtlpriv);
+ void (*btc_init_variables_wifi_only)(struct rtl_priv *rtlpriv);
+ void (*btc_deinit_variables)(struct rtl_priv *rtlpriv);
void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
+ void (*btc_power_on_setting)(struct rtl_priv *rtlpriv);
void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
+ void (*btc_init_hw_config_wifi_only)(struct rtl_priv *rtlpriv);
void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type);
void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype);
+ void (*btc_scan_notify_wifi_only)(struct rtl_priv *rtlpriv,
+ u8 scantype);
void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action);
void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv,
enum rt_media_status mstatus);
void (*btc_periodical) (struct rtl_priv *rtlpriv);
- void (*btc_halt_notify) (void);
+ void (*btc_halt_notify)(struct rtl_priv *rtlpriv);
void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv,
u8 *tmp_buf, u8 length);
void (*btc_btmpinfo_notify)(struct rtl_priv *rtlpriv,
@@ -2568,6 +2607,12 @@ struct rtl_btc_ops {
bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv);
void (*btc_special_packet_notify)(struct rtl_priv *rtlpriv,
u8 pkt_type);
+ void (*btc_switch_band_notify)(struct rtl_priv *rtlpriv, u8 type,
+ bool scanning);
+ void (*btc_switch_band_notify_wifi_only)(struct rtl_priv *rtlpriv,
+ u8 type, bool scanning);
+ void (*btc_display_bt_coex_info)(struct rtl_priv *rtlpriv,
+ struct seq_file *m);
void (*btc_record_pwr_mode)(struct rtl_priv *rtlpriv, u8 *buf, u8 len);
u8 (*btc_get_lps_val)(struct rtl_priv *rtlpriv);
u8 (*btc_get_rpwm_val)(struct rtl_priv *rtlpriv);
@@ -2642,6 +2687,7 @@ struct rtl_priv {
/* c2hcmd list for kthread level access */
struct list_head c2hcmd_list;
+ struct rtl_debug dbg;
int max_fw_size;
/*
diff --git a/drivers/net/wireless/ti/wl1251/init.c b/drivers/net/wireless/ti/wl1251/init.c
index 1d799bffaa9f..e7d77acdf18c 100644
--- a/drivers/net/wireless/ti/wl1251/init.c
+++ b/drivers/net/wireless/ti/wl1251/init.c
@@ -310,10 +310,8 @@ static int wl1251_hw_init_data_path_config(struct wl1251 *wl)
/* asking for the data path parameters */
wl->data_path = kzalloc(sizeof(struct acx_data_path_params_resp),
GFP_KERNEL);
- if (!wl->data_path) {
- wl1251_error("Couldnt allocate data path parameters");
+ if (!wl->data_path)
return -ENOMEM;
- }
ret = wl1251_acx_data_path_params(wl, wl->data_path);
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 6d02c660b4ab..037defd10b91 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1200,8 +1200,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
- wl1251_acx_arp_ip_filter(wl, enable, addr);
-
+ ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
if (ret < 0)
goto out_sleep;
}
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index a4859993db3c..3ca9167d6146 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -146,7 +146,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif)
ret = wl1271_cmd_configure(wl, ACX_FEATURE_CFG,
feature, sizeof(*feature));
if (ret < 0) {
- wl1271_error("Couldnt set HW encryption");
+ wl1271_error("Couldn't set HW encryption");
goto out;
}
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index f46d7fdf9a00..7011c5d9599f 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1129,10 +1129,8 @@ int wl12xx_acx_config_hangover(struct wl1271 *wl);
int wlcore_acx_average_rssi(struct wl1271 *wl, struct wl12xx_vif *wlvif,
s8 *avg_rssi);
-#ifdef CONFIG_PM
int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
enum rx_filter_action action);
int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
struct wl12xx_rx_filter *filter);
-#endif /* CONFIG_PM */
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index d47921a84509..09714034dbf1 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -42,6 +42,7 @@
#include "sysfs.h"
#define WL1271_BOOT_RETRIES 3
+#define WL1271_SUSPEND_SLEEP 100
static char *fwlog_param;
static int fwlog_mem_blocks = -1;
@@ -388,7 +389,6 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
struct wl12xx_vif *wlvif;
- struct timespec ts;
u32 old_tx_blk_count = wl->tx_blocks_available;
int avail, freed_blocks;
int i;
@@ -485,8 +485,7 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
}
/* update the host-chipset time offset */
- getnstimeofday(&ts);
- wl->time_offset = (timespec_to_ns(&ts) >> 10) -
+ wl->time_offset = (ktime_get_boot_ns() >> 10) -
(s64)(status->fw_localtime);
wl->fw_fast_lnk_map = status->link_fast_bitmap;
@@ -979,6 +978,24 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
+static int wlcore_fw_sleep(struct wl1271 *wl)
+{
+ int ret;
+
+ mutex_lock(&wl->mutex);
+ ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
+ if (ret < 0) {
+ wl12xx_queue_recovery_work(wl);
+ goto out;
+ }
+ set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
+out:
+ mutex_unlock(&wl->mutex);
+ mdelay(WL1271_SUSPEND_SLEEP);
+
+ return 0;
+}
+
static int wl1271_setup(struct wl1271 *wl)
{
wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
@@ -1326,7 +1343,6 @@ static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
}
-#ifdef CONFIG_PM
static int
wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
{
@@ -1698,8 +1714,8 @@ static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
}
}
-static int wl1271_op_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wow)
+static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wow)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
@@ -1749,7 +1765,6 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
mutex_unlock(&wl->mutex);
if (ret < 0) {
@@ -1782,10 +1797,19 @@ out_sleep:
*/
cancel_delayed_work(&wl->tx_watchdog_work);
+ /*
+ * Use an immediate call to let the firmware go into power save
+ * during suspend.
+ * Deferring this last write to a workqueue meant it only happened on
+ * resume, leaving the firmware with power save disabled and drawing
+ * full power throughout wowlan suspend.
+ */
+ wlcore_fw_sleep(wl);
+
return 0;
}
-static int wl1271_op_resume(struct ieee80211_hw *hw)
+static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
@@ -1869,7 +1893,6 @@ out:
return 0;
}
-#endif
static int wl1271_op_start(struct ieee80211_hw *hw)
{
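[Editor's note] The main.c hunks drop the #ifdef CONFIG_PM block around the suspend/resume handlers (matching the guard removal in acx.h above) and mark them __maybe_unused instead, so the bodies always compile and only their registration stays conditional. A sketch of the resulting pattern; this ops table is illustrative, not the full wl1271_ops:

#include <net/mac80211.h>

static const struct ieee80211_ops example_ops = {
#ifdef CONFIG_PM
	.suspend	= wl1271_op_suspend,	/* compiled either way, used only with PM */
	.resume		= wl1271_op_resume,
#endif
	.start		= wl1271_op_start,
	/* ... */
};

This trades a little dead code when CONFIG_PM is off for fewer preprocessor seams and constant compile coverage of the PM paths.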
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index a3f5e9ca492a..00e9b4624dcf 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -264,7 +264,6 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, u32 extra,
struct ieee80211_tx_info *control, u8 hlid)
{
- struct timespec ts;
struct wl1271_tx_hw_descr *desc;
int ac, rate_idx;
s64 hosttime;
@@ -287,8 +286,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
}
/* configure packet life time */
- getnstimeofday(&ts);
- hosttime = (timespec_to_ns(&ts) >> 10);
+ hosttime = (ktime_get_boot_ns() >> 10);
desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
is_dummy = wl12xx_is_dummy_packet(wl, skb);
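[Editor's note] Both timestamp sites (wlcore_fw_status() and wl1271_tx_fill_hdr()) now derive host time from ktime_get_boot_ns() rather than a getnstimeofday()/timespec_to_ns() round-trip, dropping the struct timespec temporary and moving off the settimeofday()-sensitive wall clock. A minimal sketch of the replacement, assuming only that <linux/timekeeping.h> of this kernel era provides ktime_get_boot_ns():

#include <linux/timekeeping.h>
#include <linux/types.h>

static u64 example_hosttime(void)
{
	/*
	 * Old: struct timespec ts; getnstimeofday(&ts);
	 *      return timespec_to_ns(&ts) >> 10;
	 * New: boot clock, monotonic across settimeofday() and
	 *      advancing through suspend, in ~microsecond (ns/1024) units.
	 */
	return ktime_get_boot_ns() >> 10;
}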
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c5a34671abda..9bd7ddeeb6a5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1326,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);
+ xenbus_switch_state(dev, XenbusStateInitialising);
return netdev;
exit:
diff --git a/drivers/nubus/Makefile b/drivers/nubus/Makefile
index 21bda2031e7e..6d063cde39d1 100644
--- a/drivers/nubus/Makefile
+++ b/drivers/nubus/Makefile
@@ -2,6 +2,6 @@
# Makefile for the nubus specific drivers.
#
-obj-y := nubus.o
+obj-y := nubus.o bus.o
obj-$(CONFIG_PROC_FS) += proc.o
diff --git a/drivers/nubus/bus.c b/drivers/nubus/bus.c
new file mode 100644
index 000000000000..d306c348c857
--- /dev/null
+++ b/drivers/nubus/bus.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Bus implementation for the NuBus subsystem.
+//
+// Copyright (C) 2017 Finn Thain
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/nubus.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#define to_nubus_board(d) container_of(d, struct nubus_board, dev)
+#define to_nubus_driver(d) container_of(d, struct nubus_driver, driver)
+
+static int nubus_bus_match(struct device *dev, struct device_driver *driver)
+{
+ return 1;
+}
+
+static int nubus_device_probe(struct device *dev)
+{
+ struct nubus_driver *ndrv = to_nubus_driver(dev->driver);
+ int err = -ENODEV;
+
+ if (ndrv->probe)
+ err = ndrv->probe(to_nubus_board(dev));
+ return err;
+}
+
+static int nubus_device_remove(struct device *dev)
+{
+ struct nubus_driver *ndrv = to_nubus_driver(dev->driver);
+ int err = -ENODEV;
+
+ if (dev->driver && ndrv->remove)
+ err = ndrv->remove(to_nubus_board(dev));
+ return err;
+}
+
+struct bus_type nubus_bus_type = {
+ .name = "nubus",
+ .match = nubus_bus_match,
+ .probe = nubus_device_probe,
+ .remove = nubus_device_remove,
+};
+EXPORT_SYMBOL(nubus_bus_type);
+
+int nubus_driver_register(struct nubus_driver *ndrv)
+{
+ ndrv->driver.bus = &nubus_bus_type;
+ return driver_register(&ndrv->driver);
+}
+EXPORT_SYMBOL(nubus_driver_register);
+
+void nubus_driver_unregister(struct nubus_driver *ndrv)
+{
+ driver_unregister(&ndrv->driver);
+}
+EXPORT_SYMBOL(nubus_driver_unregister);
+
+static struct device nubus_parent = {
+ .init_name = "nubus",
+};
+
+int __init nubus_bus_register(void)
+{
+ int err;
+
+ err = device_register(&nubus_parent);
+ if (err)
+ return err;
+
+ err = bus_register(&nubus_bus_type);
+ if (!err)
+ return 0;
+
+ device_unregister(&nubus_parent);
+ return err;
+}
+
+static void nubus_device_release(struct device *dev)
+{
+ struct nubus_board *board = to_nubus_board(dev);
+ struct nubus_rsrc *fres, *tmp;
+
+ list_for_each_entry_safe(fres, tmp, &nubus_func_rsrcs, list)
+ if (fres->board == board) {
+ list_del(&fres->list);
+ kfree(fres);
+ }
+ kfree(board);
+}
+
+int nubus_device_register(struct nubus_board *board)
+{
+ board->dev.parent = &nubus_parent;
+ board->dev.release = nubus_device_release;
+ board->dev.bus = &nubus_bus_type;
+ dev_set_name(&board->dev, "slot.%X", board->slot);
+ return device_register(&board->dev);
+}
+
+static int nubus_print_device_name_fn(struct device *dev, void *data)
+{
+ struct nubus_board *board = to_nubus_board(dev);
+ struct seq_file *m = data;
+
+ seq_printf(m, "Slot %X: %s\n", board->slot, board->name);
+ return 0;
+}
+
+int nubus_proc_show(struct seq_file *m, void *data)
+{
+ return bus_for_each_dev(&nubus_bus_type, NULL, m,
+ nubus_print_device_name_fn);
+}
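[Editor's note] With the bus type in place, a NuBus client needs only a struct nubus_driver and the register/unregister pair exported above. A hedged sketch of a minimal module; the driver name and probe body are illustrative, while the structure fields follow the bus.c code in this patch:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/nubus.h>

static int example_probe(struct nubus_board *board)
{
	dev_info(&board->dev, "bound to board in slot %X\n", board->slot);
	return 0;
}

static struct nubus_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name	= "example_nubus",
		.owner	= THIS_MODULE,
	},
};

/* Registers via nubus_driver_register() at init, unregisters at exit. */
module_driver(example_driver, nubus_driver_register, nubus_driver_unregister);

MODULE_LICENSE("GPL");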
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index b793727cd4f7..4621ff98138c 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -31,8 +32,7 @@
/* Globals */
-struct nubus_dev *nubus_devices;
-struct nubus_board *nubus_boards;
+LIST_HEAD(nubus_func_rsrcs);
/* Meaning of "bytelanes":
@@ -146,7 +146,7 @@ static inline void *nubus_rom_addr(int slot)
return (void *)(0xF1000000 + (slot << 24));
}
-static unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
+unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
{
unsigned char *p = nd->base;
@@ -161,7 +161,7 @@ static unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
pointed to with offsets) out of the card ROM. */
void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
- int len)
+ unsigned int len)
{
unsigned char *t = (unsigned char *)dest;
unsigned char *p = nubus_dirptr(dirent);
@@ -173,21 +173,49 @@ void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
}
EXPORT_SYMBOL(nubus_get_rsrc_mem);
-void nubus_get_rsrc_str(void *dest, const struct nubus_dirent *dirent,
- int len)
+unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent,
+ unsigned int len)
{
- unsigned char *t = (unsigned char *)dest;
+ char *t = dest;
unsigned char *p = nubus_dirptr(dirent);
- while (len) {
- *t = nubus_get_rom(&p, 1, dirent->mask);
- if (!*t++)
+ while (len > 1) {
+ unsigned char c = nubus_get_rom(&p, 1, dirent->mask);
+
+ if (!c)
break;
+ *t++ = c;
len--;
}
+ if (len > 0)
+ *t = '\0';
+ return t - dest;
}
EXPORT_SYMBOL(nubus_get_rsrc_str);
+void nubus_seq_write_rsrc_mem(struct seq_file *m,
+ const struct nubus_dirent *dirent,
+ unsigned int len)
+{
+ unsigned long buf[32];
+ unsigned int buf_size = sizeof(buf);
+ unsigned char *p = nubus_dirptr(dirent);
+
+ /* If possible, write out full buffers */
+ while (len >= buf_size) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ buf[i] = nubus_get_rom(&p, sizeof(buf[0]),
+ dirent->mask);
+ seq_write(m, buf, buf_size);
+ len -= buf_size;
+ }
+ /* If not, write out individual bytes */
+ while (len--)
+ seq_putc(m, nubus_get_rom(&p, 1, dirent->mask));
+}
+
int nubus_get_root_dir(const struct nubus_board *board,
struct nubus_dir *dir)
{
@@ -199,12 +227,11 @@ int nubus_get_root_dir(const struct nubus_board *board,
EXPORT_SYMBOL(nubus_get_root_dir);
/* This is a slyly renamed version of the above */
-int nubus_get_func_dir(const struct nubus_dev *dev,
- struct nubus_dir *dir)
+int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir)
{
- dir->ptr = dir->base = dev->directory;
+ dir->ptr = dir->base = fres->directory;
dir->done = 0;
- dir->mask = dev->board->lanes;
+ dir->mask = fres->board->lanes;
return 0;
}
EXPORT_SYMBOL(nubus_get_func_dir);
@@ -277,51 +304,20 @@ EXPORT_SYMBOL(nubus_rewinddir);
/* Driver interface functions, more or less like in pci.c */
-struct nubus_dev*
-nubus_find_device(unsigned short category, unsigned short type,
- unsigned short dr_hw, unsigned short dr_sw,
- const struct nubus_dev *from)
-{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->category == category && itor->type == type &&
- itor->dr_hw == dr_hw && itor->dr_sw == dr_sw)
- return itor;
- itor = itor->next;
- }
- return NULL;
-}
-EXPORT_SYMBOL(nubus_find_device);
-
-struct nubus_dev*
-nubus_find_type(unsigned short category, unsigned short type,
- const struct nubus_dev *from)
+struct nubus_rsrc *nubus_first_rsrc_or_null(void)
{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->category == category && itor->type == type)
- return itor;
- itor = itor->next;
- }
- return NULL;
+ return list_first_entry_or_null(&nubus_func_rsrcs, struct nubus_rsrc,
+ list);
}
-EXPORT_SYMBOL(nubus_find_type);
+EXPORT_SYMBOL(nubus_first_rsrc_or_null);
-struct nubus_dev*
-nubus_find_slot(unsigned int slot, const struct nubus_dev *from)
+struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from)
{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->board->slot == slot)
- return itor;
- itor = itor->next;
- }
- return NULL;
+ if (list_is_last(&from->list, &nubus_func_rsrcs))
+ return NULL;
+ return list_next_entry(from, list);
}
-EXPORT_SYMBOL(nubus_find_slot);
+EXPORT_SYMBOL(nubus_next_rsrc_or_null);
int
nubus_find_rsrc(struct nubus_dir *dir, unsigned char rsrc_type,
@@ -339,31 +335,83 @@ EXPORT_SYMBOL(nubus_find_rsrc);
looking at, and print out lots and lots of information from the
resource blocks. */
-/* FIXME: A lot of this stuff will eventually be useful after
- initialization, for intelligently probing Ethernet and video chips,
- among other things. The rest of it should go in the /proc code.
- For now, we just use it to give verbose boot logs. */
+static int __init nubus_get_block_rsrc_dir(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *parent)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+
+ nubus_get_subdir(parent, &dir);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
-static int __init nubus_show_display_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+ while (nubus_readdir(&dir, &ent) != -1) {
+ u32 size;
+
+ nubus_get_rsrc_mem(&size, &ent, 4);
+ pr_debug(" block (0x%x), size %d\n", ent.type, size);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, size);
+ }
+ return 0;
+}
+
+static int __init nubus_get_display_vidmode(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *parent)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+
+ nubus_get_subdir(parent, &dir);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
+
+ while (nubus_readdir(&dir, &ent) != -1) {
+ switch (ent.type) {
+ case 1: /* mVidParams */
+ case 2: /* mTable */
+ {
+ u32 size;
+
+ nubus_get_rsrc_mem(&size, &ent, 4);
+ pr_debug(" block (0x%x), size %d\n", ent.type,
+ size);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, size);
+ break;
+ }
+ default:
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent.type, ent.data);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 0);
+ }
+ }
+ return 0;
+}
+
+static int __init nubus_get_display_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_GAMMADIR:
- pr_info(" gamma directory offset: 0x%06x\n", ent->data);
+ pr_debug(" gamma directory offset: 0x%06x\n", ent->data);
+ nubus_get_block_rsrc_dir(fres->board, procdir, ent);
break;
case 0x0080 ... 0x0085:
- pr_info(" mode %02X info offset: 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" mode 0x%02x info offset: 0x%06x\n",
+ ent->type, ent->data);
+ nubus_get_display_vidmode(fres->board, procdir, ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_network_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_network_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_MAC_ADDRESS:
@@ -371,18 +419,21 @@ static int __init nubus_show_network_resource(struct nubus_dev *dev,
char addr[6];
nubus_get_rsrc_mem(addr, ent, 6);
- pr_info(" MAC address: %pM\n", addr);
+ pr_debug(" MAC address: %pM\n", addr);
+ nubus_proc_add_rsrc_mem(procdir, ent, 6);
break;
}
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_cpu_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_MEMINFO:
@@ -390,8 +441,9 @@ static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
unsigned long meminfo[2];
nubus_get_rsrc_mem(&meminfo, ent, 8);
- pr_info(" memory: [ 0x%08lx 0x%08lx ]\n",
- meminfo[0], meminfo[1]);
+ pr_debug(" memory: [ 0x%08lx 0x%08lx ]\n",
+ meminfo[0], meminfo[1]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 8);
break;
}
case NUBUS_RESID_ROMINFO:
@@ -399,57 +451,60 @@ static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
unsigned long rominfo[2];
nubus_get_rsrc_mem(&rominfo, ent, 8);
- pr_info(" ROM: [ 0x%08lx 0x%08lx ]\n",
- rominfo[0], rominfo[1]);
+ pr_debug(" ROM: [ 0x%08lx 0x%08lx ]\n",
+ rominfo[0], rominfo[1]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 8);
break;
}
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_private_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_private_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
- switch (dev->category) {
+ switch (fres->category) {
case NUBUS_CAT_DISPLAY:
- nubus_show_display_resource(dev, ent);
+ nubus_get_display_resource(fres, procdir, ent);
break;
case NUBUS_CAT_NETWORK:
- nubus_show_network_resource(dev, ent);
+ nubus_get_network_resource(fres, procdir, ent);
break;
case NUBUS_CAT_CPU:
- nubus_show_cpu_resource(dev, ent);
+ nubus_get_cpu_resource(fres, procdir, ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static struct nubus_dev * __init
+static struct nubus_rsrc * __init
nubus_get_functional_resource(struct nubus_board *board, int slot,
const struct nubus_dirent *parent)
{
struct nubus_dir dir;
struct nubus_dirent ent;
- struct nubus_dev *dev;
+ struct nubus_rsrc *fres;
- pr_info(" Function 0x%02x:\n", parent->type);
+ pr_debug(" Functional resource 0x%02x:\n", parent->type);
nubus_get_subdir(parent, &dir);
-
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(board->procdir, parent, board);
/* Actually we should probably panic if this fails */
- if ((dev = kzalloc(sizeof(*dev), GFP_ATOMIC)) == NULL)
+ fres = kzalloc(sizeof(*fres), GFP_ATOMIC);
+ if (!fres)
return NULL;
- dev->resid = parent->type;
- dev->directory = dir.base;
- dev->board = board;
+ fres->resid = parent->type;
+ fres->directory = dir.base;
+ fres->board = board;
while (nubus_readdir(&dir, &ent) != -1) {
switch (ent.type) {
@@ -458,130 +513,96 @@ nubus_get_functional_resource(struct nubus_board *board, int slot,
unsigned short nbtdata[4];
nubus_get_rsrc_mem(nbtdata, &ent, 8);
- dev->category = nbtdata[0];
- dev->type = nbtdata[1];
- dev->dr_sw = nbtdata[2];
- dev->dr_hw = nbtdata[3];
- pr_info(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
- nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ fres->category = nbtdata[0];
+ fres->type = nbtdata[1];
+ fres->dr_sw = nbtdata[2];
+ fres->dr_hw = nbtdata[3];
+ pr_debug(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
+ nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 8);
break;
}
case NUBUS_RESID_NAME:
{
- nubus_get_rsrc_str(dev->name, &ent, 64);
- pr_info(" name: %s\n", dev->name);
+ char name[64];
+ unsigned int len;
+
+ len = nubus_get_rsrc_str(name, &ent, sizeof(name));
+ pr_debug(" name: %s\n", name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
break;
}
case NUBUS_RESID_DRVRDIR:
{
/* MacOS driver. If we were NetBSD we might
use this :-) */
- struct nubus_dir drvr_dir;
- struct nubus_dirent drvr_ent;
-
- nubus_get_subdir(&ent, &drvr_dir);
- nubus_readdir(&drvr_dir, &drvr_ent);
- dev->driver = nubus_dirptr(&drvr_ent);
- pr_info(" driver at: 0x%p\n", dev->driver);
+ pr_debug(" driver directory offset: 0x%06x\n",
+ ent.data);
+ nubus_get_block_rsrc_dir(board, dir.procdir, &ent);
break;
}
case NUBUS_RESID_MINOR_BASEOS:
+ {
/* We will need this in order to support
multiple framebuffers. It might be handy
for Ethernet as well */
- nubus_get_rsrc_mem(&dev->iobase, &ent, 4);
- pr_info(" memory offset: 0x%08lx\n", dev->iobase);
+ u32 base_offset;
+
+ nubus_get_rsrc_mem(&base_offset, &ent, 4);
+ pr_debug(" memory offset: 0x%08x\n", base_offset);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 4);
break;
+ }
case NUBUS_RESID_MINOR_LENGTH:
+ {
/* Ditto */
- nubus_get_rsrc_mem(&dev->iosize, &ent, 4);
- pr_info(" memory length: 0x%08lx\n", dev->iosize);
+ u32 length;
+
+ nubus_get_rsrc_mem(&length, &ent, 4);
+ pr_debug(" memory length: 0x%08x\n", length);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 4);
break;
+ }
case NUBUS_RESID_FLAGS:
- dev->flags = ent.data;
- pr_info(" flags: 0x%06x\n", dev->flags);
+ pr_debug(" flags: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_HWDEVID:
- dev->hwdevid = ent.data;
- pr_info(" hwdevid: 0x%06x\n", dev->hwdevid);
+ pr_debug(" hwdevid: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
default:
/* Local/Private resources have their own
function */
- nubus_show_private_resource(dev, &ent);
+ nubus_get_private_resource(fres, dir.procdir, &ent);
}
}
- return dev;
-}
-
-/* This is cool. */
-static int __init nubus_get_vidnames(struct nubus_board *board,
- const struct nubus_dirent *parent)
-{
- struct nubus_dir dir;
- struct nubus_dirent ent;
-
- /* FIXME: obviously we want to put this in a header file soon */
- struct vidmode {
- u32 size;
- /* Don't know what this is yet */
- u16 id;
- /* Longest one I've seen so far is 26 characters */
- char name[32];
- };
-
- pr_info(" video modes supported:\n");
- nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
-
- while (nubus_readdir(&dir, &ent) != -1) {
- struct vidmode mode;
- u32 size;
-
- /* First get the length */
- nubus_get_rsrc_mem(&size, &ent, 4);
-
- /* Now clobber the whole thing */
- if (size > sizeof(mode) - 1)
- size = sizeof(mode) - 1;
- memset(&mode, 0, sizeof(mode));
- nubus_get_rsrc_mem(&mode, &ent, size);
- pr_info(" %02X: (%02X) %s\n", ent.type,
- mode.id, mode.name);
- }
- return 0;
+ return fres;
}
/* This is *really* cool. */
static int __init nubus_get_icon(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
const struct nubus_dirent *ent)
{
/* Should be 32x32 if my memory serves me correctly */
- unsigned char icon[128];
- int x, y;
+ u32 icon[32];
+ int i;
nubus_get_rsrc_mem(&icon, ent, 128);
- pr_info(" icon:\n");
-
- /* We should actually plot these somewhere in the framebuffer
- init. This is just to demonstrate that they do, in fact,
- exist */
- for (y = 0; y < 32; y++) {
- pr_info(" ");
- for (x = 0; x < 32; x++) {
- if (icon[y * 4 + x / 8] & (0x80 >> (x % 8)))
- pr_cont("*");
- else
- pr_cont(" ");
- }
- pr_cont("\n");
- }
+ pr_debug(" icon:\n");
+ for (i = 0; i < 8; i++)
+ pr_debug(" %08x %08x %08x %08x\n",
+ icon[i * 4 + 0], icon[i * 4 + 1],
+ icon[i * 4 + 2], icon[i * 4 + 3]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 128);
+
return 0;
}
static int __init nubus_get_vendorinfo(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
const struct nubus_dirent *parent)
{
struct nubus_dir dir;
@@ -589,19 +610,20 @@ static int __init nubus_get_vendorinfo(struct nubus_board *board,
static char *vendor_fields[6] = { "ID", "serial", "revision",
"part", "date", "unknown field" };
- pr_info(" vendor info:\n");
+ pr_debug(" vendor info:\n");
nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
while (nubus_readdir(&dir, &ent) != -1) {
char name[64];
+ unsigned int len;
/* These are all strings, we think */
- nubus_get_rsrc_str(name, &ent, 64);
- if (ent.type > 5)
+ len = nubus_get_rsrc_str(name, &ent, sizeof(name));
+ if (ent.type < 1 || ent.type > 5)
ent.type = 5;
- pr_info(" %s: %s\n", vendor_fields[ent.type - 1], name);
+ pr_debug(" %s: %s\n", vendor_fields[ent.type - 1], name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
}
return 0;
}
@@ -612,9 +634,9 @@ static int __init nubus_get_board_resource(struct nubus_board *board, int slot,
struct nubus_dir dir;
struct nubus_dirent ent;
+ pr_debug(" Board resource 0x%02x:\n", parent->type);
nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(board->procdir, parent, board);
while (nubus_readdir(&dir, &ent) != -1) {
switch (ent.type) {
@@ -625,64 +647,81 @@ static int __init nubus_get_board_resource(struct nubus_board *board, int slot,
useful except insofar as it tells us that
we really are looking at a board resource. */
nubus_get_rsrc_mem(nbtdata, &ent, 8);
- pr_info(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
- nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ pr_debug(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
+ nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
if (nbtdata[0] != 1 || nbtdata[1] != 0 ||
nbtdata[2] != 0 || nbtdata[3] != 0)
- pr_err("this sResource is not a board resource!\n");
+ pr_err("Slot %X: sResource is not a board resource!\n",
+ slot);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 8);
break;
}
case NUBUS_RESID_NAME:
- nubus_get_rsrc_str(board->name, &ent, 64);
- pr_info(" name: %s\n", board->name);
+ {
+ unsigned int len;
+
+ len = nubus_get_rsrc_str(board->name, &ent,
+ sizeof(board->name));
+ pr_debug(" name: %s\n", board->name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
break;
+ }
case NUBUS_RESID_ICON:
- nubus_get_icon(board, &ent);
+ nubus_get_icon(board, dir.procdir, &ent);
break;
case NUBUS_RESID_BOARDID:
- pr_info(" board id: 0x%x\n", ent.data);
+ pr_debug(" board id: 0x%x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_PRIMARYINIT:
- pr_info(" primary init offset: 0x%06x\n", ent.data);
+ pr_debug(" primary init offset: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_VENDORINFO:
- nubus_get_vendorinfo(board, &ent);
+ nubus_get_vendorinfo(board, dir.procdir, &ent);
break;
case NUBUS_RESID_FLAGS:
- pr_info(" flags: 0x%06x\n", ent.data);
+ pr_debug(" flags: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_HWDEVID:
- pr_info(" hwdevid: 0x%06x\n", ent.data);
+ pr_debug(" hwdevid: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_SECONDINIT:
- pr_info(" secondary init offset: 0x%06x\n", ent.data);
+ pr_debug(" secondary init offset: 0x%06x\n",
+ ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
/* WTF isn't this in the functional resources? */
case NUBUS_RESID_VIDNAMES:
- nubus_get_vidnames(board, &ent);
+ pr_debug(" vidnames directory offset: 0x%06x\n",
+ ent.data);
+ nubus_get_block_rsrc_dir(board, dir.procdir, &ent);
break;
/* Same goes for this */
case NUBUS_RESID_VIDMODES:
- pr_info(" video mode parameter directory offset: 0x%06x\n",
- ent.data);
+ pr_debug(" video mode parameter directory offset: 0x%06x\n",
+ ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent.type, ent.data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent.type, ent.data);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 0);
}
}
return 0;
}
-/* Add a board (might be many devices) to the list */
-static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
+static void __init nubus_add_board(int slot, int bytelanes)
{
struct nubus_board *board;
- struct nubus_board **boardp;
unsigned char *rp;
unsigned long dpat;
struct nubus_dir dir;
struct nubus_dirent ent;
+ int prev_resid = -1;
/* Move to the start of the format block */
rp = nubus_rom_addr(slot);
@@ -690,19 +729,19 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
/* Actually we should probably panic if this fails */
if ((board = kzalloc(sizeof(*board), GFP_ATOMIC)) == NULL)
- return NULL;
+ return;
board->fblock = rp;
/* Dump the format block for debugging purposes */
pr_debug("Slot %X, format block at 0x%p:\n", slot, rp);
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
rp = board->fblock;
board->slot = slot;
@@ -722,10 +761,10 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
/* Directory offset should be small and negative... */
if (!(board->doffset & 0x00FF0000))
- pr_warn("Dodgy doffset!\n");
+ pr_warn("Slot %X: Dodgy doffset!\n", slot);
dpat = nubus_get_rom(&rp, 4, bytelanes);
if (dpat != NUBUS_TEST_PATTERN)
- pr_warn("Wrong test pattern %08lx!\n", dpat);
+ pr_warn("Slot %X: Wrong test pattern %08lx!\n", slot, dpat);
/*
* I wonder how the CRC is meant to work -
@@ -742,53 +781,52 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
nubus_get_root_dir(board, &dir);
/* We're ready to rock */
- pr_info("Slot %X:\n", slot);
+ pr_debug("Slot %X resources:\n", slot);
/* Each slot should have one board resource and any number of
- functional resources. So we'll fill in some fields in the
- struct nubus_board from the board resource, then walk down
- the list of functional resources, spinning out a nubus_dev
- for each of them. */
+ * functional resources. So we'll fill in some fields in the
+ * struct nubus_board from the board resource, then walk down
+ * the list of functional resources, spinning out a nubus_rsrc
+ * for each of them.
+ */
if (nubus_readdir(&dir, &ent) == -1) {
/* We can't have this! */
- pr_err("Board resource not found!\n");
- return NULL;
- } else {
- pr_info(" Board resource:\n");
- nubus_get_board_resource(board, slot, &ent);
+ pr_err("Slot %X: Board resource not found!\n", slot);
+ kfree(board);
+ return;
}
+ if (ent.type < 1 || ent.type > 127)
+ pr_warn("Slot %X: Board resource ID is invalid!\n", slot);
+
+ board->procdir = nubus_proc_add_board(board);
+
+ nubus_get_board_resource(board, slot, &ent);
+
while (nubus_readdir(&dir, &ent) != -1) {
- struct nubus_dev *dev;
- struct nubus_dev **devp;
+ struct nubus_rsrc *fres;
- dev = nubus_get_functional_resource(board, slot, &ent);
- if (dev == NULL)
+ fres = nubus_get_functional_resource(board, slot, &ent);
+ if (fres == NULL)
continue;
- /* We zeroed this out above */
- if (board->first_dev == NULL)
- board->first_dev = dev;
+ /* Resources should appear in ascending ID order. This sanity
+ * check prevents duplicate resource IDs.
+ */
+ if (fres->resid <= prev_resid) {
+ kfree(fres);
+ continue;
+ }
+ prev_resid = fres->resid;
- /* Put it on the global NuBus device chain. Keep entries in order. */
- for (devp = &nubus_devices; *devp != NULL;
- devp = &((*devp)->next))
- /* spin */;
- *devp = dev;
- dev->next = NULL;
+ list_add_tail(&fres->list, &nubus_func_rsrcs);
}
- /* Put it on the global NuBus board chain. Keep entries in order. */
- for (boardp = &nubus_boards; *boardp != NULL;
- boardp = &((*boardp)->next))
- /* spin */;
- *boardp = board;
- board->next = NULL;
-
- return board;
+ if (nubus_device_register(board))
+ put_device(&board->dev);
}
-void __init nubus_probe_slot(int slot)
+static void __init nubus_probe_slot(int slot)
{
unsigned char dp;
unsigned char *rp;
@@ -796,11 +834,8 @@ void __init nubus_probe_slot(int slot)
rp = nubus_rom_addr(slot);
for (i = 4; i; i--) {
- int card_present;
-
rp--;
- card_present = hwreg_present(rp);
- if (!card_present)
+ if (!hwreg_present(rp))
continue;
dp = *rp;
@@ -822,10 +857,11 @@ void __init nubus_probe_slot(int slot)
}
}
-void __init nubus_scan_bus(void)
+static void __init nubus_scan_bus(void)
{
int slot;
+ pr_info("NuBus: Scanning NuBus slots.\n");
for (slot = 9; slot < 15; slot++) {
nubus_probe_slot(slot);
}
@@ -833,14 +869,16 @@ void __init nubus_scan_bus(void)
static int __init nubus_init(void)
{
+ int err;
+
if (!MACH_IS_MAC)
return 0;
- pr_info("NuBus: Scanning NuBus slots.\n");
- nubus_devices = NULL;
- nubus_boards = NULL;
- nubus_scan_bus();
nubus_proc_init();
+ err = nubus_bus_register();
+ if (err)
+ return err;
+ nubus_scan_bus();
return 0;
}
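[Editor's note] The removed nubus_find_device/find_type/find_slot walkers give way to a simple iterator pair over the new nubus_func_rsrcs list (proc.c below consumes it through what is presumably a for_each_func_rsrc wrapper macro). A sketch of the equivalent search; NUBUS_CAT_NETWORK and NUBUS_TYPE_ETHERNET are pre-existing resource IDs, the function itself is illustrative:

#include <linux/nubus.h>

static struct nubus_rsrc *example_find_ethernet(void)
{
	struct nubus_rsrc *fres;

	for (fres = nubus_first_rsrc_or_null(); fres;
	     fres = nubus_next_rsrc_or_null(fres))
		if (fres->category == NUBUS_CAT_NETWORK &&
		    fres->type == NUBUS_TYPE_ETHERNET)
			return fres;
	return NULL;	/* no Ethernet functional resource found */
}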
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index 004a122ac0ff..c2e5a7e6bd3e 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -11,39 +11,37 @@
structure in /proc analogous to the structure of the NuBus ROM
resources.
- Therefore each NuBus device is in fact a directory, which may in
- turn contain subdirectories. The "files" correspond to NuBus
- resource records. For those types of records which we know how to
- convert to formats that are meaningful to userspace (mostly just
- icons) these files will provide "cooked" data. Otherwise they will
- simply provide raw access (read-only of course) to the ROM. */
+ Therefore each board function gets a directory, which may in turn
+ contain subdirectories. Each slot resource is a file. Unrecognized
+ resources are empty files, since every resource ID requires a special
+ case (e.g. if the resource ID implies a directory or block, then its
+ value has to be interpreted as a slot ROM pointer etc.).
+ */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/nubus.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
-
#include <linux/uaccess.h>
#include <asm/byteorder.h>
+/*
+ * /proc/bus/nubus/devices stuff
+ */
+
static int
nubus_devices_proc_show(struct seq_file *m, void *v)
{
- struct nubus_dev *dev = nubus_devices;
-
- while (dev) {
- seq_printf(m, "%x\t%04x %04x %04x %04x",
- dev->board->slot,
- dev->category,
- dev->type,
- dev->dr_sw,
- dev->dr_hw);
- seq_printf(m, "\t%08lx\n", dev->board->slot_addr);
- dev = dev->next;
- }
+ struct nubus_rsrc *fres;
+
+ for_each_func_rsrc(fres)
+ seq_printf(m, "%x\t%04x %04x %04x %04x\t%08lx\n",
+ fres->board->slot, fres->category, fres->type,
+ fres->dr_sw, fres->dr_hw, fres->board->slot_addr);
return 0;
}
@@ -61,174 +59,163 @@ static const struct file_operations nubus_devices_proc_fops = {
static struct proc_dir_entry *proc_bus_nubus_dir;
-static const struct file_operations nubus_proc_subdir_fops = {
-#warning Need to set some I/O handlers here
-};
+/*
+ * /proc/bus/nubus/x/ stuff
+ */
-static void nubus_proc_subdir(struct nubus_dev* dev,
- struct proc_dir_entry* parent,
- struct nubus_dir* dir)
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board)
{
- struct nubus_dirent ent;
-
- /* Some of these are directories, others aren't */
- while (nubus_readdir(dir, &ent) != -1) {
- char name[8];
- struct proc_dir_entry* e;
-
- sprintf(name, "%x", ent.type);
- e = proc_create(name, S_IFREG | S_IRUGO | S_IWUSR, parent,
- &nubus_proc_subdir_fops);
- if (!e)
- return;
- }
+ char name[2];
+
+ if (!proc_bus_nubus_dir)
+ return NULL;
+ snprintf(name, sizeof(name), "%x", board->slot);
+ return proc_mkdir(name, proc_bus_nubus_dir);
}
-/* Can't do this recursively since the root directory is structured
- somewhat differently from the subdirectories */
-static void nubus_proc_populate(struct nubus_dev* dev,
- struct proc_dir_entry* parent,
- struct nubus_dir* root)
+/* The PDE private data for any directory under /proc/bus/nubus/x/
+ * is the bytelanes value for the board in slot x.
+ */
+
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board)
{
- struct nubus_dirent ent;
-
- /* We know these are all directories (board resource + one or
- more functional resources) */
- while (nubus_readdir(root, &ent) != -1) {
- char name[8];
- struct proc_dir_entry* e;
- struct nubus_dir dir;
-
- sprintf(name, "%x", ent.type);
- e = proc_mkdir(name, parent);
- if (!e) return;
-
- /* And descend */
- if (nubus_get_subdir(&ent, &dir) == -1) {
- /* This shouldn't happen */
- printk(KERN_ERR "NuBus root directory node %x:%x has no subdir!\n",
- dev->board->slot, ent.type);
- continue;
- } else {
- nubus_proc_subdir(dev, e, &dir);
- }
- }
+ char name[9];
+ int lanes = board->lanes;
+
+ if (!procdir)
+ return NULL;
+ snprintf(name, sizeof(name), "%x", ent->type);
+ return proc_mkdir_data(name, 0555, procdir, (void *)lanes);
}
-int nubus_proc_attach_device(struct nubus_dev *dev)
+/* The PDE private data for a file under /proc/bus/nubus/x/ is a pointer to
+ * an instance of the following structure, which gives the location and size
+ * of the resource data in the slot ROM. For slot resources which hold only a
+ * small integer, this integer value is stored directly and size is set to 0.
+ * A NULL private data pointer indicates an unrecognized resource.
+ */
+
+struct nubus_proc_pde_data {
+ unsigned char *res_ptr;
+ unsigned int res_size;
+};
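To make the three cases concrete, here is a minimal sketch (my illustration, not part of the patch) of how a reader of this private data would branch; nubus_proc_pde_data is the structure defined above and pde_data_cases is a hypothetical name:

/* Sketch only: distinguishing the three private-data cases.
 * Mirrors the logic of nubus_proc_rsrc_show() below.
 */
static void pde_data_cases(struct nubus_proc_pde_data *pde_data)
{
	if (!pde_data) {
		/* unrecognized resource: the proc file reads as empty */
		return;
	}
	if (pde_data->res_size == 0) {
		/* small-integer resource: the 24-bit value is stored
		 * directly in res_ptr rather than pointed to
		 */
		unsigned int value = (unsigned int)pde_data->res_ptr;

		pr_info("immediate value %06x\n", value);
		return;
	}
	/* otherwise res_ptr/res_size locate res_size bytes of slot ROM,
	 * to be read through the board's bytelanes mask
	 */
}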
+
+static struct nubus_proc_pde_data *
+nubus_proc_alloc_pde_data(unsigned char *ptr, unsigned int size)
{
- struct proc_dir_entry *e;
- struct nubus_dir root;
- char name[8];
-
- if (dev == NULL) {
- printk(KERN_ERR
- "NULL pointer in nubus_proc_attach_device, shoot the programmer!\n");
- return -1;
- }
-
- if (dev->board == NULL) {
- printk(KERN_ERR
- "NULL pointer in nubus_proc_attach_device, shoot the programmer!\n");
- printk("dev = %p, dev->board = %p\n", dev, dev->board);
- return -1;
- }
-
- /* Create a directory */
- sprintf(name, "%x", dev->board->slot);
- e = dev->procdir = proc_mkdir(name, proc_bus_nubus_dir);
- if (!e)
- return -ENOMEM;
+ struct nubus_proc_pde_data *pde_data;
- /* Now recursively populate it with files */
- nubus_get_root_dir(dev->board, &root);
- nubus_proc_populate(dev, e, &root);
+ pde_data = kmalloc(sizeof(*pde_data), GFP_KERNEL);
+ if (!pde_data)
+ return NULL;
- return 0;
+ pde_data->res_ptr = ptr;
+ pde_data->res_size = size;
+ return pde_data;
}
-EXPORT_SYMBOL(nubus_proc_attach_device);
-/*
- * /proc/nubus stuff
- */
-static int nubus_proc_show(struct seq_file *m, void *v)
+static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
{
- const struct nubus_board *board = v;
+ struct inode *inode = m->private;
+ struct nubus_proc_pde_data *pde_data;
- /* Display header on line 1 */
- if (v == SEQ_START_TOKEN)
- seq_puts(m, "Nubus devices found:\n");
- else
- seq_printf(m, "Slot %X: %s\n", board->slot, board->name);
+ pde_data = PDE_DATA(inode);
+ if (!pde_data)
+ return 0;
+
+ if (pde_data->res_size > m->size)
+ return -EFBIG;
+
+ if (pde_data->res_size) {
+ int lanes = (int)proc_get_parent_data(inode);
+ struct nubus_dirent ent;
+
+ if (!lanes)
+ return 0;
+
+ ent.mask = lanes;
+ ent.base = pde_data->res_ptr;
+ ent.data = 0;
+ nubus_seq_write_rsrc_mem(m, &ent, pde_data->res_size);
+ } else {
+ unsigned int data = (unsigned int)pde_data->res_ptr;
+
+ seq_putc(m, data >> 16);
+ seq_putc(m, data >> 8);
+ seq_putc(m, data >> 0);
+ }
return 0;
}
-static void *nubus_proc_start(struct seq_file *m, loff_t *_pos)
+static int nubus_proc_rsrc_open(struct inode *inode, struct file *file)
{
- struct nubus_board *board;
- unsigned pos;
-
- if (*_pos > LONG_MAX)
- return NULL;
- pos = *_pos;
- if (pos == 0)
- return SEQ_START_TOKEN;
- for (board = nubus_boards; board; board = board->next)
- if (--pos == 0)
- break;
- return board;
+ return single_open(file, nubus_proc_rsrc_show, inode);
}
-static void *nubus_proc_next(struct seq_file *p, void *v, loff_t *_pos)
+static const struct file_operations nubus_proc_rsrc_fops = {
+ .open = nubus_proc_rsrc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size)
{
- /* Walk the list of NuBus boards */
- struct nubus_board *board = v;
-
- ++*_pos;
- if (v == SEQ_START_TOKEN)
- board = nubus_boards;
- else if (board)
- board = board->next;
- return board;
+ char name[9];
+ struct nubus_proc_pde_data *pde_data;
+
+ if (!procdir)
+ return;
+
+ snprintf(name, sizeof(name), "%x", ent->type);
+ if (size)
+ pde_data = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
+ else
+ pde_data = NULL;
+ proc_create_data(name, S_IFREG | 0444, procdir,
+ &nubus_proc_rsrc_fops, pde_data);
}
-static void nubus_proc_stop(struct seq_file *p, void *v)
+void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
+ char name[9];
+ unsigned char *data = (unsigned char *)ent->data;
+
+ if (!procdir)
+ return;
+
+ snprintf(name, sizeof(name), "%x", ent->type);
+ proc_create_data(name, S_IFREG | 0444, procdir,
+ &nubus_proc_rsrc_fops,
+ nubus_proc_alloc_pde_data(data, 0));
}
-static const struct seq_operations nubus_proc_seqops = {
- .start = nubus_proc_start,
- .next = nubus_proc_next,
- .stop = nubus_proc_stop,
- .show = nubus_proc_show,
-};
+/*
+ * /proc/nubus stuff
+ */
static int nubus_proc_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &nubus_proc_seqops);
+ return single_open(file, nubus_proc_show, NULL);
}
static const struct file_operations nubus_proc_fops = {
.open = nubus_proc_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = single_release,
};
-void __init proc_bus_nubus_add_devices(void)
-{
- struct nubus_dev *dev;
-
- for(dev = nubus_devices; dev; dev = dev->next)
- nubus_proc_attach_device(dev);
-}
-
void __init nubus_proc_init(void)
{
proc_create("nubus", 0, NULL, &nubus_proc_fops);
- if (!MACH_IS_MAC)
- return;
proc_bus_nubus_dir = proc_mkdir("bus/nubus", NULL);
+ if (!proc_bus_nubus_dir)
+ return;
proc_create("devices", 0, proc_bus_nubus_dir, &nubus_devices_proc_fops);
- proc_bus_nubus_add_devices();
}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index e949e3302af4..c586bcdb5190 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -211,12 +211,12 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
return ret;
}
-static int btt_log_read_pair(struct arena_info *arena, u32 lane,
- struct log_entry *ent)
+static int btt_log_group_read(struct arena_info *arena, u32 lane,
+ struct log_group *log)
{
return arena_read_bytes(arena,
- arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
- 2 * LOG_ENT_SIZE, 0);
+ arena->logoff + (lane * LOG_GRP_SIZE), log,
+ LOG_GRP_SIZE, 0);
}
static struct dentry *debugfs_root;
@@ -256,6 +256,8 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
+ debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
+ debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}
static void btt_debugfs_init(struct btt *btt)
@@ -274,6 +276,11 @@ static void btt_debugfs_init(struct btt *btt)
}
}
+static u32 log_seq(struct log_group *log, int log_idx)
+{
+ return le32_to_cpu(log->ent[log_idx].seq);
+}
+
/*
* This function accepts two log entries, and uses the
* sequence number to find the 'older' entry.
@@ -283,8 +290,10 @@ static void btt_debugfs_init(struct btt *btt)
*
* TODO The logic feels a bit kludge-y. make it better..
*/
-static int btt_log_get_old(struct log_entry *ent)
+static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
+ int idx0 = a->log_index[0];
+ int idx1 = a->log_index[1];
int old;
/*
@@ -292,23 +301,23 @@ static int btt_log_get_old(struct log_entry *ent)
* the next time, the following logic works out to put this
* (next) entry into [1]
*/
- if (ent[0].seq == 0) {
- ent[0].seq = cpu_to_le32(1);
+ if (log_seq(log, idx0) == 0) {
+ log->ent[idx0].seq = cpu_to_le32(1);
return 0;
}
- if (ent[0].seq == ent[1].seq)
+ if (log_seq(log, idx0) == log_seq(log, idx1))
return -EINVAL;
- if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
+ if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
return -EINVAL;
- if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
- if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
+ if (log_seq(log, idx0) < log_seq(log, idx1)) {
+ if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
old = 0;
else
old = 1;
} else {
- if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
+ if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
old = 1;
else
old = 0;
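A worked example may help here (my illustration, not from the patch). Sequence numbers cycle 1 -> 2 -> 3 -> 1, so "older" cannot be read off by magnitude alone:

/* With idx0/idx1 naming the two valid slots:
 *   seq = (1, 2): 2 - 1 == 1, slot idx0 is older
 *   seq = (3, 1): 3 - 1 == 2, slot idx0 is older (1 has wrapped past 3)
 *   seq = (1, 3): 3 - 1 == 2, slot idx1 is older (1 follows 3)
 *   seq = (2, 2): equal sequence numbers, corrupt, -EINVAL
 *   seq = (0, x): slot idx0 is uninitialized and becomes the next target
 */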
@@ -328,17 +337,18 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
{
int ret;
int old_ent, ret_ent;
- struct log_entry log[2];
+ struct log_group log;
- ret = btt_log_read_pair(arena, lane, log);
+ ret = btt_log_group_read(arena, lane, &log);
if (ret)
return -EIO;
- old_ent = btt_log_get_old(log);
+ old_ent = btt_log_get_old(arena, &log);
if (old_ent < 0 || old_ent > 1) {
dev_err(to_dev(arena),
"log corruption (%d): lane %d seq [%d, %d]\n",
- old_ent, lane, log[0].seq, log[1].seq);
+ old_ent, lane, log.ent[arena->log_index[0]].seq,
+ log.ent[arena->log_index[1]].seq);
/* TODO set error state? */
return -EIO;
}
@@ -346,7 +356,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
ret_ent = (old_flag ? old_ent : (1 - old_ent));
if (ent != NULL)
- memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);
+ memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);
return ret_ent;
}
@@ -360,17 +370,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane,
u32 sub, struct log_entry *ent, unsigned long flags)
{
int ret;
- /*
- * Ignore the padding in log_entry for calculating log_half.
- * The entry is 'committed' when we write the sequence number,
- * and we want to ensure that that is the last thing written.
- * We don't bother writing the padding as that would be extra
- * media wear and write amplification
- */
- unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
- u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
+ u32 group_slot = arena->log_index[sub];
+ unsigned int log_half = LOG_ENT_SIZE / 2;
void *src = ent;
+ u64 ns_off;
+ ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
+ (group_slot * LOG_ENT_SIZE);
/* split the 16B write into atomic, durable halves */
ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
if (ret)
@@ -453,7 +459,7 @@ static int btt_log_init(struct arena_info *arena)
{
size_t logsize = arena->info2off - arena->logoff;
size_t chunk_size = SZ_4K, offset = 0;
- struct log_entry log;
+ struct log_entry ent;
void *zerobuf;
int ret;
u32 i;
@@ -485,11 +491,11 @@ static int btt_log_init(struct arena_info *arena)
}
for (i = 0; i < arena->nfree; i++) {
- log.lba = cpu_to_le32(i);
- log.old_map = cpu_to_le32(arena->external_nlba + i);
- log.new_map = cpu_to_le32(arena->external_nlba + i);
- log.seq = cpu_to_le32(LOG_SEQ_INIT);
- ret = __btt_log_write(arena, i, 0, &log, 0);
+ ent.lba = cpu_to_le32(i);
+ ent.old_map = cpu_to_le32(arena->external_nlba + i);
+ ent.new_map = cpu_to_le32(arena->external_nlba + i);
+ ent.seq = cpu_to_le32(LOG_SEQ_INIT);
+ ret = __btt_log_write(arena, i, 0, &ent, 0);
if (ret)
goto free;
}
@@ -594,6 +600,123 @@ static int btt_freelist_init(struct arena_info *arena)
return 0;
}
+static bool ent_is_padding(struct log_entry *ent)
+{
+ return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
+ && (ent->seq == 0);
+}
+
+/*
+ * Detecting valid log indices: We read a log group (see the comments in btt.h
+ * for a description of a 'log_group' and its 'slots'), and iterate over its
+ * four slots. We expect that a padding slot will be all-zeroes, and use this
+ * to detect a padding slot vs. an actual entry.
+ *
+ * If a log_group is in the initial state, i.e. hasn't been used since the
+ * creation of this BTT layout, it will have three of the four slots with
+ * zeroes. We skip over these log_groups for the detection of log_index. If
+ * all log_groups are in the initial state (i.e. the BTT has never been
+ * written to), it is safe to assume the 'new format' of log entries in slots
+ * (0, 1).
+ */
+static int log_set_indices(struct arena_info *arena)
+{
+ bool idx_set = false, initial_state = true;
+ int ret, log_index[2] = {-1, -1};
+ u32 i, j, next_idx = 0;
+ struct log_group log;
+ u32 pad_count = 0;
+
+ for (i = 0; i < arena->nfree; i++) {
+ ret = btt_log_group_read(arena, i, &log);
+ if (ret < 0)
+ return ret;
+
+ for (j = 0; j < 4; j++) {
+ if (!idx_set) {
+ if (ent_is_padding(&log.ent[j])) {
+ pad_count++;
+ continue;
+ } else {
+ /* Skip if index has been recorded */
+ if ((next_idx == 1) &&
+ (j == log_index[0]))
+ continue;
+ /* valid entry, record index */
+ log_index[next_idx] = j;
+ next_idx++;
+ }
+ if (next_idx == 2) {
+ /* two valid entries found */
+ idx_set = true;
+ } else if (next_idx > 2) {
+ /* too many valid indices */
+ return -ENXIO;
+ }
+ } else {
+ /*
+ * once the indices have been set, just verify
+ * that all subsequent log groups are either in
+ * their initial state or follow the same
+ * indices.
+ */
+ if (j == log_index[0]) {
+ /* entry must be 'valid' */
+ if (ent_is_padding(&log.ent[j]))
+ return -ENXIO;
+ } else if (j == log_index[1]) {
+ ;
+ /*
+ * log_index[1] can be padding if the
+ * lane never got used and it is still
+ * in the initial state (three 'padding'
+ * entries)
+ */
+ } else {
+ /* entry must be invalid (padding) */
+ if (!ent_is_padding(&log.ent[j]))
+ return -ENXIO;
+ }
+ }
+ }
+ /*
+ * If any of the log_groups has more than one valid,
+ * non-padding entry, then we are no longer in the
+ * initial_state
+ */
+ if (pad_count < 3)
+ initial_state = false;
+ pad_count = 0;
+ }
+
+ if (!initial_state && !idx_set)
+ return -ENXIO;
+
+ /*
+ * If all the entries in the log were in the initial state,
+ * assume new padding scheme
+ */
+ if (initial_state)
+ log_index[1] = 1;
+
+ /*
+ * Only allow the known permutations of log/padding indices,
+ * i.e. (0, 1), and (0, 2)
+ */
+ if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
+ ; /* known index possibilities */
+ else {
+ dev_err(to_dev(arena), "Found an unknown padding scheme\n");
+ return -ENXIO;
+ }
+
+ arena->log_index[0] = log_index[0];
+ arena->log_index[1] = log_index[1];
+ dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
+ dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
+ return 0;
+}
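For illustration (not part of the patch), the detection above boils down to classifying each group's four slots, where E is a valid entry and P an all-zero padding slot:

/*   { E, E, P, P }  ->  log_index = (0, 1), current format
 *   { E, P, E, P }  ->  log_index = (0, 2), pre-4.15 format
 *   { E, P, P, P }  ->  initial state, skipped; if every group looks
 *                       like this, (0, 1) is assumed
 *   anything else   ->  -ENXIO, unknown padding scheme
 */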
+
static int btt_rtt_init(struct arena_info *arena)
{
arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
@@ -650,8 +773,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
available -= 2 * BTT_PG_SIZE;
/* The log takes a fixed amount of space based on nfree */
- logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
- BTT_PG_SIZE);
+ logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
available -= logsize;
/* Calculate optimal split between map and data area */
@@ -668,6 +790,10 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
arena->mapoff = arena->dataoff + datasize;
arena->logoff = arena->mapoff + mapsize;
arena->info2off = arena->logoff + logsize;
+
+ /* Default log indices are (0,1) */
+ arena->log_index[0] = 0;
+ arena->log_index[1] = 1;
return arena;
}
@@ -758,6 +884,13 @@ static int discover_arenas(struct btt *btt)
arena->external_lba_start = cur_nlba;
parse_arena_meta(arena, super, cur_off);
+ ret = log_set_indices(arena);
+ if (ret) {
+ dev_err(to_dev(arena),
+ "Unable to deduce log/padding indices\n");
+ goto out;
+ }
+
mutex_init(&arena->err_lock);
ret = btt_freelist_init(arena);
if (ret)
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index 578c2057524d..db3cb6d4d0d4 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -27,6 +27,7 @@
#define MAP_ERR_MASK (1 << MAP_ERR_SHIFT)
#define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT)))
#define MAP_ENT_NORMAL 0xC0000000
+#define LOG_GRP_SIZE sizeof(struct log_group)
#define LOG_ENT_SIZE sizeof(struct log_entry)
#define ARENA_MIN_SIZE (1UL << 24) /* 16 MB */
#define ARENA_MAX_SIZE (1ULL << 39) /* 512 GB */
@@ -50,12 +51,52 @@ enum btt_init_state {
INIT_READY
};
+/*
+ * A log group represents one log 'lane', and consists of four log entries.
+ * Two of the four entries are valid entries, and the remaining two are
+ * padding. Due to an old bug in the padding location, we need to perform a
+ * test to determine the padding scheme being used, and use that scheme
+ * thereafter.
+ *
+ * In kernels prior to 4.15, 'log group' would have actual log entries at
+ * indices (0, 2) and padding at indices (1, 3), whereas the correct/updated
+ * format has log entries at indices (0, 1) and padding at indices (2, 3).
+ *
+ * Old (pre 4.15) format:
+ * +-----------------+-----------------+
+ * | ent[0] | ent[1] |
+ * | 16B | 16B |
+ * | lba/old/new/seq | pad |
+ * +-----------------------------------+
+ * | ent[2] | ent[3] |
+ * | 16B | 16B |
+ * | lba/old/new/seq | pad |
+ * +-----------------+-----------------+
+ *
+ * New format:
+ * +-----------------+-----------------+
+ * | ent[0] | ent[1] |
+ * | 16B | 16B |
+ * | lba/old/new/seq | lba/old/new/seq |
+ * +-----------------------------------+
+ * | ent[2] | ent[3] |
+ * | 16B | 16B |
+ * | pad | pad |
+ * +-----------------+-----------------+
+ *
+ * We detect during start-up which format is in use, and set
+ * arena->log_index[(0, 1)] with the detected format.
+ */
+
struct log_entry {
__le32 lba;
__le32 old_map;
__le32 new_map;
__le32 seq;
- __le64 padding[2];
+};
+
+struct log_group {
+ struct log_entry ent[4];
};
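With these definitions, struct log_entry shrinks to 16 bytes and struct log_group is 64 bytes, so a slot's media offset follows directly. A standalone sketch of the arithmetic used by __btt_log_write() (log_slot_offset is my name, not a kernel helper):

#include <stdint.h>

#define LOG_ENT_SIZE 16	/* sizeof(struct log_entry): four __le32 */
#define LOG_GRP_SIZE 64	/* sizeof(struct log_group): four entries */

/* Byte offset of a log slot, as computed in __btt_log_write():
 * logoff + lane * LOG_GRP_SIZE + slot * LOG_ENT_SIZE.
 */
static uint64_t log_slot_offset(uint64_t logoff, uint32_t lane,
				uint32_t slot)
{
	return logoff + (uint64_t)lane * LOG_GRP_SIZE +
	       (uint64_t)slot * LOG_ENT_SIZE;
}
/* e.g. lane 2, slot 1: logoff + 2 * 64 + 1 * 16 = logoff + 144 */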
struct btt_sb {
@@ -125,6 +166,8 @@ struct aligned_lock {
* @list: List head for list of arenas
* @debugfs_dir: Debugfs dentry
* @flags: Arena flags - may signify error states.
+ * @err_lock: Mutex for synchronizing error clearing.
+ * @log_index: Indices of the valid log entries in a log_group
*
* arena_info is a per-arena handle. Once an arena is narrowed down for an
* IO, this struct is passed around for the duration of the IO.
@@ -157,6 +200,7 @@ struct arena_info {
/* Arena flags */
u32 flags;
struct mutex err_lock;
+ int log_index[2];
};
/**
@@ -176,6 +220,7 @@ struct arena_info {
* @init_lock: Mutex used for the BTT initialization
* @init_state: Flag describing the initialization state for the BTT
* @num_arenas: Number of arenas in the BTT instance
+ * @phys_bb: Pointer to the namespace's badblocks structure
*/
struct btt {
struct gendisk *btt_disk;
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 65cc171c721d..2adada1a5855 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -364,9 +364,9 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
- unsigned long align;
enum nd_pfn_mode mode;
struct nd_namespace_io *nsio;
+ unsigned long align, start_pad;
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
struct nd_namespace_common *ndns = nd_pfn->ndns;
const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);
@@ -410,6 +410,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
align = le32_to_cpu(pfn_sb->align);
offset = le64_to_cpu(pfn_sb->dataoff);
+ start_pad = le32_to_cpu(pfn_sb->start_pad);
if (align == 0)
align = 1UL << ilog2(offset);
mode = le32_to_cpu(pfn_sb->mode);
@@ -468,7 +469,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
return -EBUSY;
}
- if ((align && !IS_ALIGNED(offset, align))
+ if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
|| !IS_ALIGNED(offset, PAGE_SIZE)) {
dev_err(&nd_pfn->dev,
"bad offset: %#llx dax disabled align: %#lx\n",
@@ -582,6 +583,12 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
return altmap;
}
+static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
+{
+ return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys),
+ ALIGN_DOWN(phys, nd_pfn->align));
+}
+
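A worked example of this helper (my numbers; assumes the 128 MiB memory section size of x86_64 and a 2 MiB nd_pfn->align):

/*   phys                          = 0x148200000  (5250 MiB)
 *   PHYS_SECTION_ALIGN_DOWN(phys) = 0x148000000  (5248 MiB)
 *   ALIGN_DOWN(phys, 2 MiB)       = 0x148200000  (already 2M aligned)
 *   min of the two                = 0x148000000
 * so the region end is truncated back to a boundary that satisfies
 * both constraints.
 */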
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
@@ -637,13 +644,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
start = nsio->res.start;
size = PHYS_SECTION_ALIGN_UP(start + size) - start;
if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED) {
+ IORES_DESC_NONE) == REGION_MIXED
+ || !IS_ALIGNED(start + resource_size(&nsio->res),
+ nd_pfn->align)) {
size = resource_size(&nsio->res);
- end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+ end_trunc = start + size - phys_pmem_align_down(nd_pfn,
+ start + size);
}
if (start_pad + end_trunc)
- dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+ dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
dev_name(&ndns->dev), start_pad + end_trunc);
/*
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index a25fd43650ad..441e67e3a9d7 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
obj-$(CONFIG_NVME_CORE) += nvme-core.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
@@ -6,6 +9,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
nvme-core-y := core.o
+nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f837d666cbd4..e8104871cbbf 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -29,6 +29,9 @@
#include <linux/pm_qos.h>
#include <asm/unaligned.h>
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
#include "nvme.h"
#include "fabrics.h"
@@ -65,9 +68,26 @@ static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
+/*
+ * nvme_wq - hosts nvme related works that are not reset or delete
+ * nvme_reset_wq - hosts nvme reset works
+ * nvme_delete_wq - hosts nvme delete works
+ *
+ * nvme_wq will host works such as scan, aen handling, fw activation,
+ * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
+ * runs reset works which also flush works hosted on nvme_wq for
+ * serialization purposes. nvme_delete_wq hosts controller deletion
+ * works which flush reset works for serialization.
+ */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);
+struct workqueue_struct *nvme_reset_wq;
+EXPORT_SYMBOL_GPL(nvme_reset_wq);
+
+struct workqueue_struct *nvme_delete_wq;
+EXPORT_SYMBOL_GPL(nvme_delete_wq);
+
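The serialization constraint in the comment above can be sketched as follows (my illustration, not from the patch): a work item must not flush the workqueue it is running on, so each level of flushing needs its own queue:

/* reset_work runs on nvme_reset_wq and may do
 *     flush_work(&ctrl->scan_work);    scan_work is on nvme_wq: fine
 * delete_work runs on nvme_delete_wq and may do
 *     flush_work(&ctrl->reset_work);   reset_work is on nvme_reset_wq: fine
 * With a single shared queue, either flush could wait behind the very
 * work doing the flushing and deadlock (or at least trip lockdep).
 */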
static DEFINE_IDA(nvme_subsystems_ida);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);
@@ -89,13 +109,13 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
return -EBUSY;
- if (!queue_work(nvme_wq, &ctrl->reset_work))
+ if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
return -EBUSY;
return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
-static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
int ret;
@@ -104,6 +124,7 @@ static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
flush_work(&ctrl->reset_work);
return ret;
}
+EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_delete_ctrl_work(struct work_struct *work)
{
@@ -122,7 +143,7 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
return -EBUSY;
- if (!queue_work(nvme_wq, &ctrl->delete_work))
+ if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
return -EBUSY;
return 0;
}
@@ -157,13 +178,20 @@ static blk_status_t nvme_error_status(struct request *req)
return BLK_STS_OK;
case NVME_SC_CAP_EXCEEDED:
return BLK_STS_NOSPC;
+ case NVME_SC_LBA_RANGE:
+ return BLK_STS_TARGET;
+ case NVME_SC_BAD_ATTRIBUTES:
case NVME_SC_ONCS_NOT_SUPPORTED:
+ case NVME_SC_INVALID_OPCODE:
+ case NVME_SC_INVALID_FIELD:
+ case NVME_SC_INVALID_NS:
return BLK_STS_NOTSUPP;
case NVME_SC_WRITE_FAULT:
case NVME_SC_READ_ERROR:
case NVME_SC_UNWRITTEN_BLOCK:
case NVME_SC_ACCESS_DENIED:
case NVME_SC_READ_ONLY:
+ case NVME_SC_COMPARE_FAILED:
return BLK_STS_MEDIUM;
case NVME_SC_GUARD_CHECK:
case NVME_SC_APPTAG_CHECK:
@@ -190,8 +218,12 @@ static inline bool nvme_req_needs_retry(struct request *req)
void nvme_complete_rq(struct request *req)
{
- if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
- if (nvme_req_needs_failover(req)) {
+ blk_status_t status = nvme_error_status(req);
+
+ trace_nvme_complete_rq(req);
+
+ if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
+ if (nvme_req_needs_failover(req, status)) {
nvme_failover_req(req);
return;
}
@@ -202,8 +234,7 @@ void nvme_complete_rq(struct request *req)
return;
}
}
-
- blk_mq_end_request(req, nvme_error_status(req));
+ blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
@@ -232,6 +263,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
old_state = ctrl->state;
switch (new_state) {
+ case NVME_CTRL_ADMIN_ONLY:
+ switch (old_state) {
+ case NVME_CTRL_RECONNECTING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
case NVME_CTRL_LIVE:
switch (old_state) {
case NVME_CTRL_NEW:
@@ -247,6 +287,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
changed = true;
/* FALLTHRU */
default:
@@ -266,6 +307,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_DELETING:
switch (old_state) {
case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
case NVME_CTRL_RESETTING:
case NVME_CTRL_RECONNECTING:
changed = true;
@@ -591,6 +633,10 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
}
cmd->common.command_id = req->tag;
+ if (ns)
+ trace_nvme_setup_nvm_cmd(req->q->id, cmd);
+ else
+ trace_nvme_setup_admin_cmd(cmd);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -1217,16 +1263,27 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
#ifdef CONFIG_NVME_MULTIPATH
/* should never be called due to GENHD_FL_HIDDEN */
if (WARN_ON_ONCE(ns->head->disk))
- return -ENXIO;
+ goto fail;
#endif
if (!kref_get_unless_zero(&ns->kref))
- return -ENXIO;
+ goto fail;
+ if (!try_module_get(ns->ctrl->ops->module))
+ goto fail_put_ns;
+
return 0;
+
+fail_put_ns:
+ nvme_put_ns(ns);
+fail:
+ return -ENXIO;
}
static void nvme_release(struct gendisk *disk, fmode_t mode)
{
- nvme_put_ns(disk->private_data);
+ struct nvme_ns *ns = disk->private_data;
+
+ module_put(ns->ctrl->ops->module);
+ nvme_put_ns(ns);
}
static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -1287,7 +1344,7 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl,
BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
NVME_DSM_MAX_RANGES);
- queue->limits.discard_alignment = size;
+ queue->limits.discard_alignment = 0;
queue->limits.discard_granularity = size;
blk_queue_max_discard_sectors(queue, UINT_MAX);
@@ -1335,6 +1392,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
struct nvme_ns *ns, struct nvme_id_ns *id)
{
sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
+ unsigned short bs = 1 << ns->lba_shift;
unsigned stream_alignment = 0;
if (ns->ctrl->nr_streams && ns->sws && ns->sgs)
@@ -1343,7 +1401,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
blk_mq_freeze_queue(disk->queue);
blk_integrity_unregister(disk);
- blk_queue_logical_block_size(disk->queue, 1 << ns->lba_shift);
+ blk_queue_logical_block_size(disk->queue, bs);
+ blk_queue_physical_block_size(disk->queue, bs);
+ blk_queue_io_min(disk->queue, bs);
+
if (ns->ms && !ns->ext &&
(ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
nvme_init_integrity(disk, ns->ms, ns->pi_type);
@@ -1705,7 +1766,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
- if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+ if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+ is_power_of_2(ctrl->max_hw_sectors))
blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
blk_queue_virt_boundary(q, ctrl->page_size - 1);
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
@@ -2047,6 +2109,22 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
+static int nvme_active_ctrls(struct nvme_subsystem *subsys)
+{
+ int count = 0;
+ struct nvme_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DEAD)
+ count++;
+ }
+ mutex_unlock(&subsys->lock);
+
+ return count;
+}
+
static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
struct nvme_subsystem *subsys, *found;
@@ -2085,7 +2163,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
* Verify that the subsystem actually supports multiple
* controllers, else bail out.
*/
- if (!(id->cmic & (1 << 1))) {
+ if (nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
dev_err(ctrl->device,
"ignoring ctrl due to duplicate subnqn (%s).\n",
found->subnqn);
@@ -2252,7 +2330,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
shutdown_timeout, 60);
if (ctrl->shutdown_timeout != shutdown_timeout)
- dev_warn(ctrl->device,
+ dev_info(ctrl->device,
"Shutdown timeout set to %u seconds\n",
ctrl->shutdown_timeout);
} else
@@ -2336,8 +2414,14 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
struct nvme_ctrl *ctrl =
container_of(inode->i_cdev, struct nvme_ctrl, cdev);
- if (ctrl->state != NVME_CTRL_LIVE)
+ switch (ctrl->state) {
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
+ break;
+ default:
return -EWOULDBLOCK;
+ }
+
file->private_data = ctrl;
return 0;
}
@@ -2601,6 +2685,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
static const char *const state_name[] = {
[NVME_CTRL_NEW] = "new",
[NVME_CTRL_LIVE] = "live",
+ [NVME_CTRL_ADMIN_ONLY] = "only-admin",
[NVME_CTRL_RESETTING] = "resetting",
[NVME_CTRL_RECONNECTING]= "reconnecting",
[NVME_CTRL_DELETING] = "deleting",
@@ -2869,7 +2954,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
nvme_set_queue_limits(ctrl, ns->queue);
- nvme_setup_streams_ns(ctrl, ns);
id = nvme_identify_ns(ctrl, nsid);
if (!id)
@@ -2880,6 +2964,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (nvme_init_ns_head(ns, nsid, id, &new))
goto out_free_id;
+ nvme_setup_streams_ns(ctrl, ns);
#ifdef CONFIG_NVME_MULTIPATH
/*
@@ -2965,8 +3050,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
return;
if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
- if (blk_get_integrity(ns->disk))
- blk_integrity_unregister(ns->disk);
nvme_mpath_remove_disk_links(ns);
sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_id_attr_group);
@@ -2974,6 +3057,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
nvme_nvm_unregister_sysfs(ns);
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
+ if (blk_get_integrity(ns->disk))
+ blk_integrity_unregister(ns->disk);
}
mutex_lock(&ns->ctrl->subsys->lock);
@@ -2986,6 +3071,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
mutex_unlock(&ns->ctrl->namespaces_mutex);
synchronize_srcu(&ns->head->srcu);
+ nvme_mpath_check_last_path(ns);
nvme_put_ns(ns);
}
@@ -3073,6 +3159,8 @@ static void nvme_scan_work(struct work_struct *work)
if (ctrl->state != NVME_CTRL_LIVE)
return;
+ WARN_ON_ONCE(!ctrl->tagset);
+
if (nvme_identify_ctrl(ctrl, &id))
return;
@@ -3093,8 +3181,7 @@ static void nvme_scan_work(struct work_struct *work)
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
/*
- * Do not queue new scan work when a controller is reset during
- * removal.
+ * Only queue new scan work when admin and IO queues are both alive
*/
if (ctrl->state == NVME_CTRL_LIVE)
queue_work(nvme_wq, &ctrl->scan_work);
@@ -3471,16 +3558,26 @@ EXPORT_SYMBOL_GPL(nvme_reinit_tagset);
int __init nvme_core_init(void)
{
- int result;
+ int result = -ENOMEM;
nvme_wq = alloc_workqueue("nvme-wq",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
if (!nvme_wq)
- return -ENOMEM;
+ goto out;
+
+ nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_reset_wq)
+ goto destroy_wq;
+
+ nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_delete_wq)
+ goto destroy_reset_wq;
result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
if (result < 0)
- goto destroy_wq;
+ goto destroy_delete_wq;
nvme_class = class_create(THIS_MODULE, "nvme");
if (IS_ERR(nvme_class)) {
@@ -3499,8 +3596,13 @@ destroy_class:
class_destroy(nvme_class);
unregister_chrdev:
unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+destroy_delete_wq:
+ destroy_workqueue(nvme_delete_wq);
+destroy_reset_wq:
+ destroy_workqueue(nvme_reset_wq);
destroy_wq:
destroy_workqueue(nvme_wq);
+out:
return result;
}
@@ -3510,6 +3612,8 @@ void nvme_core_exit(void)
class_destroy(nvme_subsys_class);
class_destroy(nvme_class);
unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+ destroy_workqueue(nvme_delete_wq);
+ destroy_workqueue(nvme_reset_wq);
destroy_workqueue(nvme_wq);
}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 76b4fe6816a0..5dd4ceefed8f 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -74,6 +74,7 @@ static struct nvmf_host *nvmf_host_default(void)
return NULL;
kref_init(&host->ref);
+ uuid_gen(&host->id);
snprintf(host->nqn, NVMF_NQN_SIZE,
"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
@@ -492,7 +493,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
*/
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
- if (!ops->create_ctrl)
+ if (!ops->create_ctrl || !ops->module)
return -EINVAL;
down_write(&nvmf_transports_rwsem);
@@ -738,11 +739,14 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
- if (uuid_parse(p, &hostid)) {
+ ret = uuid_parse(p, &hostid);
+ if (ret) {
pr_err("Invalid hostid %s\n", p);
ret = -EINVAL;
+ kfree(p);
goto out;
}
+ kfree(p);
break;
case NVMF_OPT_DUP_CONNECT:
opts->duplicate_connect = true;
@@ -868,32 +872,41 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
goto out_unlock;
}
+ if (!try_module_get(ops->module)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
ret = nvmf_check_required_opts(opts, ops->required_opts);
if (ret)
- goto out_unlock;
+ goto out_module_put;
ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
ops->allowed_opts | ops->required_opts);
if (ret)
- goto out_unlock;
+ goto out_module_put;
ctrl = ops->create_ctrl(dev, opts);
if (IS_ERR(ctrl)) {
ret = PTR_ERR(ctrl);
- goto out_unlock;
+ goto out_module_put;
}
if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
dev_warn(ctrl->device,
"controller returned incorrect NQN: \"%s\".\n",
ctrl->subsys->subnqn);
+ module_put(ops->module);
up_read(&nvmf_transports_rwsem);
nvme_delete_ctrl_sync(ctrl);
return ERR_PTR(-EINVAL);
}
+ module_put(ops->module);
up_read(&nvmf_transports_rwsem);
return ctrl;
+out_module_put:
+ module_put(ops->module);
out_unlock:
up_read(&nvmf_transports_rwsem);
out_free_opts:
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 9ba614953607..25b19f722f5b 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -108,6 +108,7 @@ struct nvmf_ctrl_options {
* fabric implementation of NVMe fabrics.
* @entry: Used by the fabrics library to add the new
* registration entry to its linked-list internal tree.
+ * @module: Transport module reference
* @name: Name of the NVMe fabric driver implementation.
* @required_opts: sysfs command-line options that must be specified
* when adding a new NVMe controller.
@@ -126,6 +127,7 @@ struct nvmf_ctrl_options {
*/
struct nvmf_transport_ops {
struct list_head entry;
+ struct module *module;
const char *name;
int required_opts;
int allowed_opts;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 0a8af4daef89..99bf51c7e513 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2921,6 +2921,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
nvme_fc_free_queue(&ctrl->queues[0]);
+ /* re-enable the admin_q so anything new can fast fail */
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+
nvme_fc_ctlr_inactive_on_rport(ctrl);
}
@@ -2935,6 +2938,9 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
* waiting for io to terminate
*/
nvme_fc_delete_association(ctrl);
+
+ /* resume the io queues so that things will fast fail */
+ nvme_start_queues(nctrl);
}
static void
@@ -3221,7 +3227,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
/* initiate nvme ctrl ref counting teardown */
nvme_uninit_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
/* Remove core ctrl ref. */
nvme_put_ctrl(&ctrl->ctrl);
@@ -3381,6 +3386,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
static struct nvmf_transport_ops nvme_fc_transport = {
.name = "fc",
+ .module = THIS_MODULE,
.required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
.allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
.create_ctrl = nvme_fc_create_ctrl,
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index ba3d7f3349e5..50ef71ee3d86 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -31,27 +31,10 @@
enum nvme_nvm_admin_opcode {
nvme_nvm_admin_identity = 0xe2,
- nvme_nvm_admin_get_l2p_tbl = 0xea,
nvme_nvm_admin_get_bb_tbl = 0xf2,
nvme_nvm_admin_set_bb_tbl = 0xf1,
};
-struct nvme_nvm_hb_rw {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd2;
- __le64 metadata;
- __le64 prp1;
- __le64 prp2;
- __le64 spba;
- __le16 length;
- __le16 control;
- __le32 dsmgmt;
- __le64 slba;
-};
-
struct nvme_nvm_ph_rw {
__u8 opcode;
__u8 flags;
@@ -80,19 +63,6 @@ struct nvme_nvm_identity {
__u32 rsvd11[5];
};
-struct nvme_nvm_l2ptbl {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __le32 cdw2[4];
- __le64 prp1;
- __le64 prp2;
- __le64 slba;
- __le32 nlb;
- __le16 cdw14[6];
-};
-
struct nvme_nvm_getbbtbl {
__u8 opcode;
__u8 flags;
@@ -139,9 +109,7 @@ struct nvme_nvm_command {
union {
struct nvme_common_command common;
struct nvme_nvm_identity identity;
- struct nvme_nvm_hb_rw hb_rw;
struct nvme_nvm_ph_rw ph_rw;
- struct nvme_nvm_l2ptbl l2p;
struct nvme_nvm_getbbtbl get_bb;
struct nvme_nvm_setbbtbl set_bb;
struct nvme_nvm_erase_blk erase;
@@ -167,7 +135,7 @@ struct nvme_nvm_id_group {
__u8 num_lun;
__u8 num_pln;
__u8 rsvd1;
- __le16 num_blk;
+ __le16 num_chk;
__le16 num_pg;
__le16 fpg_sz;
__le16 csecs;
@@ -234,11 +202,9 @@ struct nvme_nvm_bb_tbl {
static inline void _nvme_nvm_check_size(void)
{
BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
@@ -249,51 +215,58 @@ static inline void _nvme_nvm_check_size(void)
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
struct nvme_nvm_id_group *src;
- struct nvm_id_group *dst;
+ struct nvm_id_group *grp;
+ int sec_per_pg, sec_per_pl, pg_per_blk;
if (nvme_nvm_id->cgrps != 1)
return -EINVAL;
src = &nvme_nvm_id->groups[0];
- dst = &nvm_id->grp;
-
- dst->mtype = src->mtype;
- dst->fmtype = src->fmtype;
- dst->num_ch = src->num_ch;
- dst->num_lun = src->num_lun;
- dst->num_pln = src->num_pln;
-
- dst->num_pg = le16_to_cpu(src->num_pg);
- dst->num_blk = le16_to_cpu(src->num_blk);
- dst->fpg_sz = le16_to_cpu(src->fpg_sz);
- dst->csecs = le16_to_cpu(src->csecs);
- dst->sos = le16_to_cpu(src->sos);
-
- dst->trdt = le32_to_cpu(src->trdt);
- dst->trdm = le32_to_cpu(src->trdm);
- dst->tprt = le32_to_cpu(src->tprt);
- dst->tprm = le32_to_cpu(src->tprm);
- dst->tbet = le32_to_cpu(src->tbet);
- dst->tbem = le32_to_cpu(src->tbem);
- dst->mpos = le32_to_cpu(src->mpos);
- dst->mccap = le32_to_cpu(src->mccap);
-
- dst->cpar = le16_to_cpu(src->cpar);
-
- if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
- memcpy(dst->lptbl.id, src->lptbl.id, 8);
- dst->lptbl.mlc.num_pairs =
- le16_to_cpu(src->lptbl.mlc.num_pairs);
-
- if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
- pr_err("nvm: number of MLC pairs not supported\n");
- return -EINVAL;
- }
+ grp = &nvm_id->grp;
+
+ grp->mtype = src->mtype;
+ grp->fmtype = src->fmtype;
+
+ grp->num_ch = src->num_ch;
+ grp->num_lun = src->num_lun;
+
+ grp->num_chk = le16_to_cpu(src->num_chk);
+ grp->csecs = le16_to_cpu(src->csecs);
+ grp->sos = le16_to_cpu(src->sos);
+
+ pg_per_blk = le16_to_cpu(src->num_pg);
+ sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs;
+ sec_per_pl = sec_per_pg * src->num_pln;
+ grp->clba = sec_per_pl * pg_per_blk;
+ grp->ws_per_chk = pg_per_blk;
- memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
- dst->lptbl.mlc.num_pairs);
+ grp->mpos = le32_to_cpu(src->mpos);
+ grp->cpar = le16_to_cpu(src->cpar);
+ grp->mccap = le32_to_cpu(src->mccap);
+
+ grp->ws_opt = grp->ws_min = sec_per_pg;
+ grp->ws_seq = NVM_IO_SNGL_ACCESS;
+
+ if (grp->mpos & 0x020202) {
+ grp->ws_seq = NVM_IO_DUAL_ACCESS;
+ grp->ws_opt <<= 1;
+ } else if (grp->mpos & 0x040404) {
+ grp->ws_seq = NVM_IO_QUAD_ACCESS;
+ grp->ws_opt <<= 2;
}
+ grp->trdt = le32_to_cpu(src->trdt);
+ grp->trdm = le32_to_cpu(src->trdm);
+ grp->tprt = le32_to_cpu(src->tprt);
+ grp->tprm = le32_to_cpu(src->tprm);
+ grp->tbet = le32_to_cpu(src->tbet);
+ grp->tbem = le32_to_cpu(src->tbem);
+
+ /* 1.2 compatibility */
+ grp->num_pln = src->num_pln;
+ grp->num_pg = le16_to_cpu(src->num_pg);
+ grp->fpg_sz = le16_to_cpu(src->fpg_sz);
+
return 0;
}
@@ -332,62 +305,6 @@ out:
return ret;
}
-static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
- nvm_l2p_update_fn *update_l2p, void *priv)
-{
- struct nvme_ns *ns = nvmdev->q->queuedata;
- struct nvme_nvm_command c = {};
- u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
- u32 nlb_pr_rq = len / sizeof(u64);
- u64 cmd_slba = slba;
- void *entries;
- int ret = 0;
-
- c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
- c.l2p.nsid = cpu_to_le32(ns->head->ns_id);
- entries = kmalloc(len, GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- while (nlb) {
- u32 cmd_nlb = min(nlb_pr_rq, nlb);
- u64 elba = slba + cmd_nlb;
-
- c.l2p.slba = cpu_to_le64(cmd_slba);
- c.l2p.nlb = cpu_to_le32(cmd_nlb);
-
- ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
- (struct nvme_command *)&c, entries, len);
- if (ret) {
- dev_err(ns->ctrl->device,
- "L2P table transfer failed (%d)\n", ret);
- ret = -EIO;
- goto out;
- }
-
- if (unlikely(elba > nvmdev->total_secs)) {
- pr_err("nvm: L2P data from device is out of bounds!\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Transform physical address to target address space */
- nvm_part_to_tgt(nvmdev, entries, cmd_nlb);
-
- if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
- ret = -EINTR;
- goto out;
- }
-
- cmd_slba += cmd_nlb;
- nlb -= cmd_nlb;
- }
-
-out:
- kfree(entries);
- return ret;
-}
-
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
u8 *blks)
{
@@ -397,7 +314,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_command c = {};
struct nvme_nvm_bb_tbl *bb_tbl;
- int nr_blks = geo->blks_per_lun * geo->plane_mode;
+ int nr_blks = geo->nr_chks * geo->plane_mode;
int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
int ret = 0;
@@ -438,7 +355,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
goto out;
}
- memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode);
+ memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
out:
kfree(bb_tbl);
return ret;
@@ -474,10 +391,6 @@ static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
c->ph_rw.control = cpu_to_le16(rqd->flags);
c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
-
- if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
- c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
- rqd->bio->bi_iter.bi_sector));
}
static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
@@ -597,8 +510,6 @@ static void nvme_nvm_dev_dma_free(void *pool, void *addr,
static struct nvm_dev_ops nvme_nvm_dev_ops = {
.identity = nvme_nvm_identity,
- .get_l2p_tbl = nvme_nvm_get_l2p_tbl,
-
.get_bb_tbl = nvme_nvm_get_bb_tbl,
.set_bb_tbl = nvme_nvm_set_bb_tbl,
@@ -883,7 +794,7 @@ static ssize_t nvm_dev_attr_show(struct device *dev,
} else if (strcmp(attr->name, "num_planes") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
} else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk);
} else if (strcmp(attr->name, "num_pages") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
} else if (strcmp(attr->name, "page_size") == 0) {
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 1218a9fca846..3b211d9e58b8 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -33,51 +33,11 @@ void nvme_failover_req(struct request *req)
kblockd_schedule_work(&ns->head->requeue_work);
}
-bool nvme_req_needs_failover(struct request *req)
+bool nvme_req_needs_failover(struct request *req, blk_status_t error)
{
if (!(req->cmd_flags & REQ_NVME_MPATH))
return false;
-
- switch (nvme_req(req)->status & 0x7ff) {
- /*
- * Generic command status:
- */
- case NVME_SC_INVALID_OPCODE:
- case NVME_SC_INVALID_FIELD:
- case NVME_SC_INVALID_NS:
- case NVME_SC_LBA_RANGE:
- case NVME_SC_CAP_EXCEEDED:
- case NVME_SC_RESERVATION_CONFLICT:
- return false;
-
- /*
- * I/O command set specific error. Unfortunately these values are
- * reused for fabrics commands, but those should never get here.
- */
- case NVME_SC_BAD_ATTRIBUTES:
- case NVME_SC_INVALID_PI:
- case NVME_SC_READ_ONLY:
- case NVME_SC_ONCS_NOT_SUPPORTED:
- WARN_ON_ONCE(nvme_req(req)->cmd->common.opcode ==
- nvme_fabrics_command);
- return false;
-
- /*
- * Media and Data Integrity Errors:
- */
- case NVME_SC_WRITE_FAULT:
- case NVME_SC_READ_ERROR:
- case NVME_SC_GUARD_CHECK:
- case NVME_SC_APPTAG_CHECK:
- case NVME_SC_REFTAG_CHECK:
- case NVME_SC_COMPARE_FAILED:
- case NVME_SC_ACCESS_DENIED:
- case NVME_SC_UNWRITTEN_BLOCK:
- return false;
- }
-
- /* Everything else could be a path failure, so should be retried */
- return true;
+ return blk_path_error(error);
}
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ea1aa5283e8e..8e4550fa08f8 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -32,6 +32,8 @@ extern unsigned int admin_timeout;
#define NVME_KATO_GRACE 10
extern struct workqueue_struct *nvme_wq;
+extern struct workqueue_struct *nvme_reset_wq;
+extern struct workqueue_struct *nvme_delete_wq;
enum {
NVME_NS_LBA = 0,
@@ -119,6 +121,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
enum nvme_ctrl_state {
NVME_CTRL_NEW,
NVME_CTRL_LIVE,
+ NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */
NVME_CTRL_RESETTING,
NVME_CTRL_RECONNECTING,
NVME_CTRL_DELETING,
@@ -393,6 +396,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
+int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
@@ -401,7 +405,7 @@ extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
void nvme_failover_req(struct request *req);
-bool nvme_req_needs_failover(struct request *req);
+bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns_head *head);
@@ -417,11 +421,21 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
rcu_assign_pointer(head->current_path, NULL);
}
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+
+static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+{
+ struct nvme_ns_head *head = ns->head;
+
+ if (head->disk && list_empty(&head->list))
+ kblockd_schedule_work(&head->requeue_work);
+}
+
#else
static inline void nvme_failover_req(struct request *req)
{
}
-static inline bool nvme_req_needs_failover(struct request *req)
+static inline bool nvme_req_needs_failover(struct request *req,
+ blk_status_t error)
{
return false;
}
@@ -448,6 +462,9 @@ static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
}
+static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+{
+}
#endif /* CONFIG_NVME_MULTIPATH */
#ifdef CONFIG_NVM
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f5800c3c9082..6fe7af00a1f4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -75,7 +75,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
struct nvme_dev {
- struct nvme_queue **queues;
+ struct nvme_queue *queues;
struct blk_mq_tag_set tagset;
struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
@@ -365,7 +365,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
struct nvme_dev *dev = data;
- struct nvme_queue *nvmeq = dev->queues[0];
+ struct nvme_queue *nvmeq = &dev->queues[0];
WARN_ON(hctx_idx != 0);
WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
@@ -387,7 +387,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
struct nvme_dev *dev = data;
- struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
+ struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
if (!nvmeq->tags)
nvmeq->tags = &dev->tagset.tags[hctx_idx];
@@ -403,7 +403,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
struct nvme_dev *dev = set->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
- struct nvme_queue *nvmeq = dev->queues[queue_idx];
+ struct nvme_queue *nvmeq = &dev->queues[queue_idx];
BUG_ON(!nvmeq);
iod->nvmeq = nvmeq;
@@ -448,12 +448,34 @@ static void **nvme_pci_iod_list(struct request *req)
return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}
+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ int nseg = blk_rq_nr_phys_segments(req);
+ unsigned int avg_seg_size;
+
+ if (nseg == 0)
+ return false;
+
+ avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
+
+ if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
+ return false;
+ if (!iod->nvmeq->qid)
+ return false;
+ if (!sgl_threshold || avg_seg_size < sgl_threshold)
+ return false;
+ return true;
+}
+
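As a worked example of the new predicate (my numbers; sgl_threshold defaults to 32 KiB in this driver):

/*   128 KiB payload in 4 segments:  avg_seg_size = 32768 -> use SGLs
 *   128 KiB payload in 32 segments: avg_seg_size = 4096  -> use PRPs
 * Admin commands (qid == 0), controllers that advertise no SGL
 * support, and sgl_threshold == 0 always fall back to PRPs.
 */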
static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
int nseg = blk_rq_nr_phys_segments(rq);
unsigned int size = blk_rq_payload_bytes(rq);
+ iod->use_sgl = nvme_pci_use_sgls(dev, rq);
+
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
iod->use_sgl);
@@ -604,8 +626,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
dma_addr_t prp_dma;
int nprps, i;
- iod->use_sgl = false;
-
length -= (page_size - offset);
if (length <= 0) {
iod->first_dma = 0;
@@ -705,22 +725,19 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
}
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
- struct request *req, struct nvme_rw_command *cmd)
+ struct request *req, struct nvme_rw_command *cmd, int entries)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- int length = blk_rq_payload_bytes(req);
struct dma_pool *pool;
struct nvme_sgl_desc *sg_list;
struct scatterlist *sg = iod->sg;
- int entries = iod->nents, i = 0;
dma_addr_t sgl_dma;
-
- iod->use_sgl = true;
+ int i = 0;
/* setting the transfer type as SGL */
cmd->flags = NVME_CMD_SGL_METABUF;
- if (length == sg_dma_len(sg)) {
+ if (entries == 1) {
nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
return BLK_STS_OK;
}
@@ -760,33 +777,12 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
}
nvme_pci_sgl_set_data(&sg_list[i++], sg);
-
- length -= sg_dma_len(sg);
sg = sg_next(sg);
- entries--;
- } while (length > 0);
+ } while (--entries > 0);
- WARN_ON(entries > 0);
return BLK_STS_OK;
}
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- unsigned int avg_seg_size;
-
- avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
- blk_rq_nr_phys_segments(req));
-
- if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
- return false;
- if (!iod->nvmeq->qid)
- return false;
- if (!sgl_threshold || avg_seg_size < sgl_threshold)
- return false;
- return true;
-}
-
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
@@ -795,6 +791,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
enum dma_data_direction dma_dir = rq_data_dir(req) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE;
blk_status_t ret = BLK_STS_IOERR;
+ int nr_mapped;
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
iod->nents = blk_rq_map_sg(q, req, iod->sg);
@@ -802,12 +799,13 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
goto out;
ret = BLK_STS_RESOURCE;
- if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
- DMA_ATTR_NO_WARN))
+ nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
+ DMA_ATTR_NO_WARN);
+ if (!nr_mapped)
goto out;
- if (nvme_pci_use_sgls(dev, req))
- ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
+ if (iod->use_sgl)
+ ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
@@ -1046,7 +1044,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
- struct nvme_queue *nvmeq = dev->queues[0];
+ struct nvme_queue *nvmeq = &dev->queues[0];
struct nvme_command c;
memset(&c, 0, sizeof(c));
@@ -1140,9 +1138,14 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
*/
bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
- /* If there is a reset ongoing, we shouldn't reset again. */
- if (dev->ctrl.state == NVME_CTRL_RESETTING)
+ /* If there is a reset/reinit ongoing, we shouldn't reset again. */
+ switch (dev->ctrl.state) {
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_RECONNECTING:
return false;
+ default:
+ break;
+ }
/* We shouldn't reset unless the controller is on fatal error state
* _or_ if we lost the communication with it.
@@ -1282,7 +1285,6 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
if (nvmeq->sq_cmds)
dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
- kfree(nvmeq);
}
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
@@ -1290,10 +1292,8 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
int i;
for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
dev->ctrl.queue_count--;
- dev->queues[i] = NULL;
- nvme_free_queue(nvmeq);
+ nvme_free_queue(&dev->queues[i]);
}
}
@@ -1325,12 +1325,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
- struct nvme_queue *nvmeq = dev->queues[0];
-
- if (!nvmeq)
- return;
- if (nvme_suspend_queue(nvmeq))
- return;
+ struct nvme_queue *nvmeq = &dev->queues[0];
if (shutdown)
nvme_shutdown_ctrl(&dev->ctrl);
@@ -1369,7 +1364,7 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
int qid, int depth)
{
- if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
+ if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
dev->ctrl.page_size);
nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
@@ -1384,13 +1379,13 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
return 0;
}
-static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
- int depth, int node)
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
+ int depth, int node)
{
- struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
- node);
- if (!nvmeq)
- return NULL;
+ struct nvme_queue *nvmeq = &dev->queues[qid];
+
+ if (dev->ctrl.queue_count > qid)
+ return 0;
nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
&nvmeq->cq_dma_addr, GFP_KERNEL);
@@ -1409,17 +1404,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_depth = depth;
nvmeq->qid = qid;
nvmeq->cq_vector = -1;
- dev->queues[qid] = nvmeq;
dev->ctrl.queue_count++;
- return nvmeq;
+ return 0;
free_cqdma:
dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
nvmeq->cq_dma_addr);
free_nvmeq:
- kfree(nvmeq);
- return NULL;
+ return -ENOMEM;
}
static int queue_request_irq(struct nvme_queue *nvmeq)
@@ -1592,14 +1585,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
if (result < 0)
return result;
- nvmeq = dev->queues[0];
- if (!nvmeq) {
- nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
- dev_to_node(dev->dev));
- if (!nvmeq)
- return -ENOMEM;
- }
+ result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
+ dev_to_node(dev->dev));
+ if (result)
+ return result;
+ nvmeq = &dev->queues[0];
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;
@@ -1629,7 +1620,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
/* vector == qid - 1, match nvme_create_queue */
- if (!nvme_alloc_queue(dev, i, dev->q_depth,
+ if (nvme_alloc_queue(dev, i, dev->q_depth,
pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
ret = -ENOMEM;
break;
@@ -1638,15 +1629,15 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
max = min(dev->max_qid, dev->ctrl.queue_count - 1);
for (i = dev->online_queues; i <= max; i++) {
- ret = nvme_create_queue(dev->queues[i], i);
+ ret = nvme_create_queue(&dev->queues[i], i);
if (ret)
break;
}
/*
* Ignore failing Create SQ/CQ commands, we can continue with less
- * than the desired aount of queues, and even a controller without
- * I/O queues an still be used to issue admin commands. This might
+ * than the desired amount of queues, and even a controller without
+ * I/O queues can still be used to issue admin commands. This might
* be useful to upgrade a buggy firmware for example.
*/
return ret >= 0 ? 0 : ret;
@@ -1663,30 +1654,40 @@ static ssize_t nvme_cmb_show(struct device *dev,
}
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
-static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
+static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
+{
+ u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
+
+ return 1ULL << (12 + 4 * szu);
+}
+
+static u32 nvme_cmb_size(struct nvme_dev *dev)
{
- u64 szu, size, offset;
+ return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
+}
+
+static void nvme_map_cmb(struct nvme_dev *dev)
+{
+ u64 size, offset;
resource_size_t bar_size;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- void __iomem *cmb;
int bar;
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
- if (!(NVME_CMB_SZ(dev->cmbsz)))
- return NULL;
+ if (!dev->cmbsz)
+ return;
dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
if (!use_cmb_sqes)
- return NULL;
+ return;
- szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
- size = szu * NVME_CMB_SZ(dev->cmbsz);
- offset = szu * NVME_CMB_OFST(dev->cmbloc);
+ size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
+ offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
bar = NVME_CMB_BIR(dev->cmbloc);
bar_size = pci_resource_len(pdev, bar);
if (offset > bar_size)
- return NULL;
+ return;
/*
* Controllers may support a CMB size larger than their BAR,
@@ -1696,13 +1697,16 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
- if (!cmb)
- return NULL;
-
+ dev->cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
+ if (!dev->cmb)
+ return;
dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
dev->cmb_size = size;
- return cmb;
+
+ if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+ &dev_attr_cmb.attr, NULL))
+ dev_warn(dev->ctrl.device,
+ "failed to add sysfs attribute for CMB\n");
}
static inline void nvme_release_cmb(struct nvme_dev *dev)
@@ -1770,7 +1774,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
dma_addr_t descs_dma;
int i = 0;
void **bufs;
- u64 size = 0, tmp;
+ u64 size, tmp;
tmp = (preferred + chunk_size - 1);
do_div(tmp, chunk_size);
@@ -1853,7 +1857,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
u64 preferred = (u64)dev->ctrl.hmpre * 4096;
u64 min = (u64)dev->ctrl.hmmin * 4096;
u32 enable_bits = NVME_HOST_MEM_ENABLE;
- int ret = 0;
+ int ret;
preferred = min(preferred, max);
if (min > max) {
@@ -1894,7 +1898,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
- struct nvme_queue *adminq = dev->queues[0];
+ struct nvme_queue *adminq = &dev->queues[0];
struct pci_dev *pdev = to_pci_dev(dev->dev);
int result, nr_io_queues;
unsigned long size;
@@ -1907,7 +1911,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (nr_io_queues == 0)
return 0;
- if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
+ if (dev->cmb && (dev->cmbsz & NVME_CMBSZ_SQS)) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
sizeof(struct nvme_command));
if (result > 0)
@@ -2007,9 +2011,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
return 0;
}
-static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
+static void nvme_disable_io_queues(struct nvme_dev *dev)
{
- int pass;
+ int pass, queues = dev->online_queues - 1;
unsigned long timeout;
u8 opcode = nvme_admin_delete_sq;
@@ -2020,7 +2024,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
retry:
timeout = ADMIN_TIMEOUT;
for (; i > 0; i--, sent++)
- if (nvme_delete_queue(dev->queues[i], opcode))
+ if (nvme_delete_queue(&dev->queues[i], opcode))
break;
while (sent--) {
@@ -2035,13 +2039,12 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
}
/*
- * Return: error value if an error occurred setting up the queues or calling
- * Identify Device. 0 if these succeeded, even if adding some of the
- * namespaces failed. At the moment, these failures are silent. TBD which
- * failures should be reported.
+ * Return an error value only when the tagset allocation failed.
*/
static int nvme_dev_add(struct nvme_dev *dev)
{
+ int ret;
+
if (!dev->ctrl.tagset) {
dev->tagset.ops = &nvme_mq_ops;
dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2057,8 +2060,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tagset.driver_data = dev;
- if (blk_mq_alloc_tag_set(&dev->tagset))
- return 0;
+ ret = blk_mq_alloc_tag_set(&dev->tagset);
+ if (ret) {
+ dev_warn(dev->ctrl.device,
+ "IO queues tagset allocation failed %d\n", ret);
+ return ret;
+ }
dev->ctrl.tagset = &dev->tagset;
nvme_dbbuf_set(dev);
@@ -2124,22 +2131,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
"set queue depth=%u\n", dev->q_depth);
}
- /*
- * CMBs can currently only exist on >=1.2 PCIe devices. We only
- * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
- * has no name we can pass NULL as final argument to
- * sysfs_add_file_to_group.
- */
-
- if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
- dev->cmb = nvme_map_cmb(dev);
- if (dev->cmb) {
- if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
- &dev_attr_cmb.attr, NULL))
- dev_warn(dev->ctrl.device,
- "failed to add sysfs attribute for CMB\n");
- }
- }
+ nvme_map_cmb(dev);
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
@@ -2172,7 +2164,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
- int i, queues;
+ int i;
bool dead = true;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -2207,21 +2199,13 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
}
nvme_stop_queues(&dev->ctrl);
- queues = dev->online_queues - 1;
- for (i = dev->ctrl.queue_count - 1; i > 0; i--)
- nvme_suspend_queue(dev->queues[i]);
-
- if (dead) {
- /* A device might become IO incapable very soon during
- * probe, before the admin queue is configured. Thus,
- * queue_count can be 0 here.
- */
- if (dev->ctrl.queue_count)
- nvme_suspend_queue(dev->queues[0]);
- } else {
- nvme_disable_io_queues(dev, queues);
+ if (!dead) {
+ nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
}
+ for (i = dev->ctrl.queue_count - 1; i >= 0; i--)
+ nvme_suspend_queue(&dev->queues[i]);
+
nvme_pci_disable(dev);
blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
@@ -2291,6 +2275,7 @@ static void nvme_reset_work(struct work_struct *work)
container_of(work, struct nvme_dev, ctrl.reset_work);
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result = -ENODEV;
+ enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
goto out;
@@ -2302,6 +2287,16 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
+ /*
+	 * Mark the controller RECONNECTING (a state borrowed from the
+	 * nvme-fc/rdma transports) while it is being reinitialized here.
+ */
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller RECONNECTING\n");
+ goto out;
+ }
+
result = nvme_pci_enable(dev);
if (result)
goto out;
@@ -2354,15 +2349,23 @@ static void nvme_reset_work(struct work_struct *work)
dev_warn(dev->ctrl.device, "IO queues not created\n");
nvme_kill_queues(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
+ new_state = NVME_CTRL_ADMIN_ONLY;
} else {
nvme_start_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
- nvme_dev_add(dev);
+		/* hit this only when the tagset allocation fails */
+ if (nvme_dev_add(dev))
+ new_state = NVME_CTRL_ADMIN_ONLY;
nvme_unfreeze(&dev->ctrl);
}
- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
- dev_warn(dev->ctrl.device, "failed to mark controller live\n");
+ /*
+	 * If only the admin queue is live, keep it for further
+	 * investigation or recovery.
+ */
+ if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller state %d\n", new_state);
goto out;
}
@@ -2470,8 +2473,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
return -ENOMEM;
- dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
- GFP_KERNEL, node);
+
+ dev->queues = kcalloc_node(num_possible_cpus() + 1,
+ sizeof(struct nvme_queue), GFP_KERNEL, node);
if (!dev->queues)
goto free;
@@ -2498,10 +2502,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto release_pools;
- nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
- queue_work(nvme_wq, &dev->ctrl.reset_work);
+ nvme_reset_ctrl(&dev->ctrl);
+
return 0;
release_pools:
@@ -2525,7 +2529,7 @@ static void nvme_reset_prepare(struct pci_dev *pdev)
static void nvme_reset_done(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_reset_ctrl(&dev->ctrl);
+ nvme_reset_ctrl_sync(&dev->ctrl);
}
static void nvme_shutdown(struct pci_dev *pdev)
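Note on the pci.c hunks above: the driver now embeds every struct nvme_queue in one contiguous kcalloc_node() block instead of keeping an array of pointers to individually kzalloc'd queues. A minimal sketch of the pattern, assuming the same names as the driver (the surrounding error handling is illustrative only):

	struct nvme_queue *queues;

	/* one contiguous, zeroed, NUMA-local block for all queues */
	queues = kcalloc_node(num_possible_cpus() + 1, sizeof(*queues),
			      GFP_KERNEL, node);
	if (!queues)
		return -ENOMEM;

	/* queue i is now simply &queues[i]; teardown is one kfree(queues) */

This is why nvme_free_queue() above loses its kfree() and the per-queue NULL checks disappear.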
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 37af56596be6..2bc059f7d73c 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -66,7 +66,6 @@ struct nvme_rdma_request {
struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
u32 num_sge;
int nents;
- bool inline_data;
struct ib_reg_wr reg_wr;
struct ib_cqe reg_cqe;
struct nvme_rdma_queue *queue;
@@ -974,12 +973,18 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_start_queues(&ctrl->ctrl);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ /* state change failure should never happen */
+ WARN_ON_ONCE(1);
+ return;
+ }
+
nvme_rdma_reconnect_or_remove(ctrl);
}
static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
{
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
return;
queue_work(nvme_wq, &ctrl->err_work);
@@ -1086,7 +1091,6 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
- req->inline_data = true;
req->num_sge++;
return 0;
}
@@ -1158,7 +1162,6 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
int count, ret;
req->num_sge = 1;
- req->inline_data = false;
refcount_set(&req->ref, 2); /* send and recv completions */
c->common.flags |= NVME_CMD_SGL_METABUF;
@@ -1753,6 +1756,12 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
nvme_stop_ctrl(&ctrl->ctrl);
nvme_rdma_shutdown_ctrl(ctrl, false);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ /* state change failure should never happen */
+ WARN_ON_ONCE(1);
+ return;
+ }
+
ret = nvme_rdma_configure_admin_queue(ctrl, false);
if (ret)
goto out_fail;
@@ -2006,6 +2015,7 @@ out_free_ctrl:
static struct nvmf_transport_ops nvme_rdma_transport = {
.name = "rdma",
+ .module = THIS_MODULE,
.required_opts = NVMF_OPT_TRADDR,
.allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,
@@ -2028,7 +2038,7 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
}
mutex_unlock(&nvme_rdma_ctrl_mutex);
- flush_workqueue(nvme_wq);
+ flush_workqueue(nvme_delete_wq);
}
static struct ib_client nvme_rdma_ib_client = {
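The rdma.c hunks above swap the roles of the two states: error recovery is now entered via RESETTING, and RECONNECTING is only set once teardown is done and reconnect attempts begin. A hedged sketch of the resulting flow (the function name is illustrative, not a real driver entry point):

	static void example_error_recovery(struct nvme_ctrl *ctrl)
	{
		if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
			return;		/* recovery is already running */

		/* ... quiesce queues, tear down transport resources ... */

		if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RECONNECTING)) {
			WARN_ON_ONCE(1);	/* should never fail here */
			return;
		}

		/* ... schedule periodic reconnect attempts ... */
	}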
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
new file mode 100644
index 000000000000..41944bbef835
--- /dev/null
+++ b/drivers/nvme/host/trace.c
@@ -0,0 +1,130 @@
+/*
+ * NVM Express device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <asm/unaligned.h>
+#include "trace.h"
+
+static const char *nvme_trace_create_sq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 sqid = get_unaligned_le16(cdw10);
+ u16 qsize = get_unaligned_le16(cdw10 + 2);
+ u16 sq_flags = get_unaligned_le16(cdw10 + 4);
+ u16 cqid = get_unaligned_le16(cdw10 + 6);
+
+ trace_seq_printf(p, "sqid=%u, qsize=%u, sq_flags=0x%x, cqid=%u",
+ sqid, qsize, sq_flags, cqid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_create_cq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 cqid = get_unaligned_le16(cdw10);
+ u16 qsize = get_unaligned_le16(cdw10 + 2);
+ u16 cq_flags = get_unaligned_le16(cdw10 + 4);
+ u16 irq_vector = get_unaligned_le16(cdw10 + 6);
+
+ trace_seq_printf(p, "cqid=%u, qsize=%u, cq_flags=0x%x, irq_vector=%u",
+ cqid, qsize, cq_flags, irq_vector);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 cns = cdw10[0];
+ u16 ctrlid = get_unaligned_le16(cdw10 + 2);
+
+ trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u16 length = get_unaligned_le16(cdw10 + 8);
+ u16 control = get_unaligned_le16(cdw10 + 10);
+ u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
+ u32 reftag = get_unaligned_le32(cdw10 + 16);
+
+ trace_seq_printf(p,
+ "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
+ slba, length, control, dsmgmt, reftag);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "nr=%u, attributes=%u",
+ get_unaligned_le32(cdw10),
+ get_unaligned_le32(cdw10 + 4));
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_admin_create_sq:
+ return nvme_trace_create_sq(p, cdw10);
+ case nvme_admin_create_cq:
+ return nvme_trace_create_cq(p, cdw10);
+ case nvme_admin_identify:
+ return nvme_trace_admin_identify(p, cdw10);
+ default:
+ return nvme_trace_common(p, cdw10);
+ }
+}
+
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ case nvme_cmd_write_zeroes:
+ return nvme_trace_read_write(p, cdw10);
+ case nvme_cmd_dsm:
+ return nvme_trace_dsm(p, cdw10);
+ default:
+ return nvme_trace_common(p, cdw10);
+ }
+}
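All of the parsers in trace.c follow one template: remember the current trace_seq position, decode fixed little-endian fields out of the 24-byte cdw10 area with the get_unaligned_le*() helpers, print them, then NUL-terminate. A hedged sketch of that template with hypothetical field names:

	static const char *nvme_trace_example(struct trace_seq *p, u8 *cdw10)
	{
		const char *ret = trace_seq_buffer_ptr(p);	/* start of output */
		u16 first = get_unaligned_le16(cdw10);		/* bytes 0-1 */
		u32 second = get_unaligned_le32(cdw10 + 4);	/* bytes 4-7 */

		trace_seq_printf(p, "first=%u, second=%u", first, second);
		trace_seq_putc(p, 0);	/* terminate so TP_printk can print it */

		return ret;
	}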
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
new file mode 100644
index 000000000000..ea91fccd1bc0
--- /dev/null
+++ b/drivers/nvme/host/trace.h
@@ -0,0 +1,165 @@
+/*
+ * NVM Express device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nvme
+
+#if !defined(_TRACE_NVME_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NVME_H
+
+#include <linux/nvme.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "nvme.h"
+
+#define nvme_admin_opcode_name(opcode) { opcode, #opcode }
+#define show_admin_opcode_name(val) \
+ __print_symbolic(val, \
+ nvme_admin_opcode_name(nvme_admin_delete_sq), \
+ nvme_admin_opcode_name(nvme_admin_create_sq), \
+ nvme_admin_opcode_name(nvme_admin_get_log_page), \
+ nvme_admin_opcode_name(nvme_admin_delete_cq), \
+ nvme_admin_opcode_name(nvme_admin_create_cq), \
+ nvme_admin_opcode_name(nvme_admin_identify), \
+ nvme_admin_opcode_name(nvme_admin_abort_cmd), \
+ nvme_admin_opcode_name(nvme_admin_set_features), \
+ nvme_admin_opcode_name(nvme_admin_get_features), \
+ nvme_admin_opcode_name(nvme_admin_async_event), \
+ nvme_admin_opcode_name(nvme_admin_ns_mgmt), \
+ nvme_admin_opcode_name(nvme_admin_activate_fw), \
+ nvme_admin_opcode_name(nvme_admin_download_fw), \
+ nvme_admin_opcode_name(nvme_admin_ns_attach), \
+ nvme_admin_opcode_name(nvme_admin_keep_alive), \
+ nvme_admin_opcode_name(nvme_admin_directive_send), \
+ nvme_admin_opcode_name(nvme_admin_directive_recv), \
+ nvme_admin_opcode_name(nvme_admin_dbbuf), \
+ nvme_admin_opcode_name(nvme_admin_format_nvm), \
+ nvme_admin_opcode_name(nvme_admin_security_send), \
+ nvme_admin_opcode_name(nvme_admin_security_recv), \
+ nvme_admin_opcode_name(nvme_admin_sanitize_nvm))
+
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+#define __parse_nvme_admin_cmd(opcode, cdw10) \
+ nvme_trace_parse_admin_cmd(p, opcode, cdw10)
+
+#define nvme_opcode_name(opcode) { opcode, #opcode }
+#define show_opcode_name(val) \
+ __print_symbolic(val, \
+ nvme_opcode_name(nvme_cmd_flush), \
+ nvme_opcode_name(nvme_cmd_write), \
+ nvme_opcode_name(nvme_cmd_read), \
+ nvme_opcode_name(nvme_cmd_write_uncor), \
+ nvme_opcode_name(nvme_cmd_compare), \
+ nvme_opcode_name(nvme_cmd_write_zeroes), \
+ nvme_opcode_name(nvme_cmd_dsm), \
+ nvme_opcode_name(nvme_cmd_resv_register), \
+ nvme_opcode_name(nvme_cmd_resv_report), \
+ nvme_opcode_name(nvme_cmd_resv_acquire), \
+ nvme_opcode_name(nvme_cmd_resv_release))
+
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+#define __parse_nvme_cmd(opcode, cdw10) \
+ nvme_trace_parse_nvm_cmd(p, opcode, cdw10)
+
+TRACE_EVENT(nvme_setup_admin_cmd,
+ TP_PROTO(struct nvme_command *cmd),
+ TP_ARGS(cmd),
+ TP_STRUCT__entry(
+ __field(u8, opcode)
+ __field(u8, flags)
+ __field(u16, cid)
+ __field(u64, metadata)
+ __array(u8, cdw10, 24)
+ ),
+ TP_fast_assign(
+ __entry->opcode = cmd->common.opcode;
+ __entry->flags = cmd->common.flags;
+ __entry->cid = cmd->common.command_id;
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ memcpy(__entry->cdw10, cmd->common.cdw10,
+ sizeof(__entry->cdw10));
+ ),
+ TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ __entry->cid, __entry->flags, __entry->metadata,
+ show_admin_opcode_name(__entry->opcode),
+ __parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
+);
+
+TRACE_EVENT(nvme_setup_nvm_cmd,
+ TP_PROTO(int qid, struct nvme_command *cmd),
+ TP_ARGS(qid, cmd),
+ TP_STRUCT__entry(
+ __field(int, qid)
+ __field(u8, opcode)
+ __field(u8, flags)
+ __field(u16, cid)
+ __field(u32, nsid)
+ __field(u64, metadata)
+ __array(u8, cdw10, 24)
+ ),
+ TP_fast_assign(
+ __entry->qid = qid;
+ __entry->opcode = cmd->common.opcode;
+ __entry->flags = cmd->common.flags;
+ __entry->cid = cmd->common.command_id;
+ __entry->nsid = le32_to_cpu(cmd->common.nsid);
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ memcpy(__entry->cdw10, cmd->common.cdw10,
+ sizeof(__entry->cdw10));
+ ),
+ TP_printk("qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ __entry->qid, __entry->nsid, __entry->cid,
+ __entry->flags, __entry->metadata,
+ show_opcode_name(__entry->opcode),
+ __parse_nvme_cmd(__entry->opcode, __entry->cdw10))
+);
+
+TRACE_EVENT(nvme_complete_rq,
+ TP_PROTO(struct request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __field(int, qid)
+ __field(int, cid)
+ __field(u64, result)
+ __field(u8, retries)
+ __field(u8, flags)
+ __field(u16, status)
+ ),
+ TP_fast_assign(
+ __entry->qid = req->q->id;
+ __entry->cid = req->tag;
+ __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
+ __entry->retries = nvme_req(req)->retries;
+ __entry->flags = nvme_req(req)->flags;
+ __entry->status = nvme_req(req)->status;
+ ),
+ TP_printk("cmdid=%u, qid=%d, res=%llu, retries=%u, flags=0x%x, status=%u",
+ __entry->cid, __entry->qid, __entry->result,
+ __entry->retries, __entry->flags, __entry->status)
+);
+
+#endif /* _TRACE_NVME_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
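For completeness, a hedged sketch of how these events would be fired from the host core; the actual call sites are not part of this diff, and CREATE_TRACE_POINTS must be defined in exactly one .c file before including trace.h:

	#define CREATE_TRACE_POINTS
	#include "trace.h"

	static void example_setup_cmd(struct nvme_ns *ns, struct request *req,
				      struct nvme_command *cmd)
	{
		if (ns)
			trace_nvme_setup_nvm_cmd(req->q->id, cmd);
		else
			trace_nvme_setup_admin_cmd(cmd);
	}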
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 03e4ab65fe77..5f4f8b16685f 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -29,6 +29,7 @@ config NVME_TARGET_RDMA
tristate "NVMe over Fabrics RDMA target support"
depends on INFINIBAND
depends on NVME_TARGET
+ select SGL_ALLOC
help
This enables the NVMe RDMA target support, which allows exporting NVMe
devices over RDMA.
@@ -39,6 +40,7 @@ config NVME_TARGET_FC
tristate "NVMe over Fabrics FC target driver"
depends on NVME_TARGET
depends on HAS_DMA
+ select SGL_ALLOC
help
This enables the NVMe FC target support, which allows exporting NVMe
devices over FC.
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b54748ad5f48..0bd737117a80 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -512,6 +512,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->sg_cnt = 0;
req->transfer_len = 0;
req->rsp->status = 0;
+ req->ns = NULL;
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
@@ -557,6 +558,8 @@ EXPORT_SYMBOL_GPL(nvmet_req_init);
void nvmet_req_uninit(struct nvmet_req *req)
{
percpu_ref_put(&req->sq->ref);
+ if (req->ns)
+ nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
@@ -830,7 +833,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
/* Don't accept keep-alive timeout for discovery controllers */
if (kato) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto out_free_sqs;
+ goto out_remove_ida;
}
/*
@@ -860,6 +863,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
*ctrlp = ctrl;
return 0;
+out_remove_ida:
+ ida_simple_remove(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
kfree(ctrl->sqs);
out_free_cqs:
@@ -877,21 +882,22 @@ static void nvmet_ctrl_free(struct kref *ref)
struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
struct nvmet_subsys *subsys = ctrl->subsys;
- nvmet_stop_keep_alive_timer(ctrl);
-
mutex_lock(&subsys->lock);
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
+ nvmet_stop_keep_alive_timer(ctrl);
+
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work);
ida_simple_remove(&cntlid_ida, ctrl->cntlid);
- nvmet_subsys_put(subsys);
kfree(ctrl->sqs);
kfree(ctrl->cqs);
kfree(ctrl);
+
+ nvmet_subsys_put(subsys);
}
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
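The nvmet_ctrl_free() reordering above follows a general teardown rule: drop the reference on the containing object last, because that put may free it. A hedged sketch of the shape (illustrative, not the full function):

	static void example_ctrl_free(struct nvmet_ctrl *ctrl)
	{
		struct nvmet_subsys *subsys = ctrl->subsys;

		/* unlink from the subsystem, stop timers, flush pending work */
		kfree(ctrl->sqs);
		kfree(ctrl->cqs);
		kfree(ctrl);

		nvmet_subsys_put(subsys);	/* last: may free subsys itself */
	}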
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index db3bf6b8bf9e..19e9e42ae943 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -225,7 +225,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out_ctrl_put;
}
- pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+ pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
out:
kfree(d);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 5fd86039e353..9b39a6cb1935 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1697,31 +1697,12 @@ static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
struct scatterlist *sg;
- struct page *page;
unsigned int nent;
- u32 page_len, length;
- int i = 0;
- length = fod->req.transfer_len;
- nent = DIV_ROUND_UP(length, PAGE_SIZE);
- sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
+ sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
if (!sg)
goto out;
- sg_init_table(sg, nent);
-
- while (length) {
- page_len = min_t(u32, length, PAGE_SIZE);
-
- page = alloc_page(GFP_KERNEL);
- if (!page)
- goto out_free_pages;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
-
fod->data_sg = sg;
fod->data_sg_cnt = nent;
fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
@@ -1731,14 +1712,6 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
return 0;
-out_free_pages:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
- fod->data_sg = NULL;
- fod->data_sg_cnt = 0;
out:
return NVME_SC_INTERNAL;
}
@@ -1746,18 +1719,13 @@ out:
static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
- struct scatterlist *sg;
- int count;
-
if (!fod->data_sg || !fod->data_sg_cnt)
return;
fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
((fod->io_dir == NVMET_FCP_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE));
- for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
- __free_page(sg_page(sg));
- kfree(fod->data_sg);
+ sgl_free(fod->data_sg);
fod->data_sg = NULL;
fod->data_sg_cnt = 0;
}
@@ -2522,14 +2490,8 @@ nvmet_fc_add_port(struct nvmet_port *port)
list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
if ((tgtport->fc_target_port.node_name == traddr.nn) &&
(tgtport->fc_target_port.port_name == traddr.pn)) {
- /* a FC port can only be 1 nvmet port id */
- if (!tgtport->port) {
- tgtport->port = port;
- port->priv = tgtport;
- nvmet_fc_tgtport_get(tgtport);
- ret = 0;
- } else
- ret = -EALREADY;
+ tgtport->port = port;
+ ret = 0;
break;
}
}
@@ -2540,19 +2502,7 @@ nvmet_fc_add_port(struct nvmet_port *port)
static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
- struct nvmet_fc_tgtport *tgtport = port->priv;
- unsigned long flags;
- bool matched = false;
-
- spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
- if (tgtport->port == port) {
- matched = true;
- tgtport->port = NULL;
- }
- spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
-
- if (matched)
- nvmet_fc_tgtport_put(tgtport);
+ /* nothing to do */
}
static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
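The fc.c conversion above (and the matching target/rdma.c one further down) replaces an open-coded alloc_page()/sg_set_page() loop with the lib/scatterlist helpers selected via SGL_ALLOC. A hedged sketch of the pairing, with illustrative wrapper names:

	#include <linux/scatterlist.h>

	static struct scatterlist *example_alloc_sgl(u32 length,
						     unsigned int *nent)
	{
		/* allocates the sg table and backing pages in one call */
		return sgl_alloc(length, GFP_KERNEL, nent);
	}

	static void example_free_sgl(struct scatterlist *sg)
	{
		sgl_free(sg);	/* frees the backing pages and the table */
	}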
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 7b75d9de55ab..34712def81b1 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -204,6 +204,10 @@ struct fcloop_lport {
struct completion unreg_done;
};
+struct fcloop_lport_priv {
+ struct fcloop_lport *lport;
+};
+
struct fcloop_rport {
struct nvme_fc_remote_port *remoteport;
struct nvmet_fc_target_port *targetport;
@@ -238,21 +242,32 @@ struct fcloop_lsreq {
int status;
};
+enum {
+ INI_IO_START = 0,
+ INI_IO_ACTIVE = 1,
+ INI_IO_ABORTED = 2,
+ INI_IO_COMPLETED = 3,
+};
+
struct fcloop_fcpreq {
struct fcloop_tport *tport;
struct nvmefc_fcp_req *fcpreq;
spinlock_t reqlock;
u16 status;
+ u32 inistate;
bool active;
bool aborted;
- struct work_struct work;
+ struct kref ref;
+ struct work_struct fcp_rcv_work;
+ struct work_struct abort_rcv_work;
+ struct work_struct tio_done_work;
struct nvmefc_tgt_fcp_req tgt_fcp_req;
};
struct fcloop_ini_fcpreq {
struct nvmefc_fcp_req *fcpreq;
struct fcloop_fcpreq *tfcp_req;
- struct work_struct iniwork;
+ spinlock_t inilock;
};
static inline struct fcloop_lsreq *
@@ -343,17 +358,122 @@ fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
return 0;
}
-/*
- * FCP IO operation done by initiator abort.
- * call back up initiator "done" flows.
- */
static void
-fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
+fcloop_tfcp_req_free(struct kref *ref)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(ref, struct fcloop_fcpreq, ref);
+
+ kfree(tfcp_req);
+}
+
+static void
+fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
+{
+ kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
+}
+
+static int
+fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
+{
+ return kref_get_unless_zero(&tfcp_req->ref);
+}
+
+static void
+fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
+ struct fcloop_fcpreq *tfcp_req, int status)
+{
+ struct fcloop_ini_fcpreq *inireq = NULL;
+
+ if (fcpreq) {
+ inireq = fcpreq->private;
+ spin_lock(&inireq->inilock);
+ inireq->tfcp_req = NULL;
+ spin_unlock(&inireq->inilock);
+
+ fcpreq->status = status;
+ fcpreq->done(fcpreq);
+ }
+
+ /* release original io reference on tgt struct */
+ fcloop_tfcp_req_put(tfcp_req);
+}
+
+static void
+fcloop_fcp_recv_work(struct work_struct *work)
{
- struct fcloop_ini_fcpreq *inireq =
- container_of(work, struct fcloop_ini_fcpreq, iniwork);
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+ struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ int ret = 0;
+ bool aborted = false;
- inireq->fcpreq->done(inireq->fcpreq);
+ spin_lock(&tfcp_req->reqlock);
+ switch (tfcp_req->inistate) {
+ case INI_IO_START:
+ tfcp_req->inistate = INI_IO_ACTIVE;
+ break;
+ case INI_IO_ABORTED:
+ aborted = true;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock(&tfcp_req->reqlock);
+
+ if (unlikely(aborted))
+ ret = -ECANCELED;
+ else
+ ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+ &tfcp_req->tgt_fcp_req,
+ fcpreq->cmdaddr, fcpreq->cmdlen);
+ if (ret)
+ fcloop_call_host_done(fcpreq, tfcp_req, ret);
+
+ return;
+}
+
+static void
+fcloop_fcp_abort_recv_work(struct work_struct *work)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, abort_rcv_work);
+ struct nvmefc_fcp_req *fcpreq;
+ bool completed = false;
+
+ spin_lock(&tfcp_req->reqlock);
+ fcpreq = tfcp_req->fcpreq;
+ switch (tfcp_req->inistate) {
+ case INI_IO_ABORTED:
+ break;
+ case INI_IO_COMPLETED:
+ completed = true;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock(&tfcp_req->reqlock);
+
+ if (unlikely(completed)) {
+ /* remove reference taken in original abort downcall */
+ fcloop_tfcp_req_put(tfcp_req);
+ return;
+ }
+
+ if (tfcp_req->tport->targetport)
+ nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+ &tfcp_req->tgt_fcp_req);
+
+ spin_lock(&tfcp_req->reqlock);
+ tfcp_req->fcpreq = NULL;
+ spin_unlock(&tfcp_req->reqlock);
+
+ fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+ /* call_host_done releases reference for abort downcall */
}
/*
@@ -364,20 +484,15 @@ static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
struct fcloop_fcpreq *tfcp_req =
- container_of(work, struct fcloop_fcpreq, work);
- struct fcloop_tport *tport = tfcp_req->tport;
+ container_of(work, struct fcloop_fcpreq, tio_done_work);
struct nvmefc_fcp_req *fcpreq;
spin_lock(&tfcp_req->reqlock);
fcpreq = tfcp_req->fcpreq;
+ tfcp_req->inistate = INI_IO_COMPLETED;
spin_unlock(&tfcp_req->reqlock);
- if (tport->remoteport && fcpreq) {
- fcpreq->status = tfcp_req->status;
- fcpreq->done(fcpreq);
- }
-
- kfree(tfcp_req);
+ fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
@@ -390,7 +505,6 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
struct fcloop_rport *rport = remoteport->private;
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
struct fcloop_fcpreq *tfcp_req;
- int ret = 0;
if (!rport->targetport)
return -ECONNREFUSED;
@@ -401,16 +515,20 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
inireq->fcpreq = fcpreq;
inireq->tfcp_req = tfcp_req;
- INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
+ spin_lock_init(&inireq->inilock);
+
tfcp_req->fcpreq = fcpreq;
tfcp_req->tport = rport->targetport->private;
+ tfcp_req->inistate = INI_IO_START;
spin_lock_init(&tfcp_req->reqlock);
- INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
+ INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
+ INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
+ INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
+ kref_init(&tfcp_req->ref);
- ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
- fcpreq->cmdaddr, fcpreq->cmdlen);
+ schedule_work(&tfcp_req->fcp_rcv_work);
- return ret;
+ return 0;
}
static void
@@ -589,7 +707,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
- schedule_work(&tfcp_req->work);
+ schedule_work(&tfcp_req->tio_done_work);
}
static void
@@ -605,27 +723,47 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
void *hw_queue_handle,
struct nvmefc_fcp_req *fcpreq)
{
- struct fcloop_rport *rport = remoteport->private;
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
- struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;
+ struct fcloop_fcpreq *tfcp_req;
+ bool abortio = true;
+
+ spin_lock(&inireq->inilock);
+ tfcp_req = inireq->tfcp_req;
+ if (tfcp_req)
+ fcloop_tfcp_req_get(tfcp_req);
+ spin_unlock(&inireq->inilock);
if (!tfcp_req)
/* abort has already been called */
return;
- if (rport->targetport)
- nvmet_fc_rcv_fcp_abort(rport->targetport,
- &tfcp_req->tgt_fcp_req);
-
/* break initiator/target relationship for io */
spin_lock(&tfcp_req->reqlock);
- inireq->tfcp_req = NULL;
- tfcp_req->fcpreq = NULL;
+ switch (tfcp_req->inistate) {
+ case INI_IO_START:
+ case INI_IO_ACTIVE:
+ tfcp_req->inistate = INI_IO_ABORTED;
+ break;
+ case INI_IO_COMPLETED:
+ abortio = false;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
spin_unlock(&tfcp_req->reqlock);
- /* post the aborted io completion */
- fcpreq->status = -ECANCELED;
- schedule_work(&inireq->iniwork);
+ if (abortio)
+ /* leave the reference while the work item is scheduled */
+ WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+ else {
+ /*
+		 * The io has already had its done callback made, so there
+		 * is nothing more to do; just release the reference taken
+		 * above.
+ */
+ fcloop_tfcp_req_put(tfcp_req);
+ }
}
static void
@@ -657,7 +795,8 @@ fcloop_nport_get(struct fcloop_nport *nport)
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
- struct fcloop_lport *lport = localport->private;
+ struct fcloop_lport_priv *lport_priv = localport->private;
+ struct fcloop_lport *lport = lport_priv->lport;
/* release any threads waiting for the unreg to complete */
complete(&lport->unreg_done);
@@ -697,7 +836,7 @@ static struct nvme_fc_port_template fctemplate = {
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
.dma_boundary = FCLOOP_DMABOUND_4G,
/* sizes of additional private data for data structures */
- .local_priv_sz = sizeof(struct fcloop_lport),
+ .local_priv_sz = sizeof(struct fcloop_lport_priv),
.remote_priv_sz = sizeof(struct fcloop_rport),
.lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
.fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
@@ -714,8 +853,7 @@ static struct nvmet_fc_target_template tgttemplate = {
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
.dma_boundary = FCLOOP_DMABOUND_4G,
/* optional features */
- .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
- NVMET_FCTGTFEAT_OPDONE_IN_ISR,
+ .target_features = 0,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct fcloop_tport),
};
@@ -728,11 +866,17 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
struct fcloop_ctrl_options *opts;
struct nvme_fc_local_port *localport;
struct fcloop_lport *lport;
- int ret;
+ struct fcloop_lport_priv *lport_priv;
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ lport = kzalloc(sizeof(*lport), GFP_KERNEL);
+ if (!lport)
+ return -ENOMEM;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
- return -ENOMEM;
+ goto out_free_lport;
ret = fcloop_parse_options(opts, buf);
if (ret)
@@ -752,23 +896,25 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
if (!ret) {
- unsigned long flags;
-
/* success */
- lport = localport->private;
+ lport_priv = localport->private;
+ lport_priv->lport = lport;
+
lport->localport = localport;
INIT_LIST_HEAD(&lport->lport_list);
spin_lock_irqsave(&fcloop_lock, flags);
list_add_tail(&lport->lport_list, &fcloop_lports);
spin_unlock_irqrestore(&fcloop_lock, flags);
-
- /* mark all of the input buffer consumed */
- ret = count;
}
out_free_opts:
kfree(opts);
+out_free_lport:
+ /* free only if we're going to fail */
+ if (ret)
+ kfree(lport);
+
return ret ? ret : count;
}
@@ -790,6 +936,8 @@ __wait_localport_unreg(struct fcloop_lport *lport)
wait_for_completion(&lport->unreg_done);
+ kfree(lport);
+
return ret;
}
@@ -1085,7 +1233,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct fcloop_nport *nport = NULL, *tmpport;
- struct fcloop_tport *tport;
+ struct fcloop_tport *tport = NULL;
u64 nodename, portname;
unsigned long flags;
int ret;
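The fcloop abort path above is an instance of the lookup-with-reference pattern: take a kref on the object under the lock that protects the pointer to it, so a racing completion cannot free it underneath the abort. A hedged sketch; this version checks kref_get_unless_zero()'s return explicitly, which the hunk above relies on implicitly:

	struct fcloop_fcpreq *tfcp_req;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req && !kref_get_unless_zero(&tfcp_req->ref))
		tfcp_req = NULL;	/* already being torn down */
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		return;			/* abort raced with completion */

	/* ... operate on tfcp_req, then drop the reference ... */
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);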
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 1e21b286f299..7991ec3a17db 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -686,6 +686,7 @@ static struct nvmet_fabrics_ops nvme_loop_ops = {
static struct nvmf_transport_ops nvme_loop_transport = {
.name = "loop",
+ .module = THIS_MODULE,
.create_ctrl = nvme_loop_create_ctrl,
};
@@ -716,7 +717,7 @@ static void __exit nvme_loop_cleanup_module(void)
nvme_delete_ctrl(&ctrl->ctrl);
mutex_unlock(&nvme_loop_ctrl_mutex);
- flush_workqueue(nvme_wq);
+ flush_workqueue(nvme_delete_wq);
}
module_init(nvme_loop_init_module);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 49912909c298..978e169c11bf 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -185,59 +185,6 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}
-static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
-{
- struct scatterlist *sg;
- int count;
-
- if (!sgl || !nents)
- return;
-
- for_each_sg(sgl, sg, nents, count)
- __free_page(sg_page(sg));
- kfree(sgl);
-}
-
-static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
- u32 length)
-{
- struct scatterlist *sg;
- struct page *page;
- unsigned int nent;
- int i = 0;
-
- nent = DIV_ROUND_UP(length, PAGE_SIZE);
- sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
- if (!sg)
- goto out;
-
- sg_init_table(sg, nent);
-
- while (length) {
- u32 page_len = min_t(u32, length, PAGE_SIZE);
-
- page = alloc_page(GFP_KERNEL);
- if (!page)
- goto out_free_pages;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
- *sgl = sg;
- *nents = nent;
- return 0;
-
-out_free_pages:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
-out:
- return NVME_SC_INTERNAL;
-}
-
static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *c, bool admin)
{
@@ -484,7 +431,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
}
if (rsp->req.sg != &rsp->cmd->inline_sg)
- nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);
+ sgl_free(rsp->req.sg);
if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
nvmet_rdma_process_wr_wait_list(queue);
@@ -621,16 +568,14 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
u32 len = get_unaligned_le24(sgl->length);
u32 key = get_unaligned_le32(sgl->key);
int ret;
- u16 status;
/* no data command? */
if (!len)
return 0;
- status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
- len);
- if (status)
- return status;
+ rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
+ if (!rsp->req.sg)
+ return NVME_SC_INTERNAL;
ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
@@ -976,7 +921,7 @@ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
- pr_info("freeing queue %d\n", queue->idx);
+ pr_debug("freeing queue %d\n", queue->idx);
nvmet_sq_destroy(&queue->nvme_sq);
@@ -1558,25 +1503,9 @@ err_ib_client:
static void __exit nvmet_rdma_exit(void)
{
- struct nvmet_rdma_queue *queue;
-
nvmet_unregister_transport(&nvmet_rdma_ops);
-
- flush_scheduled_work();
-
- mutex_lock(&nvmet_rdma_queue_mutex);
- while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
- struct nvmet_rdma_queue, queue_list))) {
- list_del_init(&queue->queue_list);
-
- mutex_unlock(&nvmet_rdma_queue_mutex);
- __nvmet_rdma_queue_disconnect(queue);
- mutex_lock(&nvmet_rdma_queue_mutex);
- }
- mutex_unlock(&nvmet_rdma_queue_mutex);
-
- flush_scheduled_work();
ib_unregister_client(&nvmet_rdma_ib_client);
+ WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
ida_destroy(&nvmet_rdma_queue_ida);
}
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
index a346b4923550..41d3a3c1104e 100644
--- a/drivers/nvmem/meson-mx-efuse.c
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -156,8 +156,8 @@ static int meson_mx_efuse_read(void *context, unsigned int offset,
MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE,
MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE);
- for (i = offset; i < offset + bytes; i += efuse->config.word_size) {
- addr = i / efuse->config.word_size;
+ for (i = 0; i < bytes; i += efuse->config.word_size) {
+ addr = (offset + i) / efuse->config.word_size;
err = meson_mx_efuse_read_addr(efuse, addr, &tmp);
if (err)
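The meson-mx-efuse fix above makes the loop counter buffer-relative: the word address is derived from offset + i, while i itself indexes the caller's buffer. The destination copy is not visible in this hunk, so the following sketch of the corrected arithmetic is an inference from the loop change:

	for (i = 0; i < bytes; i += word_size) {
		addr = (offset + i) / word_size;	/* absolute word index */
		/* read word 'addr', copy word_size bytes to buf + i */
	}

The old form iterated i from offset to offset + bytes, so any use of i as a buffer index overran the caller's buffer for non-zero offsets.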
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 26618ba8f92a..a9d6fe86585b 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -316,6 +316,32 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
EXPORT_SYMBOL(of_get_cpu_node);
/**
+ * of_cpu_node_to_id: Get the logical CPU number for a given device_node
+ *
+ * @cpu_node: Pointer to the device_node for CPU.
+ *
+ * Returns the logical CPU number of the given CPU device_node.
+ * Returns -ENODEV if the CPU is not found.
+ */
+int of_cpu_node_to_id(struct device_node *cpu_node)
+{
+ int cpu;
+ bool found = false;
+ struct device_node *np;
+
+ for_each_possible_cpu(cpu) {
+ np = of_cpu_device_node_get(cpu);
+ found = (cpu_node == np);
+ of_node_put(np);
+ if (found)
+ return cpu;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(of_cpu_node_to_id);
+
+/**
* __of_device_is_compatible() - Check if the node matches given constraints
* @device: pointer to node
* @compat: required compatible string, NULL or "" for any match
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 98258583abb0..8c0c92712fc9 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -77,10 +77,16 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
if (of_property_read_bool(child, "broken-turn-around"))
mdio->phy_ignore_ta_mask |= 1 << addr;
+ of_property_read_u32(child, "reset-assert-us",
+ &phy->mdio.reset_assert_delay);
+ of_property_read_u32(child, "reset-deassert-us",
+ &phy->mdio.reset_deassert_delay);
+
/* Associate the OF node with the device structure so it
* can be looked up later */
of_node_get(child);
phy->mdio.dev.of_node = child;
+ phy->mdio.dev.fwnode = of_fwnode_handle(child);
/* All data is now stored in the phy struct;
* register it */
@@ -111,6 +117,7 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
*/
of_node_get(child);
mdiodev->dev.of_node = child;
+ mdiodev->dev.fwnode = of_fwnode_handle(child);
/* All data is now stored in the mdiodev struct; register it. */
rc = mdio_device_register(mdiodev);
@@ -206,6 +213,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
mdio->phy_mask = ~0;
mdio->dev.of_node = np;
+ mdio->dev.fwnode = of_fwnode_handle(np);
/* Get bus level PHY reset GPIO details */
mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY;
@@ -228,7 +236,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
rc = of_mdiobus_register_phy(mdio, child, addr);
else
rc = of_mdiobus_register_device(mdio, child, addr);
- if (rc)
+
+ if (rc == -ENODEV)
+ dev_err(&mdio->dev,
+ "MDIO device at address %d is missing.\n",
+ addr);
+ else if (rc)
goto unregister;
}
@@ -252,7 +265,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
if (of_mdiobus_child_is_phy(child)) {
rc = of_mdiobus_register_phy(mdio, child, addr);
- if (rc)
+ if (rc && rc != -ENODEV)
goto unregister;
}
}
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 8ad33a44a7b8..f25d36358187 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -981,10 +981,18 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return 0;
}
+static void *
+of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
+ const struct device *dev)
+{
+ return (void *)of_device_get_match_data(dev);
+}
+
const struct fwnode_operations of_fwnode_ops = {
.get = of_fwnode_get,
.put = of_fwnode_put,
.device_is_available = of_fwnode_device_is_available,
+ .device_get_match_data = of_fwnode_device_get_match_data,
.property_present = of_fwnode_property_present,
.property_read_int_array = of_fwnode_property_read_int_array,
.property_read_string_array = of_fwnode_property_read_string_array,
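With .device_get_match_data wired into of_fwnode_ops above, a driver can fetch its per-compatible data through the fwnode layer without caring whether it was probed via DT or ACPI. A hedged usage sketch; "my_chip_data" is a hypothetical type:

	#include <linux/property.h>

	static int example_probe(struct platform_device *pdev)
	{
		const struct my_chip_data *data;

		data = device_get_match_data(&pdev->dev);
		if (!data)
			return -ENODEV;

		/* ... configure the device from data ... */
		return 0;
	}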
diff --git a/drivers/opp/Makefile b/drivers/opp/Makefile
index e70ceb406fe9..6ce6aefacc81 100644
--- a/drivers/opp/Makefile
+++ b/drivers/opp/Makefile
@@ -2,3 +2,4 @@ ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
obj-y += core.o cpu.o
obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-opp-supply.o
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
new file mode 100644
index 000000000000..370eff3acd8a
--- /dev/null
+++ b/drivers/opp/ti-opp-supply.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon <nm@ti.com>
+ * Dave Gerlach <d-gerlach@ti.com>
+ *
+ * TI OPP supply driver that provides override into the regulator control
+ * for generic opp core to handle devices with ABB regulator and/or
+ * SmartReflex Class0.
+ */
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+/**
+ * struct ti_opp_supply_optimum_voltage_table - optimized voltage table
+ * @reference_uv: reference voltage (usually Nominal voltage)
+ * @optimized_uv: Optimized voltage from efuse
+ */
+struct ti_opp_supply_optimum_voltage_table {
+ unsigned int reference_uv;
+ unsigned int optimized_uv;
+};
+
+/**
+ * struct ti_opp_supply_data - OMAP specific opp supply data
+ * @vdd_table: Optimized voltage mapping table
+ * @num_vdd_table: number of entries in vdd_table
+ * @vdd_absolute_max_voltage_uv: absolute maximum voltage in UV for the supply
+ */
+struct ti_opp_supply_data {
+ struct ti_opp_supply_optimum_voltage_table *vdd_table;
+ u32 num_vdd_table;
+ u32 vdd_absolute_max_voltage_uv;
+};
+
+static struct ti_opp_supply_data opp_data;
+
+/**
+ * struct ti_opp_supply_of_data - device tree match data
+ * @flags: specific type of opp supply
+ * @efuse_voltage_mask: mask required for efuse register representing voltage
+ * @efuse_voltage_uv: Are the efuse entries in micro-volts? If not, assume
+ * milli-volts.
+ */
+struct ti_opp_supply_of_data {
+#define OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE BIT(1)
+#define OPPDM_HAS_NO_ABB BIT(2)
+ const u8 flags;
+ const u32 efuse_voltage_mask;
+ const bool efuse_voltage_uv;
+};
+
+/**
+ * _store_optimized_voltages() - store optimized voltages
+ * @dev: ti opp supply device for which we need to store info
+ * @data: data specific to the device
+ *
+ * Picks up efuse based optimized voltages for VDD unique per device and
+ * stores it in internal data structure for use during transition requests.
+ *
+ * Return: If successful, 0, else appropriate error value.
+ */
+static int _store_optimized_voltages(struct device *dev,
+ struct ti_opp_supply_data *data)
+{
+ void __iomem *base;
+ struct property *prop;
+ struct resource *res;
+ const __be32 *val;
+ int proplen, i;
+ int ret = 0;
+ struct ti_opp_supply_optimum_voltage_table *table;
+ const struct ti_opp_supply_of_data *of_data = dev_get_drvdata(dev);
+
+ /* pick up Efuse based voltages */
+ res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Unable to get IO resource\n");
+ ret = -ENODEV;
+ goto out_map;
+ }
+
+ base = ioremap_nocache(res->start, resource_size(res));
+ if (!base) {
+ dev_err(dev, "Unable to map Efuse registers\n");
+ ret = -ENOMEM;
+ goto out_map;
+ }
+
+ /* Fetch efuse-settings. */
+ prop = of_find_property(dev->of_node, "ti,efuse-settings", NULL);
+ if (!prop) {
+ dev_err(dev, "No 'ti,efuse-settings' property found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ proplen = prop->length / sizeof(int);
+ data->num_vdd_table = proplen / 2;
+	/* Check for corrupted OPP entries in DT */
+ if (data->num_vdd_table * 2 * sizeof(int) != prop->length) {
+ dev_err(dev, "Invalid 'ti,efuse-settings'\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "ti,absolute-max-voltage-uv",
+ &data->vdd_absolute_max_voltage_uv);
+ if (ret) {
+ dev_err(dev, "ti,absolute-max-voltage-uv is missing\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ table = kzalloc(sizeof(*data->vdd_table) *
+ data->num_vdd_table, GFP_KERNEL);
+ if (!table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ data->vdd_table = table;
+
+ val = prop->value;
+ for (i = 0; i < data->num_vdd_table; i++, table++) {
+ u32 efuse_offset;
+ u32 tmp;
+
+ table->reference_uv = be32_to_cpup(val++);
+ efuse_offset = be32_to_cpup(val++);
+
+ tmp = readl(base + efuse_offset);
+ tmp &= of_data->efuse_voltage_mask;
+ tmp >>= __ffs(of_data->efuse_voltage_mask);
+
+ table->optimized_uv = of_data->efuse_voltage_uv ? tmp :
+ tmp * 1000;
+
+ dev_dbg(dev, "[%d] efuse=0x%08x volt_table=%d vset=%d\n",
+ i, efuse_offset, table->reference_uv,
+ table->optimized_uv);
+
+ /*
+		 * Some older samples might not have an optimized efuse.
+		 * Use the reference voltage for those - just add a debug
+		 * message for them.
+ */
+ if (!table->optimized_uv) {
+ dev_dbg(dev, "[%d] efuse=0x%08x volt_table=%d:vset0\n",
+ i, efuse_offset, table->reference_uv);
+ table->optimized_uv = table->reference_uv;
+ }
+ }
+out:
+ iounmap(base);
+out_map:
+ return ret;
+}
+
+/**
+ * _free_optimized_voltages() - free resources for optvoltages
+ * @dev: device for which we need to free info
+ * @data: data specific to the device
+ */
+static void _free_optimized_voltages(struct device *dev,
+ struct ti_opp_supply_data *data)
+{
+ kfree(data->vdd_table);
+ data->vdd_table = NULL;
+ data->num_vdd_table = 0;
+}
+
+/**
+ * _get_optimal_vdd_voltage() - Finds optimal voltage for the supply
+ * @dev: device for which we need to find info
+ * @data: data specific to the device
+ * @reference_uv: reference voltage (OPP voltage) for which we need value
+ *
+ * Return: the optimized voltage if a match is found; otherwise
+ * reference_uv (also returned when no optimization is needed).
+ */
+static int _get_optimal_vdd_voltage(struct device *dev,
+ struct ti_opp_supply_data *data,
+ int reference_uv)
+{
+ int i;
+ struct ti_opp_supply_optimum_voltage_table *table;
+
+ if (!data->num_vdd_table)
+ return reference_uv;
+
+ table = data->vdd_table;
+ if (!table)
+ return -EINVAL;
+
+	/* Find an exact match - this list is usually very small */
+ for (i = 0; i < data->num_vdd_table; i++, table++)
+ if (table->reference_uv == reference_uv)
+ return table->optimized_uv;
+
+	/* If things are screwed up, we'd make a mess on the console - ratelimit */
+ dev_err_ratelimited(dev, "%s: Failed optimized voltage match for %d\n",
+ __func__, reference_uv);
+ return reference_uv;
+}
+
+static int _opp_set_voltage(struct device *dev,
+ struct dev_pm_opp_supply *supply,
+ int new_target_uv, struct regulator *reg,
+ char *reg_name)
+{
+ int ret;
+ unsigned long vdd_uv, uv_max;
+
+ if (new_target_uv)
+ vdd_uv = new_target_uv;
+ else
+ vdd_uv = supply->u_volt;
+
+ /*
+ * If we do have an absolute max voltage specified, then we should
+ * use that voltage instead to allow for cases where the voltage rails
+	 * are ganged (example: if we set the max for an OPP as 1.12v and
+	 * the absolute max is 1.5v, another rail cannot reach 1.25v when
+	 * the regulator is constrained to a max of 1.12v, even though it
+	 * can function at 1.25v).
+ */
+ if (opp_data.vdd_absolute_max_voltage_uv)
+ uv_max = opp_data.vdd_absolute_max_voltage_uv;
+ else
+ uv_max = supply->u_volt_max;
+
+ if (vdd_uv > uv_max ||
+ vdd_uv < supply->u_volt_min ||
+ supply->u_volt_min > uv_max) {
+ dev_warn(dev,
+ "Invalid range voltages [Min:%lu target:%lu Max:%lu]\n",
+ supply->u_volt_min, vdd_uv, uv_max);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s scaling to %luuV[min %luuV max %luuV]\n", reg_name,
+ vdd_uv, supply->u_volt_min,
+ uv_max);
+
+ ret = regulator_set_voltage_triplet(reg,
+ supply->u_volt_min,
+ vdd_uv,
+ uv_max);
+ if (ret) {
+ dev_err(dev, "%s failed for %luuV[min %luuV max %luuV]\n",
+ reg_name, vdd_uv, supply->u_volt_min,
+ uv_max);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ti_opp_supply_set_opp() - do the opp supply transition
+ * @data: information on regulators and new and old opps provided by
+ * opp core to use in transition
+ *
+ * Return: If successful, 0, else appropriate error value.
+ */
+static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
+{
+ struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
+ struct dev_pm_opp_supply *old_supply_vbb = &data->old_opp.supplies[1];
+ struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
+ struct dev_pm_opp_supply *new_supply_vbb = &data->new_opp.supplies[1];
+ struct device *dev = data->dev;
+ unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
+ struct clk *clk = data->clk;
+ struct regulator *vdd_reg = data->regulators[0];
+ struct regulator *vbb_reg = data->regulators[1];
+ int vdd_uv;
+ int ret;
+
+ vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
+ new_supply_vbb->u_volt);
+
+ /* Scaling up? Scale voltage before frequency */
+ if (freq > old_freq) {
+ ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
+ "vdd");
+ if (ret)
+ goto restore_voltage;
+
+ ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
+ if (ret)
+ goto restore_voltage;
+ }
+
+ /* Change frequency */
+ dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
+ __func__, old_freq, freq);
+
+ ret = clk_set_rate(clk, freq);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ goto restore_voltage;
+ }
+
+ /* Scaling down? Scale voltage after frequency */
+ if (freq < old_freq) {
+ ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
+ if (ret)
+ goto restore_freq;
+
+ ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
+ "vdd");
+ if (ret)
+ goto restore_freq;
+ }
+
+ return 0;
+
+restore_freq:
+ if (clk_set_rate(clk, old_freq))
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+ __func__, old_freq);
+restore_voltage:
+ /* This shouldn't harm even if the voltages weren't updated earlier */
+ if (old_supply_vdd->u_volt) {
+ /* Restore failures are already logged by _opp_set_voltage() */
+ _opp_set_voltage(dev, old_supply_vbb, 0, vbb_reg, "vbb");
+ _opp_set_voltage(dev, old_supply_vdd, 0, vdd_reg, "vdd");
+ }
+
+ /* Report the original failure, not the status of the restore path */
+ return ret;
+}
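+
+/*
+ * Transition ordering implemented above, with illustrative numbers only:
+ * going from 1.0 GHz@1.06V to 1.5 GHz@1.21V raises vdd and then vbb
+ * before touching the clock, while the reverse transition drops the
+ * clock first and only then lowers vbb and vdd, so the rails never run
+ * below what the currently programmed frequency requires.
+ */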
+
+static const struct ti_opp_supply_of_data omap_generic_of_data = {
+};
+
+static const struct ti_opp_supply_of_data omap_omap5_of_data = {
+ .flags = OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE,
+ .efuse_voltage_mask = 0xFFF,
+ .efuse_voltage_uv = false,
+};
+
+static const struct ti_opp_supply_of_data omap_omap5core_of_data = {
+ .flags = OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE | OPPDM_HAS_NO_ABB,
+ .efuse_voltage_mask = 0xFFF,
+ .efuse_voltage_uv = false,
+};
+
+static const struct of_device_id ti_opp_supply_of_match[] = {
+ {.compatible = "ti,omap-opp-supply", .data = &omap_generic_of_data},
+ {.compatible = "ti,omap5-opp-supply", .data = &omap_omap5_of_data},
+ {.compatible = "ti,omap5-core-opp-supply",
+ .data = &omap_omap5core_of_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_opp_supply_of_match);
+
+static int ti_opp_supply_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *cpu_dev = get_cpu_device(0);
+ const struct of_device_id *match;
+ const struct ti_opp_supply_of_data *of_data;
+ int ret = 0;
+
+ match = of_match_device(ti_opp_supply_of_match, dev);
+ if (!match) {
+ /* We do not expect this to happen */
+ dev_err(dev, "%s: Unable to match device\n", __func__);
+ return -ENODEV;
+ }
+ if (!match->data) {
+ /* Again, unlikely... but mistakes do happen */
+ dev_err(dev, "%s: Bad data in match\n", __func__);
+ return -EINVAL;
+ }
+ of_data = match->data;
+
+ dev_set_drvdata(dev, (void *)of_data);
+
+ /* If we need optimized voltage */
+ if (of_data->flags & OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE) {
+ ret = _store_optimized_voltages(dev, &opp_data);
+ if (ret)
+ return ret;
+ }
+
+ ret = PTR_ERR_OR_ZERO(dev_pm_opp_register_set_opp_helper(cpu_dev,
+ ti_opp_supply_set_opp));
+ if (ret)
+ _free_optimized_voltages(dev, &opp_data);
+
+ return ret;
+}
+
+static struct platform_driver ti_opp_supply_driver = {
+ .probe = ti_opp_supply_probe,
+ .driver = {
+ .name = "ti_opp_supply",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ti_opp_supply_of_match),
+ },
+};
+module_platform_driver(ti_opp_supply_driver);
+
+MODULE_DESCRIPTION("Texas Instruments OMAP OPP Supply driver");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 0b3fb99d9b89..7390fb8ca9d1 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -303,7 +303,7 @@ static void dino_mask_irq(struct irq_data *d)
struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
- DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
+ DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
/* Clear the matching bit in the IMR register */
dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
@@ -316,7 +316,7 @@ static void dino_unmask_irq(struct irq_data *d)
int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
- DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
+ DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
/*
** clear pending IRQ bits
@@ -396,7 +396,7 @@ ilr_again:
if (mask) {
if (--ilr_loop > 0)
goto ilr_again;
- printk(KERN_ERR "Dino 0x%p: stuck interrupt %d\n",
+ printk(KERN_ERR "Dino 0x%px: stuck interrupt %d\n",
dino_dev->hba.base_addr, mask);
return IRQ_NONE;
}
@@ -553,7 +553,7 @@ dino_fixup_bus(struct pci_bus *bus)
struct pci_dev *dev;
struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
- DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n",
+ DBG(KERN_WARNING "%s(0x%px) bus %d platform_data 0x%px\n",
__func__, bus, bus->busn_res.start,
bus->bridge->platform_data);
@@ -854,7 +854,7 @@ static int __init dino_common_init(struct parisc_device *dev,
res->flags = IORESOURCE_IO; /* do not mark it busy ! */
if (request_resource(&ioport_resource, res) < 0) {
printk(KERN_ERR "%s: request I/O Port region failed "
- "0x%lx/%lx (hpa 0x%p)\n",
+ "0x%lx/%lx (hpa 0x%px)\n",
name, (unsigned long)res->start, (unsigned long)res->end,
dino_dev->hba.base_addr);
return 1;
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 4dd9b1308128..99a80da6fd2e 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -106,7 +106,7 @@ static int __init eisa_eeprom_init(void)
return retval;
}
- printk(KERN_INFO "EISA EEPROM at 0x%p\n", eisa_eeprom_addr);
+ printk(KERN_INFO "EISA EEPROM at 0x%px\n", eisa_eeprom_addr);
return 0;
}
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index a25fed52f7e9..41b740aed3a3 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1692,3 +1692,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
iounmap(base_addr);
}
+
+/*
+ * The design of the Diva management card in the rp34x0 machines (rp3410,
+ * rp3440) seems rushed: many built-in components simply don't work.
+ * The following quirks disable the serial AUX port and the built-in ATI RV100
+ * Radeon 7000 graphics card, both of which lack external connectors and are
+ * therefore useless. Worse, the AUX port occupies ttyS0, which makes these
+ * the only PA-RISC machines on which ttyS0 cannot be used as the boot
+ * console.
+ */
+static void quirk_diva_ati_card(struct pci_dev *dev)
+{
+ if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+ dev->subsystem_device != 0x1292)
+ return;
+
+ dev_info(&dev->dev, "Hiding Diva built-in ATI card");
+ dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
+ quirk_diva_ati_card);
+
+static void quirk_diva_aux_disable(struct pci_dev *dev)
+{
+ if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+ dev->subsystem_device != 0x1291)
+ return;
+
+ dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
+ dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
+ quirk_diva_aux_disable);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 04dac6a42c9f..6b8d060d07de 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -985,9 +985,7 @@ static u32 hv_compose_msi_req_v1(
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode =
- (apic->irq_delivery_mode == dest_LowestPrio) ?
- dest_LowestPrio : dest_Fixed;
+ int_pkt->int_desc.delivery_mode = dest_Fixed;
/*
* Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
@@ -1008,9 +1006,7 @@ static u32 hv_compose_msi_req_v2(
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode =
- (apic->irq_delivery_mode == dest_LowestPrio) ?
- dest_LowestPrio : dest_Fixed;
+ int_pkt->int_desc.delivery_mode = dest_Fixed;
/*
* Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 12796eccb2be..52ab3cb0a0bf 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -1128,12 +1128,12 @@ static int rcar_pcie_probe(struct platform_device *pdev)
err = rcar_pcie_get_resources(pcie);
if (err < 0) {
dev_err(dev, "failed to request resources: %d\n", err);
- goto err_free_bridge;
+ goto err_free_resource_list;
}
err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
if (err)
- goto err_free_bridge;
+ goto err_free_resource_list;
pm_runtime_enable(dev);
err = pm_runtime_get_sync(dev);
@@ -1176,9 +1176,9 @@ err_pm_put:
err_pm_disable:
pm_runtime_disable(dev);
-err_free_bridge:
- pci_free_host_bridge(bridge);
+err_free_resource_list:
pci_free_resource_list(&pcie->resources);
+ pci_free_host_bridge(bridge);
return err;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 7f47bb72bf30..5958c8dda4e3 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -699,7 +699,7 @@ static void pci_pm_complete(struct device *dev)
pm_generic_complete(dev);
/* Resume device if platform firmware has put it in reset-power-on */
- if (dev->power.direct_complete && pm_resume_via_firmware()) {
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
pci_power_t pre_sleep_state = pci_dev->current_state;
pci_update_current_state(pci_dev, pci_dev->current_state);
@@ -783,8 +783,10 @@ static int pci_pm_suspend_noirq(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.may_skip_resume = true;
return 0;
+ }
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
@@ -838,6 +840,16 @@ static int pci_pm_suspend_noirq(struct device *dev)
Fixup:
pci_fixup_device(pci_fixup_suspend_late, pci_dev);
+ /*
+ * If the target system sleep state is suspend-to-idle, it is sufficient
+ * to check whether or not the device's wakeup settings are good for
+ * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
+ * pci_pm_complete() to take care of fixing up the device's state
+ * anyway, if need be.
+ */
+ dev->power.may_skip_resume = device_may_wakeup(dev) ||
+ !device_can_wakeup(dev);
+
return 0;
}
@@ -847,6 +859,9 @@ static int pci_pm_resume_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ if (dev_pm_may_skip_resume(dev))
+ return 0;
+
/*
* Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
* during system suspend, so update their runtime PM status to "active"
@@ -953,7 +968,7 @@ static int pci_pm_freeze_late(struct device *dev)
if (dev_pm_smart_suspend_and_suspended(dev))
return 0;
- return pm_generic_freeze_late(dev);;
+ return pm_generic_freeze_late(dev);
}
static int pci_pm_freeze_noirq(struct device *dev)
@@ -999,7 +1014,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
* the subsequent "thaw" callbacks for the device.
*/
if (dev_pm_smart_suspend_and_suspended(dev)) {
- dev->power.direct_complete = true;
+ dev_pm_skip_next_resume_phases(dev);
return 0;
}
@@ -1012,7 +1027,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
- pci_update_current_state(pci_dev, PCI_D0);
+ /*
+ * pci_restore_state() requires the device to be in D0 (because of MSI
+ * restoration among other things), so force it into D0 in case the
+ * driver's "freeze" callbacks put it into a low-power state directly.
+ */
+ pci_set_power_state(pci_dev, PCI_D0);
pci_restore_state(pci_dev);
if (drv && drv->pm && drv->pm->thaw_noirq)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4a7c6864fdf4..764ca7b8840d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1458,6 +1458,7 @@ struct pci_devres {
unsigned int pinned:1;
unsigned int orig_intx:1;
unsigned int restore_intx:1;
+ unsigned int mwi:1;
u32 region_mask;
};
@@ -1476,6 +1477,9 @@ static void pcim_release(struct device *gendev, void *res)
if (this->region_mask & (1 << i))
pci_release_region(dev, i);
+ if (this->mwi)
+ pci_clear_mwi(dev);
+
if (this->restore_intx)
pci_intx(dev, this->orig_intx);
@@ -3761,6 +3765,27 @@ int pci_set_mwi(struct pci_dev *dev)
EXPORT_SYMBOL(pci_set_mwi);
/**
+ * pcim_set_mwi - a device-managed pci_set_mwi()
+ * @dev: the PCI device for which MWI is enabled
+ *
+ * Managed pci_set_mwi().
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int pcim_set_mwi(struct pci_dev *dev)
+{
+ struct pci_devres *dr;
+
+ dr = find_pci_dr(dev);
+ if (!dr)
+ return -ENOMEM;
+
+ dr->mwi = 1;
+ return pci_set_mwi(dev);
+}
+EXPORT_SYMBOL(pcim_set_mwi);
+
+/**
* pci_try_set_mwi - enables memory-write-invalidate PCI transaction
* @dev: the PCI device for which MWI is enabled
*
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index ffbf4e723527..fb1c1bb87316 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -150,6 +150,9 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
pci_save_state(dev);
+ dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED);
+
if (pci_bridge_d3_possible(dev)) {
/*
* Keep the port resumed 100ms to make sure things like
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 730cc897b94d..fab143a17f9c 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -510,11 +510,11 @@ out:
return -EBADMSG;
}
-static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait)
+static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
struct switchtec_user *stuser = filp->private_data;
struct switchtec_dev *stdev = stuser->stdev;
- int ret = 0;
+ __poll_t ret = 0;
poll_wait(filp, &stuser->comp.wait, wait);
poll_wait(filp, &stdev->event_wq, wait);
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index b8f44b068fc6..da5724cd89cf 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -17,6 +17,15 @@ config ARM_PMU_ACPI
depends on ARM_PMU && ACPI
def_bool y
+config ARM_DSU_PMU
+ tristate "ARM DynamIQ Shared Unit (DSU) PMU"
+ depends on ARM64
+ help
+ Provides support for the performance monitor unit in the ARM DynamIQ
+ Shared Unit (DSU). The DSU integrates one or more cores with an L3
+ memory system and control logic. The PMU allows counting various
+ events related to the DSU.
+
config HISI_PMU
bool "HiSilicon SoC PMU"
depends on ARM64 && ACPI
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 710a0135bd61..c2f27419bdf0 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
new file mode 100644
index 000000000000..93c50e377507
--- /dev/null
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -0,0 +1,843 @@
+/*
+ * ARM DynamIQ Shared Unit (DSU) PMU driver
+ *
+ * Copyright (C) ARM Limited, 2017.
+ *
+ * Based on ARM CCI-PMU, ARMv8 PMU-v3 drivers.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#define PMUNAME "arm_dsu"
+#define DRVNAME PMUNAME "_pmu"
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include <asm/arm_dsu_pmu.h>
+#include <asm/local64.h>
+
+/* PMU event codes */
+#define DSU_PMU_EVT_CYCLES 0x11
+#define DSU_PMU_EVT_CHAIN 0x1e
+
+#define DSU_PMU_MAX_COMMON_EVENTS 0x40
+
+#define DSU_PMU_MAX_HW_CNTRS 32
+#define DSU_PMU_HW_COUNTER_MASK (DSU_PMU_MAX_HW_CNTRS - 1)
+
+#define CLUSTERPMCR_E BIT(0)
+#define CLUSTERPMCR_P BIT(1)
+#define CLUSTERPMCR_C BIT(2)
+#define CLUSTERPMCR_N_SHIFT 11
+#define CLUSTERPMCR_N_MASK 0x1f
+#define CLUSTERPMCR_IDCODE_SHIFT 16
+#define CLUSTERPMCR_IDCODE_MASK 0xff
+#define CLUSTERPMCR_IMP_SHIFT 24
+#define CLUSTERPMCR_IMP_MASK 0xff
+#define CLUSTERPMCR_RES_MASK 0x7e8
+#define CLUSTERPMCR_RES_VAL 0x40
+
+#define DSU_ACTIVE_CPU_MASK 0x0
+#define DSU_ASSOCIATED_CPU_MASK 0x1
+
+/*
+ * We use the index of the counters as they appear in the counter
+ * bit maps in the PMU registers (e.g. CLUSTERPMSELR),
+ * i.e.:
+ * counter 0 - Bit 0
+ * counter 1 - Bit 1
+ * ...
+ * Cycle counter - Bit 31
+ */
+#define DSU_PMU_IDX_CYCLE_COUNTER 31
+
+/* All event counters are 32bit, with a 64bit Cycle counter */
+#define DSU_PMU_COUNTER_WIDTH(idx) \
+ (((idx) == DSU_PMU_IDX_CYCLE_COUNTER) ? 64 : 32)
+
+#define DSU_PMU_COUNTER_MASK(idx) \
+ GENMASK_ULL((DSU_PMU_COUNTER_WIDTH((idx)) - 1), 0)
+
+#define DSU_EXT_ATTR(_name, _func, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { \
+ .attr = __ATTR(_name, 0444, _func, NULL), \
+ .var = (void *)_config \
+ } \
+ })[0].attr.attr)
+
+#define DSU_EVENT_ATTR(_name, _config) \
+ DSU_EXT_ATTR(_name, dsu_pmu_sysfs_event_show, (unsigned long)_config)
+
+#define DSU_FORMAT_ATTR(_name, _config) \
+ DSU_EXT_ATTR(_name, dsu_pmu_sysfs_format_show, (char *)_config)
+
+#define DSU_CPUMASK_ATTR(_name, _config) \
+ DSU_EXT_ATTR(_name, dsu_pmu_cpumask_show, (unsigned long)_config)
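+
+/*
+ * For illustration: DSU_EVENT_ATTR(cycles, 0x11) expands to an anonymous
+ * one-element dev_ext_attribute array whose ->var carries 0x11, so the
+ * sysfs "cycles" file prints "event=0x11" via dsu_pmu_sysfs_event_show().
+ */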
+
+struct dsu_hw_events {
+ DECLARE_BITMAP(used_mask, DSU_PMU_MAX_HW_CNTRS);
+ struct perf_event *events[DSU_PMU_MAX_HW_CNTRS];
+};
+
+/*
+ * struct dsu_pmu - DSU PMU descriptor
+ *
+ * @pmu_lock : Protects accesses to DSU PMU register from normal vs
+ * interrupt handler contexts.
+ * @hw_events : Holds the event counter state.
+ * @associated_cpus : CPUs attached to the DSU.
+ * @active_cpu : CPU to which the PMU is bound for accesses.
+ * @cpuhp_node : Node for CPU hotplug notifier link.
+ * @num_counters : Number of event counters implemented by the PMU,
+ * excluding the cycle counter.
+ * @irq : Interrupt line for counter overflow.
+ * @cpmceid_bitmap : Bitmap for the availability of architected common
+ * events (event_code < 0x40).
+ */
+struct dsu_pmu {
+ struct pmu pmu;
+ struct device *dev;
+ raw_spinlock_t pmu_lock;
+ struct dsu_hw_events hw_events;
+ cpumask_t associated_cpus;
+ cpumask_t active_cpu;
+ struct hlist_node cpuhp_node;
+ s8 num_counters;
+ int irq;
+ DECLARE_BITMAP(cpmceid_bitmap, DSU_PMU_MAX_COMMON_EVENTS);
+};
+
+static unsigned long dsu_pmu_cpuhp_state;
+
+static inline struct dsu_pmu *to_dsu_pmu(struct pmu *pmu)
+{
+ return container_of(pmu, struct dsu_pmu, pmu);
+}
+
+static ssize_t dsu_pmu_sysfs_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+ return snprintf(buf, PAGE_SIZE, "event=0x%lx\n",
+ (unsigned long)eattr->var);
+}
+
+static ssize_t dsu_pmu_sysfs_format_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+ return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
+}
+
+static ssize_t dsu_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+ unsigned long mask_id = (unsigned long)eattr->var;
+ const cpumask_t *cpumask;
+
+ switch (mask_id) {
+ case DSU_ACTIVE_CPU_MASK:
+ cpumask = &dsu_pmu->active_cpu;
+ break;
+ case DSU_ASSOCIATED_CPU_MASK:
+ cpumask = &dsu_pmu->associated_cpus;
+ break;
+ default:
+ return 0;
+ }
+ return cpumap_print_to_pagebuf(true, buf, cpumask);
+}
+
+static struct attribute *dsu_pmu_format_attrs[] = {
+ DSU_FORMAT_ATTR(event, "config:0-31"),
+ NULL,
+};
+
+static const struct attribute_group dsu_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = dsu_pmu_format_attrs,
+};
+
+static struct attribute *dsu_pmu_event_attrs[] = {
+ DSU_EVENT_ATTR(cycles, 0x11),
+ DSU_EVENT_ATTR(bus_access, 0x19),
+ DSU_EVENT_ATTR(memory_error, 0x1a),
+ DSU_EVENT_ATTR(bus_cycles, 0x1d),
+ DSU_EVENT_ATTR(l3d_cache_allocate, 0x29),
+ DSU_EVENT_ATTR(l3d_cache_refill, 0x2a),
+ DSU_EVENT_ATTR(l3d_cache, 0x2b),
+ DSU_EVENT_ATTR(l3d_cache_wb, 0x2c),
+ NULL,
+};
+
+static umode_t
+dsu_pmu_event_attr_is_visible(struct kobject *kobj, struct attribute *attr,
+ int unused)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr.attr);
+ unsigned long evt = (unsigned long)eattr->var;
+
+ return test_bit(evt, dsu_pmu->cpmceid_bitmap) ? attr->mode : 0;
+}
+
+static const struct attribute_group dsu_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = dsu_pmu_event_attrs,
+ .is_visible = dsu_pmu_event_attr_is_visible,
+};
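+
+/*
+ * Once the PMU is registered, the attribute groups above appear under
+ * /sys/bus/event_source/devices/<pmu>/ and can be exercised with e.g.
+ * "perf stat -a -e arm_dsu_0/cycles/ sleep 1" (the "arm_dsu_0" instance
+ * name is only an example; the index is assigned at probe time below).
+ */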
+
+static struct attribute *dsu_pmu_cpumask_attrs[] = {
+ DSU_CPUMASK_ATTR(cpumask, DSU_ACTIVE_CPU_MASK),
+ DSU_CPUMASK_ATTR(associated_cpus, DSU_ASSOCIATED_CPU_MASK),
+ NULL,
+};
+
+static const struct attribute_group dsu_pmu_cpumask_attr_group = {
+ .attrs = dsu_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *dsu_pmu_attr_groups[] = {
+ &dsu_pmu_cpumask_attr_group,
+ &dsu_pmu_events_attr_group,
+ &dsu_pmu_format_attr_group,
+ NULL,
+};
+
+static int dsu_pmu_get_online_cpu_any_but(struct dsu_pmu *dsu_pmu, int cpu)
+{
+ struct cpumask online_supported;
+
+ cpumask_and(&online_supported,
+ &dsu_pmu->associated_cpus, cpu_online_mask);
+ return cpumask_any_but(&online_supported, cpu);
+}
+
+static inline bool dsu_pmu_counter_valid(struct dsu_pmu *dsu_pmu, u32 idx)
+{
+ return (idx < dsu_pmu->num_counters) ||
+ (idx == DSU_PMU_IDX_CYCLE_COUNTER);
+}
+
+static inline u64 dsu_pmu_read_counter(struct perf_event *event)
+{
+ u64 val;
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ int idx = event->hw.idx;
+
+ if (WARN_ON(!cpumask_test_cpu(smp_processor_id(),
+ &dsu_pmu->associated_cpus)))
+ return 0;
+
+ if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
+ dev_err(event->pmu->dev,
+ "Trying reading invalid counter %d\n", idx);
+ return 0;
+ }
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
+ val = __dsu_pmu_read_pmccntr();
+ else
+ val = __dsu_pmu_read_counter(idx);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+
+ return val;
+}
+
+static void dsu_pmu_write_counter(struct perf_event *event, u64 val)
+{
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ int idx = event->hw.idx;
+
+ if (WARN_ON(!cpumask_test_cpu(smp_processor_id(),
+ &dsu_pmu->associated_cpus)))
+ return;
+
+ if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
+ dev_err(event->pmu->dev,
+ "writing to invalid counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
+ __dsu_pmu_write_pmccntr(val);
+ else
+ __dsu_pmu_write_counter(idx, val);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static int dsu_pmu_get_event_idx(struct dsu_hw_events *hw_events,
+ struct perf_event *event)
+{
+ int idx;
+ unsigned long evtype = event->attr.config;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ unsigned long *used_mask = hw_events->used_mask;
+
+ if (evtype == DSU_PMU_EVT_CYCLES) {
+ if (test_and_set_bit(DSU_PMU_IDX_CYCLE_COUNTER, used_mask))
+ return -EAGAIN;
+ return DSU_PMU_IDX_CYCLE_COUNTER;
+ }
+
+ idx = find_first_zero_bit(used_mask, dsu_pmu->num_counters);
+ if (idx >= dsu_pmu->num_counters)
+ return -EAGAIN;
+ set_bit(idx, hw_events->used_mask);
+ return idx;
+}
+
+static void dsu_pmu_enable_counter(struct dsu_pmu *dsu_pmu, int idx)
+{
+ __dsu_pmu_counter_interrupt_enable(idx);
+ __dsu_pmu_enable_counter(idx);
+}
+
+static void dsu_pmu_disable_counter(struct dsu_pmu *dsu_pmu, int idx)
+{
+ __dsu_pmu_disable_counter(idx);
+ __dsu_pmu_counter_interrupt_disable(idx);
+}
+
+static inline void dsu_pmu_set_event(struct dsu_pmu *dsu_pmu,
+ struct perf_event *event)
+{
+ int idx = event->hw.idx;
+ unsigned long flags;
+
+ if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
+ dev_err(event->pmu->dev,
+ "Trying to set invalid counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ __dsu_pmu_set_event(idx, event->hw.config_base);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static void dsu_pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 delta, prev_count, new_count;
+
+ do {
+ /* We may also be called from the irq handler */
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = dsu_pmu_read_counter(event);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_count, new_count) !=
+ prev_count);
+ delta = (new_count - prev_count) & DSU_PMU_COUNTER_MASK(hwc->idx);
+ local64_add(delta, &event->count);
+}
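+
+/*
+ * The cmpxchg loop above makes the update safe against the overflow
+ * handler: if the interrupt updates prev_count between our read and the
+ * cmpxchg, the exchange fails and both counts are simply re-read.
+ */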
+
+static void dsu_pmu_read(struct perf_event *event)
+{
+ dsu_pmu_event_update(event);
+}
+
+static inline u32 dsu_pmu_get_reset_overflow(void)
+{
+ return __dsu_pmu_get_reset_overflow();
+}
+
+/**
+ * dsu_pmu_set_event_period: Set the period for the counter.
+ *
+ * All DSU PMU event counters, except the cycle counter, are 32bit
+ * counters. To handle cases of extreme interrupt latency, we program
+ * the counters with half of the maximum count.
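+ * For example, a 32-bit counter is programmed to 0x7fffffff, leaving
+ * roughly 2^31 increments of headroom before the overflow interrupt.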
+ */
+static void dsu_pmu_set_event_period(struct perf_event *event)
+{
+ int idx = event->hw.idx;
+ u64 val = DSU_PMU_COUNTER_MASK(idx) >> 1;
+
+ local64_set(&event->hw.prev_count, val);
+ dsu_pmu_write_counter(event, val);
+}
+
+static irqreturn_t dsu_pmu_handle_irq(int irq_num, void *dev)
+{
+ int i;
+ bool handled = false;
+ struct dsu_pmu *dsu_pmu = dev;
+ struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
+ unsigned long overflow;
+
+ overflow = dsu_pmu_get_reset_overflow();
+ if (!overflow)
+ return IRQ_NONE;
+
+ for_each_set_bit(i, &overflow, DSU_PMU_MAX_HW_CNTRS) {
+ struct perf_event *event = hw_events->events[i];
+
+ if (!event)
+ continue;
+ dsu_pmu_event_update(event);
+ dsu_pmu_set_event_period(event);
+ handled = true;
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static void dsu_pmu_start(struct perf_event *event, int pmu_flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+
+ /* We always reprogram the counter */
+ if (pmu_flags & PERF_EF_RELOAD)
+ WARN_ON(!(event->hw.state & PERF_HES_UPTODATE));
+ dsu_pmu_set_event_period(event);
+ if (event->hw.idx != DSU_PMU_IDX_CYCLE_COUNTER)
+ dsu_pmu_set_event(dsu_pmu, event);
+ event->hw.state = 0;
+ dsu_pmu_enable_counter(dsu_pmu, event->hw.idx);
+}
+
+static void dsu_pmu_stop(struct perf_event *event, int pmu_flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+ dsu_pmu_disable_counter(dsu_pmu, event->hw.idx);
+ dsu_pmu_event_update(event);
+ event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int dsu_pmu_add(struct perf_event *event, int flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ if (WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),
+ &dsu_pmu->associated_cpus)))
+ return -ENOENT;
+
+ idx = dsu_pmu_get_event_idx(hw_events, event);
+ if (idx < 0)
+ return idx;
+
+ hwc->idx = idx;
+ hw_events->events[idx] = event;
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (flags & PERF_EF_START)
+ dsu_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+ return 0;
+}
+
+static void dsu_pmu_del(struct perf_event *event, int flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ dsu_pmu_stop(event, PERF_EF_UPDATE);
+ hw_events->events[idx] = NULL;
+ clear_bit(idx, hw_events->used_mask);
+ perf_event_update_userpage(event);
+}
+
+static void dsu_pmu_enable(struct pmu *pmu)
+{
+ u32 pmcr;
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+
+ /* If no counters are added, skip enabling the PMU */
+ if (bitmap_empty(dsu_pmu->hw_events.used_mask, DSU_PMU_MAX_HW_CNTRS))
+ return;
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ pmcr = __dsu_pmu_read_pmcr();
+ pmcr |= CLUSTERPMCR_E;
+ __dsu_pmu_write_pmcr(pmcr);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static void dsu_pmu_disable(struct pmu *pmu)
+{
+ u32 pmcr;
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ pmcr = __dsu_pmu_read_pmcr();
+ pmcr &= ~CLUSTERPMCR_E;
+ __dsu_pmu_write_pmcr(pmcr);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static bool dsu_pmu_validate_event(struct pmu *pmu,
+ struct dsu_hw_events *hw_events,
+ struct perf_event *event)
+{
+ if (is_software_event(event))
+ return true;
+ /* Reject groups spanning multiple HW PMUs. */
+ if (event->pmu != pmu)
+ return false;
+ return dsu_pmu_get_event_idx(hw_events, event) >= 0;
+}
+
+/*
+ * Make sure the group of events can be scheduled at once
+ * on the PMU.
+ */
+static bool dsu_pmu_validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct dsu_hw_events fake_hw;
+
+ if (event->group_leader == event)
+ return true;
+
+ memset(fake_hw.used_mask, 0, sizeof(fake_hw.used_mask));
+ if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader))
+ return false;
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling))
+ return false;
+ }
+ return dsu_pmu_validate_event(event->pmu, &fake_hw, event);
+}
+
+static int dsu_pmu_event_init(struct perf_event *event)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* We don't support sampling */
+ if (is_sampling_event(event)) {
+ dev_dbg(dsu_pmu->pmu.dev, "Can't support sampling events\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* We cannot support task bound events */
+ if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
+ dev_dbg(dsu_pmu->pmu.dev, "Can't support per-task counters\n");
+ return -EINVAL;
+ }
+
+ if (has_branch_stack(event) ||
+ event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest) {
+ dev_dbg(dsu_pmu->pmu.dev, "Can't support filtering\n");
+ return -EINVAL;
+ }
+
+ if (!cpumask_test_cpu(event->cpu, &dsu_pmu->associated_cpus)) {
+ dev_dbg(dsu_pmu->pmu.dev,
+ "Requested cpu is not associated with the DSU\n");
+ return -EINVAL;
+ }
+ /*
+ * Choose the current active CPU to read the events. We don't want
+ * to migrate the event contexts, irq handling etc to the requested
+ * CPU. As long as the requested CPU is within the same DSU, we
+ * are fine.
+ */
+ event->cpu = cpumask_first(&dsu_pmu->active_cpu);
+ if (event->cpu >= nr_cpu_ids)
+ return -EINVAL;
+ if (!dsu_pmu_validate_group(event))
+ return -EINVAL;
+
+ event->hw.config_base = event->attr.config;
+ return 0;
+}
+
+static struct dsu_pmu *dsu_pmu_alloc(struct platform_device *pdev)
+{
+ struct dsu_pmu *dsu_pmu;
+
+ dsu_pmu = devm_kzalloc(&pdev->dev, sizeof(*dsu_pmu), GFP_KERNEL);
+ if (!dsu_pmu)
+ return ERR_PTR(-ENOMEM);
+
+ raw_spin_lock_init(&dsu_pmu->pmu_lock);
+ /*
+ * Initialise the number of counters to -1, until we probe
+ * the real number on a connected CPU.
+ */
+ dsu_pmu->num_counters = -1;
+ return dsu_pmu;
+}
+
+/**
+ * dsu_pmu_dt_get_cpus: Get the list of CPUs in the cluster.
+ */
+static int dsu_pmu_dt_get_cpus(struct device_node *dev, cpumask_t *mask)
+{
+ int i = 0, n, cpu;
+ struct device_node *cpu_node;
+
+ n = of_count_phandle_with_args(dev, "cpus", NULL);
+ if (n <= 0)
+ return -ENODEV;
+ for (; i < n; i++) {
+ cpu_node = of_parse_phandle(dev, "cpus", i);
+ if (!cpu_node)
+ break;
+ cpu = of_cpu_node_to_id(cpu_node);
+ of_node_put(cpu_node);
+ /*
+ * We have to ignore the failures here and continue scanning
+ * the list to handle cases where the nr_cpus could be capped
+ * in the running kernel.
+ */
+ if (cpu < 0)
+ continue;
+ cpumask_set_cpu(cpu, mask);
+ }
+ return 0;
+}
+
+/*
+ * dsu_pmu_probe_pmu: Probe the PMU details on a CPU in the cluster.
+ */
+static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu)
+{
+ u64 num_counters;
+ u32 cpmceid[2];
+
+ num_counters = (__dsu_pmu_read_pmcr() >> CLUSTERPMCR_N_SHIFT) &
+ CLUSTERPMCR_N_MASK;
+ /* We can only support up to 31 independent counters */
+ if (WARN_ON(num_counters > 31))
+ num_counters = 31;
+ dsu_pmu->num_counters = num_counters;
+ if (!dsu_pmu->num_counters)
+ return;
+ cpmceid[0] = __dsu_pmu_read_pmceid(0);
+ cpmceid[1] = __dsu_pmu_read_pmceid(1);
+ bitmap_from_u32array(dsu_pmu->cpmceid_bitmap,
+ DSU_PMU_MAX_COMMON_EVENTS,
+ cpmceid,
+ ARRAY_SIZE(cpmceid));
+}
+
+static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu)
+{
+ cpumask_set_cpu(cpu, &dsu_pmu->active_cpu);
+ if (irq_set_affinity_hint(dsu_pmu->irq, &dsu_pmu->active_cpu))
+ pr_warn("Failed to set irq affinity to %d\n", cpu);
+}
+
+/*
+ * dsu_pmu_init_pmu: Initialise the DSU PMU configurations if
+ * we haven't done it already.
+ */
+static void dsu_pmu_init_pmu(struct dsu_pmu *dsu_pmu)
+{
+ if (dsu_pmu->num_counters == -1)
+ dsu_pmu_probe_pmu(dsu_pmu);
+ /* Reset the interrupt overflow mask */
+ dsu_pmu_get_reset_overflow();
+}
+
+static int dsu_pmu_device_probe(struct platform_device *pdev)
+{
+ int irq, rc;
+ struct dsu_pmu *dsu_pmu;
+ char *name;
+ static atomic_t pmu_idx = ATOMIC_INIT(-1);
+
+ dsu_pmu = dsu_pmu_alloc(pdev);
+ if (IS_ERR(dsu_pmu))
+ return PTR_ERR(dsu_pmu);
+
+ rc = dsu_pmu_dt_get_cpus(pdev->dev.of_node, &dsu_pmu->associated_cpus);
+ if (rc) {
+ dev_warn(&pdev->dev, "Failed to parse the CPUs\n");
+ return rc;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_warn(&pdev->dev, "Failed to find IRQ\n");
+ return -EINVAL;
+ }
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
+ PMUNAME, atomic_inc_return(&pmu_idx));
+ if (!name)
+ return -ENOMEM;
+ rc = devm_request_irq(&pdev->dev, irq, dsu_pmu_handle_irq,
+ IRQF_NOBALANCING, name, dsu_pmu);
+ if (rc) {
+ dev_warn(&pdev->dev, "Failed to request IRQ %d\n", irq);
+ return rc;
+ }
+
+ dsu_pmu->irq = irq;
+ platform_set_drvdata(pdev, dsu_pmu);
+ rc = cpuhp_state_add_instance(dsu_pmu_cpuhp_state,
+ &dsu_pmu->cpuhp_node);
+ if (rc)
+ return rc;
+
+ dsu_pmu->pmu = (struct pmu) {
+ .task_ctx_nr = perf_invalid_context,
+ .module = THIS_MODULE,
+ .pmu_enable = dsu_pmu_enable,
+ .pmu_disable = dsu_pmu_disable,
+ .event_init = dsu_pmu_event_init,
+ .add = dsu_pmu_add,
+ .del = dsu_pmu_del,
+ .start = dsu_pmu_start,
+ .stop = dsu_pmu_stop,
+ .read = dsu_pmu_read,
+
+ .attr_groups = dsu_pmu_attr_groups,
+ };
+
+ rc = perf_pmu_register(&dsu_pmu->pmu, name, -1);
+ if (rc) {
+ cpuhp_state_remove_instance(dsu_pmu_cpuhp_state,
+ &dsu_pmu->cpuhp_node);
+ irq_set_affinity_hint(dsu_pmu->irq, NULL);
+ }
+
+ return rc;
+}
+
+static int dsu_pmu_device_remove(struct platform_device *pdev)
+{
+ struct dsu_pmu *dsu_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&dsu_pmu->pmu);
+ cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node);
+ irq_set_affinity_hint(dsu_pmu->irq, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id dsu_pmu_of_match[] = {
+ { .compatible = "arm,dsu-pmu", },
+ {},
+};
+
+static struct platform_driver dsu_pmu_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .of_match_table = of_match_ptr(dsu_pmu_of_match),
+ },
+ .probe = dsu_pmu_device_probe,
+ .remove = dsu_pmu_device_remove,
+};
+
+static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
+ cpuhp_node);
+
+ if (!cpumask_test_cpu(cpu, &dsu_pmu->associated_cpus))
+ return 0;
+
+ /* If the PMU is already managed, there is nothing to do */
+ if (!cpumask_empty(&dsu_pmu->active_cpu))
+ return 0;
+
+ dsu_pmu_init_pmu(dsu_pmu);
+ dsu_pmu_set_active_cpu(cpu, dsu_pmu);
+
+ return 0;
+}
+
+static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+{
+ int dst;
+ struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
+ cpuhp_node);
+
+ if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu))
+ return 0;
+
+ dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu);
+ /* If there are no active CPUs in the DSU, leave IRQ disabled */
+ if (dst >= nr_cpu_ids) {
+ irq_set_affinity_hint(dsu_pmu->irq, NULL);
+ return 0;
+ }
+
+ perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
+ dsu_pmu_set_active_cpu(dst, dsu_pmu);
+
+ return 0;
+}
+
+static int __init dsu_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ DRVNAME,
+ dsu_pmu_cpu_online,
+ dsu_pmu_cpu_teardown);
+ if (ret < 0)
+ return ret;
+ dsu_pmu_cpuhp_state = ret;
+ return platform_driver_register(&dsu_pmu_driver);
+}
+
+static void __exit dsu_pmu_exit(void)
+{
+ platform_driver_unregister(&dsu_pmu_driver);
+ cpuhp_remove_multi_state(dsu_pmu_cpuhp_state);
+}
+
+module_init(dsu_pmu_init);
+module_exit(dsu_pmu_exit);
+
+MODULE_DEVICE_TABLE(of, dsu_pmu_of_match);
+MODULE_DESCRIPTION("Perf driver for ARM DynamIQ Shared Unit");
+MODULE_AUTHOR("Suzuki K Poulose <suzuki.poulose@arm.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 91b224eced18..46501cc79fd7 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -82,19 +82,10 @@ static int pmu_parse_irq_affinity(struct device_node *node, int i)
return -EINVAL;
}
- /* Now look up the logical CPU number */
- for_each_possible_cpu(cpu) {
- struct device_node *cpu_dn;
-
- cpu_dn = of_cpu_device_node_get(cpu);
- of_node_put(cpu_dn);
-
- if (dn == cpu_dn)
- break;
- }
-
- if (cpu >= nr_cpu_ids) {
+ cpu = of_cpu_node_to_id(dn);
+ if (cpu < 0) {
pr_warn("failed to find logical CPU for %s\n", dn->name);
+ cpu = nr_cpu_ids;
}
of_node_put(dn);
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 8ce262fc2561..51b40aecb776 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1164,6 +1164,15 @@ static int arm_spe_pmu_device_dt_probe(struct platform_device *pdev)
struct arm_spe_pmu *spe_pmu;
struct device *dev = &pdev->dev;
+ /*
+ * If kernelspace is unmapped when running at EL0, then the SPE
+ * buffer will fault and prematurely terminate the AUX session.
+ */
+ if (arm64_kernel_unmapped_at_el0()) {
+ dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n");
+ return -EPERM;
+ }
+
spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
if (!spe_pmu) {
dev_err(dev, "failed to allocate spe_pmu\n");
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 3f953db70288..8708ea3b4d6d 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -150,6 +150,9 @@ enum sata_phy_regs {
TXPMD_TX_FREQ_CTRL_CONTROL2_FMIN_MASK = 0x3ff,
TXPMD_TX_FREQ_CTRL_CONTROL3 = 0x84,
TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK = 0x3ff,
+
+ RXPMD_REG_BANK = 0x1c0,
+ RXPMD_RX_FREQ_MON_CONTROL1 = 0x87,
};
enum sata_phy_ctrl_regs {
@@ -505,8 +508,36 @@ static int brcm_sata_phy_init(struct phy *phy)
return rc;
}
+static void brcm_stb_sata_calibrate(struct brcm_sata_port *port)
+{
+ void __iomem *base = brcm_sata_pcb_base(port);
+ u32 tmp = BIT(8);
+
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
+ ~tmp, tmp);
+}
+
+static int brcm_sata_phy_calibrate(struct phy *phy)
+{
+ struct brcm_sata_port *port = phy_get_drvdata(phy);
+ int rc = -EOPNOTSUPP;
+
+ switch (port->phy_priv->version) {
+ case BRCM_SATA_PHY_STB_28NM:
+ case BRCM_SATA_PHY_STB_40NM:
+ brcm_stb_sata_calibrate(port);
+ rc = 0;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
static const struct phy_ops phy_ops = {
.init = brcm_sata_phy_init,
+ .calibrate = brcm_sata_phy_calibrate,
.owner = THIS_MODULE,
};
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index accaaaccb662..6601ad0dfb3a 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -310,7 +310,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev,
int irq, error;
irq = platform_get_irq_byname(pdev, name);
- if (!irq)
+ if (irq < 0)
return -ENODEV;
error = devm_request_threaded_irq(ddata->dev, irq, NULL,
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index b4964b067aec..8f6e8e28996d 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -410,6 +410,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
if (ret)
return ERR_PTR(-ENODEV);
+ /* This phy type is handled by the usb-phy subsystem for now */
+ if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
+ return ERR_PTR(-ENODEV);
+
mutex_lock(&phy_provider_mutex);
phy_provider = of_phy_provider_lookup(args.np);
if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
diff --git a/drivers/phy/renesas/Kconfig b/drivers/phy/renesas/Kconfig
index cb09245e9b4c..c845facacb06 100644
--- a/drivers/phy/renesas/Kconfig
+++ b/drivers/phy/renesas/Kconfig
@@ -12,7 +12,9 @@ config PHY_RCAR_GEN3_USB2
tristate "Renesas R-Car generation 3 USB 2.0 PHY driver"
depends on ARCH_RENESAS
depends on EXTCON
+ depends on USB_SUPPORT
select GENERIC_PHY
+ select USB_COMMON
help
Support for USB 2.0 PHY found on Renesas R-Car generation 3 SoCs.
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index ee85fa0ca4b0..7492c8978217 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1137,6 +1137,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
if (IS_ERR(phy)) {
dev_err(dev, "failed to create phy: %s\n",
child_np->name);
+ pm_runtime_disable(dev);
return PTR_ERR(phy);
}
@@ -1146,6 +1147,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
dev_err(dev, "Failed to register phy provider\n");
+ pm_runtime_disable(dev);
return PTR_ERR(phy_provider);
}
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 4307bf0013e1..63e916d4d069 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -75,14 +75,14 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
static struct device_node *
tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
{
- /*
- * of_find_node_by_name() drops a reference, so make sure to grab one.
- */
- struct device_node *np = of_node_get(padctl->dev->of_node);
+ struct device_node *pads, *np;
+
+ pads = of_get_child_by_name(padctl->dev->of_node, "pads");
+ if (!pads)
+ return NULL;
- np = of_find_node_by_name(np, "pads");
- if (np)
- np = of_find_node_by_name(np, name);
+ np = of_get_child_by_name(pads, name);
+ of_node_put(pads);
return np;
}
@@ -90,16 +90,16 @@ tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
static struct device_node *
tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index)
{
- /*
- * of_find_node_by_name() drops a reference, so make sure to grab one.
- */
- struct device_node *np = of_node_get(pad->dev.of_node);
+ struct device_node *np, *lanes;
- np = of_find_node_by_name(np, "lanes");
- if (!np)
+ lanes = of_get_child_by_name(pad->dev.of_node, "lanes");
+ if (!lanes)
return NULL;
- return of_find_node_by_name(np, pad->soc->lanes[index].name);
+ np = of_get_child_by_name(lanes, pad->soc->lanes[index].name);
+ of_node_put(lanes);
+
+ return np;
}
static int
@@ -195,7 +195,7 @@ int tegra_xusb_pad_register(struct tegra_xusb_pad *pad,
unsigned int i;
int err;
- children = of_find_node_by_name(pad->dev.of_node, "lanes");
+ children = of_get_child_by_name(pad->dev.of_node, "lanes");
if (!children)
return -ENODEV;
@@ -444,21 +444,21 @@ static struct device_node *
tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
unsigned int index)
{
- /*
- * of_find_node_by_name() drops a reference, so make sure to grab one.
- */
- struct device_node *np = of_node_get(padctl->dev->of_node);
+ struct device_node *ports, *np;
+ char *name;
- np = of_find_node_by_name(np, "ports");
- if (np) {
- char *name;
+ ports = of_get_child_by_name(padctl->dev->of_node, "ports");
+ if (!ports)
+ return NULL;
- name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
- if (!name)
- return ERR_PTR(-ENOMEM);
- np = of_find_node_by_name(np, name);
- kfree(name);
+ name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
+ if (!name) {
+ of_node_put(ports);
+ return ERR_PTR(-ENOMEM);
}
+ np = of_get_child_by_name(ports, name);
+ kfree(name);
+ of_node_put(ports);
return np;
}
@@ -847,7 +847,7 @@ static void tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
static int tegra_xusb_padctl_probe(struct platform_device *pdev)
{
- struct device_node *np = of_node_get(pdev->dev.of_node);
+ struct device_node *np = pdev->dev.of_node;
const struct tegra_xusb_padctl_soc *soc;
struct tegra_xusb_padctl *padctl;
const struct of_device_id *match;
@@ -855,7 +855,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
int err;
/* for backwards compatibility with old device trees */
- np = of_find_node_by_name(np, "pads");
+ np = of_get_child_by_name(np, "pads");
if (!np) {
dev_warn(&pdev->dev, "deprecated DT, using legacy driver\n");
return tegra_xusb_padctl_legacy_probe(pdev);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 4571cc098b76..ce126955212c 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -63,6 +63,16 @@ config PINCTRL_AS3722
open drain configuration for the GPIO pins of AS3722 devices. It also
supports the GPIO functionality through gpiolib.
+config PINCTRL_AXP209
+ tristate "X-Powers AXP209 PMIC pinctrl and GPIO Support"
+ depends on MFD_AXP20X
+ help
+ AXP PMICs provide multiple GPIOs that can be muxed for different
+ functions. This driver bundles a pinctrl driver to select the
+ function muxing and a GPIO driver to handle the GPIOs when the GPIO
+ function is selected.
+ Say yes to enable pinctrl and GPIO support for the AXP209 PMIC.
+
config PINCTRL_BF54x
def_bool y if BF54x
select PINCTRL_ADI2
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index d0d4844f8022..4777f1595ce2 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_GENERIC_PINCONF) += pinconf-generic.o
obj-$(CONFIG_PINCTRL_ADI2) += pinctrl-adi2.o
obj-$(CONFIG_PINCTRL_ARTPEC6) += pinctrl-artpec6.o
obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o
+obj-$(CONFIG_PINCTRL_AXP209) += pinctrl-axp209.o
obj-$(CONFIG_PINCTRL_BF54x) += pinctrl-adi2-bf54x.o
obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index bdedb6325c72..4471fd94e1fe 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1620,6 +1620,22 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
clear_bit(i, chip->irq.valid_mask);
}
+ /*
+ * The same set of machines in chv_no_valid_mask[] have incorrectly
+ * configured GPIOs that generate spurious interrupts so we use
+ * this same list to apply another quirk for them.
+ *
+ * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953.
+ */
+ if (!need_valid_mask) {
+ /*
+ * Mask all interrupts the community is able to generate
+ * but leave the ones that can only generate GPEs unmasked.
+ */
+ chv_writel(GENMASK(31, pctrl->community->nirqs),
+ pctrl->regs + CHV_INTMASK);
+ }
+
/* Clear all interrupts */
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
new file mode 100644
index 000000000000..22d3bb0bf927
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -0,0 +1,476 @@
+/*
+ * AXP20x pinctrl and GPIO driver
+ *
+ * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ * Copyright (C) 2017 Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define AXP20X_GPIO_FUNCTIONS 0x7
+#define AXP20X_GPIO_FUNCTION_OUT_LOW 0
+#define AXP20X_GPIO_FUNCTION_OUT_HIGH 1
+#define AXP20X_GPIO_FUNCTION_INPUT 2
+
+#define AXP20X_FUNC_GPIO_OUT 0
+#define AXP20X_FUNC_GPIO_IN 1
+#define AXP20X_FUNC_LDO 2
+#define AXP20X_FUNC_ADC 3
+#define AXP20X_FUNCS_NB 4
+
+#define AXP20X_MUX_GPIO_OUT 0
+#define AXP20X_MUX_GPIO_IN BIT(1)
+#define AXP20X_MUX_ADC BIT(2)
+
+#define AXP813_MUX_ADC (BIT(2) | BIT(0))
+
+struct axp20x_pctrl_desc {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ /* Stores the pins supporting LDO function. Bit offset is pin number. */
+ u8 ldo_mask;
+ /* Stores the pins supporting ADC function. Bit offset is pin number. */
+ u8 adc_mask;
+ u8 gpio_status_offset;
+ u8 adc_mux;
+};
+
+struct axp20x_pinctrl_function {
+ const char *name;
+ unsigned int muxval;
+ const char **groups;
+ unsigned int ngroups;
+};
+
+struct axp20x_pctl {
+ struct gpio_chip chip;
+ struct regmap *regmap;
+ struct pinctrl_dev *pctl_dev;
+ struct device *dev;
+ const struct axp20x_pctrl_desc *desc;
+ struct axp20x_pinctrl_function funcs[AXP20X_FUNCS_NB];
+};
+
+static const struct pinctrl_pin_desc axp209_pins[] = {
+ PINCTRL_PIN(0, "GPIO0"),
+ PINCTRL_PIN(1, "GPIO1"),
+ PINCTRL_PIN(2, "GPIO2"),
+};
+
+static const struct pinctrl_pin_desc axp813_pins[] = {
+ PINCTRL_PIN(0, "GPIO0"),
+ PINCTRL_PIN(1, "GPIO1"),
+};
+
+static const struct axp20x_pctrl_desc axp20x_data = {
+ .pins = axp209_pins,
+ .npins = ARRAY_SIZE(axp209_pins),
+ .ldo_mask = BIT(0) | BIT(1),
+ .adc_mask = BIT(0) | BIT(1),
+ .gpio_status_offset = 4,
+ .adc_mux = AXP20X_MUX_ADC,
+};
+
+static const struct axp20x_pctrl_desc axp813_data = {
+ .pins = axp813_pins,
+ .npins = ARRAY_SIZE(axp813_pins),
+ .ldo_mask = BIT(0) | BIT(1),
+ .adc_mask = BIT(0),
+ .gpio_status_offset = 0,
+ .adc_mux = AXP813_MUX_ADC,
+};
+
+static int axp20x_gpio_get_reg(unsigned int offset)
+{
+ switch (offset) {
+ case 0:
+ return AXP20X_GPIO0_CTRL;
+ case 1:
+ return AXP20X_GPIO1_CTRL;
+ case 2:
+ return AXP20X_GPIO2_CTRL;
+ }
+
+ return -EINVAL;
+}
+
+static int axp20x_gpio_input(struct gpio_chip *chip, unsigned int offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int axp20x_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct axp20x_pctl *pctl = gpiochip_get_data(chip);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(pctl->regmap, AXP20X_GPIO20_SS, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(offset + pctl->desc->gpio_status_offset));
+}
+
+static int axp20x_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct axp20x_pctl *pctl = gpiochip_get_data(chip);
+ unsigned int val;
+ int reg, ret;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return reg;
+
+ ret = regmap_read(pctl->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * This shouldn't really happen if the pin is already in use;
+ * and if it's not in use yet, it doesn't matter since we're
+ * going to change the value soon anyway. Default to output.
+ */
+ if ((val & AXP20X_GPIO_FUNCTIONS) > 2)
+ return 0;
+
+ /*
+ * The three lowest function values select the GPIO direction:
+ * 2 is input, 0 and 1 are output.
+ */
+ return val & 2;
+}
+
+static int axp20x_gpio_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ chip->set(chip, offset, value);
+
+ return 0;
+}
+
+static void axp20x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct axp20x_pctl *pctl = gpiochip_get_data(chip);
+ int reg;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return;
+
+ regmap_update_bits(pctl->regmap, reg,
+ AXP20X_GPIO_FUNCTIONS,
+ value ? AXP20X_GPIO_FUNCTION_OUT_HIGH :
+ AXP20X_GPIO_FUNCTION_OUT_LOW);
+}
+
+static int axp20x_pmx_set(struct pinctrl_dev *pctldev, unsigned int offset,
+ u8 config)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ int reg;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return reg;
+
+ return regmap_update_bits(pctl->regmap, reg, AXP20X_GPIO_FUNCTIONS,
+ config);
+}
+
+static int axp20x_pmx_func_cnt(struct pinctrl_dev *pctldev)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return ARRAY_SIZE(pctl->funcs);
+}
+
+static const char *axp20x_pmx_func_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->funcs[selector].name;
+}
+
+static int axp20x_pmx_func_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int *num_groups)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctl->funcs[selector].groups;
+ *num_groups = pctl->funcs[selector].ngroups;
+
+ return 0;
+}
+
+static int axp20x_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function, unsigned int group)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int mask;
+
+ /* Every pin supports GPIO_OUT and GPIO_IN functions */
+ if (function <= AXP20X_FUNC_GPIO_IN)
+ return axp20x_pmx_set(pctldev, group,
+ pctl->funcs[function].muxval);
+
+ if (function == AXP20X_FUNC_LDO)
+ mask = pctl->desc->ldo_mask;
+ else
+ mask = pctl->desc->adc_mask;
+
+ if (!(BIT(group) & mask))
+ return -EINVAL;
+
+ /*
+ * We let the regulator framework handle the LDO muxing as muxing bits
+ * are basically also regulators on/off bits. It's better not to enforce
+ * any state of the regulator when selecting LDO mux so that we don't
+ * interfere with the regulator driver.
+ */
+ if (function == AXP20X_FUNC_LDO)
+ return 0;
+
+ return axp20x_pmx_set(pctldev, group, pctl->funcs[function].muxval);
+}
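+
+/*
+ * For illustration with the AXP209 data above: selecting the "adc"
+ * function on group 0 passes muxval BIT(2) to axp20x_pmx_set(), which
+ * writes it into the function bits of AXP20X_GPIO0_CTRL, while asking
+ * for "adc" on a pin outside adc_mask fails with -EINVAL before any
+ * register access.
+ */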
+
+static int axp20x_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset, bool input)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ if (input)
+ return axp20x_pmx_set(pctldev, offset,
+ pctl->funcs[AXP20X_FUNC_GPIO_IN].muxval);
+
+ return axp20x_pmx_set(pctldev, offset,
+ pctl->funcs[AXP20X_FUNC_GPIO_OUT].muxval);
+}
+
+static const struct pinmux_ops axp20x_pmx_ops = {
+ .get_functions_count = axp20x_pmx_func_cnt,
+ .get_function_name = axp20x_pmx_func_name,
+ .get_function_groups = axp20x_pmx_func_groups,
+ .set_mux = axp20x_pmx_set_mux,
+ .gpio_set_direction = axp20x_pmx_gpio_set_direction,
+ .strict = true,
+};
+
+static int axp20x_groups_cnt(struct pinctrl_dev *pctldev)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->desc->npins;
+}
+
+static int axp20x_group_pins(struct pinctrl_dev *pctldev, unsigned int selector,
+ const unsigned int **pins, unsigned int *num_pins)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = (unsigned int *)&pctl->desc->pins[selector];
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const char *axp20x_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->desc->pins[selector].name;
+}
+
+static const struct pinctrl_ops axp20x_pctrl_ops = {
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinconf_generic_dt_free_map,
+ .get_groups_count = axp20x_groups_cnt,
+ .get_group_name = axp20x_group_name,
+ .get_group_pins = axp20x_group_pins,
+};
+
+static int axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
+ unsigned int mask_len,
+ struct axp20x_pinctrl_function *func,
+ const struct pinctrl_pin_desc *pins)
+{
+ unsigned long mask_cpy = mask;
+ const char **group;
+ unsigned int ngroups = hweight8(mask);
+ int bit;
+
+ func->ngroups = ngroups;
+ if (func->ngroups > 0) {
+ func->groups = devm_kcalloc(dev, ngroups, sizeof(const char *),
+ GFP_KERNEL);
+ if (!func->groups)
+ return -ENOMEM;
+ group = func->groups;
+ for_each_set_bit(bit, &mask_cpy, mask_len) {
+ *group = pins[bit].name;
+ group++;
+ }
+ }
+
+ return 0;
+}
+
+static int axp20x_build_funcs_groups(struct platform_device *pdev)
+{
+ struct axp20x_pctl *pctl = platform_get_drvdata(pdev);
+ int i, ret, pin, npins = pctl->desc->npins;
+
+ pctl->funcs[AXP20X_FUNC_GPIO_OUT].name = "gpio_out";
+ pctl->funcs[AXP20X_FUNC_GPIO_OUT].muxval = AXP20X_MUX_GPIO_OUT;
+ pctl->funcs[AXP20X_FUNC_GPIO_IN].name = "gpio_in";
+ pctl->funcs[AXP20X_FUNC_GPIO_IN].muxval = AXP20X_MUX_GPIO_IN;
+ pctl->funcs[AXP20X_FUNC_LDO].name = "ldo";
+ /*
+ * The LDO muxval is intentionally left unset as it is never used.
+ * See the comment in axp20x_pmx_set_mux().
+ */
+ pctl->funcs[AXP20X_FUNC_ADC].name = "adc";
+ pctl->funcs[AXP20X_FUNC_ADC].muxval = pctl->desc->adc_mux;
+
+ /* Every pin supports GPIO_OUT and GPIO_IN functions */
+ for (i = 0; i <= AXP20X_FUNC_GPIO_IN; i++) {
+ pctl->funcs[i].ngroups = npins;
+ pctl->funcs[i].groups = devm_kcalloc(&pdev->dev, npins,
+ sizeof(char *), GFP_KERNEL);
+ if (!pctl->funcs[i].groups)
+ return -ENOMEM;
+ for (pin = 0; pin < npins; pin++)
+ pctl->funcs[i].groups[pin] = pctl->desc->pins[pin].name;
+ }
+
+ ret = axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->ldo_mask,
+ npins, &pctl->funcs[AXP20X_FUNC_LDO],
+ pctl->desc->pins);
+ if (ret)
+ return ret;
+
+ return axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->adc_mask,
+ npins, &pctl->funcs[AXP20X_FUNC_ADC],
+ pctl->desc->pins);
+}
+
+static const struct of_device_id axp20x_pctl_match[] = {
+ { .compatible = "x-powers,axp209-gpio", .data = &axp20x_data, },
+ { .compatible = "x-powers,axp813-gpio", .data = &axp813_data, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, axp20x_pctl_match);
+
+static int axp20x_pctl_probe(struct platform_device *pdev)
+{
+ struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
+ struct axp20x_pctl *pctl;
+ struct device *dev = &pdev->dev;
+ struct pinctrl_desc *pctrl_desc;
+ int ret;
+
+ if (!of_device_is_available(pdev->dev.of_node))
+ return -ENODEV;
+
+ if (!axp20x) {
+ dev_err(&pdev->dev, "Parent drvdata not set\n");
+ return -EINVAL;
+ }
+
+ pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
+ if (!pctl)
+ return -ENOMEM;
+
+ /* Fetch the variant data before chip.ngpio dereferences it below */
+ pctl->desc = (struct axp20x_pctrl_desc *)of_device_get_match_data(dev);
+ pctl->regmap = axp20x->regmap;
+ pctl->dev = &pdev->dev;
+
+ pctl->chip.base = -1;
+ pctl->chip.can_sleep = true;
+ pctl->chip.request = gpiochip_generic_request;
+ pctl->chip.free = gpiochip_generic_free;
+ pctl->chip.parent = &pdev->dev;
+ pctl->chip.label = dev_name(&pdev->dev);
+ pctl->chip.owner = THIS_MODULE;
+ pctl->chip.get = axp20x_gpio_get;
+ pctl->chip.get_direction = axp20x_gpio_get_direction;
+ pctl->chip.set = axp20x_gpio_set;
+ pctl->chip.direction_input = axp20x_gpio_input;
+ pctl->chip.direction_output = axp20x_gpio_output;
+ pctl->chip.ngpio = pctl->desc->npins;
+
+ platform_set_drvdata(pdev, pctl);
+
+ ret = axp20x_build_funcs_groups(pdev);
+ if (ret)
+ return ret;
+
+ pctrl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctrl_desc), GFP_KERNEL);
+ if (!pctrl_desc)
+ return -ENOMEM;
+
+ pctrl_desc->name = dev_name(&pdev->dev);
+ pctrl_desc->owner = THIS_MODULE;
+ pctrl_desc->pins = pctl->desc->pins;
+ pctrl_desc->npins = pctl->desc->npins;
+ pctrl_desc->pctlops = &axp20x_pctrl_ops;
+ pctrl_desc->pmxops = &axp20x_pmx_ops;
+
+ pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, pctrl_desc, pctl);
+ if (IS_ERR(pctl->pctl_dev)) {
+ dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
+ return PTR_ERR(pctl->pctl_dev);
+ }
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &pctl->chip, pctl);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register GPIO chip\n");
+ return ret;
+ }
+
+ ret = gpiochip_add_pin_range(&pctl->chip, dev_name(&pdev->dev),
+ pctl->desc->pins->number,
+ pctl->desc->pins->number,
+ pctl->desc->npins);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add pin range\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "AXP209 pinctrl and GPIO driver loaded\n");
+
+ return 0;
+}
+
+static struct platform_driver axp20x_pctl_driver = {
+ .probe = axp20x_pctl_probe,
+ .driver = {
+ .name = "axp20x-gpio",
+ .of_match_table = axp20x_pctl_match,
+ },
+};
+
+module_platform_driver(axp20x_pctl_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
+MODULE_DESCRIPTION("AXP20x PMIC pinctrl and GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index e6cd8de793e2..3501491e5bfc 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -222,6 +222,9 @@ static enum pin_config_param pcs_bias[] = {
*/
static struct lock_class_key pcs_lock_class;
+/* Class for the IRQ request mutex */
+static struct lock_class_key pcs_request_class;
+
/*
* REVISIT: Reads and writes could eventually use regmap or something
* generic. But at least on omaps, some mux registers are performance
@@ -1486,7 +1489,7 @@ static int pcs_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_data(irq, pcs_soc);
irq_set_chip_and_handler(irq, &pcs->chip,
handle_level_irq);
- irq_set_lockdep_class(irq, &pcs_lock_class);
+ irq_set_lockdep_class(irq, &pcs_lock_class, &pcs_request_class);
irq_set_noprobe(irq);
return 0;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index a276c61be217..e62ab087bfd8 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -290,7 +290,7 @@ static int stm32_gpio_domain_translate(struct irq_domain *d,
}
static int stm32_gpio_domain_activate(struct irq_domain *d,
- struct irq_data *irq_data, bool early)
+ struct irq_data *irq_data, bool reserve)
{
struct stm32_gpio_bank *bank = d->host_data;
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 0ad6e290bbda..e728a96cabfd 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -38,14 +38,8 @@ config CHROMEOS_PSTORE
If you have a supported Chromebook, choose Y or M here.
The module will be called chromeos_pstore.
-config CROS_EC_CHARDEV
- tristate "Chrome OS Embedded Controller userspace device interface"
- depends on MFD_CROS_EC
- ---help---
- This driver adds support to talk with the ChromeOS EC from userspace.
-
- If you have a supported Chromebook, choose Y or M here.
- The module will be called cros_ec_dev.
+config CROS_EC_CTL
+ tristate
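+# Invisible symbol; meant to be selected by the users of the EC control code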
config CROS_EC_LPC
tristate "ChromeOS Embedded Controller (LPC)"
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index a077b1f0211d..ff3b369911f0 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -2,10 +2,9 @@
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
-cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \
- cros_ec_lightbar.o cros_ec_vbc.o \
- cros_ec_debugfs.o
-obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o
+cros_ec_ctl-objs := cros_ec_sysfs.o cros_ec_lightbar.o \
+ cros_ec_vbc.o cros_ec_debugfs.o
+obj-$(CONFIG_CROS_EC_CTL) += cros_ec_ctl.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_reg.o
cros_ec_lpcs-$(CONFIG_CROS_EC_LPC_MEC) += cros_ec_lpc_mec.o
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 4cc66f405760..5473e602f7e0 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -29,9 +29,6 @@
#include <linux/slab.h>
#include <linux/wait.h>
-#include "cros_ec_dev.h"
-#include "cros_ec_debugfs.h"
-
#define LOG_SHIFT 14
#define LOG_SIZE (1 << LOG_SHIFT)
#define LOG_POLL_SEC 10
@@ -191,11 +188,11 @@ error:
return ret;
}
-static unsigned int cros_ec_console_log_poll(struct file *file,
+static __poll_t cros_ec_console_log_poll(struct file *file,
poll_table *wait)
{
struct cros_ec_debugfs *debug_info = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &debug_info->log_wq, wait);
@@ -390,6 +387,7 @@ remove_debugfs:
debugfs_remove_recursive(debug_info->dir);
return ret;
}
+EXPORT_SYMBOL(cros_ec_debugfs_init);
void cros_ec_debugfs_remove(struct cros_ec_dev *ec)
{
@@ -399,3 +397,4 @@ void cros_ec_debugfs_remove(struct cros_ec_dev *ec)
debugfs_remove_recursive(ec->debug_info->dir);
cros_ec_cleanup_console_log(ec->debug_info);
}
+EXPORT_SYMBOL(cros_ec_debugfs_remove);
diff --git a/drivers/platform/chrome/cros_ec_debugfs.h b/drivers/platform/chrome/cros_ec_debugfs.h
deleted file mode 100644
index 1ff3a50aa1b8..000000000000
--- a/drivers/platform/chrome/cros_ec_debugfs.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright 2015 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _DRV_CROS_EC_DEBUGFS_H_
-#define _DRV_CROS_EC_DEBUGFS_H_
-
-#include "cros_ec_dev.h"
-
-/* debugfs stuff */
-int cros_ec_debugfs_init(struct cros_ec_dev *ec);
-void cros_ec_debugfs_remove(struct cros_ec_dev *ec);
-
-#endif /* _DRV_CROS_EC_DEBUGFS_H_ */
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index fd2b047a2748..6ea79d495aa2 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -33,8 +33,6 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
-#include "cros_ec_dev.h"
-
/* Rate-limit the lightbar interface to prevent DoS. */
static unsigned long lb_interval_jiffies = 50 * HZ / 1000;
@@ -414,6 +412,7 @@ error:
return ret;
}
+EXPORT_SYMBOL(lb_manual_suspend_ctrl);
int lb_suspend(struct cros_ec_dev *ec)
{
@@ -422,6 +421,7 @@ int lb_suspend(struct cros_ec_dev *ec)
return lb_send_empty_cmd(ec, LIGHTBAR_CMD_SUSPEND);
}
+EXPORT_SYMBOL(lb_suspend);
int lb_resume(struct cros_ec_dev *ec)
{
@@ -430,6 +430,7 @@ int lb_resume(struct cros_ec_dev *ec)
return lb_send_empty_cmd(ec, LIGHTBAR_CMD_RESUME);
}
+EXPORT_SYMBOL(lb_resume);
static ssize_t sequence_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -622,3 +623,4 @@ struct attribute_group cros_ec_lightbar_attr_group = {
.attrs = __lb_cmds_attrs,
.is_visible = cros_ec_lightbar_attrs_are_visible,
};
+EXPORT_SYMBOL(cros_ec_lightbar_attr_group);
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index f3baf9973989..d6eebe872187 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -34,8 +34,6 @@
#include <linux/types.h>
#include <linux/uaccess.h>
-#include "cros_ec_dev.h"
-
/* Accessor functions */
static ssize_t show_ec_reboot(struct device *dev,
@@ -294,4 +292,7 @@ static struct attribute *__ec_attrs[] = {
struct attribute_group cros_ec_attr_group = {
.attrs = __ec_attrs,
};
+EXPORT_SYMBOL(cros_ec_attr_group);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC control driver");
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index 564a0d08c8bf..6d38e6b08334 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -135,3 +135,4 @@ struct attribute_group cros_ec_vbc_attr_group = {
.bin_attrs = cros_ec_vbc_bin_attrs,
.is_bin_visible = cros_ec_vbc_is_visible,
};
+EXPORT_SYMBOL(cros_ec_vbc_attr_group);
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 0578d34eec3f..999f1152655a 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -536,10 +536,10 @@ static ssize_t goldfish_pipe_write(struct file *filp,
/* is_write */ 1);
}
-static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
+static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
struct goldfish_pipe *pipe = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
int status;
poll_wait(filp, &pipe->wake_queue, wait);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 935121814c97..a4fabf9d75f3 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4124,7 +4124,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
return ret;
}
-static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
+static __poll_t sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_compat.fifo_proc_list, wait);
if (kfifo_len(&sonypi_compat.fifo))
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c
index 6505c97705e1..1b491690ce07 100644
--- a/drivers/platform/x86/surfacepro3_button.c
+++ b/drivers/platform/x86/surfacepro3_button.c
@@ -119,7 +119,7 @@ static void surface_button_notify(struct acpi_device *device, u32 event)
if (key_code == KEY_RESERVED)
return;
if (pressed)
- pm_wakeup_event(&device->dev, 0);
+ pm_wakeup_dev_event(&device->dev, 0, button->suspended);
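+ /* While suspended, report a hard wakeup event to abort the suspend */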
if (button->suspended)
return;
input_report_key(input, key_code, pressed?1:0);
@@ -185,6 +185,8 @@ static int surface_button_add(struct acpi_device *device)
error = input_register_device(input);
if (error)
goto err_free_input;
+
+ device_init_wakeup(&device->dev, true);
dev_info(&device->dev,
"%s [%s]\n", name, acpi_device_bid(device));
return 0;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 791449a2370f..daa68acbc900 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -1458,5 +1458,5 @@ static void __exit acpi_wmi_exit(void)
class_unregister(&wmi_bus_class);
}
-subsys_initcall(acpi_wmi_init);
+subsys_initcall_sync(acpi_wmi_init);
module_exit(acpi_wmi_exit);
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index e681140b85d8..077f334fdbae 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -581,10 +581,7 @@ static int __init pnpbios_thread_init(void)
init_completion(&unload_sem);
task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd");
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- return 0;
+ return PTR_ERR_OR_ZERO(task);
}
/* Start the kernel thread later: */
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index f054cdddfef8..803666ae3635 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/pnp.h>
#include <linux/io.h>
-#include <linux/kallsyms.h>
#include "base.h"
static void quirk_awe32_add_ports(struct pnp_dev *dev,
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 75f63e38a8d1..ed2b109ae8fc 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -76,7 +76,7 @@ struct rockchip_iodomain_supply {
struct rockchip_iodomain {
struct device *dev;
struct regmap *grf;
- struct rockchip_iodomain_soc_data *soc_data;
+ const struct rockchip_iodomain_soc_data *soc_data;
struct rockchip_iodomain_supply supplies[MAX_SUPPLIES];
};
@@ -382,43 +382,43 @@ static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = {
static const struct of_device_id rockchip_iodomain_match[] = {
{
.compatible = "rockchip,rk3188-io-voltage-domain",
- .data = (void *)&soc_data_rk3188
+ .data = &soc_data_rk3188
},
{
.compatible = "rockchip,rk3228-io-voltage-domain",
- .data = (void *)&soc_data_rk3228
+ .data = &soc_data_rk3228
},
{
.compatible = "rockchip,rk3288-io-voltage-domain",
- .data = (void *)&soc_data_rk3288
+ .data = &soc_data_rk3288
},
{
.compatible = "rockchip,rk3328-io-voltage-domain",
- .data = (void *)&soc_data_rk3328
+ .data = &soc_data_rk3328
},
{
.compatible = "rockchip,rk3368-io-voltage-domain",
- .data = (void *)&soc_data_rk3368
+ .data = &soc_data_rk3368
},
{
.compatible = "rockchip,rk3368-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rk3368_pmu
+ .data = &soc_data_rk3368_pmu
},
{
.compatible = "rockchip,rk3399-io-voltage-domain",
- .data = (void *)&soc_data_rk3399
+ .data = &soc_data_rk3399
},
{
.compatible = "rockchip,rk3399-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rk3399_pmu
+ .data = &soc_data_rk3399_pmu
},
{
.compatible = "rockchip,rv1108-io-voltage-domain",
- .data = (void *)&soc_data_rv1108
+ .data = &soc_data_rv1108
},
{
.compatible = "rockchip,rv1108-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rv1108_pmu
+ .data = &soc_data_rv1108_pmu
},
{ /* sentinel */ },
};
@@ -443,7 +443,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iod);
match = of_match_node(rockchip_iodomain_match, np);
- iod->soc_data = (struct rockchip_iodomain_soc_data *)match->data;
+ iod->soc_data = match->data;
parent = pdev->dev.parent;
if (parent && parent->of_node) {
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index ca0de1a78e85..a102e74ab24e 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -98,15 +98,6 @@ config POWER_RESET_HISI
help
Reboot support for Hisilicon boards.
-config POWER_RESET_IMX
- bool "IMX6 power-off driver"
- depends on POWER_RESET && SOC_IMX6
- help
- This driver support power off external PMIC by PMIC_ON_REQ on i.mx6
- boards.If you want to use other pin to control external power,please
- say N here or disable in dts to make sure pm_power_off never be
- overwrote wrongly by this driver.
-
config POWER_RESET_MSM
bool "Qualcomm MSM power-off driver"
depends on ARCH_QCOM
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index aeb65edb17b7..dcc92f5f7a37 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_POWER_RESET_GEMINI_POWEROFF) += gemini-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
-obj-$(CONFIG_POWER_RESET_IMX) += imx-snvs-poweroff.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 31080c254124..0206cce328b3 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -68,7 +68,7 @@ struct shdwc_config {
};
struct shdwc {
- struct shdwc_config *cfg;
+ const struct shdwc_config *cfg;
void __iomem *at91_shdwc_base;
};
@@ -260,7 +260,7 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
}
match = of_match_node(at91_shdwc_of_match, pdev->dev.of_node);
- at91_shdwc->cfg = (struct shdwc_config *)(match->data);
+ at91_shdwc->cfg = match->data;
sclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(sclk))
diff --git a/drivers/power/reset/imx-snvs-poweroff.c b/drivers/power/reset/imx-snvs-poweroff.c
deleted file mode 100644
index ad6ce5020ea7..000000000000
--- a/drivers/power/reset/imx-snvs-poweroff.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Power off driver for i.mx6
- * Copyright (c) 2014, FREESCALE CORPORATION. All rights reserved.
- *
- * based on msm-poweroff.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-
-static void __iomem *snvs_base;
-
-static void do_imx_poweroff(void)
-{
- u32 value = readl(snvs_base);
-
- /* set TOP and DP_EN bit */
- writel(value | 0x60, snvs_base);
-}
-
-static int imx_poweroff_probe(struct platform_device *pdev)
-{
- snvs_base = of_iomap(pdev->dev.of_node, 0);
- if (!snvs_base) {
- dev_err(&pdev->dev, "failed to get memory\n");
- return -ENODEV;
- }
-
- pm_power_off = do_imx_poweroff;
- return 0;
-}
-
-static const struct of_device_id of_imx_poweroff_match[] = {
- { .compatible = "fsl,sec-v4.0-poweroff", },
- {},
-};
-MODULE_DEVICE_TABLE(of, of_imx_poweroff_match);
-
-static struct platform_driver imx_poweroff_driver = {
- .probe = imx_poweroff_probe,
- .driver = {
- .name = "imx-snvs-poweroff",
- .of_match_table = of_match_ptr(of_imx_poweroff_match),
- },
-};
-
-static int __init imx_poweroff_init(void)
-{
- return platform_driver_register(&imx_poweroff_driver);
-}
-device_initcall(imx_poweroff_init);
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 4702efdfe466..01b8c71697cb 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -23,7 +23,7 @@
#include <linux/pm.h>
static void __iomem *msm_ps_hold;
-static int do_msm_restart(struct notifier_block *nb, unsigned long action,
+static int deassert_pshold(struct notifier_block *nb, unsigned long action,
void *data)
{
writel(0, msm_ps_hold);
@@ -33,14 +33,13 @@ static int do_msm_restart(struct notifier_block *nb, unsigned long action,
}
static struct notifier_block restart_nb = {
- .notifier_call = do_msm_restart,
+ .notifier_call = deassert_pshold,
.priority = 128,
};
static void do_msm_poweroff(void)
{
- /* TODO: Add poweroff capability */
- do_msm_restart(&restart_nb, 0, NULL);
+ deassert_pshold(&restart_nb, 0, NULL);
}
static int msm_restart_probe(struct platform_device *pdev)
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
index 7549c7f74a3c..c03e96e6a041 100644
--- a/drivers/power/reset/zx-reboot.c
+++ b/drivers/power/reset/zx-reboot.c
@@ -82,3 +82,7 @@ static struct platform_driver zx_reboot_driver = {
},
};
module_platform_driver(zx_reboot_driver);
+
+MODULE_DESCRIPTION("ZTE SoCs reset driver");
+MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 4ebbcce45c48..5a76c6d343de 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -3218,11 +3218,13 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
}
/* Enable backup battery charging */
- abx500_mask_and_set_register_interruptible(di->dev,
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_RTC, AB8500_RTC_CTRL_REG,
RTC_BUP_CH_ENA, RTC_BUP_CH_ENA);
- if (ret < 0)
+ if (ret < 0) {
dev_err(di->dev, "%s mask and set failed\n", __func__);
+ goto out;
+ }
if (is_ab8540(di->parent)) {
ret = abx500_mask_and_set_register_interruptible(di->dev,
diff --git a/drivers/power/supply/axp20x_ac_power.c b/drivers/power/supply/axp20x_ac_power.c
index 38f4e87cf24d..0771f951b11f 100644
--- a/drivers/power/supply/axp20x_ac_power.c
+++ b/drivers/power/supply/axp20x_ac_power.c
@@ -159,7 +159,7 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config psy_cfg = {};
struct axp20x_ac_power *power;
- struct axp_data *axp_data;
+ const struct axp_data *axp_data;
static const char * const irq_names[] = { "ACIN_PLUGIN", "ACIN_REMOVAL",
NULL };
int i, irq, ret;
@@ -176,7 +176,7 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
if (!power)
return -ENOMEM;
- axp_data = (struct axp_data *)of_device_get_match_data(&pdev->dev);
+ axp_data = of_device_get_match_data(&pdev->dev);
if (axp_data->acin_adc) {
power->acin_v = devm_iio_channel_get(&pdev->dev, "acin_v");
@@ -230,10 +230,10 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
static const struct of_device_id axp20x_ac_power_match[] = {
{
.compatible = "x-powers,axp202-ac-power-supply",
- .data = (void *)&axp20x_data,
+ .data = &axp20x_data,
}, {
.compatible = "x-powers,axp221-ac-power-supply",
- .data = (void *)&axp22x_data,
+ .data = &axp22x_data,
}, { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, axp20x_ac_power_match);
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index d51ebd1da65e..9bfbde15b07d 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -1,6 +1,7 @@
/*
* axp288_charger.c - X-power AXP288 PMIC Charger driver
*
+ * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
* Copyright (C) 2014 Intel Corporation
* Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
*
@@ -98,28 +99,10 @@
#define CV_4200MV 4200 /* 4200mV */
#define CV_4350MV 4350 /* 4350mV */
-#define CC_200MA 200 /* 200mA */
-#define CC_600MA 600 /* 600mA */
-#define CC_800MA 800 /* 800mA */
-#define CC_1000MA 1000 /* 1000mA */
-#define CC_1600MA 1600 /* 1600mA */
-#define CC_2000MA 2000 /* 2000mA */
-
-#define ILIM_100MA 100 /* 100mA */
-#define ILIM_500MA 500 /* 500mA */
-#define ILIM_900MA 900 /* 900mA */
-#define ILIM_1500MA 1500 /* 1500mA */
-#define ILIM_2000MA 2000 /* 2000mA */
-#define ILIM_2500MA 2500 /* 2500mA */
-#define ILIM_3000MA 3000 /* 3000mA */
-
#define AXP288_EXTCON_DEV_NAME "axp288_extcon"
#define USB_HOST_EXTCON_HID "INT3496"
#define USB_HOST_EXTCON_NAME "INT3496:00"
-static const unsigned int cable_ids[] =
- { EXTCON_CHG_USB_SDP, EXTCON_CHG_USB_CDP, EXTCON_CHG_USB_DCP };
-
enum {
VBUS_OV_IRQ = 0,
CHARGE_DONE_IRQ,
@@ -139,7 +122,6 @@ struct axp288_chrg_info {
struct regmap_irq_chip_data *regmap_irqc;
int irq[CHRG_INTR_END];
struct power_supply *psy_usb;
- struct mutex lock;
/* OTG/Host mode */
struct {
@@ -152,18 +134,14 @@ struct axp288_chrg_info {
/* SDP/CDP/DCP USB charging cable notifications */
struct {
struct extcon_dev *edev;
- bool connected;
- enum power_supply_type chg_type;
- struct notifier_block nb[ARRAY_SIZE(cable_ids)];
+ struct notifier_block nb;
struct work_struct work;
} cable;
- int inlmt;
int cc;
int cv;
int max_cc;
int max_cv;
- int is_charger_enabled;
};
static inline int axp288_charger_set_cc(struct axp288_chrg_info *info, int cc)
@@ -220,51 +198,63 @@ static inline int axp288_charger_set_cv(struct axp288_chrg_info *info, int cv)
return ret;
}
-static inline int axp288_charger_set_vbus_inlmt(struct axp288_chrg_info *info,
- int inlmt)
+static int axp288_charger_get_vbus_inlmt(struct axp288_chrg_info *info)
{
- int ret;
unsigned int val;
- u8 reg_val;
+ int ret;
- /* Read in limit register */
ret = regmap_read(info->regmap, AXP20X_CHRG_BAK_CTRL, &val);
if (ret < 0)
- goto set_inlmt_fail;
-
- if (inlmt <= ILIM_100MA) {
- reg_val = CHRG_VBUS_ILIM_100MA;
- inlmt = ILIM_100MA;
- } else if (inlmt <= ILIM_500MA) {
- reg_val = CHRG_VBUS_ILIM_500MA;
- inlmt = ILIM_500MA;
- } else if (inlmt <= ILIM_900MA) {
- reg_val = CHRG_VBUS_ILIM_900MA;
- inlmt = ILIM_900MA;
- } else if (inlmt <= ILIM_1500MA) {
- reg_val = CHRG_VBUS_ILIM_1500MA;
- inlmt = ILIM_1500MA;
- } else if (inlmt <= ILIM_2000MA) {
- reg_val = CHRG_VBUS_ILIM_2000MA;
- inlmt = ILIM_2000MA;
- } else if (inlmt <= ILIM_2500MA) {
- reg_val = CHRG_VBUS_ILIM_2500MA;
- inlmt = ILIM_2500MA;
- } else {
- reg_val = CHRG_VBUS_ILIM_3000MA;
- inlmt = ILIM_3000MA;
+ return ret;
+
+ val >>= CHRG_VBUS_ILIM_BIT_POS;
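+ /* Translate the 3-bit limit field into an input current limit in uA */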
+ switch (val) {
+ case CHRG_VBUS_ILIM_100MA:
+ return 100000;
+ case CHRG_VBUS_ILIM_500MA:
+ return 500000;
+ case CHRG_VBUS_ILIM_900MA:
+ return 900000;
+ case CHRG_VBUS_ILIM_1500MA:
+ return 1500000;
+ case CHRG_VBUS_ILIM_2000MA:
+ return 2000000;
+ case CHRG_VBUS_ILIM_2500MA:
+ return 2500000;
+ case CHRG_VBUS_ILIM_3000MA:
+ return 3000000;
+ default:
+ dev_warn(&info->pdev->dev, "Unknown ilim reg val: %u\n", val);
+ return 0;
}
+}
- reg_val = (val & ~CHRG_VBUS_ILIM_MASK)
- | (reg_val << CHRG_VBUS_ILIM_BIT_POS);
- ret = regmap_write(info->regmap, AXP20X_CHRG_BAK_CTRL, reg_val);
- if (ret >= 0)
- info->inlmt = inlmt;
+static inline int axp288_charger_set_vbus_inlmt(struct axp288_chrg_info *info,
+ int inlmt)
+{
+ int ret;
+ u8 reg_val;
+
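+ /* inlmt is in uA; round down to the nearest supported limit */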
+ if (inlmt >= 3000000)
+ reg_val = CHRG_VBUS_ILIM_3000MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 2500000)
+ reg_val = CHRG_VBUS_ILIM_2500MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 2000000)
+ reg_val = CHRG_VBUS_ILIM_2000MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 1500000)
+ reg_val = CHRG_VBUS_ILIM_1500MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 900000)
+ reg_val = CHRG_VBUS_ILIM_900MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 500000)
+ reg_val = CHRG_VBUS_ILIM_500MA << CHRG_VBUS_ILIM_BIT_POS;
else
- dev_err(&info->pdev->dev, "charger BAK control %d\n", ret);
+ reg_val = CHRG_VBUS_ILIM_100MA << CHRG_VBUS_ILIM_BIT_POS;
+ ret = regmap_update_bits(info->regmap, AXP20X_CHRG_BAK_CTRL,
+ CHRG_VBUS_ILIM_MASK, reg_val);
+ if (ret < 0)
+ dev_err(&info->pdev->dev, "charger BAK control %d\n", ret);
-set_inlmt_fail:
return ret;
}
@@ -283,7 +273,6 @@ static int axp288_charger_vbus_path_select(struct axp288_chrg_info *info,
if (ret < 0)
dev_err(&info->pdev->dev, "axp288 vbus path select %d\n", ret);
-
return ret;
}
@@ -292,9 +281,6 @@ static int axp288_charger_enable_charger(struct axp288_chrg_info *info,
{
int ret;
- if ((int)enable == info->is_charger_enabled)
- return 0;
-
if (enable)
ret = regmap_update_bits(info->regmap, AXP20X_CHRG_CTRL1,
CHRG_CCCV_CHG_EN, CHRG_CCCV_CHG_EN);
@@ -303,8 +289,6 @@ static int axp288_charger_enable_charger(struct axp288_chrg_info *info,
CHRG_CCCV_CHG_EN, 0);
if (ret < 0)
dev_err(&info->pdev->dev, "axp288 enable charger %d\n", ret);
- else
- info->is_charger_enabled = enable;
return ret;
}
@@ -376,8 +360,6 @@ static int axp288_charger_usb_set_property(struct power_supply *psy,
int ret = 0;
int scaled_val;
- mutex_lock(&info->lock);
-
switch (psp) {
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
scaled_val = min(val->intval, info->max_cc);
@@ -393,11 +375,15 @@ static int axp288_charger_usb_set_property(struct power_supply *psy,
if (ret < 0)
dev_warn(&info->pdev->dev, "set charge voltage failed\n");
break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = axp288_charger_set_vbus_inlmt(info, val->intval);
+ if (ret < 0)
+ dev_warn(&info->pdev->dev, "set input current limit failed\n");
+ break;
default:
ret = -EINVAL;
}
- mutex_unlock(&info->lock);
return ret;
}
@@ -406,9 +392,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
union power_supply_propval *val)
{
struct axp288_chrg_info *info = power_supply_get_drvdata(psy);
- int ret = 0;
-
- mutex_lock(&info->lock);
+ int ret;
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
@@ -419,7 +403,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
}
ret = axp288_charger_is_present(info);
if (ret < 0)
- goto psy_get_prop_fail;
+ return ret;
val->intval = ret;
break;
case POWER_SUPPLY_PROP_ONLINE:
@@ -430,7 +414,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
}
ret = axp288_charger_is_online(info);
if (ret < 0)
- goto psy_get_prop_fail;
+ return ret;
val->intval = ret;
break;
case POWER_SUPPLY_PROP_HEALTH:
@@ -448,17 +432,17 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
val->intval = info->max_cv * 1000;
break;
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
- val->intval = info->inlmt * 1000;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = axp288_charger_get_vbus_inlmt(info);
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
break;
default:
- ret = -EINVAL;
- goto psy_get_prop_fail;
+ return -EINVAL;
}
-psy_get_prop_fail:
- mutex_unlock(&info->lock);
- return ret;
+ return 0;
}
static int axp288_charger_property_is_writeable(struct power_supply *psy,
@@ -469,6 +453,7 @@ static int axp288_charger_property_is_writeable(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
ret = 1;
break;
default:
@@ -487,7 +472,7 @@ static enum power_supply_property axp288_usb_props[] = {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
};
static const struct power_supply_desc axp288_charger_desc = {
@@ -565,99 +550,53 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
container_of(work, struct axp288_chrg_info, cable.work);
int ret, current_limit;
struct extcon_dev *edev = info->cable.edev;
- bool old_connected = info->cable.connected;
- enum power_supply_type old_chg_type = info->cable.chg_type;
+ unsigned int val;
+
+ ret = regmap_read(info->regmap, AXP20X_PWR_INPUT_STATUS, &val);
+ if (ret < 0) {
+ dev_err(&info->pdev->dev, "Error reading status (%d)\n", ret);
+ return;
+ }
+
+ /* Offline? Disable charging and bail */
+ if (!(val & PS_STAT_VBUS_VALID)) {
+ dev_dbg(&info->pdev->dev, "USB charger disconnected\n");
+ axp288_charger_enable_charger(info, false);
+ power_supply_changed(info->psy_usb);
+ return;
+ }
/* Determine cable/charger type */
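+ /* current_limit values are in uA: 500mA SDP, 1.5A CDP, 2A DCP */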
if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
- dev_dbg(&info->pdev->dev, "USB SDP charger is connected");
- info->cable.connected = true;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB;
+ dev_dbg(&info->pdev->dev, "USB SDP charger is connected\n");
+ current_limit = 500000;
} else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) {
- dev_dbg(&info->pdev->dev, "USB CDP charger is connected");
- info->cable.connected = true;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB_CDP;
+ dev_dbg(&info->pdev->dev, "USB CDP charger is connected\n");
+ current_limit = 1500000;
} else if (extcon_get_state(edev, EXTCON_CHG_USB_DCP) > 0) {
- dev_dbg(&info->pdev->dev, "USB DCP charger is connected");
- info->cable.connected = true;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB_DCP;
+ dev_dbg(&info->pdev->dev, "USB DCP charger is connected\n");
+ current_limit = 2000000;
} else {
- if (old_connected)
- dev_dbg(&info->pdev->dev, "USB charger disconnected");
- info->cable.connected = false;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB;
- }
-
- /* Cable status changed */
- if (old_connected == info->cable.connected &&
- old_chg_type == info->cable.chg_type)
+ /* Charger type detection still in progress, bail. */
return;
-
- mutex_lock(&info->lock);
-
- if (info->cable.connected) {
- axp288_charger_enable_charger(info, false);
-
- switch (info->cable.chg_type) {
- case POWER_SUPPLY_TYPE_USB:
- current_limit = ILIM_500MA;
- break;
- case POWER_SUPPLY_TYPE_USB_CDP:
- current_limit = ILIM_1500MA;
- break;
- case POWER_SUPPLY_TYPE_USB_DCP:
- current_limit = ILIM_2000MA;
- break;
- default:
- /* Unknown */
- current_limit = 0;
- break;
- }
-
- /* Set vbus current limit first, then enable charger */
- ret = axp288_charger_set_vbus_inlmt(info, current_limit);
- if (ret == 0)
- axp288_charger_enable_charger(info, true);
- else
- dev_err(&info->pdev->dev,
- "error setting current limit (%d)", ret);
- } else {
- axp288_charger_enable_charger(info, false);
}
- mutex_unlock(&info->lock);
+ /* Set vbus current limit first, then enable charger */
+ ret = axp288_charger_set_vbus_inlmt(info, current_limit);
+ if (ret == 0)
+ axp288_charger_enable_charger(info, true);
+ else
+ dev_err(&info->pdev->dev,
+ "error setting current limit (%d)\n", ret);
power_supply_changed(info->psy_usb);
}
-/*
- * We need 3 copies of this, because there is no way to find out for which
- * cable id we are being called from the passed in arguments; and we must
- * have a separate nb for each extcon_register_notifier call.
- */
-static int axp288_charger_handle_cable0_evt(struct notifier_block *nb,
- unsigned long event, void *param)
+static int axp288_charger_handle_cable_evt(struct notifier_block *nb,
+ unsigned long event, void *param)
{
struct axp288_chrg_info *info =
- container_of(nb, struct axp288_chrg_info, cable.nb[0]);
- schedule_work(&info->cable.work);
- return NOTIFY_OK;
-}
-
-static int axp288_charger_handle_cable1_evt(struct notifier_block *nb,
- unsigned long event, void *param)
-{
- struct axp288_chrg_info *info =
- container_of(nb, struct axp288_chrg_info, cable.nb[1]);
- schedule_work(&info->cable.work);
- return NOTIFY_OK;
-}
-
-static int axp288_charger_handle_cable2_evt(struct notifier_block *nb,
- unsigned long event, void *param)
-{
- struct axp288_chrg_info *info =
- container_of(nb, struct axp288_chrg_info, cable.nb[2]);
+ container_of(nb, struct axp288_chrg_info, cable.nb);
schedule_work(&info->cable.work);
return NOTIFY_OK;
}
@@ -785,6 +724,14 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info)
return 0;
}
+static void axp288_charger_cancel_work(void *data)
+{
+ struct axp288_chrg_info *info = data;
+
+ cancel_work_sync(&info->otg.work);
+ cancel_work_sync(&info->cable.work);
+}
+
static int axp288_charger_probe(struct platform_device *pdev)
{
int ret, i, pirq;
@@ -799,8 +746,6 @@ static int axp288_charger_probe(struct platform_device *pdev)
info->pdev = pdev;
info->regmap = axp20x->regmap;
info->regmap_irqc = axp20x->regmap_irqc;
- info->cable.chg_type = -1;
- info->is_charger_enabled = -1;
info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME);
if (info->cable.edev == NULL) {
@@ -820,7 +765,6 @@ static int axp288_charger_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- mutex_init(&info->lock);
ret = charger_init_hw_regs(info);
if (ret)
@@ -836,19 +780,19 @@ static int axp288_charger_probe(struct platform_device *pdev)
return ret;
}
+ /* Cancel our work on cleanup, register this before the notifiers */
+ ret = devm_add_action(dev, axp288_charger_cancel_work, info);
+ if (ret)
+ return ret;
+
/* Register for extcon notification */
INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
- info->cable.nb[0].notifier_call = axp288_charger_handle_cable0_evt;
- info->cable.nb[1].notifier_call = axp288_charger_handle_cable1_evt;
- info->cable.nb[2].notifier_call = axp288_charger_handle_cable2_evt;
- for (i = 0; i < ARRAY_SIZE(cable_ids); i++) {
- ret = devm_extcon_register_notifier(dev, info->cable.edev,
- cable_ids[i], &info->cable.nb[i]);
- if (ret) {
- dev_err(dev, "failed to register extcon notifier for %u: %d\n",
- cable_ids[i], ret);
- return ret;
- }
+ info->cable.nb.notifier_call = axp288_charger_handle_cable_evt;
+ ret = devm_extcon_register_notifier_all(dev, info->cable.edev,
+ &info->cable.nb);
+ if (ret) {
+ dev_err(dev, "failed to register cable extcon notifier\n");
+ return ret;
}
schedule_work(&info->cable.work);
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index a8dcabc32721..4cc6e038dfdd 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -1,6 +1,7 @@
/*
* axp288_fuel_gauge.c - Xpower AXP288 PMIC Fuel Gauge Driver
*
+ * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
* Copyright (C) 2014 Intel Corporation
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -16,6 +17,7 @@
*
*/
+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -31,6 +33,12 @@
#include <linux/seq_file.h>
#include <asm/unaligned.h>
+#define PS_STAT_VBUS_TRIGGER (1 << 0)
+#define PS_STAT_BAT_CHRG_DIR (1 << 2)
+#define PS_STAT_VBAT_ABOVE_VHOLD (1 << 3)
+#define PS_STAT_VBUS_VALID (1 << 4)
+#define PS_STAT_VBUS_PRESENT (1 << 5)
+
#define CHRG_STAT_BAT_SAFE_MODE (1 << 3)
#define CHRG_STAT_BAT_VALID (1 << 4)
#define CHRG_STAT_BAT_PRESENT (1 << 5)
@@ -100,11 +108,22 @@ enum {
WL1_IRQ,
};
+enum {
+ BAT_TEMP = 0,
+ PMIC_TEMP,
+ SYSTEM_TEMP,
+ BAT_CHRG_CURR,
+ BAT_D_CURR,
+ BAT_VOLT,
+ IIO_CHANNEL_NUM
+};
+
struct axp288_fg_info {
struct platform_device *pdev;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
int irq[AXP288_FG_INTR_NUM];
+ struct iio_channel *iio_channel[IIO_CHANNEL_NUM];
struct power_supply *bat;
struct mutex lock;
int status;
@@ -199,33 +218,6 @@ static int fuel_gauge_read_12bit_word(struct axp288_fg_info *info, int reg)
return (buf[0] << 4) | ((buf[1] >> 4) & 0x0f);
}
-static int pmic_read_adc_val(const char *name, int *raw_val,
- struct axp288_fg_info *info)
-{
- int ret, val = 0;
- struct iio_channel *indio_chan;
-
- indio_chan = iio_channel_get(NULL, name);
- if (IS_ERR_OR_NULL(indio_chan)) {
- ret = PTR_ERR(indio_chan);
- goto exit;
- }
- ret = iio_read_channel_raw(indio_chan, &val);
- if (ret < 0) {
- dev_err(&info->pdev->dev,
- "IIO channel read error: %x, %x\n", ret, val);
- goto err_exit;
- }
-
- dev_dbg(&info->pdev->dev, "adc raw val=%x\n", val);
- *raw_val = val;
-
-err_exit:
- iio_channel_release(indio_chan);
-exit:
- return ret;
-}
-
#ifdef CONFIG_DEBUG_FS
static int fuel_gauge_debug_show(struct seq_file *s, void *data)
{
@@ -296,22 +288,22 @@ static int fuel_gauge_debug_show(struct seq_file *s, void *data)
AXP288_FG_TUNE5,
fuel_gauge_reg_readb(info, AXP288_FG_TUNE5));
- ret = pmic_read_adc_val("axp288-batt-temp", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_TEMP], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-batttemp : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-pmic-temp", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[PMIC_TEMP], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-pmictemp : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-system-temp", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[SYSTEM_TEMP], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-systtemp : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-chrg-curr", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-chrgcurr : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-chrg-d-curr", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-dchrgcur : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-batt-volt", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-battvolt : %d\n", raw_val);
@@ -351,8 +343,7 @@ static inline void fuel_gauge_remove_debugfs(struct axp288_fg_info *info)
static void fuel_gauge_get_status(struct axp288_fg_info *info)
{
- int pwr_stat, ret;
- int charge, discharge;
+ int pwr_stat, fg_res;
pwr_stat = fuel_gauge_reg_readb(info, AXP20X_PWR_INPUT_STATUS);
if (pwr_stat < 0) {
@@ -360,36 +351,32 @@ static void fuel_gauge_get_status(struct axp288_fg_info *info)
"PWR STAT read failed:%d\n", pwr_stat);
return;
}
- ret = pmic_read_adc_val("axp288-chrg-curr", &charge, info);
- if (ret < 0) {
- dev_err(&info->pdev->dev,
- "ADC charge current read failed:%d\n", ret);
- return;
- }
- ret = pmic_read_adc_val("axp288-chrg-d-curr", &discharge, info);
- if (ret < 0) {
- dev_err(&info->pdev->dev,
- "ADC discharge current read failed:%d\n", ret);
- return;
+
+ /* Report full if Vbus is valid and the reported capacity is 100% */
+ if (pwr_stat & PS_STAT_VBUS_VALID) {
+ fg_res = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
+ if (fg_res < 0) {
+ dev_err(&info->pdev->dev,
+ "FG RES read failed: %d\n", fg_res);
+ return;
+ }
+ if (fg_res == (FG_REP_CAP_VALID | 100)) {
+ info->status = POWER_SUPPLY_STATUS_FULL;
+ return;
+ }
}
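+ /* Fall back to the charge-direction bit in the power input status */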
- if (charge > 0)
+ if (pwr_stat & PS_STAT_BAT_CHRG_DIR)
info->status = POWER_SUPPLY_STATUS_CHARGING;
- else if (discharge > 0)
+ else
info->status = POWER_SUPPLY_STATUS_DISCHARGING;
- else {
- if (pwr_stat & CHRG_STAT_BAT_PRESENT)
- info->status = POWER_SUPPLY_STATUS_FULL;
- else
- info->status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- }
}
static int fuel_gauge_get_vbatt(struct axp288_fg_info *info, int *vbatt)
{
int ret = 0, raw_val;
- ret = pmic_read_adc_val("axp288-batt-volt", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &raw_val);
if (ret < 0)
goto vbatt_read_fail;
@@ -400,24 +387,19 @@ vbatt_read_fail:
static int fuel_gauge_get_current(struct axp288_fg_info *info, int *cur)
{
- int ret, value = 0;
- int charge, discharge;
+ int ret, discharge;
- ret = pmic_read_adc_val("axp288-chrg-curr", &charge, info);
- if (ret < 0)
- goto current_read_fail;
- ret = pmic_read_adc_val("axp288-chrg-d-curr", &discharge, info);
+ /* Check the discharge current first, so that we usually need only 1 read. */
+ ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &discharge);
if (ret < 0)
- goto current_read_fail;
+ return ret;
- if (charge > 0)
- value = charge;
- else if (discharge > 0)
- value = -1 * discharge;
+ if (discharge > 0) {
+ *cur = -1 * discharge;
+ return 0;
+ }
- *cur = value;
-current_read_fail:
- return ret;
+ return iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], cur);
}
static int fuel_gauge_get_vocv(struct axp288_fg_info *info, int *vocv)
@@ -698,12 +680,54 @@ intr_failed:
}
}
+/*
+ * Some devices have no battery (HDMI sticks) and the axp288 battery's
+ * detection reports one despite it not being there.
+ */
+static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
+ {
+ /* Intel Cherry Trail Compute Stick, Windows version */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"),
+ },
+ },
+ {
+ /* Intel Cherry Trail Compute Stick, version without an OS */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"),
+ },
+ },
+ {
+ /* Meegopad T08 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "To be filled by OEM."),
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_BOARD_VERSION, "V1.1"),
+ },
+ },
+ {}
+};
+
static int axp288_fuel_gauge_probe(struct platform_device *pdev)
{
- int ret = 0;
+ int i, ret = 0;
struct axp288_fg_info *info;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config psy_cfg = {};
+ static const char * const iio_chan_name[] = {
+ [BAT_TEMP] = "axp288-batt-temp",
+ [PMIC_TEMP] = "axp288-pmic-temp",
+ [SYSTEM_TEMP] = "axp288-system-temp",
+ [BAT_CHRG_CURR] = "axp288-chrg-curr",
+ [BAT_D_CURR] = "axp288-chrg-d-curr",
+ [BAT_VOLT] = "axp288-batt-volt",
+ };
+
+ if (dmi_check_system(axp288_fuel_gauge_blacklist))
+ return -ENODEV;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -719,18 +743,39 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
mutex_init(&info->lock);
INIT_DELAYED_WORK(&info->status_monitor, fuel_gauge_status_monitor);
+ for (i = 0; i < IIO_CHANNEL_NUM; i++) {
+ /*
+ * Note we cannot use devm_iio_channel_get() because x86 systems
+ * lack the device<->channel maps which iio_channel_get() tries
+ * to use when passed a non-NULL device pointer.
+ */
+ info->iio_channel[i] =
+ iio_channel_get(NULL, iio_chan_name[i]);
+ if (IS_ERR(info->iio_channel[i])) {
+ ret = PTR_ERR(info->iio_channel[i]);
+ dev_dbg(&pdev->dev, "error getting iiochan %s: %d\n",
+ iio_chan_name[i], ret);
+ /* Wait for axp288_adc to load */
+ if (ret == -ENODEV)
+ ret = -EPROBE_DEFER;
+
+ goto out_free_iio_chan;
+ }
+ }
+
ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
if (ret < 0)
- return ret;
+ goto out_free_iio_chan;
if (!(ret & FG_DES_CAP1_VALID)) {
dev_err(&pdev->dev, "axp288 not configured by firmware\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_free_iio_chan;
}
ret = fuel_gauge_reg_readb(info, AXP20X_CHRG_CTRL1);
if (ret < 0)
- return ret;
+ goto out_free_iio_chan;
switch ((ret & CHRG_CCCV_CV_MASK) >> CHRG_CCCV_CV_BIT_POS) {
case CHRG_CCCV_CV_4100MV:
info->max_volt = 4100;
@@ -751,7 +796,7 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
if (IS_ERR(info->bat)) {
ret = PTR_ERR(info->bat);
dev_err(&pdev->dev, "failed to register battery: %d\n", ret);
- return ret;
+ goto out_free_iio_chan;
}
fuel_gauge_create_debugfs(info);
@@ -759,6 +804,13 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
schedule_delayed_work(&info->status_monitor, STATUS_MON_DELAY_JIFFIES);
return 0;
+
+out_free_iio_chan:
+ for (i = 0; i < IIO_CHANNEL_NUM; i++)
+ if (!IS_ERR_OR_NULL(info->iio_channel[i]))
+ iio_channel_release(info->iio_channel[i]);
+
+ return ret;
}
static const struct platform_device_id axp288_fg_id_table[] = {
@@ -780,6 +832,9 @@ static int axp288_fuel_gauge_remove(struct platform_device *pdev)
if (info->irq[i] >= 0)
free_irq(info->irq[i], info);
+ for (i = 0; i < IIO_CHANNEL_NUM; i++)
+ iio_channel_release(info->iio_channel[i]);
+
return 0;
}
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 35ff406aca48..b58df04d03b3 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/extcon.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
@@ -162,9 +161,6 @@ struct bq24190_dev_info {
struct device *dev;
struct power_supply *charger;
struct power_supply *battery;
- struct extcon_dev *extcon;
- struct notifier_block extcon_nb;
- struct delayed_work extcon_work;
struct delayed_work input_current_limit_work;
char model_name[I2C_NAME_SIZE];
bool initialized;
@@ -686,6 +682,16 @@ static int bq24190_register_reset(struct bq24190_dev_info *bdi)
int ret, limit = 100;
u8 v;
+ /*
+ * This property can be passed at device instantiation time from platform code:
+ * struct property_entry pe[] =
+ * { PROPERTY_ENTRY_BOOL("disable-reset"), ... };
+ * struct i2c_board_info bi =
+ * { .type = "bq24190", .addr = 0x6b, .properties = pe, .irq = irq };
+ * struct i2c_adapter ad = { ... };
+ * i2c_add_adapter(&ad);
+ * i2c_new_device(&ad, &bi);
+ */
if (device_property_read_bool(bdi->dev, "disable-reset"))
return 0;
@@ -1193,8 +1199,6 @@ static int bq24190_charger_set_property(struct power_supply *psy,
static int bq24190_charger_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
- int ret;
-
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
@@ -1202,13 +1206,10 @@ static int bq24190_charger_property_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
- ret = 1;
- break;
+ return 1;
default:
- ret = 0;
+ return 0;
}
-
- return ret;
}
static void bq24190_input_current_limit_work(struct work_struct *work)
@@ -1623,75 +1624,6 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static void bq24190_extcon_work(struct work_struct *work)
-{
- struct bq24190_dev_info *bdi =
- container_of(work, struct bq24190_dev_info, extcon_work.work);
- int error, iinlim = 0;
- u8 v;
-
- error = pm_runtime_get_sync(bdi->dev);
- if (error < 0) {
- dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
- pm_runtime_put_noidle(bdi->dev);
- return;
- }
-
- if (extcon_get_state(bdi->extcon, EXTCON_CHG_USB_SDP) == 1)
- iinlim = 500000;
- else if (extcon_get_state(bdi->extcon, EXTCON_CHG_USB_CDP) == 1 ||
- extcon_get_state(bdi->extcon, EXTCON_CHG_USB_ACA) == 1)
- iinlim = 1500000;
- else if (extcon_get_state(bdi->extcon, EXTCON_CHG_USB_DCP) == 1)
- iinlim = 2000000;
-
- if (iinlim) {
- error = bq24190_set_field_val(bdi, BQ24190_REG_ISC,
- BQ24190_REG_ISC_IINLIM_MASK,
- BQ24190_REG_ISC_IINLIM_SHIFT,
- bq24190_isc_iinlim_values,
- ARRAY_SIZE(bq24190_isc_iinlim_values),
- iinlim);
- if (error < 0)
- dev_err(bdi->dev, "Can't set IINLIM: %d\n", error);
- }
-
- /* if no charger found and in USB host mode, set OTG 5V boost, else normal */
- if (!iinlim && extcon_get_state(bdi->extcon, EXTCON_USB_HOST) == 1)
- v = BQ24190_REG_POC_CHG_CONFIG_OTG;
- else
- v = BQ24190_REG_POC_CHG_CONFIG_CHARGE;
-
- error = bq24190_write_mask(bdi, BQ24190_REG_POC,
- BQ24190_REG_POC_CHG_CONFIG_MASK,
- BQ24190_REG_POC_CHG_CONFIG_SHIFT,
- v);
- if (error < 0)
- dev_err(bdi->dev, "Can't set CHG_CONFIG: %d\n", error);
-
- pm_runtime_mark_last_busy(bdi->dev);
- pm_runtime_put_autosuspend(bdi->dev);
-}
-
-static int bq24190_extcon_event(struct notifier_block *nb, unsigned long event,
- void *param)
-{
- struct bq24190_dev_info *bdi =
- container_of(nb, struct bq24190_dev_info, extcon_nb);
-
- /*
- * The Power-Good detection may take up to 220ms, sometimes
- * the external charger detection is quicker, and the bq24190 will
- * reset to iinlim based on its own charger detection (which is not
- * hooked up when using external charger detection) resulting in
- * a too low default 500mA iinlim. Delay applying the extcon value
- * for 300ms to avoid this.
- */
- queue_delayed_work(system_wq, &bdi->extcon_work, msecs_to_jiffies(300));
-
- return NOTIFY_OK;
-}
-
static int bq24190_hw_init(struct bq24190_dev_info *bdi)
{
u8 v;
@@ -1766,7 +1698,6 @@ static int bq24190_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct power_supply_config charger_cfg = {}, battery_cfg = {};
struct bq24190_dev_info *bdi;
- const char *name;
int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
@@ -1796,25 +1727,6 @@ static int bq24190_probe(struct i2c_client *client,
return -EINVAL;
}
- /*
- * Devicetree platforms should get extcon via phandle (not yet supported).
- * On ACPI platforms, extcon clients may invoke us with:
- * struct property_entry pe[] =
- * { PROPERTY_ENTRY_STRING("extcon-name", client_name), ... };
- * struct i2c_board_info bi =
- * { .type = "bq24190", .addr = 0x6b, .properties = pe, .irq = irq };
- * struct i2c_adapter ad = { ... };
- * i2c_add_adapter(&ad);
- * i2c_new_device(&ad, &bi);
- */
- if (device_property_read_string(dev, "extcon-name", &name) == 0) {
- bdi->extcon = extcon_get_extcon_dev(name);
- if (!bdi->extcon)
- return -EPROBE_DEFER;
-
- dev_info(bdi->dev, "using extcon device %s\n", name);
- }
-
pm_runtime_enable(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 600);
@@ -1882,20 +1794,6 @@ static int bq24190_probe(struct i2c_client *client,
if (ret < 0)
goto out_sysfs;
- if (bdi->extcon) {
- INIT_DELAYED_WORK(&bdi->extcon_work, bq24190_extcon_work);
- bdi->extcon_nb.notifier_call = bq24190_extcon_event;
- ret = devm_extcon_register_notifier_all(dev, bdi->extcon,
- &bdi->extcon_nb);
- if (ret) {
- dev_err(dev, "Can't register extcon\n");
- goto out_sysfs;
- }
-
- /* Sync initial cable state */
- queue_delayed_work(system_wq, &bdi->extcon_work, 0);
- }
-
enable_irq_wake(client->irq);
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 51f0961ecf3e..d99981542a46 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -323,6 +323,30 @@ static u8
[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
BQ27XXX_DM_REG_ROWS,
},
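+ /* bq27521: only a minimal register set is exposed */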
+ bq27521_regs[BQ27XXX_REG_MAX] = {
+ [BQ27XXX_REG_CTRL] = 0x02,
+ [BQ27XXX_REG_TEMP] = 0x0a,
+ [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_VOLT] = 0x0c,
+ [BQ27XXX_REG_AI] = 0x0e,
+ [BQ27XXX_REG_FLAGS] = 0x08,
+ [BQ27XXX_REG_TTE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_FCC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_CYCT] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_DCAP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_CTRL] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_CLASS] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_BLOCK] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_DATA] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_CKSUM] = INVALID_REG_ADDR,
+ },
bq27530_regs[BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_CTRL] = 0x00,
[BQ27XXX_REG_TEMP] = 0x06,
@@ -557,6 +581,15 @@ static enum power_supply_property bq27520g4_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
+static enum power_supply_property bq27521_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+};
+
static enum power_supply_property bq27530_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -671,6 +704,7 @@ static struct bq27xxx_dm_reg bq27500_dm_regs[] = {
#define bq27520g2_dm_regs 0
#define bq27520g3_dm_regs 0
#define bq27520g4_dm_regs 0
+#define bq27521_dm_regs 0
#define bq27530_dm_regs 0
#define bq27531_dm_regs 0
#define bq27541_dm_regs 0
@@ -717,8 +751,8 @@ static struct bq27xxx_dm_reg bq27621_dm_regs[] = {
#endif
#define BQ27XXX_O_ZERO 0x00000001
-#define BQ27XXX_O_OTDC 0x00000002
-#define BQ27XXX_O_UTOT 0x00000004
+#define BQ27XXX_O_OTDC 0x00000002 /* has OTC/OTD overtemperature flags */
+#define BQ27XXX_O_UTOT 0x00000004 /* has OT overtemperature flag */
#define BQ27XXX_O_CFGUP 0x00000008
#define BQ27XXX_O_RAM 0x00000010
@@ -751,6 +785,7 @@ static struct {
[BQ27520G2] = BQ27XXX_DATA(bq27520g2, 0 , BQ27XXX_O_OTDC),
[BQ27520G3] = BQ27XXX_DATA(bq27520g3, 0 , BQ27XXX_O_OTDC),
[BQ27520G4] = BQ27XXX_DATA(bq27520g4, 0 , BQ27XXX_O_OTDC),
+ [BQ27521] = BQ27XXX_DATA(bq27521, 0 , 0),
[BQ27530] = BQ27XXX_DATA(bq27530, 0 , BQ27XXX_O_UTOT),
[BQ27531] = BQ27XXX_DATA(bq27531, 0 , BQ27XXX_O_UTOT),
[BQ27541] = BQ27XXX_DATA(bq27541, 0 , BQ27XXX_O_OTDC),
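
The bq27521 map is deliberately sparse: beyond control, temperature, voltage, average current, and flags, every register is INVALID_REG_ADDR, so the matching bq27521_props list only advertises status, present, voltage, current, temp, and technology, and bq27521_dm_regs is stubbed to 0 because the chip exposes no data-memory block to this driver.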
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 0b11ed472f33..6b25e5f2337e 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -239,6 +239,7 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
{ "bq27520g2", BQ27520G2 },
{ "bq27520g3", BQ27520G3 },
{ "bq27520g4", BQ27520G4 },
+ { "bq27521", BQ27521 },
{ "bq27530", BQ27530 },
{ "bq27531", BQ27531 },
{ "bq27541", BQ27541 },
@@ -269,6 +270,7 @@ static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = {
{ .compatible = "ti,bq27520g2" },
{ .compatible = "ti,bq27520g3" },
{ .compatible = "ti,bq27520g4" },
+ { .compatible = "ti,bq27521" },
{ .compatible = "ti,bq27530" },
{ .compatible = "ti,bq27531" },
{ .compatible = "ti,bq27541" },
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index 6502fa7c2106..1de4b4493824 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -578,7 +578,7 @@ static int check_charging_duration(struct charger_manager *cm)
} else if (is_ext_pwr_online(cm) && !cm->charger_enabled) {
duration = curr - cm->charging_end_time;
- if (duration > desc->charging_max_duration_ms &&
+ if (duration > desc->discharging_max_duration_ms &&
is_ext_pwr_online(cm)) {
dev_info(cm->dev, "Discharging duration exceed %ums\n",
desc->discharging_max_duration_ms);
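
This branch runs when external power is present but the charger is disabled, i.e. it measures how long the battery has been discharging since charging_end_time; comparing that duration against charging_max_duration_ms was a copy-paste slip from the charging branch, and the fix makes the condition agree with the discharging_max_duration_ms value the dev_info() line already reports.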
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index ee71a2b37b12..98ba07869c3b 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -586,8 +586,8 @@ static int cpcap_battery_init_irq(struct platform_device *pdev,
int irq, error;
irq = platform_get_irq_byname(pdev, name);
- if (!irq)
- return -ENODEV;
+ if (irq < 0)
+ return irq;
error = devm_request_threaded_irq(ddata->dev, irq, NULL,
cpcap_battery_irq_thread,
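
platform_get_irq_byname() returns a negative errno on failure rather than 0, so the old !irq test let lookup failures fall through to devm_request_threaded_irq(); returning irq itself both catches those errors and preserves -EPROBE_DEFER for the caller.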
diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
index 08e4fd9ee607..4cfa3f0cd689 100644
--- a/drivers/power/supply/ltc2941-battery-gauge.c
+++ b/drivers/power/supply/ltc2941-battery-gauge.c
@@ -60,6 +60,7 @@ enum ltc294x_id {
#define LTC294X_REG_CONTROL_PRESCALER_SET(x) \
((x << 3) & LTC294X_REG_CONTROL_PRESCALER_MASK)
#define LTC294X_REG_CONTROL_ALCC_CONFIG_DISABLED 0
+#define LTC294X_REG_CONTROL_ADC_DISABLE(x) ((x) & ~(BIT(7) | BIT(6)))
struct ltc294x_info {
struct i2c_client *client; /* I2C Client pointer */
@@ -523,6 +524,29 @@ static int ltc294x_i2c_probe(struct i2c_client *client,
return 0;
}
+static void ltc294x_i2c_shutdown(struct i2c_client *client)
+{
+ struct ltc294x_info *info = i2c_get_clientdata(client);
+ int ret;
+ u8 value;
+ u8 control;
+
+ /* The LTC2941 does not need any special handling */
+ if (info->id == LTC2941_ID)
+ return;
+
+ /* Read control register */
+ ret = ltc294x_read_regs(info->client, LTC294X_REG_CONTROL, &value, 1);
+ if (ret < 0)
+ return;
+
+ /* Disable continuous ADC conversion as this drains the battery */
+ control = LTC294X_REG_CONTROL_ADC_DISABLE(value);
+ if (control != value)
+ ltc294x_write_regs(info->client, LTC294X_REG_CONTROL,
+ &control, 1);
+}
+
#ifdef CONFIG_PM_SLEEP
static int ltc294x_suspend(struct device *dev)
@@ -589,6 +613,7 @@ static struct i2c_driver ltc294x_driver = {
},
.probe = ltc294x_i2c_probe,
.remove = ltc294x_i2c_remove,
+ .shutdown = ltc294x_i2c_shutdown,
.id_table = ltc294x_i2c_id,
};
module_i2c_driver(ltc294x_driver);
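
On the LTC2942/2943/2944 the top two bits of CONTROL select the ADC mode, so LTC294X_REG_CONTROL_ADC_DISABLE() clears B[7:6] to drop the converter into sleep; the shutdown hook reads the register, masks those bits, and writes back only if the value actually changed. The LTC2941 is skipped up front because it has no voltage/temperature ADC to disable.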
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 5b556a13f517..35dde81b1c9b 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -123,6 +123,8 @@ static int max17042_get_temperature(struct max17042_chip *chip, int *temp)
static int max17042_get_status(struct max17042_chip *chip, int *status)
{
int ret, charge_full, charge_now;
+ int avg_current;
+ u32 data;
ret = power_supply_am_i_supplied(chip->battery);
if (ret < 0) {
@@ -152,10 +154,31 @@ static int max17042_get_status(struct max17042_chip *chip, int *status)
if (ret < 0)
return ret;
- if ((charge_full - charge_now) <= MAX17042_FULL_THRESHOLD)
+ if ((charge_full - charge_now) <= MAX17042_FULL_THRESHOLD) {
*status = POWER_SUPPLY_STATUS_FULL;
- else
+ return 0;
+ }
+
+ /*
+ * Even though we are supplied, we may still be discharging if the
+ * supply is e.g. only delivering 5V 0.5A. Check current if available.
+ */
+ if (!chip->pdata->enable_current_sense) {
*status = POWER_SUPPLY_STATUS_CHARGING;
+ return 0;
+ }
+
+ ret = regmap_read(chip->regmap, MAX17042_AvgCurrent, &data);
+ if (ret < 0)
+ return ret;
+
+ avg_current = sign_extend32(data, 15);
+ avg_current *= 1562500 / chip->pdata->r_sns;
+
+ if (avg_current > 0)
+ *status = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ *status = POWER_SUPPLY_STATUS_DISCHARGING;
return 0;
}
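
The AvgCurrent register read above is a signed 16-bit quantity whose LSB is 1.5625 uV across the sense resistor, and r_sns is kept in micro-ohms by this driver, so multiplying by 1562500 / r_sns yields microamps. A minimal standalone sketch of the same conversion (the helper name and the 10 mOhm default are illustrative assumptions, not part of the driver):

#include <stdint.h>

/* raw: 16-bit AvgCurrent register; r_sns_uohm: sense resistor in
 * micro-ohms (10000 == 10 mOhm). Returns the average current in uA. */
static int avg_current_ua(uint16_t raw, int r_sns_uohm)
{
	int32_t value = (int16_t)raw;	/* same effect as sign_extend32(raw, 15) */

	/* 1.5625 uV per LSB across r_sns => 1562500 / r_sns_uohm uA per LSB */
	return value * (1562500 / r_sns_uohm);
}

With a 10 mOhm sense resistor one LSB comes out at roughly 156 uA, so a positive average current is reported as charging and anything else as discharging.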
@@ -863,16 +886,13 @@ static void max17042_init_worker(struct work_struct *work)
#ifdef CONFIG_OF
static struct max17042_platform_data *
-max17042_get_pdata(struct max17042_chip *chip)
+max17042_get_of_pdata(struct max17042_chip *chip)
{
struct device *dev = &chip->client->dev;
struct device_node *np = dev->of_node;
u32 prop;
struct max17042_platform_data *pdata;
- if (!np)
- return dev->platform_data;
-
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
@@ -897,7 +917,8 @@ max17042_get_pdata(struct max17042_chip *chip)
return pdata;
}
-#else
+#endif
+
static struct max17042_reg_data max17047_default_pdata_init_regs[] = {
/*
* Some firmwares do not set FullSOCThr, Enable End-of-Charge Detection
@@ -907,15 +928,12 @@ static struct max17042_reg_data max17047_default_pdata_init_regs[] = {
};
static struct max17042_platform_data *
-max17042_get_pdata(struct max17042_chip *chip)
+max17042_get_default_pdata(struct max17042_chip *chip)
{
struct device *dev = &chip->client->dev;
struct max17042_platform_data *pdata;
int ret, misc_cfg;
- if (dev->platform_data)
- return dev->platform_data;
-
/*
* The MAX17047 gets used on x86 where we might not have pdata, assume
* the firmware will already have initialized the fuel-gauge and provide
@@ -948,7 +966,21 @@ max17042_get_pdata(struct max17042_chip *chip)
return pdata;
}
+
+static struct max17042_platform_data *
+max17042_get_pdata(struct max17042_chip *chip)
+{
+ struct device *dev = &chip->client->dev;
+
+#ifdef CONFIG_OF
+ if (dev->of_node)
+ return max17042_get_of_pdata(chip);
#endif
+ if (dev->platform_data)
+ return dev->platform_data;
+
+ return max17042_get_default_pdata(chip);
+}
static const struct regmap_config max17042_regmap_config = {
.reg_bits = 8,
diff --git a/drivers/power/supply/sbs-manager.c b/drivers/power/supply/sbs-manager.c
index ccb4217b9638..cb6e8f66c7a2 100644
--- a/drivers/power/supply/sbs-manager.c
+++ b/drivers/power/supply/sbs-manager.c
@@ -183,7 +183,7 @@ static int sbsm_select(struct i2c_mux_core *muxc, u32 chan)
return ret;
/* chan goes from 1 ... 4 */
- reg = 1 << BIT(SBSM_SMB_BAT_OFFSET + chan);
+ reg = BIT(SBSM_SMB_BAT_OFFSET + chan);
ret = sbsm_write_word(data->client, SBSM_CMD_BATSYSSTATE, reg);
if (ret)
dev_err(dev, "Failed to select channel %i\n", chan);
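
The old expression double-shifted: BIT(x) already expands to 1UL << (x), so 1 << BIT(SBSM_SMB_BAT_OFFSET + chan) evaluated to 1 << (1 << (SBSM_SMB_BAT_OFFSET + chan)), which selects the wrong bit or shifts out of range for every channel; BIT(SBSM_SMB_BAT_OFFSET + chan) is the single-bit mask the BATSYSSTATE command expects.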
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index d1694f1def72..35636e1d8a3d 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -29,6 +29,7 @@
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/powercap.h>
+#include <linux/suspend.h>
#include <asm/iosf_mbi.h>
#include <asm/processor.h>
@@ -155,6 +156,7 @@ struct rapl_power_limit {
int prim_id; /* primitive ID used to enable */
struct rapl_domain *domain;
const char *name;
+ u64 last_power_limit;
};
static const char pl1_name[] = "long_term";
@@ -1209,7 +1211,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
struct rapl_domain *rd;
char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/
struct powercap_zone *power_zone = NULL;
- int nr_pl, ret;;
+ int nr_pl, ret;
/* Update the domain data of the new package */
rapl_update_domain_data(rp);
@@ -1533,6 +1535,92 @@ static int rapl_cpu_down_prep(unsigned int cpu)
static enum cpuhp_state pcap_rapl_online;
+static void power_limit_state_save(void)
+{
+ struct rapl_package *rp;
+ struct rapl_domain *rd;
+ int nr_pl, ret, i;
+
+ get_online_cpus();
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ if (!rp->power_zone)
+ continue;
+ rd = power_zone_to_rapl_domain(rp->power_zone);
+ nr_pl = find_nr_power_limit(rd);
+ for (i = 0; i < nr_pl; i++) {
+ switch (rd->rpl[i].prim_id) {
+ case PL1_ENABLE:
+ ret = rapl_read_data_raw(rd,
+ POWER_LIMIT1,
+ true,
+ &rd->rpl[i].last_power_limit);
+ if (ret)
+ rd->rpl[i].last_power_limit = 0;
+ break;
+ case PL2_ENABLE:
+ ret = rapl_read_data_raw(rd,
+ POWER_LIMIT2,
+ true,
+ &rd->rpl[i].last_power_limit);
+ if (ret)
+ rd->rpl[i].last_power_limit = 0;
+ break;
+ }
+ }
+ }
+ put_online_cpus();
+}
+
+static void power_limit_state_restore(void)
+{
+ struct rapl_package *rp;
+ struct rapl_domain *rd;
+ int nr_pl, i;
+
+ get_online_cpus();
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ if (!rp->power_zone)
+ continue;
+ rd = power_zone_to_rapl_domain(rp->power_zone);
+ nr_pl = find_nr_power_limit(rd);
+ for (i = 0; i < nr_pl; i++) {
+ switch (rd->rpl[i].prim_id) {
+ case PL1_ENABLE:
+ if (rd->rpl[i].last_power_limit)
+ rapl_write_data_raw(rd,
+ POWER_LIMIT1,
+ rd->rpl[i].last_power_limit);
+ break;
+ case PL2_ENABLE:
+ if (rd->rpl[i].last_power_limit)
+ rapl_write_data_raw(rd,
+ POWER_LIMIT2,
+ rd->rpl[i].last_power_limit);
+ break;
+ }
+ }
+ }
+ put_online_cpus();
+}
+
+static int rapl_pm_callback(struct notifier_block *nb,
+ unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_SUSPEND_PREPARE:
+ power_limit_state_save();
+ break;
+ case PM_POST_SUSPEND:
+ power_limit_state_restore();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rapl_pm_notifier = {
+ .notifier_call = rapl_pm_callback,
+};
+
static int __init rapl_init(void)
{
const struct x86_cpu_id *id;
@@ -1560,8 +1648,16 @@ static int __init rapl_init(void)
/* Don't bail out if PSys is not supported */
rapl_register_psys();
+
+ ret = register_pm_notifier(&rapl_pm_notifier);
+ if (ret)
+ goto err_unreg_all;
+
return 0;
+err_unreg_all:
+ cpuhp_remove_state(pcap_rapl_online);
+
err_unreg:
rapl_unregister_powercap();
return ret;
@@ -1569,6 +1665,7 @@ err_unreg:
static void __exit rapl_exit(void)
{
+ unregister_pm_notifier(&rapl_pm_notifier);
cpuhp_remove_state(pcap_rapl_online);
rapl_unregister_powercap();
}
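
The notifier pair exists because the package power-limit MSRs may be reset to firmware defaults across suspend-to-RAM: PM_SUSPEND_PREPARE snapshots the raw POWER_LIMIT1/POWER_LIMIT2 values for each constraint under get_online_cpus(), and PM_POST_SUSPEND writes any non-zero snapshot back, so limits configured through powercap survive a suspend cycle. rapl_init() is also taught to unwind the cpuhp state if notifier registration fails, which the new err_unreg_all label handles.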
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 5b10b50f8686..64b2b2501a79 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -673,15 +673,13 @@ EXPORT_SYMBOL_GPL(powercap_unregister_control_type);
static int __init powercap_init(void)
{
- int result = 0;
+ int result;
result = seed_constraint_attributes();
if (result)
return result;
- result = class_register(&powercap_class);
-
- return result;
+ return class_register(&powercap_class);
}
device_initcall(powercap_init);
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 6eb0db37dd88..1d42385b1aa5 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -49,7 +49,7 @@ static DEFINE_IDR(pps_idr);
* Char device methods
*/
-static unsigned int pps_cdev_poll(struct file *file, poll_table *wait)
+static __poll_t pps_cdev_poll(struct file *file, poll_table *wait)
{
struct pps_device *pps = file->private_data;
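
This and the ptp/rpmsg/rtc/s390 hunks below are part of the tree-wide conversion of poll methods to __poll_t, a __bitwise-annotated type for POLL* masks; the change is purely at the type level, but it lets sparse flag code that mixes poll masks with ordinary integers.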
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 58a97d420572..a593b4cf47bf 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -280,7 +280,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
return err;
}
-unsigned int ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
+__poll_t ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index b86f1bfecd6f..c7c62b782cb9 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -90,7 +90,7 @@ int ptp_open(struct posix_clock *pc, fmode_t fmode);
ssize_t ptp_read(struct posix_clock *pc,
uint flags, char __user *buf, size_t cnt);
-uint ptp_poll(struct posix_clock *pc,
+__poll_t ptp_poll(struct posix_clock *pc,
struct file *fp, poll_table *wait);
/*
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index ec4bc1515f0d..6092b3a5978e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -2319,7 +2319,7 @@ static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
-static unsigned int mport_cdev_poll(struct file *filp, poll_table *wait)
+static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait)
{
struct mport_cdev_priv *priv = filp->private_data;
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index ca44e6977cf2..2d9ec378a8bc 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -327,7 +327,7 @@ int cec_add_elem(u64 pfn)
} else {
/* We have reached max count for this page, soft-offline it. */
pr_err("Soft-offlining pfn: 0x%llx\n", pfn);
- memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
+ memory_failure_queue(pfn, MF_SOFT_OFFLINE);
ca->pfns_poisoned++;
}
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 96cd55f9e3c5..b27417ca188a 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -744,6 +744,13 @@ config REGULATOR_S5M8767
via I2C bus. S5M8767A has 9 Bucks and 28 LDO outputs and
supports DVS mode with 8 bits of output voltage control.
+config REGULATOR_SC2731
+ tristate "Spreadtrum SC2731 power regulator driver"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ help
+ This driver provides support for the voltage regulators on the
+ SC2731 PMIC.
+
config REGULATOR_SKY81452
tristate "Skyworks Solutions SKY81452 voltage regulator"
depends on MFD_SKY81452
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 80ffc57a9ca3..19fea09ba10a 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -95,6 +95,7 @@ obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
+obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o
obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o
obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b64b7916507f..42681c10cbe4 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -58,8 +58,6 @@ static bool has_full_constraints;
static struct dentry *debugfs_root;
-static struct class regulator_class;
-
/*
* struct regulator_map
*
@@ -112,11 +110,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
const char *supply_name);
static void _regulator_put(struct regulator *regulator);
-static struct regulator_dev *dev_to_rdev(struct device *dev)
-{
- return container_of(dev, struct regulator_dev, dev);
-}
-
static const char *rdev_get_name(struct regulator_dev *rdev)
{
if (rdev->constraints && rdev->constraints->name)
@@ -236,26 +229,35 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
return 0;
}
+/* return 0 if the state is valid */
+static int regulator_check_states(suspend_state_t state)
+{
+ return (state > PM_SUSPEND_MAX || state == PM_SUSPEND_TO_IDLE);
+}
+
/* Make sure we select a voltage that suits the needs of all
* regulator consumers
*/
static int regulator_check_consumers(struct regulator_dev *rdev,
- int *min_uV, int *max_uV)
+ int *min_uV, int *max_uV,
+ suspend_state_t state)
{
struct regulator *regulator;
+ struct regulator_voltage *voltage;
list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ voltage = &regulator->voltage[state];
/*
* Assume consumers that didn't say anything are OK
* with anything in the constraint range.
*/
- if (!regulator->min_uV && !regulator->max_uV)
+ if (!voltage->min_uV && !voltage->max_uV)
continue;
- if (*max_uV > regulator->max_uV)
- *max_uV = regulator->max_uV;
- if (*min_uV < regulator->min_uV)
- *min_uV = regulator->min_uV;
+ if (*max_uV > voltage->max_uV)
+ *max_uV = voltage->max_uV;
+ if (*min_uV < voltage->min_uV)
+ *min_uV = voltage->min_uV;
}
if (*min_uV > *max_uV) {
@@ -324,6 +326,24 @@ static int regulator_mode_constrain(struct regulator_dev *rdev,
return -EINVAL;
}
+static inline struct regulator_state *
+regulator_get_suspend_state(struct regulator_dev *rdev, suspend_state_t state)
+{
+ if (rdev->constraints == NULL)
+ return NULL;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ return &rdev->constraints->state_standby;
+ case PM_SUSPEND_MEM:
+ return &rdev->constraints->state_mem;
+ case PM_SUSPEND_MAX:
+ return &rdev->constraints->state_disk;
+ default:
+ return NULL;
+ }
+}
+
static ssize_t regulator_uV_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -731,29 +751,32 @@ static int drms_uA_update(struct regulator_dev *rdev)
}
static int suspend_set_state(struct regulator_dev *rdev,
- struct regulator_state *rstate)
+ suspend_state_t state)
{
int ret = 0;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
/* If we have no suspend mode configuration don't set anything;
* only warn if the driver implements set_suspend_voltage or
* set_suspend_mode callback.
*/
- if (!rstate->enabled && !rstate->disabled) {
+ if (rstate->enabled != ENABLE_IN_SUSPEND &&
+ rstate->enabled != DISABLE_IN_SUSPEND) {
if (rdev->desc->ops->set_suspend_voltage ||
rdev->desc->ops->set_suspend_mode)
rdev_warn(rdev, "No configuration\n");
return 0;
}
- if (rstate->enabled && rstate->disabled) {
- rdev_err(rdev, "invalid configuration\n");
- return -EINVAL;
- }
-
- if (rstate->enabled && rdev->desc->ops->set_suspend_enable)
+ if (rstate->enabled == ENABLE_IN_SUSPEND &&
+ rdev->desc->ops->set_suspend_enable)
ret = rdev->desc->ops->set_suspend_enable(rdev);
- else if (rstate->disabled && rdev->desc->ops->set_suspend_disable)
+ else if (rstate->enabled == DISABLE_IN_SUSPEND &&
+ rdev->desc->ops->set_suspend_disable)
ret = rdev->desc->ops->set_suspend_disable(rdev);
else /* OK if set_suspend_enable or set_suspend_disable is NULL */
ret = 0;
@@ -778,28 +801,8 @@ static int suspend_set_state(struct regulator_dev *rdev,
return ret;
}
}
- return ret;
-}
-
-/* locks held by caller */
-static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
-{
- if (!rdev->constraints)
- return -EINVAL;
- switch (state) {
- case PM_SUSPEND_STANDBY:
- return suspend_set_state(rdev,
- &rdev->constraints->state_standby);
- case PM_SUSPEND_MEM:
- return suspend_set_state(rdev,
- &rdev->constraints->state_mem);
- case PM_SUSPEND_MAX:
- return suspend_set_state(rdev,
- &rdev->constraints->state_disk);
- default:
- return -EINVAL;
- }
+ return ret;
}
static void print_constraints(struct regulator_dev *rdev)
@@ -1068,7 +1071,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
/* do we need to setup our suspend state */
if (rdev->constraints->initial_state) {
- ret = suspend_prepare(rdev, rdev->constraints->initial_state);
+ ret = suspend_set_state(rdev, rdev->constraints->initial_state);
if (ret < 0) {
rdev_err(rdev, "failed to set suspend state\n");
return ret;
@@ -1356,9 +1359,9 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
debugfs_create_u32("uA_load", 0444, regulator->debugfs,
&regulator->uA_load);
debugfs_create_u32("min_uV", 0444, regulator->debugfs,
- &regulator->min_uV);
+ &regulator->voltage[PM_SUSPEND_ON].min_uV);
debugfs_create_u32("max_uV", 0444, regulator->debugfs,
- &regulator->max_uV);
+ &regulator->voltage[PM_SUSPEND_ON].max_uV);
debugfs_create_file("constraint_flags", 0444,
regulator->debugfs, regulator,
&constraint_flags_fops);
@@ -1417,20 +1420,6 @@ static void regulator_supply_alias(struct device **dev, const char **supply)
}
}
-static int of_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
-static struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
-{
- struct device *dev;
-
- dev = class_find_device(&regulator_class, NULL, np, of_node_match);
-
- return dev ? dev_to_rdev(dev) : NULL;
-}
-
static int regulator_match(struct device *dev, const void *data)
{
struct regulator_dev *r = dev_to_rdev(dev);
@@ -2468,10 +2457,9 @@ static int _regulator_is_enabled(struct regulator_dev *rdev)
return rdev->desc->ops->is_enabled(rdev);
}
-static int _regulator_list_voltage(struct regulator *regulator,
- unsigned selector, int lock)
+static int _regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector, int lock)
{
- struct regulator_dev *rdev = regulator->rdev;
const struct regulator_ops *ops = rdev->desc->ops;
int ret;
@@ -2487,7 +2475,8 @@ static int _regulator_list_voltage(struct regulator *regulator,
if (lock)
mutex_unlock(&rdev->mutex);
} else if (rdev->is_switch && rdev->supply) {
- ret = _regulator_list_voltage(rdev->supply, selector, lock);
+ ret = _regulator_list_voltage(rdev->supply->rdev,
+ selector, lock);
} else {
return -EINVAL;
}
@@ -2563,7 +2552,7 @@ EXPORT_SYMBOL_GPL(regulator_count_voltages);
*/
int regulator_list_voltage(struct regulator *regulator, unsigned selector)
{
- return _regulator_list_voltage(regulator, selector, 1);
+ return _regulator_list_voltage(regulator->rdev, selector, 1);
}
EXPORT_SYMBOL_GPL(regulator_list_voltage);
@@ -2605,8 +2594,8 @@ int regulator_get_hardware_vsel_register(struct regulator *regulator,
if (ops->set_voltage_sel != regulator_set_voltage_sel_regmap)
return -EOPNOTSUPP;
- *vsel_reg = rdev->desc->vsel_reg;
- *vsel_mask = rdev->desc->vsel_mask;
+ *vsel_reg = rdev->desc->vsel_reg;
+ *vsel_mask = rdev->desc->vsel_mask;
return 0;
}
@@ -2897,10 +2886,38 @@ out:
return ret;
}
+static int _regulator_do_set_suspend_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, suspend_state_t state)
+{
+ struct regulator_state *rstate;
+ int uV, sel;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (min_uV < rstate->min_uV)
+ min_uV = rstate->min_uV;
+ if (max_uV > rstate->max_uV)
+ max_uV = rstate->max_uV;
+
+ sel = regulator_map_voltage(rdev, min_uV, max_uV);
+ if (sel < 0)
+ return sel;
+
+ uV = rdev->desc->ops->list_voltage(rdev, sel);
+ if (uV >= min_uV && uV <= max_uV)
+ rstate->uV = uV;
+
+ return 0;
+}
+
static int regulator_set_voltage_unlocked(struct regulator *regulator,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ suspend_state_t state)
{
struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_voltage *voltage = &regulator->voltage[state];
int ret = 0;
int old_min_uV, old_max_uV;
int current_uV;
@@ -2911,7 +2928,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
* should be a noop (some cpufreq implementations use the same
* voltage for multiple frequencies, for example).
*/
- if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
+ if (voltage->min_uV == min_uV && voltage->max_uV == max_uV)
goto out;
/* If we're trying to set a range that overlaps the current voltage,
@@ -2921,8 +2938,8 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
current_uV = _regulator_get_voltage(rdev);
if (min_uV <= current_uV && current_uV <= max_uV) {
- regulator->min_uV = min_uV;
- regulator->max_uV = max_uV;
+ voltage->min_uV = min_uV;
+ voltage->max_uV = max_uV;
goto out;
}
}
@@ -2940,12 +2957,12 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
goto out;
/* restore original values in case of error */
- old_min_uV = regulator->min_uV;
- old_max_uV = regulator->max_uV;
- regulator->min_uV = min_uV;
- regulator->max_uV = max_uV;
+ old_min_uV = voltage->min_uV;
+ old_max_uV = voltage->max_uV;
+ voltage->min_uV = min_uV;
+ voltage->max_uV = max_uV;
- ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+ ret = regulator_check_consumers(rdev, &min_uV, &max_uV, state);
if (ret < 0)
goto out2;
@@ -2963,7 +2980,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
goto out2;
}
- best_supply_uV = _regulator_list_voltage(regulator, selector, 0);
+ best_supply_uV = _regulator_list_voltage(rdev, selector, 0);
if (best_supply_uV < 0) {
ret = best_supply_uV;
goto out2;
@@ -2982,7 +2999,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
if (supply_change_uV > 0) {
ret = regulator_set_voltage_unlocked(rdev->supply,
- best_supply_uV, INT_MAX);
+ best_supply_uV, INT_MAX, state);
if (ret) {
dev_err(&rdev->dev, "Failed to increase supply voltage: %d\n",
ret);
@@ -2990,13 +3007,17 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
}
}
- ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+ if (state == PM_SUSPEND_ON)
+ ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+ else
+ ret = _regulator_do_set_suspend_voltage(rdev, min_uV,
+ max_uV, state);
if (ret < 0)
goto out2;
if (supply_change_uV < 0) {
ret = regulator_set_voltage_unlocked(rdev->supply,
- best_supply_uV, INT_MAX);
+ best_supply_uV, INT_MAX, state);
if (ret)
dev_warn(&rdev->dev, "Failed to decrease supply voltage: %d\n",
ret);
@@ -3007,8 +3028,8 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
out:
return ret;
out2:
- regulator->min_uV = old_min_uV;
- regulator->max_uV = old_max_uV;
+ voltage->min_uV = old_min_uV;
+ voltage->max_uV = old_max_uV;
return ret;
}
@@ -3037,7 +3058,8 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
regulator_lock_supply(regulator->rdev);
- ret = regulator_set_voltage_unlocked(regulator, min_uV, max_uV);
+ ret = regulator_set_voltage_unlocked(regulator, min_uV, max_uV,
+ PM_SUSPEND_ON);
regulator_unlock_supply(regulator->rdev);
@@ -3045,6 +3067,89 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
}
EXPORT_SYMBOL_GPL(regulator_set_voltage);
+static inline int regulator_suspend_toggle(struct regulator_dev *rdev,
+ suspend_state_t state, bool en)
+{
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (!rstate->changeable)
+ return -EPERM;
+
+ rstate->enabled = en;
+
+ return 0;
+}
+
+int regulator_suspend_enable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return regulator_suspend_toggle(rdev, state, true);
+}
+EXPORT_SYMBOL_GPL(regulator_suspend_enable);
+
+int regulator_suspend_disable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct regulator *regulator;
+ struct regulator_voltage *voltage;
+
+ /*
+ * If any consumer wants this regulator kept on in suspend
+ * states, don't mark it as disabled.
+ */
+ list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ voltage = &regulator->voltage[state];
+ if (voltage->min_uV || voltage->max_uV)
+ return 0;
+ }
+
+ return regulator_suspend_toggle(rdev, state, false);
+}
+EXPORT_SYMBOL_GPL(regulator_suspend_disable);
+
+static int _regulator_set_suspend_voltage(struct regulator *regulator,
+ int min_uV, int max_uV,
+ suspend_state_t state)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (rstate->min_uV == rstate->max_uV) {
+ rdev_err(rdev, "The suspend voltage can't be changed!\n");
+ return -EPERM;
+ }
+
+ return regulator_set_voltage_unlocked(regulator, min_uV, max_uV, state);
+}
+
+int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV,
+ int max_uV, suspend_state_t state)
+{
+ int ret = 0;
+
+ /* PM_SUSPEND_ON is handled by regulator_set_voltage() */
+ if (regulator_check_states(state) || state == PM_SUSPEND_ON)
+ return -EINVAL;
+
+ regulator_lock_supply(regulator->rdev);
+
+ ret = _regulator_set_suspend_voltage(regulator, min_uV,
+ max_uV, state);
+
+ regulator_unlock_supply(regulator->rdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_set_suspend_voltage);
+
/**
* regulator_set_voltage_time - get raise/fall time
* @regulator: regulator source
@@ -3138,6 +3243,7 @@ EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel);
int regulator_sync_voltage(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_voltage *voltage = &regulator->voltage[PM_SUSPEND_ON];
int ret, min_uV, max_uV;
mutex_lock(&rdev->mutex);
@@ -3149,20 +3255,20 @@ int regulator_sync_voltage(struct regulator *regulator)
}
/* This is only going to work if we've had a voltage configured. */
- if (!regulator->min_uV && !regulator->max_uV) {
+ if (!voltage->min_uV && !voltage->max_uV) {
ret = -EINVAL;
goto out;
}
- min_uV = regulator->min_uV;
- max_uV = regulator->max_uV;
+ min_uV = voltage->min_uV;
+ max_uV = voltage->max_uV;
/* This should be a paranoia check... */
ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
if (ret < 0)
goto out;
- ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+ ret = regulator_check_consumers(rdev, &min_uV, &max_uV, 0);
if (ret < 0)
goto out;
@@ -3918,12 +4024,6 @@ static void regulator_dev_release(struct device *dev)
kfree(rdev);
}
-static struct class regulator_class = {
- .name = "regulator",
- .dev_release = regulator_dev_release,
- .dev_groups = regulator_dev_groups,
-};
-
static void rdev_init_debugfs(struct regulator_dev *rdev)
{
struct device *parent = rdev->dev.parent;
@@ -4174,81 +4274,86 @@ void regulator_unregister(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(regulator_unregister);
-static int _regulator_suspend_prepare(struct device *dev, void *data)
+#ifdef CONFIG_SUSPEND
+static int _regulator_suspend_late(struct device *dev, void *data)
{
struct regulator_dev *rdev = dev_to_rdev(dev);
- const suspend_state_t *state = data;
+ suspend_state_t *state = data;
int ret;
mutex_lock(&rdev->mutex);
- ret = suspend_prepare(rdev, *state);
+ ret = suspend_set_state(rdev, *state);
mutex_unlock(&rdev->mutex);
return ret;
}
/**
- * regulator_suspend_prepare - prepare regulators for system wide suspend
+ * regulator_suspend_late - prepare regulators for system wide suspend
* @state: system suspend state
*
* Configure each regulator with its suspend operating parameters for state.
- * This will usually be called by machine suspend code prior to supending.
*/
-int regulator_suspend_prepare(suspend_state_t state)
+static int regulator_suspend_late(struct device *dev)
{
- /* ON is handled by regulator active state */
- if (state == PM_SUSPEND_ON)
- return -EINVAL;
+ suspend_state_t state = pm_suspend_target_state;
return class_for_each_device(&regulator_class, NULL, &state,
- _regulator_suspend_prepare);
+ _regulator_suspend_late);
}
-EXPORT_SYMBOL_GPL(regulator_suspend_prepare);
-
-static int _regulator_suspend_finish(struct device *dev, void *data)
+static int _regulator_resume_early(struct device *dev, void *data)
{
+ int ret = 0;
struct regulator_dev *rdev = dev_to_rdev(dev);
- int ret;
+ suspend_state_t *state = data;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, *state);
+ if (rstate == NULL)
+ return -EINVAL;
mutex_lock(&rdev->mutex);
- if (rdev->use_count > 0 || rdev->constraints->always_on) {
- if (!_regulator_is_enabled(rdev)) {
- ret = _regulator_do_enable(rdev);
- if (ret)
- dev_err(dev,
- "Failed to resume regulator %d\n",
- ret);
- }
- } else {
- if (!have_full_constraints())
- goto unlock;
- if (!_regulator_is_enabled(rdev))
- goto unlock;
- ret = _regulator_do_disable(rdev);
- if (ret)
- dev_err(dev, "Failed to suspend regulator %d\n", ret);
- }
-unlock:
+ if (rdev->desc->ops->resume_early &&
+ (rstate->enabled == ENABLE_IN_SUSPEND ||
+ rstate->enabled == DISABLE_IN_SUSPEND))
+ ret = rdev->desc->ops->resume_early(rdev);
+
mutex_unlock(&rdev->mutex);
- /* Keep processing regulators in spite of any errors */
- return 0;
+ return ret;
}
-/**
- * regulator_suspend_finish - resume regulators from system wide suspend
- *
- * Turn on regulators that might be turned off by regulator_suspend_prepare
- * and that should be turned on according to the regulators properties.
- */
-int regulator_suspend_finish(void)
+static int regulator_resume_early(struct device *dev)
{
- return class_for_each_device(&regulator_class, NULL, NULL,
- _regulator_suspend_finish);
+ suspend_state_t state = pm_suspend_target_state;
+
+ return class_for_each_device(&regulator_class, NULL, &state,
+ _regulator_resume_early);
}
-EXPORT_SYMBOL_GPL(regulator_suspend_finish);
+#else /* !CONFIG_SUSPEND */
+
+#define regulator_suspend_late NULL
+#define regulator_resume_early NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops __maybe_unused regulator_pm_ops = {
+ .suspend_late = regulator_suspend_late,
+ .resume_early = regulator_resume_early,
+};
+#endif
+
+struct class regulator_class = {
+ .name = "regulator",
+ .dev_release = regulator_dev_release,
+ .dev_groups = regulator_dev_groups,
+#ifdef CONFIG_PM
+ .pm = &regulator_pm_ops,
+#endif
+};
/**
* regulator_has_full_constraints - the system has fully specified constraints
*
@@ -4424,8 +4529,8 @@ static void regulator_summary_show_subtree(struct seq_file *s,
switch (rdev->desc->type) {
case REGULATOR_VOLTAGE:
seq_printf(s, "%37dmV %5dmV",
- consumer->min_uV / 1000,
- consumer->max_uV / 1000);
+ consumer->voltage[PM_SUSPEND_ON].min_uV / 1000,
+ consumer->voltage[PM_SUSPEND_ON].max_uV / 1000);
break;
case REGULATOR_CURRENT:
break;
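
The consumer-facing entry point for the per-state bookkeeping is regulator_set_suspend_voltage(): it rejects PM_SUSPEND_ON and invalid states via regulator_check_states(), takes the supply lock, and routes into regulator_set_voltage_unlocked() with the target state, where _regulator_do_set_suspend_voltage() clamps the request to the constraint range and caches the mapped voltage in rstate->uV. A minimal consumer-side sketch, assuming the declaration is exported through linux/regulator/consumer.h and using a hypothetical "vdd" supply name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/suspend.h>

/* Hypothetical helper: request 1.8 V on "vdd" while in suspend-to-RAM. */
static int example_setup_vdd_suspend(struct device *dev)
{
	struct regulator *vdd = devm_regulator_get(dev, "vdd");

	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	return regulator_set_suspend_voltage(vdd, 1800000, 1800000,
					     PM_SUSPEND_MEM);
}

regulator_suspend_enable() and regulator_suspend_disable(), by contrast, take the regulator_dev and are aimed at regulator drivers and machine code; the disable path refuses to turn off a rail that any consumer still constrains for that state.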
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 66a8ea0c8386..abfd56e8c78a 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -16,10 +16,25 @@
#ifndef __REGULATOR_INTERNAL_H
#define __REGULATOR_INTERNAL_H
+#include <linux/suspend.h>
+
+#define REGULATOR_STATES_NUM (PM_SUSPEND_MAX + 1)
+
+struct regulator_voltage {
+ int min_uV;
+ int max_uV;
+};
+
/*
* struct regulator
*
* One for each consumer device.
+ * @voltage - an array of voltage constraints, one entry per PM state, i.e.:
+ * PM_SUSPEND_ON
+ * PM_SUSPEND_TO_IDLE
+ * PM_SUSPEND_STANDBY
+ * PM_SUSPEND_MEM
+ * PM_SUSPEND_MAX
*/
struct regulator {
struct device *dev;
@@ -27,14 +42,22 @@ struct regulator {
unsigned int always_on:1;
unsigned int bypass:1;
int uA_load;
- int min_uV;
- int max_uV;
+ struct regulator_voltage voltage[REGULATOR_STATES_NUM];
const char *supply_name;
struct device_attribute dev_attr;
struct regulator_dev *rdev;
struct dentry *debugfs;
};
+extern struct class regulator_class;
+
+static inline struct regulator_dev *dev_to_rdev(struct device *dev)
+{
+ return container_of(dev, struct regulator_dev, dev);
+}
+
+struct regulator_dev *of_find_regulator_by_node(struct device_node *np);
+
#ifdef CONFIG_OF
struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
const struct regulator_desc *desc,
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 14637a01ba2d..092ed6efb3ec 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -177,14 +177,30 @@ static void of_get_regulation_constraints(struct device_node *np,
if (of_property_read_bool(suspend_np,
"regulator-on-in-suspend"))
- suspend_state->enabled = true;
+ suspend_state->enabled = ENABLE_IN_SUSPEND;
else if (of_property_read_bool(suspend_np,
"regulator-off-in-suspend"))
- suspend_state->disabled = true;
+ suspend_state->enabled = DISABLE_IN_SUSPEND;
+ else
+ suspend_state->enabled = DO_NOTHING_IN_SUSPEND;
+
+ if (!of_property_read_u32(suspend_np, "regulator-suspend-min-microvolt",
+ &pval))
+ suspend_state->min_uV = pval;
+
+ if (!of_property_read_u32(suspend_np, "regulator-suspend-max-microvolt",
+ &pval))
+ suspend_state->max_uV = pval;
if (!of_property_read_u32(suspend_np,
"regulator-suspend-microvolt", &pval))
suspend_state->uV = pval;
+ else /* otherwise use min_uV as default suspend voltage */
+ suspend_state->uV = suspend_state->min_uV;
+
+ if (of_property_read_bool(suspend_np,
+ "regulator-changeable-in-suspend"))
+ suspend_state->changeable = true;
if (i == PM_SUSPEND_MEM)
constraints->initial_state = PM_SUSPEND_MEM;
@@ -376,3 +392,17 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
return init_data;
}
+
+static int of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = class_find_device(&regulator_class, NULL, np, of_node_match);
+
+ return dev ? dev_to_rdev(dev) : NULL;
+}
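
Three binding behaviors change here: regulator-on-in-suspend and regulator-off-in-suspend now map onto the explicit ENABLE_IN_SUSPEND / DISABLE_IN_SUSPEND values, with DO_NOTHING_IN_SUSPEND as the default instead of two independent booleans; regulator-suspend-min-microvolt and regulator-suspend-max-microvolt bound what a consumer may request for that state, with min_uV doubling as the default suspend voltage when no fixed regulator-suspend-microvolt is given; and regulator-changeable-in-suspend gates the new regulator_suspend_enable()/regulator_suspend_disable() toggles (a suspend voltage is only adjustable when the min and max constraints differ). The of_find_regulator_by_node() lookup also moves here from core.c so both files can share regulator_class via internal.h.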
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 0241ada47d04..63c7a0c17777 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -486,24 +486,6 @@ static int spmi_vreg_update_bits(struct spmi_regulator *vreg, u16 addr, u8 val,
return regmap_update_bits(vreg->regmap, vreg->base + addr, mask, val);
}
-static int spmi_regulator_common_is_enabled(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- u8 reg;
-
- spmi_vreg_read(vreg, SPMI_COMMON_REG_ENABLE, &reg, 1);
-
- return (reg & SPMI_COMMON_ENABLE_MASK) == SPMI_COMMON_ENABLE;
-}
-
-static int spmi_regulator_common_enable(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
-
- return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_ENABLE,
- SPMI_COMMON_ENABLE, SPMI_COMMON_ENABLE_MASK);
-}
-
static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
@@ -513,7 +495,7 @@ static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
vreg->vs_enable_time = ktime_get();
}
- return spmi_regulator_common_enable(rdev);
+ return regulator_enable_regmap(rdev);
}
static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
@@ -524,14 +506,6 @@ static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
return spmi_vreg_write(vreg, SPMI_VS_REG_OCP, &reg, 1);
}
-static int spmi_regulator_common_disable(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
-
- return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_ENABLE,
- SPMI_COMMON_DISABLE, SPMI_COMMON_ENABLE_MASK);
-}
-
static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
int min_uV, int max_uV)
{
@@ -1062,9 +1036,9 @@ static irqreturn_t spmi_regulator_vs_ocp_isr(int irq, void *data)
}
static struct regulator_ops spmi_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_common_get_voltage,
@@ -1077,9 +1051,9 @@ static struct regulator_ops spmi_smps_ops = {
};
static struct regulator_ops spmi_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.get_voltage_sel = spmi_regulator_common_get_voltage,
.map_voltage = spmi_regulator_common_map_voltage,
@@ -1094,9 +1068,9 @@ static struct regulator_ops spmi_ldo_ops = {
};
static struct regulator_ops spmi_ln_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.get_voltage_sel = spmi_regulator_common_get_voltage,
.map_voltage = spmi_regulator_common_map_voltage,
@@ -1107,8 +1081,8 @@ static struct regulator_ops spmi_ln_ldo_ops = {
static struct regulator_ops spmi_vs_ops = {
.enable = spmi_regulator_vs_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_pull_down = spmi_regulator_common_set_pull_down,
.set_soft_start = spmi_regulator_common_set_soft_start,
.set_over_current_protection = spmi_regulator_vs_ocp,
@@ -1117,9 +1091,9 @@ static struct regulator_ops spmi_vs_ops = {
};
static struct regulator_ops spmi_boost_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
.map_voltage = spmi_regulator_single_map_voltage,
@@ -1128,9 +1102,9 @@ static struct regulator_ops spmi_boost_ops = {
};
static struct regulator_ops spmi_ftsmps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_common_get_voltage,
@@ -1143,9 +1117,9 @@ static struct regulator_ops spmi_ftsmps_ops = {
};
static struct regulator_ops spmi_ult_lo_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_ult_lo_smps_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_ult_lo_smps_get_voltage,
@@ -1157,9 +1131,9 @@ static struct regulator_ops spmi_ult_lo_smps_ops = {
};
static struct regulator_ops spmi_ult_ho_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
@@ -1172,9 +1146,9 @@ static struct regulator_ops spmi_ult_ho_smps_ops = {
};
static struct regulator_ops spmi_ult_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
.map_voltage = spmi_regulator_single_map_voltage,
@@ -1711,6 +1685,9 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
vreg->desc.id = -1;
vreg->desc.owner = THIS_MODULE;
vreg->desc.type = REGULATOR_VOLTAGE;
+ vreg->desc.enable_reg = reg->base + SPMI_COMMON_REG_ENABLE;
+ vreg->desc.enable_mask = SPMI_COMMON_ENABLE_MASK;
+ vreg->desc.enable_val = SPMI_COMMON_ENABLE;
vreg->desc.name = name = reg->name;
vreg->desc.supply_name = reg->supply;
vreg->desc.of_match = reg->name;
@@ -1723,6 +1700,7 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
config.dev = dev;
config.driver_data = vreg;
+ config.regmap = regmap;
rdev = devm_regulator_register(dev, &vreg->desc, &config);
if (IS_ERR(rdev)) {
dev_err(dev, "failed to register %s\n", name);
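
With enable_reg, enable_mask, and enable_val filled in at probe time and config.regmap handed to the core, the open-coded enable/disable/is_enabled helpers collapse into the generic regulator_*_regmap operations; spmi_regulator_vs_enable() survives only as a thin wrapper that records the enable timestamp used by the over-current handling before delegating to regulator_enable_regmap().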
diff --git a/drivers/regulator/sc2731-regulator.c b/drivers/regulator/sc2731-regulator.c
new file mode 100644
index 000000000000..eb2bdf060b7b
--- /dev/null
+++ b/drivers/regulator/sc2731-regulator.c
@@ -0,0 +1,256 @@
+ //SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+/*
+ * SC2731 regulator lock register
+ */
+#define SC2731_PWR_WR_PROT 0xf0c
+#define SC2731_WR_UNLOCK_VALUE 0x6e7f
+
+/*
+ * SC2731 enable register
+ */
+#define SC2731_POWER_PD_SW 0xc28
+#define SC2731_LDO_CAMA0_PD 0xcfc
+#define SC2731_LDO_CAMA1_PD 0xd04
+#define SC2731_LDO_CAMMOT_PD 0xd0c
+#define SC2731_LDO_VLDO_PD 0xd6c
+#define SC2731_LDO_EMMCCORE_PD 0xd2c
+#define SC2731_LDO_SDCORE_PD 0xd74
+#define SC2731_LDO_SDIO_PD 0xd70
+#define SC2731_LDO_WIFIPA_PD 0xd4c
+#define SC2731_LDO_USB33_PD 0xd5c
+#define SC2731_LDO_CAMD0_PD 0xd7c
+#define SC2731_LDO_CAMD1_PD 0xd84
+#define SC2731_LDO_CON_PD 0xd8c
+#define SC2731_LDO_CAMIO_PD 0xd94
+#define SC2731_LDO_SRAM_PD 0xd78
+
+/*
+ * SC2731 enable mask
+ */
+#define SC2731_DCDC_CPU0_PD_MASK BIT(4)
+#define SC2731_DCDC_CPU1_PD_MASK BIT(3)
+#define SC2731_DCDC_RF_PD_MASK BIT(11)
+#define SC2731_LDO_CAMA0_PD_MASK BIT(0)
+#define SC2731_LDO_CAMA1_PD_MASK BIT(0)
+#define SC2731_LDO_CAMMOT_PD_MASK BIT(0)
+#define SC2731_LDO_VLDO_PD_MASK BIT(0)
+#define SC2731_LDO_EMMCCORE_PD_MASK BIT(0)
+#define SC2731_LDO_SDCORE_PD_MASK BIT(0)
+#define SC2731_LDO_SDIO_PD_MASK BIT(0)
+#define SC2731_LDO_WIFIPA_PD_MASK BIT(0)
+#define SC2731_LDO_USB33_PD_MASK BIT(0)
+#define SC2731_LDO_CAMD0_PD_MASK BIT(0)
+#define SC2731_LDO_CAMD1_PD_MASK BIT(0)
+#define SC2731_LDO_CON_PD_MASK BIT(0)
+#define SC2731_LDO_CAMIO_PD_MASK BIT(0)
+#define SC2731_LDO_SRAM_PD_MASK BIT(0)
+
+/*
+ * SC2731 vsel register
+ */
+#define SC2731_DCDC_CPU0_VOL 0xc54
+#define SC2731_DCDC_CPU1_VOL 0xc64
+#define SC2731_DCDC_RF_VOL 0xcb8
+#define SC2731_LDO_CAMA0_VOL 0xd00
+#define SC2731_LDO_CAMA1_VOL 0xd08
+#define SC2731_LDO_CAMMOT_VOL 0xd10
+#define SC2731_LDO_VLDO_VOL 0xd28
+#define SC2731_LDO_EMMCCORE_VOL 0xd30
+#define SC2731_LDO_SDCORE_VOL 0xd38
+#define SC2731_LDO_SDIO_VOL 0xd40
+#define SC2731_LDO_WIFIPA_VOL 0xd50
+#define SC2731_LDO_USB33_VOL 0xd60
+#define SC2731_LDO_CAMD0_VOL 0xd80
+#define SC2731_LDO_CAMD1_VOL 0xd88
+#define SC2731_LDO_CON_VOL 0xd90
+#define SC2731_LDO_CAMIO_VOL 0xd98
+#define SC2731_LDO_SRAM_VOL 0xdB0
+
+/*
+ * SC2731 vsel register mask
+ */
+#define SC2731_DCDC_CPU0_VOL_MASK GENMASK(8, 0)
+#define SC2731_DCDC_CPU1_VOL_MASK GENMASK(8, 0)
+#define SC2731_DCDC_RF_VOL_MASK GENMASK(8, 0)
+#define SC2731_LDO_CAMA0_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMA1_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMMOT_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_VLDO_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_EMMCCORE_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_SDCORE_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_SDIO_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_WIFIPA_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_USB33_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMD0_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CAMD1_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CON_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CAMIO_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_SRAM_VOL_MASK GENMASK(6, 0)
+
+enum sc2731_regulator_id {
+ SC2731_BUCK_CPU0,
+ SC2731_BUCK_CPU1,
+ SC2731_BUCK_RF,
+ SC2731_LDO_CAMA0,
+ SC2731_LDO_CAMA1,
+ SC2731_LDO_CAMMOT,
+ SC2731_LDO_VLDO,
+ SC2731_LDO_EMMCCORE,
+ SC2731_LDO_SDCORE,
+ SC2731_LDO_SDIO,
+ SC2731_LDO_WIFIPA,
+ SC2731_LDO_USB33,
+ SC2731_LDO_CAMD0,
+ SC2731_LDO_CAMD1,
+ SC2731_LDO_CON,
+ SC2731_LDO_CAMIO,
+ SC2731_LDO_SRAM,
+};
+
+static const struct regulator_ops sc2731_regu_linear_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+#define SC2731_REGU_LINEAR(_id, en_reg, en_mask, vreg, vmask, \
+ vstep, vmin, vmax) { \
+ .name = #_id, \
+ .of_match = of_match_ptr(#_id), \
+ .ops = &sc2731_regu_linear_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = SC2731_##_id, \
+ .owner = THIS_MODULE, \
+ .min_uV = vmin, \
+ .n_voltages = ((vmax) - (vmin)) / (vstep) + 1, \
+ .uV_step = vstep, \
+ .enable_is_inverted = true, \
+ .enable_val = 0, \
+ .enable_reg = en_reg, \
+ .enable_mask = en_mask, \
+ .vsel_reg = vreg, \
+ .vsel_mask = vmask, \
+}
+
+static struct regulator_desc regulators[] = {
+ SC2731_REGU_LINEAR(BUCK_CPU0, SC2731_POWER_PD_SW,
+ SC2731_DCDC_CPU0_PD_MASK, SC2731_DCDC_CPU0_VOL,
+ SC2731_DCDC_CPU0_VOL_MASK, 3125, 400000, 1996875),
+ SC2731_REGU_LINEAR(BUCK_CPU1, SC2731_POWER_PD_SW,
+ SC2731_DCDC_CPU1_PD_MASK, SC2731_DCDC_CPU1_VOL,
+ SC2731_DCDC_CPU1_VOL_MASK, 3125, 400000, 1996875),
+ SC2731_REGU_LINEAR(BUCK_RF, SC2731_POWER_PD_SW, SC2731_DCDC_RF_PD_MASK,
+ SC2731_DCDC_RF_VOL, SC2731_DCDC_RF_VOL_MASK,
+ 3125, 600000, 2196875),
+ SC2731_REGU_LINEAR(LDO_CAMA0, SC2731_LDO_CAMA0_PD,
+ SC2731_LDO_CAMA0_PD_MASK, SC2731_LDO_CAMA0_VOL,
+ SC2731_LDO_CAMA0_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMA1, SC2731_LDO_CAMA1_PD,
+ SC2731_LDO_CAMA1_PD_MASK, SC2731_LDO_CAMA1_VOL,
+ SC2731_LDO_CAMA1_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMMOT, SC2731_LDO_CAMMOT_PD,
+ SC2731_LDO_CAMMOT_PD_MASK, SC2731_LDO_CAMMOT_VOL,
+ SC2731_LDO_CAMMOT_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_VLDO, SC2731_LDO_VLDO_PD,
+ SC2731_LDO_VLDO_PD_MASK, SC2731_LDO_VLDO_VOL,
+ SC2731_LDO_VLDO_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_EMMCCORE, SC2731_LDO_EMMCCORE_PD,
+ SC2731_LDO_EMMCCORE_PD_MASK, SC2731_LDO_EMMCCORE_VOL,
+ SC2731_LDO_EMMCCORE_VOL_MASK, 10000, 1200000,
+ 3750000),
+ SC2731_REGU_LINEAR(LDO_SDCORE, SC2731_LDO_SDCORE_PD,
+ SC2731_LDO_SDCORE_PD_MASK, SC2731_LDO_SDCORE_VOL,
+ SC2731_LDO_SDCORE_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_SDIO, SC2731_LDO_SDIO_PD,
+ SC2731_LDO_SDIO_PD_MASK, SC2731_LDO_SDIO_VOL,
+ SC2731_LDO_SDIO_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_WIFIPA, SC2731_LDO_WIFIPA_PD,
+ SC2731_LDO_WIFIPA_PD_MASK, SC2731_LDO_WIFIPA_VOL,
+ SC2731_LDO_WIFIPA_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_USB33, SC2731_LDO_USB33_PD,
+ SC2731_LDO_USB33_PD_MASK, SC2731_LDO_USB33_VOL,
+ SC2731_LDO_USB33_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMD0, SC2731_LDO_CAMD0_PD,
+ SC2731_LDO_CAMD0_PD_MASK, SC2731_LDO_CAMD0_VOL,
+ SC2731_LDO_CAMD0_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CAMD1, SC2731_LDO_CAMD1_PD,
+ SC2731_LDO_CAMD1_PD_MASK, SC2731_LDO_CAMD1_VOL,
+ SC2731_LDO_CAMD1_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CON, SC2731_LDO_CON_PD,
+ SC2731_LDO_CON_PD_MASK, SC2731_LDO_CON_VOL,
+ SC2731_LDO_CON_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CAMIO, SC2731_LDO_CAMIO_PD,
+ SC2731_LDO_CAMIO_PD_MASK, SC2731_LDO_CAMIO_VOL,
+ SC2731_LDO_CAMIO_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_SRAM, SC2731_LDO_SRAM_PD,
+ SC2731_LDO_SRAM_PD_MASK, SC2731_LDO_SRAM_VOL,
+ SC2731_LDO_SRAM_VOL_MASK, 6250, 1000000, 1793750),
+};
+
+static int sc2731_regulator_unlock(struct regmap *regmap)
+{
+ return regmap_write(regmap, SC2731_PWR_WR_PROT,
+ SC2731_WR_UNLOCK_VALUE);
+}
+
+static int sc2731_regulator_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ struct regmap *regmap;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ dev_err(&pdev->dev, "failed to get regmap.\n");
+ return -ENODEV;
+ }
+
+ ret = sc2731_regulator_unlock(regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to release regulator lock\n");
+ return ret;
+ }
+
+ config.dev = &pdev->dev;
+ config.regmap = regmap;
+
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ regulators[i].name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver sc2731_regulator_driver = {
+ .driver = {
+ .name = "sc27xx-regulator",
+ },
+ .probe = sc2731_regulator_probe,
+};
+
+module_platform_driver(sc2731_regulator_driver);
+
+MODULE_AUTHOR("Chen Junhui <erick.chen@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum SC2731 regulator driver");
+MODULE_LICENSE("GPL v2");
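
A quick consistency check on the linear ranges above: SC2731_REGU_LINEAR() computes n_voltages = (vmax - vmin) / vstep + 1, so BUCK_CPU0 gets (1996875 - 400000) / 3125 + 1 = 512 selectors, exactly filling its 9-bit GENMASK(8, 0) vsel field, while LDO_CAMA0 gets (3750000 - 1200000) / 10000 + 1 = 256 for its 8-bit mask and LDO_CAMD0 gets (1793750 - 1000000) / 6250 + 1 = 128 for its 7-bit mask. The enable bits are power-down bits, so enable_is_inverted is set and clearing the *_PD bit through the generic regmap helpers turns the rail on.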
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index bc489958fed7..1827185beacc 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -28,9 +28,6 @@
#include <linux/regulator/machine.h>
#include <linux/mfd/tps65218.h>
-enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4,
- DCDC5, DCDC6, LDO1, LS3 };
-
#define TPS65218_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \
_em, _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \
{ \
@@ -329,6 +326,8 @@ static int tps65218_regulator_probe(struct platform_device *pdev)
/* Allocate memory for strobes */
tps->strobes = devm_kzalloc(&pdev->dev, sizeof(u8) *
TPS65218_NUM_REGULATOR, GFP_KERNEL);
+ if (!tps->strobes)
+ return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(regulators); i++) {
rdev = devm_regulator_register(&pdev->dev, &regulators[i],
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index b01774e9fac0..e540ca362d08 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -919,12 +919,12 @@ static int qcom_smd_trysend(struct rpmsg_endpoint *ept, void *data, int len)
return __qcom_smd_send(qsept->qsch, data, len, false);
}
-static unsigned int qcom_smd_poll(struct rpmsg_endpoint *ept,
+static __poll_t qcom_smd_poll(struct rpmsg_endpoint *ept,
struct file *filp, poll_table *wait)
{
struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
struct qcom_smd_channel *channel = qsept->qsch;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &channel->fblockread_event, wait);
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index e0996fce3963..e622fcda30fa 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -256,10 +256,10 @@ free_kbuf:
return ret < 0 ? ret : len;
}
-static unsigned int rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
+static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
{
struct rpmsg_eptdev *eptdev = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!eptdev->ept)
return POLLERR;
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index dffa3aab7178..5a081762afcc 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -247,7 +247,7 @@ EXPORT_SYMBOL(rpmsg_trysendto);
*
* Returns mask representing the current state of the endpoint's send buffers
*/
-unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
+__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait)
{
if (WARN_ON(!ept))
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 0cf9c7e2ee83..685aa70e9cbe 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -71,7 +71,7 @@ struct rpmsg_endpoint_ops {
int (*trysendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
int (*trysend_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len);
- unsigned int (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
+ __poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
};
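The rpmsg, rtc and s390 hunks around here are all instances of the same tree-wide conversion: poll handlers return __poll_t, a sparse "bitwise" type, so mixing poll event bits with ordinary integers shows up in static analysis rather than at runtime. A minimal sketch of a conforming handler (the device structure and readiness test are placeholders):

#include <linux/poll.h>

static __poll_t demo_poll(struct file *filp, poll_table *wait)
{
	struct demo_dev *dev = filp->private_data;	/* hypothetical */
	__poll_t mask = 0;

	poll_wait(filp, &dev->readq, wait);
	if (demo_data_ready(dev))			/* hypothetical */
		mask |= POLLIN | POLLRDNORM;
	return mask;
}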
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 215eac68ae2d..5a7b30d0773b 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -194,7 +194,7 @@ rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
return ret;
}
-static unsigned int rtc_dev_poll(struct file *file, poll_table *wait)
+static __poll_t rtc_dev_poll(struct file *file, poll_table *wait)
{
struct rtc_device *rtc = file->private_data;
unsigned long data;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index c94b606e0df8..ee14d8e45c97 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2803,6 +2803,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
erp = dasd_3990_erp_handle_match_erp(cqr, erp);
}
+
+ /*
+ * For path verification work we need to stick with the path that was
+ * originally chosen so that the per path configuration data is
+ * assigned correctly.
+ */
+ if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) {
+ erp->lpm = cqr->lpm;
+ }
+
if (device->features & DASD_FEATURE_ERPLOG) {
/* print current erp_chain */
dev_err(&device->cdev->dev,
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index a7917d473774..0c075d100252 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -661,9 +661,9 @@ static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
return effective_count;
}
-static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable)
+static __poll_t dasd_eer_poll(struct file *filp, poll_table *ptable)
{
- unsigned int mask;
+ __poll_t mask;
unsigned long flags;
struct eerbuffer *eerb;
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 05ac6ba15a53..614b44e70a28 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -17,6 +17,8 @@ CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
CFLAGS_sclp_early_core.o += -march=z900
endif
+CFLAGS_sclp_early_core.o += -D__NO_FORTIFY
+
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o sclp_early_core.o
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index bf4ab4efed73..956f662908a6 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -429,7 +429,7 @@ out_copy:
return count;
}
-static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p)
+static __poll_t mon_poll(struct file *filp, struct poll_table_struct *p)
{
struct mon_private *monpriv = filp->private_data;
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index a782a207ad31..c7e484f70654 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -91,9 +91,6 @@ config QETH_L3
To compile as a module choose M. The module name is qeth_l3.
If unsure, choose Y.
-config QETH_IPV6
- def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
-
config CCWGROUP
tristate
default (LCS || CTCM || QETH)
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 92ae84a927fc..0ee8f33efb54 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -756,18 +756,14 @@ lcs_get_lancmd(struct lcs_card *card, int count)
static void
lcs_get_reply(struct lcs_reply *reply)
{
- WARN_ON(atomic_read(&reply->refcnt) <= 0);
- atomic_inc(&reply->refcnt);
+ refcount_inc(&reply->refcnt);
}
static void
lcs_put_reply(struct lcs_reply *reply)
{
- WARN_ON(atomic_read(&reply->refcnt) <= 0);
- if (atomic_dec_and_test(&reply->refcnt)) {
+ if (refcount_dec_and_test(&reply->refcnt))
kfree(reply);
- }
-
}
static struct lcs_reply *
@@ -780,7 +776,7 @@ lcs_alloc_reply(struct lcs_cmd *cmd)
reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
if (!reply)
return NULL;
- atomic_set(&reply->refcnt,1);
+ refcount_set(&reply->refcnt, 1);
reply->sequence_no = cmd->sequence_no;
reply->received = 0;
reply->rc = 0;
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index fbc8b90b1f85..bd52caa3b11b 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -5,6 +5,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
+#include <linux/refcount.h>
#include <asm/ccwdev.h>
#define LCS_DBF_TEXT(level, name, text) \
@@ -271,7 +272,7 @@ struct lcs_buffer {
struct lcs_reply {
struct list_head list;
__u16 sequence_no;
- atomic_t refcnt;
+ refcount_t refcnt;
/* Callback for completion notification. */
void (*callback)(struct lcs_card *, struct lcs_cmd *);
wait_queue_head_t wait_q;
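Swapping atomic_t for refcount_t is what lets the open-coded WARN_ON(atomic_read(...) <= 0) checks above disappear: refcount_t saturates rather than wrapping and emits its own warnings on increment-from-zero or underflow. The resulting get/put pair, in sketch form:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_obj {
	refcount_t refcnt;	/* initialized with refcount_set(&..., 1) */
};

static void demo_get(struct demo_obj *obj)
{
	refcount_inc(&obj->refcnt);	/* WARNs if it was already 0 */
}

static void demo_put(struct demo_obj *obj)
{
	if (refcount_dec_and_test(&obj->refcnt))
		kfree(obj);
}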
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 15015a24f8ad..db42107bf2f5 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -21,6 +21,7 @@
#include <linux/ethtool.h>
#include <linux/hashtable.h>
#include <linux/ip.h>
+#include <linux/refcount.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
@@ -296,8 +297,23 @@ struct qeth_hdr_layer3 {
__u8 ext_flags;
__u16 vlan_id;
__u16 frame_offset;
- __u8 dest_addr[16];
-} __attribute__ ((packed));
+ union {
+ /* TX: */
+ u8 ipv6_addr[16];
+ struct ipv4 {
+ u8 res[12];
+ u32 addr;
+ } ipv4;
+ /* RX: */
+ struct rx {
+ u8 res1[2];
+ u8 src_mac[6];
+ u8 res2[4];
+ u16 vlan_id;
+ u8 res3[2];
+ } rx;
+ } next_hop;
+};
struct qeth_hdr_layer2 {
__u8 id;
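The union above overlays the direction-specific interpretations of the same 16 trailing header bytes, replacing the old raw dest_addr[16] plus magic offsets. Later hunks in this series access it roughly like this (sketch; field names as defined above):

/* TX, IPv4: next-hop address sits in the last 4 bytes */
*((__be32 *)&hdr->hdr.l3.next_hop.ipv4.addr) = ip_hdr(skb)->daddr;
/* TX, IPv6: full 16-byte next-hop address */
memcpy(hdr->hdr.l3.next_hop.ipv6_addr, &ipv6_hdr(skb)->daddr, 16);
/* RX: the device fills in source MAC and VLAN id instead */
tag = hdr->hdr.l3.next_hop.rx.vlan_id;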
@@ -504,12 +520,6 @@ struct qeth_qdio_info {
int default_out_queue;
};
-#define QETH_ETH_MAC_V4 0x0100 /* like v4 */
-#define QETH_ETH_MAC_V6 0x3333 /* like v6 */
-/* tr mc mac is longer, but that will be enough to detect mc frames */
-#define QETH_TR_MAC_NC 0xc000 /* non-canonical */
-#define QETH_TR_MAC_C 0x0300 /* canonical */
-
/**
* buffer stuff for read channel
*/
@@ -565,9 +575,9 @@ enum qeth_cq {
};
struct qeth_ipato {
- int enabled;
- int invert4;
- int invert6;
+ bool enabled;
+ bool invert4;
+ bool invert6;
struct list_head entries;
};
@@ -632,7 +642,7 @@ struct qeth_reply {
int rc;
void *param;
struct qeth_card *card;
- atomic_t refcnt;
+ refcount_t refcnt;
};
struct qeth_card_blkt {
@@ -846,14 +856,16 @@ static inline int qeth_get_micros(void)
static inline int qeth_get_ip_version(struct sk_buff *skb)
{
- __be16 *p = &((struct ethhdr *)skb->data)->h_proto;
+ struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
+ __be16 prot = veth->h_vlan_proto;
+
+ if (prot == htons(ETH_P_8021Q))
+ prot = veth->h_vlan_encapsulated_proto;
- if (be16_to_cpu(*p) == ETH_P_8021Q)
- p += 2;
- switch (be16_to_cpu(*p)) {
- case ETH_P_IPV6:
+ switch (prot) {
+ case htons(ETH_P_IPV6):
return 6;
- case ETH_P_IP:
+ case htons(ETH_P_IP):
return 4;
default:
return 0;
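The new qeth_get_ip_version() reads the EtherType through vlan_eth_hdr(), so a single 802.1Q tag no longer needs the old "p += 2" pointer arithmetic, and the switch compares in network byte order; htons() on a constant folds at compile time, so the case labels remain constant expressions. As a standalone sketch:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static int demo_ip_version(struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	__be16 prot = veth->h_vlan_proto;

	if (prot == htons(ETH_P_8021Q))	/* skip one VLAN tag */
		prot = veth->h_vlan_encapsulated_proto;

	switch (prot) {
	case htons(ETH_P_IPV6):
		return 6;
	case htons(ETH_P_IP):
		return 4;
	default:
		return 0;
	}
}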
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 430e3214f7e2..6abd3bc285e4 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -36,6 +36,7 @@
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
+#include <asm/cpcmd.h>
#include "qeth_core.h"
@@ -564,7 +565,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
if (reply) {
- atomic_set(&reply->refcnt, 1);
+ refcount_set(&reply->refcnt, 1);
atomic_set(&reply->received, 0);
reply->card = card;
}
@@ -573,14 +574,12 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
static void qeth_get_reply(struct qeth_reply *reply)
{
- WARN_ON(atomic_read(&reply->refcnt) <= 0);
- atomic_inc(&reply->refcnt);
+ refcount_inc(&reply->refcnt);
}
static void qeth_put_reply(struct qeth_reply *reply)
{
- WARN_ON(atomic_read(&reply->refcnt) <= 0);
- if (atomic_dec_and_test(&reply->refcnt))
+ if (refcount_dec_and_test(&reply->refcnt))
kfree(reply);
}
@@ -1480,9 +1479,9 @@ static int qeth_setup_card(struct qeth_card *card)
qeth_set_intial_options(card);
/* IP address takeover */
INIT_LIST_HEAD(&card->ipato.entries);
- card->ipato.enabled = 0;
- card->ipato.invert4 = 0;
- card->ipato.invert6 = 0;
+ card->ipato.enabled = false;
+ card->ipato.invert4 = false;
+ card->ipato.invert6 = false;
/* init QDIO stuff */
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
@@ -1717,23 +1716,87 @@ static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
(prcd[0x11] == _ascebc['M']));
}
+static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
+{
+ enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
+ struct diag26c_vnic_resp *response = NULL;
+ struct diag26c_vnic_req *request = NULL;
+ struct ccw_dev_id id;
+ char userid[80];
+ int rc = 0;
+
+ QETH_DBF_TEXT(SETUP, 2, "vmlayer");
+
+ cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
+ if (rc)
+ goto out;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
+ response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
+ if (!request || !response) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ ccw_device_get_id(CARD_RDEV(card), &id);
+ request->resp_buf_len = sizeof(*response);
+ request->resp_version = DIAG26C_VERSION6_VM65918;
+ request->req_format = DIAG26C_VNIC_INFO;
+ ASCEBC(userid, 8);
+ memcpy(&request->sys_name, userid, 8);
+ request->devno = id.devno;
+
+ QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
+ rc = diag26c(request, response, DIAG26C_PORT_VNIC);
+ QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
+ if (rc)
+ goto out;
+ QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
+
+ if (request->resp_buf_len < sizeof(*response) ||
+ response->version != request->resp_version) {
+ rc = -EIO;
+ goto out;
+ }
+
+ if (response->protocol == VNIC_INFO_PROT_L2)
+ disc = QETH_DISCIPLINE_LAYER2;
+ else if (response->protocol == VNIC_INFO_PROT_L3)
+ disc = QETH_DISCIPLINE_LAYER3;
+
+out:
+ kfree(response);
+ kfree(request);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "err%x", rc);
+ return disc;
+}
+
/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
+ enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
+
if (card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSN) {
+ card->info.type == QETH_CARD_TYPE_OSN)
+ disc = QETH_DISCIPLINE_LAYER2;
+ else if (card->info.guestlan)
+ disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
+ QETH_DISCIPLINE_LAYER3 :
+ qeth_vm_detect_layer(card);
+
+ switch (disc) {
+ case QETH_DISCIPLINE_LAYER2:
QETH_DBF_TEXT(SETUP, 3, "force l2");
- return QETH_DISCIPLINE_LAYER2;
- }
-
- /* virtual HiperSocket is L3 only: */
- if (card->info.guestlan && card->info.type == QETH_CARD_TYPE_IQD) {
+ break;
+ case QETH_DISCIPLINE_LAYER3:
QETH_DBF_TEXT(SETUP, 3, "force l3");
- return QETH_DISCIPLINE_LAYER3;
+ break;
+ default:
+ QETH_DBF_TEXT(SETUP, 3, "force no");
}
- QETH_DBF_TEXT(SETUP, 3, "force no");
- return QETH_DISCIPLINE_UNDETERMINED;
+ return disc;
}
static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
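qeth_vm_detect_layer() asks the z/VM hypervisor (DIAG 0x26C, VNIC info) which discipline the virtual NIC was defined with, instead of guessing from the card type. The calling convention is the part worth noting: request and response buffers come from GFP_DMA memory, the caller states which response version it expects, and a short or version-mismatched reply is treated as -EIO. Condensed from the hunk above (sketch; struct and constant names as defined there):

request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
if (!request || !response)
	return -ENOMEM;	/* sketch: the real code branches to a cleanup label */

request->resp_buf_len = sizeof(*response);
request->resp_version = DIAG26C_VERSION6_VM65918;
rc = diag26c(request, response, DIAG26C_PORT_VNIC);
if (!rc && (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version))
	rc = -EIO;	/* hypervisor returned less than requested */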
@@ -4218,9 +4281,8 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
cmd = (struct qeth_ipa_cmd *) data;
if (!card->options.layer2 ||
!(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
- memcpy(card->dev->dev_addr,
- &cmd->data.setadapterparms.data.change_addr.addr,
- OSA_ADDR_LEN);
+ ether_addr_copy(card->dev->dev_addr,
+ cmd->data.setadapterparms.data.change_addr.addr);
card->info.mac_bits |= QETH_LAYER2_MAC_READ;
}
qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
@@ -4242,9 +4304,9 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
- cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
- memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
- card->dev->dev_addr, OSA_ADDR_LEN);
+ cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
+ ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
+ card->dev->dev_addr);
rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
NULL);
return rc;
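This hunk is one of several in the series replacing memcpy()/OSA_ADDR_LEN with the <linux/etherdevice.h> helpers and ETH_ALEN. Two caveats carried by those helpers: ether_addr_copy() requires both buffers to be 2-byte aligned, and the *_64bits() comparison variants additionally need two readable bytes past the 6-byte address, which struct layout normally provides. Sketch:

#include <linux/etherdevice.h>

static bool demo_same_mac(const u8 *a, const u8 *b)
{
	/* fast path: compares 8 bytes, masks off the trailing 2 */
	return ether_addr_equal_64bits(a, b);
}

static void demo_copy_mac(u8 *dst, const u8 *src)
{
	ether_addr_copy(dst, src);	/* u16-aligned 6-byte copy */
}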
@@ -4789,9 +4851,12 @@ int qeth_vm_request_mac(struct qeth_card *card)
request->op_code = DIAG26C_GET_MAC;
request->devno = id.devno;
+ QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
+ QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
if (rc)
goto out;
+ QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
if (request->resp_buf_len < sizeof(*response) ||
response->version != request->resp_version) {
@@ -5386,6 +5451,13 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_poll);
+static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
+{
+ if (!cmd->hdr.return_code)
+ cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+ return cmd->hdr.return_code;
+}
+
int qeth_setassparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
@@ -6242,7 +6314,7 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
(struct qeth_checksum_cmd *)reply->param;
QETH_CARD_TEXT(card, 4, "chkdoccb");
- if (cmd->hdr.return_code)
+ if (qeth_setassparms_inspect_rc(cmd))
return 0;
memset(chksum_cb, 0, sizeof(*chksum_cb));
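qeth_setassparms_inspect_rc() folds the setassparms sub-command's return code into the IPA header code, so a callback needs only one check and later readers of cmd->hdr.return_code see the real failure. A hedged usage sketch (the callback name is illustrative):

static int demo_assist_cb(struct qeth_card *card, struct qeth_reply *reply,
			  unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;

	if (qeth_setassparms_inspect_rc(cmd))
		return 0;	/* hdr.return_code now carries the error */

	/* success path: consume the cmd->data.setassparms payload */
	return 0;
}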
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index ff6877f7b6f8..619f897b4bb0 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -10,6 +10,7 @@
#define __QETH_CORE_MPC_H__
#include <asm/qeth.h>
+#include <uapi/linux/if_ether.h>
#define IPA_PDU_HEADER_SIZE 0x40
#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
@@ -25,7 +26,6 @@ extern unsigned char IPA_PDU_HEADER[];
#define QETH_SEQ_NO_LENGTH 4
#define QETH_MPC_TOKEN_LENGTH 4
#define QETH_MCL_LENGTH 4
-#define OSA_ADDR_LEN 6
#define QETH_TIMEOUT (10 * HZ)
#define QETH_IPA_TIMEOUT (45 * HZ)
@@ -416,12 +416,11 @@ struct qeth_query_cmds_supp {
} __attribute__ ((packed));
struct qeth_change_addr {
- __u32 cmd;
- __u32 addr_size;
- __u32 no_macs;
- __u8 addr[OSA_ADDR_LEN];
-} __attribute__ ((packed));
-
+ u32 cmd;
+ u32 addr_size;
+ u32 no_macs;
+ u8 addr[ETH_ALEN];
+};
struct qeth_snmp_cmd {
__u8 token[16];
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 09b1c4ef3dc9..f2130051ca11 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -22,8 +22,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout);
bool qeth_l2_vnicc_is_in_use(struct qeth_card *card);
struct qeth_mac {
- u8 mac_addr[OSA_ADDR_LEN];
- u8 is_uc:1;
+ u8 mac_addr[ETH_ALEN];
u8 disp_flag:2;
struct hlist_node hnode;
};
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 5863ea170ff2..7f236440483f 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -109,8 +109,8 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
- memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
+ cmd->data.setdelmac.mac_length = ETH_ALEN;
+ ether_addr_copy(cmd->data.setdelmac.mac, mac);
return qeth_setdelmac_makerc(card, qeth_send_ipa_cmd(card, iob,
NULL, NULL));
}
@@ -123,7 +123,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
if (rc == 0) {
card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
- memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+ ether_addr_copy(card->dev->dev_addr, mac);
dev_info(&card->gdev->dev,
"MAC address %pM successfully registered on device %s\n",
card->dev->dev_addr, card->dev->name);
@@ -156,54 +156,37 @@ static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
return rc;
}
-static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
+static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
{
+ enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+ IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
int rc;
- QETH_CARD_TEXT(card, 2, "L2Sgmac");
- rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC);
+ QETH_CARD_TEXT(card, 2, "L2Wmac");
+ rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc == -EEXIST)
- QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
- mac, QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n",
+ mac, QETH_CARD_IFNAME(card));
else if (rc)
- QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
- mac, QETH_CARD_IFNAME(card), rc);
+ QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n",
+ mac, QETH_CARD_IFNAME(card), rc);
return rc;
}
-static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
+static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
{
+ enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+ IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
int rc;
- QETH_CARD_TEXT(card, 2, "L2Dgmac");
- rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC);
+ QETH_CARD_TEXT(card, 2, "L2Rmac");
+ rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc)
- QETH_DBF_MESSAGE(2,
- "Could not delete group MAC %pM on %s: %d\n",
- mac, QETH_CARD_IFNAME(card), rc);
+ QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n",
+ mac, QETH_CARD_IFNAME(card), rc);
return rc;
}
-static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
-{
- if (mac->is_uc) {
- return qeth_l2_send_setdelmac(card, mac->mac_addr,
- IPA_CMD_SETVMAC);
- } else {
- return qeth_l2_send_setgroupmac(card, mac->mac_addr);
- }
-}
-
-static int qeth_l2_remove_mac(struct qeth_card *card, struct qeth_mac *mac)
-{
- if (mac->is_uc) {
- return qeth_l2_send_setdelmac(card, mac->mac_addr,
- IPA_CMD_DELVMAC);
- } else {
- return qeth_l2_send_delgroupmac(card, mac->mac_addr);
- }
-}
-
static void qeth_l2_del_all_macs(struct qeth_card *card)
{
struct qeth_mac *mac;
@@ -549,7 +532,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
QETH_CARD_TEXT(card, 3, "setmcTYP");
return -EOPNOTSUPP;
}
- QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN);
+ QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "setmcREC");
return -ERESTARTSYS;
@@ -597,27 +580,23 @@ static void qeth_promisc_to_bridge(struct qeth_card *card)
* only if it is not stored in the hash table already
*
*/
-static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha,
- u8 is_uc)
+static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
{
u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
struct qeth_mac *mac;
hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) {
- if (is_uc == mac->is_uc &&
- !memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) {
+ if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) {
mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
return;
}
}
mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC);
-
if (!mac)
return;
- memcpy(mac->mac_addr, ha->addr, OSA_ADDR_LEN);
- mac->is_uc = is_uc;
+ ether_addr_copy(mac->mac_addr, ha->addr);
mac->disp_flag = QETH_DISP_ADDR_ADD;
hash_add(card->mac_htable, &mac->hnode, mac_hash);
@@ -643,26 +622,29 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
spin_lock_bh(&card->mclock);
netdev_for_each_mc_addr(ha, dev)
- qeth_l2_add_mac(card, ha, 0);
-
+ qeth_l2_add_mac(card, ha);
netdev_for_each_uc_addr(ha, dev)
- qeth_l2_add_mac(card, ha, 1);
+ qeth_l2_add_mac(card, ha);
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
- if (mac->disp_flag == QETH_DISP_ADDR_DELETE) {
- qeth_l2_remove_mac(card, mac);
+ switch (mac->disp_flag) {
+ case QETH_DISP_ADDR_DELETE:
+ qeth_l2_remove_mac(card, mac->mac_addr);
hash_del(&mac->hnode);
kfree(mac);
-
- } else if (mac->disp_flag == QETH_DISP_ADDR_ADD) {
- rc = qeth_l2_write_mac(card, mac);
+ break;
+ case QETH_DISP_ADDR_ADD:
+ rc = qeth_l2_write_mac(card, mac->mac_addr);
if (rc) {
hash_del(&mac->hnode);
kfree(mac);
- } else
- mac->disp_flag = QETH_DISP_ADDR_DELETE;
- } else
+ break;
+ }
+ /* fall through */
+ default:
+ /* for next call to set_rx_mode(): */
mac->disp_flag = QETH_DISP_ADDR_DELETE;
+ }
}
spin_unlock_bh(&card->mclock);
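The rewritten qeth_l2_set_rx_mode() runs a small per-address state machine: every address seen in this pass is marked ADD or DO_NOTHING, and whatever still carries QETH_DISP_ADDR_DELETE afterwards was not re-reported by the stack and gets unregistered. The fall-through after a successful write re-arms the entry for the next pass. Its skeleton, with the states annotated:

hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
	switch (mac->disp_flag) {
	case QETH_DISP_ADDR_DELETE:	/* not seen this pass */
		qeth_l2_remove_mac(card, mac->mac_addr);
		hash_del(&mac->hnode);
		kfree(mac);
		break;
	case QETH_DISP_ADDR_ADD:	/* newly reported */
		if (qeth_l2_write_mac(card, mac->mac_addr)) {
			hash_del(&mac->hnode);	/* registration failed */
			kfree(mac);
			break;
		}
		/* fall through */
	default:	/* registered; assume gone until seen again */
		mac->disp_flag = QETH_DISP_ADDR_DELETE;
	}
}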
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 194ae9b577cc..bdd45f4dcace 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -29,7 +29,7 @@ struct qeth_ipaddr {
*/
int ref_counter;
enum qeth_prot_versions proto;
- unsigned char mac[OSA_ADDR_LEN];
+ unsigned char mac[ETH_ALEN];
union {
struct {
unsigned int addr;
@@ -69,20 +69,21 @@ struct qeth_ipato_entry {
extern const struct attribute_group *qeth_l3_attr_groups[];
void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
-int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
int qeth_l3_create_device_attributes(struct device *);
void qeth_l3_remove_device_attributes(struct device *);
int qeth_l3_setrouting_v4(struct qeth_card *);
int qeth_l3_setrouting_v6(struct qeth_card *);
int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
-void qeth_l3_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions,
- u8 *, int);
+int qeth_l3_del_ipato_entry(struct qeth_card *card,
+ enum qeth_prot_versions proto, u8 *addr,
+ int mask_bits);
int qeth_l3_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
-void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
+int qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
+ const u8 *addr);
int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
-void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
- const u8 *);
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
+int qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+ const u8 *addr);
+void qeth_l3_update_ipato(struct qeth_card *card);
struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 6a73894b0cb5..b0c888e86cd4 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -18,15 +18,20 @@
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
+#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/slab.h>
+#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/arp.h>
#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
#include <net/ip6_fib.h>
#include <net/ip6_checksum.h>
#include <net/iucv/af_iucv.h>
@@ -37,99 +42,22 @@
static int qeth_l3_set_offline(struct ccwgroup_device *);
static int qeth_l3_stop(struct net_device *);
-static void qeth_l3_set_multicast_list(struct net_device *);
+static void qeth_l3_set_rx_mode(struct net_device *dev);
static int qeth_l3_register_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
static int qeth_l3_deregister_addr_entry(struct qeth_card *,
struct qeth_ipaddr *);
-static int qeth_l3_isxdigit(char *buf)
-{
- while (*buf) {
- if (!isxdigit(*buf++))
- return 0;
- }
- return 1;
-}
-
static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
{
sprintf(buf, "%pI4", addr);
}
-static int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
-{
- int count = 0, rc = 0;
- unsigned int in[4];
- char c;
-
- rc = sscanf(buf, "%u.%u.%u.%u%c",
- &in[0], &in[1], &in[2], &in[3], &c);
- if (rc != 4 && (rc != 5 || c != '\n'))
- return -EINVAL;
- for (count = 0; count < 4; count++) {
- if (in[count] > 255)
- return -EINVAL;
- addr[count] = in[count];
- }
- return 0;
-}
-
static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
{
sprintf(buf, "%pI6", addr);
}
-static int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
-{
- const char *end, *end_tmp, *start;
- __u16 *in;
- char num[5];
- int num2, cnt, out, found, save_cnt;
- unsigned short in_tmp[8] = {0, };
-
- cnt = out = found = save_cnt = num2 = 0;
- end = start = buf;
- in = (__u16 *) addr;
- memset(in, 0, 16);
- while (*end) {
- end = strchr(start, ':');
- if (end == NULL) {
- end = buf + strlen(buf);
- end_tmp = strchr(start, '\n');
- if (end_tmp != NULL)
- end = end_tmp;
- out = 1;
- }
- if ((end - start)) {
- memset(num, 0, 5);
- if ((end - start) > 4)
- return -EINVAL;
- memcpy(num, start, end - start);
- if (!qeth_l3_isxdigit(num))
- return -EINVAL;
- sscanf(start, "%x", &num2);
- if (found)
- in_tmp[save_cnt++] = num2;
- else
- in[cnt++] = num2;
- if (out)
- break;
- } else {
- if (found)
- return -EINVAL;
- found = 1;
- }
- start = ++end;
- }
- if (cnt + save_cnt > 8)
- return -EINVAL;
- cnt = 7;
- while (save_cnt)
- in[cnt--] = in_tmp[--save_cnt];
- return 0;
-}
-
void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
char *buf)
{
@@ -139,17 +67,6 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
qeth_l3_ipaddr6_to_string(addr, buf);
}
-int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
- __u8 *addr)
-{
- if (proto == QETH_PROT_IPV4)
- return qeth_l3_string_to_ipaddr4(buf, addr);
- else if (proto == QETH_PROT_IPV6)
- return qeth_l3_string_to_ipaddr6(buf, addr);
- else
- return -EINVAL;
-}
-
static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
{
int i, j;
@@ -164,8 +81,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
}
}
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
- struct qeth_ipaddr *addr)
+static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+ struct qeth_ipaddr *addr)
{
struct qeth_ipato_entry *ipatoe;
u8 addr_bits[128] = {0, };
@@ -174,6 +91,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
if (!card->ipato.enabled)
return 0;
+ if (addr->type != QETH_IP_TYPE_NORMAL)
+ return 0;
qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
(addr->proto == QETH_PROT_IPV4)? 4:16);
@@ -205,8 +124,8 @@ inline int
qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
{
return addr1->proto == addr2->proto &&
- !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
- !memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
+ !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
+ ether_addr_equal_64bits(addr1->mac, addr2->mac);
}
static struct qeth_ipaddr *
@@ -290,8 +209,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
addr->ref_counter = 1;
- if (addr->type == QETH_IP_TYPE_NORMAL &&
- qeth_l3_is_addr_covered_by_ipato(card, addr)) {
+ if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
QETH_CARD_TEXT(card, 2, "tkovaddr");
addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
}
@@ -445,7 +363,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
+ ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
if (addr->proto == QETH_PROT_IPV6)
memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
sizeof(struct in6_addr));
@@ -581,7 +499,6 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
int rc = 0;
QETH_CARD_TEXT(card, 3, "setrtg6");
-#ifdef CONFIG_QETH_IPV6
if (!qeth_is_supported(card, IPA_IPV6))
return 0;
@@ -598,13 +515,33 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
" on %s. Type set to 'no router'.\n", rc,
QETH_CARD_IFNAME(card));
}
-#endif
return rc;
}
/*
* IP address takeover related functions
*/
+
+/**
+ * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
+ *
+ * Caller must hold ip_lock.
+ */
+void qeth_l3_update_ipato(struct qeth_card *card)
+{
+ struct qeth_ipaddr *addr;
+ unsigned int i;
+
+ hash_for_each(card->ip_htable, i, addr, hnode) {
+ if (addr->type != QETH_IP_TYPE_NORMAL)
+ continue;
+ if (qeth_l3_is_addr_covered_by_ipato(card, addr))
+ addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
+ else
+ addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG;
+ }
+}
+
static void qeth_l3_clear_ipato_list(struct qeth_card *card)
{
struct qeth_ipato_entry *ipatoe, *tmp;
@@ -616,6 +553,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
kfree(ipatoe);
}
+ qeth_l3_update_ipato(card);
spin_unlock_bh(&card->ip_lock);
}
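qeth_l3_update_ipato() gives every IPATO configuration change one way to refresh the takeover flag on all NORMAL addresses. Per the kerneldoc, callers serialize on ip_lock; the sysfs stores later in this series use exactly this shape:

/* after changing ipato.enabled / invert4 / invert6 / the entry list: */
spin_lock_bh(&card->ip_lock);
qeth_l3_update_ipato(card);
spin_unlock_bh(&card->ip_lock);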
@@ -640,18 +578,22 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
}
}
- if (!rc)
+ if (!rc) {
list_add_tail(&new->entry, &card->ipato.entries);
+ qeth_l3_update_ipato(card);
+ }
spin_unlock_bh(&card->ip_lock);
return rc;
}
-void qeth_l3_del_ipato_entry(struct qeth_card *card,
- enum qeth_prot_versions proto, u8 *addr, int mask_bits)
+int qeth_l3_del_ipato_entry(struct qeth_card *card,
+ enum qeth_prot_versions proto, u8 *addr,
+ int mask_bits)
{
struct qeth_ipato_entry *ipatoe, *tmp;
+ int rc = -ENOENT;
QETH_CARD_TEXT(card, 2, "delipato");
@@ -664,11 +606,14 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
(proto == QETH_PROT_IPV4)? 4:16) &&
(ipatoe->mask_bits == mask_bits)) {
list_del(&ipatoe->entry);
+ qeth_l3_update_ipato(card);
kfree(ipatoe);
+ rc = 0;
}
}
spin_unlock_bh(&card->ip_lock);
+ return rc;
}
/*
@@ -678,7 +623,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
const u8 *addr)
{
struct qeth_ipaddr *ipaddr;
- int rc = 0;
+ int rc;
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
@@ -702,7 +647,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
if (qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
else
- qeth_l3_add_ip(card, ipaddr);
+ rc = qeth_l3_add_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);
@@ -711,10 +656,11 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
return rc;
}
-void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
- const u8 *addr)
+int qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
+ const u8 *addr)
{
struct qeth_ipaddr *ipaddr;
+ int rc;
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
@@ -729,13 +675,14 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
}
ipaddr->type = QETH_IP_TYPE_VIPA;
} else
- return;
+ return -ENOMEM;
spin_lock_bh(&card->ip_lock);
- qeth_l3_delete_ip(card, ipaddr);
+ rc = qeth_l3_delete_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);
kfree(ipaddr);
+ return rc;
}
/*
@@ -745,7 +692,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
const u8 *addr)
{
struct qeth_ipaddr *ipaddr;
- int rc = 0;
+ int rc;
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
@@ -770,7 +717,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
if (qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
else
- qeth_l3_add_ip(card, ipaddr);
+ rc = qeth_l3_add_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);
@@ -779,10 +726,11 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
return rc;
}
-void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
- const u8 *addr)
+int qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+ const u8 *addr)
{
struct qeth_ipaddr *ipaddr;
+ int rc;
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
@@ -797,13 +745,14 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
}
ipaddr->type = QETH_IP_TYPE_RXIP;
} else
- return;
+ return -ENOMEM;
spin_lock_bh(&card->ip_lock);
- qeth_l3_delete_ip(card, ipaddr);
+ rc = qeth_l3_delete_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);
kfree(ipaddr);
+ return rc;
}
static int qeth_l3_register_addr_entry(struct qeth_card *card,
@@ -870,27 +819,6 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
return rc;
}
-static u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
-{
- if (cast_type == RTN_MULTICAST)
- return QETH_CAST_MULTICAST;
- if (cast_type == RTN_BROADCAST)
- return QETH_CAST_BROADCAST;
- return QETH_CAST_UNICAST;
-}
-
-static u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
-{
- u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
- if (cast_type == RTN_MULTICAST)
- return ct | QETH_CAST_MULTICAST;
- if (cast_type == RTN_ANYCAST)
- return ct | QETH_CAST_ANYCAST;
- if (cast_type == RTN_BROADCAST)
- return ct | QETH_CAST_BROADCAST;
- return ct | QETH_CAST_UNICAST;
-}
-
static int qeth_l3_setadapter_parms(struct qeth_card *card)
{
int rc = 0;
@@ -907,7 +835,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
return rc;
}
-#ifdef CONFIG_QETH_IPV6
static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
{
@@ -923,7 +850,6 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
qeth_setassparms_cb, NULL);
return rc;
}
-#endif
static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
{
@@ -1019,7 +945,6 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
return rc;
}
-#ifdef CONFIG_QETH_IPV6
static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
{
int rc;
@@ -1065,12 +990,9 @@ out:
dev_info(&card->gdev->dev, "IPV6 enabled\n");
return 0;
}
-#endif
static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
{
- int rc = 0;
-
QETH_CARD_TEXT(card, 3, "strtipv6");
if (!qeth_is_supported(card, IPA_IPV6)) {
@@ -1078,10 +1000,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
"IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
return 0;
}
-#ifdef CONFIG_QETH_IPV6
- rc = qeth_l3_softsetup_ipv6(card);
-#endif
- return rc ;
+ return qeth_l3_softsetup_ipv6(card);
}
static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
@@ -1153,8 +1072,8 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code == 0)
- memcpy(card->dev->dev_addr,
- cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
+ ether_addr_copy(card->dev->dev_addr,
+ cmd->data.create_destroy_addr.unique_id);
else
eth_random_addr(card->dev->dev_addr);
@@ -1302,81 +1221,22 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
}
-static void qeth_l3_get_mac_for_ipm(__be32 ipm, char *mac)
-{
- ip_eth_mc_map(ipm, mac);
-}
-
-static void qeth_l3_mark_all_mc_to_be_deleted(struct qeth_card *card)
-{
- struct qeth_ipaddr *addr;
- int i;
-
- hash_for_each(card->ip_mc_htable, i, addr, hnode)
- addr->disp_flag = QETH_DISP_ADDR_DELETE;
-
-}
-
-static void qeth_l3_add_all_new_mc(struct qeth_card *card)
-{
- struct qeth_ipaddr *addr;
- struct hlist_node *tmp;
- int i;
- int rc;
-
- hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
- if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
- rc = qeth_l3_register_addr_entry(card, addr);
- if (!rc || (rc == IPA_RC_LAN_OFFLINE))
- addr->ref_counter = 1;
- else {
- hash_del(&addr->hnode);
- kfree(addr);
- }
- }
- }
-
-}
-
-static void qeth_l3_delete_nonused_mc(struct qeth_card *card)
-{
- struct qeth_ipaddr *addr;
- struct hlist_node *tmp;
- int i;
- int rc;
-
- hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
- if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
- rc = qeth_l3_deregister_addr_entry(card, addr);
- if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND)) {
- hash_del(&addr->hnode);
- kfree(addr);
- }
- }
- }
-
-}
-
-
static void
qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
{
struct ip_mc_list *im4;
struct qeth_ipaddr *tmp, *ipm;
- char buf[MAX_ADDR_LEN];
QETH_CARD_TEXT(card, 4, "addmc");
tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
- if (!tmp)
- return;
+ if (!tmp)
+ return;
for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
im4 = rcu_dereference(im4->next_rcu)) {
- qeth_l3_get_mac_for_ipm(im4->multiaddr, buf);
-
+ ip_eth_mc_map(im4->multiaddr, tmp->mac);
tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
- memcpy(tmp->mac, buf, sizeof(tmp->mac));
tmp->is_multicast = 1;
ipm = qeth_l3_ip_from_hash(card, tmp);
@@ -1386,7 +1246,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
if (!ipm)
continue;
- memcpy(ipm->mac, buf, sizeof(tmp->mac));
+ ether_addr_copy(ipm->mac, tmp->mac);
ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
ipm->is_multicast = 1;
ipm->disp_flag = QETH_DISP_ADDR_ADD;
@@ -1440,25 +1300,21 @@ unlock:
rcu_read_unlock();
}
-#ifdef CONFIG_QETH_IPV6
-static void
-qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
+static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
+ struct inet6_dev *in6_dev)
{
struct qeth_ipaddr *ipm;
struct ifmcaddr6 *im6;
struct qeth_ipaddr *tmp;
- char buf[MAX_ADDR_LEN];
QETH_CARD_TEXT(card, 4, "addmc6");
tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
- if (!tmp)
- return;
+ if (!tmp)
+ return;
for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
- ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
-
- memcpy(tmp->mac, buf, sizeof(tmp->mac));
+ ipv6_eth_mc_map(&im6->mca_addr, tmp->mac);
memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
sizeof(struct in6_addr));
tmp->is_multicast = 1;
@@ -1473,7 +1329,7 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
if (!ipm)
continue;
- memcpy(ipm->mac, buf, OSA_ADDR_LEN);
+ ether_addr_copy(ipm->mac, tmp->mac);
memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
sizeof(struct in6_addr));
ipm->is_multicast = 1;
@@ -1534,7 +1390,6 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
rcu_read_unlock();
in6_dev_put(in6_dev);
}
-#endif /* CONFIG_QETH_IPV6 */
static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
unsigned short vid)
@@ -1574,9 +1429,8 @@ out:
}
static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
- unsigned short vid)
+ unsigned short vid)
{
-#ifdef CONFIG_QETH_IPV6
struct inet6_dev *in6_dev;
struct inet6_ifaddr *ifa;
struct qeth_ipaddr *addr;
@@ -1611,7 +1465,6 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
kfree(addr);
out:
in6_dev_put(in6_dev);
-#endif /* CONFIG_QETH_IPV6 */
}
static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
@@ -1646,44 +1499,31 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
/* unregister IP addresses of vlan device */
qeth_l3_free_vlan_addresses(card, vid);
clear_bit(vid, card->active_vlans);
- qeth_l3_set_multicast_list(card->dev);
+ qeth_l3_set_rx_mode(dev);
return 0;
}
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr)
{
- __u16 prot;
- struct iphdr *ip_hdr;
- unsigned char tg_addr[MAX_ADDR_LEN];
-
if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
- prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
- ETH_P_IP;
+ u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
+ ETH_P_IP;
+ unsigned char tg_addr[ETH_ALEN];
+
+ skb_reset_network_header(skb);
switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
case QETH_CAST_MULTICAST:
- switch (prot) {
-#ifdef CONFIG_QETH_IPV6
- case ETH_P_IPV6:
- ndisc_mc_map((struct in6_addr *)
- skb->data + 24,
- tg_addr, card->dev, 0);
- break;
-#endif
- case ETH_P_IP:
- ip_hdr = (struct iphdr *)skb->data;
- ip_eth_mc_map(ip_hdr->daddr, tg_addr);
- break;
- default:
- memcpy(tg_addr, card->dev->broadcast,
- card->dev->addr_len);
- }
+ if (prot == ETH_P_IP)
+ ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
+ else
+ ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
+
card->stats.multicast++;
skb->pkt_type = PACKET_MULTICAST;
break;
case QETH_CAST_BROADCAST:
- memcpy(tg_addr, card->dev->broadcast,
- card->dev->addr_len);
+ ether_addr_copy(tg_addr, card->dev->broadcast);
card->stats.multicast++;
skb->pkt_type = PACKET_BROADCAST;
break;
@@ -1695,12 +1535,11 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
skb->pkt_type = PACKET_OTHERHOST;
else
skb->pkt_type = PACKET_HOST;
- memcpy(tg_addr, card->dev->dev_addr,
- card->dev->addr_len);
+ ether_addr_copy(tg_addr, card->dev->dev_addr);
}
if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
card->dev->header_ops->create(skb, card->dev, prot,
- tg_addr, &hdr->hdr.l3.dest_addr[2],
+ tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac,
card->dev->addr_len);
else
card->dev->header_ops->create(skb, card->dev, prot,
@@ -1715,7 +1554,7 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
hdr->hdr.l3.vlan_id :
- *((u16 *)&hdr->hdr.l3.dest_addr[12]);
+ hdr->hdr.l3.next_hop.rx.vlan_id;
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
}
@@ -1923,26 +1762,46 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
}
}
-static void qeth_l3_set_multicast_list(struct net_device *dev)
+static void qeth_l3_set_rx_mode(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
+ struct qeth_ipaddr *addr;
+ struct hlist_node *tmp;
+ int i, rc;
QETH_CARD_TEXT(card, 3, "setmulti");
if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
(card->state != CARD_STATE_UP))
return;
if (!card->options.sniffer) {
-
spin_lock_bh(&card->mclock);
- qeth_l3_mark_all_mc_to_be_deleted(card);
-
qeth_l3_add_multicast_ipv4(card);
-#ifdef CONFIG_QETH_IPV6
qeth_l3_add_multicast_ipv6(card);
-#endif
- qeth_l3_delete_nonused_mc(card);
- qeth_l3_add_all_new_mc(card);
+
+ hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+ switch (addr->disp_flag) {
+ case QETH_DISP_ADDR_DELETE:
+ rc = qeth_l3_deregister_addr_entry(card, addr);
+ if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) {
+ hash_del(&addr->hnode);
+ kfree(addr);
+ }
+ break;
+ case QETH_DISP_ADDR_ADD:
+ rc = qeth_l3_register_addr_entry(card, addr);
+ if (rc && rc != IPA_RC_LAN_OFFLINE) {
+ hash_del(&addr->hnode);
+ kfree(addr);
+ break;
+ }
+ addr->ref_counter = 1;
+ /* fall through */
+ default:
+ /* for next call to set_rx_mode(): */
+ addr->disp_flag = QETH_DISP_ADDR_DELETE;
+ }
+ }
spin_unlock_bh(&card->mclock);
@@ -2211,12 +2070,10 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
rc = -EFAULT;
goto free_and_out;
}
-#ifdef CONFIG_QETH_IPV6
if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
/* fails in case of GuestLAN QDIO mode */
qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
}
-#endif
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
QETH_CARD_TEXT(card, 4, "qactf");
rc = -EFAULT;
@@ -2396,9 +2253,8 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return rc;
}
-static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
+static int qeth_l3_get_cast_type(struct sk_buff *skb)
{
- int cast_type = RTN_UNSPEC;
struct neighbour *n = NULL;
struct dst_entry *dst;
@@ -2407,48 +2263,34 @@ static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
if (dst)
n = dst_neigh_lookup_skb(dst, skb);
if (n) {
- cast_type = n->type;
+ int cast_type = n->type;
+
rcu_read_unlock();
neigh_release(n);
if ((cast_type == RTN_BROADCAST) ||
(cast_type == RTN_MULTICAST) ||
(cast_type == RTN_ANYCAST))
return cast_type;
- else
- return RTN_UNSPEC;
+ return RTN_UNSPEC;
}
rcu_read_unlock();
- /* try something else */
+ /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
- return (skb_network_header(skb)[24] == 0xff) ?
- RTN_MULTICAST : 0;
+ return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
+ RTN_MULTICAST : RTN_UNSPEC;
else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
- return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
- RTN_MULTICAST : 0;
- /* ... */
- if (!memcmp(skb->data, skb->dev->broadcast, 6))
+ return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
+ RTN_MULTICAST : RTN_UNSPEC;
+
+ /* ... and MAC address */
+ if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
return RTN_BROADCAST;
- else {
- u16 hdr_mac;
-
- hdr_mac = *((u16 *)skb->data);
- /* tr multicast? */
- switch (card->info.link_type) {
- case QETH_LINK_TYPE_HSTR:
- case QETH_LINK_TYPE_LANE_TR:
- if ((hdr_mac == QETH_TR_MAC_NC) ||
- (hdr_mac == QETH_TR_MAC_C))
- return RTN_MULTICAST;
- break;
- /* eth or so multicast? */
- default:
- if ((hdr_mac == QETH_ETH_MAC_V4) ||
- (hdr_mac == QETH_ETH_MAC_V6))
- return RTN_MULTICAST;
- }
- }
- return cast_type;
+ if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+ return RTN_MULTICAST;
+
+ /* default to unicast */
+ return RTN_UNSPEC;
}
static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
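The rewritten qeth_l3_get_cast_type() classifies in three steps: the neighbour entry if one exists, otherwise the packet's destination IP, otherwise its destination MAC, dropping the token-ring-era EtherType constants removed from qeth_core.h earlier in this diff. The IP fallback uses stock helpers, roughly:

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/route.h>

static int demo_cast_type_by_daddr(struct sk_buff *skb)
{
	if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
		return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
		       RTN_MULTICAST : RTN_UNSPEC;
	if (be16_to_cpu(skb->protocol) == ETH_P_IP)
		return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
		       RTN_MULTICAST : RTN_UNSPEC;
	return RTN_UNSPEC;
}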
@@ -2468,17 +2310,27 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
daddr[0] = 0xfe;
daddr[1] = 0x80;
memcpy(&daddr[8], iucv_hdr->destUserID, 8);
- memcpy(hdr->hdr.l3.dest_addr, daddr, 16);
+ memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
}
-static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb, int ipv, int cast_type)
+static u8 qeth_l3_cast_type_to_flag(int cast_type)
{
- struct dst_entry *dst;
+ if (cast_type == RTN_MULTICAST)
+ return QETH_CAST_MULTICAST;
+ if (cast_type == RTN_ANYCAST)
+ return QETH_CAST_ANYCAST;
+ if (cast_type == RTN_BROADCAST)
+ return QETH_CAST_BROADCAST;
+ return QETH_CAST_UNICAST;
+}
+static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
+ struct sk_buff *skb, int ipv, int cast_type,
+ unsigned int data_len)
+{
memset(hdr, 0, sizeof(struct qeth_hdr));
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
- hdr->hdr.l3.ext_flags = 0;
+ hdr->hdr.l3.length = data_len;
/*
* before we're going to overwrite this location with next hop ip.
@@ -2492,44 +2344,40 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
}
- hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
+ /* OSA only: */
+ if (!ipv) {
+ hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
+ if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
+ skb->dev->broadcast))
+ hdr->hdr.l3.flags |= QETH_CAST_BROADCAST;
+ else
+ hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ?
+ QETH_CAST_MULTICAST : QETH_CAST_UNICAST;
+ return;
+ }
+ hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type);
rcu_read_lock();
- dst = skb_dst(skb);
if (ipv == 4) {
- struct rtable *rt = (struct rtable *) dst;
- __be32 *pkey = &ip_hdr(skb)->daddr;
+ struct rtable *rt = skb_rtable(skb);
- if (rt && rt->rt_gateway)
- pkey = &rt->rt_gateway;
-
- /* IPv4 */
- hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
- memset(hdr->hdr.l3.dest_addr, 0, 12);
- *((__be32 *) (&hdr->hdr.l3.dest_addr[12])) = *pkey;
- } else if (ipv == 6) {
- struct rt6_info *rt = (struct rt6_info *) dst;
- struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
+ *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
+ rt_nexthop(rt, ip_hdr(skb)->daddr) :
+ ip_hdr(skb)->daddr;
+ } else {
+ /* IPv6 */
+ const struct rt6_info *rt = skb_rt6_info(skb);
+ const struct in6_addr *next_hop;
if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
- pkey = &rt->rt6i_gateway;
+ next_hop = &rt->rt6i_gateway;
+ else
+ next_hop = &ipv6_hdr(skb)->daddr;
+ memcpy(hdr->hdr.l3.next_hop.ipv6_addr, next_hop, 16);
- /* IPv6 */
- hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
- if (card->info.type == QETH_CARD_TYPE_IQD)
- hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
- memcpy(hdr->hdr.l3.dest_addr, pkey, 16);
- } else {
- if (!memcmp(skb->data + sizeof(struct qeth_hdr),
- skb->dev->broadcast, 6)) {
- /* broadcast? */
- hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
- QETH_HDR_PASSTHRU;
- } else {
- hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
- QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
- QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
- }
+ hdr->hdr.l3.flags |= QETH_HDR_IPV6;
+ if (card->info.type != QETH_CARD_TYPE_IQD)
+ hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
}
rcu_read_unlock();
}
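Next-hop selection in qeth_l3_fill_header() now leans on the routing helpers: rt_nexthop() yields the gateway when the route has one and the on-link destination otherwise, and the IPv6 branch applies the same gateway-or-daddr rule via rt6i_gateway. As a fragment (skb, rt and rt6 assumed in scope):

#include <net/route.h>
#include <net/ip6_route.h>

/* IPv4: gateway if present, else the destination itself */
__be32 nh4 = rt ? rt_nexthop(rt, ip_hdr(skb)->daddr) : ip_hdr(skb)->daddr;

/* IPv6: same rule, spelled out */
const struct in6_addr *nh6 = (rt6 && !ipv6_addr_any(&rt6->rt6i_gateway)) ?
			     &rt6->rt6i_gateway : &ipv6_hdr(skb)->daddr;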
@@ -2561,7 +2409,6 @@ static void qeth_tso_fill_header(struct qeth_card *card,
/*fix header to TSO values ...*/
hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
- hdr->hdr.hdr.l3.length = skb->len - sizeof(struct qeth_hdr_tso);
/*set values which are fix for the first approach ...*/
hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
hdr->ext.imb_hdr_no = 1;
@@ -2629,7 +2476,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
struct qeth_card *card = dev->ml_priv;
struct sk_buff *new_skb = NULL;
int ipv = qeth_get_ip_version(skb);
- int cast_type = qeth_l3_get_cast_type(card, skb);
+ int cast_type = qeth_l3_get_cast_type(skb);
struct qeth_qdio_out_q *queue =
card->qdio.out_qs[card->qdio.do_prio_queueing
|| (cast_type && card->info.is_multicast_different) ?
@@ -2722,21 +2569,23 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (use_tso) {
hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
memset(hdr, 0, sizeof(struct qeth_hdr_tso));
- qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
+ qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+ new_skb->len - sizeof(struct qeth_hdr_tso));
qeth_tso_fill_header(card, hdr, new_skb);
hdr_elements++;
} else {
if (data_offset < 0) {
hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
- qeth_l3_fill_header(card, hdr, new_skb, ipv,
- cast_type);
+ qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+ new_skb->len -
+ sizeof(struct qeth_hdr));
} else {
if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV)
qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
else {
qeth_l3_fill_header(card, hdr, new_skb, ipv,
- cast_type);
- hdr->hdr.l3.length = new_skb->len - data_offset;
+ cast_type,
+ new_skb->len - data_offset);
}
}
@@ -2904,7 +2753,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = qeth_l3_set_multicast_list,
+ .ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
.ndo_change_mtu = qeth_change_mtu,
.ndo_fix_features = qeth_fix_features,
@@ -2921,7 +2770,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_features_check = qeth_features_check,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = qeth_l3_set_multicast_list,
+ .ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
.ndo_change_mtu = qeth_change_mtu,
.ndo_fix_features = qeth_fix_features,
@@ -3119,7 +2968,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
__qeth_l3_open(card->dev);
else
dev_open(card->dev);
- qeth_l3_set_multicast_list(card->dev);
+ qeth_l3_set_rx_mode(card->dev);
qeth_recover_features(card->dev);
rtnl_unlock();
}
@@ -3345,10 +3194,6 @@ static struct notifier_block qeth_l3_ip_notifier = {
NULL,
};
-#ifdef CONFIG_QETH_IPV6
-/**
- * IPv6 event handler
- */
static int qeth_l3_ip6_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -3393,7 +3238,6 @@ static struct notifier_block qeth_l3_ip6_notifier = {
qeth_l3_ip6_event,
NULL,
};
-#endif
static int qeth_l3_register_notifiers(void)
{
@@ -3403,35 +3247,25 @@ static int qeth_l3_register_notifiers(void)
rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
if (rc)
return rc;
-#ifdef CONFIG_QETH_IPV6
rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
if (rc) {
unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
return rc;
}
-#else
- pr_warn("There is no IPv6 support for the layer 3 discipline\n");
-#endif
return 0;
}
static void qeth_l3_unregister_notifiers(void)
{
-
QETH_DBF_TEXT(SETUP, 5, "unregnot");
WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
-#ifdef CONFIG_QETH_IPV6
WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
-#endif /* QETH_IPV6 */
}
static int __init qeth_l3_init(void)
{
- int rc = 0;
-
pr_info("register layer 3 discipline\n");
- rc = qeth_l3_register_notifiers();
- return rc;
+ return qeth_l3_register_notifiers();
}
static void __exit qeth_l3_exit(void)
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index bd12fdf678be..a645cfe66ddf 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -10,11 +10,23 @@
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include <linux/hashtable.h>
+#include <linux/inet.h>
#include "qeth_l3.h"
#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
+static int qeth_l3_string_to_ipaddr(const char *buf,
+ enum qeth_prot_versions proto, u8 *addr)
+{
+ const char *end;
+
+ if ((proto == QETH_PROT_IPV4 && !in4_pton(buf, -1, addr, -1, &end)) ||
+ (proto == QETH_PROT_IPV6 && !in6_pton(buf, -1, addr, -1, &end)))
+ return -EINVAL;
+ return 0;
+}
+
static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
struct qeth_routing_info *route, char *buf)
{
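The new qeth_l3_string_to_ipaddr() above retires the hand-rolled parsers deleted from qeth_l3_main.c in favour of the kernel's in4_pton()/in6_pton() from <linux/inet.h>. Both take an optional length (-1 for NUL-terminated), an optional delimiter (-1 for none) and return nonzero on success; usage in sketch form:

#include <linux/inet.h>

u8 ip4[4], ip6[16];
const char *end;

if (!in4_pton("192.168.0.1", -1, ip4, -1, &end))
	return -EINVAL;
if (!in6_pton("fe80::1", -1, ip6, -1, &end))
	return -EINVAL;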
@@ -262,7 +274,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
struct qeth_ipaddr *addr;
char *tmp;
- int i;
+ int rc, i;
if (!card)
return -EINVAL;
@@ -331,11 +343,11 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
return -ENOMEM;
spin_lock_bh(&card->ip_lock);
- qeth_l3_add_ip(card, addr);
+ rc = qeth_l3_add_ip(card, addr);
spin_unlock_bh(&card->ip_lock);
kfree(addr);
- return count;
+ return rc ? rc : count;
}
static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show,
@@ -370,8 +382,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- struct qeth_ipaddr *addr;
- int i, rc = 0;
+ bool enable;
+ int rc = 0;
if (!card)
return -EINVAL;
@@ -384,25 +396,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
}
if (sysfs_streq(buf, "toggle")) {
- card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
- } else if (sysfs_streq(buf, "1")) {
- card->ipato.enabled = 1;
- hash_for_each(card->ip_htable, i, addr, hnode) {
- if ((addr->type == QETH_IP_TYPE_NORMAL) &&
- qeth_l3_is_addr_covered_by_ipato(card, addr))
- addr->set_flags |=
- QETH_IPA_SETIP_TAKEOVER_FLAG;
- }
- } else if (sysfs_streq(buf, "0")) {
- card->ipato.enabled = 0;
- hash_for_each(card->ip_htable, i, addr, hnode) {
- if (addr->set_flags &
- QETH_IPA_SETIP_TAKEOVER_FLAG)
- addr->set_flags &=
- ~QETH_IPA_SETIP_TAKEOVER_FLAG;
- }
- } else
+ enable = !card->ipato.enabled;
+ } else if (kstrtobool(buf, &enable)) {
rc = -EINVAL;
+ goto out;
+ }
+
+ if (card->ipato.enabled != enable) {
+ card->ipato.enabled = enable;
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_update_ipato(card);
+ spin_unlock_bh(&card->ip_lock);
+ }
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
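This store and the invert4/invert6 stores below now share one template: "toggle" flips the current value, anything else is parsed by kstrtobool() (which accepts 1/0, y/n and on/off spellings), and the takeover flags are recomputed only when the value actually changed:

bool enable;

if (sysfs_streq(buf, "toggle"))
	enable = !card->ipato.enabled;
else if (kstrtobool(buf, &enable))
	return -EINVAL;		/* the real code branches to the unlock label */

if (card->ipato.enabled != enable) {
	card->ipato.enabled = enable;
	spin_lock_bh(&card->ip_lock);
	qeth_l3_update_ipato(card);
	spin_unlock_bh(&card->ip_lock);
}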
@@ -428,20 +433,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ bool invert;
int rc = 0;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if (sysfs_streq(buf, "toggle"))
- card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
- else if (sysfs_streq(buf, "1"))
- card->ipato.invert4 = 1;
- else if (sysfs_streq(buf, "0"))
- card->ipato.invert4 = 0;
- else
+ if (sysfs_streq(buf, "toggle")) {
+ invert = !card->ipato.invert4;
+ } else if (kstrtobool(buf, &invert)) {
rc = -EINVAL;
+ goto out;
+ }
+
+ if (card->ipato.invert4 != invert) {
+ card->ipato.invert4 = invert;
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_update_ipato(card);
+ spin_unlock_bh(&card->ip_lock);
+ }
+out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
@@ -573,7 +585,7 @@ static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
if (!rc)
- qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
+ rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
@@ -607,20 +619,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ bool invert;
int rc = 0;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if (sysfs_streq(buf, "toggle"))
- card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
- else if (sysfs_streq(buf, "1"))
- card->ipato.invert6 = 1;
- else if (sysfs_streq(buf, "0"))
- card->ipato.invert6 = 0;
- else
+ if (sysfs_streq(buf, "toggle")) {
+ invert = !card->ipato.invert6;
+ } else if (kstrtobool(buf, &invert)) {
rc = -EINVAL;
+ goto out;
+ }
+
+ if (card->ipato.invert6 != invert) {
+ card->ipato.invert6 = invert;
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_update_ipato(card);
+ spin_unlock_bh(&card->ip_lock);
+ }
+out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
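The three stores above (ipato_enable, invert4, invert6) now share one shape: accept "toggle", otherwise parse a boolean, and only take ip_lock and call qeth_l3_update_ipato() when the value actually changes. A minimal userspace sketch of that parse-then-apply pattern follows; parse_bool() approximates the kernel's kstrtobool(), and strcmp() stands in for sysfs_streq(), which also tolerates a trailing newline. The names are illustrative, not driver API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* approximates kstrtobool(): y/Y/1, n/N/0, "on", "off" */
static int parse_bool(const char *s, bool *res)
{
	switch (s[0]) {
	case 'y': case 'Y': case '1':
		*res = true;
		return 0;
	case 'n': case 'N': case '0':
		*res = false;
		return 0;
	case 'o': case 'O':
		if (s[1] == 'n' || s[1] == 'N') {
			*res = true;
			return 0;
		}
		if (s[1] == 'f' || s[1] == 'F') {
			*res = false;
			return 0;
		}
		/* fall through */
	default:
		return -1;
	}
}

static bool enabled;			/* stands in for card->ipato.enabled */

static int store(const char *buf)
{
	bool enable;

	if (strcmp(buf, "toggle") == 0)
		enable = !enabled;
	else if (parse_bool(buf, &enable))
		return -1;		/* -EINVAL in the driver */

	if (enabled != enable) {
		/* driver: take ip_lock, call qeth_l3_update_ipato() */
		enabled = enable;
	}
	return 0;
}

int main(void)
{
	const char *in[] = { "1", "toggle", "off", "bogus" };

	for (unsigned int i = 0; i < sizeof(in) / sizeof(in[0]); i++)
		printf("%-6s -> rc=%d enabled=%d\n", in[i], store(in[i]), enabled);
	return 0;
}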
@@ -686,22 +705,25 @@ static const struct attribute_group qeth_device_ipato_group = {
.attrs = qeth_ipato_device_attrs,
};
-static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
- enum qeth_prot_versions proto)
+static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
+ enum qeth_prot_versions proto,
+ enum qeth_ip_types type)
{
+ struct qeth_card *card = dev_get_drvdata(dev);
struct qeth_ipaddr *ipaddr;
char addr_str[40];
int str_len = 0;
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
int i;
+ if (!card)
+ return -EINVAL;
+
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
spin_lock_bh(&card->ip_lock);
hash_for_each(card->ip_htable, i, ipaddr, hnode) {
- if (ipaddr->proto != proto)
- continue;
- if (ipaddr->type != QETH_IP_TYPE_VIPA)
+ if (ipaddr->proto != proto || ipaddr->type != type)
continue;
/* String must not be longer than PAGE_SIZE. So we check if
* string length gets near PAGE_SIZE. Then we can safely display
@@ -720,14 +742,11 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
}
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- if (!card)
- return -EINVAL;
-
- return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
+ return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4,
+ QETH_IP_TYPE_VIPA);
}
static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
@@ -777,7 +796,7 @@ static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_vipae(buf, proto, addr);
if (!rc)
- qeth_l3_del_vipa(card, proto, addr);
+ rc = qeth_l3_del_vipa(card, proto, addr);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
@@ -797,14 +816,11 @@ static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
qeth_l3_dev_vipa_del4_store);
static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- if (!card)
- return -EINVAL;
-
- return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV6);
+ return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6,
+ QETH_IP_TYPE_VIPA);
}
static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
@@ -849,48 +865,12 @@ static const struct attribute_group qeth_device_vipa_group = {
.attrs = qeth_vipa_device_attrs,
};
-static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
- enum qeth_prot_versions proto)
-{
- struct qeth_ipaddr *ipaddr;
- char addr_str[40];
- int str_len = 0;
- int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i;
-
- entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
- entry_len += 2; /* \n + terminator */
- spin_lock_bh(&card->ip_lock);
- hash_for_each(card->ip_htable, i, ipaddr, hnode) {
- if (ipaddr->proto != proto)
- continue;
- if (ipaddr->type != QETH_IP_TYPE_RXIP)
- continue;
- /* String must not be longer than PAGE_SIZE. So we check if
- * string length gets near PAGE_SIZE. Then we can savely display
- * the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - str_len) <= entry_len)
- break;
- qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
- addr_str);
- str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
- addr_str);
- }
- spin_unlock_bh(&card->ip_lock);
- str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
-
- return str_len;
-}
-
static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- if (!card)
- return -EINVAL;
-
- return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
+ return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4,
+ QETH_IP_TYPE_RXIP);
}
static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
@@ -957,7 +937,7 @@ static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_rxipe(buf, proto, addr);
if (!rc)
- qeth_l3_del_rxip(card, proto, addr);
+ rc = qeth_l3_del_rxip(card, proto, addr);
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
@@ -977,14 +957,11 @@ static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
qeth_l3_dev_rxip_del4_store);
static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- if (!card)
- return -EINVAL;
-
- return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV6);
+ return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6,
+ QETH_IP_TYPE_RXIP);
}
static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 00e7968a1d70..b42c9c479d4b 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -369,7 +369,6 @@ out:
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
u32 local_time;
- struct timeval time;
TW_Event *event;
unsigned short aen;
char host[16];
@@ -392,8 +391,8 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H
memset(event, 0, sizeof(TW_Event));
event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
- do_gettimeofday(&time);
- local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+ /* event->time_stamp_sec overflows in y2106 */
+ local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
event->time_stamp_sec = local_time;
event->aen_code = aen;
event->retrieved = TW_AEN_NOT_RETRIEVED;
@@ -473,11 +472,10 @@ out:
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
u32 schedulertime;
- struct timeval utc;
TW_Command_Full *full_command_packet;
TW_Command *command_packet;
TW_Param_Apache *param;
- u32 local_time;
+ time64_t local_time;
/* Fill out the command packet */
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -499,9 +497,8 @@ static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
/* Convert system time in UTC to local time seconds since last
Sunday 12:00AM */
- do_gettimeofday(&utc);
- local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
- schedulertime = local_time - (3 * 86400);
+ local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
+ div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
schedulertime = cpu_to_le32(schedulertime % 604800);
memcpy(param->data, &schedulertime, sizeof(u32));
@@ -648,8 +645,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
TW_Command_Full *full_command_packet;
TW_Compatibility_Info *tw_compat_info;
TW_Event *event;
- struct timeval current_time;
- u32 current_time_ms;
+ ktime_t current_time;
TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
int retval = TW_IOCTL_ERROR_OS_EFAULT;
void __user *argp = (void __user *)arg;
@@ -840,17 +836,17 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
break;
case TW_IOCTL_GET_LOCK:
tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
- do_gettimeofday(&current_time);
- current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
+ current_time = ktime_get();
- if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
+ if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
+ ktime_after(current_time, tw_dev->ioctl_time)) {
tw_dev->ioctl_sem_lock = 1;
- tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
+ tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
tw_ioctl->driver_command.status = 0;
tw_lock->time_remaining_msec = tw_lock->timeout_msec;
} else {
tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
- tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
+ tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
}
break;
case TW_IOCTL_RELEASE_LOCK:
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index b6c208cc474f..d88cd3499bd5 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -666,7 +666,7 @@ typedef struct TAG_TW_Device_Extension {
unsigned char event_queue_wrapped;
unsigned int error_sequence_id;
int ioctl_sem_lock;
- u32 ioctl_msec;
+ ktime_t ioctl_time;
int chrdev_request_id;
wait_queue_head_t ioctl_wqueue;
struct mutex ioctl_lock;
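The GET_LOCK conversion above replaces wall-clock millisecond arithmetic with a ktime_t deadline: ktime_get() is monotonic, so the lock timeout can no longer jump when the system clock is set. Below is a portable sketch of the same deadline idiom, using CLOCK_MONOTONIC in place of ktime_get() and plain 64-bit math in place of ktime_add_ms()/ktime_after()/ktime_ms_delta(); it is an analogue, not the driver code.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* ktime_get() analogue: monotonic milliseconds */
static int64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

int main(void)
{
	int64_t timeout_msec = 500;
	int64_t ioctl_time = now_ms() + timeout_msec;	/* ktime_add_ms() */

	if (now_ms() > ioctl_time)			/* ktime_after() */
		printf("lock expired, grant it\n");
	else
		printf("locked, %lld ms remaining\n",	/* ktime_ms_delta() */
		       (long long)(ioctl_time - now_ms()));
	return 0;
}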
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index b150e131b2e7..cf9f2a09b47d 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -221,7 +221,6 @@ out:
static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
u32 local_time;
- struct timeval time;
TW_Event *event;
unsigned short aen;
char host[16];
@@ -240,8 +239,8 @@ static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H
memset(event, 0, sizeof(TW_Event));
event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
- do_gettimeofday(&time);
- local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+ /* event->time_stamp_sec overflows in y2106 */
+ local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
event->time_stamp_sec = local_time;
event->aen_code = aen;
event->retrieved = TW_AEN_NOT_RETRIEVED;
@@ -408,11 +407,10 @@ out:
static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
u32 schedulertime;
- struct timeval utc;
TW_Command_Full *full_command_packet;
TW_Command *command_packet;
TW_Param_Apache *param;
- u32 local_time;
+ time64_t local_time;
/* Fill out the command packet */
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -434,10 +432,9 @@ static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
/* Convert system time in UTC to local time seconds since last
Sunday 12:00AM */
- do_gettimeofday(&utc);
- local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
- schedulertime = local_time - (3 * 86400);
- schedulertime = cpu_to_le32(schedulertime % 604800);
+ local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
+ div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
+ schedulertime = cpu_to_le32(schedulertime);
memcpy(param->data, &schedulertime, sizeof(u32));
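Both controllers want "seconds since last Sunday 12:00AM". The Unix epoch fell on a Thursday, four days after a Sunday midnight, so subtracting three days (equivalent to adding four, modulo a week) shifts the origin to Sunday before reducing modulo 604800; div_u64_rem() performs that reduction without a slow 64-bit division on 32-bit hosts. A standalone check of the arithmetic, where time(NULL) stands in for ktime_get_real_seconds() and the timezone offset is omitted:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define SEC_PER_DAY	86400
#define SEC_PER_WEEK	604800

int main(void)
{
	/* ktime_get_real_seconds() analogue; tz_minuteswest omitted */
	int64_t local_time = (int64_t)time(NULL);

	/* shift the origin from Thursday (the epoch) back to Sunday,
	 * then reduce modulo one week, as div_u64_rem() does above */
	uint32_t schedulertime =
		(uint32_t)((local_time - 3 * SEC_PER_DAY) % SEC_PER_WEEK);

	printf("seconds since last Sunday 12:00AM: %u\n",
	       (unsigned int)schedulertime);
	printf("= day %u, hour %u\n",
	       (unsigned int)(schedulertime / SEC_PER_DAY),
	       (unsigned int)(schedulertime % SEC_PER_DAY) / 3600);
	return 0;
}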
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index af3e4d3f9735..e7961cbd2c55 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -42,6 +42,8 @@
#include <linux/highmem.h> /* For flush_kernel_dcache_page */
#include <linux/module.h>
+#include <asm/unaligned.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -913,8 +915,15 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
memset(str, ' ', sizeof(*str));
if (sup_adap_info->adapter_type_text[0]) {
- char *cp = sup_adap_info->adapter_type_text;
int c;
+ char *cp;
+ char *cname = kmemdup(sup_adap_info->adapter_type_text,
+ sizeof(sup_adap_info->adapter_type_text),
+ GFP_ATOMIC);
+ if (!cname)
+ return;
+
+ cp = cname;
if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
inqstrcpy("SMC", str->vid);
else {
@@ -923,7 +932,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
++cp;
c = *cp;
*cp = '\0';
- inqstrcpy(sup_adap_info->adapter_type_text, str->vid);
+ inqstrcpy(cname, str->vid);
*cp = c;
while (*cp && *cp != ' ')
++cp;
@@ -931,14 +940,11 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
while (*cp == ' ')
++cp;
/* last six chars reserved for vol type */
- c = 0;
- if (strlen(cp) > sizeof(str->pid)) {
- c = cp[sizeof(str->pid)];
+ if (strlen(cp) > sizeof(str->pid))
cp[sizeof(str->pid)] = '\0';
- }
inqstrcpy (cp, str->pid);
- if (c)
- cp[sizeof(str->pid)] = c;
+
+ kfree(cname);
} else {
struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
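The setinqstr() change duplicates adapter_type_text with kmemdup() before planting temporary NUL terminators, so the firmware-supplied buffer is never written to while being parsed. A portable sketch of the copy-before-mutate idea; the buffer size and helper names here are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* copy the first word of src into vid[] without ever writing to src */
static void copy_vendor(const char *src, char vid[8])
{
	size_t len = strlen(src) + 1;
	char *cname = malloc(len);	/* kmemdup() analogue */
	char *cp;

	if (!cname)
		return;
	memcpy(cname, src, len);

	for (cp = cname; *cp && *cp != ' '; cp++)
		;
	*cp = '\0';			/* safe: we only mutate the copy */
	snprintf(vid, 8, "%s", cname);

	free(cname);
}

int main(void)
{
	const char *adapter_type_text = "AOC Series 8 controller";
	char vid[8];

	copy_vendor(adapter_type_text, vid);
	printf("vid=\"%s\", source intact: \"%s\"\n", vid, adapter_type_text);
	return 0;
}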
@@ -1660,87 +1666,309 @@ static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
(void *) cmd);
}
-int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
+static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
+ struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len)
{
- struct fib *fibptr;
- struct aac_srb *srbcmd;
- struct sgmap64 *sg64;
- struct aac_ciss_identify_pd *identify_resp;
- dma_addr_t addr;
- u32 vbus, vid;
- u16 fibsize, datasize;
- int rcode = -ENOMEM;
-
+ struct fib *fibptr;
+ dma_addr_t addr;
+ int rcode;
+ int fibsize;
+ struct aac_srb *srb;
+ struct aac_srb_reply *srb_reply;
+ struct sgmap64 *sg64;
+ u32 vbus;
+ u32 vid;
+
+ if (!dev->sa_firmware)
+ return 0;
+ /* allocate FIB */
fibptr = aac_fib_alloc(dev);
if (!fibptr)
- goto out;
+ return -ENOMEM;
- fibsize = sizeof(struct aac_srb) -
- sizeof(struct sgentry) + sizeof(struct sgentry64);
- datasize = sizeof(struct aac_ciss_identify_pd);
+ aac_fib_init(fibptr);
+ fibptr->hw_fib_va->header.XferState &=
+ ~cpu_to_le32(FastResponseCapable);
- identify_resp = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
- GFP_KERNEL);
- if (!identify_resp)
- goto fib_free_ptr;
+ fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
+ sizeof(struct sgentry64);
- vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
- vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
+	/* map the caller's transfer buffer for DMA */
+ addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&dev->pdev->dev, addr)) {
+ rcode = -ENOMEM;
+ goto fib_error;
+ }
- aac_fib_init(fibptr);
+ srb = fib_data(fibptr);
+ memcpy(srb, &srbu->srb, sizeof(struct aac_srb));
- srbcmd = (struct aac_srb *) fib_data(fibptr);
- srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
- srbcmd->channel = cpu_to_le32(vbus);
- srbcmd->id = cpu_to_le32(vid);
- srbcmd->lun = 0;
- srbcmd->flags = cpu_to_le32(SRB_DataIn);
- srbcmd->timeout = cpu_to_le32(10);
- srbcmd->retry_limit = 0;
- srbcmd->cdb_size = cpu_to_le32(12);
- srbcmd->count = cpu_to_le32(datasize);
+ vbus = (u32)le16_to_cpu(
+ dev->supplement_adapter_info.virt_device_bus);
+ vid = (u32)le16_to_cpu(
+ dev->supplement_adapter_info.virt_device_target);
- memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
- srbcmd->cdb[0] = 0x26;
- srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
- srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+ /* set the common request fields */
+ srb->channel = cpu_to_le32(vbus);
+ srb->id = cpu_to_le32(vid);
+ srb->lun = 0;
+ srb->function = cpu_to_le32(SRBF_ExecuteScsi);
+ srb->timeout = 0;
+ srb->retry_limit = 0;
+ srb->cdb_size = cpu_to_le32(16);
+ srb->count = cpu_to_le32(xfer_len);
+
+ sg64 = (struct sgmap64 *)&srb->sg;
+ sg64->count = cpu_to_le32(1);
+ sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
+ sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
+ sg64->sg[0].count = cpu_to_le32(xfer_len);
- sg64 = (struct sgmap64 *)&srbcmd->sg;
- sg64->count = cpu_to_le32(1);
- sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
- sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
- sg64->sg[0].count = cpu_to_le32(datasize);
+ /*
+	 * Copy the updated SRB back so callers can inspect or dump it
+ */
+ memcpy(&srbu->srb, srb, sizeof(struct aac_srb));
+
+ /* issue request to the controller */
+ rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal,
+ 1, 1, NULL, NULL);
+
+ if (rcode == -ERESTARTSYS)
+ rcode = -ERESTART;
+
+ if (unlikely(rcode < 0))
+ goto bmic_error;
+
+ srb_reply = (struct aac_srb_reply *)fib_data(fibptr);
+ memcpy(&srbu->srb_reply, srb_reply, sizeof(struct aac_srb_reply));
+
+bmic_error:
+ dma_unmap_single(&dev->pdev->dev, addr, xfer_len, DMA_BIDIRECTIONAL);
+fib_error:
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ return rcode;
+}
+
+static void aac_set_safw_target_qd(struct aac_dev *dev, int bus, int target)
+{
+ struct aac_ciss_identify_pd *identify_resp;
- rcode = aac_fib_send(ScsiPortCommand64,
- fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL);
+ if (dev->hba_map[bus][target].devtype != AAC_DEVTYPE_NATIVE_RAW)
+ return;
+
+ identify_resp = dev->hba_map[bus][target].safw_identify_resp;
+ if (identify_resp == NULL) {
+ dev->hba_map[bus][target].qd_limit = 32;
+ return;
+ }
if (identify_resp->current_queue_depth_limit <= 0 ||
- identify_resp->current_queue_depth_limit > 32)
+ identify_resp->current_queue_depth_limit > 255)
dev->hba_map[bus][target].qd_limit = 32;
else
dev->hba_map[bus][target].qd_limit =
identify_resp->current_queue_depth_limit;
+}
- dma_free_coherent(&dev->pdev->dev, datasize, identify_resp, addr);
+static int aac_issue_safw_bmic_identify(struct aac_dev *dev,
+ struct aac_ciss_identify_pd **identify_resp, u32 bus, u32 target)
+{
+ int rcode = -ENOMEM;
+ int datasize;
+ struct aac_srb_unit srbu;
+ struct aac_srb *srbcmd;
+ struct aac_ciss_identify_pd *identify_reply;
- aac_fib_complete(fibptr);
+ datasize = sizeof(struct aac_ciss_identify_pd);
+ identify_reply = kmalloc(datasize, GFP_KERNEL);
+ if (!identify_reply)
+ goto out;
+
+ memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+ srbcmd = &srbu.srb;
+ srbcmd->flags = cpu_to_le32(SRB_DataIn);
+ srbcmd->cdb[0] = 0x26;
+ srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
+ srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+
+ rcode = aac_send_safw_bmic_cmd(dev, &srbu, identify_reply, datasize);
+ if (unlikely(rcode < 0))
+ goto mem_free_all;
+
+ *identify_resp = identify_reply;
+
+out:
+ return rcode;
+mem_free_all:
+ kfree(identify_reply);
+ goto out;
+}
+
+static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
+{
+ kfree(dev->safw_phys_luns);
+ dev->safw_phys_luns = NULL;
+}
+
+/**
+ * aac_get_safw_ciss_luns() - fetch the physical LUN topology
+ * @dev: aac_dev structure
+ *
+ * Execute a CISS REPORT PHYSICAL LUNS and cache the response in
+ * dev->safw_phys_luns for the accessors below.
+ */
+static int aac_get_safw_ciss_luns(struct aac_dev *dev)
+{
+ int rcode = -ENOMEM;
+ int datasize;
+ struct aac_srb *srbcmd;
+ struct aac_srb_unit srbu;
+ struct aac_ciss_phys_luns_resp *phys_luns;
+
+ datasize = sizeof(struct aac_ciss_phys_luns_resp) +
+ (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+ phys_luns = kmalloc(datasize, GFP_KERNEL);
+ if (phys_luns == NULL)
+ goto out;
+
+ memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+ srbcmd = &srbu.srb;
+ srbcmd->flags = cpu_to_le32(SRB_DataIn);
+ srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
+ srbcmd->cdb[1] = 2; /* extended reporting */
+ srbcmd->cdb[8] = (u8)(datasize >> 8);
+ srbcmd->cdb[9] = (u8)(datasize);
+
+ rcode = aac_send_safw_bmic_cmd(dev, &srbu, phys_luns, datasize);
+ if (unlikely(rcode < 0))
+ goto mem_free_all;
+
+ if (phys_luns->resp_flag != 2) {
+ rcode = -ENOMSG;
+ goto mem_free_all;
+ }
+
+ dev->safw_phys_luns = phys_luns;
+
+out:
+ return rcode;
+mem_free_all:
+ kfree(phys_luns);
+ goto out;
+}
+
+static inline u32 aac_get_safw_phys_lun_count(struct aac_dev *dev)
+{
+ return get_unaligned_be32(&dev->safw_phys_luns->list_length[0])/24;
+}
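list_length[] in the CISS response is a big-endian 32-bit byte count, and each physical LUN record is 24 bytes, hence the division by 24 above; get_unaligned_be32() reads the field without assuming alignment. The same extraction in standalone form, with be32_read() as a portable analogue:

#include <stdint.h>
#include <stdio.h>

/* get_unaligned_be32() analogue: assemble four big-endian bytes */
static uint32_t be32_read(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	/* 0x48 = 72 bytes of LUN data = 3 records of 24 bytes */
	const uint8_t list_length[4] = { 0x00, 0x00, 0x00, 0x48 };

	printf("lun count = %u\n", (unsigned int)(be32_read(list_length) / 24));
	return 0;
}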
+
+static inline u32 aac_get_safw_phys_bus(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].level2[1] & 0x3f;
+}
+
+static inline u32 aac_get_safw_phys_target(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].level2[0];
+}
+
+static inline u32 aac_get_safw_phys_expose_flag(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].bus >> 6;
+}
+
+static inline u32 aac_get_safw_phys_attribs(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].node_ident[9];
+}
+
+static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
+{
+ return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
+}
+
+static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].node_ident[8];
+}
+
+static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
+ int bus, int target)
+{
+ kfree(dev->hba_map[bus][target].safw_identify_resp);
+ dev->hba_map[bus][target].safw_identify_resp = NULL;
+}
+
+static inline void aac_free_safw_all_identify_resp(struct aac_dev *dev,
+ int lun_count)
+{
+ int luns;
+ int i;
+ u32 bus;
+ u32 target;
+
+ luns = aac_get_safw_phys_lun_count(dev);
+
+ if (luns < lun_count)
+ lun_count = luns;
+ else if (lun_count < 0)
+ lun_count = luns;
+
+ for (i = 0; i < lun_count; i++) {
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+
+ aac_free_safw_identify_resp(dev, bus, target);
+ }
+}
+
+static int aac_get_safw_attr_all_targets(struct aac_dev *dev)
+{
+ int i;
+ int rcode = 0;
+ u32 lun_count;
+ u32 bus;
+ u32 target;
+ struct aac_ciss_identify_pd *identify_resp = NULL;
+
+ lun_count = aac_get_safw_phys_lun_count(dev);
+
+	for (i = 0; i < lun_count; ++i) {
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+
+ rcode = aac_issue_safw_bmic_identify(dev,
+ &identify_resp, bus, target);
+
+ if (unlikely(rcode < 0))
+ goto free_identify_resp;
+
+ dev->hba_map[bus][target].safw_identify_resp = identify_resp;
+ }
-fib_free_ptr:
- aac_fib_free(fibptr);
out:
return rcode;
+free_identify_resp:
+ aac_free_safw_all_identify_resp(dev, i);
+ goto out;
}
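aac_issue_safw_bmic_identify(), aac_get_safw_ciss_luns() and aac_get_safw_attr_all_targets() all use the same unwind shape: success falls through to out, failure jumps to a label that frees what was allocated and then reuses the same exit. A stripped-down sketch of that goto ladder; the fail parameter is a stand-in for the firmware command's result:

#include <stdlib.h>

struct resp { int data; };

static int get_resource(struct resp **out, int fail)
{
	int rcode = -1;			/* -ENOMEM analogue */
	struct resp *r = malloc(sizeof(*r));

	if (!r)
		goto out;

	rcode = fail ? -5 : 0;		/* stands in for the BMIC command */
	if (rcode < 0)
		goto mem_free_all;

	*out = r;			/* success: ownership moves out */
out:
	return rcode;
mem_free_all:
	free(r);			/* failure: release, share the exit */
	goto out;
}

int main(void)
{
	struct resp *r = NULL;

	if (get_resource(&r, 0) == 0)
		free(r);
	(void)get_resource(&r, 1);	/* failure path frees internally */
	return 0;
}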
/**
- * aac_update hba_map()- update current hba map with data from FW
+ * aac_set_safw_attr_all_targets - update current hba map with data from FW
* @dev: aac_dev structure
- * @phys_luns: FW information from report phys luns
*
* Update our hba map with the information gathered from the FW
*/
-void aac_update_hba_map(struct aac_dev *dev,
- struct aac_ciss_phys_luns_resp *phys_luns, int rescan)
+static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
{
/* ok and extended reporting */
u32 lun_count, nexus;
@@ -1748,24 +1976,21 @@ void aac_update_hba_map(struct aac_dev *dev,
u8 expose_flag, attribs;
u8 devtype;
- lun_count = ((phys_luns->list_length[0] << 24)
- + (phys_luns->list_length[1] << 16)
- + (phys_luns->list_length[2] << 8)
- + (phys_luns->list_length[3])) / 24;
+ lun_count = aac_get_safw_phys_lun_count(dev);
+
+ dev->scan_counter++;
for (i = 0; i < lun_count; ++i) {
- bus = phys_luns->lun[i].level2[1] & 0x3f;
- target = phys_luns->lun[i].level2[0];
- expose_flag = phys_luns->lun[i].bus >> 6;
- attribs = phys_luns->lun[i].node_ident[9];
- nexus = *((u32 *) &phys_luns->lun[i].node_ident[12]);
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+ expose_flag = aac_get_safw_phys_expose_flag(dev, i);
+ attribs = aac_get_safw_phys_attribs(dev, i);
+ nexus = aac_get_safw_phys_nexus(dev, i);
if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
continue;
- dev->hba_map[bus][target].expose = expose_flag;
-
if (expose_flag != 0) {
devtype = AAC_DEVTYPE_RAID_MEMBER;
goto update_devtype;
@@ -1778,95 +2003,45 @@ void aac_update_hba_map(struct aac_dev *dev,
} else
devtype = AAC_DEVTYPE_ARC_RAW;
- if (devtype != AAC_DEVTYPE_NATIVE_RAW)
- goto update_devtype;
+ dev->hba_map[bus][target].scan_counter = dev->scan_counter;
- if (aac_issue_bmic_identify(dev, bus, target) < 0)
- dev->hba_map[bus][target].qd_limit = 32;
+ aac_set_safw_target_qd(dev, bus, target);
update_devtype:
- if (rescan == AAC_INIT)
- dev->hba_map[bus][target].devtype = devtype;
- else
- dev->hba_map[bus][target].new_devtype = devtype;
+ dev->hba_map[bus][target].devtype = devtype;
}
}
-/**
- * aac_report_phys_luns() Process topology change
- * @dev: aac_dev structure
- * @fibptr: fib pointer
- *
- * Execute a CISS REPORT PHYS LUNS and process the results into
- * the current hba_map.
- */
-int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
+static int aac_setup_safw_targets(struct aac_dev *dev)
{
- int fibsize, datasize;
- struct aac_ciss_phys_luns_resp *phys_luns;
- struct aac_srb *srbcmd;
- struct sgmap64 *sg64;
- dma_addr_t addr;
- u32 vbus, vid;
int rcode = 0;
- /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
- fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry)
- + sizeof(struct sgentry64);
- datasize = sizeof(struct aac_ciss_phys_luns_resp)
- + (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
-
- phys_luns = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
- GFP_KERNEL);
- if (phys_luns == NULL) {
- rcode = -ENOMEM;
- goto err_out;
- }
-
- vbus = (u32) le16_to_cpu(
- dev->supplement_adapter_info.virt_device_bus);
- vid = (u32) le16_to_cpu(
- dev->supplement_adapter_info.virt_device_target);
+ rcode = aac_get_containers(dev);
+ if (unlikely(rcode < 0))
+ goto out;
- aac_fib_init(fibptr);
+ rcode = aac_get_safw_ciss_luns(dev);
+ if (unlikely(rcode < 0))
+ goto out;
- srbcmd = (struct aac_srb *) fib_data(fibptr);
- srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
- srbcmd->channel = cpu_to_le32(vbus);
- srbcmd->id = cpu_to_le32(vid);
- srbcmd->lun = 0;
- srbcmd->flags = cpu_to_le32(SRB_DataIn);
- srbcmd->timeout = cpu_to_le32(10);
- srbcmd->retry_limit = 0;
- srbcmd->cdb_size = cpu_to_le32(12);
- srbcmd->count = cpu_to_le32(datasize);
+ rcode = aac_get_safw_attr_all_targets(dev);
+ if (unlikely(rcode < 0))
+ goto free_ciss_luns;
- memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
- srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
- srbcmd->cdb[1] = 2; /* extended reporting */
- srbcmd->cdb[8] = (u8)(datasize >> 8);
- srbcmd->cdb[9] = (u8)(datasize);
-
- sg64 = (struct sgmap64 *) &srbcmd->sg;
- sg64->count = cpu_to_le32(1);
- sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
- sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
- sg64->sg[0].count = cpu_to_le32(datasize);
-
- rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize,
- FsaNormal, 1, 1, NULL, NULL);
-
- /* analyse data */
- if (rcode >= 0 && phys_luns->resp_flag == 2) {
- /* ok and extended reporting */
- aac_update_hba_map(dev, phys_luns, rescan);
- }
+ aac_set_safw_attr_all_targets(dev);
- dma_free_coherent(&dev->pdev->dev, datasize, phys_luns, addr);
-err_out:
+ aac_free_safw_all_identify_resp(dev, -1);
+free_ciss_luns:
+ aac_free_safw_ciss_luns(dev);
+out:
return rcode;
}
+int aac_setup_safw_adapter(struct aac_dev *dev)
+{
+ return aac_setup_safw_targets(dev);
+}
+
int aac_get_adapter_info(struct aac_dev* dev)
{
struct fib* fibptr;
@@ -1969,12 +2144,6 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
}
- if (!dev->sync_mode && dev->sa_firmware &&
- dev->supplement_adapter_info.virt_device_bus != 0xffff) {
- /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
- rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT);
- }
-
if (!dev->in_reset) {
char buffer[16];
tmp = le32_to_cpu(dev->adapter_info.kernelrev);
@@ -2739,14 +2908,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
}
} else { /* check for physical non-dasd devices */
bus = aac_logical_to_phys(scmd_channel(scsicmd));
- if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
- (dev->hba_map[bus][cid].expose
- == AAC_HIDE_DISK)){
- if (scsicmd->cmnd[0] == INQUIRY) {
- scsicmd->result = DID_NO_CONNECT << 16;
- goto scsi_done_ret;
- }
- }
if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
dev->hba_map[bus][cid].devtype
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 6e3d81969a77..0095fcbd1c88 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -41,6 +41,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <scsi/scsi_host.h>
/*------------------------------------------------------------------------------
* D E F I N E S
@@ -97,7 +98,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50834
+# define AAC_DRIVER_BUILD 50877
# define AAC_DRIVER_BRANCH "-custom"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -117,9 +118,13 @@ enum {
/* Thor: 5 phys. buses: #0: empty, 1-4: 256 targets each */
#define AAC_MAX_BUSES 5
#define AAC_MAX_TARGETS 256
+#define AAC_BUS_TARGET_LOOP (AAC_MAX_BUSES * AAC_MAX_TARGETS)
#define AAC_MAX_NATIVE_SIZE 2048
#define FW_ERROR_BUFFER_SIZE 512
+#define get_bus_number(x)	((x) / AAC_MAX_TARGETS)
+#define get_target_number(x)	((x) % AAC_MAX_TARGETS)
+
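AAC_BUS_TARGET_LOOP flattens the bus-by-target matrix into one index so a single loop can visit every slot, and the two macros recover the coordinates. A quick standalone check of the mapping, with local names in place of the driver's:

#include <stdio.h>

#define MAX_BUSES	5
#define MAX_TARGETS	256
#define BUS_TARGET_LOOP	(MAX_BUSES * MAX_TARGETS)

#define bus_number(x)		((x) / MAX_TARGETS)
#define target_number(x)	((x) % MAX_TARGETS)

int main(void)
{
	/* spot-check a few flattened indices */
	for (int i = 0; i < BUS_TARGET_LOOP; i += 300)
		printf("i=%4d -> bus %d, target %3d\n",
		       i, bus_number(i), target_number(i));
	return 0;
}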
/* Thor AIF events */
#define SA_AIF_HOTPLUG (1<<1)
#define SA_AIF_HARDWARE (1<<2)
@@ -1334,17 +1339,17 @@ struct fib {
#define AAC_DEVTYPE_RAID_MEMBER 1
#define AAC_DEVTYPE_ARC_RAW 2
#define AAC_DEVTYPE_NATIVE_RAW 3
-#define AAC_EXPOSE_DISK 0
-#define AAC_HIDE_DISK 3
+
+#define AAC_SAFW_RESCAN_DELAY (10 * HZ)
struct aac_hba_map_info {
__le32 rmw_nexus; /* nexus for native HBA devices */
u8 devtype; /* device type */
- u8 new_devtype;
u8 reset_state; /* 0 - no reset, 1..x - */
/* after xth TM LUN reset */
u16 qd_limit;
- u8 expose; /*checks if to expose or not*/
+ u32 scan_counter;
+ struct aac_ciss_identify_pd *safw_identify_resp;
};
/*
@@ -1560,6 +1565,7 @@ struct aac_dev
spinlock_t fib_lock;
struct mutex ioctl_mutex;
+ struct mutex scan_mutex;
struct aac_queue_block *queues;
/*
* The user API will use an IOCTL to register itself to receive
@@ -1605,6 +1611,7 @@ struct aac_dev
int maximum_num_channels;
struct fsa_dev_info *fsa_dev;
struct task_struct *thread;
+ struct delayed_work safw_rescan_work;
int cardtype;
/*
*This lock will protect the two 32-bit
@@ -1668,9 +1675,11 @@ struct aac_dev
u32 vector_cap; /* MSI-X vector capab.*/
int msi_enabled; /* MSI/MSI-X enabled */
atomic_t msix_counter;
+ u32 scan_counter;
struct msix_entry msixentry[AAC_MAX_MSIX];
struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
+ struct aac_ciss_phys_luns_resp *safw_phys_luns;
u8 adapter_shutdown;
u32 handle_pci_error;
bool init_reset;
@@ -1725,6 +1734,7 @@ struct aac_dev
#define FIB_CONTEXT_FLAG_NATIVE_HBA (0x00000010)
#define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF (0x00000020)
#define FIB_CONTEXT_FLAG_SCSI_CMD (0x00000040)
+#define FIB_CONTEXT_FLAG_EH_RESET (0x00000080)
/*
* Define the command values
@@ -2022,6 +2032,12 @@ struct aac_srb_reply
__le32 sense_data_size;
u8 sense_data[AAC_SENSE_BUFFERSIZE]; // Can this be SCSI_SENSE_BUFFERSIZE
};
+
+struct aac_srb_unit {
+ struct aac_srb srb;
+ struct aac_srb_reply srb_reply;
+};
+
/*
* SRB Flags
*/
@@ -2626,16 +2642,41 @@ static inline int aac_adapter_check_health(struct aac_dev *dev)
return (dev)->a_ops.adapter_check_health(dev);
}
+
+int aac_scan_host(struct aac_dev *dev);
+
+static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev)
+{
+ schedule_delayed_work(&dev->safw_rescan_work, AAC_SAFW_RESCAN_DELAY);
+}
+
+static inline void aac_safw_rescan_worker(struct work_struct *work)
+{
+ struct aac_dev *dev = container_of(to_delayed_work(work),
+ struct aac_dev, safw_rescan_work);
+
+ wait_event(dev->scsi_host_ptr->host_wait,
+ !scsi_host_in_recovery(dev->scsi_host_ptr));
+
+ aac_scan_host(dev);
+}
+
+static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
+{
+ if (dev->sa_firmware)
+ cancel_delayed_work_sync(&dev->safw_rescan_work);
+}
+
/* SCp.phase values */
#define AAC_OWNER_MIDLEVEL 0x101
#define AAC_OWNER_LOWLEVEL 0x102
#define AAC_OWNER_ERROR_HANDLER 0x103
#define AAC_OWNER_FIRMWARE 0x106
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
-int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan);
-int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target);
+int aac_setup_safw_adapter(struct aac_dev *dev);
const char *aac_driverinfo(struct Scsi_Host *);
void aac_fib_vector_assign(struct aac_dev *dev);
struct fib *aac_fib_alloc(struct aac_dev *dev);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 9ab0fa959d83..a2b3430072c7 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -1052,9 +1052,13 @@ static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
return -EFAULT;
+ dev->adapter_shutdown = 1;
+
+ mutex_unlock(&dev->ioctl_mutex);
retval = aac_reset_adapter(dev, 0, reset.reset_type);
- return retval;
+ mutex_lock(&dev->ioctl_mutex);
+ return retval;
}
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 1bc623ad3faf..0dc7b5a4fea2 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -42,6 +42,8 @@
#include <linux/completion.h>
#include <linux/mm.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
#include "aacraid.h"
@@ -284,6 +286,38 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
q->entries = qsize;
}
+static void aac_wait_for_io_completion(struct aac_dev *aac)
+{
+ unsigned long flagv = 0;
+ int i = 0;
+
+ for (i = 60; i; --i) {
+ struct scsi_device *dev;
+ struct scsi_cmnd *command;
+ int active = 0;
+
+ __shost_for_each_device(dev, aac->scsi_host_ptr) {
+ spin_lock_irqsave(&dev->list_lock, flagv);
+ list_for_each_entry(command, &dev->cmd_list, list) {
+ if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+ active++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->list_lock, flagv);
+ if (active)
+ break;
+
+ }
+ /*
+	 * We can exit if all the commands are complete
+ */
+ if (active == 0)
+ break;
+ ssleep(1);
+ }
+}
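aac_wait_for_io_completion() is a bounded poll: at most 60 one-second sleeps, exiting early once no command is still owned by firmware (the same loop this patch removes from aac_reset_adapter() below). The skeleton in portable form, with count_active() standing in for the SCp.phase scan:

#include <stdio.h>
#include <unistd.h>

/* stands in for the scan over commands with SCp.phase == AAC_OWNER_FIRMWARE */
static int count_active(void)
{
	static int outstanding = 3;

	return outstanding ? outstanding-- : 0;
}

int main(void)
{
	int i;

	for (i = 60; i; --i) {		/* bound: at most ~60 seconds */
		if (count_active() == 0)
			break;		/* all commands completed */
		sleep(1);		/* ssleep(1) analogue */
	}
	printf("%s\n", i ? "drained" : "timed out with I/O outstanding");
	return 0;
}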
+
/**
* aac_send_shutdown - shutdown an adapter
* @dev: Adapter to shutdown
@@ -295,12 +329,10 @@ int aac_send_shutdown(struct aac_dev * dev)
{
struct fib * fibctx;
struct aac_close *cmd;
- int status;
+ int status = 0;
- fibctx = aac_fib_alloc(dev);
- if (!fibctx)
- return -ENOMEM;
- aac_fib_init(fibctx);
+ if (aac_adapter_check_health(dev))
+ return status;
if (!dev->adapter_shutdown) {
mutex_lock(&dev->ioctl_mutex);
@@ -308,6 +340,13 @@ int aac_send_shutdown(struct aac_dev * dev)
mutex_unlock(&dev->ioctl_mutex);
}
+ aac_wait_for_io_completion(dev);
+
+ fibctx = aac_fib_alloc(dev);
+ if (!fibctx)
+ return -ENOMEM;
+ aac_fib_init(fibctx);
+
cmd = (struct aac_close *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_CloseAll);
cmd->cid = cpu_to_le32(0xfffffffe);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index bec9f3193f60..84858d5c8257 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/crash_dump.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
@@ -1629,28 +1630,28 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
command->scsi_done(command);
}
/*
- * Any Device that was already marked offline needs to be cleaned up
+ * Any Device that was already marked offline needs to be marked
+ * running
*/
__shost_for_each_device(dev, host) {
- if (!scsi_device_online(dev)) {
- sdev_printk(KERN_INFO, dev, "Removing offline device\n");
- scsi_remove_device(dev);
- scsi_device_put(dev);
- }
+ if (!scsi_device_online(dev))
+ scsi_device_set_state(dev, SDEV_RUNNING);
}
retval = 0;
out:
aac->in_reset = 0;
scsi_unblock_requests(host);
+
/*
* Issue bus rescan to catch any configuration that might have
* occurred
*/
- if (!retval) {
- dev_info(&aac->pdev->dev, "Issuing bus rescan\n");
- scsi_scan_host(host);
+ if (!retval && !is_kdump_kernel()) {
+ dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
+ aac_schedule_safw_scan_worker(aac);
}
+
if (jafo) {
spin_lock_irq(host->host_lock);
}
@@ -1681,31 +1682,6 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
*/
host = aac->scsi_host_ptr;
scsi_block_requests(host);
- if (forced < 2) for (retval = 60; retval; --retval) {
- struct scsi_device * dev;
- struct scsi_cmnd * command;
- int active = 0;
-
- __shost_for_each_device(dev, host) {
- spin_lock_irqsave(&dev->list_lock, flagv);
- list_for_each_entry(command, &dev->cmd_list, list) {
- if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
- active++;
- break;
- }
- }
- spin_unlock_irqrestore(&dev->list_lock, flagv);
- if (active)
- break;
-
- }
- /*
- * We can exit If all the commands are complete
- */
- if (active == 0)
- break;
- ssleep(1);
- }
/* Quiesce build, flush cache, write through mode */
if (forced < 2)
@@ -1874,42 +1850,124 @@ out:
return BlinkLED;
}
+static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
+{
+ return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
+}
-static void aac_resolve_luns(struct aac_dev *dev)
+static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
+ int bus,
+ int target)
+{
+ if (bus != CONTAINER_CHANNEL)
+ bus = aac_phys_to_logical(bus);
+
+ return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
+}
+
+static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
+{
+ if (bus != CONTAINER_CHANNEL)
+ bus = aac_phys_to_logical(bus);
+
+ return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
+}
+
+static void aac_put_safw_scsi_device(struct scsi_device *sdev)
+{
+ if (sdev)
+ scsi_device_put(sdev);
+}
+
+static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
{
- int bus, target, channel;
struct scsi_device *sdev;
- u8 devtype;
- u8 new_devtype;
- for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
- for (target = 0; target < AAC_MAX_TARGETS; target++) {
+ sdev = aac_lookup_safw_scsi_device(dev, bus, target);
+ scsi_remove_device(sdev);
+ aac_put_safw_scsi_device(sdev);
+}
- if (bus == CONTAINER_CHANNEL)
- channel = CONTAINER_CHANNEL;
- else
- channel = aac_phys_to_logical(bus);
+static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
+ int bus, int target)
+{
+ return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
+}
- devtype = dev->hba_map[bus][target].devtype;
- new_devtype = dev->hba_map[bus][target].new_devtype;
+static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
+{
+ if (is_safw_raid_volume(dev, bus, target))
+ return dev->fsa_dev[target].valid;
+ else
+ return aac_is_safw_scan_count_equal(dev, bus, target);
+}
- sdev = scsi_device_lookup(dev->scsi_host_ptr, channel,
- target, 0);
+static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
+{
+ int is_exposed = 0;
+ struct scsi_device *sdev;
- if (!sdev && new_devtype)
- scsi_add_device(dev->scsi_host_ptr, channel,
- target, 0);
- else if (sdev && new_devtype != devtype)
- scsi_remove_device(sdev);
- else if (sdev && new_devtype == devtype)
- scsi_rescan_device(&sdev->sdev_gendev);
+ sdev = aac_lookup_safw_scsi_device(dev, bus, target);
+ if (sdev)
+ is_exposed = 1;
+ aac_put_safw_scsi_device(sdev);
- if (sdev)
- scsi_device_put(sdev);
+ return is_exposed;
+}
- dev->hba_map[bus][target].devtype = new_devtype;
- }
+static int aac_update_safw_host_devices(struct aac_dev *dev)
+{
+ int i;
+ int bus;
+ int target;
+ int is_exposed = 0;
+ int rcode = 0;
+
+ rcode = aac_setup_safw_adapter(dev);
+	if (unlikely(rcode < 0))
+		goto out;
+
+	for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {
+ bus = get_bus_number(i);
+ target = get_target_number(i);
+
+ is_exposed = aac_is_safw_device_exposed(dev, bus, target);
+
+ if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
+ aac_add_safw_device(dev, bus, target);
+ else if (!aac_is_safw_target_valid(dev, bus, target) &&
+ is_exposed)
+ aac_remove_safw_device(dev, bus, target);
+ }
+out:
+ return rcode;
+}
+
+static int aac_scan_safw_host(struct aac_dev *dev)
+{
+ int rcode = 0;
+
+ rcode = aac_update_safw_host_devices(dev);
+ if (rcode)
+ aac_schedule_safw_scan_worker(dev);
+
+ return rcode;
+}
+
+int aac_scan_host(struct aac_dev *dev)
+{
+ int rcode = 0;
+
+ mutex_lock(&dev->scan_mutex);
+ if (dev->sa_firmware)
+ rcode = aac_scan_safw_host(dev);
+ else
+ scsi_scan_host(dev->scsi_host_ptr);
+ mutex_unlock(&dev->scan_mutex);
+
+ return rcode;
}
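The rescan no longer keeps a separate new_devtype per target; instead every target reported by the latest REPORT PHYSICAL LUNS pass is stamped with the current scan_counter, and a stale stamp means the device disappeared (see aac_is_safw_scan_count_equal() above). A minimal generation-counter sketch of that idea:

#include <stdio.h>

#define NTARGETS 4

static unsigned int scan_counter;
static unsigned int stamp[NTARGETS];

static void scan(const int *present, int n)
{
	scan_counter++;				/* new generation */
	for (int i = 0; i < n; i++)
		stamp[present[i]] = scan_counter;
}

static int is_valid(int target)
{
	/* aac_is_safw_scan_count_equal() analogue */
	return stamp[target] == scan_counter;
}

int main(void)
{
	int first[] = { 0, 1, 2 }, second[] = { 0, 2 };

	scan(first, 3);
	scan(second, 2);			/* target 1 disappears */
	for (int t = 0; t < NTARGETS; t++)
		printf("target %d: %s\n", t,
		       is_valid(t) ? "present" : "remove/skip");
	return 0;
}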
/**
@@ -1922,10 +1980,8 @@ static void aac_resolve_luns(struct aac_dev *dev)
*/
static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
{
- int i, bus, target, container, rcode = 0;
+ int i;
u32 events = 0;
- struct fib *fib;
- struct scsi_device *sdev;
if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
events = SA_AIF_HOTPLUG;
@@ -1947,44 +2003,8 @@ static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
case SA_AIF_LDEV_CHANGE:
case SA_AIF_BPCFG_CHANGE:
- fib = aac_fib_alloc(dev);
- if (!fib) {
- pr_err("aac_handle_sa_aif: out of memory\n");
- return;
- }
- for (bus = 0; bus < AAC_MAX_BUSES; bus++)
- for (target = 0; target < AAC_MAX_TARGETS; target++)
- dev->hba_map[bus][target].new_devtype = 0;
-
- rcode = aac_report_phys_luns(dev, fib, AAC_RESCAN);
-
- if (rcode != -ERESTARTSYS)
- aac_fib_free(fib);
-
- aac_resolve_luns(dev);
-
- if (events == SA_AIF_LDEV_CHANGE ||
- events == SA_AIF_BPCFG_CHANGE) {
- aac_get_containers(dev);
- for (container = 0; container <
- dev->maximum_num_containers; ++container) {
- sdev = scsi_device_lookup(dev->scsi_host_ptr,
- CONTAINER_CHANNEL,
- container, 0);
- if (dev->fsa_dev[container].valid && !sdev) {
- scsi_add_device(dev->scsi_host_ptr,
- CONTAINER_CHANNEL,
- container, 0);
- } else if (!dev->fsa_dev[container].valid &&
- sdev) {
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- } else if (sdev) {
- scsi_rescan_device(&sdev->sdev_gendev);
- scsi_device_put(sdev);
- }
- }
- }
+ aac_scan_host(dev);
+
break;
case SA_AIF_BPSTAT_CHANGE:
@@ -2482,8 +2502,8 @@ int aac_command_thread(void *data)
/* Synchronize our watches */
if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
&& (now.tv_nsec > (NSEC_PER_SEC / HZ)))
- difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ)
- + NSEC_PER_SEC / 2) / NSEC_PER_SEC;
+ difference = HZ + HZ / 2 -
+ now.tv_nsec / (NSEC_PER_SEC / HZ);
else {
if (now.tv_nsec > NSEC_PER_SEC / 2)
++now.tv_sec;
@@ -2507,6 +2527,10 @@ int aac_command_thread(void *data)
if (kthread_should_stop())
break;
+ /*
+ * we probably want usleep_range() here instead of the
+ * jiffies computation
+ */
schedule_timeout(difference);
if (kthread_should_stop())
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index bdf127aaab41..b3b931ab77eb 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -683,6 +683,9 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
u32 bus, cid;
int ret = FAILED;
+ if (aac_adapter_check_health(aac))
+ return ret;
+
bus = aac_logical_to_phys(scmd_channel(cmd));
cid = scmd_id(cmd);
if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
@@ -690,7 +693,6 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
struct aac_hba_tm_req *tmf;
int status;
u64 address;
- __le32 managed_request_id;
pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
AAC_DRIVERNAME,
@@ -703,8 +705,6 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
(fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
(fib->callback_data == cmd)) {
found = 1;
- managed_request_id = ((struct aac_hba_cmd_req *)
- fib->hw_fib_va)->request_id;
break;
}
}
@@ -1037,7 +1037,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
info = &aac->hba_map[bus][cid];
if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
- fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+ fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
}
}
@@ -1375,18 +1375,15 @@ static ssize_t aac_store_reset_adapter(struct device *device,
const char *buf, size_t count)
{
int retval = -EACCES;
- int bled = 0;
- struct aac_dev *aac;
-
if (!capable(CAP_SYS_ADMIN))
return retval;
- aac = (struct aac_dev *)class_to_shost(device)->hostdata;
- bled = buf[0] == '!' ? 1:0;
- retval = aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
+ retval = aac_reset_adapter(shost_priv(class_to_shost(device)),
+ buf[0] == '!', IOP_HWSOFT_RESET);
if (retval >= 0)
retval = count;
+
return retval;
}
@@ -1689,6 +1686,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&aac->fib_lock);
mutex_init(&aac->ioctl_mutex);
+ mutex_init(&aac->scan_mutex);
+
+ INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
/*
* Map in the registers from the adapter.
*/
@@ -1792,7 +1792,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
error = scsi_add_host(shost, &pdev->dev);
if (error)
goto out_deinit;
- scsi_scan_host(shost);
+
+ aac_scan_host(aac);
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
@@ -1877,6 +1878,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
scsi_block_requests(shost);
+ aac_cancel_safw_rescan_worker(aac);
aac_send_shutdown(aac);
aac_release_resources(aac);
@@ -1935,6 +1937,7 @@ static void aac_remove_one(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+ aac_cancel_safw_rescan_worker(aac);
scsi_remove_host(shost);
__aac_shutdown(aac);
@@ -1992,6 +1995,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
aac->handle_pci_error = 1;
scsi_block_requests(aac->scsi_host_ptr);
+ aac_cancel_safw_rescan_worker(aac);
aac_flush_ios(aac);
aac_release_resources(aac);
@@ -2076,7 +2080,7 @@ static void aac_pci_resume(struct pci_dev *pdev)
if (sdev->sdev_state == SDEV_OFFLINE)
sdev->sdev_state = SDEV_RUNNING;
scsi_unblock_requests(aac->scsi_host_ptr);
- scsi_scan_host(aac->scsi_host_ptr);
+ aac_scan_host(aac);
pci_save_state(pdev);
dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 553922fed524..882f40353b96 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -329,6 +329,22 @@ int aac_sa_init(struct aac_dev *dev)
instance = dev->id;
name = dev->name;
+ /*
+ * Fill in the function dispatch table.
+ */
+
+ dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
+ dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
+ dev->a_ops.adapter_notify = aac_sa_notify_adapter;
+ dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
+ dev->a_ops.adapter_check_health = aac_sa_check_health;
+ dev->a_ops.adapter_restart = aac_sa_restart_adapter;
+ dev->a_ops.adapter_start = aac_sa_start_adapter;
+ dev->a_ops.adapter_intr = aac_sa_intr;
+ dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
+ dev->a_ops.adapter_ioremap = aac_sa_ioremap;
+
if (aac_sa_ioremap(dev, dev->base_size)) {
printk(KERN_WARNING "%s: unable to map adapter.\n", name);
goto error_iounmap;
@@ -363,22 +379,6 @@ int aac_sa_init(struct aac_dev *dev)
}
/*
- * Fill in the function dispatch table.
- */
-
- dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
- dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
- dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
- dev->a_ops.adapter_notify = aac_sa_notify_adapter;
- dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
- dev->a_ops.adapter_check_health = aac_sa_check_health;
- dev->a_ops.adapter_restart = aac_sa_restart_adapter;
- dev->a_ops.adapter_start = aac_sa_start_adapter;
- dev->a_ops.adapter_intr = aac_sa_intr;
- dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
- dev->a_ops.adapter_ioremap = aac_sa_ioremap;
-
- /*
* First clear out all interrupts. Then enable the one's that
* we can handle.
*/
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index a254b32eba39..f375f3557c18 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -45,52 +45,57 @@
#include <linux/interrupt.h>
struct device_attribute;
/*The limit of outstanding scsi command that firmware can handle*/
-#ifdef CONFIG_XEN
- #define ARCMSR_MAX_FREECCB_NUM 160
-#define ARCMSR_MAX_OUTSTANDING_CMD 155
-#else
- #define ARCMSR_MAX_FREECCB_NUM 320
-#define ARCMSR_MAX_OUTSTANDING_CMD 255
-#endif
-#define ARCMSR_DRIVER_VERSION "v1.30.00.22-20151126"
-#define ARCMSR_SCSI_INITIATOR_ID 255
-#define ARCMSR_MAX_XFER_SECTORS 512
-#define ARCMSR_MAX_XFER_SECTORS_B 4096
-#define ARCMSR_MAX_XFER_SECTORS_C 304
-#define ARCMSR_MAX_TARGETID 17
-#define ARCMSR_MAX_TARGETLUN 8
-#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
-#define ARCMSR_MAX_QBUFFER 4096
-#define ARCMSR_DEFAULT_SG_ENTRIES 38
-#define ARCMSR_MAX_HBB_POSTQUEUE 264
+#define ARCMSR_MAX_FREECCB_NUM 1024
+#define ARCMSR_MAX_OUTSTANDING_CMD 1024
+#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
+#define ARCMSR_MIN_OUTSTANDING_CMD 32
+#define ARCMSR_DRIVER_VERSION "v1.40.00.04-20171130"
+#define ARCMSR_SCSI_INITIATOR_ID 255
+#define ARCMSR_MAX_XFER_SECTORS 512
+#define ARCMSR_MAX_XFER_SECTORS_B 4096
+#define ARCMSR_MAX_XFER_SECTORS_C 304
+#define ARCMSR_MAX_TARGETID 17
+#define ARCMSR_MAX_TARGETLUN 8
+#define ARCMSR_MAX_CMD_PERLUN 128
+#define ARCMSR_DEFAULT_CMD_PERLUN 32
+#define ARCMSR_MIN_CMD_PERLUN 1
+#define ARCMSR_MAX_QBUFFER 4096
+#define ARCMSR_DEFAULT_SG_ENTRIES 38
+#define ARCMSR_MAX_HBB_POSTQUEUE 264
#define ARCMSR_MAX_ARC1214_POSTQUEUE 256
#define ARCMSR_MAX_ARC1214_DONEQUEUE 257
-#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
-#define ARCMSR_CDB_SG_PAGE_LENGTH 256
+#define ARCMSR_MAX_HBE_DONEQUEUE 512
+#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
+#define ARCMSR_CDB_SG_PAGE_LENGTH 256
#define ARCMST_NUM_MSIX_VECTORS 4
#ifndef PCI_DEVICE_ID_ARECA_1880
-#define PCI_DEVICE_ID_ARECA_1880 0x1880
- #endif
+#define PCI_DEVICE_ID_ARECA_1880 0x1880
+#endif
#ifndef PCI_DEVICE_ID_ARECA_1214
- #define PCI_DEVICE_ID_ARECA_1214 0x1214
+#define PCI_DEVICE_ID_ARECA_1214 0x1214
#endif
#ifndef PCI_DEVICE_ID_ARECA_1203
- #define PCI_DEVICE_ID_ARECA_1203 0x1203
+#define PCI_DEVICE_ID_ARECA_1203 0x1203
#endif
+#ifndef PCI_DEVICE_ID_ARECA_1884
+#define PCI_DEVICE_ID_ARECA_1884 0x1884
+#endif
+#define ARCMSR_HOURS			(1000 * 60 * 60 * 4) /* 4 hours, in milliseconds */
+#define ARCMSR_MINUTES			(1000 * 60 * 60) /* 60 minutes, in milliseconds */
/*
**********************************************************************************
**
**********************************************************************************
*/
-#define ARC_SUCCESS 0
-#define ARC_FAILURE 1
+#define ARC_SUCCESS 0
+#define ARC_FAILURE 1
/*
*******************************************************************************
** split 64bits dma addressing
*******************************************************************************
*/
-#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
-#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
+#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
+#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
/*
*******************************************************************************
** MESSAGE CONTROL CODE
@@ -130,7 +135,7 @@ struct CMD_MESSAGE_FIELD
#define FUNCTION_SAY_HELLO 0x0807
#define FUNCTION_SAY_GOODBYE 0x0808
#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809
-#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
+#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
#define FUNCTION_HARDWARE_RESET 0x080B
/* ARECA IO CONTROL CODE*/
#define ARCMSR_MESSAGE_READ_RQBUFFER \
@@ -161,18 +166,18 @@ struct CMD_MESSAGE_FIELD
** structure for holding DMA address data
*************************************************************
*/
-#define IS_DMA64 (sizeof(dma_addr_t) == 8)
-#define IS_SG64_ADDR 0x01000000 /* bit24 */
+#define IS_DMA64 (sizeof(dma_addr_t) == 8)
+#define IS_SG64_ADDR 0x01000000 /* bit24 */
struct SG32ENTRY
{
- __le32 length;
- __le32 address;
+ __le32 length;
+ __le32 address;
}__attribute__ ((packed));
struct SG64ENTRY
{
- __le32 length;
- __le32 address;
- __le32 addresshigh;
+ __le32 length;
+ __le32 address;
+ __le32 addresshigh;
}__attribute__ ((packed));
/*
********************************************************************
@@ -191,50 +196,50 @@ struct QBUFFER
*/
struct FIRMWARE_INFO
{
- uint32_t signature; /*0, 00-03*/
- uint32_t request_len; /*1, 04-07*/
- uint32_t numbers_queue; /*2, 08-11*/
- uint32_t sdram_size; /*3, 12-15*/
- uint32_t ide_channels; /*4, 16-19*/
- char vendor[40]; /*5, 20-59*/
- char model[8]; /*15, 60-67*/
- char firmware_ver[16]; /*17, 68-83*/
- char device_map[16]; /*21, 84-99*/
- uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
- uint8_t cfgSerial[16]; /*26,104-119*/
- uint32_t cfgPicStatus; /*30,120-123*/
+ uint32_t signature; /*0, 00-03*/
+ uint32_t request_len; /*1, 04-07*/
+ uint32_t numbers_queue; /*2, 08-11*/
+ uint32_t sdram_size; /*3, 12-15*/
+ uint32_t ide_channels; /*4, 16-19*/
+ char vendor[40]; /*5, 20-59*/
+ char model[8]; /*15, 60-67*/
+ char firmware_ver[16]; /*17, 68-83*/
+ char device_map[16]; /*21, 84-99*/
+ uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
+ uint8_t cfgSerial[16]; /*26,104-119*/
+ uint32_t cfgPicStatus; /*30,120-123*/
};
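The byte offsets in the FIRMWARE_INFO comments can be verified mechanically; this standalone replica asserts them at compile time, assuming the fixed-width fields pack with no padding (which holds here, since every uint32_t lands on a 4-byte boundary):

#include <stddef.h>
#include <stdint.h>

struct firmware_info {
	uint32_t signature;		/* 00-03 */
	uint32_t request_len;		/* 04-07 */
	uint32_t numbers_queue;		/* 08-11 */
	uint32_t sdram_size;		/* 12-15 */
	uint32_t ide_channels;		/* 16-19 */
	char vendor[40];		/* 20-59 */
	char model[8];			/* 60-67 */
	char firmware_ver[16];		/* 68-83 */
	char device_map[16];		/* 84-99 */
	uint32_t cfgVersion;		/* 100-103 */
	uint8_t cfgSerial[16];		/* 104-119 */
	uint32_t cfgPicStatus;		/* 120-123 */
};

_Static_assert(offsetof(struct firmware_info, vendor) == 20,
	       "vendor starts at byte 20");
_Static_assert(offsetof(struct firmware_info, cfgVersion) == 100,
	       "cfgVersion starts at byte 100");
_Static_assert(sizeof(struct firmware_info) == 124,
	       "124 bytes total, no padding");

int main(void)
{
	return 0;
}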
/* signature of set and get firmware config */
-#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
-#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
+#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
+#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
/* message code of inbound message register */
-#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
-#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
-#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
-#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
-#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
-#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
-#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
-#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
-#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
+#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
+#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
+#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
+#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
+#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
+#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
+#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
+#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
+#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
/* doorbell interrupt generator */
-#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
-#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
-#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
-#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
+#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
+#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
+#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
+#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
/* ccb areca cdb flag */
-#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
-#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
-#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
-#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
-#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
+#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
+#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
+#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
+#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
+#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
/* outbound firmware ok */
-#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
+#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
/* ARC-1680 Bus Reset*/
-#define ARCMSR_ARC1680_BUS_RESET 0x00000003
+#define ARCMSR_ARC1680_BUS_RESET 0x00000003
/* ARC-1880 Bus Reset*/
-#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
-#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
+#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
+#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
/*
************************************************************************
@@ -277,9 +282,10 @@ struct FIRMWARE_INFO
#define ARCMSR_MESSAGE_FLUSH_CACHE 0x00050008
/* (ARCMSR_INBOUND_MESG0_START_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
#define ARCMSR_MESSAGE_START_BGRB 0x00060008
+#define ARCMSR_MESSAGE_SYNC_TIMER 0x00080008
#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
-#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
+#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000
/* ioctl transfer */
@@ -288,7 +294,7 @@ struct FIRMWARE_INFO
#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002
#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004
#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008
-#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
+#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
/* data tunnel buffer between user space program and its firmware */
/* user space data to iop 128bytes */
@@ -313,12 +319,12 @@ struct FIRMWARE_INFO
#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK 0x00000008 /* When clear, the Outbound Post List FIFO Not Empty interrupt routes to the host.*/
#define ARCMSR_HBCMU_ALL_INTMASKENABLE 0x0000000D /* disable all ISR */
/* Host Interrupt Status */
-#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
+#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
/*
** Set when the Utility_A Interrupt bit is set in the Outbound Doorbell Register.
** It clears by writing a 1 to the Utility_A bit in the Outbound Doorbell Clear Register or through automatic clearing (if enabled).
*/
-#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
+#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
/*
** Set if Outbound Doorbell register bits 30:1 have a non-zero
** value. This bit clears only when Outbound Doorbell bits
@@ -331,7 +337,7 @@ struct FIRMWARE_INFO
** Register (FIFO) is not empty. It clears when the Outbound
** Post List FIFO is empty.
*/
-#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
+#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
/*
** This bit indicates a SAS interrupt from a source external to
** the PCIe core. This bit is not maskable.
@@ -340,17 +346,17 @@ struct FIRMWARE_INFO
#define ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK 0x00000002
#define ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK 0x00000004
/*inbound message 0 ready*/
-#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
+#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
/*more than 12 requests completed at a time*/
#define ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING 0x00000010
#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 0x00000002
/*outbound DATA WRITE isr door bell clear*/
-#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
+#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 0x00000004
/*outbound DATA READ isr door bell clear*/
-#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
+#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
/*outbound message 0 ready*/
-#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
+#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
/*outbound message cmd isr door bell clear*/
#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR 0x00000008
/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
@@ -407,18 +413,43 @@ struct FIRMWARE_INFO
#define ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR 0x00000001
/*
*******************************************************************************
+** SPEC. for Areca Type E adapter
+*******************************************************************************
+*/
+#define ARCMSR_SIGNATURE_1884 0x188417D3
+
+#define ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK 0x00000002
+#define ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK 0x00000004
+#define ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
+
+#define ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK 0x00000002
+#define ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK 0x00000004
+#define ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
+
+#define ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK 0x80000000
+
+#define ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR 0x00000001
+#define ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR 0x00000008
+#define ARCMSR_HBEMU_ALL_INTMASKENABLE 0x00000009
+
+/* ARC-1884 doorbell sync */
+#define ARCMSR_HBEMU_DOORBELL_SYNC 0x100
+#define ARCMSR_ARC188X_RESET_ADAPTER 0x00000004
+#define ARCMSR_ARC1884_DiagWrite_ENABLE 0x00000080
+/*
+*******************************************************************************
** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
*******************************************************************************
*/
struct ARCMSR_CDB
{
- uint8_t Bus;
- uint8_t TargetID;
- uint8_t LUN;
- uint8_t Function;
- uint8_t CdbLength;
- uint8_t sgcount;
- uint8_t Flags;
+ uint8_t Bus;
+ uint8_t TargetID;
+ uint8_t LUN;
+ uint8_t Function;
+ uint8_t CdbLength;
+ uint8_t sgcount;
+ uint8_t Flags;
#define ARCMSR_CDB_FLAG_SGL_BSIZE 0x01
#define ARCMSR_CDB_FLAG_BIOS 0x02
#define ARCMSR_CDB_FLAG_WRITE 0x04
@@ -426,21 +457,21 @@ struct ARCMSR_CDB
#define ARCMSR_CDB_FLAG_HEADQ 0x08
#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10
- uint8_t msgPages;
- uint32_t msgContext;
- uint32_t DataLength;
- uint8_t Cdb[16];
- uint8_t DeviceStatus;
+ uint8_t msgPages;
+ uint32_t msgContext;
+ uint32_t DataLength;
+ uint8_t Cdb[16];
+ uint8_t DeviceStatus;
#define ARCMSR_DEV_CHECK_CONDITION 0x02
#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0
#define ARCMSR_DEV_ABORTED 0xF1
#define ARCMSR_DEV_INIT_FAIL 0xF2
- uint8_t SenseData[15];
+ uint8_t SenseData[15];
union
{
- struct SG32ENTRY sg32entry[1];
- struct SG64ENTRY sg64entry[1];
+ struct SG32ENTRY sg32entry[1];
+ struct SG64ENTRY sg64entry[1];
} u;
};
/*
@@ -480,13 +511,13 @@ struct MessageUnit_B
uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
uint32_t postq_index;
uint32_t doneq_index;
- uint32_t __iomem *drv2iop_doorbell;
- uint32_t __iomem *drv2iop_doorbell_mask;
- uint32_t __iomem *iop2drv_doorbell;
- uint32_t __iomem *iop2drv_doorbell_mask;
- uint32_t __iomem *message_rwbuffer;
- uint32_t __iomem *message_wbuffer;
- uint32_t __iomem *message_rbuffer;
+ uint32_t __iomem *drv2iop_doorbell;
+ uint32_t __iomem *drv2iop_doorbell_mask;
+ uint32_t __iomem *iop2drv_doorbell;
+ uint32_t __iomem *iop2drv_doorbell_mask;
+ uint32_t __iomem *message_rwbuffer;
+ uint32_t __iomem *message_wbuffer;
+ uint32_t __iomem *message_rbuffer;
};
/*
*********************************************************************
@@ -506,7 +537,7 @@ struct MessageUnit_C{
uint32_t diagnostic_rw_data; /*0024 0027*/
uint32_t diagnostic_rw_address_low; /*0028 002B*/
uint32_t diagnostic_rw_address_high; /*002C 002F*/
- uint32_t host_int_status; /*0030 0033*/
+ uint32_t host_int_status; /*0030 0033*/
uint32_t host_int_mask; /*0034 0037*/
uint32_t dcr_data; /*0038 003B*/
uint32_t dcr_address; /*003C 003F*/
@@ -518,12 +549,12 @@ struct MessageUnit_C{
uint32_t iop_int_mask; /*0054 0057*/
uint32_t iop_inbound_queue_port; /*0058 005B*/
uint32_t iop_outbound_queue_port; /*005C 005F*/
- uint32_t inbound_free_list_index; /*0060 0063*/
- uint32_t inbound_post_list_index; /*0064 0067*/
- uint32_t outbound_free_list_index; /*0068 006B*/
- uint32_t outbound_post_list_index; /*006C 006F*/
+ uint32_t inbound_free_list_index; /*0060 0063*/
+ uint32_t inbound_post_list_index; /*0064 0067*/
+ uint32_t outbound_free_list_index; /*0068 006B*/
+ uint32_t outbound_post_list_index; /*006C 006F*/
uint32_t inbound_doorbell_clear; /*0070 0073*/
- uint32_t i2o_message_unit_control; /*0074 0077*/
+ uint32_t i2o_message_unit_control; /*0074 0077*/
uint32_t last_used_message_source_address_low; /*0078 007B*/
uint32_t last_used_message_source_address_high; /*007C 007F*/
uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
@@ -531,7 +562,7 @@ struct MessageUnit_C{
uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
uint32_t utility_A_int_counter_timer; /*0098 009B*/
uint32_t outbound_doorbell; /*009C 009F*/
- uint32_t outbound_doorbell_clear; /*00A0 00A3*/
+ uint32_t outbound_doorbell_clear; /*00A0 00A3*/
uint32_t message_source_address_index; /*00A4 00A7*/
uint32_t message_done_queue_index; /*00A8 00AB*/
uint32_t reserved0; /*00AC 00AF*/
@@ -553,10 +584,10 @@ struct MessageUnit_C{
uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
- uint32_t host_diagnostic; /*00F8 00FB*/
+ uint32_t host_diagnostic; /*00F8 00FB*/
uint32_t write_sequence; /*00FC 00FF*/
uint32_t reserved1[34]; /*0100 0187*/
- uint32_t reserved2[1950]; /*0188 1FFF*/
+ uint32_t reserved2[1950]; /*0188 1FFF*/
uint32_t message_wbuffer[32]; /*2000 207F*/
uint32_t reserved3[32]; /*2080 20FF*/
uint32_t message_rbuffer[32]; /*2100 217F*/
@@ -614,115 +645,208 @@ struct MessageUnit_D {
u32 __iomem *msgcode_rwbuffer; /* 0x2200 */
};
/*
+*********************************************************************
+** Messaging Unit (MU) of Type E processor (LSI)
+*********************************************************************
+*/
+struct MessageUnit_E{
+ uint32_t iobound_doorbell; /*0000 0003*/
+ uint32_t write_sequence_3xxx; /*0004 0007*/
+ uint32_t host_diagnostic_3xxx; /*0008 000B*/
+ uint32_t posted_outbound_doorbell; /*000C 000F*/
+ uint32_t master_error_attribute; /*0010 0013*/
+ uint32_t master_error_address_low; /*0014 0017*/
+ uint32_t master_error_address_high; /*0018 001B*/
+ uint32_t hcb_size; /*001C 001F*/
+ uint32_t inbound_doorbell; /*0020 0023*/
+ uint32_t diagnostic_rw_data; /*0024 0027*/
+ uint32_t diagnostic_rw_address_low; /*0028 002B*/
+ uint32_t diagnostic_rw_address_high; /*002C 002F*/
+ uint32_t host_int_status; /*0030 0033*/
+ uint32_t host_int_mask; /*0034 0037*/
+ uint32_t dcr_data; /*0038 003B*/
+ uint32_t dcr_address; /*003C 003F*/
+ uint32_t inbound_queueport; /*0040 0043*/
+ uint32_t outbound_queueport; /*0044 0047*/
+ uint32_t hcb_pci_address_low; /*0048 004B*/
+ uint32_t hcb_pci_address_high; /*004C 004F*/
+ uint32_t iop_int_status; /*0050 0053*/
+ uint32_t iop_int_mask; /*0054 0057*/
+ uint32_t iop_inbound_queue_port; /*0058 005B*/
+ uint32_t iop_outbound_queue_port; /*005C 005F*/
+ uint32_t inbound_free_list_index; /*0060 0063*/
+ uint32_t inbound_post_list_index; /*0064 0067*/
+ uint32_t reply_post_producer_index; /*0068 006B*/
+ uint32_t reply_post_consumer_index; /*006C 006F*/
+ uint32_t inbound_doorbell_clear; /*0070 0073*/
+ uint32_t i2o_message_unit_control; /*0074 0077*/
+ uint32_t last_used_message_source_address_low; /*0078 007B*/
+ uint32_t last_used_message_source_address_high; /*007C 007F*/
+ uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
+ uint32_t message_dest_address_index; /*0090 0093*/
+ uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
+ uint32_t utility_A_int_counter_timer; /*0098 009B*/
+ uint32_t outbound_doorbell; /*009C 009F*/
+ uint32_t outbound_doorbell_clear; /*00A0 00A3*/
+ uint32_t message_source_address_index; /*00A4 00A7*/
+ uint32_t message_done_queue_index; /*00A8 00AB*/
+ uint32_t reserved0; /*00AC 00AF*/
+ uint32_t inbound_msgaddr0; /*00B0 00B3*/
+ uint32_t inbound_msgaddr1; /*00B4 00B7*/
+ uint32_t outbound_msgaddr0; /*00B8 00BB*/
+ uint32_t outbound_msgaddr1; /*00BC 00BF*/
+ uint32_t inbound_queueport_low; /*00C0 00C3*/
+ uint32_t inbound_queueport_high; /*00C4 00C7*/
+ uint32_t outbound_queueport_low; /*00C8 00CB*/
+ uint32_t outbound_queueport_high; /*00CC 00CF*/
+ uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/
+ uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/
+ uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/
+ uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/
+ uint32_t message_dest_queue_port_low; /*00E0 00E3*/
+ uint32_t message_dest_queue_port_high; /*00E4 00E7*/
+ uint32_t last_used_message_dest_address_low; /*00E8 00EB*/
+ uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
+ uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
+ uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
+ uint32_t host_diagnostic; /*00F8 00FB*/
+ uint32_t write_sequence; /*00FC 00FF*/
+ uint32_t reserved1[34]; /*0100 0187*/
+ uint32_t reserved2[1950]; /*0188 1FFF*/
+ uint32_t message_wbuffer[32]; /*2000 207F*/
+ uint32_t reserved3[32]; /*2080 20FF*/
+ uint32_t message_rbuffer[32]; /*2100 217F*/
+ uint32_t reserved4[32]; /*2180 21FF*/
+ uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/
+};
+
+typedef struct deliver_completeQ {
+ uint16_t cmdFlag;
+ uint16_t cmdSMID;
+ uint16_t cmdLMID; // reserved (0)
+ uint16_t cmdFlag2; // reserved (0)
+} DeliverQ, CompletionQ, *pDeliver_Q, *pCompletion_Q;
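/*
** Editor's note (not part of the patch): on Type E adapters each CCB
** carries a 16-bit sequence number in the upper half of ccb->smid
** (assigned as i << 16 when the CCB pool is built). The firmware hands
** that number back as cmdSMID in a completion entry, so reply handling
** becomes a direct table lookup (entry pointer name illustrative):
**
**	ccb = acb->pccb_pool[entry->cmdSMID];
**
** rather than the virtual/physical address arithmetic the older
** adapter types use.
*/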
+/*
*******************************************************************************
** Adapter Control Block
*******************************************************************************
*/
struct AdapterControlBlock
{
- uint32_t adapter_type; /* adapter A,B..... */
- #define ACB_ADAPTER_TYPE_A 0x00000001 /* hba I IOP */
- #define ACB_ADAPTER_TYPE_B 0x00000002 /* hbb M IOP */
- #define ACB_ADAPTER_TYPE_C 0x00000004 /* hbc P IOP */
- #define ACB_ADAPTER_TYPE_D 0x00000008 /* hbd A IOP */
- u32 roundup_ccbsize;
- struct pci_dev * pdev;
- struct Scsi_Host * host;
- unsigned long vir2phy_offset;
+ uint32_t adapter_type; /* adapter A,B..... */
+#define ACB_ADAPTER_TYPE_A 0x00000000 /* hba I IOP */
+#define ACB_ADAPTER_TYPE_B 0x00000001 /* hbb M IOP */
+#define ACB_ADAPTER_TYPE_C 0x00000002 /* hbc L IOP */
+#define ACB_ADAPTER_TYPE_D 0x00000003 /* hbd M IOP */
+#define ACB_ADAPTER_TYPE_E 0x00000004 /* hba L IOP */
+ u32 roundup_ccbsize;
+ struct pci_dev * pdev;
+ struct Scsi_Host * host;
+ unsigned long vir2phy_offset;
/* Offset is used in making arc cdb physical to virtual calculations */
- uint32_t outbound_int_enable;
- uint32_t cdb_phyaddr_hi32;
- uint32_t reg_mu_acc_handle0;
- spinlock_t eh_lock;
- spinlock_t ccblist_lock;
- spinlock_t postq_lock;
- spinlock_t doneq_lock;
- spinlock_t rqbuffer_lock;
- spinlock_t wqbuffer_lock;
+ uint32_t outbound_int_enable;
+ uint32_t cdb_phyaddr_hi32;
+ uint32_t reg_mu_acc_handle0;
+ spinlock_t eh_lock;
+ spinlock_t ccblist_lock;
+ spinlock_t postq_lock;
+ spinlock_t doneq_lock;
+ spinlock_t rqbuffer_lock;
+ spinlock_t wqbuffer_lock;
union {
struct MessageUnit_A __iomem *pmuA;
struct MessageUnit_B *pmuB;
struct MessageUnit_C __iomem *pmuC;
struct MessageUnit_D *pmuD;
+ struct MessageUnit_E __iomem *pmuE;
};
/* message unit ATU inbound base address0 */
- void __iomem *mem_base0;
- void __iomem *mem_base1;
- uint32_t acb_flags;
+ void __iomem *mem_base0;
+ void __iomem *mem_base1;
+ uint32_t acb_flags;
u16 dev_id;
- uint8_t adapter_index;
- #define ACB_F_SCSISTOPADAPTER 0x0001
- #define ACB_F_MSG_STOP_BGRB 0x0002
- /* stop RAID background rebuild */
- #define ACB_F_MSG_START_BGRB 0x0004
- /* stop RAID background rebuild */
- #define ACB_F_IOPDATA_OVERFLOW 0x0008
- /* iop message data rqbuffer overflow */
- #define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
- /* message clear wqbuffer */
- #define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
- /* message clear rqbuffer */
- #define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
- #define ACB_F_BUS_RESET 0x0080
- #define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */
+ uint8_t adapter_index;
+#define ACB_F_SCSISTOPADAPTER 0x0001
+#define ACB_F_MSG_STOP_BGRB 0x0002
+/* stop RAID background rebuild */
+#define ACB_F_MSG_START_BGRB 0x0004
+/* stop RAID background rebuild */
+#define ACB_F_IOPDATA_OVERFLOW 0x0008
+/* iop message data rqbuffer overflow */
+#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
+/* message clear wqbuffer */
+#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
+/* message clear rqbuffer */
+#define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
+#define ACB_F_BUS_RESET 0x0080
+#define ACB_F_BUS_HANG_ON			0x0800	/* need hardware reset bus */
- #define ACB_F_IOP_INITED 0x0100
- /* iop init */
- #define ACB_F_ABORT 0x0200
- #define ACB_F_FIRMWARE_TRAP 0x0400
- struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
+#define ACB_F_IOP_INITED 0x0100
+/* iop init */
+#define ACB_F_ABORT 0x0200
+#define ACB_F_FIRMWARE_TRAP 0x0400
+#define ACB_F_MSG_GET_CONFIG 0x1000
+ struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
/* used for memory free */
- struct list_head ccb_free_list;
+ struct list_head ccb_free_list;
/* head of free ccb list */
- atomic_t ccboutstandingcount;
+ atomic_t ccboutstandingcount;
	/* the number of outstanding commands in the IOP that are
	waiting to be handled by FW */
- void * dma_coherent;
+ void * dma_coherent;
/* dma_coherent used for memory free */
- dma_addr_t dma_coherent_handle;
+ dma_addr_t dma_coherent_handle;
/* dma_coherent_handle used for memory free */
- dma_addr_t dma_coherent_handle2;
- void *dma_coherent2;
- unsigned int uncache_size;
- uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
+ dma_addr_t dma_coherent_handle2;
+ void *dma_coherent2;
+ unsigned int uncache_size;
+ uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
/* data collection buffer for read from 80331 */
- int32_t rqbuf_getIndex;
+ int32_t rqbuf_getIndex;
/* first of read buffer */
- int32_t rqbuf_putIndex;
+ int32_t rqbuf_putIndex;
/* last of read buffer */
- uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
+ uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
/* data collection buffer for write to 80331 */
- int32_t wqbuf_getIndex;
+ int32_t wqbuf_getIndex;
/* first of write buffer */
- int32_t wqbuf_putIndex;
+ int32_t wqbuf_putIndex;
/* last of write buffer */
- uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
+ uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
/* id0 ..... id15, lun0...lun7 */
-#define ARECA_RAID_GONE 0x55
-#define ARECA_RAID_GOOD 0xaa
- uint32_t num_resets;
- uint32_t num_aborts;
- uint32_t signature;
- uint32_t firm_request_len;
- uint32_t firm_numbers_queue;
- uint32_t firm_sdram_size;
- uint32_t firm_hd_channels;
- uint32_t firm_cfg_version;
+#define ARECA_RAID_GONE 0x55
+#define ARECA_RAID_GOOD 0xaa
+ uint32_t num_resets;
+ uint32_t num_aborts;
+ uint32_t signature;
+ uint32_t firm_request_len;
+ uint32_t firm_numbers_queue;
+ uint32_t firm_sdram_size;
+ uint32_t firm_hd_channels;
+ uint32_t firm_cfg_version;
char firm_model[12];
char firm_version[20];
char device_map[20]; /*21,84-99*/
- struct work_struct arcmsr_do_message_isr_bh;
- struct timer_list eternal_timer;
+ struct work_struct arcmsr_do_message_isr_bh;
+ struct timer_list eternal_timer;
unsigned short fw_flag;
- #define FW_NORMAL 0x0000
- #define FW_BOG 0x0001
- #define FW_DEADLOCK 0x0010
- atomic_t rq_map_token;
- atomic_t ante_token_value;
- uint32_t maxOutstanding;
- int vector_count;
+#define FW_NORMAL 0x0000
+#define FW_BOG 0x0001
+#define FW_DEADLOCK 0x0010
+ atomic_t rq_map_token;
+ atomic_t ante_token_value;
+ uint32_t maxOutstanding;
+ int vector_count;
+ uint32_t maxFreeCCB;
+ struct timer_list refresh_timer;
+ uint32_t doneq_index;
+ uint32_t ccbsize;
+ uint32_t in_doorbell;
+ uint32_t out_doorbell;
+ uint32_t completionQ_entry;
+ pCompletion_Q pCompletionQ;
};/* HW_DEVICE_EXTENSION */
/*
*******************************************************************************
@@ -732,29 +856,30 @@ struct AdapterControlBlock
*/
struct CommandControlBlock{
/*x32:sizeof struct_CCB=(32+60)byte, x64:sizeof struct_CCB=(64+60)byte*/
- struct list_head list; /*x32: 8byte, x64: 16byte*/
- struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
- struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
- uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
- uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
- uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
- #define CCB_FLAG_READ 0x0000
- #define CCB_FLAG_WRITE 0x0001
- #define CCB_FLAG_ERROR 0x0002
- #define CCB_FLAG_FLUSHCACHE 0x0004
- #define CCB_FLAG_MASTER_ABORTED 0x0008
- uint16_t startdone; /*x32:2byte,x32:2byte*/
- #define ARCMSR_CCB_DONE 0x0000
- #define ARCMSR_CCB_START 0x55AA
- #define ARCMSR_CCB_ABORTED 0xAA55
- #define ARCMSR_CCB_ILLEGAL 0xFFFF
- #if BITS_PER_LONG == 64
+ struct list_head list; /*x32: 8byte, x64: 16byte*/
+ struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
+ struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
+ uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
+ uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
+ uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
+#define CCB_FLAG_READ 0x0000
+#define CCB_FLAG_WRITE 0x0001
+#define CCB_FLAG_ERROR 0x0002
+#define CCB_FLAG_FLUSHCACHE 0x0004
+#define CCB_FLAG_MASTER_ABORTED 0x0008
+ uint16_t startdone; /*x32:2byte,x32:2byte*/
+#define ARCMSR_CCB_DONE 0x0000
+#define ARCMSR_CCB_START 0x55AA
+#define ARCMSR_CCB_ABORTED 0xAA55
+#define ARCMSR_CCB_ILLEGAL 0xFFFF
+ uint32_t smid;
+#if BITS_PER_LONG == 64
/* ======================512+64 bytes======================== */
- uint32_t reserved[5]; /*24 byte*/
- #else
+ uint32_t reserved[4]; /*16 byte*/
+#else
/* ======================512+32 bytes======================== */
- uint32_t reserved; /*8 byte*/
- #endif
+ // uint32_t reserved; /*4 byte*/
+#endif
/* ======================================================= */
struct ARCMSR_CDB arcmsr_cdb;
};
@@ -788,13 +913,13 @@ struct SENSE_DATA
** Outbound Interrupt Status Register - OISR
*******************************************************************************
*/
-#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
-#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
-#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
-#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
-#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
-#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
-#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
+#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
+#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
+#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
+#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
+#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
+#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
+#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
(ARCMSR_MU_OUTBOUND_MESSAGE0_INT \
|ARCMSR_MU_OUTBOUND_MESSAGE1_INT \
|ARCMSR_MU_OUTBOUND_DOORBELL_INT \
@@ -805,13 +930,13 @@ struct SENSE_DATA
** Outbound Interrupt Mask Register - OIMR
*******************************************************************************
*/
-#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
-#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
-#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
-#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
-#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
-#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
-#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
+#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
+#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
+#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
+#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
+#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
+#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
+#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
extern void arcmsr_write_ioctldata2iop(struct AdapterControlBlock *);
extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 21f6421536a0..75e828bd30e3 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -75,6 +75,26 @@ MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
+static int msix_enable = 1;
+module_param(msix_enable, int, S_IRUGO);
+MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt (0 ~ 1), msix_enable=1 (enable), =0 (disable)");
+
+static int msi_enable = 1;
+module_param(msi_enable, int, S_IRUGO);
+MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt (0 ~ 1), msi_enable=1 (enable), =0 (disable)");
+
+static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
+module_param(host_can_queue, int, S_IRUGO);
+MODULE_PARM_DESC(host_can_queue, "adapter queue depth (32 ~ 1024), default is 128");
+
+static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
+module_param(cmd_per_lun, int, S_IRUGO);
+MODULE_PARM_DESC(cmd_per_lun, "device queue depth (1 ~ 128), default is 32");
+
+static int set_date_time = 0;
+module_param(set_date_time, int, S_IRUGO);
+MODULE_PARM_DESC(set_date_time, "send date/time to iop (0 ~ 1), set_date_time=1 (enable), default (=0) is disabled");
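/*
** Editor's note (not part of the patch): all five knobs are read-only
** module parameters (S_IRUGO), so they can only be set at load time,
** for example:
**
**	modprobe arcmsr msix_enable=0 host_can_queue=256 cmd_per_lun=16
**
** Out-of-range host_can_queue and cmd_per_lun values are pulled back to
** the ARCMSR_DEFAULT_* constants in arcmsr_probe() before being handed
** to the SCSI midlayer.
*/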
+
#define ARCMSR_SLEEPTIME 10
#define ARCMSR_RETRYCOUNT 12
@@ -102,19 +122,19 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(struct timer_list *t);
-static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
+static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
+static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
+static void arcmsr_set_iop_datetime(struct timer_list *);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
@@ -127,15 +147,15 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.name = "Areca SAS/SATA RAID driver",
.info = arcmsr_info,
.queuecommand = arcmsr_queue_command,
- .eh_abort_handler = arcmsr_abort,
+ .eh_abort_handler = arcmsr_abort,
.eh_bus_reset_handler = arcmsr_bus_reset,
.bios_param = arcmsr_bios_param,
.change_queue_depth = arcmsr_adjust_disk_queue_depth,
- .can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
- .this_id = ARCMSR_SCSI_INITIATOR_ID,
- .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
- .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
- .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
+ .can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD,
+ .this_id = ARCMSR_SCSI_INITIATOR_ID,
+ .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
+ .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
+ .cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = arcmsr_host_attrs,
.no_write_same = 1,
@@ -184,13 +204,15 @@ static struct pci_device_id arcmsr_device_id_table[] = {
.driver_data = ACB_ADAPTER_TYPE_A},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
.driver_data = ACB_ADAPTER_TYPE_C},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
+ .driver_data = ACB_ADAPTER_TYPE_E},
{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
static struct pci_driver arcmsr_pci_driver = {
.name = "arcmsr",
- .id_table = arcmsr_device_id_table,
+ .id_table = arcmsr_device_id_table,
.probe = arcmsr_probe,
.remove = arcmsr_remove,
.suspend = arcmsr_suspend,
@@ -206,7 +228,8 @@ static void arcmsr_free_mu(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_B:
- case ACB_ADAPTER_TYPE_D: {
+ case ACB_ADAPTER_TYPE_D:
+ case ACB_ADAPTER_TYPE_E: {
dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
acb->dma_coherent2, acb->dma_coherent_handle2);
break;
@@ -271,6 +294,20 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
acb->mem_base0 = mem_base0;
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ acb->pmuE = ioremap(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+ if (!acb->pmuE) {
+			pr_notice("arcmsr%d: memory mapping region failed\n",
+ acb->host->host_no);
+ return false;
+ }
+ writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
+ writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); /* synchronize doorbell to 0 */
+ acb->in_doorbell = 0;
+ acb->out_doorbell = 0;
+ break;
+ }
}
return true;
}
@@ -295,6 +332,9 @@ static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
iounmap(acb->mem_base0);
break;
+ case ACB_ADAPTER_TYPE_E:
+ iounmap(acb->pmuE);
+ break;
}
}
@@ -408,6 +448,24 @@ static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
return false;
}
+static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
+{
+ int i;
+ uint32_t read_doorbell;
+ struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;
+
+ for (i = 0; i < 2000; i++) {
+ read_doorbell = readl(&phbcmu->iobound_doorbell);
+ if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ writel(0, &phbcmu->host_int_status); /*clear interrupt*/
+ pACB->in_doorbell = read_doorbell;
+ return true;
+ }
+ msleep(10);
+ } /* max 20 seconds */
+ return false;
+}
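/*
** Editor's sketch (not part of the patch): Type E doorbell bits toggle
** rather than latch. The driver keeps shadow copies in acb->in_doorbell
** and acb->out_doorbell, flips the bit it wants to raise, and writes
** the whole shadow word back; an event has occurred when the live
** register differs from the shadow, hence the XOR test above. Every
** message post in this patch therefore follows the same shape
** (message code placeholder illustrative):
**
**	writel(ARCMSR_INBOUND_MESG0_xxx, &reg->inbound_msgaddr0);
**	acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
**	writel(acb->out_doorbell, &reg->iobound_doorbell);
**	if (!arcmsr_hbaE_wait_msgint_ready(acb))
**		pr_notice("arcmsr%d: ... timeout\n", acb->host->host_no);
*/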
+
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -475,6 +533,24 @@ static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
} while (retry_count != 0);
}
+static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
+{
+ int retry_count = 30;
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ do {
+ if (arcmsr_hbaE_wait_msgint_ready(pACB))
+ break;
+ retry_count--;
+ pr_notice("arcmsr%d: wait 'flush adapter "
+ "cache' timeout, retry count down = %d\n",
+ pACB->host->host_no, retry_count);
+ } while (retry_count != 0);
+}
+
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -495,6 +571,9 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_flush_cache(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_flush_cache(acb);
+ break;
}
}
@@ -577,6 +656,23 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ uint32_t completeQ_size;
+ completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
+ acb->roundup_ccbsize = roundup(completeQ_size, 32);
+ dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+ &dma_coherent_handle, GFP_KERNEL);
+ if (!dma_coherent){
+ pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
+ return false;
+ }
+ acb->dma_coherent_handle2 = dma_coherent_handle;
+ acb->dma_coherent2 = dma_coherent;
+ acb->pCompletionQ = dma_coherent;
+ acb->completionQ_entry = acb->roundup_ccbsize / sizeof(struct deliver_completeQ);
+ acb->doneq_index = 0;
+ }
+ break;
default:
break;
}
@@ -610,7 +706,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->host->max_sectors = max_xfer_len/512;
acb->host->sg_tablesize = max_sg_entrys;
roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
- acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
+ acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
if(!dma_coherent){
printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
@@ -619,9 +715,10 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->dma_coherent = dma_coherent;
acb->dma_coherent_handle = dma_coherent_handle;
memset(dma_coherent, 0, acb->uncache_size);
+ acb->ccbsize = roundup_ccbsize;
ccb_tmp = dma_coherent;
acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
- for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
+ for(i = 0; i < acb->maxFreeCCB; i++){
cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
@@ -630,11 +727,13 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_C:
case ACB_ADAPTER_TYPE_D:
+ case ACB_ADAPTER_TYPE_E:
ccb_tmp->cdb_phyaddr = cdb_phyaddr;
break;
}
acb->pccb_pool[i] = ccb_tmp;
ccb_tmp->acb = acb;
+ ccb_tmp->smid = (u32)i << 16;
INIT_LIST_HEAD(&ccb_tmp->list);
list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
@@ -654,6 +753,7 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
struct scsi_device *psdev;
char diff, temp;
+ acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -683,6 +783,13 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
+ devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+ break;
+ }
}
atomic_inc(&acb->rq_map_token);
if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
@@ -723,17 +830,26 @@ arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
unsigned long flags;
int nvec, i;
+ if (msix_enable == 0)
+ goto msi_int0;
nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
PCI_IRQ_MSIX);
if (nvec > 0) {
pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
flags = 0;
} else {
- nvec = pci_alloc_irq_vectors(pdev, 1, 1,
- PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+msi_int0:
+ if (msi_enable == 1) {
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (nvec == 1) {
+ dev_info(&pdev->dev, "msi enabled\n");
+ goto msi_int1;
+ }
+ }
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
if (nvec < 1)
return FAILED;
-
+msi_int1:
flags = IRQF_SHARED;
}
@@ -755,6 +871,24 @@ out_free_irq:
return FAILED;
}
+static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
+{
+ INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
+ atomic_set(&pacb->rq_map_token, 16);
+ atomic_set(&pacb->ante_token_value, 16);
+ pacb->fw_flag = FW_NORMAL;
+ timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
+ pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
+ add_timer(&pacb->eternal_timer);
+}
+
+static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
+{
+ timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
+ pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
+ add_timer(&pacb->refresh_timer);
+}
+
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *host;
@@ -789,8 +923,12 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_lun = ARCMSR_MAX_TARGETLUN;
host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
	host->max_cmd_len = 16;	/* needed for 64-bit LBA, volumes over 2 TB */
- host->can_queue = ARCMSR_MAX_OUTSTANDING_CMD;
- host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
+ if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
+ host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
+ host->can_queue = host_can_queue; /* max simultaneous cmds */
+ if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
+ cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
+ host->cmd_per_lun = cmd_per_lun;
host->this_id = ARCMSR_SCSI_INITIATOR_ID;
host->unique_id = (bus << 8) | dev_fun;
pci_set_drvdata(pdev, host);
@@ -833,18 +971,16 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto scsi_host_remove;
arcmsr_iop_init(acb);
- INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- add_timer(&acb->eternal_timer);
+ arcmsr_init_get_devmap_timer(acb);
+ if (set_date_time)
+ arcmsr_init_set_datetime_timer(acb);
if(arcmsr_alloc_sysfs_attr(acb))
goto out_free_sysfs;
scsi_scan_host(host);
return 0;
out_free_sysfs:
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
del_timer_sync(&acb->eternal_timer);
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
@@ -887,6 +1023,8 @@ static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
intmask_org = arcmsr_disable_outbound_ints(acb);
arcmsr_free_irq(pdev, acb);
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
@@ -924,13 +1062,9 @@ static int arcmsr_resume(struct pci_dev *pdev)
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto controller_stop;
arcmsr_iop_init(acb);
- INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- add_timer(&acb->eternal_timer);
+ arcmsr_init_get_devmap_timer(acb);
+ if (set_date_time)
+ arcmsr_init_set_datetime_timer(acb);
return 0;
controller_stop:
arcmsr_stop_adapter_bgrb(acb);
@@ -998,6 +1132,21 @@ static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
return true;
}
+static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'abort all outstanding "
+			"commands' timeout\n", pACB->host->host_no);
+ return false;
+ }
+ return true;
+}
+
static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
uint8_t rtnval = 0;
@@ -1020,6 +1169,9 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
rtnval = arcmsr_hbaD_abort_allcmd(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtnval = arcmsr_hbaE_abort_allcmd(acb);
+ break;
}
return rtnval;
}
@@ -1050,7 +1202,7 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
struct scsi_cmnd *pcmd = ccb->pcmd;
struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
- pcmd->result = DID_OK << 16;
+ pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
if (sensebuffer) {
int sense_data_length =
sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
@@ -1059,6 +1211,7 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
sensebuffer->Valid = 1;
+ pcmd->result |= (DRIVER_SENSE << 24);
}
}
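/*
** Editor's note (not part of the patch): cmd->result packs
** driver_byte << 24 | host_byte << 16 | msg_byte << 8 | status_byte.
** The rewritten function reports DID_OK together with a
** CHECK_CONDITION status byte, and only adds DRIVER_SENSE once valid
** sense data has actually been copied, so the midlayer consults
** pcmd->sense_buffer instead of treating the command as a clean
** success.
*/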
@@ -1092,6 +1245,13 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ orig_mask = readl(&reg->host_int_mask);
+ writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, &reg->host_int_mask);
+ readl(&reg->host_int_mask); /* Dummy readl to force pci flush */
+ }
+ break;
}
return orig_mask;
}
@@ -1196,7 +1356,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
/*clear and abort all outbound posted Q*/
writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
- && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
+ && (i++ < acb->maxOutstanding)) {
pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
@@ -1226,7 +1386,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_C: {
struct MessageUnit_C __iomem *reg = acb->pmuC;
- while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
+ while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
/*need to do*/
flag_ccb = readl(&reg->outbound_queueport_low);
ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
@@ -1280,6 +1440,9 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
pmu->doneq_index = 0x40FF;
}
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_postqueue_isr(acb);
+ break;
}
}
@@ -1293,13 +1456,15 @@ static void arcmsr_remove(struct pci_dev *pdev)
scsi_remove_host(host);
flush_work(&acb->arcmsr_do_message_isr_bh);
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
acb->acb_flags &= ~ACB_F_IOP_INITED;
- for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++){
+ for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
if (!atomic_read(&acb->ccboutstandingcount))
break;
arcmsr_interrupt(acb);/* FIXME: need spinlock */
@@ -1311,7 +1476,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
arcmsr_abort_allcmd(acb);
arcmsr_done4abort_postqueue(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
ccb->startdone = ARCMSR_CCB_ABORTED;
@@ -1335,6 +1500,8 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *)host->hostdata;
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_free_irq(pdev, acb);
flush_work(&acb->arcmsr_do_message_isr_bh);
@@ -1396,6 +1563,13 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
writel(intmask_org | mask, reg->pcief0_int_enable);
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
+ writel(intmask_org & mask, &reg->host_int_mask);
+ break;
+ }
}
}
@@ -1527,6 +1701,16 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
spin_unlock_irqrestore(&acb->postq_lock, flags);
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *pmu = acb->pmuE;
+ u32 ccb_post_stamp, arc_cdb_size;
+
+ arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
+ ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6));
+ writel(0, &pmu->inbound_queueport_high);
+ writel(ccb_post_stamp, &pmu->inbound_queueport_low);
+ break;
+ }
}
}
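/*
** Editor's note (not part of the patch): the Type E inbound queue takes
** a single 32-bit stamp per command: the CCB sequence number in bits
** 31:16 (ccb->smid) and one less than the command size in 64-byte units
** in the low bits ((arc_cdb_size - 1) >> 6). A full 504-byte ARCMSR_CDB
** therefore posts as smid | 7. The high queue port is zeroed first
** because only 32-bit stamps are posted here.
*/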
@@ -1580,6 +1764,20 @@ static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
"timeout\n", pACB->host->host_no);
}
+static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+		pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
+ "timeout\n", pACB->host->host_no);
+ }
+}
+
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -1599,6 +1797,9 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_stop_bgrb(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_stop_bgrb(acb);
+ break;
}
}
@@ -1633,6 +1834,12 @@ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
reg->inbound_doorbell);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ }
+ break;
}
}
@@ -1673,6 +1880,12 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
reg->inbound_doorbell);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ }
+ break;
}
}
@@ -1702,6 +1915,11 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
+ }
+ break;
}
return qbuffer;
}
@@ -1732,6 +1950,11 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc
pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
+ }
+ break;
}
return pqbuffer;
}
@@ -1785,7 +2008,7 @@ arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
uint8_t __iomem *iop_data;
uint32_t iop_len;
- if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D))
+ if (acb->adapter_type > ACB_ADAPTER_TYPE_B)
return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
iop_data = (uint8_t __iomem *)prbuffer->data;
iop_len = readl(&prbuffer->data_len);
@@ -1871,7 +2094,7 @@ arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
uint8_t __iomem *iop_data;
int32_t allxfer_len = 0;
- if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
+ if (acb->adapter_type > ACB_ADAPTER_TYPE_B) {
arcmsr_write_ioctldata2iop_in_DWORD(acb);
return;
}
@@ -1968,6 +2191,33 @@ static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
| ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
}
+static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t outbound_doorbell, in_doorbell, tmp;
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ in_doorbell = readl(&reg->iobound_doorbell);
+ outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
+ do {
+ writel(0, &reg->host_int_status); /* clear interrupt */
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
+ arcmsr_iop2drv_data_wrote_handle(pACB);
+ }
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
+ arcmsr_iop2drv_data_read_handle(pACB);
+ }
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ arcmsr_hbaE_message_isr(pACB);
+ }
+ tmp = in_doorbell;
+ in_doorbell = readl(&reg->iobound_doorbell);
+ outbound_doorbell = tmp ^ in_doorbell;
+ } while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK
+ | ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK
+ | ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE));
+ pACB->in_doorbell = in_doorbell;
+}
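/*
** Editor's note (not part of the patch): since the doorbell bits
** toggle, this ISR detects events by XOR-ing each fresh read of the
** register against the previous snapshot; any bit that changed is a
** new event. The loop re-reads until no relevant bit flips between
** passes, then stores the final snapshot in pACB->in_doorbell for the
** next interrupt to diff against.
*/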
+
static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
{
uint32_t flag_ccb;
@@ -2077,6 +2327,33 @@ static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
+static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
+{
+ uint32_t doneq_index;
+ uint16_t cmdSMID;
+ int error;
+ struct MessageUnit_E __iomem *pmu;
+ struct CommandControlBlock *ccb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ doneq_index = acb->doneq_index;
+ pmu = acb->pmuE;
+ while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) {
+ cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
+ ccb = acb->pccb_pool[cmdSMID];
+ error = (acb->pCompletionQ[doneq_index].cmdFlag
+ & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
+ arcmsr_drain_donequeue(acb, ccb, error);
+ doneq_index++;
+ if (doneq_index >= acb->completionQ_entry)
+ doneq_index = 0;
+ }
+ acb->doneq_index = doneq_index;
+ writel(doneq_index, &pmu->reply_post_consumer_index);
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+}
+
/*
**********************************************************************************
** Handle a message interrupt
@@ -2090,7 +2367,8 @@ static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
struct MessageUnit_A __iomem *reg = acb->pmuA;
/*clear interrupt and message state*/
writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
{
@@ -2098,7 +2376,8 @@ static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
/*clear interrupt and message state*/
writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
/*
**********************************************************************************
@@ -2114,7 +2393,8 @@ static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
struct MessageUnit_C __iomem *reg = acb->pmuC;
/*clear interrupt and message state*/
writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
@@ -2123,7 +2403,17 @@ static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
readl(reg->outbound_doorbell);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
+
+static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ writel(0, &reg->host_int_status);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
@@ -2229,6 +2519,31 @@ static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
return IRQ_HANDLED;
}
+static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t host_interrupt_status;
+ struct MessageUnit_E __iomem *pmu = pACB->pmuE;
+
+ host_interrupt_status = readl(&pmu->host_int_status) &
+ (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
+ if (!host_interrupt_status)
+ return IRQ_NONE;
+ do {
+ /* MU ioctl transfer doorbell interrupts*/
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
+ arcmsr_hbaE_doorbell_isr(pACB);
+ }
+ /* MU post queue interrupts*/
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
+ arcmsr_hbaE_postqueue_isr(pACB);
+ }
+ host_interrupt_status = readl(&pmu->host_int_status);
+ } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
+ return IRQ_HANDLED;
+}
+
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -2242,6 +2557,8 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
return arcmsr_hbaC_handle_isr(acb);
case ACB_ADAPTER_TYPE_D:
return arcmsr_hbaD_handle_isr(acb);
+ case ACB_ADAPTER_TYPE_E:
+ return arcmsr_hbaE_handle_isr(acb);
default:
return IRQ_NONE;
}
@@ -2636,74 +2953,66 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
static DEF_SCSI_QCMD(arcmsr_queue_command)
-static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
+static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
{
- struct MessageUnit_A __iomem *reg = acb->pmuA;
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
- char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
- char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
int count;
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
- printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
- miscellaneous data' timeout \n", acb->host->host_no);
- return false;
- }
- count = 8;
- while (count){
- *acb_firm_model = readb(iop_firm_model);
+ uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
+ uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
+ uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
+ uint32_t *firm_model = &rwbuffer[15];
+ uint32_t *firm_version = &rwbuffer[17];
+ uint32_t *device_map = &rwbuffer[21];
+
+ count = 2;
+ while (count) {
+ *acb_firm_model = readl(firm_model);
acb_firm_model++;
- iop_firm_model++;
+ firm_model++;
count--;
}
-
- count = 16;
- while (count){
- *acb_firm_version = readb(iop_firm_version);
+ count = 4;
+ while (count) {
+ *acb_firm_version = readl(firm_version);
acb_firm_version++;
- iop_firm_version++;
+ firm_version++;
count--;
}
-
- count=16;
- while(count){
- *acb_device_map = readb(iop_device_map);
+ count = 4;
+ while (count) {
+ *acb_device_map = readl(device_map);
acb_device_map++;
- iop_device_map++;
+ device_map++;
count--;
}
+ pACB->signature = readl(&rwbuffer[0]);
+ pACB->firm_request_len = readl(&rwbuffer[1]);
+ pACB->firm_numbers_queue = readl(&rwbuffer[2]);
+ pACB->firm_sdram_size = readl(&rwbuffer[3]);
+ pACB->firm_hd_channels = readl(&rwbuffer[4]);
+ pACB->firm_cfg_version = readl(&rwbuffer[25]);
pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
- acb->signature = readl(&reg->message_rwbuffer[0]);
- acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
- acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
- acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
- acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
- acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
+ pACB->host->host_no,
+ pACB->firm_model,
+ pACB->firm_version);
+}
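/*
** Editor's note (not part of the patch): word offsets consumed above,
** as implied by this helper:
**
**	rwbuffer[0]  signature        rwbuffer[15..16] firm_model   (8 bytes)
**	rwbuffer[1]  request_len      rwbuffer[17..20] firm_version (16 bytes)
**	rwbuffer[2]  numbers_queue    rwbuffer[21..24] device_map   (16 bytes)
**	rwbuffer[3]  sdram_size       rwbuffer[25]     firm_cfg_version
**	rwbuffer[4]  hd_channels
**
** Copying in readl() units (2/4/4 iterations) replaces the byte-wise
** readb() loops (8/16/16 bytes) that each per-adapter _get_config()
** used to duplicate, which is why the helper takes the raw rwbuffer
** pointer instead of a message-unit type.
*/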
+
+static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+
+ arcmsr_wait_firmware_ready(acb);
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
+		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
+			"miscellaneous data' timeout\n", acb->host->host_no);
+ return false;
+ }
+ arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
return true;
}
static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model;
- /*firm_model,15,60-67*/
- char __iomem *iop_firm_version;
- /*firm_version,17,68-83*/
- char __iomem *iop_device_map;
- /*firm_version,21,84-99*/
- int count;
-
- iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); /*firm_model,15,60-67*/
- iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); /*firm_version,17,68-83*/
- iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]); /*firm_version,21,84-99*/
arcmsr_wait_firmware_ready(acb);
writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
@@ -2717,127 +3026,43 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
miscellaneous data' timeout \n", acb->host->host_no);
return false;
}
- count = 8;
- while (count){
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count){
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
-
- count = 16;
- while(count){
- *acb_device_map = readb(iop_device_map);
- acb_device_map++;
- iop_device_map++;
- count--;
- }
-
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
-
- acb->signature = readl(&reg->message_rwbuffer[0]);
- /*firm_signature,1,00-03*/
- acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
- /*firm_request_len,1,04-07*/
- acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
- /*firm_numbers_queue,2,08-11*/
- acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
- /*firm_sdram_size,3,12-15*/
- acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
- /*firm_ide_channels,4,16-19*/
- acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
- /*firm_ide_channels,4,16-19*/
+ arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
return true;
}
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
{
- uint32_t intmask_org, Index, firmware_state = 0;
+ uint32_t intmask_org;
struct MessageUnit_C __iomem *reg = pACB->pmuC;
- char *acb_firm_model = pACB->firm_model;
- char *acb_firm_version = pACB->firm_version;
- char __iomem *iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
- char __iomem *iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
- int count;
+
/* disable all outbound interrupt */
intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
/* wait firmware ready */
- do {
- firmware_state = readl(&reg->outbound_msgaddr1);
- } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
+ arcmsr_wait_firmware_ready(pACB);
/* post "get config" instruction */
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
/* wait message ready */
- for (Index = 0; Index < 2000; Index++) {
- if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
- writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
- break;
- }
- udelay(10);
- } /*max 1 seconds*/
- if (Index >= 2000) {
+ if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", pACB->host->host_no);
return false;
}
- count = 8;
- while (count) {
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- pACB->host->host_no,
- pACB->firm_model,
- pACB->firm_version);
- pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
- pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
- pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
- pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
- pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
- /*all interrupt service will be enable at arcmsr_iop_init*/
+ arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
return true;
}
static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
{
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model;
- char __iomem *iop_firm_version;
- char __iomem *iop_device_map;
- u32 count;
struct MessageUnit_D *reg = acb->pmuD;
- iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);
- iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);
- iop_device_map = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
if (readl(acb->pmuD->outbound_doorbell) &
ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
acb->pmuD->outbound_doorbell);/*clear interrupt*/
}
+ arcmsr_wait_firmware_ready(acb);
/* post "get config" instruction */
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
/* wait message ready */
@@ -2846,42 +3071,33 @@ static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
"miscellaneous data timeout\n", acb->host->host_no);
return false;
}
- count = 8;
- while (count) {
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_device_map = readb(iop_device_map);
- acb_device_map++;
- iop_device_map++;
- count--;
+ arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
+ return true;
+}
+
+static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+ uint32_t intmask_org;
+
+ /* disable all outbound interrupt */
+ intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
+ writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
+ /* wait firmware ready */
+ arcmsr_wait_firmware_ready(pACB);
+ mdelay(20);
+ /* post "get config" instruction */
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ /* wait message ready */
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait get adapter firmware "
+ "miscellaneous data timeout\n", pACB->host->host_no);
+ return false;
}
- acb->signature = readl(&reg->msgcode_rwbuffer[0]);
- /*firm_signature,1,00-03*/
- acb->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);
- /*firm_request_len,1,04-07*/
- acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]);
- /*firm_numbers_queue,2,08-11*/
- acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);
- /*firm_sdram_size,3,12-15*/
- acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);
- /*firm_hd_channels,4,16-19*/
- acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
+ arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
return true;
}
@@ -2902,14 +3118,20 @@ static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
rtn = arcmsr_hbaD_get_config(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtn = arcmsr_hbaE_get_config(acb);
+ break;
default:
break;
}
- if (acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
- acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD;
+ acb->maxOutstanding = acb->firm_numbers_queue - 1;
+ if (acb->host->can_queue >= acb->firm_numbers_queue)
+ acb->host->can_queue = acb->maxOutstanding;
else
- acb->maxOutstanding = acb->firm_numbers_queue - 1;
- acb->host->can_queue = acb->maxOutstanding;
+ acb->maxOutstanding = acb->host->can_queue;
+ acb->maxFreeCCB = acb->host->can_queue;
+ if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
+ acb->maxFreeCCB += 64;
return rtn;
}
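/*
 * Worked example for the depth derivation above (numbers assumed for
 * illustration): if the firmware reports firm_numbers_queue = 256 and
 * the host template asked for can_queue = 128, maxOutstanding starts
 * at 255 but is capped to 128 by the else branch; maxFreeCCB then
 * follows can_queue (128) and, if that is still below
 * ARCMSR_MAX_FREECCB_NUM, gains 64 extra CCBs of headroom (192 here).
 */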
@@ -3166,6 +3388,75 @@ polling_hbaD_ccb_retry:
return rtn;
}
+static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *poll_ccb)
+{
+ bool error;
+ uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
+ uint16_t cmdSMID;
+ unsigned long flags;
+ int rtn;
+ struct CommandControlBlock *pCCB;
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ polling_hbaE_ccb_retry:
+ poll_count++;
+ while (1) {
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ doneq_index = acb->doneq_index;
+ if ((readl(&reg->reply_post_producer_index) & 0xFFFF) ==
+ doneq_index) {
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ if (poll_ccb_done) {
+ rtn = SUCCESS;
+ break;
+ } else {
+ msleep(25);
+ if (poll_count > 40) {
+ rtn = FAILED;
+ break;
+ }
+ goto polling_hbaE_ccb_retry;
+ }
+ }
+ cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
+ doneq_index++;
+ if (doneq_index >= acb->completionQ_entry)
+ doneq_index = 0;
+ acb->doneq_index = doneq_index;
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ pCCB = acb->pccb_pool[cmdSMID];
+ poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
+ /* check if command done with no error*/
+ if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
+ if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
+ pr_notice("arcmsr%d: scsi id = %d "
+ "lun = %d ccb = '0x%p' poll command "
+ "abort successfully\n"
+ , acb->host->host_no
+ , pCCB->pcmd->device->id
+ , (u32)pCCB->pcmd->device->lun
+ , pCCB);
+ pCCB->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(pCCB);
+ continue;
+ }
+ pr_notice("arcmsr%d: polling an illegal "
+ "ccb command done ccb = '0x%p' "
+ "ccboutstandingcount = %d\n"
+ , acb->host->host_no
+ , pCCB
+ , atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+ error = (acb->pCompletionQ[doneq_index].cmdFlag &
+ ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
+ arcmsr_report_ccb_state(acb, pCCB, error);
+ }
+ writel(doneq_index, &reg->reply_post_consumer_index);
+ return rtn;
+}
+
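/*
 * A minimal standalone sketch of the type-E completion ring consumed
 * above (arcmsr_hbaE_drain_doneq_sketch and handle_ccb are hypothetical
 * names; locking and error flags omitted): the IOP advances
 * reply_post_producer_index as it posts SMIDs into pCompletionQ, the
 * driver chases it with doneq_index, wraps at completionQ_entry, and
 * publishes the consumer index back so the adapter can reuse slots.
 */
static void arcmsr_hbaE_drain_doneq_sketch(struct AdapterControlBlock *acb)
{
	struct MessageUnit_E __iomem *reg = acb->pmuE;

	while ((readl(&reg->reply_post_producer_index) & 0xFFFF) !=
			acb->doneq_index) {
		uint16_t smid = acb->pCompletionQ[acb->doneq_index].cmdSMID;

		if (++acb->doneq_index >= acb->completionQ_entry)
			acb->doneq_index = 0;	/* ring wraparound */
		handle_ccb(acb->pccb_pool[smid]);
	}
	writel(acb->doneq_index, &reg->reply_post_consumer_index);
}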
static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
@@ -3188,10 +3479,95 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
case ACB_ADAPTER_TYPE_D:
rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
+ break;
}
return rtn;
}
+static void arcmsr_set_iop_datetime(struct timer_list *t)
+{
+ struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
+ unsigned int next_time;
+ struct tm tm;
+
+ union {
+ struct {
+ uint16_t signature;
+ uint8_t year;
+ uint8_t month;
+ uint8_t date;
+ uint8_t hour;
+ uint8_t minute;
+ uint8_t second;
+ } a;
+ struct {
+ uint32_t msg_time[2];
+ } b;
+ } datetime;
+
+ time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm);
+
+ datetime.a.signature = 0x55AA;
+ datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */
+ datetime.a.month = tm.tm_mon;
+ datetime.a.date = tm.tm_mday;
+ datetime.a.hour = tm.tm_hour;
+ datetime.a.minute = tm.tm_min;
+ datetime.a.second = tm.tm_sec;
+
+ switch (pacb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = pacb->pmuA;
+ writel(datetime.b.msg_time[0], &reg->message_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->message_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_B: {
+ uint32_t __iomem *rwbuffer;
+ struct MessageUnit_B *reg = pacb->pmuB;
+ rwbuffer = reg->message_rwbuffer;
+ writel(datetime.b.msg_time[0], rwbuffer++);
+ writel(datetime.b.msg_time[1], rwbuffer++);
+ writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = pacb->pmuC;
+ writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_D: {
+ uint32_t __iomem *rwbuffer;
+ struct MessageUnit_D *reg = pacb->pmuD;
+ rwbuffer = reg->msgcode_rwbuffer;
+ writel(datetime.b.msg_time[0], rwbuffer++);
+ writel(datetime.b.msg_time[1], rwbuffer++);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = pacb->pmuE;
+ writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pacb->out_doorbell, &reg->iobound_doorbell);
+ break;
+ }
+ }
+ if (sys_tz.tz_minuteswest)
+ next_time = ARCMSR_HOURS;
+ else
+ next_time = ARCMSR_MINUTES;
+ mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time));
+}
+
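/*
 * Layout of the datetime union above: the byte-wide fields of struct a
 * overlay the two u32 words of struct b, so on a little-endian host
 * msg_time[0] carries { signature, year, month } and msg_time[1]
 * carries { date, hour, minute, second }; the whole timestamp can then
 * be handed to the IOP with just two writel() calls per adapter type.
 */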
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
@@ -3208,6 +3584,10 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
dma_coherent_handle = acb->dma_coherent_handle2;
break;
+ case ACB_ADAPTER_TYPE_E:
+ dma_coherent_handle = acb->dma_coherent_handle +
+ offsetof(struct CommandControlBlock, arcmsr_cdb);
+ break;
default:
dma_coherent_handle = acb->dma_coherent_handle;
break;
@@ -3316,6 +3696,29 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
}
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
+ writel(ARCMSR_SIGNATURE_1884, &reg->msgcode_rwbuffer[1]);
+ writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
+ writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]);
+ writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
+ dma_coherent_handle = acb->dma_coherent_handle2;
+ cdb_phyaddr = (uint32_t)(dma_coherent_handle & 0xffffffff);
+ cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
+ writel(cdb_phyaddr, &reg->msgcode_rwbuffer[5]);
+ writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[6]);
+ writel(acb->roundup_ccbsize, &reg->msgcode_rwbuffer[7]);
+ writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
+ pr_notice("arcmsr%d: 'set command Q window' timeout \n",
+ acb->host->host_no);
+ return 1;
+ }
+ }
+ break;
}
return 0;
}
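/*
 * Note on the (handle >> 16) >> 16 idiom used above: a single >> 32 is
 * undefined behaviour when dma_addr_t is a 32-bit type, while two
 * 16-bit shifts are well defined on every configuration. The same
 * split expressed as a hypothetical helper:
 */
static inline uint32_t arcmsr_dma_hi32_sketch(dma_addr_t addr)
{
	return (uint32_t)((addr >> 16) >> 16);
}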
@@ -3356,83 +3759,22 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
}
break;
- }
-}
-
-static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_A __iomem *reg = acb->pmuA;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- }
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- }
- return;
-}
-
-static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_B *reg = acb->pmuB;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- }
- writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- }
- return;
-}
-
-static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_C __iomem *reg = acb->pmuC;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ do {
+ firmware_state = readl(&reg->outbound_msgaddr1);
+ } while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0);
}
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ break;
}
- return;
}
-static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
+static void arcmsr_request_device_map(struct timer_list *t)
{
- struct MessageUnit_D *reg = acb->pmuD;
-
+ struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
- ((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
- ((acb->acb_flags & ACB_F_ABORT) != 0)) {
+ (acb->acb_flags & ACB_F_BUS_RESET) ||
+ (acb->acb_flags & ACB_F_ABORT)) {
mod_timer(&acb->eternal_timer,
jiffies + msecs_to_jiffies(6 * HZ));
} else {
@@ -3448,32 +3790,40 @@ static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
msecs_to_jiffies(6 * HZ));
return;
}
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG,
- reg->inbound_msgaddr0);
- mod_timer(&acb->eternal_timer, jiffies +
- msecs_to_jiffies(6 * HZ));
- }
-}
-
-static void arcmsr_request_device_map(struct timer_list *t)
-{
- struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
- switch (acb->adapter_type) {
+ switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- arcmsr_hbaA_request_device_map(acb);
- }
- break;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ break;
+ }
case ACB_ADAPTER_TYPE_B: {
- arcmsr_hbaB_request_device_map(acb);
- }
- break;
+ struct MessageUnit_B *reg = acb->pmuB;
+ writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
+ break;
+ }
case ACB_ADAPTER_TYPE_C: {
- arcmsr_hbaC_request_device_map(acb);
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ break;
+ }
+ default:
+ return;
}
- break;
- case ACB_ADAPTER_TYPE_D:
- arcmsr_hbaD_request_device_map(acb);
- break;
+ acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
}
}
@@ -3524,6 +3874,20 @@ static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
}
}
+static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *pmu = pACB->pmuE;
+
+ pACB->acb_flags |= ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &pmu->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'start adapter "
+ "background rebulid' timeout \n", pACB->host->host_no);
+ }
+}
+
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -3539,6 +3903,9 @@ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_start_bgrb(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_start_bgrb(acb);
+ break;
}
}
@@ -3558,10 +3925,19 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
- /*clear interrupt and message state*/
- writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
+ uint32_t outbound_doorbell, i;
+ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
/* let IOP know data has been read */
+ for (i = 0; i < 200; i++) {
+ msleep(20);
+ outbound_doorbell = readl(reg->iop2drv_doorbell);
+ if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
+ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
+ writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
+ } else
+ break;
+ }
}
break;
case ACB_ADAPTER_TYPE_C: {
@@ -3607,6 +3983,27 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
}
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ uint32_t i, tmp;
+
+ acb->in_doorbell = readl(&reg->iobound_doorbell);
+ writel(0, &reg->host_int_status); /*clear interrupt*/
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ for (i = 0; i < 200; i++) {
+ msleep(20);
+ tmp = acb->in_doorbell;
+ acb->in_doorbell = readl(&reg->iobound_doorbell);
+ if ((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
+ writel(0, &reg->host_int_status); /*clear interrupt*/
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ } else
+ break;
+ }
+ }
+ break;
}
}
@@ -3658,6 +4055,19 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
writel(0xD, &pmuC->write_sequence);
} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
+ } else if (acb->dev_id == 0x1884) {
+ struct MessageUnit_E __iomem *pmuE = acb->pmuE;
+ do {
+ count++;
+ writel(0x4, &pmuE->write_sequence_3xxx);
+ writel(0xB, &pmuE->write_sequence_3xxx);
+ writel(0x2, &pmuE->write_sequence_3xxx);
+ writel(0x7, &pmuE->write_sequence_3xxx);
+ writel(0xD, &pmuE->write_sequence_3xxx);
+ mdelay(10);
+ } while (((readl(&pmuE->host_diagnostic_3xxx) &
+ ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
+ writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
} else if ((acb->dev_id == 0x1214)) {
writel(0x20, pmuD->reset_request);
} else {
@@ -3671,6 +4081,45 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
msleep(1000);
return;
}
+
+static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
+{
+ bool rtn = true;
+
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A:{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ rtn = ((readl(&reg->outbound_msgaddr1) &
+ ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_B:{
+ struct MessageUnit_B *reg = acb->pmuB;
+ rtn = ((readl(reg->iop2drv_doorbell) &
+ ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C:{
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ rtn = (readl(&reg->host_diagnostic) & 0x04) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D:{
+ struct MessageUnit_D *reg = acb->pmuD;
+ rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ?
+ true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_E:{
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ rtn = (readl(&reg->host_diagnostic_3xxx) &
+ ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
+ }
+ break;
+ }
+ return rtn;
+}
+
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
uint32_t intmask_org;
@@ -3703,7 +4152,7 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
rtnval = arcmsr_abort_allcmd(acb);
/* clear all outbound posted Q */
arcmsr_done4abort_postqueue(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
scsi_dma_unmap(ccb->pcmd);
@@ -3725,197 +4174,55 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
struct AdapterControlBlock *acb;
- uint32_t intmask_org, outbound_doorbell;
int retry_count = 0;
int rtn = FAILED;
acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
- printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts);
+ pr_notice("arcmsr: executing bus reset eh.....num_resets = %d,"
+ " num_aborts = %d \n", acb->num_resets, acb->num_aborts);
acb->num_resets++;
- switch(acb->adapter_type){
- case ACB_ADAPTER_TYPE_A:{
- if (acb->acb_flags & ACB_F_BUS_RESET){
- long timeout;
- printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
- if (timeout) {
- return SUCCESS;
- }
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_A __iomem *reg;
- reg = acb->pmuA;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
-sleep_again:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto sleep_again;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- /* clear Qbuffer if door bell ringed */
- outbound_doorbell = readl(&reg->outbound_doorbell);
- writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
- writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
- /* enable outbound Post Queue,outbound doorbell Interrupt */
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_B:{
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = FAILED;
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_C:{
- if (acb->acb_flags & ACB_F_BUS_RESET) {
- long timeout;
- printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
- if (timeout) {
- return SUCCESS;
- }
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_C __iomem *reg;
- reg = acb->pmuC;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
-sleep:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto sleep;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- /* clear Qbuffer if door bell ringed */
- arcmsr_clear_doorbell_queue_buffer(acb);
- /* enable outbound Post Queue,outbound doorbell Interrupt */
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_D: {
- if (acb->acb_flags & ACB_F_BUS_RESET) {
- long timeout;
- pr_notice("arcmsr: there is an bus reset"
- " eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags
- & ACB_F_BUS_RESET) == 0, 220 * HZ);
- if (timeout)
- return SUCCESS;
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_D *reg;
- reg = acb->pmuD;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
- nap:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(reg->sample_at_reset) & 0x80) != 0) {
- pr_err("arcmsr%d: waiting for "
- "hw bus reset return, retry=%d\n",
- acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- pr_err("arcmsr%d: waiting for hw bus"
- " reset return, "
- "RETRY TERMINATED!!\n",
- acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto nap;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- arcmsr_clear_doorbell_queue_buffer(acb);
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer,
- jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- pr_err("arcmsr: scsi bus reset "
- "eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer,
- jiffies + msecs_to_jiffies(6 * HZ));
- rtn = SUCCESS;
+ if (acb->acb_flags & ACB_F_BUS_RESET) {
+ long timeout;
+ pr_notice("arcmsr: there is a bus reset eh proceeding...\n");
+ timeout = wait_event_timeout(wait_q, (acb->acb_flags
+ & ACB_F_BUS_RESET) == 0, 220 * HZ);
+ if (timeout)
+ return SUCCESS;
+ }
+ acb->acb_flags |= ACB_F_BUS_RESET;
+ if (!arcmsr_iop_reset(acb)) {
+ arcmsr_hardware_reset(acb);
+ acb->acb_flags &= ~ACB_F_IOP_INITED;
+wait_reset_done:
+ ssleep(ARCMSR_SLEEPTIME);
+ if (arcmsr_reset_in_progress(acb)) {
+ if (retry_count > ARCMSR_RETRYCOUNT) {
+ acb->fw_flag = FW_DEADLOCK;
+ pr_notice("arcmsr%d: waiting for hw bus reset"
+ " return, RETRY TERMINATED!!\n",
+ acb->host->host_no);
+ return FAILED;
}
- break;
+ retry_count++;
+ goto wait_reset_done;
}
+ arcmsr_iop_init(acb);
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies +
+ msecs_to_jiffies(6 * HZ));
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ rtn = SUCCESS;
+ pr_notice("arcmsr: scsi bus reset eh returns with success\n");
+ } else {
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies +
+ msecs_to_jiffies(6 * HZ));
+ rtn = SUCCESS;
}
return rtn;
}
@@ -3953,7 +4260,7 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
}
intmask_org = arcmsr_disable_outbound_ints(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
ccb->startdone = ARCMSR_CCB_ABORTED;
@@ -3999,6 +4306,7 @@ static const char *arcmsr_info(struct Scsi_Host *host)
case PCI_DEVICE_ID_ARECA_1680:
case PCI_DEVICE_ID_ARECA_1681:
case PCI_DEVICE_ID_ARECA_1880:
+ case PCI_DEVICE_ID_ARECA_1884:
type = "SAS/SATA";
break;
default:
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index f4775ca70bab..27bda2b05de6 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2011,7 +2011,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
* have valid data in the sense buffer that could
* confuse the higher levels.
*/
- memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
+ memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
//printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id);
//{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); }
/*
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 3e1caec82554..10a63be92544 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1957,7 +1957,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
};
- *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
+ *npciids = ARRAY_SIZE(__pciids);
*pciids = __pciids;
}
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index df6760ca0911..9685efc59b16 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -35,10 +35,10 @@
#define BFA_TRC_TS(_trcm) \
({ \
- struct timeval tv; \
+ struct timespec64 ts; \
\
- do_gettimeofday(&tv); \
- (tv.tv_sec*1000000+tv.tv_usec); \
+ ktime_get_ts64(&ts); \
+ (ts.tv_sec * 1000000 + ts.tv_nsec / 1000); \
})
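/*
 * The same microsecond stamp can also be had from the ktime API; a
 * minimal alternative sketch (the patch keeps the open-coded
 * timespec64 form above so the macro's callers stay unchanged):
 */
static inline s64 bfa_trc_ts_us_sketch(void)
{
	return ktime_to_us(ktime_get());
}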
#ifndef BFA_TRC_TS
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index e81707f938cb..3d0c96a5c873 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -1455,7 +1455,8 @@ struct bfa_aen_entry_s {
enum bfa_aen_category aen_category;
u32 aen_type;
union bfa_aen_data_u aen_data;
- struct timeval aen_tv;
+ u64 aen_tv_sec;
+ u64 aen_tv_usec;
u32 seq_num;
u32 bfad_num;
};
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index b8dadc9cc993..d3b00a475aeb 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1250,8 +1250,8 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
rspnid->dap = s_id;
- rspnid->spn_len = (u8) strlen((char *)name);
- strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len);
+ strlcpy(rspnid->spn, name, sizeof(rspnid->spn));
+ rspnid->spn_len = (u8) strlen(rspnid->spn);
return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
}
@@ -1271,8 +1271,8 @@ fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
rsnn_nn->node_name = node_name;
- rsnn_nn->snn_len = (u8) strlen((char *)name);
- strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len);
+ strlcpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
+ rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn);
return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
}
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 5f53b3276234..2c85f5b1f9c1 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -468,7 +468,7 @@ bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
}
bfa_status_t
-bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
+bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time)
{
struct bfa_itnim_s *itnim;
struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
@@ -1478,6 +1478,7 @@ bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
return BFA_STATUS_IOPROFILE_OFF;
itnim->ioprofile.index = BFA_IOBUCKET_MAX;
+ /* unsigned 32-bit time_t overflow here in y2106 */
itnim->ioprofile.io_profile_start_time =
bfa_io_profile_start_time(itnim->bfa);
itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index e93921dec347..ec8f863540ae 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -136,7 +136,7 @@ struct bfa_fcpim_s {
struct bfa_fcpim_del_itn_stats_s del_itn_stats;
bfa_boolean_t ioredirect;
bfa_boolean_t io_profile;
- u32 io_profile_start_time;
+ time64_t io_profile_start_time;
bfa_fcpim_profile_t profile_comp;
bfa_fcpim_profile_t profile_start;
};
@@ -310,7 +310,7 @@ bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
struct bfa_itnim_iostats_s *stats, u8 lp_tag);
void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
struct bfa_itnim_iostats_s *itnim_stats);
-bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
+bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time);
bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
#define bfa_fcpim_ioredirect_enabled(__bfa) \
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 4aa61e20e82d..932feb0ed4da 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -769,23 +769,23 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strncpy((char *)&port_cfg->sym_name, model,
- BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcpy(port_cfg->sym_name.symname, model,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/* Driver Version */
- strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
- BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcat(port_cfg->sym_name.symname, driver_info->version,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/* Host machine name */
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_machine_name,
- BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_machine_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/*
* Host OS Info :
@@ -793,24 +793,24 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
* OS name string and instead copy the entire OS info string (64 bytes).
*/
if (driver_info->host_os_patch[0] == '\0') {
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_name,
- BFA_FCS_OS_STR_LEN);
- strncat((char *)&port_cfg->sym_name,
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
} else {
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_name,
- BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
- strncat((char *)&port_cfg->sym_name,
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Append host OS Patch Info */
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_patch,
- BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_patch,
+ BFA_SYMNAME_MAXLEN);
}
/* null terminate */
@@ -830,26 +830,26 @@ bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strncpy((char *)&port_cfg->node_sym_name, model,
- BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcpy(port_cfg->node_sym_name.symname, model,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Driver Version */
- strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version,
- BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcat(port_cfg->node_sym_name.symname, (char *)driver_info->version,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Host machine name */
- strncat((char *)&port_cfg->node_sym_name,
- (char *)driver_info->host_machine_name,
- BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcat(port_cfg->node_sym_name.symname,
+ driver_info->host_machine_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* null terminate */
port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
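/*
 * Why the strncpy()/strncat()->strlcpy()/strlcat() conversions in this
 * file are safe: strlcpy() bounds the copy by the destination size and
 * always NUL-terminates, while the old calls were bounded by the
 * *source* length, which can overrun the destination and never
 * guarantees termination. A minimal illustration (sizes assumed):
 */
static void symname_copy_sketch(void)
{
	char dst[8];

	strlcpy(dst, "0123456789", sizeof(dst));	/* dst = "0123456", NUL-terminated */
}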
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 638c0a2857f7..b4f2c1d8742e 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -2642,10 +2642,10 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
hba_attr->fw_version);
- strncpy(hba_attr->driver_version, (char *)driver_info->version,
+ strlcpy(hba_attr->driver_version, (char *)driver_info->version,
sizeof(hba_attr->driver_version));
- strncpy(hba_attr->os_name, driver_info->host_os_name,
+ strlcpy(hba_attr->os_name, driver_info->host_os_name,
sizeof(hba_attr->os_name));
/*
@@ -2653,23 +2653,23 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
* to the os name along with a separator
*/
if (driver_info->host_os_patch[0] != '\0') {
- strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
- strncat(hba_attr->os_name, driver_info->host_os_patch,
- sizeof(driver_info->host_os_patch));
+ strlcat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(hba_attr->os_name));
+ strlcat(hba_attr->os_name, driver_info->host_os_patch,
+ sizeof(hba_attr->os_name));
}
/* Retrieve the max frame size from the port attr */
bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
- strncpy(hba_attr->node_sym_name.symname,
+ strlcpy(hba_attr->node_sym_name.symname,
port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
strcpy(hba_attr->vendor_info, "QLogic");
hba_attr->num_ports =
cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
- strncpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
+ strlcpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
}
@@ -2736,20 +2736,20 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
/*
* OS device Name
*/
- strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
+ strlcpy(port_attr->os_device_name, driver_info->os_device_name,
sizeof(port_attr->os_device_name));
/*
* Host name
*/
- strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
+ strlcpy(port_attr->host_name, driver_info->host_machine_name,
sizeof(port_attr->host_name));
port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
port_attr->port_name = bfa_fcs_lport_get_pwwn(port);
- strncpy(port_attr->port_sym_name.symname,
- (char *)&bfa_fcs_lport_get_psym_name(port), BFA_SYMNAME_MAXLEN);
+ strlcpy(port_attr->port_sym_name.symname,
+ bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN);
bfa_fcs_lport_get_attr(port, &lport_attr);
port_attr->port_type = cpu_to_be32(lport_attr.port_type);
port_attr->scos = pport_attr.cos_supported;
@@ -3229,7 +3229,7 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
rsp_str[gmal_entry->len-1] = 0;
/* copy IP Address to fabric */
- strncpy(bfa_fcs_lport_get_fabric_ipaddr(port),
+ strlcpy(bfa_fcs_lport_get_fabric_ipaddr(port),
gmal_entry->ip_addr,
BFA_FCS_FABRIC_IPADDR_SZ);
break;
@@ -4667,21 +4667,13 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
* to that of the base port.
*/
- strncpy((char *)psymbl,
- (char *) &
- (bfa_fcs_lport_get_psym_name
+ strlcpy(symbl,
+ (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
- strlen((char *) &
- bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
- (port->fcs))));
-
- /* Ensure we have a null terminating string. */
- ((char *)psymbl)[strlen((char *) &
- bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
- (port->fcs)))] = 0;
- strncat((char *)psymbl,
- (char *) &(bfa_fcs_lport_get_psym_name(port)),
- strlen((char *) &bfa_fcs_lport_get_psym_name(port)));
+ sizeof(symbl));
+
+ strlcat(symbl, (char *)&(bfa_fcs_lport_get_psym_name(port)),
+ sizeof(symbl));
} else {
psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
}
@@ -5173,7 +5165,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
u8 symbl[256];
- u8 *psymbl = &symbl[0];
int len;
/* Avoid sending RSPN in the following states. */
@@ -5203,22 +5194,17 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
* For Vports, we append the vport's port symbolic name
* to that of the base port.
*/
- strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name
+ strlcpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
- strlen((char *)&bfa_fcs_lport_get_psym_name(
- bfa_fcs_get_base_port(port->fcs))));
-
- /* Ensure we have a null terminating string. */
- ((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
- bfa_fcs_get_base_port(port->fcs)))] = 0;
+ sizeof(symbl));
- strncat((char *)psymbl,
+ strlcat(symbl,
(char *)&(bfa_fcs_lport_get_psym_name(port)),
- strlen((char *)&bfa_fcs_lport_get_psym_name(port)));
+ sizeof(symbl));
}
len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
- bfa_fcs_lport_get_fcid(port), 0, psymbl);
+ bfa_fcs_lport_get_fcid(port), 0, symbl);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 256f4afaccf9..16d3aeb0e572 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -1809,13 +1809,12 @@ static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_ctrl_req_s enable_req;
- struct timeval tv;
bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
bfa_ioc_portid(ioc));
enable_req.clscode = cpu_to_be16(ioc->clscode);
- do_gettimeofday(&tv);
- enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
+ /* unsigned 32-bit time_t overflow in y2106 */
+ enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
@@ -1826,6 +1825,9 @@ bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
bfa_ioc_portid(ioc));
+ disable_req.clscode = cpu_to_be16(ioc->clscode);
+ /* unsigned 32-bit time_t overflow in y2106 */
+ disable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
@@ -2803,7 +2805,7 @@ void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
- strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
void
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index da1721e0d167..079bc77f4102 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -96,14 +96,11 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
port->stats_busy = BFA_FALSE;
if (status == BFA_STATUS_OK) {
- struct timeval tv;
-
memcpy(port->stats, port->stats_dma.kva,
sizeof(union bfa_port_stats_u));
bfa_port_stats_swap(port, port->stats);
- do_gettimeofday(&tv);
- port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time;
+ port->stats->fc.secs_reset = ktime_get_seconds() - port->stats_reset_time;
}
if (port->stats_cbfn) {
@@ -124,16 +121,13 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
static void
bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
{
- struct timeval tv;
-
port->stats_status = status;
port->stats_busy = BFA_FALSE;
/*
* re-initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- port->stats_reset_time = tv.tv_sec;
+ port->stats_reset_time = ktime_get_seconds();
if (port->stats_cbfn) {
port->stats_cbfn(port->stats_cbarg, status);
@@ -471,8 +465,6 @@ void
bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
void *dev, struct bfa_trc_mod_s *trcmod)
{
- struct timeval tv;
-
WARN_ON(!port);
port->dev = dev;
@@ -494,8 +486,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
/*
* initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- port->stats_reset_time = tv.tv_sec;
+ port->stats_reset_time = ktime_get_seconds();
bfa_trc(port, 0);
}
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
index 26dc1bf14c85..0c3b200243ca 100644
--- a/drivers/scsi/bfa/bfa_port.h
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -36,7 +36,7 @@ struct bfa_port_s {
bfa_port_stats_cbfn_t stats_cbfn;
void *stats_cbarg;
bfa_status_t stats_status;
- u32 stats_reset_time;
+ time64_t stats_reset_time;
union bfa_port_stats_u *stats;
struct bfa_dma_s stats_dma;
bfa_boolean_t endis_pending;
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index e640223bab3c..6fc34fb20f00 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -288,18 +288,6 @@ plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
return 0;
}
-static u64
-bfa_get_log_time(void)
-{
- u64 system_time = 0;
- struct timeval tv;
- do_gettimeofday(&tv);
-
- /* We are interested in seconds only. */
- system_time = tv.tv_sec;
- return system_time;
-}
-
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
@@ -320,7 +308,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
- pl_recp->tv = bfa_get_log_time();
+ pl_recp->tv = ktime_get_real_seconds();
BFA_PL_LOG_REC_INCR(plog->tail);
if (plog->head == plog->tail)
@@ -350,8 +338,8 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
lp.eid = event;
lp.log_type = BFA_PL_LOG_TYPE_STRING;
lp.misc = misc;
- strncpy(lp.log_entry.string_log, log_str,
- BFA_PL_STRING_LOG_SZ - 1);
+ strlcpy(lp.log_entry.string_log, log_str,
+ BFA_PL_STRING_LOG_SZ);
lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
bfa_plog_add(plog, &lp);
}
@@ -3047,7 +3035,6 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
struct bfa_fcport_ln_s *ln = &fcport->ln;
- struct timeval tv;
fcport->bfa = bfa;
ln->fcport = fcport;
@@ -3060,8 +3047,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
/*
* initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- fcport->stats_reset_time = tv.tv_sec;
+ fcport->stats_reset_time = ktime_get_seconds();
fcport->stats_dma_ready = BFA_FALSE;
/*
@@ -3295,9 +3281,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
union bfa_fcport_stats_u *ret;
if (complete) {
- struct timeval tv;
- if (fcport->stats_status == BFA_STATUS_OK)
- do_gettimeofday(&tv);
+ time64_t time = ktime_get_seconds();
list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
bfa_q_deq(&fcport->stats_pending_q, &qe);
@@ -3312,7 +3296,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
bfa_fcport_fcoe_stats_swap(&ret->fcoe,
&fcport->stats->fcoe);
ret->fcoe.secs_reset =
- tv.tv_sec - fcport->stats_reset_time;
+ time - fcport->stats_reset_time;
}
}
bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
@@ -3373,13 +3357,10 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
struct list_head *qe, *qen;
if (complete) {
- struct timeval tv;
-
/*
* re-initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- fcport->stats_reset_time = tv.tv_sec;
+ fcport->stats_reset_time = ktime_get_seconds();
list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
bfa_q_deq(&fcport->statsclr_pending_q, &qe);
cb = (struct bfa_cb_pending_q_s *)qe;
@@ -6148,13 +6129,13 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
/*
* D-port
*/
-#define bfa_dport_result_start(__dport, __mode) do { \
- (__dport)->result.start_time = bfa_get_log_time(); \
- (__dport)->result.status = DPORT_TEST_ST_INPRG; \
- (__dport)->result.mode = (__mode); \
- (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
- (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
- (__dport)->result.lpcnt = (__dport)->lpcnt; \
+#define bfa_dport_result_start(__dport, __mode) do { \
+ (__dport)->result.start_time = ktime_get_real_seconds(); \
+ (__dport)->result.status = DPORT_TEST_ST_INPRG; \
+ (__dport)->result.mode = (__mode); \
+ (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
+ (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
+ (__dport)->result.lpcnt = (__dport)->lpcnt; \
} while (0)
static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
@@ -6588,7 +6569,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
switch (dport->i2hmsg.scn.state) {
case BFI_DPORT_SCN_TESTCOMP:
- dport->result.end_time = bfa_get_log_time();
+ dport->result.end_time = ktime_get_real_seconds();
bfa_trc(dport->bfa, dport->result.end_time);
dport->result.status = msg->info.testcomp.status;
@@ -6635,7 +6616,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
case BFI_DPORT_SCN_SUBTESTSTART:
subtesttype = msg->info.teststart.type;
dport->result.subtest[subtesttype].start_time =
- bfa_get_log_time();
+ ktime_get_real_seconds();
dport->result.subtest[subtesttype].status =
DPORT_TEST_ST_INPRG;
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index ea2278bc78a8..7e8fb6231d49 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -505,7 +505,7 @@ struct bfa_fcport_s {
struct list_head stats_pending_q;
struct list_head statsclr_pending_q;
bfa_boolean_t stats_qfull;
- u32 stats_reset_time; /* stats reset time stamp */
+ time64_t stats_reset_time; /* stats reset time stamp */
bfa_boolean_t diag_busy; /* diag busy status */
bfa_boolean_t beacon; /* port beacon status */
bfa_boolean_t link_e2e_beacon; /* link beacon status */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index cf0466686804..bd7e6a6fc1f1 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -610,13 +610,12 @@ bfad_hal_mem_alloc(struct bfad_s *bfad)
/* Iterate through the KVA meminfo queue */
list_for_each(km_qe, &kva_info->qe) {
kva_elem = (struct bfa_mem_kva_s *) km_qe;
- kva_elem->kva = vmalloc(kva_elem->mem_len);
+ kva_elem->kva = vzalloc(kva_elem->mem_len);
if (kva_elem->kva == NULL) {
bfad_hal_mem_release(bfad);
rc = BFA_STATUS_ENOMEM;
goto ext;
}
- memset(kva_elem->kva, 0, kva_elem->mem_len);
}
/* Iterate through the DMA meminfo queue */
@@ -981,20 +980,20 @@ bfad_start_ops(struct bfad_s *bfad) {
/* Fill the driver_info info to fcs*/
memset(&driver_info, 0, sizeof(driver_info));
- strncpy(driver_info.version, BFAD_DRIVER_VERSION,
- sizeof(driver_info.version) - 1);
+ strlcpy(driver_info.version, BFAD_DRIVER_VERSION,
+ sizeof(driver_info.version));
if (host_name)
- strncpy(driver_info.host_machine_name, host_name,
- sizeof(driver_info.host_machine_name) - 1);
+ strlcpy(driver_info.host_machine_name, host_name,
+ sizeof(driver_info.host_machine_name));
if (os_name)
- strncpy(driver_info.host_os_name, os_name,
- sizeof(driver_info.host_os_name) - 1);
+ strlcpy(driver_info.host_os_name, os_name,
+ sizeof(driver_info.host_os_name));
if (os_patch)
- strncpy(driver_info.host_os_patch, os_patch,
- sizeof(driver_info.host_os_patch) - 1);
+ strlcpy(driver_info.host_os_patch, os_patch,
+ sizeof(driver_info.host_os_patch));
- strncpy(driver_info.os_device_name, bfad->pci_name,
- sizeof(driver_info.os_device_name) - 1);
+ strlcpy(driver_info.os_device_name, bfad->pci_name,
+ sizeof(driver_info.os_device_name));
/* FCS driver info init */
spin_lock_irqsave(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 13db3b7bc873..d4d276c757ea 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -487,7 +487,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) vport->drv_port.im_port;
struct bfad_s *bfad = im_port->bfad;
- struct bfad_port_s *port;
struct bfa_fcs_vport_s *fcs_vport;
struct Scsi_Host *vshost;
wwn_t pwwn;
@@ -502,8 +501,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
return 0;
}
- port = im_port->port;
-
vshost = vport->drv_port.im_port->shost;
u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
@@ -843,7 +840,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
char symname[BFA_SYMNAME_MAXLEN];
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
- strncpy(symname, port_attr.port_cfg.sym_name.symname,
+ strlcpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
return snprintf(buf, PAGE_SIZE, "%s\n", symname);
}
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 72ca2a2e08e2..3976e787ba64 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -127,7 +127,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
/* fill in driver attr info */
strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
- strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
+ strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
iocmd->ioc_attr.adapter_attr.fw_ver);
@@ -315,9 +315,9 @@ bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
iocmd->attr.port_type = port_attr.port_type;
iocmd->attr.loopback = port_attr.loopback;
iocmd->attr.authfail = port_attr.authfail;
- strncpy(iocmd->attr.port_symname.symname,
+ strlcpy(iocmd->attr.port_symname.symname,
port_attr.port_cfg.sym_name.symname,
- sizeof(port_attr.port_cfg.sym_name.symname));
+ sizeof(iocmd->attr.port_symname.symname));
iocmd->status = BFA_STATUS_OK;
return 0;
@@ -2094,13 +2094,11 @@ bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
struct bfa_bsg_fcpim_profile_s *iocmd =
(struct bfa_bsg_fcpim_profile_s *)cmd;
- struct timeval tv;
unsigned long flags;
- do_gettimeofday(&tv);
spin_lock_irqsave(&bfad->bfad_lock, flags);
if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
- iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
+ iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds());
else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -3135,7 +3133,8 @@ bfad_im_bsg_vendor_request(struct bsg_job *job)
struct fc_bsg_request *bsg_request = job->request;
struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
- struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
struct bfad_s *bfad = im_port->bfad;
void *payload_kbuf;
int rc = -EINVAL;
@@ -3350,7 +3349,8 @@ int
bfad_im_bsg_els_ct_request(struct bsg_job *job)
{
struct bfa_bsg_data *bsg_data;
- struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
struct bfad_s *bfad = im_port->bfad;
bfa_bsg_fcpt_t *bsg_fcpt;
struct bfad_fcxp *drv_fcxp;
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 05f523971348..349cfe7d055e 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -81,7 +81,7 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
- fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
+ fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n",
@@ -89,8 +89,6 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
return -ENOMEM;
}
- memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
-
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc,
fw_debug->debug_buffer,
@@ -125,7 +123,7 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
- fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
+ fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n",
@@ -133,8 +131,6 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
return -ENOMEM;
}
- memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
-
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc,
fw_debug->debug_buffer,
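The vzalloc() conversions here and in bfad_hal_mem_alloc() drop the follow-up memset() because vzalloc() is the zeroing form of vmalloc(); it behaves roughly like this sketch:

/* Rough equivalence, for illustration only: */
static void *vzalloc_equiv(unsigned long size)
{
	void *p = vmalloc(size);

	if (p)
		memset(p, 0, size);
	return p;
}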
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 24e657a4ec80..c05d6e91e4bd 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -546,6 +546,7 @@ int
bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
struct device *dev)
{
+ struct bfad_im_port_pointer *im_portp;
int error = 1;
mutex_lock(&bfad_mutex);
@@ -564,7 +565,8 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
goto out_free_idr;
}
- im_port->shost->hostdata[0] = (unsigned long)im_port;
+ im_portp = shost_priv(im_port->shost);
+ im_portp->p = im_port;
im_port->shost->unique_id = im_port->idr_id;
im_port->shost->this_id = -1;
im_port->shost->max_id = MAX_FCP_TARGET;
@@ -748,7 +750,7 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
sht->sg_tablesize = bfad->cfg_data.io_max_sge;
- return scsi_host_alloc(sht, sizeof(unsigned long));
+ return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer));
}
void
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index c81ec2a77ef5..af66275570c3 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -69,6 +69,16 @@ struct bfad_im_port_s {
struct fc_vport *fc_vport;
};
+struct bfad_im_port_pointer {
+ struct bfad_im_port_s *p;
+};
+
+static inline struct bfad_im_port_s *bfad_get_im_port(struct Scsi_Host *host)
+{
+ struct bfad_im_port_pointer *im_portp = shost_priv(host);
+ return im_portp->p;
+}
+
enum bfad_itnim_state {
ITNIM_STATE_NONE,
ITNIM_STATE_ONLINE,
@@ -131,16 +141,28 @@ struct bfad_im_s {
} while (0)
/* post fc_host vendor event */
-#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \
- do_gettimeofday(&(_entry)->aen_tv); \
- (_entry)->bfad_num = (_drv)->inst_no; \
- (_entry)->seq_num = (_cnt); \
- (_entry)->aen_category = (_cat); \
- (_entry)->aen_type = (_evt); \
- if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \
- queue_work((_drv)->im->drv_workq, \
- &(_drv)->im->aen_im_notify_work); \
-} while (0)
+static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
+ struct bfad_s *drv, int cnt,
+ enum bfa_aen_category cat,
+ enum bfa_ioc_aen_event evt)
+{
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ /*
+ * 'unsigned long aen_tv_sec' overflows in y2106 on 32-bit
+ * architectures, or in 2038 if user space interprets it
+ * as 'signed'.
+ */
+ entry->aen_tv_sec = ts.tv_sec;
+ entry->aen_tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ entry->bfad_num = drv->inst_no;
+ entry->seq_num = cnt;
+ entry->aen_category = cat;
+ entry->aen_type = evt;
+ if (drv->bfad_flags & BFAD_FC4_PROBE_DONE)
+ queue_work(drv->im->drv_workq, &drv->im->aen_im_notify_work);
+}
struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
struct bfad_s *);
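The bfad_im_port_pointer indirection above resolves a mismatch in how the driver's private Scsi_Host data is accessed: shost_priv() returns the address of the hostdata area itself, while the old code stored the im_port pointer as a value in hostdata[0], so the bsg paths that now use shost_priv() would otherwise dereference the wrong thing. Wrapping the pointer in a struct makes the store and the lookup agree on one layout:

/* Store side, from bfad_im_scsi_host_alloc() above: */
shost = scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer));
im_portp = shost_priv(shost);	/* points at the private area */
im_portp->p = im_port;		/* the area holds a single pointer */

/* Lookup side: */
im_port = bfad_get_im_port(shost);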
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e6b9de7d41ac..65de1d0578a1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1552,7 +1552,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
rc = bnx2fc_shost_config(lport, parent);
if (rc) {
- printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
+ printk(KERN_ERR PFX "Couldn't configure shost for %s\n",
interface->netdev->name);
goto lp_config_err;
}
@@ -1560,7 +1560,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
/* Initialize the libfc library */
rc = bnx2fc_libfc_config(lport);
if (rc) {
- printk(KERN_ERR PFX "Couldnt configure libfc\n");
+ printk(KERN_ERR PFX "Couldn't configure libfc\n");
goto shost_err;
}
fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 26de61d65a4d..e8ae4d671d23 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1857,16 +1857,15 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
* entries. Hence the limit with one page is 8192 task context
* entries.
*/
- hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_bd_dma,
- GFP_KERNEL);
+ hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_bd_dma,
+ GFP_KERNEL);
if (!hba->task_ctx_bd_tbl) {
printk(KERN_ERR PFX "unable to allocate task context BDT\n");
rc = -1;
goto out;
}
- memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
/*
* Allocate task_ctx which is an array of pointers pointing to
@@ -1895,16 +1894,15 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
for (i = 0; i < task_ctx_arr_sz; i++) {
- hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_dma[i],
- GFP_KERNEL);
+ hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_dma[i],
+ GFP_KERNEL);
if (!hba->task_ctx[i]) {
printk(KERN_ERR PFX "unable to alloc task context\n");
rc = -1;
goto out3;
}
- memset(hba->task_ctx[i], 0, PAGE_SIZE);
addr = (u64)hba->task_ctx_dma[i];
task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
task_ctx_bdt->lo = cpu_to_le32((u32)addr);
@@ -2033,28 +2031,23 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
}
for (i = 0; i < segment_count; ++i) {
- hba->hash_tbl_segments[i] =
- dma_alloc_coherent(&hba->pcidev->dev,
- BNX2FC_HASH_TBL_CHUNK_SIZE,
- &dma_segment_array[i],
- GFP_KERNEL);
+ hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ &dma_segment_array[i],
+ GFP_KERNEL);
if (!hba->hash_tbl_segments[i]) {
printk(KERN_ERR PFX "hash segment alloc failed\n");
goto cleanup_dma;
}
- memset(hba->hash_tbl_segments[i], 0,
- BNX2FC_HASH_TBL_CHUNK_SIZE);
}
- hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->hash_tbl_pbl_dma,
- GFP_KERNEL);
+ hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->hash_tbl_pbl_dma,
+ GFP_KERNEL);
if (!hba->hash_tbl_pbl) {
printk(KERN_ERR PFX "hash table pbl alloc failed\n");
goto cleanup_dma;
}
- memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
pbl = hba->hash_tbl_pbl;
for (i = 0; i < segment_count; ++i) {
@@ -2111,27 +2104,26 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
- hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
- &hba->t2_hash_tbl_ptr_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev,
+ mem_size,
+ &hba->t2_hash_tbl_ptr_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl_ptr) {
printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
mem_size = BNX2FC_NUM_MAX_SESS *
sizeof(struct fcoe_t2_hash_table_entry);
- hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
- &hba->t2_hash_tbl_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl) {
printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->t2_hash_tbl, 0x00, mem_size);
for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
addr = (unsigned long) hba->t2_hash_tbl_dma +
((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
@@ -2148,16 +2140,14 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
}
- hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->stats_buf_dma,
- GFP_KERNEL);
+ hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->stats_buf_dma,
+ GFP_KERNEL);
if (!hba->stats_buffer) {
printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->stats_buffer, 0x00, PAGE_SIZE);
return 0;
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index a8ae1a019eea..e3d1c7c440c8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -672,56 +672,52 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
- &tgt->sq_dma, GFP_KERNEL);
+ tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ &tgt->sq_dma, GFP_KERNEL);
if (!tgt->sq) {
printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
tgt->sq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->sq, 0, tgt->sq_mem_size);
/* Allocate and map CQ */
tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
- &tgt->cq_dma, GFP_KERNEL);
+ tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ &tgt->cq_dma, GFP_KERNEL);
if (!tgt->cq) {
printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
tgt->cq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->cq, 0, tgt->cq_mem_size);
/* Allocate and map RQ and RQ PBL */
tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
- &tgt->rq_dma, GFP_KERNEL);
+ tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ &tgt->rq_dma, GFP_KERNEL);
if (!tgt->rq) {
printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
tgt->rq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->rq, 0, tgt->rq_mem_size);
tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
- &tgt->rq_pbl_dma, GFP_KERNEL);
+ tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ &tgt->rq_pbl_dma, GFP_KERNEL);
if (!tgt->rq_pbl) {
printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
tgt->rq_pbl_size);
goto mem_alloc_failure;
}
- memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
page = tgt->rq_dma;
pbl = (u32 *)tgt->rq_pbl;
@@ -739,44 +735,43 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
- &tgt->xferq_dma, GFP_KERNEL);
+ tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->xferq_mem_size, &tgt->xferq_dma,
+ GFP_KERNEL);
if (!tgt->xferq) {
printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
tgt->xferq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->xferq, 0, tgt->xferq_mem_size);
/* Allocate and map CONFQ & CONFQ PBL */
tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
- &tgt->confq_dma, GFP_KERNEL);
+ tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->confq_mem_size, &tgt->confq_dma,
+ GFP_KERNEL);
if (!tgt->confq) {
printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
tgt->confq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->confq, 0, tgt->confq_mem_size);
tgt->confq_pbl_size =
(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->confq_pbl_size =
(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
- tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
- tgt->confq_pbl_size,
- &tgt->confq_pbl_dma, GFP_KERNEL);
+ tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->confq_pbl_size,
+ &tgt->confq_pbl_dma, GFP_KERNEL);
if (!tgt->confq_pbl) {
printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
tgt->confq_pbl_size);
goto mem_alloc_failure;
}
- memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
page = tgt->confq_dma;
pbl = (u32 *)tgt->confq_pbl;
@@ -792,15 +787,14 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map ConnDB */
tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
- tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
- tgt->conn_db_mem_size,
- &tgt->conn_db_dma, GFP_KERNEL);
+ tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->conn_db_mem_size,
+ &tgt->conn_db_dma, GFP_KERNEL);
if (!tgt->conn_db) {
printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
tgt->conn_db_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
/* Allocate and map LCQ */
@@ -808,15 +802,14 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
- &tgt->lcq_dma, GFP_KERNEL);
+ tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ &tgt->lcq_dma, GFP_KERNEL);
if (!tgt->lcq) {
printk(KERN_ERR PFX "unable to allocate lcq %d\n",
tgt->lcq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->lcq, 0, tgt->lcq_mem_size);
tgt->conn_db->rq_prod = 0x8000;
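All of the dma_alloc_coherent()+memset() pairs in this driver collapse into dma_zalloc_coherent(), which at the time of this patch was a thin wrapper along these lines:

/* Approximate definition from <linux/dma-mapping.h> of this era: */
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}

(Later kernels made dma_alloc_coherent() zero its buffer unconditionally and removed the wrapper again, but that postdates this diff.)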
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e0640e0f259f..8f03a869ac98 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -547,12 +547,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8);
- if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
- u32 tmp = nopout_wqe->lun[0];
- /* 57710 requires LUN field to be swapped */
- nopout_wqe->lun[0] = nopout_wqe->lun[1];
- nopout_wqe->lun[1] = tmp;
- }
+ /* 57710 requires LUN field to be swapped */
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+ swap(nopout_wqe->lun[0], nopout_wqe->lun[1]);
nopout_wqe->itt = ((u16)task->itt |
(ISCSI_TASK_TYPE_MPATH <<
@@ -1073,15 +1070,14 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate memory area for actual SQ element */
ep->qp.sq_virt =
- dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
- &ep->qp.sq_phys, GFP_KERNEL);
+ dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+ &ep->qp.sq_phys, GFP_KERNEL);
if (!ep->qp.sq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
ep->qp.sq_mem_size);
goto mem_alloc_err;
}
- memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
ep->qp.sq_first_qe = ep->qp.sq_virt;
ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
@@ -1110,14 +1106,13 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate memory area for actual CQ element */
ep->qp.cq_virt =
- dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
- &ep->qp.cq_phys, GFP_KERNEL);
+ dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+ &ep->qp.cq_phys, GFP_KERNEL);
if (!ep->qp.cq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
ep->qp.cq_mem_size);
goto mem_alloc_err;
}
- memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
ep->qp.cq_first_qe = ep->qp.cq_virt;
ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
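The open-coded three-line exchange in bnx2i_send_iscsi_nopout() is replaced with the kernel's swap() helper, defined in <linux/kernel.h> at this point roughly as:

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

Besides being shorter, it avoids the throwaway temporary variable in the caller and works for any assignable type.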
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index cb1711a5d7a3..ed2dae657964 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1258,7 +1258,7 @@ module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
-MODULE_LICENSE(CSIO_DRV_LICENSE);
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
index 96b31e5af91e..20244254325a 100644
--- a/drivers/scsi/csiostor/csio_init.h
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -48,7 +48,6 @@
#include "csio_hw.h"
#define CSIO_DRV_AUTHOR "Chelsio Communications"
-#define CSIO_DRV_LICENSE "Dual BSD/GPL"
#define CSIO_DRV_DESC "Chelsio FCoE driver"
#define CSIO_DRV_VERSION "1.0.0-ko"
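Inlining the license into MODULE_LICENSE() and dropping CSIO_DRV_LICENSE is presumably done so that the license appears as a greppable string literal at the macro call site, which is what license-auditing tooling expects:

MODULE_LICENSE("Dual BSD/GPL");	/* literal, visible to checkers */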
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 931b1d8f9f3e..5f4e0a787bd1 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -1216,7 +1216,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
/* Queue mbox cmd, if another mbox cmd is active */
if (mbp->mb_cbfn == NULL) {
rv = -EBUSY;
- csio_dbg(hw, "Couldnt own Mailbox %x op:0x%x\n",
+ csio_dbg(hw, "Couldn't own Mailbox %x op:0x%x\n",
hw->pfn, *((uint8_t *)mbp->mb));
goto error_out;
@@ -1244,14 +1244,14 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
rv = owner ? -EBUSY : -ETIMEDOUT;
csio_dbg(hw,
- "Couldnt own Mailbox %x op:0x%x "
+ "Couldn't own Mailbox %x op:0x%x "
"owner:%x\n",
hw->pfn, *((uint8_t *)mbp->mb), owner);
goto error_out;
} else {
if (mbm->mcurrent == NULL) {
csio_err(hw,
- "Couldnt own Mailbox %x "
+ "Couldn't own Mailbox %x "
"op:0x%x owner:%x\n",
hw->pfn, *((uint8_t *)mbp->mb),
owner);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index ce1336414e0a..3f3af5e74a07 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1914,7 +1914,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
if (task->sc) {
task->hdr = (struct iscsi_hdr *)tdata->skb->data;
} else {
- task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_KERNEL);
+ task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC);
if (!task->hdr) {
__kfree_skb(tdata->skb);
tdata->skb = NULL;
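The GFP_KERNEL-to-GFP_ATOMIC switch above follows the general allocation-context rule rather than anything cxgbi-specific: this path can evidently be reached from atomic context, where a GFP_KERNEL allocation could sleep. As a general sketch:

p = kzalloc(len, GFP_KERNEL);	/* may sleep: process context only */
p = kzalloc(len, GFP_ATOMIC);	/* never sleeps: safe under spinlocks
				 * or in softirq, but fails more readily
				 * under memory pressure */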
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
index 9e39866d473b..7ec3f6b55dde 100644
--- a/drivers/scsi/cxlflash/Makefile
+++ b/drivers/scsi/cxlflash/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_CXLFLASH) += cxlflash.o
-cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
+cxlflash-y += main.o superpipe.o lunmgt.o vlun.o cxl_hw.o
diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h
new file mode 100644
index 000000000000..339e42b03c49
--- /dev/null
+++ b/drivers/scsi/cxlflash/backend.h
@@ -0,0 +1,41 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
+
+struct cxlflash_backend_ops {
+ struct module *module;
+ void __iomem * (*psa_map)(void *);
+ void (*psa_unmap)(void __iomem *);
+ int (*process_element)(void *);
+ int (*map_afu_irq)(void *, int, irq_handler_t, void *, char *);
+ void (*unmap_afu_irq)(void *, int, void *);
+ int (*start_context)(void *);
+ int (*stop_context)(void *);
+ int (*afu_reset)(void *);
+ void (*set_master)(void *);
+ void * (*get_context)(struct pci_dev *, void *);
+ void * (*dev_context_init)(struct pci_dev *, void *);
+ int (*release_context)(void *);
+ void (*perst_reloads_same_image)(void *, bool);
+ ssize_t (*read_adapter_vpd)(struct pci_dev *, void *, size_t);
+ int (*allocate_afu_irqs)(void *, int);
+ void (*free_afu_irqs)(void *);
+ void * (*create_afu)(struct pci_dev *);
+ struct file * (*get_fd)(void *, struct file_operations *, int *);
+ void * (*fops_get_context)(struct file *);
+ int (*start_work)(void *, u64);
+ int (*fd_mmap)(struct file *, struct vm_area_struct *);
+ int (*fd_release)(struct inode *, struct file *);
+};
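backend.h introduces a function-pointer operations table so the rest of cxlflash is decoupled from direct cxl_* calls; the transport is chosen once at probe time. Usage, as wired up elsewhere in this diff:

cfg->ops = &cxlflash_cxl_ops;			/* cxlflash_probe() */
cfg->afu_cookie = cfg->ops->create_afu(pdev);
ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
rc = cfg->ops->start_context(ctx);

Contexts and AFUs are passed around as opaque void * cookies, so a non-CXL backend can store whatever handle it needs behind the same interface.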
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 6d95e8e147e0..102fd26ca886 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -25,6 +25,8 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
+#include "backend.h"
+
extern const struct file_operations cxlflash_cxl_fops;
#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
@@ -114,6 +116,7 @@ enum cxlflash_hwq_mode {
struct cxlflash_cfg {
struct afu *afu;
+ const struct cxlflash_backend_ops *ops;
struct pci_dev *dev;
struct pci_device_id *dev_id;
struct Scsi_Host *host;
@@ -129,7 +132,7 @@ struct cxlflash_cfg {
int lr_port;
atomic_t scan_host_needed;
- struct cxl_afu *cxl_afu;
+ void *afu_cookie;
atomic_t recovery_threads;
struct mutex ctx_recovery_mutex;
@@ -203,8 +206,7 @@ struct hwq {
* fields after this point
*/
struct afu *afu;
- struct cxl_context *ctx;
- struct cxl_ioctl_start_work work;
+ void *ctx_cookie;
struct sisl_host_map __iomem *host_map; /* MC host map */
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
ctx_hndl_t ctx_hndl; /* master's context handle */
diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c
new file mode 100644
index 000000000000..db1cadad5c5d
--- /dev/null
+++ b/drivers/scsi/cxlflash/cxl_hw.c
@@ -0,0 +1,168 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <misc/cxl.h>
+
+#include "backend.h"
+
+/*
+ * The following routines map the cxlflash backend operations to existing CXL
+ * kernel API functions and are largely simple shims that provide an abstraction
+ * for converting generic context and AFU cookies into cxl_context or cxl_afu
+ * pointers.
+ */
+
+static void __iomem *cxlflash_psa_map(void *ctx_cookie)
+{
+ return cxl_psa_map(ctx_cookie);
+}
+
+static void cxlflash_psa_unmap(void __iomem *addr)
+{
+ cxl_psa_unmap(addr);
+}
+
+static int cxlflash_process_element(void *ctx_cookie)
+{
+ return cxl_process_element(ctx_cookie);
+}
+
+static int cxlflash_map_afu_irq(void *ctx_cookie, int num,
+ irq_handler_t handler, void *cookie, char *name)
+{
+ return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name);
+}
+
+static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
+{
+ cxl_unmap_afu_irq(ctx_cookie, num, cookie);
+}
+
+static int cxlflash_start_context(void *ctx_cookie)
+{
+ return cxl_start_context(ctx_cookie, 0, NULL);
+}
+
+static int cxlflash_stop_context(void *ctx_cookie)
+{
+ return cxl_stop_context(ctx_cookie);
+}
+
+static int cxlflash_afu_reset(void *ctx_cookie)
+{
+ return cxl_afu_reset(ctx_cookie);
+}
+
+static void cxlflash_set_master(void *ctx_cookie)
+{
+ cxl_set_master(ctx_cookie);
+}
+
+static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie)
+{
+ return cxl_get_context(dev);
+}
+
+static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie)
+{
+ return cxl_dev_context_init(dev);
+}
+
+static int cxlflash_release_context(void *ctx_cookie)
+{
+ return cxl_release_context(ctx_cookie);
+}
+
+static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
+{
+ cxl_perst_reloads_same_image(afu_cookie, image);
+}
+
+static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev,
+ void *buf, size_t count)
+{
+ return cxl_read_adapter_vpd(dev, buf, count);
+}
+
+static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
+{
+ return cxl_allocate_afu_irqs(ctx_cookie, num);
+}
+
+static void cxlflash_free_afu_irqs(void *ctx_cookie)
+{
+ cxl_free_afu_irqs(ctx_cookie);
+}
+
+static void *cxlflash_create_afu(struct pci_dev *dev)
+{
+ return cxl_pci_to_afu(dev);
+}
+
+static struct file *cxlflash_get_fd(void *ctx_cookie,
+ struct file_operations *fops, int *fd)
+{
+ return cxl_get_fd(ctx_cookie, fops, fd);
+}
+
+static void *cxlflash_fops_get_context(struct file *file)
+{
+ return cxl_fops_get_context(file);
+}
+
+static int cxlflash_start_work(void *ctx_cookie, u64 irqs)
+{
+ struct cxl_ioctl_start_work work = { 0 };
+
+ work.num_interrupts = irqs;
+ work.flags = CXL_START_WORK_NUM_IRQS;
+
+ return cxl_start_work(ctx_cookie, &work);
+}
+
+static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm)
+{
+ return cxl_fd_mmap(file, vm);
+}
+
+static int cxlflash_fd_release(struct inode *inode, struct file *file)
+{
+ return cxl_fd_release(inode, file);
+}
+
+const struct cxlflash_backend_ops cxlflash_cxl_ops = {
+ .module = THIS_MODULE,
+ .psa_map = cxlflash_psa_map,
+ .psa_unmap = cxlflash_psa_unmap,
+ .process_element = cxlflash_process_element,
+ .map_afu_irq = cxlflash_map_afu_irq,
+ .unmap_afu_irq = cxlflash_unmap_afu_irq,
+ .start_context = cxlflash_start_context,
+ .stop_context = cxlflash_stop_context,
+ .afu_reset = cxlflash_afu_reset,
+ .set_master = cxlflash_set_master,
+ .get_context = cxlflash_get_context,
+ .dev_context_init = cxlflash_dev_context_init,
+ .release_context = cxlflash_release_context,
+ .perst_reloads_same_image = cxlflash_perst_reloads_same_image,
+ .read_adapter_vpd = cxlflash_read_adapter_vpd,
+ .allocate_afu_irqs = cxlflash_allocate_afu_irqs,
+ .free_afu_irqs = cxlflash_free_afu_irqs,
+ .create_afu = cxlflash_create_afu,
+ .get_fd = cxlflash_get_fd,
+ .fops_get_context = cxlflash_fops_get_context,
+ .start_work = cxlflash_start_work,
+ .fd_mmap = cxlflash_fd_mmap,
+ .fd_release = cxlflash_fd_release,
+};
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 38b3a9c84fd1..d8fe7ab870b8 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -620,6 +620,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
cmd->parent = afu;
cmd->hwq_index = hwq_index;
+ cmd->sa.ioasc = 0;
cmd->rcb.ctx_id = hwq->ctx_hndl;
cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
@@ -710,7 +711,7 @@ static void stop_afu(struct cxlflash_cfg *cfg)
}
if (likely(afu->afu_map)) {
- cxl_psa_unmap((void __iomem *)afu->afu_map);
+ cfg->ops->psa_unmap(afu->afu_map);
afu->afu_map = NULL;
}
}
@@ -738,7 +739,7 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
hwq = get_hwq(afu, index);
- if (!hwq->ctx) {
+ if (!hwq->ctx_cookie) {
dev_err(dev, "%s: returning with NULL MC\n", __func__);
return;
}
@@ -747,13 +748,13 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
case UNMAP_THREE:
/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
if (index == PRIMARY_HWQ)
- cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
case UNMAP_TWO:
- cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
case UNMAP_ONE:
- cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
case FREE_IRQ:
- cxl_free_afu_irqs(hwq->ctx);
+ cfg->ops->free_afu_irqs(hwq->ctx_cookie);
/* fall through */
case UNDO_NOOP:
/* No action required */
@@ -782,15 +783,15 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
hwq = get_hwq(afu, index);
- if (!hwq->ctx) {
+ if (!hwq->ctx_cookie) {
dev_err(dev, "%s: returning with NULL MC\n", __func__);
return;
}
- WARN_ON(cxl_stop_context(hwq->ctx));
+ WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
if (index != PRIMARY_HWQ)
- WARN_ON(cxl_release_context(hwq->ctx));
- hwq->ctx = NULL;
+ WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
+ hwq->ctx_cookie = NULL;
spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
flush_pending_cmds(hwq);
@@ -1598,27 +1599,6 @@ out:
}
/**
- * start_context() - starts the master context
- * @cfg: Internal structure associated with the host.
- * @index: Index of the hardware queue.
- *
- * Return: A success or failure value from CXL services.
- */
-static int start_context(struct cxlflash_cfg *cfg, u32 index)
-{
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(cfg->afu, index);
- int rc = 0;
-
- rc = cxl_start_context(hwq->ctx,
- hwq->work.work_element_descriptor,
- NULL);
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
* read_vpd() - obtains the WWPNs from VPD
* @cfg: Internal structure associated with the host.
* @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs
@@ -1640,7 +1620,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
/* Get the VPD data from the device */
- vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
+ vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
if (unlikely(vpd_size <= 0)) {
dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
__func__, vpd_size);
@@ -1732,6 +1712,7 @@ static void init_pcr(struct cxlflash_cfg *cfg)
struct afu *afu = cfg->afu;
struct sisl_ctrl_map __iomem *ctrl_map;
struct hwq *hwq;
+ void *cookie;
int i;
for (i = 0; i < MAX_CONTEXT; i++) {
@@ -1746,8 +1727,9 @@ static void init_pcr(struct cxlflash_cfg *cfg)
/* Copy frequently used fields into hwq */
for (i = 0; i < afu->num_hwqs; i++) {
hwq = get_hwq(afu, i);
+ cookie = hwq->ctx_cookie;
- hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx);
+ hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
@@ -1925,13 +1907,13 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
struct hwq *hwq)
{
struct device *dev = &cfg->dev->dev;
- struct cxl_context *ctx = hwq->ctx;
+ void *ctx = hwq->ctx_cookie;
int rc = 0;
enum undo_level level = UNDO_NOOP;
bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
int num_irqs = is_primary_hwq ? 3 : 2;
- rc = cxl_allocate_afu_irqs(ctx, num_irqs);
+ rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
if (unlikely(rc)) {
dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
__func__, rc);
@@ -1939,16 +1921,16 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
goto out;
}
- rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
- "SISL_MSI_SYNC_ERROR");
+ rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
+ "SISL_MSI_SYNC_ERROR");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
level = FREE_IRQ;
goto out;
}
- rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
- "SISL_MSI_RRQ_UPDATED");
+ rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
+ "SISL_MSI_RRQ_UPDATED");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
level = UNMAP_ONE;
@@ -1959,8 +1941,8 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
if (!is_primary_hwq)
goto out;
- rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
- "SISL_MSI_ASYNC_ERROR");
+ rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
+ "SISL_MSI_ASYNC_ERROR");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
level = UNMAP_TWO;
@@ -1979,7 +1961,7 @@ out:
*/
static int init_mc(struct cxlflash_cfg *cfg, u32 index)
{
- struct cxl_context *ctx;
+ void *ctx;
struct device *dev = &cfg->dev->dev;
struct hwq *hwq = get_hwq(cfg->afu, index);
int rc = 0;
@@ -1990,23 +1972,23 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
INIT_LIST_HEAD(&hwq->pending_cmds);
if (index == PRIMARY_HWQ)
- ctx = cxl_get_context(cfg->dev);
+ ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
else
- ctx = cxl_dev_context_init(cfg->dev);
- if (unlikely(!ctx)) {
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
+ if (IS_ERR_OR_NULL(ctx)) {
rc = -ENOMEM;
goto err1;
}
- WARN_ON(hwq->ctx);
- hwq->ctx = ctx;
+ WARN_ON(hwq->ctx_cookie);
+ hwq->ctx_cookie = ctx;
/* Set it up as a master with the CXL */
- cxl_set_master(ctx);
+ cfg->ops->set_master(ctx);
/* Reset AFU when initializing primary context */
if (index == PRIMARY_HWQ) {
- rc = cxl_afu_reset(ctx);
+ rc = cfg->ops->afu_reset(ctx);
if (unlikely(rc)) {
dev_err(dev, "%s: AFU reset failed rc=%d\n",
__func__, rc);
@@ -2020,11 +2002,8 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
goto err2;
}
- /* This performs the equivalent of the CXL_IOCTL_START_WORK.
- * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
- * element (pe) that is embedded in the context (ctx)
- */
- rc = start_context(cfg, index);
+ /* Finally, activate the context by starting it */
+ rc = cfg->ops->start_context(hwq->ctx_cookie);
if (unlikely(rc)) {
dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
level = UNMAP_THREE;
@@ -2037,9 +2016,9 @@ out:
err2:
term_intr(cfg, level, index);
if (index != PRIMARY_HWQ)
- cxl_release_context(ctx);
+ cfg->ops->release_context(ctx);
err1:
- hwq->ctx = NULL;
+ hwq->ctx_cookie = NULL;
goto out;
}
@@ -2094,7 +2073,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
struct hwq *hwq;
int i;
- cxl_perst_reloads_same_image(cfg->cxl_afu, true);
+ cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
afu->num_hwqs = afu->desired_hwqs;
for (i = 0; i < afu->num_hwqs; i++) {
@@ -2108,9 +2087,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
/* Map the entire MMIO space of the AFU using the first context */
hwq = get_hwq(afu, PRIMARY_HWQ);
- afu->afu_map = cxl_psa_map(hwq->ctx);
+ afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
if (!afu->afu_map) {
- dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
+ dev_err(dev, "%s: psa_map failed\n", __func__);
rc = -ENOMEM;
goto err1;
}
@@ -3670,6 +3649,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->init_state = INIT_STATE_NONE;
cfg->dev = pdev;
+ cfg->ops = &cxlflash_cxl_ops;
cfg->cxl_fops = cxlflash_cxl_fops;
/*
@@ -3701,7 +3681,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, cfg);
- cfg->cxl_afu = cxl_pci_to_afu(pdev);
+ cfg->afu_cookie = cfg->ops->create_afu(pdev);
rc = init_pci(cfg);
if (rc) {
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 170fff5aeff6..2fe79df5c73c 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -810,20 +810,22 @@ err:
* init_context() - initializes a previously allocated context
* @ctxi: Previously allocated context
* @cfg: Internal structure associated with the host.
- * @ctx: Previously obtained CXL context reference.
+ * @ctx: Previously obtained context cookie.
* @ctxid: Previously obtained process element associated with CXL context.
* @file: Previously obtained file associated with CXL context.
* @perms: User-specified permissions.
+ * @irqs: User-specified number of interrupts.
*/
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
- struct cxl_context *ctx, int ctxid, struct file *file,
- u32 perms)
+ void *ctx, int ctxid, struct file *file, u32 perms,
+ u64 irqs)
{
struct afu *afu = cfg->afu;
ctxi->rht_perms = perms;
ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
+ ctxi->irqs = irqs;
ctxi->pid = task_tgid_nr(current); /* tgid = pid */
ctxi->ctx = ctx;
ctxi->cfg = cfg;
@@ -976,9 +978,9 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
*/
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
struct dk_cxlflash_detach detach = { { 0 }, 0 };
@@ -986,7 +988,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
int ctxid;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1014,7 +1016,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release:
- cxl_fd_release(inode, file);
+ cfg->ops->fd_release(inode, file);
out:
dev_dbg(dev, "%s: returning\n", __func__);
return 0;
@@ -1089,9 +1091,9 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct file *file = vma->vm_file;
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
struct page *err_page = NULL;
@@ -1099,7 +1101,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
int rc = 0;
int ctxid;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1162,16 +1164,16 @@ static const struct vm_operations_struct cxlflash_mmap_vmops = {
*/
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
int ctxid;
int rc = 0;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1188,7 +1190,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
- rc = cxl_fd_mmap(file, vma);
+ rc = cfg->ops->fd_mmap(file, vma);
if (likely(!rc)) {
/* Insert ourself in the mmap fault handler path */
ctxi->cxl_mmap_vmops = vma->vm_ops;
@@ -1307,23 +1309,23 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
struct afu *afu = cfg->afu;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
- struct cxl_ioctl_start_work *work;
struct ctx_info *ctxi = NULL;
struct lun_access *lun_access = NULL;
int rc = 0;
u32 perms;
int ctxid = -1;
+ u64 irqs = attach->num_interrupts;
u64 flags = 0UL;
u64 rctxid = 0UL;
struct file *file = NULL;
- struct cxl_context *ctx = NULL;
+ void *ctx = NULL;
int fd = -1;
- if (attach->num_interrupts > 4) {
+ if (irqs > 4) {
dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
- __func__, attach->num_interrupts);
+ __func__, irqs);
rc = -EINVAL;
goto out;
}
@@ -1394,7 +1396,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto err;
}
- ctx = cxl_dev_context_init(cfg->dev);
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
@@ -1402,25 +1404,21 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto err;
}
- work = &ctxi->work;
- work->num_interrupts = attach->num_interrupts;
- work->flags = CXL_START_WORK_NUM_IRQS;
-
- rc = cxl_start_work(ctx, work);
+ rc = cfg->ops->start_work(ctx, irqs);
if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc);
goto err;
}
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM;
goto err;
}
- file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
+ file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
@@ -1431,7 +1429,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
perms = SISL_RHT_PERM(attach->hdr.flags + 1);
/* Context mutex is locked upon return */
- init_context(ctxi, cfg, ctx, ctxid, file, perms);
+ init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);
rc = afu_attach(cfg, ctxi);
if (unlikely(rc)) {
@@ -1479,8 +1477,8 @@ out:
err:
/* Cleanup CXL context; okay to 'stop' even if it was not started */
if (!IS_ERR_OR_NULL(ctx)) {
- cxl_stop_context(ctx);
- cxl_release_context(ctx);
+ cfg->ops->stop_context(ctx);
+ cfg->ops->release_context(ctx);
ctx = NULL;
}
@@ -1529,10 +1527,10 @@ static int recover_context(struct cxlflash_cfg *cfg,
int fd = -1;
int ctxid = -1;
struct file *file;
- struct cxl_context *ctx;
+ void *ctx;
struct afu *afu = cfg->afu;
- ctx = cxl_dev_context_init(cfg->dev);
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
@@ -1540,21 +1538,21 @@ static int recover_context(struct cxlflash_cfg *cfg,
goto out;
}
- rc = cxl_start_work(ctx, &ctxi->work);
+ rc = cfg->ops->start_work(ctx, ctxi->irqs);
if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc);
goto err1;
}
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM;
goto err2;
}
- file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
+ file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
@@ -1601,9 +1599,9 @@ err3:
fput(file);
put_unused_fd(fd);
err2:
- cxl_stop_context(ctx);
+ cfg->ops->stop_context(ctx);
err1:
- cxl_release_context(ctx);
+ cfg->ops->release_context(ctx);
goto out;
}
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
index 0b5976829913..35c3cbf83fb5 100644
--- a/drivers/scsi/cxlflash/superpipe.h
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -96,15 +96,15 @@ struct ctx_info {
struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
u8 *rht_needs_ws; /* User-desired write-same function per RHTE */
- struct cxl_ioctl_start_work work;
u64 ctxid;
+ u64 irqs; /* Number of interrupts requested for context */
pid_t pid;
bool initialized;
bool unavail;
bool err_recovery_active;
struct mutex mutex; /* Context protection */
struct kref kref;
- struct cxl_context *ctx;
+ void *ctx;
struct cxlflash_cfg *cfg;
struct list_head luns; /* LUNs attached to this context */
const struct vm_operations_struct *cxl_mmap_vmops;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fd22dc6ab5d9..022e421c2185 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -40,6 +40,7 @@
#define TPGS_SUPPORT_LBA_DEPENDENT 0x10
#define TPGS_SUPPORT_OFFLINE 0x40
#define TPGS_SUPPORT_TRANSITION 0x80
+#define TPGS_SUPPORT_ALL 0xdf
#define RTPG_FMT_MASK 0x70
#define RTPG_FMT_EXT_HDR 0x10
@@ -81,6 +82,7 @@ struct alua_port_group {
int tpgs;
int state;
int pref;
+ int valid_states;
unsigned flags; /* used for optimizing STPG */
unsigned char transition_tmo;
unsigned long expiry;
@@ -243,6 +245,7 @@ static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
pg->group_id = group_id;
pg->tpgs = tpgs;
pg->state = SCSI_ACCESS_STATE_OPTIMAL;
+ pg->valid_states = TPGS_SUPPORT_ALL;
if (optimize_stpg)
pg->flags |= ALUA_OPTIMIZE_STPG;
kref_init(&pg->kref);
@@ -516,7 +519,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
struct scsi_sense_hdr sense_hdr;
struct alua_port_group *tmp_pg;
- int len, k, off, valid_states = 0, bufflen = ALUA_RTPG_SIZE;
+ int len, k, off, bufflen = ALUA_RTPG_SIZE;
unsigned char *desc, *buff;
unsigned err, retval;
unsigned int tpg_desc_tbl_off;
@@ -541,6 +544,22 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
if (retval) {
+ /*
+ * Some (broken) implementations have a habit of returning
+ * an error during things like firmware update etc.
+ * But if the target only supports active/optimized there's
+ * not much we can do anyway, since there is no other
+ * path we could switch to.
+ * So ignore any errors to avoid spurious failures during
+ * path failover.
+ */
+ if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: ignoring rtpg result %d\n",
+ ALUA_DH_NAME, retval);
+ kfree(buff);
+ return SCSI_DH_OK;
+ }
if (!scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev,
"%s: rtpg failed, result %d\n",
@@ -652,7 +671,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
rcu_read_unlock();
}
if (tmp_pg == pg)
- valid_states = desc[1];
+ tmp_pg->valid_states = desc[1];
spin_unlock_irqrestore(&tmp_pg->lock, flags);
}
kref_put(&tmp_pg->kref, release_port_group);
@@ -665,13 +684,13 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
pg->pref ? "preferred" : "non-preferred",
- valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
- valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
- valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
- valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
- valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
- valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
- valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
+ pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
+ pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
+ pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
+ pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
+ pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
+ pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
+ pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
switch (pg->state) {
case SCSI_ACCESS_STATE_TRANSITIONING:
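Two things happen in the alua_rtpg() changes: the reported state mask is cached in the port group (pg->valid_states, initialized to TPGS_SUPPORT_ALL = 0xdf, the OR of the seven TPGS_SUPPORT_* bits; 0x20 is unassigned), and the error path consults that cache to decide whether an RTPG failure is even actionable. If the only supported state is active/optimized there is no other path to fail over to, so the error is treated as benign:

/* From the hunk above: are all support bits other than optimized clear? */
if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0)
	return SCSI_DH_OK;	/* nothing to switch to; ignore the error */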
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 5e3d909cfc53..6d3e1cb4fea6 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -108,24 +108,6 @@ void fnic_debugfs_terminate(void)
}
/*
- * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace
- * Or Open fc_trace_enable file for fc_trace
- * @inode: The inode pointer.
- * @file: The file pointer to attach the trace enable/disable flag.
- *
- * Description:
- * This routine opens a debugsfs file trace_enable or fc_trace_enable.
- *
- * Returns:
- * This function returns zero if successful.
- */
-static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
-/*
* fnic_trace_ctrl_read -
* Read trace_enable ,fc_trace_enable
* or fc_trace_clear debugfs file
@@ -220,7 +202,7 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
static const struct file_operations fnic_trace_ctrl_fops = {
.owner = THIS_MODULE,
- .open = fnic_trace_ctrl_open,
+ .open = simple_open,
.read = fnic_trace_ctrl_read,
.write = fnic_trace_ctrl_write,
};
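simple_open() from fs/libfs.c does exactly what the removed fnic_trace_ctrl_open() did, so the driver-local copy can go; its implementation is essentially:

int simple_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}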
@@ -632,7 +614,7 @@ static ssize_t fnic_reset_stats_write(struct file *file,
sizeof(struct io_path_stats) - sizeof(u64));
memset(fw_stats_p+1, 0,
sizeof(struct fw_stats) - sizeof(u64));
- getnstimeofday(&stats->stats_timestamps.last_reset_time);
+ ktime_get_real_ts64(&stats->stats_timestamps.last_reset_time);
}
(*ppos)++;
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 999fc7547560..c7bf316d8e83 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -442,15 +442,13 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
shost_printk(KERN_INFO, fnic->lport->host,
"process_vlan_resp: FIP VLAN %d\n", vid);
- vlan = kmalloc(sizeof(*vlan),
- GFP_ATOMIC);
+ vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
if (!vlan) {
/* retry from timer */
spin_unlock_irqrestore(&fnic->vlans_lock,
flags);
goto out;
}
- memset(vlan, 0, sizeof(struct fcoe_vlan));
vlan->vid = vid & 0x0fff;
vlan->state = FIP_VLAN_AVAIL;
list_add_tail(&vlan->list, &fnic->vlans);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 242e2ee494a1..8cbd3c9f0b4c 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -906,7 +906,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"icmnd_cmpl abts pending "
- "hdr status = %s tag = 0x%x sc = 0x%p"
+ "hdr status = %s tag = 0x%x sc = 0x%p "
"scsi_status = %x residual = %d\n",
fnic_fcpio_status_to_str(hdr_status),
id, sc,
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index e007feedbf72..9daa6ada6fa0 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -18,8 +18,8 @@
#define _FNIC_STATS_H_
struct stats_timestamps {
- struct timespec last_reset_time;
- struct timespec last_read_time;
+ struct timespec64 last_reset_time;
+ struct timespec64 last_read_time;
};
struct io_path_stats {
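fnic's stats timestamps move from struct timespec to struct timespec64, whose tv_sec is 64-bit even on 32-bit builds and therefore y2038-safe. That is also why the format strings below cast tv_sec for "%lld" and pad tv_nsec with "%09ld": tv_nsec remains a long and must be zero-padded to read correctly as a fraction. Minimal sketch:

struct timespec64 now;

ktime_get_real_ts64(&now);
pr_info("now: %lld.%09ld\n", (s64)now.tv_sec, now.tv_nsec);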
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 4826f596cb31..abddde11982b 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -111,7 +111,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
int len = 0;
unsigned long flags;
char str[KSYM_SYMBOL_LEN];
- struct timespec val;
+ struct timespec64 val;
fnic_trace_data_t *tbp;
spin_lock_irqsave(&fnic_trace_lock, flags);
@@ -129,10 +129,10 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
/* Convert function pointer to function name */
if (sizeof(unsigned long) < 8) {
sprint_symbol(str, tbp->fnaddr.low);
- jiffies_to_timespec(tbp->timestamp.low, &val);
+ jiffies_to_timespec64(tbp->timestamp.low, &val);
} else {
sprint_symbol(str, tbp->fnaddr.val);
- jiffies_to_timespec(tbp->timestamp.val, &val);
+ jiffies_to_timespec64(tbp->timestamp.val, &val);
}
/*
* Dump trace buffer entry to memory file
@@ -140,8 +140,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
*/
len += snprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
- "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
- "%16llx %16llx %16llx\n", val.tv_sec,
+ "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", (u64)val.tv_sec,
val.tv_nsec, str, tbp->host_no, tbp->tag,
tbp->data[0], tbp->data[1], tbp->data[2],
tbp->data[3], tbp->data[4]);
@@ -171,10 +171,10 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
/* Convert function pointer to function name */
if (sizeof(unsigned long) < 8) {
sprint_symbol(str, tbp->fnaddr.low);
- jiffies_to_timespec(tbp->timestamp.low, &val);
+ jiffies_to_timespec64(tbp->timestamp.low, &val);
} else {
sprint_symbol(str, tbp->fnaddr.val);
- jiffies_to_timespec(tbp->timestamp.val, &val);
+ jiffies_to_timespec64(tbp->timestamp.val, &val);
}
/*
* Dump trace buffer entry to memory file
@@ -182,8 +182,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
*/
len += snprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
- "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
- "%16llx %16llx %16llx\n", val.tv_sec,
+ "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", (u64)val.tv_sec,
val.tv_nsec, str, tbp->host_no, tbp->tag,
tbp->data[0], tbp->data[1], tbp->data[2],
tbp->data[3], tbp->data[4]);
@@ -217,29 +217,29 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
{
int len = 0;
int buf_size = debug->buf_size;
- struct timespec val1, val2;
+ struct timespec64 val1, val2;
- getnstimeofday(&val1);
+ ktime_get_real_ts64(&val1);
len = snprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tTime\n"
"------------------------------------------\n");
len += snprintf(debug->debug_buffer + len, buf_size - len,
- "Current time : [%ld:%ld]\n"
- "Last stats reset time: [%ld:%ld]\n"
- "Last stats read time: [%ld:%ld]\n"
- "delta since last reset: [%ld:%ld]\n"
- "delta since last read: [%ld:%ld]\n",
- val1.tv_sec, val1.tv_nsec,
- stats->stats_timestamps.last_reset_time.tv_sec,
+ "Current time : [%lld:%ld]\n"
+ "Last stats reset time: [%lld:%09ld]\n"
+ "Last stats read time: [%lld:%ld]\n"
+ "delta since last reset: [%lld:%ld]\n"
+ "delta since last read: [%lld:%ld]\n",
+ (s64)val1.tv_sec, val1.tv_nsec,
+ (s64)stats->stats_timestamps.last_reset_time.tv_sec,
stats->stats_timestamps.last_reset_time.tv_nsec,
- stats->stats_timestamps.last_read_time.tv_sec,
+ (s64)stats->stats_timestamps.last_read_time.tv_sec,
stats->stats_timestamps.last_read_time.tv_nsec,
- timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
- timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
- timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
- timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
+ (s64)timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
+ timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
+ (s64)timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
+ timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
stats->stats_timestamps.last_read_time = val1;
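The stats dump above builds its buffer with the running-length snprintf() idiom. A self-contained illustration, plus the caveat the pattern carries: snprintf() returns the would-be length, so on truncation len can exceed the buffer size; the kernel's scnprintf() returns only the bytes actually written and avoids that pitfall.

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            int len = 0;

            /* each call appends at buf+len with the space that remains */
            len += snprintf(buf + len, sizeof(buf) - len, "first: %d\n", 1);
            len += snprintf(buf + len, sizeof(buf) - len, "second: %d\n", 2);
            fputs(buf, stdout);
            return 0;
    }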
@@ -403,12 +403,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"\t\tOther Important Statistics\n"
"------------------------------------------\n");
- jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1);
- jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2);
+ jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1);
+ jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);
len += snprintf(debug->debug_buffer + len, buf_size - len,
- "Last ISR time: %llu (%8lu.%8lu)\n"
- "Last ACK time: %llu (%8lu.%8lu)\n"
+ "Last ISR time: %llu (%8llu.%09lu)\n"
+ "Last ACK time: %llu (%8llu.%09lu)\n"
"Number of ISRs: %lld\n"
"Maximum CQ Entries: %lld\n"
"Number of ACK index out of range: %lld\n"
@@ -425,9 +425,9 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"Number of rport not ready: %lld\n"
"Number of receive frame errors: %lld\n",
(u64)stats->misc_stats.last_isr_time,
- val1.tv_sec, val1.tv_nsec,
+ (s64)val1.tv_sec, val1.tv_nsec,
(u64)stats->misc_stats.last_ack_time,
- val2.tv_sec, val2.tv_nsec,
+ (s64)val2.tv_sec, val2.tv_nsec,
(u64)atomic64_read(&stats->misc_stats.isr_count),
(u64)atomic64_read(&stats->misc_stats.max_cq_entries),
(u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
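A minimal userspace sketch of the formatting point behind the fnic hunks above: after the timespec64 conversion, tv_sec is 64-bit on every architecture (hence the u64/s64 casts for printf), and tv_nsec must be zero-padded to nine digits or sub-second values print ambiguously. Not part of the patch; the values are illustrative.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int64_t tv_sec = 1515151515;    /* illustrative timestamp */
            long tv_nsec = 5;               /* 5 ns, not half a second */

            /* unpadded: "1515151515.5" reads like half a second */
            printf("unpadded: %lld.%ld\n", (long long)tv_sec, tv_nsec);
            /* padded: "1515151515.000000005" is unambiguous */
            printf("padded:   %lld.%09ld\n", (long long)tv_sec, tv_nsec);
            return 0;
    }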
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 83357b0367d8..e7fd2877c19c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -99,12 +99,43 @@ struct hisi_sas_hw_error {
const struct hisi_sas_hw_error *sub;
};
+struct hisi_sas_rst {
+ struct hisi_hba *hisi_hba;
+ struct completion *completion;
+ struct work_struct work;
+ bool done;
+};
+
+#define HISI_SAS_RST_WORK_INIT(r, c) \
+ { .hisi_hba = hisi_hba, \
+ .completion = &c, \
+ .work = __WORK_INITIALIZER(r.work, \
+ hisi_sas_sync_rst_work_handler), \
+ .done = false, \
+ }
+
+#define HISI_SAS_DECLARE_RST_WORK_ON_STACK(r) \
+ DECLARE_COMPLETION_ONSTACK(c); \
+ DECLARE_WORK(w, hisi_sas_sync_rst_work_handler); \
+ struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c)
+
+enum hisi_sas_bit_err_type {
+ HISI_SAS_ERR_SINGLE_BIT_ECC = 0x0,
+ HISI_SAS_ERR_MULTI_BIT_ECC = 0x1,
+};
+
+enum hisi_sas_phy_event {
+ HISI_PHYE_PHY_UP = 0U,
+ HISI_PHYE_LINK_RESET,
+ HISI_PHYES_NUM,
+};
+
struct hisi_sas_phy {
+ struct work_struct works[HISI_PHYES_NUM];
struct hisi_hba *hisi_hba;
struct hisi_sas_port *port;
struct asd_sas_phy sas_phy;
struct sas_identify identify;
- struct work_struct phyup_ws;
u64 port_id; /* from hw */
u64 dev_sas_addr;
u64 frame_rcvd_size;
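The header change above replaces the single phyup_ws work item with a works[] array indexed by a phy-event enum; hisi_sas_main.c pairs it with a matching handler table. A runnable userspace sketch of that dispatch-table shape (names hypothetical; the driver queues work items rather than calling handlers directly):

    #include <stdio.h>

    enum phy_event { PHYE_PHY_UP, PHYE_LINK_RESET, PHYES_NUM };

    typedef void (*work_fn)(int phy_no);

    static void phyup_work(int phy_no)     { printf("phy%d up\n", phy_no); }
    static void linkreset_work(int phy_no) { printf("phy%d link reset\n", phy_no); }

    /* one handler per event, indexed by the enum */
    static const work_fn phye_fns[PHYES_NUM] = {
            [PHYE_PHY_UP]     = phyup_work,
            [PHYE_LINK_RESET] = linkreset_work,
    };

    static int notify_phy_event(int phy_no, enum phy_event event)
    {
            if (event >= PHYES_NUM)         /* mirrors the driver's WARN_ON check */
                    return 0;
            phye_fns[event](phy_no);        /* the driver queues works[event] instead */
            return 1;
    }

    int main(void)
    {
            notify_phy_event(0, PHYE_PHY_UP);
            notify_phy_event(1, PHYE_LINK_RESET);
            return 0;
    }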
@@ -205,13 +236,16 @@ struct hisi_sas_hw {
void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *linkrates);
enum sas_linkrate (*phy_get_max_linkrate)(void);
- void (*free_device)(struct hisi_hba *hisi_hba,
+ void (*clear_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
+ void (*free_device)(struct hisi_sas_device *sas_dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
void (*dereg_device)(struct hisi_hba *hisi_hba,
struct domain_device *device);
int (*soft_reset)(struct hisi_hba *hisi_hba);
u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
+ int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data);
int max_command_entries;
int complete_hdr_size;
};
@@ -225,6 +259,7 @@ struct hisi_hba {
struct device *dev;
void __iomem *regs;
+ void __iomem *sgpio_regs;
struct regmap *ctrl;
u32 ctrl_reset_reg;
u32 ctrl_reset_sts_reg;
@@ -409,7 +444,8 @@ extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
-extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction);
+extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
+ int direction);
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
extern void hisi_sas_sata_done(struct sas_task *task,
struct hisi_sas_slot *slot);
@@ -425,5 +461,9 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
extern void hisi_sas_rst_work_handler(struct work_struct *work);
+extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
+extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
+ enum hisi_sas_phy_event event);
+extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
#endif
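HISI_SAS_DECLARE_RST_WORK_ON_STACK packages an on-stack work item and completion so a caller can run the controller reset in workqueue context and sleep until it finishes. A minimal sketch of the intended call pattern, assuming it sits in the driver next to this header (the real users are hisi_sas_clear_nexus_ha() and the v3 PCI slot_reset callback below):

    #include "hisi_sas.h"

    static int example_sync_reset(struct hisi_hba *hisi_hba)
    {
            HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);

            queue_work(hisi_hba->wq, &r.work);      /* handler performs the reset */
            wait_for_completion(r.completion);      /* block until complete() fires */

            return r.done ? 0 : -EIO;               /* done is set only on success */
    }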
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 5f503cb09508..2d4dbed03ee3 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -22,10 +22,12 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device,
int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
+static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
+ void *funcdata);
-u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
+u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
- switch (cmd) {
+ switch (fis->command) {
case ATA_CMD_FPDMA_WRITE:
case ATA_CMD_FPDMA_READ:
case ATA_CMD_FPDMA_RECV:
@@ -77,10 +79,26 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
case ATA_CMD_ZAC_MGMT_OUT:
return HISI_SAS_SATA_PROTOCOL_NONDATA;
default:
+ {
+ if (fis->command == ATA_CMD_SET_MAX) {
+ switch (fis->features) {
+ case ATA_SET_MAX_PASSWD:
+ case ATA_SET_MAX_LOCK:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+
+ case ATA_SET_MAX_PASSWD_DMA:
+ case ATA_SET_MAX_UNLOCK_DMA:
+ return HISI_SAS_SATA_PROTOCOL_DMA;
+
+ default:
+ return HISI_SAS_SATA_PROTOCOL_NONDATA;
+ }
+ }
if (direction == DMA_NONE)
return HISI_SAS_SATA_PROTOCOL_NONDATA;
return HISI_SAS_SATA_PROTOCOL_PIO;
}
+ }
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
@@ -192,7 +210,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
if (!sas_protocol_ata(task->task_proto))
if (slot->n_elem)
- dma_unmap_sg(dev, task->scatter, slot->n_elem,
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
task->data_dir);
if (sas_dev)
@@ -431,7 +450,8 @@ err_out:
dev_err(dev, "task prep: failed[%d]!\n", rc);
if (!sas_protocol_ata(task->task_proto))
if (n_elem)
- dma_unmap_sg(dev, task->scatter, n_elem,
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
task->data_dir);
prep_out:
return rc;
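Both dma_unmap_sg() fixes above apply the DMA-API rule that unmap takes the nents originally passed to dma_map_sg(), not the possibly smaller count dma_map_sg() returned after coalescing entries. A hedged kernel-style sketch of the correct pairing (function name illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static void sg_dma_example(struct device *dev, struct scatterlist *sg,
                               int nents, enum dma_data_direction dir)
    {
            int mapped = dma_map_sg(dev, sg, nents, dir);

            if (!mapped)
                    return;
            /* ... issue I/O over the 'mapped' coalesced entries ... */
            dma_unmap_sg(dev, sg, nents, dir);      /* original nents, never 'mapped' */
    }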
@@ -578,6 +598,9 @@ static int hisi_sas_dev_found(struct domain_device *device)
}
}
+ dev_info(dev, "dev[%d:%x] found\n",
+ sas_dev->device_id, sas_dev->dev_type);
+
return 0;
}
@@ -617,7 +640,7 @@ static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
static void hisi_sas_phyup_work(struct work_struct *work)
{
struct hisi_sas_phy *phy =
- container_of(work, struct hisi_sas_phy, phyup_ws);
+ container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int phy_no = sas_phy->id;
@@ -626,10 +649,37 @@ static void hisi_sas_phyup_work(struct work_struct *work)
hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
+static void hisi_sas_linkreset_work(struct work_struct *work)
+{
+ struct hisi_sas_phy *phy =
+ container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+ hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
+}
+
+static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
+ [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
+ [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
+};
+
+bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
+ enum hisi_sas_phy_event event)
+{
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+
+ if (WARN_ON(event >= HISI_PHYES_NUM))
+ return false;
+
+ return queue_work(hisi_hba->wq, &phy->works[event]);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
+
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ int i;
phy->hisi_hba = hisi_hba;
phy->port = NULL;
@@ -647,7 +697,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
sas_phy->lldd_phy = phy;
- INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
+ for (i = 0; i < HISI_PHYES_NUM; i++)
+ INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
}
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
@@ -702,7 +753,7 @@ static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
-static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
+void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
struct hisi_sas_device *sas_dev;
struct domain_device *device;
@@ -719,6 +770,7 @@ static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
hisi_sas_release_task(hisi_hba, device);
}
}
+EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
struct domain_device *device)
@@ -733,17 +785,21 @@ static void hisi_sas_dev_gone(struct domain_device *device)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
- dev_info(dev, "found dev[%d:%x] is gone\n",
+ dev_info(dev, "dev[%d:%x] is gone\n",
sas_dev->device_id, sas_dev->dev_type);
- hisi_sas_internal_task_abort(hisi_hba, device,
+ if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
+ hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
- hisi_sas_dereg_device(hisi_hba, device);
+ hisi_sas_dereg_device(hisi_hba, device);
- hisi_hba->hw->free_device(hisi_hba, sas_dev);
- device->lldd_dev = NULL;
- memset(sas_dev, 0, sizeof(*sas_dev));
+ hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
+ device->lldd_dev = NULL;
+ }
+
+ if (hisi_hba->hw->free_device)
+ hisi_hba->hw->free_device(sas_dev);
sas_dev->dev_type = SAS_PHY_UNUSED;
}
@@ -859,12 +915,13 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
struct hisi_sas_slot *slot = task->lldd_task;
- dev_err(dev, "abort tmf: TMF task timeout\n");
+ dev_err(dev, "abort tmf: TMF task timeout and not done\n");
if (slot)
slot->task = NULL;
goto ex_err;
- }
+ } else
+ dev_err(dev, "abort tmf: TMF task timeout\n");
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
@@ -985,27 +1042,42 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
sizeof(ssp_task), tmf);
}
-static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
- struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
+static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
- struct hisi_sas_device *sas_dev;
- struct domain_device *device;
+ u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
int i;
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
- sas_dev = &hisi_hba->devices[i];
- device = sas_dev->sas_device;
+ struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
+ struct domain_device *device = sas_dev->sas_device;
+ struct asd_sas_port *sas_port;
+ struct hisi_sas_port *port;
+ struct hisi_sas_phy *phy = NULL;
+ struct asd_sas_phy *sas_phy;
+
if ((sas_dev->dev_type == SAS_PHY_UNUSED)
- || !device || (device->port != sas_port))
+ || !device || !device->port)
continue;
- hisi_hba->hw->free_device(hisi_hba, sas_dev);
+ sas_port = device->port;
+ port = to_hisi_sas_port(sas_port);
+
+ list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
+ if (state & BIT(sas_phy->id)) {
+ phy = sas_phy->lldd_phy;
+ break;
+ }
+
+ if (phy) {
+ port->id = phy->port_id;
- /* Update linkrate of directly attached device. */
- if (!device->parent)
- device->linkrate = linkrate;
+ /* Update linkrate of directly attached device. */
+ if (!device->parent)
+ device->linkrate = phy->sas_phy.linkrate;
- hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+ hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+ } else
+ port->id = 0xff;
}
}
@@ -1020,21 +1092,17 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
bool do_port_check = !!(_sas_port != sas_port);
if (!sas_phy->phy->enabled)
continue;
/* Report PHY state change to libsas */
- if (state & (1 << phy_no)) {
- if (do_port_check && sas_port) {
+ if (state & BIT(phy_no)) {
+ if (do_port_check && sas_port && sas_port->port_dev) {
struct domain_device *dev = sas_port->port_dev;
_sas_port = sas_port;
- port->id = phy->port_id;
- hisi_sas_refresh_port_id(hisi_hba,
- sas_port, sas_phy->linkrate);
if (DEV_IS_EXPANDER(dev->dev_type))
sas_ha->notify_port_event(sas_phy,
@@ -1045,8 +1113,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
hisi_sas_phy_down(hisi_hba, phy_no, 0);
}
-
- drain_workqueue(hisi_hba->shost->work_q);
}
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
@@ -1063,7 +1129,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
return -1;
- dev_dbg(dev, "controller resetting...\n");
+ dev_info(dev, "controller resetting...\n");
old_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
@@ -1072,6 +1138,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
if (rc) {
dev_warn(dev, "controller reset failed (%d)\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
goto out;
}
spin_lock_irqsave(&hisi_hba->lock, flags);
@@ -1083,15 +1150,14 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
/* Init and wait for PHYs to come up and all libsas event finished. */
hisi_hba->hw->phys_init(hisi_hba);
msleep(1000);
- drain_workqueue(hisi_hba->wq);
- drain_workqueue(shost->work_q);
+ hisi_sas_refresh_port_id(hisi_hba);
+ scsi_unblock_requests(shost);
state = hisi_hba->hw->get_phys_state(hisi_hba);
hisi_sas_rescan_topology(hisi_hba, old_state, state);
- dev_dbg(dev, "controller reset complete\n");
+ dev_info(dev, "controller reset complete\n");
out:
- scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
return rc;
@@ -1134,6 +1200,11 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
+ if (rc2 < 0) {
+ dev_err(dev, "abort task: internal abort (%d)\n", rc2);
+ return TMF_RESP_FUNC_FAILED;
+ }
+
/*
* If the TMF finds that the IO is not in the device and also
* the internal abort does not succeed, then it is safe to
@@ -1151,8 +1222,12 @@ static int hisi_sas_abort_task(struct sas_task *task)
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (task->dev->dev_type == SAS_SATA_DEV) {
- hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "abort task: internal abort failed\n");
+ goto out;
+ }
hisi_sas_dereg_device(hisi_hba, device);
rc = hisi_sas_softreset_ata_disk(device);
}
@@ -1163,7 +1238,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
- if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) {
+ if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
+ task->lldd_task) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_do_release_task(hisi_hba, task, slot);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1178,12 +1254,29 @@ out:
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
+ return TMF_RESP_FUNC_FAILED;
+ }
+ hisi_sas_dereg_device(hisi_hba, device);
tmf_task.tmf = TMF_ABORT_TASK_SET;
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_task(hisi_hba, device);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ }
+
return rc;
}
@@ -1213,20 +1306,25 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- unsigned long flags;
+ struct device *dev = hisi_hba->dev;
int rc = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
if (sas_dev->dev_status != HISI_SAS_DEV_EH)
return TMF_RESP_FUNC_FAILED;
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
- hisi_sas_internal_task_abort(hisi_hba, device,
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
+ return TMF_RESP_FUNC_FAILED;
+ }
hisi_sas_dereg_device(hisi_hba, device);
rc = hisi_sas_debug_I_T_nexus_reset(device);
- if (rc == TMF_RESP_FUNC_COMPLETE) {
+ if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_release_task(hisi_hba, device);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1249,8 +1347,10 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
/* Clear internal IO and then hardreset */
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
- if (rc == TMF_RESP_FUNC_FAILED)
+ if (rc < 0) {
+ dev_err(dev, "lu_reset: internal abort failed\n");
goto out;
+ }
hisi_sas_dereg_device(hisi_hba, device);
phy = sas_get_local_phy(device);
@@ -1266,6 +1366,14 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
} else {
struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "lu_reset: internal abort failed\n");
+ goto out;
+ }
+ hisi_sas_dereg_device(hisi_hba, device);
+
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
if (rc == TMF_RESP_FUNC_COMPLETE) {
spin_lock_irqsave(&hisi_hba->lock, flags);
@@ -1283,8 +1391,14 @@ out:
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
+ HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
- return hisi_sas_controller_reset(hisi_hba);
+ queue_work(hisi_hba->wq, &r.work);
+ wait_for_completion(r.completion);
+ if (r.done)
+ return TMF_RESP_FUNC_COMPLETE;
+
+ return TMF_RESP_FUNC_FAILED;
}
static int hisi_sas_query_task(struct sas_task *task)
@@ -1441,8 +1555,14 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
int res;
+	/*
+	 * If the hw does not implement prep_abort, it either does not
+	 * support internal abort or does not need one. Return
+	 * TMF_RESP_FUNC_FAILED here so the remaining steps proceed as
+	 * if the internal abort had been executed and completed on the CQ.
+	 */
if (!hisi_hba->hw->prep_abort)
- return -EOPNOTSUPP;
+ return TMF_RESP_FUNC_FAILED;
task = sas_alloc_slow_task(GFP_KERNEL);
if (!task)
@@ -1473,9 +1593,11 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
if (slot)
slot->task = NULL;
- dev_err(dev, "internal task abort: timeout.\n");
+ dev_err(dev, "internal task abort: timeout and not done.\n");
+ res = -EIO;
goto exit;
- }
+ } else
+ dev_err(dev, "internal task abort: timeout.\n");
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
@@ -1507,6 +1629,22 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
hisi_sas_port_notify_formed(sas_phy);
}
+static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
+{
+}
+
+static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data)
+{
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+
+ if (!hisi_hba->hw->write_gpio)
+ return -EOPNOTSUPP;
+
+ return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
+ reg_index, reg_count, write_data);
+}
+
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
phy->phy_attached = 0;
@@ -1561,6 +1699,11 @@ EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
+static struct device_attribute *host_attrs[] = {
+ &dev_attr_phy_event_threshold,
+ NULL,
+};
+
static struct scsi_host_template _hisi_sas_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
@@ -1580,6 +1723,7 @@ static struct scsi_host_template _hisi_sas_sht = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+ .shost_attrs = host_attrs,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);
@@ -1597,6 +1741,8 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_query_task = hisi_sas_query_task,
.lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
.lldd_port_formed = hisi_sas_port_formed,
+ .lldd_port_deformed = hisi_sas_port_deformed,
+ .lldd_write_gpio = hisi_sas_write_gpio,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@@ -1657,6 +1803,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
cq->hisi_hba = hisi_hba;
/* Delivery queue structure */
+ spin_lock_init(&dq->lock);
dq->id = i;
dq->hisi_hba = hisi_hba;
@@ -1803,6 +1950,17 @@ void hisi_sas_rst_work_handler(struct work_struct *work)
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
+void hisi_sas_sync_rst_work_handler(struct work_struct *work)
+{
+ struct hisi_sas_rst *rst =
+ container_of(work, struct hisi_sas_rst, work);
+
+ if (!hisi_sas_controller_reset(rst->hisi_hba))
+ rst->done = true;
+ complete(rst->completion);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
+
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
@@ -1909,6 +2067,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
if (IS_ERR(hisi_hba->regs))
goto err_out;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hisi_hba->sgpio_regs))
+ goto err_out;
+ }
+
if (hisi_sas_alloc(hisi_hba, shost)) {
hisi_sas_free(hisi_hba);
goto err_out;
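The probe change above treats the second MEM resource as optional: when the platform does not describe an SGPIO region, the driver simply proceeds without it. A hedged sketch of that optional-mapping pattern (helper name illustrative):

    #include <linux/platform_device.h>
    #include <linux/io.h>

    static void __iomem *map_optional_mem(struct platform_device *pdev, int index)
    {
            struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, index);

            if (!res)               /* resource absent: feature disabled, not an error */
                    return NULL;
            return devm_ioremap_resource(&pdev->dev, res);  /* ERR_PTR on failure */
    }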
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index dc6eca8d6afd..679e76f58a0a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -544,7 +544,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
(0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF));
}
-static void free_device_v1_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
u64 dev_id = sas_dev->device_id;
@@ -1482,7 +1482,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
else if (phy->identify.device_type != SAS_PHY_UNUSED)
phy->identify.target_port_protocols =
SAS_PROTOCOL_SMP;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
@@ -1850,7 +1850,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.hw_init = hisi_sas_v1_init,
.setup_itct = setup_itct_v1_hw,
.sl_notify = sl_notify_v1_hw,
- .free_device = free_device_v1_hw,
+ .clear_itct = clear_itct_v1_hw,
.prep_smp = prep_smp_v1_hw,
.prep_ssp = prep_ssp_v1_hw,
.get_free_slot = get_free_slot_v1_hw,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 5d3467fd728d..4ccb61e2ae5c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -240,7 +240,12 @@
#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
+#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
+#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
+#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
+#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
@@ -952,7 +957,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void free_device_v2_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
@@ -963,10 +968,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
sas_dev->completion = &completion;
- /* SoC bug workaround */
- if (dev_is_sata(sas_dev->sas_device))
- clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
-
/* clear the itct interrupt state */
if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
@@ -981,6 +982,15 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
}
}
+static void free_device_v2_hw(struct hisi_sas_device *sas_dev)
+{
+ struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
+
+ /* SoC bug workaround */
+ if (dev_is_sata(sas_dev->sas_device))
+ clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
+}
+
static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
{
int i, reset_val;
@@ -1177,8 +1187,8 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff857fff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbfe);
hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
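The CHL_INT1_MSK/CHL_INT2_MSK changes above unmask the interrupt sources this patch starts handling; for instance 0x8ffffbff -> 0x8ffffbfe clears bit 0, the identify-frame timeout serviced in int_chnl_int_v2_hw() below. A quick userspace check of which bits a mask change exposes:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t old_msk = 0x8ffffbff, new_msk = 0x8ffffbfe;
            uint32_t unmasked = old_msk & ~new_msk; /* 1 = newly enabled source */
            int bit;

            for (bit = 0; bit < 32; bit++)
                    if (unmasked & (1u << bit))
                            printf("bit %d newly unmasked\n", bit);
            return 0;
    }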
@@ -2356,6 +2366,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
+ dev_dbg(dev, "slot_complete: task(%p) aborted\n", task);
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
@@ -2400,6 +2411,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
(!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK)
>> CMPLT_HDR_ERR_PHASE_OFF;
+ u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
/* Analyse error happens on which phase TX or RX */
if (ERR_ON_TX_PHASE(err_phase))
@@ -2407,6 +2419,16 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
else if (ERR_ON_RX_PHASE(err_phase))
slot_err_v2_hw(hisi_hba, task, slot, 2);
+ if (ts->stat != SAS_DATA_UNDERRUN)
+ dev_info(dev, "erroneous completion iptt=%d task=%p "
+ "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
+ "Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task,
+ complete_hdr->dw0, complete_hdr->dw1,
+ complete_hdr->act, complete_hdr->dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
+
if (unlikely(slot->abort))
return ts->stat;
goto out;
@@ -2456,7 +2478,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
if (!slot->port->port_attached) {
- dev_err(dev, "slot complete: port %d has removed\n",
+		dev_warn(dev, "slot complete: port %d has been removed\n",
slot->port->sas_port.id);
ts->stat = SAS_PHY_DOWN;
}
@@ -2517,7 +2539,7 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
dw1 |= 1 << CMD_HDR_RESET_OFF;
dw1 |= (hisi_sas_get_ata_protocol(
- task->ata_task.fis.command, task->data_dir))
+ &task->ata_task.fis, task->data_dir))
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
hdr->dw1 = cpu_to_le32(dw1);
@@ -2687,7 +2709,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
if (!timer_pending(&hisi_hba->timer))
set_link_timer_quirk(hisi_hba);
}
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -2713,10 +2735,12 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
u32 phy_state, sl_ctrl, txid_auto;
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct hisi_sas_port *port = phy->port;
+ struct device *dev = hisi_hba->dev;
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+ dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
@@ -2813,6 +2837,33 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
}
+static const struct hisi_sas_hw_error port_ecc_axi_error[] = {
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_ERR_OFF),
+ .msg = "dmac_tx_ecc_bad_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_ERR_OFF),
+ .msg = "dmac_rx_ecc_bad_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
+ .msg = "dma_tx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
+ .msg = "dma_tx_axi_rd_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
+ .msg = "dma_rx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
+ .msg = "dma_rx_axi_rd_err",
+ },
+};
+
static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
@@ -2829,40 +2880,55 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff;
while (irq_msk) {
- if (irq_msk & (1 << phy_no)) {
- u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT0);
- u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT1);
- u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT2);
-
- if (irq_value1) {
- if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
- CHL_INT1_DMAC_TX_ECC_ERR_MSK))
- panic("%s: DMAC RX/TX ecc bad error!\
- (0x%x)",
- dev_name(dev), irq_value1);
-
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT1, irq_value1);
- }
+ u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT0);
+ u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT1);
+ u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT2);
+
+ if ((irq_msk & (1 << phy_no)) && irq_value1) {
+ int i;
- if (irq_value2)
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT2, irq_value2);
+ for (i = 0; i < ARRAY_SIZE(port_ecc_axi_error); i++) {
+ const struct hisi_sas_hw_error *error =
+ &port_ecc_axi_error[i];
+
+ if (!(irq_value1 & error->irq_msk))
+ continue;
+
+ dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
+ error->msg, phy_no, irq_value1);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT1, irq_value1);
+ }
- if (irq_value0) {
- if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
- phy_bcast_v2_hw(phy_no, hisi_hba);
+ if ((irq_msk & (1 << phy_no)) && irq_value2) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT0, irq_value0
- & (~CHL_INT0_HOTPLUG_TOUT_MSK)
- & (~CHL_INT0_SL_PHY_ENABLE_MSK)
- & (~CHL_INT0_NOT_RDY_MSK));
+ if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
+ dev_warn(dev, "phy%d identify timeout\n",
+ phy_no);
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
}
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT2, irq_value2);
+ }
+
+ if ((irq_msk & (1 << phy_no)) && irq_value0) {
+ if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
+ phy_bcast_v2_hw(phy_no, hisi_hba);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT0, irq_value0
+ & (~CHL_INT0_HOTPLUG_TOUT_MSK)
+ & (~CHL_INT0_SL_PHY_ENABLE_MSK)
+ & (~CHL_INT0_NOT_RDY_MSK));
}
irq_msk &= ~(1 << phy_no);
phy_no++;
@@ -2906,7 +2972,7 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
val = hisi_sas_read32(hisi_hba, ecc_error->reg);
val &= ecc_error->msk;
val >>= ecc_error->shift;
- dev_warn(dev, ecc_error->msg, irq_value, val);
+ dev_err(dev, ecc_error->msg, irq_value, val);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
}
@@ -3015,12 +3081,12 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
for (; sub->msk || sub->msg; sub++) {
if (!(err_value & sub->msk))
continue;
- dev_warn(dev, "%s (0x%x) found!\n",
+ dev_err(dev, "%s (0x%x) found!\n",
sub->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
} else {
- dev_warn(dev, "%s (0x%x) found!\n",
+ dev_err(dev, "%s (0x%x) found!\n",
axi_error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -3206,7 +3272,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
phy->identify.device_type = SAS_SATA_DEV;
phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
@@ -3392,7 +3458,7 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
udelay(10);
if (cnt++ > 10) {
- dev_info(dev, "wait axi bus state to idle timeout!\n");
+			dev_err(dev, "timed out waiting for axi bus to become idle!\n");
return -1;
}
}
@@ -3408,6 +3474,44 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
return 0;
}
+static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data)
+{
+ struct device *dev = hisi_hba->dev;
+ int phy_no, count;
+
+ if (!hisi_hba->sgpio_regs)
+ return -EOPNOTSUPP;
+
+ switch (reg_type) {
+ case SAS_GPIO_REG_TX:
+ count = reg_count * 4;
+ count = min(count, hisi_hba->n_phy);
+
+ for (phy_no = 0; phy_no < count; phy_no++) {
+ /*
+ * GPIO_TX[n] register has the highest numbered drive
+ * of the four in the first byte and the lowest
+ * numbered drive in the fourth byte.
+ * See SFF-8485 Rev. 0.7 Table 24.
+ */
+ void __iomem *reg_addr = hisi_hba->sgpio_regs +
+ reg_index * 4 + phy_no;
+ int data_idx = phy_no + 3 - (phy_no % 4) * 2;
+
+ writeb(write_data[data_idx], reg_addr);
+ }
+
+ break;
+ default:
+ dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
+ reg_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
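The data_idx arithmetic in write_gpio_v2_hw() encodes the SFF-8485 byte order quoted in the comment: within each 4-byte GPIO_TX group the highest-numbered drive occupies the first byte. A runnable check of the mapping:

    #include <stdio.h>

    int main(void)
    {
            int phy_no;

            for (phy_no = 0; phy_no < 8; phy_no++) {
                    /* reverses byte order within each group of four phys */
                    int data_idx = phy_no + 3 - (phy_no % 4) * 2;

                    printf("phy%d <- write_data[%d]\n", phy_no, data_idx);
            }
            return 0;       /* prints 3,2,1,0 then 7,6,5,4 */
    }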
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
.setup_itct = setup_itct_v2_hw,
@@ -3415,6 +3519,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.alloc_dev = alloc_dev_quirk_v2_hw,
.sl_notify = sl_notify_v2_hw,
.get_wideport_bitmap = get_wideport_bitmap_v2_hw,
+ .clear_itct = clear_itct_v2_hw,
.free_device = free_device_v2_hw,
.prep_smp = prep_smp_v2_hw,
.prep_ssp = prep_ssp_v2_hw,
@@ -3434,6 +3539,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
.soft_reset = soft_reset_v2_hw,
.get_phys_state = get_phys_state_v2_hw,
+ .write_gpio = write_gpio_v2_hw,
};
static int hisi_sas_v2_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 19b1f2ffec17..a1f18689729a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -140,6 +140,7 @@
#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
#define STP_LINK_TIMER (PORT_BASE + 0x120)
+#define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124)
#define CON_CFG_DRIVER (PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
@@ -165,6 +166,8 @@
#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
+#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
+#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
@@ -204,6 +207,13 @@
#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8
#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF)
+/* RAS registers need init */
+#define RAS_BASE (0x6000)
+#define SAS_RAS_INTR0 (RAS_BASE)
+#define SAS_RAS_INTR1 (RAS_BASE + 0x04)
+#define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08)
+#define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c)
+
/* HW dma structures */
/* Delivery queue header */
/* dw0 */
@@ -422,7 +432,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
@@ -496,6 +506,10 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
upper_32_bits(hisi_hba->initial_fis_dma));
+
+ /* RAS registers init */
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
}
static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -588,7 +602,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void free_device_v3_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
@@ -1033,7 +1047,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
dw1 |= 1 << CMD_HDR_RESET_OFF;
dw1 |= (hisi_sas_get_ata_protocol(
- task->ata_task.fis.command, task->data_dir))
+ &task->ata_task.fis, task->data_dir))
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
@@ -1138,7 +1152,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct dev_to_host_fis *fis;
u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
- dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+ dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
initial_fis = &hisi_hba->initial_fis[phy_no];
fis = &initial_fis->fis;
sas_phy->oob_mode = SATA_OOB_MODE;
@@ -1181,7 +1195,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
phy->port_id = port_id;
phy->phy_attached = 1;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -1322,7 +1336,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
if (!(irq_value1 & error->irq_msk))
continue;
- dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
+ dev_err(dev, "%s error (phy%d 0x%x) found!\n",
error->msg, phy_no, irq_value1);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -1331,9 +1345,31 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
CHL_INT1, irq_value1);
}
- if (irq_msk & (8 << (phy_no * 4)) && irq_value2)
+ if (irq_msk & (8 << (phy_no * 4)) && irq_value2) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+
+ if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
+ dev_warn(dev, "phy%d identify timeout\n",
+ phy_no);
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
+
+ }
+
+ if (irq_value2 & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
+ u32 reg_value = hisi_sas_phy_read32(hisi_hba,
+ phy_no, STP_LINK_TIMEOUT_STATE);
+
+ dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
+ phy_no, reg_value);
+ if (reg_value & BIT(4))
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
+ }
+
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT2, irq_value2);
+ }
if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
@@ -1432,12 +1468,12 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
if (!(err_value & sub->msk))
continue;
- dev_warn(dev, "%s error (0x%x) found!\n",
+ dev_err(dev, "%s error (0x%x) found!\n",
sub->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
} else {
- dev_warn(dev, "%s error (0x%x) found!\n",
+ dev_err(dev, "%s error (0x%x) found!\n",
error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -1542,6 +1578,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
memset(ts, 0, sizeof(*ts));
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
+ dev_dbg(dev, "slot complete: task(%p) aborted\n", task);
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
@@ -1583,7 +1620,18 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
/* check for erroneous completion */
if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
+ u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
+
slot_err_v3_hw(hisi_hba, task, slot);
+ if (ts->stat != SAS_DATA_UNDERRUN)
+ dev_info(dev, "erroneous completion iptt=%d task=%p "
+ "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
+ "Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task,
+ complete_hdr->dw0, complete_hdr->dw1,
+ complete_hdr->act, complete_hdr->dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
if (unlikely(slot->abort))
return ts->stat;
goto out;
@@ -1628,7 +1676,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
if (!slot->port->port_attached) {
- dev_err(dev, "slot complete: port %d has removed\n",
+		dev_warn(dev, "slot complete: port %d has been removed\n",
slot->port->sas_port.id);
ts->stat = SAS_PHY_DOWN;
}
@@ -1653,9 +1701,8 @@ static void cq_tasklet_v3_hw(unsigned long val)
struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
struct hisi_hba *hisi_hba = cq->hisi_hba;
struct hisi_sas_slot *slot;
- struct hisi_sas_itct *itct;
struct hisi_sas_complete_v3_hdr *complete_queue;
- u32 rd_point = cq->rd_point, wr_point, dev_id;
+ u32 rd_point = cq->rd_point, wr_point;
int queue = cq->id;
struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
@@ -1671,38 +1718,11 @@ static void cq_tasklet_v3_hw(unsigned long val)
complete_hdr = &complete_queue[rd_point];
- /* Check for NCQ completion */
- if (complete_hdr->act) {
- u32 act_tmp = complete_hdr->act;
- int ncq_tag_count = ffs(act_tmp);
-
- dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
- CMPLT_HDR_DEV_ID_OFF;
- itct = &hisi_hba->itct[dev_id];
-
- /* The NCQ tags are held in the itct header */
- while (ncq_tag_count) {
- __le64 *ncq_tag = &itct->qw4_15[0];
-
- ncq_tag_count -= 1;
- iptt = (ncq_tag[ncq_tag_count / 5]
- >> (ncq_tag_count % 5) * 12) & 0xfff;
-
- slot = &hisi_hba->slot_info[iptt];
- slot->cmplt_queue_slot = rd_point;
- slot->cmplt_queue = queue;
- slot_complete_v3_hw(hisi_hba, slot);
-
- act_tmp &= ~(1 << ncq_tag_count);
- ncq_tag_count = ffs(act_tmp);
- }
- } else {
- iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
- slot = &hisi_hba->slot_info[iptt];
- slot->cmplt_queue_slot = rd_point;
- slot->cmplt_queue = queue;
- slot_complete_v3_hw(hisi_hba, slot);
- }
+ iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
+ slot = &hisi_hba->slot_info[iptt];
+ slot->cmplt_queue_slot = rd_point;
+ slot->cmplt_queue = queue;
+ slot_complete_v3_hw(hisi_hba, slot);
if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
rd_point = 0;
@@ -1951,7 +1971,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
- .free_device = free_device_v3_hw,
+ .clear_itct = clear_itct_v3_hw,
.sl_notify = sl_notify_v3_hw,
.prep_ssp = prep_ssp_v3_hw,
.prep_smp = prep_smp_v3_hw,
@@ -2157,21 +2177,243 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
scsi_host_put(shost);
}
+static const struct hisi_sas_hw_error sas_ras_intr0_nfe[] = {
+ { .irq_msk = BIT(19), .msg = "HILINK_INT" },
+ { .irq_msk = BIT(20), .msg = "HILINK_PLL0_OUT_OF_LOCK" },
+ { .irq_msk = BIT(21), .msg = "HILINK_PLL1_OUT_OF_LOCK" },
+ { .irq_msk = BIT(22), .msg = "HILINK_LOSS_OF_REFCLK0" },
+ { .irq_msk = BIT(23), .msg = "HILINK_LOSS_OF_REFCLK1" },
+ { .irq_msk = BIT(24), .msg = "DMAC0_TX_POISON" },
+ { .irq_msk = BIT(25), .msg = "DMAC1_TX_POISON" },
+ { .irq_msk = BIT(26), .msg = "DMAC2_TX_POISON" },
+ { .irq_msk = BIT(27), .msg = "DMAC3_TX_POISON" },
+ { .irq_msk = BIT(28), .msg = "DMAC4_TX_POISON" },
+ { .irq_msk = BIT(29), .msg = "DMAC5_TX_POISON" },
+ { .irq_msk = BIT(30), .msg = "DMAC6_TX_POISON" },
+ { .irq_msk = BIT(31), .msg = "DMAC7_TX_POISON" },
+};
+
+static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
+ { .irq_msk = BIT(0), .msg = "RXM_CFG_MEM3_ECC2B_INTR" },
+ { .irq_msk = BIT(1), .msg = "RXM_CFG_MEM2_ECC2B_INTR" },
+ { .irq_msk = BIT(2), .msg = "RXM_CFG_MEM1_ECC2B_INTR" },
+ { .irq_msk = BIT(3), .msg = "RXM_CFG_MEM0_ECC2B_INTR" },
+ { .irq_msk = BIT(4), .msg = "HGC_CQE_ECC2B_INTR" },
+ { .irq_msk = BIT(5), .msg = "LM_CFG_IOSTL_ECC2B_INTR" },
+ { .irq_msk = BIT(6), .msg = "LM_CFG_ITCTL_ECC2B_INTR" },
+ { .irq_msk = BIT(7), .msg = "HGC_ITCT_ECC2B_INTR" },
+ { .irq_msk = BIT(8), .msg = "HGC_IOST_ECC2B_INTR" },
+ { .irq_msk = BIT(9), .msg = "HGC_DQE_ECC2B_INTR" },
+ { .irq_msk = BIT(10), .msg = "DMAC0_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(11), .msg = "DMAC1_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(12), .msg = "DMAC2_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(13), .msg = "DMAC3_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(14), .msg = "DMAC4_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(15), .msg = "DMAC5_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(16), .msg = "DMAC6_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(17), .msg = "DMAC7_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(18), .msg = "OOO_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(20), .msg = "HGC_DQE_POISON_INTR" },
+ { .irq_msk = BIT(21), .msg = "HGC_IOST_POISON_INTR" },
+ { .irq_msk = BIT(22), .msg = "HGC_ITCT_POISON_INTR" },
+ { .irq_msk = BIT(23), .msg = "HGC_ITCT_NCQ_POISON_INTR" },
+ { .irq_msk = BIT(24), .msg = "DMAC0_RX_POISON" },
+ { .irq_msk = BIT(25), .msg = "DMAC1_RX_POISON" },
+ { .irq_msk = BIT(26), .msg = "DMAC2_RX_POISON" },
+ { .irq_msk = BIT(27), .msg = "DMAC3_RX_POISON" },
+ { .irq_msk = BIT(28), .msg = "DMAC4_RX_POISON" },
+ { .irq_msk = BIT(29), .msg = "DMAC5_RX_POISON" },
+ { .irq_msk = BIT(30), .msg = "DMAC6_RX_POISON" },
+ { .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
+};
+
+static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ const struct hisi_sas_hw_error *ras_error;
+ bool need_reset = false;
+ u32 irq_value;
+ int i;
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR0);
+ for (i = 0; i < ARRAY_SIZE(sas_ras_intr0_nfe); i++) {
+ ras_error = &sas_ras_intr0_nfe[i];
+ if (ras_error->irq_msk & irq_value) {
+ dev_warn(dev, "SAS_RAS_INTR0: %s(irq_value=0x%x) found.\n",
+ ras_error->msg, irq_value);
+ need_reset = true;
+ }
+ }
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR0, irq_value);
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR1);
+ for (i = 0; i < ARRAY_SIZE(sas_ras_intr1_nfe); i++) {
+ ras_error = &sas_ras_intr1_nfe[i];
+ if (ras_error->irq_msk & irq_value) {
+ dev_warn(dev, "SAS_RAS_INTR1: %s(irq_value=0x%x) found.\n",
+ ras_error->msg, irq_value);
+ need_reset = true;
+ }
+ }
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);
+
+ return need_reset;
+}
+
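process_non_fatal_error_v3_hw() walks the two {mask, message} tables against the latched RAS registers and requests a reset if anything fired. The same table-scan shape in self-contained userspace form (table truncated to two entries):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct hw_error { uint32_t irq_msk; const char *msg; };

    static const struct hw_error intr0_nfe[] = {
            { 1u << 19, "HILINK_INT" },
            { 1u << 24, "DMAC0_TX_POISON" },
    };

    static bool scan_ras_errors(uint32_t irq_value)
    {
            bool need_reset = false;
            unsigned int i;

            for (i = 0; i < sizeof(intr0_nfe) / sizeof(intr0_nfe[0]); i++)
                    if (intr0_nfe[i].irq_msk & irq_value) {
                            printf("%s (irq_value=0x%x) found\n",
                                   intr0_nfe[i].msg, irq_value);
                            need_reset = true;      /* any hit forces a reset */
                    }
            return need_reset;
    }

    int main(void)
    {
            return scan_ras_errors(1u << 19) ? 0 : 1;
    }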
+static pci_ers_result_t hisi_sas_error_detected_v3_hw(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+
+ dev_info(dev, "PCI error: detected callback, state(%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (process_non_fatal_error_v3_hw(hisi_hba))
+ return PCI_ERS_RESULT_NEED_RESET;
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static pci_ers_result_t hisi_sas_mmio_enabled_v3_hw(struct pci_dev *pdev)
+{
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
+
+ dev_info(dev, "PCI error: slot reset callback!!\n");
+ queue_work(hisi_hba->wq, &r.work);
+ wait_for_completion(r.completion);
+ if (r.done)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
enum {
/* instances of the controller */
hip08,
};
+static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ u32 device_state, status;
+ int rc;
+ u32 reg_val;
+ unsigned long flags;
+
+ if (!pdev->pm_cap) {
+ dev_err(dev, "PCI PM not supported\n");
+ return -ENODEV;
+ }
+
+ set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ scsi_block_requests(shost);
+ set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ flush_workqueue(hisi_hba->wq);
+ /* disable DQ/PHY/bus */
+ interrupt_disable_v3_hw(hisi_hba);
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
+ hisi_sas_kill_tasklets(hisi_hba);
+
+ hisi_sas_stop_phys(hisi_hba);
+
+ reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL);
+ reg_val |= 0x1;
+ hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL, reg_val);
+
+ /* wait until bus idle */
+ rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
+ AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
+ if (rc) {
+ dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
+ return rc;
+ }
+
+ hisi_sas_init_mem(hisi_hba);
+
+ device_state = pci_choose_state(pdev, state);
+ dev_warn(dev, "entering operating state [D%d]\n",
+ device_state);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, device_state);
+
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_tasks(hisi_hba);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+ sas_suspend_ha(sha);
+ return 0;
+}
+
+static int hisi_sas_v3_resume(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct device *dev = hisi_hba->dev;
+ unsigned int rc;
+ u32 device_state = pdev->current_state;
+
+ dev_warn(dev, "resuming from operating state [D%d]\n",
+ device_state);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc)
+ dev_err(dev, "enable device failed during resume (%d)\n", rc);
+
+ pci_set_master(pdev);
+ scsi_unblock_requests(shost);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+
+ sas_prep_resume_ha(sha);
+ init_reg_v3_hw(hisi_hba);
+ hisi_hba->hw->phys_init(hisi_hba);
+ sas_resume_ha(sha);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+
+ return 0;
+}
+
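The suspend path uses readl_poll_timeout() to wait for the AXI bus to report idle (status == 0x3) within a bounded time. A userspace sketch of that poll-until-condition-or-timeout idiom; the kernel helper additionally sleeps between reads and returns -ETIMEDOUT on expiry:

    #include <stdio.h>

    static unsigned int reads;

    static unsigned int read_status(void)   /* stand-in for readl() */
    {
            return ++reads >= 3 ? 0x3 : 0x0;        /* goes idle on 3rd read */
    }

    static int poll_for(unsigned int want, int max_tries)
    {
            int i;

            for (i = 0; i < max_tries; i++)
                    if (read_status() == want)
                            return 0;       /* condition met */
            return -1;                      /* timed out */
    }

    int main(void)
    {
            printf("axi idle: %s\n", poll_for(0x3, 10) ? "timeout" : "ok");
            return 0;
    }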
static const struct pci_device_id sas_v3_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
{}
};
+static const struct pci_error_handlers hisi_sas_err_handler = {
+ .error_detected = hisi_sas_error_detected_v3_hw,
+ .mmio_enabled = hisi_sas_mmio_enabled_v3_hw,
+ .slot_reset = hisi_sas_slot_reset_v3_hw,
+};
+
static struct pci_driver sas_v3_pci_driver = {
.name = DRV_NAME,
.id_table = sas_v3_pci_table,
.probe = hisi_sas_v3_probe,
.remove = hisi_sas_v3_remove,
+ .suspend = hisi_sas_v3_suspend,
+ .resume = hisi_sas_v3_resume,
+ .err_handler = &hisi_sas_err_handler,
};
module_pci_driver(sas_v3_pci_driver);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index fe3a0da3ec97..57bf43e34863 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -318,6 +318,9 @@ static void scsi_host_dev_release(struct device *dev)
scsi_proc_hostdir_rm(shost->hostt);
+ /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
+ rcu_barrier();
+
if (shost->tmf_work_q)
destroy_workqueue(shost->tmf_work_q);
if (shost->ehandler)
@@ -325,6 +328,8 @@ static void scsi_host_dev_release(struct device *dev)
if (shost->work_q)
destroy_workqueue(shost->work_q);
+ destroy_rcu_head(&shost->rcu);
+
if (shost->shost_state == SHOST_CREATED) {
/*
* Free the shost_dev device name here if scsi_host_alloc()
@@ -399,6 +404,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
+ init_rcu_head(&shost->rcu);
index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
if (index < 0)
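The rcu_barrier() added to scsi_host_dev_release() is needed because objects handed to call_rcu() may still have callbacks in flight; the release path must not destroy anything those callbacks touch. A hedged sketch of the general pattern (struct and names illustrative):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            struct rcu_head rcu;
            /* ... payload ... */
    };

    static void foo_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct foo, rcu));
    }

    static void foo_teardown(struct foo *f)
    {
            call_rcu(&f->rcu, foo_free_rcu);        /* free after a grace period */
            rcu_barrier();                          /* wait for all pending callbacks */
    }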
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 287e5eb0723f..87b260e403ec 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3518,7 +3518,7 @@ out:
if (rc != IO_OK)
hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
- "Error, could not get enclosure information\n");
+ "Error, could not get enclosure information");
}
static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
@@ -4619,21 +4619,13 @@ sglist_finished:
return 0;
}
-#define BUFLEN 128
static inline void warn_zero_length_transfer(struct ctlr_info *h,
u8 *cdb, int cdb_len,
const char *func)
{
- char buf[BUFLEN];
- int outlen;
- int i;
-
- outlen = scnprintf(buf, BUFLEN,
- "%s: Blocking zero-length request: CDB:", func);
- for (i = 0; i < cdb_len; i++)
- outlen += scnprintf(buf+outlen, BUFLEN - outlen,
- "%02hhx", cdb[i]);
- dev_warn(&h->pdev->dev, "%s\n", buf);
+ dev_warn(&h->pdev->dev,
+ "%s: Blocking zero-length request: CDB:%*phN\n",
+ func, cdb_len, cdb);
}
#define IO_ACCEL_INELIGIBLE 1
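The hpsa cleanup above swaps a hand-rolled hex loop for printk's %*phN extension, which hex-dumps a buffer using the field width as the byte count. A minimal kernel-style sketch (kernel printk only; userspace printf has no %ph):

    #include <linux/printk.h>
    #include <linux/types.h>

    static void dump_cdb(const u8 *cdb, int cdb_len)
    {
            /* width '*' carries cdb_len; bytes print back-to-back in hex */
            pr_warn("Blocking zero-length request: CDB:%*phN\n", cdb_len, cdb);
    }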
@@ -8223,8 +8215,6 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
if (!device)
continue;
- if (!device->scsi3addr)
- continue;
if (!hpsa_vpd_page_supported(h, device->scsi3addr,
HPSA_VPD_LV_IOACCEL_STATUS))
continue;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 0d2f7eb3acb6..b1b1d3a3b173 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -181,7 +181,7 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
break;
default:
break;
- };
+ }
}
/**
@@ -220,7 +220,7 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
default:
break;
- };
+ }
}
#else
@@ -464,7 +464,7 @@ static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
default:
vhost->state = state;
break;
- };
+ }
return rc;
}
@@ -500,7 +500,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
break;
default:
break;
- };
+ }
break;
case IBMVFC_HOST_ACTION_TGT_INIT:
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
@@ -515,7 +515,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
default:
vhost->action = action;
break;
- };
+ }
break;
case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -526,7 +526,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
default:
vhost->action = action;
break;
- };
+ }
}
/**
@@ -1601,7 +1601,7 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
case IBMVFC_ACTIVE:
result = 0;
break;
- };
+ }
return result;
}
@@ -1856,7 +1856,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
break;
default:
return -ENOTSUPP;
- };
+ }
if (port_id == -1)
return -EINVAL;
@@ -2661,7 +2661,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
break;
- };
+ }
break;
case IBMVFC_AE_LINK_UP:
@@ -2715,7 +2715,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
default:
dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
break;
- };
+ }
}
/**
@@ -3351,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
rsp->status, rsp->error, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3451,7 +3451,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3522,7 +3522,7 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
default:
tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
break;
- };
+ }
if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
@@ -3626,7 +3626,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
ibmvfc_get_fc_type(fc_reason), fc_reason,
ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3838,7 +3838,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
rsp->fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -4236,7 +4236,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_REENABLE:
default:
break;
- };
+ }
return 1;
}
@@ -4464,7 +4464,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
break;
default:
break;
- };
+ }
spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
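
Every ibmvfc hunk above makes the same mechanical fix: a semicolon after a switch block's closing brace is a null statement, which checkpatch and sparse flag. The before/after shape, reduced to a hypothetical helper:

static void set_state_before(int state)
{
	switch (state) {
	default:
		break;
	};	/* stray ';' — a harmless but noisy empty statement */
}

static void set_state_after(int state)
{
	switch (state) {
	default:
		break;
	}	/* the closing brace alone ends the statement */
}
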
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 2799a6b08f73..c3a76af9f5fa 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -122,7 +122,7 @@ static bool connection_broken(struct scsi_info *vscsi)
cpu_to_be64(buffer[MSG_HI]),
cpu_to_be64(buffer[MSG_LOW]));
- pr_debug("connection_broken: rc %ld\n", h_return_code);
+ dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code);
if (h_return_code == H_CLOSED)
rc = true;
@@ -210,7 +210,7 @@ static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
}
} while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
- pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
+ dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
return rc;
}
@@ -291,9 +291,9 @@ static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
ibmvscsis_delete_client_info(vscsi, false);
}
- pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
- vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
- vscsi->phyp_acr_state);
+ dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
+ vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
+ vscsi->phyp_acr_state);
}
return rc;
}
@@ -428,8 +428,8 @@ static void ibmvscsis_disconnect(struct work_struct *work)
vscsi->flags |= DISCONNECT_SCHEDULED;
vscsi->flags &= ~SCHEDULE_DISCONNECT;
- pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
/*
* check which state we are in and see if we
@@ -540,13 +540,14 @@ static void ibmvscsis_disconnect(struct work_struct *work)
}
if (wait_idle) {
- pr_debug("disconnect start wait, active %d, sched %d\n",
- (int)list_empty(&vscsi->active_q),
- (int)list_empty(&vscsi->schedule_q));
+ dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n",
+ (int)list_empty(&vscsi->active_q),
+ (int)list_empty(&vscsi->schedule_q));
if (!list_empty(&vscsi->active_q) ||
!list_empty(&vscsi->schedule_q)) {
vscsi->flags |= WAIT_FOR_IDLE;
- pr_debug("disconnect flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n",
+ vscsi->flags);
/*
* This routine cannot be called with the interrupt
* lock held.
@@ -555,7 +556,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
wait_for_completion(&vscsi->wait_idle);
spin_lock_bh(&vscsi->intr_lock);
}
- pr_debug("disconnect stop wait\n");
+ dev_dbg(&vscsi->dev, "disconnect stop wait\n");
ibmvscsis_adapter_idle(vscsi);
}
@@ -597,8 +598,8 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
vscsi->flags |= flag_bits;
- pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
- new_state, flag_bits, vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
+ new_state, flag_bits, vscsi->flags, vscsi->state);
if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
vscsi->flags |= SCHEDULE_DISCONNECT;
@@ -648,8 +649,8 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
}
}
- pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
- vscsi->flags, vscsi->new_state);
+ dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
+ vscsi->flags, vscsi->new_state);
}
/**
@@ -724,7 +725,8 @@ static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
break;
case H_CLOSED:
- pr_warn("init_msg: failed to send, rc %ld\n", rc);
+ dev_warn(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
rc = 0;
break;
}
@@ -768,7 +770,7 @@ static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
long rc = ADAPT_SUCCESS;
- pr_debug("init_msg: state 0x%hx\n", vscsi->state);
+ dev_dbg(&vscsi->dev, "init_msg: state 0x%hx\n", vscsi->state);
rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
(u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
@@ -776,10 +778,10 @@ static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
if (rc == H_SUCCESS) {
vscsi->client_data.partition_number =
be64_to_cpu(*(u64 *)vscsi->map_buf);
- pr_debug("init_msg, part num %d\n",
- vscsi->client_data.partition_number);
+ dev_dbg(&vscsi->dev, "init_msg, part num %d\n",
+ vscsi->client_data.partition_number);
} else {
- pr_debug("init_msg h_vioctl rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "init_msg h_vioctl rc %ld\n", rc);
rc = ADAPT_SUCCESS;
}
@@ -813,7 +815,8 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
if (rc == H_SUCCESS)
vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
else if (rc != H_NOT_FOUND)
- pr_err("Error from Enable Prepare for Suspend: %ld\n", rc);
+ dev_err(&vscsi->dev, "Error from Enable Prepare for Suspend: %ld\n",
+ rc);
vscsi->flags &= PRESERVE_FLAG_FIELDS;
vscsi->rsp_q_timer.timer_pops = 0;
@@ -822,8 +825,8 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
rc = vio_enable_interrupts(vscsi->dma_dev);
if (rc) {
- pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
- rc);
+ dev_warn(&vscsi->dev, "establish_new_q: failed to enable interrupts, rc %ld\n",
+ rc);
return rc;
}
@@ -883,7 +886,7 @@ static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
int bytes;
long rc = ADAPT_SUCCESS;
- pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", vscsi->flags);
/* don't reset, the client did it for us */
if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
@@ -906,7 +909,8 @@ static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
}
if (rc != ADAPT_SUCCESS) {
- pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "reset_queue: reg_crq rc %ld\n",
+ rc);
vscsi->state = ERR_DISCONNECTED;
vscsi->flags |= RESPONSE_Q_DOWN;
@@ -985,14 +989,15 @@ static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
/* See if there is a Resume event in the queue */
crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
- pr_debug("ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
- vscsi->flags, vscsi->state, (int)crq->valid);
+ dev_dbg(&vscsi->dev, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
+ vscsi->flags, vscsi->state, (int)crq->valid);
if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
0, 0);
if (rc) {
- pr_err("Ready for Suspend Vioctl failed: %ld\n", rc);
+ dev_err(&vscsi->dev, "Ready for Suspend Vioctl failed: %ld\n",
+ rc);
rc = 0;
}
} else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
@@ -1012,7 +1017,7 @@ static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
(crq->format != RESUME_FROM_SUSP)))
- pr_err("Invalid element in CRQ after Prepare for Suspend");
+ dev_err(&vscsi->dev, "Invalid element in CRQ after Prepare for Suspend");
}
vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);
@@ -1036,8 +1041,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
{
long rc = ADAPT_SUCCESS;
- pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n",
- (int)crq->format, vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "trans_event: format %d, flags 0x%x, state 0x%hx\n",
+ (int)crq->format, vscsi->flags, vscsi->state);
switch (crq->format) {
case MIGRATED:
@@ -1073,14 +1078,14 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
!list_empty(&vscsi->schedule_q) ||
!list_empty(&vscsi->waiting_rsp) ||
!list_empty(&vscsi->active_q)) {
- pr_debug("debit %d, sched %d, wait %d, active %d\n",
- vscsi->debit,
- (int)list_empty(&vscsi->schedule_q),
- (int)list_empty(&vscsi->waiting_rsp),
- (int)list_empty(&vscsi->active_q));
- pr_warn("connection lost with outstanding work\n");
+ dev_dbg(&vscsi->dev, "debit %d, sched %d, wait %d, active %d\n",
+ vscsi->debit,
+ (int)list_empty(&vscsi->schedule_q),
+ (int)list_empty(&vscsi->waiting_rsp),
+ (int)list_empty(&vscsi->active_q));
+ dev_warn(&vscsi->dev, "connection lost with outstanding work\n");
} else {
- pr_debug("trans_event: SRP Processing, but no outstanding work\n");
+ dev_dbg(&vscsi->dev, "trans_event: SRP Processing, but no outstanding work\n");
}
ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
@@ -1097,8 +1102,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
break;
case PREPARE_FOR_SUSPEND:
- pr_debug("Prep for Suspend, crq status = 0x%x\n",
- (int)crq->status);
+ dev_dbg(&vscsi->dev, "Prep for Suspend, crq status = 0x%x\n",
+ (int)crq->status);
switch (vscsi->state) {
case ERR_DISCONNECTED:
case WAIT_CONNECTION:
@@ -1119,15 +1124,15 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
case ERR_DISCONNECT:
case ERR_DISCONNECT_RECONNECT:
case WAIT_IDLE:
- pr_err("Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
- vscsi->state);
+ dev_err(&vscsi->dev, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
+ vscsi->state);
break;
}
break;
case RESUME_FROM_SUSP:
- pr_debug("Resume from Suspend, crq status = 0x%x\n",
- (int)crq->status);
+ dev_dbg(&vscsi->dev, "Resume from Suspend, crq status = 0x%x\n",
+ (int)crq->status);
if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
} else {
@@ -1152,8 +1157,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
rc = vscsi->flags & SCHEDULE_DISCONNECT;
- pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
- vscsi->flags, vscsi->state, rc);
+ dev_dbg(&vscsi->dev, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
+ vscsi->flags, vscsi->state, rc);
return rc;
}
@@ -1175,8 +1180,8 @@ static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
bool ack = true;
volatile u8 valid;
- pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
- vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+ dev_dbg(&vscsi->dev, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
+ vscsi->flags, vscsi->state, vscsi->cmd_q.index);
rc = vscsi->flags & SCHEDULE_DISCONNECT;
crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
@@ -1204,7 +1209,7 @@ poll_work:
* if a transport event has occurred leave
* everything but transport events on the queue
*/
- pr_debug("poll_cmd_q, ignoring\n");
+ dev_dbg(&vscsi->dev, "poll_cmd_q, ignoring\n");
/*
* need to decrement the queue index so we can
@@ -1233,7 +1238,7 @@ poll_work:
if (ack) {
vio_enable_interrupts(vscsi->dma_dev);
ack = false;
- pr_debug("poll_cmd_q, reenabling interrupts\n");
+ dev_dbg(&vscsi->dev, "poll_cmd_q, reenabling interrupts\n");
}
valid = crq->valid;
dma_rmb();
@@ -1241,7 +1246,7 @@ poll_work:
goto poll_work;
}
- pr_debug("Leaving poll_cmd_q: rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving poll_cmd_q: rc %ld\n", rc);
}
/**
@@ -1258,9 +1263,9 @@ static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
{
struct ibmvscsis_cmd *cmd, *nxt;
- pr_debug("free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
- (int)list_empty(&vscsi->waiting_rsp),
- vscsi->rsp_q_timer.started);
+ dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
+ (int)list_empty(&vscsi->waiting_rsp),
+ vscsi->rsp_q_timer.started);
list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
list_del(&cmd->list);
@@ -1317,8 +1322,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
int free_qs = false;
long rc = 0;
- pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
/* Only need to free qs if we're disconnecting from client */
if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
@@ -1336,7 +1341,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
break;
case ERR_DISCONNECT_RECONNECT:
ibmvscsis_reset_queue(vscsi);
- pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "adapter_idle, disc_rec: flags 0x%x\n",
+ vscsi->flags);
break;
case ERR_DISCONNECT:
@@ -1347,8 +1353,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->state = ERR_DISCONNECTED;
else
vscsi->state = WAIT_ENABLED;
- pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, disc: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
break;
case WAIT_IDLE:
@@ -1370,15 +1376,15 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->flags &= ~DISCONNECT_SCHEDULED;
}
- pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, wait: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
ibmvscsis_poll_cmd_q(vscsi);
break;
case ERR_DISCONNECTED:
vscsi->flags &= ~DISCONNECT_SCHEDULED;
- pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
break;
default:
@@ -1419,13 +1425,13 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->phyp_acr_state = 0;
vscsi->phyp_acr_flags = 0;
- pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
- vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
- vscsi->phyp_acr_state);
+ dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
+ vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
+ vscsi->phyp_acr_state);
}
- pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
- vscsi->flags, vscsi->state, vscsi->new_state);
+ dev_dbg(&vscsi->dev, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
+ vscsi->flags, vscsi->state, vscsi->new_state);
}
/**
@@ -1464,8 +1470,8 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
cmd->init_time = mftb();
iue->remote_token = crq->IU_data_ptr;
iue->iu_len = len;
- pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n",
- be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
+ dev_dbg(&vscsi->dev, "copy_crq: ioba 0x%llx, init_time 0x%llx\n",
+ be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
break;
case H_PERMISSION:
if (connection_broken(vscsi))
@@ -1536,10 +1542,10 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
if (connection_broken(vscsi))
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
}
- pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n",
- rc);
- pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
- be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
+ dev_warn(&vscsi->dev, "adapter_info: h_copy_rdma from client failed, rc %ld\n",
+ rc);
+ dev_dbg(&vscsi->dev, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
+ be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
flag_bits);
goto free_dma;
@@ -1595,7 +1601,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
free_dma:
dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
- pr_debug("Leaving adapter_info, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving adapter_info, rc %ld\n", rc);
return rc;
}
@@ -1629,7 +1635,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
*/
min_len = offsetof(struct capabilities, migration);
if ((olen < min_len) || (olen > PAGE_SIZE)) {
- pr_warn("cap_mad: invalid len %d\n", olen);
+ dev_warn(&vscsi->dev, "cap_mad: invalid len %d\n", olen);
mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
return 0;
}
@@ -1654,9 +1660,9 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
common = (struct mad_capability_common *)&cap->migration;
while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
- pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n",
- len, be32_to_cpu(common->cap_type),
- be16_to_cpu(common->length));
+ dev_dbg(&vscsi->dev, "cap_mad: len left %hd, cap type %d, cap len %hd\n",
+ len, be32_to_cpu(common->cap_type),
+ be16_to_cpu(common->length));
cap_len = be16_to_cpu(common->length);
if (cap_len > len) {
@@ -1673,7 +1679,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
switch (common->cap_type) {
default:
- pr_debug("cap_mad: unsupported capability\n");
+ dev_dbg(&vscsi->dev, "cap_mad: unsupported capability\n");
common->server_support = 0;
flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
cap->flags &= ~flag;
@@ -1693,8 +1699,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
be64_to_cpu(mad->buffer));
if (rc != H_SUCCESS) {
- pr_debug("cap_mad: failed to copy to client, rc %ld\n",
- rc);
+ dev_dbg(&vscsi->dev, "cap_mad: failed to copy to client, rc %ld\n",
+ rc);
if (rc == H_PERMISSION) {
if (connection_broken(vscsi))
@@ -1702,8 +1708,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
CLIENT_FAILED);
}
- pr_warn("cap_mad: error copying data to client, rc %ld\n",
- rc);
+ dev_warn(&vscsi->dev, "cap_mad: error copying data to client, rc %ld\n",
+ rc);
ibmvscsis_post_disconnect(vscsi,
ERR_DISCONNECT_RECONNECT,
flag_bits);
@@ -1712,8 +1718,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
- pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n",
- rc, vscsi->client_cap);
+ dev_dbg(&vscsi->dev, "Leaving cap_mad, rc %ld, client_cap 0x%x\n",
+ rc, vscsi->client_cap);
return rc;
}
@@ -1749,7 +1755,7 @@ static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
vscsi->fast_fail = true;
mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
} else {
- pr_warn("fast fail mad sent after login\n");
+ dev_warn(&vscsi->dev, "fast fail mad sent after login\n");
mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
}
break;
@@ -1809,9 +1815,9 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
*/
if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
(vscsi->state == SRP_PROCESSING)) {
- pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
- vscsi->flags, (int)vscsi->rsp_q_timer.started,
- vscsi->rsp_q_timer.timer_pops);
+ dev_dbg(&vscsi->dev, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
+ vscsi->flags, (int)vscsi->rsp_q_timer.started,
+ vscsi->rsp_q_timer.timer_pops);
/*
* Check if the timer is running; if it
@@ -1947,8 +1953,9 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
be64_to_cpu(msg_hi),
be64_to_cpu(cmd->rsp.tag));
- pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
- cmd, be64_to_cpu(cmd->rsp.tag), rc);
+ dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+ cmd, be64_to_cpu(cmd->rsp.tag),
+ rc);
/* if all ok free up the command
* element resources
@@ -2003,7 +2010,8 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
list_add_tail(&cmd->list, &vscsi->waiting_rsp);
ibmvscsis_send_messages(vscsi);
} else {
- pr_debug("Error sending mad response, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Error sending mad response, rc %ld\n",
+ rc);
if (rc == H_PERMISSION) {
if (connection_broken(vscsi))
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
@@ -2039,8 +2047,8 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
* expecting a response.
*/
case WAIT_CONNECTION:
- pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n",
- vscsi->flags);
+ dev_dbg(&vscsi->dev, "mad: in Wait Connection state, ignoring MAD, flags %d\n",
+ vscsi->flags);
return ADAPT_SUCCESS;
case SRP_PROCESSING:
@@ -2075,12 +2083,12 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
if (!rc) {
mad = (struct mad_common *)&vio_iu(iue)->mad;
- pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
+ dev_dbg(&vscsi->dev, "mad: type %d\n", be32_to_cpu(mad->type));
rc = ibmvscsis_process_mad(vscsi, iue);
- pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
- rc);
+ dev_dbg(&vscsi->dev, "mad: status %hd, rc %ld\n",
+ be16_to_cpu(mad->status), rc);
if (!rc)
ibmvscsis_send_mad_resp(vscsi, cmd, crq);
@@ -2088,7 +2096,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
ibmvscsis_free_cmd_resources(vscsi, cmd);
}
- pr_debug("Leaving mad, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving mad, rc %ld\n", rc);
return rc;
}
@@ -2211,16 +2219,17 @@ static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
{
char *name = tport->tport_name;
struct ibmvscsis_nexus *nexus;
+ struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
int rc;
if (tport->ibmv_nexus) {
- pr_debug("tport->ibmv_nexus already exists\n");
+ dev_dbg(&vscsi->dev, "tport->ibmv_nexus already exists\n");
return 0;
}
nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
if (!nexus) {
- pr_err("Unable to allocate struct ibmvscsis_nexus\n");
+ dev_err(&vscsi->dev, "Unable to allocate struct ibmvscsis_nexus\n");
return -ENOMEM;
}
@@ -2316,7 +2325,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
cmd->rsp.format = VIOSRP_SRP_FORMAT;
cmd->rsp.tag = req->tag;
- pr_debug("srp_login: reason 0x%x\n", reason);
+ dev_dbg(&vscsi->dev, "srp_login: reason 0x%x\n", reason);
if (reason)
rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
@@ -2333,7 +2342,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
ibmvscsis_free_cmd_resources(vscsi, cmd);
}
- pr_debug("Leaving srp_login, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving srp_login, rc %ld\n", rc);
return rc;
}
@@ -2415,8 +2424,8 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
case SRP_TSK_MGMT:
tsk = &vio_iu(iue)->srp.tsk_mgmt;
- pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
- tsk->tag);
+ dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n",
+ tsk->tag, tsk->tag);
cmd->rsp.tag = tsk->tag;
vscsi->debit += 1;
cmd->type = TASK_MANAGEMENT;
@@ -2425,8 +2434,8 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
break;
case SRP_CMD:
- pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
- srp->tag);
+ dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n",
+ srp->tag, srp->tag);
cmd->rsp.tag = srp->tag;
vscsi->debit += 1;
cmd->type = SCSI_CDB;
@@ -2603,7 +2612,7 @@ static int read_dma_window(struct scsi_info *vscsi)
"ibm,my-dma-window",
NULL);
if (!dma_window) {
- pr_err("Couldn't find ibm,my-dma-window property\n");
+ dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n");
return -1;
}
@@ -2613,7 +2622,7 @@ static int read_dma_window(struct scsi_info *vscsi)
prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
NULL);
if (!prop) {
- pr_warn("Couldn't find ibm,#dma-address-cells property\n");
+ dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n");
dma_window++;
} else {
dma_window += be32_to_cpu(*prop);
@@ -2622,7 +2631,7 @@ static int read_dma_window(struct scsi_info *vscsi)
prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
NULL);
if (!prop) {
- pr_warn("Couldn't find ibm,#dma-size-cells property\n");
+ dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n");
dma_window++;
} else {
dma_window += be32_to_cpu(*prop);
@@ -2808,8 +2817,8 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi,
srp_tsk->lun.scsi_lun[0] &= 0x3f;
- pr_debug("calling submit_tmr, func %d\n",
- srp_tsk->tsk_mgmt_func);
+ dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n",
+ srp_tsk->tsk_mgmt_func);
rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
scsilun_to_int(&srp_tsk->lun), srp_tsk,
tcm_type, GFP_KERNEL, tag_to_abort, 0);
@@ -3113,8 +3122,8 @@ static long srp_build_response(struct scsi_info *vscsi,
if (cmd->type == SCSI_CDB) {
rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
if (rsp->status) {
- pr_debug("build_resp: cmd %p, scsi status %d\n", cmd,
- (int)rsp->status);
+ dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n",
+ cmd, (int)rsp->status);
ibmvscsis_determine_resid(se_cmd, rsp);
if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
rsp->sense_data_len =
@@ -3127,7 +3136,8 @@ static long srp_build_response(struct scsi_info *vscsi,
rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
UCSOLNT_RESP_SHIFT;
} else if (cmd->flags & CMD_FAST_FAIL) {
- pr_debug("build_resp: cmd %p, fast fail\n", cmd);
+ dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n",
+ cmd);
rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
UCSOLNT_RESP_SHIFT;
} else {
@@ -3340,7 +3350,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
spin_lock_bh(&vscsi->intr_lock);
- pr_debug("got interrupt\n");
+ dev_dbg(&vscsi->dev, "got interrupt\n");
/*
* if we are in a path where we are waiting for all pending commands
@@ -3350,8 +3360,8 @@ static void ibmvscsis_handle_crq(unsigned long data)
if (TARGET_STOP(vscsi)) {
vio_enable_interrupts(vscsi->dma_dev);
- pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
return;
}
@@ -3414,20 +3424,20 @@ cmd_work:
if (ack) {
vio_enable_interrupts(vscsi->dma_dev);
ack = false;
- pr_debug("handle_crq, reenabling interrupts\n");
+ dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n");
}
valid = crq->valid;
dma_rmb();
if (valid)
goto cmd_work;
} else {
- pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
- vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+ dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
+ vscsi->flags, vscsi->state, vscsi->cmd_q.index);
}
- pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
- (int)list_empty(&vscsi->schedule_q), vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
+ (int)list_empty(&vscsi->schedule_q), vscsi->flags,
+ vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
}
@@ -3443,7 +3453,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
if (!vscsi) {
rc = -ENOMEM;
- pr_err("probe: allocation of adapter failed\n");
+ dev_err(&vdev->dev, "probe: allocation of adapter failed\n");
return rc;
}
@@ -3456,14 +3466,14 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
dev_name(&vdev->dev));
- pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
+ dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name);
rc = read_dma_window(vscsi);
if (rc)
goto free_adapter;
- pr_debug("Probe: liobn 0x%x, riobn 0x%x\n",
- vscsi->dds.window[LOCAL].liobn,
- vscsi->dds.window[REMOTE].liobn);
+ dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
+ vscsi->dds.window[LOCAL].liobn,
+ vscsi->dds.window[REMOTE].liobn);
strcpy(vscsi->eye, "VSCSI ");
strncat(vscsi->eye, vdev->name, MAX_EYE);
@@ -3541,8 +3551,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
* client can connect" and the client isn't activated yet.
* We'll make the call again when he sends an init msg.
*/
- pr_debug("probe hrc %ld, client partition num %d\n",
- hrc, vscsi->client_data.partition_number);
+ dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n",
+ hrc, vscsi->client_data.partition_number);
tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
(unsigned long)vscsi);
@@ -3602,7 +3612,7 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
{
struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
- pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
+ dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
spin_lock_bh(&vscsi->intr_lock);
ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
@@ -3766,14 +3776,16 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
* attempt an srp_transfer_data.
*/
if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
- pr_err("write_pending failed since: %d\n", vscsi->flags);
+ dev_err(&vscsi->dev, "write_pending failed since: %d\n",
+ vscsi->flags);
return -EIO;
}
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
1, 1);
if (rc) {
- pr_err("srp_transfer_data() failed: %d\n", rc);
+ dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc);
return -EIO;
}
/*
@@ -3811,7 +3823,7 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
1);
if (rc) {
- pr_err("srp_transfer_data failed: %d\n", rc);
+ dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
sd = se_cmd->sense_buffer;
se_cmd->scsi_sense_length = 18;
memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
@@ -3834,7 +3846,7 @@ static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
struct scsi_info *vscsi = cmd->adapter;
uint len;
- pr_debug("queue_status %p\n", se_cmd);
+ dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd);
srp_build_response(vscsi, cmd, &len);
cmd->rsp.format = SRP_FORMAT;
@@ -3854,8 +3866,8 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
uint len;
- pr_debug("queue_tm_rsp %p, status %d\n",
- se_cmd, (int)se_cmd->se_tmr_req->response);
+ dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n",
+ se_cmd, (int)se_cmd->se_tmr_req->response);
if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
@@ -3877,8 +3889,12 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
{
- pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n",
- se_cmd, se_cmd->tag);
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct scsi_info *vscsi = cmd->adapter;
+
+ dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n",
+ se_cmd, se_cmd->tag);
}
static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
@@ -3886,12 +3902,14 @@ static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
const char *name)
{
struct ibmvscsis_tport *tport;
+ struct scsi_info *vscsi;
tport = ibmvscsis_lookup_port(name);
if (tport) {
+ vscsi = container_of(tport, struct scsi_info, tport);
tport->tport_proto_id = SCSI_PROTOCOL_SRP;
- pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n",
- name, tport, tport->tport_proto_id);
+ dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n",
+ name, tport, tport->tport_proto_id);
return &tport->tport_wwn;
}
@@ -3903,9 +3921,10 @@ static void ibmvscsis_drop_tport(struct se_wwn *wwn)
struct ibmvscsis_tport *tport = container_of(wwn,
struct ibmvscsis_tport,
tport_wwn);
+ struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
- pr_debug("drop_tport(%s)\n",
- config_item_name(&tport->tport_wwn.wwn_group.cg_item));
+ dev_dbg(&vscsi->dev, "drop_tport(%s)\n",
+ config_item_name(&tport->tport_wwn.wwn_group.cg_item));
}
static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
@@ -3990,12 +4009,12 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
rc = kstrtoul(page, 0, &tmp);
if (rc < 0) {
- pr_err("Unable to extract srpt_tpg_store_enable\n");
+ dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- pr_err("Illegal value for srpt_tpg_store_enable\n");
+ dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
return -EINVAL;
}
@@ -4004,8 +4023,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
tport->enabled = true;
lrc = ibmvscsis_enable_change_state(vscsi);
if (lrc)
- pr_err("enable_change_state failed, rc %ld state %d\n",
- lrc, vscsi->state);
+ dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n",
+ lrc, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
} else {
spin_lock_bh(&vscsi->intr_lock);
@@ -4015,7 +4034,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
spin_unlock_bh(&vscsi->intr_lock);
}
- pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
+ dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp,
+ vscsi->state);
return count;
}
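
The bulk of the ibmvscsis conversion above swaps the pr_*() logging family for the dev_*() variants so every message is prefixed with the driver and device name, which disambiguates output when several adapters are present. The difference in miniature, assuming an illustrative adapter structure:

#include <linux/device.h>
#include <linux/printk.h>

struct adapter {
	struct device dev;
	u32 flags;
};

static void report(struct adapter *a)
{
	pr_debug("flags 0x%x\n", a->flags);		/* bare message */
	dev_dbg(&a->dev, "flags 0x%x\n", a->flags);	/* "<driver> <device>: flags ..." */
}
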
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index cc0187965eee..e07dd990e585 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9653,8 +9653,8 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
if (i == 0) {
entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
ioa_cfg->hrrq[i].min_cmd_id = 0;
- ioa_cfg->hrrq[i].max_cmd_id =
- (entries_each_hrrq - 1);
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (entries_each_hrrq - 1);
} else {
entries_each_hrrq =
IPR_NUM_BASE_CMD_BLKS/
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 4d934d6c3e13..6198559abbd8 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -307,6 +307,7 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
/**
* iscsi_sw_tcp_xmit - TCP transmit
+ * @conn: iscsi connection
**/
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
@@ -357,6 +358,7 @@ error:
/**
* iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+ * @conn: iscsi connection
*/
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 5da46052e179..21be672679fb 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -904,10 +904,14 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
case ELS_FLOGI:
if (!lport->point_to_multipoint)
fc_lport_recv_flogi_req(lport, fp);
+ else
+ fc_rport_recv_req(lport, fp);
break;
case ELS_LOGO:
if (fc_frame_sid(fp) == FC_FID_FLOGI)
fc_lport_recv_logo_req(lport, fp);
+ else
+ fc_rport_recv_req(lport, fp);
break;
case ELS_RSCN:
lport->tt.disc_recv_req(lport, fp);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 9c50d2d9f27c..15a2fef51e38 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1696,6 +1696,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
*/
switch (session->state) {
case ISCSI_STATE_FAILED:
+ /*
+ * cmds should fail during shutdown if the session
+ * state is bad, allowing completion to happen
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ reason = FAILURE_SESSION_FAILED;
+ sc->result = DID_NO_CONNECT << 16;
+ break;
+ }
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -1979,6 +1988,19 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
+ * During shutdown, if the session is prematurely disconnected,
+ * recovery won't happen and there will be hung cmds. Not
+ * handling cmds would trigger EH, which is also bad in this case.
+ * Instead, handle the cmd, allow completion to happen and let
+ * the upper layer deal with the result.
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ sc->result = DID_NO_CONNECT << 16;
+ ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
+ rc = BLK_EH_HANDLED;
+ goto done;
+ }
+ /*
* We are probably in the middle of iscsi recovery so let
* that complete and handle the error.
*/
@@ -2082,7 +2104,7 @@ done:
task->last_timeout = jiffies;
spin_unlock(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
- "timer reset" : "nh");
+ "timer reset" : "shutdown or nh");
return rc;
}
EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
@@ -2722,8 +2744,10 @@ static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
* @iscsit: iscsi transport template
* @shost: scsi host
* @cmds_max: session can queue
+ * @dd_size: private driver data size, added to session allocation size
* @cmd_task_size: LLD task private data size
* @initial_cmdsn: initial CmdSN
+ * @id: target ID to add to this session
*
* This can be used by software iscsi_transports that allocate
* a session per scsi host.
@@ -2951,7 +2975,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_setup);
/**
* iscsi_conn_teardown - teardown iscsi connection
- * cls_conn: iscsi class connection
+ * @cls_conn: iscsi class connection
*
* TODO: we may need to make this into a two step process
* like scsi-mls remove + put host
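
Both libiscsi hunks above consult the global system_state so that commands fail fast once a shutdown or reboot has begun, instead of waiting on session recovery that will never run. The guard in isolation (the return value is illustrative; libiscsi uses its own reason codes):

#include <linux/errno.h>
#include <linux/kernel.h>	/* system_state, SYSTEM_RUNNING */
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static int fail_if_shutting_down(struct scsi_cmnd *sc)
{
	if (unlikely(system_state != SYSTEM_RUNNING)) {
		/* complete with an error so shutdown is not held up */
		sc->result = DID_NO_CONNECT << 16;
		return -EIO;
	}
	return 0;
}
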
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 63a1d69ff515..369ef8f23b24 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -798,6 +798,8 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
/**
* iscsi_tcp_hdr_recv_done - process PDU header
+ * @tcp_conn: iSCSI TCP connection
+ * @segment: the buffer segment being processed
*
* This is the callback invoked when the PDU header has
* been received. If the header is followed by additional
@@ -876,9 +878,10 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);
* @conn: iscsi connection
* @skb: network buffer with header and/or data segment
* @offset: offset in skb
- * @offload: bool indicating if transfer was offloaded
+ * @offloaded: bool indicating if transfer was offloaded
+ * @status: iscsi TCP status result
*
- * Will return status of transfer in status. And will return
+ * Will return status of transfer in @status. And will return
* number of bytes copied.
*/
int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
@@ -955,9 +958,7 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);
/**
* iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
- * @conn: iscsi connection
* @task: scsi command task
- * @sc: scsi command
*/
int iscsi_tcp_task_init(struct iscsi_task *task)
{
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 70be4425ae0b..2b3637b40dde 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -730,7 +730,6 @@ int sas_discover_sata(struct domain_device *dev)
if (res)
return res;
- sas_discover_event(dev->port, DISCE_PROBE);
return 0;
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 60de66252fa2..e4fd078e4175 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -212,13 +212,9 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
}
}
-static void sas_probe_devices(struct work_struct *work)
+static void sas_probe_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
- struct sas_discovery_event *ev = to_sas_discovery_event(work);
- struct asd_sas_port *port = ev->port;
-
- clear_bit(DISCE_PROBE, &port->disc.pending);
/* devices must be domain members before link recovery and probe */
list_for_each_entry(dev, &port->disco_list, disco_list_node) {
@@ -294,7 +290,6 @@ int sas_discover_end_dev(struct domain_device *dev)
res = sas_notify_lldd_dev_found(dev);
if (res)
return res;
- sas_discover_event(dev->port, DISCE_PROBE);
return 0;
}
@@ -353,13 +348,9 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
sas_put_device(dev);
}
-static void sas_destruct_devices(struct work_struct *work)
+void sas_destruct_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
- struct sas_discovery_event *ev = to_sas_discovery_event(work);
- struct asd_sas_port *port = ev->port;
-
- clear_bit(DISCE_DESTRUCT, &port->disc.pending);
list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
list_del_init(&dev->disco_list_node);
@@ -370,6 +361,16 @@ static void sas_destruct_devices(struct work_struct *work)
}
}
+static void sas_destruct_ports(struct asd_sas_port *port)
+{
+ struct sas_port *sas_port, *p;
+
+ list_for_each_entry_safe(sas_port, p, &port->sas_port_del_list, del_list) {
+ list_del_init(&sas_port->del_list);
+ sas_port_delete(sas_port);
+ }
+}
+
void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
{
if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
@@ -384,7 +385,6 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
sas_rphy_unlink(dev->rphy);
list_move_tail(&dev->disco_list_node, &port->destroy_list);
- sas_discover_event(dev->port, DISCE_DESTRUCT);
}
}
@@ -490,6 +490,8 @@ static void sas_discover_domain(struct work_struct *work)
port->port_dev = NULL;
}
+ sas_probe_devices(port);
+
SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
task_pid_nr(current), error);
}
@@ -523,6 +525,10 @@ static void sas_revalidate_domain(struct work_struct *work)
port->id, task_pid_nr(current), res);
out:
mutex_unlock(&ha->disco_mutex);
+
+ sas_destruct_devices(port);
+ sas_destruct_ports(port);
+ sas_probe_devices(port);
}
/* ---------- Events ---------- */
@@ -534,7 +540,7 @@ static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
* workqueue, or known to be submitted from a context that is
* not racing against draining
*/
- scsi_queue_work(ha->core.shost, &sw->work);
+ queue_work(ha->disco_q, &sw->work);
}
static void sas_chain_event(int event, unsigned long *pending,
@@ -578,10 +584,8 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
- [DISCE_PROBE] = sas_probe_devices,
[DISCE_SUSPEND] = sas_suspend_devices,
[DISCE_RESUME] = sas_resume_devices,
- [DISCE_DESTRUCT] = sas_destruct_devices,
};
disc->pending = 0;
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 0bb9eefc08c8..ae923eb6de95 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -29,7 +29,8 @@
int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
- int rc = 0;
+ /* it's added to the defer_q when draining, so report success */
+ int rc = 1;
if (!test_bit(SAS_HA_REGISTERED, &ha->state))
return 0;
@@ -39,24 +40,20 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
if (list_empty(&sw->drain_node))
list_add_tail(&sw->drain_node, &ha->defer_q);
} else
- rc = scsi_queue_work(ha->core.shost, &sw->work);
+ rc = queue_work(ha->event_q, &sw->work);
return rc;
}
-static int sas_queue_event(int event, unsigned long *pending,
- struct sas_work *work,
+static int sas_queue_event(int event, struct sas_work *work,
struct sas_ha_struct *ha)
{
- int rc = 0;
+ unsigned long flags;
+ int rc;
- if (!test_and_set_bit(event, pending)) {
- unsigned long flags;
-
- spin_lock_irqsave(&ha->lock, flags);
- rc = sas_queue_work(ha, work);
- spin_unlock_irqrestore(&ha->lock, flags);
- }
+ spin_lock_irqsave(&ha->lock, flags);
+ rc = sas_queue_work(ha, work);
+ spin_unlock_irqrestore(&ha->lock, flags);
return rc;
}
@@ -64,21 +61,25 @@ static int sas_queue_event(int event, unsigned long *pending,
void __sas_drain_work(struct sas_ha_struct *ha)
{
- struct workqueue_struct *wq = ha->core.shost->work_q;
struct sas_work *sw, *_sw;
+ int ret;
set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
spin_lock_irq(&ha->lock);
spin_unlock_irq(&ha->lock);
- drain_workqueue(wq);
+ drain_workqueue(ha->event_q);
+ drain_workqueue(ha->disco_q);
spin_lock_irq(&ha->lock);
clear_bit(SAS_HA_DRAINING, &ha->state);
list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
list_del_init(&sw->drain_node);
- sas_queue_work(ha, sw);
+ ret = sas_queue_work(ha, sw);
+ if (ret != 1)
+ sas_free_event(to_asd_sas_event(&sw->work));
+
}
spin_unlock_irq(&ha->lock);
}
@@ -115,33 +116,78 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
struct asd_sas_port *port = ha->sas_port[i];
const int ev = DISCE_REVALIDATE_DOMAIN;
struct sas_discovery *d = &port->disc;
+ struct asd_sas_phy *sas_phy;
if (!test_and_clear_bit(ev, &d->pending))
continue;
- sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha);
+ if (list_empty(&port->phy_list))
+ continue;
+
+ sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
+ port_phy_el);
+ ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
}
mutex_unlock(&ha->disco_mutex);
}
+
+static void sas_port_event_worker(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+
+ sas_port_event_fns[ev->event](work);
+ sas_free_event(ev);
+}
+
+static void sas_phy_event_worker(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+
+ sas_phy_event_fns[ev->event](work);
+ sas_free_event(ev);
+}
+
static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
{
+ struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
+ int ret;
BUG_ON(event >= PORT_NUM_EVENTS);
- return sas_queue_event(event, &phy->port_events_pending,
- &phy->port_events[event].work, ha);
+ ev = sas_alloc_event(phy);
+ if (!ev)
+ return -ENOMEM;
+
+ INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
+
+ ret = sas_queue_event(event, &ev->work, ha);
+ if (ret != 1)
+ sas_free_event(ev);
+
+ return ret;
}
int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
{
+ struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
+ int ret;
BUG_ON(event >= PHY_NUM_EVENTS);
- return sas_queue_event(event, &phy->phy_events_pending,
- &phy->phy_events[event].work, ha);
+ ev = sas_alloc_event(phy);
+ if (!ev)
+ return -ENOMEM;
+
+ INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
+
+ ret = sas_queue_event(event, &ev->work, ha);
+ if (ret != 1)
+ sas_free_event(ev);
+
+ return ret;
}
int sas_init_events(struct sas_ha_struct *sas_ha)
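
The rewritten notifiers above share one pattern: allocate an event, queue it, and free it unless sas_queue_work() reported 1, which now covers both "queued" and "parked on the defer_q while draining" (either way the pending work item still owns the event). Condensed into one sketch, using the names the patch itself introduces:

static int notify_phy_event_sketch(struct asd_sas_phy *phy,
				   enum phy_event event)
{
	struct asd_sas_event *ev = sas_alloc_event(phy);
	int ret;

	if (!ev)
		return -ENOMEM;

	INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);

	ret = sas_queue_event(event, &ev->work, phy->ha);
	if (ret != 1)		/* neither queued nor deferred: drop it */
		sas_free_event(ev);

	return ret;
}
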
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index ca1566237ae7..6a4f8198b78e 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -293,6 +293,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->minimum_linkrate = dr->pmin_linkrate;
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
+ phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
skip:
if (new_phy)
@@ -686,7 +687,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
resp, RPEL_RESP_SIZE);
- if (!res)
+ if (res)
goto out;
phy->invalid_dword_count = scsi_to_u32(&resp[12]);
@@ -695,6 +696,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
out:
+ kfree(req);
kfree(resp);
return res;
@@ -1914,7 +1916,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
sas_port_delete_phy(phy->port, phy->phy);
sas_device_set_phy(found, phy->port);
if (phy->port->num_phys == 0)
- sas_port_delete(phy->port);
+ list_add_tail(&phy->port->del_list,
+ &parent->port->sas_port_del_list);
phy->port = NULL;
}
}
@@ -2122,7 +2125,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
struct domain_device *dev = NULL;
res = sas_find_bcast_dev(port_dev, &dev);
- while (res == 0 && dev) {
+ if (res == 0 && dev) {
struct expander_device *ex = &dev->ex_dev;
int i = 0, phy_id;
@@ -2134,9 +2137,6 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
res = sas_rediscover(dev, phy_id);
i = phy_id + 1;
} while (i < ex->num_phys);
-
- dev = NULL;
- res = sas_find_bcast_dev(port_dev, &dev);
}
return res;
}
@@ -2145,7 +2145,7 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
struct sas_rphy *rphy)
{
struct domain_device *dev;
- unsigned int reslen = 0;
+ unsigned int rcvlen = 0;
int ret = -EINVAL;
/* no rphy means no smp target support (ie aic94xx host) */
@@ -2179,12 +2179,12 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
ret = smp_execute_task_sg(dev, job->request_payload.sg_list,
job->reply_payload.sg_list);
- if (ret > 0) {
- /* positive number is the untransferred residual */
- reslen = ret;
+ if (ret >= 0) {
+ /* bsg_job_done() requires the length received */
+ rcvlen = job->reply_payload.payload_len - ret;
ret = 0;
}
out:
- bsg_job_done(job, ret, reslen);
+ bsg_job_done(job, ret, rcvlen);
}
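
The sas_smp_handler() fix above matters because bsg_job_done() expects the number of reply bytes actually received, while smp_execute_task_sg() returns the untransferred residual on success; the two are complements of the reply payload length. A minimal sketch:

#include <linux/bsg-lib.h>

static void smp_complete_sketch(struct bsg_job *job, int residual)
{
	/* convert "bytes not transferred" into "bytes received" */
	unsigned int rcvlen = job->reply_payload.payload_len - residual;

	bsg_job_done(job, 0, rcvlen);
}
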
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 64fa6f53cb8b..c81a63b5dc71 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -39,6 +39,7 @@
#include "../scsi_sas_internal.h"
static struct kmem_cache *sas_task_cache;
+static struct kmem_cache *sas_event_cache;
struct sas_task *sas_alloc_task(gfp_t flags)
{
@@ -109,6 +110,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
+ char name[64];
int error = 0;
mutex_init(&sas_ha->disco_mutex);
@@ -122,6 +124,8 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
INIT_LIST_HEAD(&sas_ha->defer_q);
INIT_LIST_HEAD(&sas_ha->eh_dev_q);
+ sas_ha->event_thres = SAS_PHY_SHUTDOWN_THRES;
+
error = sas_register_phys(sas_ha);
if (error) {
printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
@@ -140,10 +144,24 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
goto Undo_ports;
}
+ error = -ENOMEM;
+ snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
+ sas_ha->event_q = create_singlethread_workqueue(name);
+ if (!sas_ha->event_q)
+ goto Undo_ports;
+
+ snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
+ sas_ha->disco_q = create_singlethread_workqueue(name);
+ if (!sas_ha->disco_q)
+ goto Undo_event_q;
+
INIT_LIST_HEAD(&sas_ha->eh_done_q);
INIT_LIST_HEAD(&sas_ha->eh_ata_q);
return 0;
+
+Undo_event_q:
+ destroy_workqueue(sas_ha->event_q);
Undo_ports:
sas_unregister_ports(sas_ha);
Undo_phys:
@@ -174,6 +192,9 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
__sas_drain_work(sas_ha);
mutex_unlock(&sas_ha->drain_mutex);
+ destroy_workqueue(sas_ha->disco_q);
+ destroy_workqueue(sas_ha->event_q);
+
return 0;
}
@@ -364,8 +385,6 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha)
struct asd_sas_phy *phy = ha->sas_phy[i];
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
- phy->port_events_pending = 0;
- phy->phy_events_pending = 0;
phy->frame_rcvd_size = 0;
}
}
@@ -537,6 +556,37 @@ static struct sas_function_template sft = {
.smp_handler = sas_smp_handler,
};
+static inline ssize_t phy_event_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres);
+}
+
+static inline ssize_t phy_event_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ sha->event_thres = simple_strtol(buf, NULL, 10);
+
+ /* threshold cannot be set too small */
+ if (sha->event_thres < 32)
+ sha->event_thres = 32;
+
+ return count;
+}
+
+DEVICE_ATTR(phy_event_threshold,
+ S_IRUGO|S_IWUSR,
+ phy_event_threshold_show,
+ phy_event_threshold_store);
+EXPORT_SYMBOL_GPL(dev_attr_phy_event_threshold);
+
struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *dft)
{
@@ -555,20 +605,71 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
+
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
+{
+ struct asd_sas_event *event;
+ gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ event = kmem_cache_zalloc(sas_event_cache, flags);
+ if (!event)
+ return NULL;
+
+ atomic_inc(&phy->event_nr);
+
+ if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
+ if (i->dft->lldd_control_phy) {
+ if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
+ sas_printk("The phy%02d bursting events, shut it down.\n",
+ phy->id);
+ sas_notify_phy_event(phy, PHYE_SHUTDOWN);
+ }
+ } else {
+ /* PHY control is not supported; stop allocating events */
+ WARN_ONCE(1, "PHY control not supported.\n");
+ kmem_cache_free(sas_event_cache, event);
+ atomic_dec(&phy->event_nr);
+ event = NULL;
+ }
+ }
+
+ return event;
+}
+
+void sas_free_event(struct asd_sas_event *event)
+{
+ struct asd_sas_phy *phy = event->phy;
+
+ kmem_cache_free(sas_event_cache, event);
+ atomic_dec(&phy->event_nr);
+}
+
/* ---------- SAS Class register/unregister ---------- */
static int __init sas_class_init(void)
{
sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
if (!sas_task_cache)
- return -ENOMEM;
+ goto out;
+
+ sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN);
+ if (!sas_event_cache)
+ goto free_task_kmem;
return 0;
+free_task_kmem:
+ kmem_cache_destroy(sas_task_cache);
+out:
+ return -ENOMEM;
}
static void __exit sas_class_exit(void)
{
kmem_cache_destroy(sas_task_cache);
+ kmem_cache_destroy(sas_event_cache);
}
MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
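
sas_alloc_event() above derives its GFP flags from in_interrupt(), since phy events may be raised from hard-irq context where a sleeping GFP_KERNEL allocation is forbidden. The idiom in isolation (the cache name is illustrative):

#include <linux/interrupt.h>
#include <linux/slab.h>

static void *alloc_in_any_context(struct kmem_cache *cache)
{
	/* atomic context must not sleep, so fall back to GFP_ATOMIC */
	gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

	return kmem_cache_zalloc(cache, flags);
}
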
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index c07e08136491..50e12d662ffe 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -61,6 +61,9 @@ int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
int sas_register_phys(struct sas_ha_struct *sas_ha);
void sas_unregister_phys(struct sas_ha_struct *sas_ha);
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy);
+void sas_free_event(struct asd_sas_event *event);
+
int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);
@@ -98,6 +101,10 @@ int sas_try_ata_reset(struct asd_sas_phy *phy);
void sas_hae_reset(struct work_struct *work);
void sas_free_device(struct kref *kref);
+void sas_destruct_devices(struct asd_sas_port *port);
+
+extern const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS];
+extern const work_func_t sas_port_event_fns[PORT_NUM_EVENTS];
#ifdef CONFIG_SCSI_SAS_HOST_SMP
extern void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index cdee446c29e1..bf3e1b979ca6 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -35,7 +35,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
+ phy->in_shutdown = 0;
phy->error = 0;
sas_deform_port(phy, 1);
}
@@ -45,7 +45,7 @@ static void sas_phye_oob_done(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
+ phy->in_shutdown = 0;
phy->error = 0;
}
@@ -58,8 +58,6 @@ static void sas_phye_oob_error(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending);
-
sas_deform_port(phy, 1);
if (!port && phy->enabled && i->dft->lldd_control_phy) {
@@ -88,8 +86,6 @@ static void sas_phye_spinup_hold(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending);
-
phy->error = 0;
i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
}
@@ -99,8 +95,6 @@ static void sas_phye_resume_timeout(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_RESUME_TIMEOUT, &phy->phy_events_pending);
-
/* phew, lldd got the phy back in the nick of time */
if (!phy->suspended) {
dev_info(&phy->phy->dev, "resume timeout cancelled\n");
@@ -113,45 +107,41 @@ static void sas_phye_resume_timeout(struct work_struct *work)
}
+static void sas_phye_shutdown(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ if (phy->enabled) {
+ int ret;
+
+ phy->error = 0;
+ phy->enabled = 0;
+ ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
+ if (ret)
+ sas_printk("lldd disable phy%02d returned %d\n",
+ phy->id, ret);
+ } else
+ sas_printk("phy%02d is not enabled, cannot shutdown\n",
+ phy->id);
+}
+
/* ---------- Phy class registration ---------- */
int sas_register_phys(struct sas_ha_struct *sas_ha)
{
int i;
- static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
- [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
- [PHYE_OOB_DONE] = sas_phye_oob_done,
- [PHYE_OOB_ERROR] = sas_phye_oob_error,
- [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
- [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
-
- };
-
- static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
- [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
- [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
- [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
- [PORTE_TIMER_EVENT] = sas_porte_timer_event,
- [PORTE_HARD_RESET] = sas_porte_hard_reset,
- };
-
/* Now register the phys. */
for (i = 0; i < sas_ha->num_phys; i++) {
- int k;
struct asd_sas_phy *phy = sas_ha->sas_phy[i];
phy->error = 0;
+ atomic_set(&phy->event_nr, 0);
INIT_LIST_HEAD(&phy->port_phy_el);
- for (k = 0; k < PORT_NUM_EVENTS; k++) {
- INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]);
- phy->port_events[k].phy = phy;
- }
-
- for (k = 0; k < PHY_NUM_EVENTS; k++) {
- INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]);
- phy->phy_events[k].phy = phy;
- }
phy->port = NULL;
phy->ha = sas_ha;
@@ -179,3 +169,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
return 0;
}
+
+const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
+ [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
+ [PHYE_OOB_DONE] = sas_phye_oob_done,
+ [PHYE_OOB_ERROR] = sas_phye_oob_error,
+ [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
+ [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
+ [PHYE_SHUTDOWN] = sas_phye_shutdown,
+};
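Moving sas_phy_event_fns from a function-local static inside sas_register_phys() to this file-scope const table (declared extern in sas_internal.h) lets the notifier path bind a handler to a freshly allocated event at raise time, instead of pre-arming a fixed work struct per phy per event. A self-contained sketch of the designated-initializer dispatch idiom, with made-up event names:

#include <stdio.h>

typedef void (*work_func_t)(int phy_id);

enum phy_event { PHYE_A, PHYE_B, PHY_NUM_EVENTS };

static void handle_a(int id) { printf("phy%d: event A\n", id); }
static void handle_b(int id) { printf("phy%d: event B\n", id); }

/* File-scope const table like sas_phy_event_fns: the handler is looked
 * up by event number when an event is raised, not wired into fixed
 * per-phy work structs at registration time. */
static const work_func_t phy_event_fns[PHY_NUM_EVENTS] = {
	[PHYE_A] = handle_a,
	[PHYE_B] = handle_b,
};

int main(void)
{
	phy_event_fns[PHYE_B](0);   /* dispatch by designated-initializer index */
	return 0;
}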
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index d3c5297c6c89..f07e55d3aa73 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -66,6 +66,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
rc = sas_notify_lldd_dev_found(dev);
if (rc) {
sas_unregister_dev(port, dev);
+ sas_destruct_devices(port);
continue;
}
@@ -192,6 +193,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
si->dft->lldd_port_formed(phy);
sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
+ flush_workqueue(sas_ha->disco_q);
}
/**
@@ -219,6 +221,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
if (port->num_phys == 1) {
sas_unregister_domain_devices(port, gone);
+ sas_destruct_devices(port);
sas_port_delete(port->port);
port->port = NULL;
} else {
@@ -261,8 +264,6 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
-
sas_form_port(phy);
}
@@ -273,14 +274,15 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
unsigned long flags;
u32 prim;
- clear_bit(PORTE_BROADCAST_RCVD, &phy->port_events_pending);
-
spin_lock_irqsave(&phy->sas_prim_lock, flags);
prim = phy->sas_prim;
spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
SAS_DPRINTK("broadcast received: %d\n", prim);
sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
+
+ if (phy->port)
+ flush_workqueue(phy->port->ha->disco_q);
}
void sas_porte_link_reset_err(struct work_struct *work)
@@ -288,8 +290,6 @@ void sas_porte_link_reset_err(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -298,8 +298,6 @@ void sas_porte_timer_event(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -308,8 +306,6 @@ void sas_porte_hard_reset(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -323,6 +319,7 @@ static void sas_init_port(struct asd_sas_port *port,
INIT_LIST_HEAD(&port->dev_list);
INIT_LIST_HEAD(&port->disco_list);
INIT_LIST_HEAD(&port->destroy_list);
+ INIT_LIST_HEAD(&port->sas_port_del_list);
spin_lock_init(&port->phy_list_lock);
INIT_LIST_HEAD(&port->phy_list);
port->ha = sas_ha;
@@ -353,3 +350,11 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
sas_deform_port(sas_ha->sas_phy[i], 0);
}
+
+const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
+ [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
+ [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
+ [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
+ [PORTE_TIMER_EVENT] = sas_porte_timer_event,
+ [PORTE_HARD_RESET] = sas_porte_hard_reset,
+};
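The two flush_workqueue(...disco_q) calls added above turn the queued discovery event into a synchronous step: the caller does not proceed until everything queued on the discovery queue so far has run. A toy single-threaded model of that queue-then-flush ordering (a real workqueue runs items on worker threads; this sketch only shows the ordering guarantee, with hypothetical names):

#include <stdio.h>

#define MAX_WORK 8

typedef void (*work_fn)(void);
static work_fn pending[MAX_WORK];
static int npending;

static void queue_work_item(work_fn fn)
{
	if (npending < MAX_WORK)
		pending[npending++] = fn;
}

static void flush_queue(void)
{
	for (int i = 0; i < npending; i++)
		pending[i]();            /* run everything queued so far */
	npending = 0;
}

static void discover_domain(void) { printf("discovery ran\n"); }

int main(void)
{
	queue_work_item(discover_domain);   /* sas_discover_event(...) */
	flush_queue();                      /* flush_workqueue(disco_q) */
	printf("port formation continues after discovery\n");
	return 0;
}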
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 58476b728c57..6de9681ace82 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -27,6 +27,7 @@
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/ctype.h>
+#include <linux/kernel.h>
#include "sas_internal.h"
@@ -486,15 +487,28 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
- int res;
+ int res = TMF_RESP_FUNC_FAILED;
struct sas_task *task = TO_SAS_TASK(cmd);
struct Scsi_Host *host = cmd->device->host;
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_internal *i = to_sas_internal(host->transportt);
+ unsigned long flags;
if (!i->dft->lldd_abort_task)
return FAILED;
- res = i->dft->lldd_abort_task(task);
+ spin_lock_irqsave(host->host_lock, flags);
+ /* We cannot do async aborts for SATA devices */
+ if (dev_is_sata(dev) && !host->host_eh_scheduled) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return FAILED;
+ }
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ if (task)
+ res = i->dft->lldd_abort_task(task);
+ else
+ SAS_DPRINTK("no task to abort\n");
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
@@ -946,21 +960,6 @@ void sas_target_destroy(struct scsi_target *starget)
sas_put_device(found_dev);
}
-static void sas_parse_addr(u8 *sas_addr, const char *p)
-{
- int i;
- for (i = 0; i < SAS_ADDR_SIZE; i++) {
- u8 h, l;
- if (!*p)
- break;
- h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
- p++;
- l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
- p++;
- sas_addr[i] = (h<<4) | l;
- }
-}
-
#define SAS_STRING_ADDR_SIZE 16
int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
@@ -977,7 +976,9 @@ int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
goto out;
}
- sas_parse_addr(addr, fw->data);
+ res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
+ if (res)
+ goto out;
out:
release_firmware(fw);
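hex2bin(dst, src, count) converts 2*count hex characters into count bytes and, unlike the removed open-coded sas_parse_addr(), returns an error (-EINVAL) on any non-hex character instead of silently producing a garbage nibble, which is why the result is now checked. A userspace re-creation of that behavior:

#include <ctype.h>
#include <stdio.h>

#define SAS_ADDR_SIZE 8

static int hex_nibble(char c)
{
	if (isdigit((unsigned char)c))
		return c - '0';
	c = (char)toupper((unsigned char)c);
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

/* Same contract as hex2bin(): two hex chars per output byte, hard
 * failure on anything that is not a hex digit. */
static int parse_sas_addr(unsigned char *dst, const char *src, size_t nbytes)
{
	for (size_t i = 0; i < nbytes; i++) {
		int hi = hex_nibble(src[2 * i]);
		int lo = hex_nibble(src[2 * i + 1]);

		if (hi < 0 || lo < 0)
			return -1;       /* hex2bin() returns -EINVAL here */
		dst[i] = (unsigned char)(hi << 4 | lo);
	}
	return 0;
}

int main(void)
{
	unsigned char addr[SAS_ADDR_SIZE];

	if (parse_sas_addr(addr, "500605B0000272D9", SAS_ADDR_SIZE) == 0)
		printf("first byte: 0x%02X\n", addr[0]);   /* 0x50 */
	return 0;
}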
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 231302273257..61fb46da05d4 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -55,9 +55,10 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
+#define LPFC_MIN_SG_SEG_CNT 32 /* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
-#define LPFC_MAX_NVME_SEG_CNT 128 /* max SGL element cnt per NVME cmnd */
+#define LPFC_MAX_NVME_SEG_CNT 256 /* max SGL element cnt per NVME cmnd */
#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
@@ -705,7 +706,6 @@ struct lpfc_hba {
* capability
*/
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
-#define NVME_XRI_ABORT_EVENT 0x100000
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -945,6 +945,8 @@ struct lpfc_hba {
struct list_head lpfc_nvme_buf_list_get;
struct list_head lpfc_nvme_buf_list_put;
uint32_t total_nvme_bufs;
+ uint32_t get_nvme_bufs;
+ uint32_t put_nvme_bufs;
struct list_head lpfc_iocb_list;
uint32_t total_iocbq_bufs;
struct list_head active_rrq_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 82f6e219ee34..d188fb565a32 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -148,6 +148,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
struct lpfc_nodelist *ndlp;
struct nvme_fc_remote_port *nrport;
uint64_t data1, data2, data3, tot;
@@ -198,10 +199,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
}
len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+ "LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
- atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "LS: RSP Abort %08x xb %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp_aborted),
+ atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf+len, PAGE_SIZE-len,
@@ -236,6 +242,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&tgtp->xmt_fcp_rsp_drop));
len += snprintf(buf+len, PAGE_SIZE-len,
+ "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_aborted),
+ atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
+ atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_abort_cmpl));
@@ -271,6 +283,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
}
+ lport = (struct lpfc_nvme_lport *)localport->private;
len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
spin_lock_irq(shost->host_lock);
@@ -347,9 +360,16 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %016x Cmpl %016x\n",
+ "LS: Xmt %010x Cmpl %010x Abort %08x\n",
atomic_read(&phba->fc4NvmeLsRequests),
- atomic_read(&phba->fc4NvmeLsCmpls));
+ atomic_read(&phba->fc4NvmeLsCmpls),
+ atomic_read(&lport->xmt_ls_abort));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->xmt_ls_err),
+ atomic_read(&lport->cmpl_ls_xb),
+ atomic_read(&lport->cmpl_ls_err));
tot = atomic_read(&phba->fc4NvmeIoCmpls);
data1 = atomic_read(&phba->fc4NvmeInputRequests);
@@ -360,8 +380,22 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
data1, data2, data3);
len += snprintf(buf+len, PAGE_SIZE-len,
- " Cmpl %016llx Outstanding %016llx\n",
- tot, (data1 + data2 + data3) - tot);
+ " noxri %08x nondlp %08x qdepth %08x "
+ "wqerr %08x\n",
+ atomic_read(&lport->xmt_fcp_noxri),
+ atomic_read(&lport->xmt_fcp_bad_ndlp),
+ atomic_read(&lport->xmt_fcp_qdepth),
+ atomic_read(&lport->xmt_fcp_wqerr));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " Cmpl %016llx Outstanding %016llx Abort %08x\n",
+ tot, ((data1 + data2 + data3) - tot),
+ atomic_read(&lport->xmt_fcp_abort));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "FCP CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->cmpl_fcp_xb),
+ atomic_read(&lport->cmpl_fcp_err));
return len;
}
@@ -3366,12 +3400,13 @@ LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
/*
* lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
+ * lpfc_nvmet_mrq = 0 driver will calculate optimal number of RQ pairs

* lpfc_nvmet_mrq = 1 use a single RQ pair
* lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
*
*/
LPFC_ATTR_R(nvmet_mrq,
- 1, 1, 16,
+ LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
"Specify number of RQ pairs for processing NVMET cmds");
/*
@@ -5139,7 +5174,7 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
* this parameter will be limited to 128 if BlockGuard is enabled under SLI4
* and will be limited to 512 if BlockGuard is enabled under SLI3.
*/
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
+LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
/*
@@ -6362,6 +6397,9 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
}
+ if (!phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
@@ -6369,10 +6407,13 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
}
+ if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
+
} else {
/* Not NVME Target mode. Turn off Target parameters. */
phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvmet_fb_size = 0;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 4e858b38529a..559f9aa0ed08 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -254,6 +254,8 @@ void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
struct lpfc_nvmet_ctxbuf *ctxp);
int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr);
+void lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba);
+void lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index f77673ab4a84..9d20d2c208c7 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -471,6 +471,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
/* By default, the driver expects to support FCP FC4 */
if (fc4_type == FC_TYPE_FCP)
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
@@ -685,6 +686,25 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_els_flush_rscn(vport);
goto out;
}
+
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_flag & FC_RSCN_DEFERRED) {
+ vport->fc_flag &= ~FC_RSCN_DEFERRED;
+ spin_unlock_irq(shost->host_lock);
+
+ /*
+ * Skip processing the NS response
+ * Re-issue the NS cmd
+ */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0151 Process Deferred RSCN Data: x%x x%x\n",
+ vport->fc_flag, vport->fc_rscn_id_cnt);
+ lpfc_els_handle_rscn(vport);
+
+ goto out;
+ }
+ spin_unlock_irq(shost->host_lock);
+
if (irsp->ulpStatus) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2bf5ad3b1512..17ea3bb04266 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -750,6 +750,8 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
uint64_t tot, data1, data2, data3;
int len = 0;
int cnt;
@@ -775,10 +777,15 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
}
len += snprintf(buf + len, size - len,
- "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+ "LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
- atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl));
+
+ len += snprintf(buf + len, size - len,
+ "LS: RSP Abort %08x xb %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp_aborted),
+ atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf + len, size - len,
@@ -812,6 +819,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
atomic_read(&tgtp->xmt_fcp_rsp_drop));
len += snprintf(buf + len, size - len,
+ "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_aborted),
+ atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
+ atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
+
+ len += snprintf(buf + len, size - len,
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_abort_cmpl));
@@ -885,8 +898,38 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
data1, data2, data3);
len += snprintf(buf + len, size - len,
- " Cmpl %016llx Outstanding %016llx\n",
+ " Cmpl %016llx Outstanding %016llx\n",
tot, (data1 + data2 + data3) - tot);
+
+ localport = vport->localport;
+ if (!localport)
+ return len;
+ lport = (struct lpfc_nvme_lport *)localport->private;
+ if (!lport)
+ return len;
+
+ len += snprintf(buf + len, size - len,
+ "LS Xmt Err: Abrt %08x Err %08x "
+ "Cmpl Err: xb %08x Err %08x\n",
+ atomic_read(&lport->xmt_ls_abort),
+ atomic_read(&lport->xmt_ls_err),
+ atomic_read(&lport->cmpl_ls_xb),
+ atomic_read(&lport->cmpl_ls_err));
+
+ len += snprintf(buf + len, size - len,
+ "FCP Xmt Err: noxri %06x nondlp %06x "
+ "qdepth %06x wqerr %06x Abrt %06x\n",
+ atomic_read(&lport->xmt_fcp_noxri),
+ atomic_read(&lport->xmt_fcp_bad_ndlp),
+ atomic_read(&lport->xmt_fcp_qdepth),
+ atomic_read(&lport->xmt_fcp_wqerr),
+ atomic_read(&lport->xmt_fcp_abort));
+
+ len += snprintf(buf + len, size - len,
+ "FCP Cmpl Err: xb %08x Err %08x\n",
+ atomic_read(&lport->cmpl_fcp_xb),
+ atomic_read(&lport->cmpl_fcp_err));
+
}
return len;
@@ -3213,7 +3256,7 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
return 1;
}
- if (eqidx < phba->cfg_nvmet_mrq) {
+ if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
/* NVMET CQset */
qp = phba->sli4_hba.nvmet_cqset[eqidx];
*len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len);
@@ -3246,7 +3289,7 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
- "bs:x%x proc:x%llx eqd %d]\n",
+ "cqe_proc:x%x eqe_proc:x%llx eqd %d]\n",
eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
(unsigned long long)qp->q_cnt_4, qp->q_mode);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
@@ -3366,6 +3409,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (len >= max_cnt)
goto too_big;
+ qp = phba->sli4_hba.hdr_rq;
+ len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
+ "ELS RQpair", pbuffer, len);
+ if (len >= max_cnt)
+ goto too_big;
+
/* Slow-path NVME LS response CQ */
qp = phba->sli4_hba.nvmels_cq;
len = __lpfc_idiag_print_cq(qp, "NVME LS",
@@ -3383,12 +3432,6 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (len >= max_cnt)
goto too_big;
- qp = phba->sli4_hba.hdr_rq;
- len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
- "RQpair", pbuffer, len);
- if (len >= max_cnt)
- goto too_big;
-
goto out;
}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index f9a566eaef04..5a7547f9d8d8 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -134,6 +134,8 @@ struct lpfc_nodelist {
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
uint32_t upcall_flags;
+#define NLP_WAIT_FOR_UNREG 0x1
+
uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
};
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 39d5b146202e..234c7c015982 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -858,6 +858,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->fc_flag |= FC_PT2PT;
spin_unlock_irq(shost->host_lock);
+ /* If we are pt2pt with another NPort, force NPIV off! */
+ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+
/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
lpfc_unregister_fcf_prep(phba);
@@ -916,28 +919,29 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
- } else
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+
+ lpfc_config_link(phba, mbox);
+
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto fail;
+ }
+ } else {
/* This side will wait for the PLOGI, decrement ndlp reference
* count indicating that ndlp can be released when other
* references to it are done.
*/
lpfc_nlp_put(ndlp);
- /* If we are pt2pt with another NPort, force NPIV off! */
- phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
-
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- goto fail;
-
- lpfc_config_link(phba, mbox);
-
- mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- goto fail;
+ /* Start discovery - this should just do CLEAR_LA */
+ lpfc_disc_start(vport);
}
return 0;
@@ -1030,30 +1034,31 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
stop_rr_fcf_flogi:
/* FLOGI failure */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "2858 FLOGI failure Status:x%x/x%x TMO:x%x "
- "Data x%x x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, phba->hba_flag,
- phba->fcf.fcf_flag);
+ if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_LOOP_OPEN_FAILURE)))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2858 FLOGI failure Status:x%x/x%x "
+ "TMO:x%x Data x%x x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout, phba->hba_flag,
+ phba->fcf.fcf_flag);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
- /* FLOGI failure */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout);
-
-
/* If this is not a loop open failure, bail out */
if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
goto flogifail;
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0150 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout);
+
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -1670,6 +1675,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Two ndlps cannot have the same did on the nodelist */
ndlp->nlp_DID = keepDID;
+ lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
memcpy(ndlp->active_rrqs_xri_bitmap,
@@ -2088,6 +2094,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
+
+ /* Driver supports multiple FC4 types. Counters matter. */
+ vport->fc_prli_sent--;
+ ndlp->fc4_prli_sent--;
spin_unlock_irq(shost->host_lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -2095,9 +2105,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4],
ndlp->nlp_DID);
- /* Ddriver supports multiple FC4 types. Counters matter. */
- vport->fc_prli_sent--;
-
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0103 PRLI completes to NPort x%06x "
@@ -2111,7 +2118,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus) {
/* Check for retry */
- ndlp->fc4_prli_sent--;
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
goto out;
@@ -2190,6 +2196,15 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
local_nlp_type = ndlp->nlp_fc4_type;
+ /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
+ * fields here before any of them can complete.
+ */
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+ ndlp->nvme_fb_size = 0;
+
send_next_prli:
if (local_nlp_type & NLP_FC4_FCP) {
/* Payload is 4 + 16 = 20 x14 bytes. */
@@ -2298,6 +2313,13 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_PRLI_SND;
+
+ /* The vport counters are used for lpfc_scan_finished, but
+ * the ndlp is used to track outstanding PRLIs for different
+ * FC4 types.
+ */
+ vport->fc_prli_sent++;
+ ndlp->fc4_prli_sent++;
spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
IOCB_ERROR) {
@@ -2308,12 +2330,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 1;
}
- /* The vport counters are used for lpfc_scan_finished, but
- * the ndlp is used to track outstanding PRLIs for different
- * FC4 types.
- */
- vport->fc_prli_sent++;
- ndlp->fc4_prli_sent++;
/* The driver supports 2 FC4 types. Make sure
* a PRLI is issued for all types before exiting.
@@ -2951,8 +2967,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of node.
*/
-
- lpfc_nlp_put(ndlp);
+ if (!(vport->fc_flag & FC_PT2PT))
+ lpfc_nlp_put(ndlp);
return 0;
}
@@ -6172,9 +6188,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
/* send RECOVERY event for ALL nodes that match RSCN payload */
lpfc_rscn_recovery_check(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_RSCN_DEFERRED;
- spin_unlock_irq(shost->host_lock);
return 0;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6849,7 +6862,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 1;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
- *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t); /* Skip past command */
/* use the command's xri in the response */
@@ -8060,13 +8073,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
break;
}
-
- /* NVMET accepts NVME PRLI only. Reject FCP PRLI */
- if (cmd == ELS_CMD_PRLI && phba->nvmet_support) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- rjt_exp = LSEXP_REQ_UNSUPPORTED;
- break;
- }
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
break;
case ELS_CMD_LIRR:
@@ -8149,9 +8155,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_nlp_put(ndlp);
break;
case ELS_CMD_REC:
- /* receive this due to exchange closed */
- rjt_err = LSRJT_UNABLE_TPC;
- rjt_exp = LSEXP_INVALID_OX_RX;
+ /* receive this due to exchange closed */
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_INVALID_OX_RX;
break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2bafde2b7cfe..b159a5c4e388 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -640,8 +640,6 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_handle_rrq_active(phba);
if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
lpfc_sli4_fcp_xri_abort_event_proc(phba);
- if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
- lpfc_sli4_nvme_xri_abort_event_proc(phba);
if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
lpfc_sli4_els_xri_abort_event_proc(phba);
if (phba->hba_flag & ASYNC_EVENT)
@@ -4178,12 +4176,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
vport->phba->nport_event_cnt++;
- if (vport->phba->nvmet_support == 0)
- /* Start devloss */
- lpfc_nvme_unregister_port(vport, ndlp);
- else
+ if (vport->phba->nvmet_support == 0) {
+ /* Start devloss if target. */
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ lpfc_nvme_unregister_port(vport, ndlp);
+ } else {
/* NVMET has no upcall. */
lpfc_nlp_put(ndlp);
+ }
}
}
@@ -4207,11 +4207,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type & NLP_FC4_NVME) {
if (vport->phba->nvmet_support == 0) {
/* Register this rport with the transport.
- * Initiators take the NDLP ref count in
- * the register.
+ * Only NVME Target Rports are registered with
+ * the transport.
*/
- vport->phba->nport_event_cnt++;
- lpfc_nvme_register_port(vport, ndlp);
+ if (ndlp->nlp_type & NLP_NVME_TARGET) {
+ vport->phba->nport_event_cnt++;
+ lpfc_nvme_register_port(vport, ndlp);
+ }
} else {
/* Just take an NDLP ref count since the
* target does not register rports.
@@ -5838,9 +5840,12 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
if (filter(ndlp, param)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"3185 FIND node filter %p DID "
- "Data: x%p x%x x%x\n",
+ "ndlp %p did x%x flg x%x st x%x "
+ "xri x%x type x%x rpi x%x\n",
filter, ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag);
+ ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_xri, ndlp->nlp_type,
+ ndlp->nlp_rpi);
return ndlp;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 2b145966c73f..73c2f6971d2b 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1122,6 +1122,7 @@ struct cq_context {
#define LPFC_CQ_CNT_256 0x0
#define LPFC_CQ_CNT_512 0x1
#define LPFC_CQ_CNT_1024 0x2
+#define LPFC_CQ_CNT_WORD7 0x3
uint32_t word1;
#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */
#define lpfc_cq_eq_id_MASK 0x000000FF
@@ -1129,7 +1130,7 @@ struct cq_context {
#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */
#define lpfc_cq_eq_id_2_MASK 0x0000FFFF
#define lpfc_cq_eq_id_2_WORD word1
- uint32_t reserved0;
+ uint32_t lpfc_cq_context_count; /* Version 2 Only */
uint32_t reserved1;
};
@@ -1193,6 +1194,9 @@ struct lpfc_mbx_cq_create_set {
#define lpfc_mbx_cq_create_set_arm_SHIFT 31
#define lpfc_mbx_cq_create_set_arm_MASK 0x00000001
#define lpfc_mbx_cq_create_set_arm_WORD word2
+#define lpfc_mbx_cq_create_set_cq_cnt_SHIFT 16
+#define lpfc_mbx_cq_create_set_cq_cnt_MASK 0x00007FFF
+#define lpfc_mbx_cq_create_set_cq_cnt_WORD word2
#define lpfc_mbx_cq_create_set_num_cq_SHIFT 0
#define lpfc_mbx_cq_create_set_num_cq_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_num_cq_WORD word2
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2b7ea7e53e12..f539c554588c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1034,6 +1034,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
LIST_HEAD(nvmet_aborts);
unsigned long iflag = 0;
struct lpfc_sglq *sglq_entry = NULL;
+ int cnt;
lpfc_sli_hbqbuf_free_all(phba);
@@ -1090,11 +1091,14 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ cnt = 0;
list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
psb->pCmd = NULL;
psb->status = IOSTAT_SUCCESS;
+ cnt++;
}
spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+ phba->put_nvme_bufs += cnt;
list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
@@ -3339,6 +3343,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_put, list) {
list_del(&lpfc_ncmd->list);
+ phba->put_nvme_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
@@ -3350,6 +3355,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
list_del(&lpfc_ncmd->list);
+ phba->get_nvme_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
@@ -3754,9 +3760,11 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
uint16_t i, lxri, els_xri_cnt;
uint16_t nvme_xri_cnt, nvme_xri_max;
LIST_HEAD(nvme_sgl_list);
- int rc;
+ int rc, cnt;
phba->total_nvme_bufs = 0;
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return 0;
@@ -3780,6 +3788,9 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
+ cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -3824,6 +3835,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
spin_lock_irq(&phba->nvme_buf_list_get_lock);
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs = cnt;
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -5609,8 +5621,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
/* Initialize the NVME buffer list used by driver for NVME IO */
spin_lock_init(&phba->nvme_buf_list_get_lock);
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs = 0;
spin_lock_init(&phba->nvme_buf_list_put_lock);
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs = 0;
}
/* Initialize the fabric iocb list */
@@ -5806,6 +5820,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
struct lpfc_mqe *mqe;
int longs;
int fof_vectors = 0;
+ int extra;
uint64_t wwn;
phba->sli4_hba.num_online_cpu = num_online_cpus();
@@ -5860,13 +5875,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
/*
+ * 1 for cmd, 1 for rsp, NVME adds an extra one
+ * for boundary conditions in its max_sgl_segment template.
+ */
+ extra = 2;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ extra++;
+
+ /*
* It doesn't matter what family our adapter is in, we are
* limited to 2 Pages, 512 SGEs, for our SGL.
* There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
*/
max_buf_size = (2 * SLI4_PAGE_SIZE);
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
- phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
+ phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
/*
* Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5899,14 +5922,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + 2) *
+ ((phba->cfg_sg_seg_cnt + extra) *
sizeof(struct sli4_sge));
/* Total SGEs for scsi_sg_list */
- phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
/*
- * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
+ * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
* need to post 1 page for the SGL.
*/
}
@@ -5947,9 +5970,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
-
- /* Fast-path XRI aborted CQ Event work queue list */
- INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
}
/* This abort list used by worker thread */
@@ -7936,8 +7956,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
phba->cfg_fcp_io_channel = io_channel;
if (phba->cfg_nvme_io_channel > io_channel)
phba->cfg_nvme_io_channel = io_channel;
- if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ if (phba->nvmet_support) {
+ if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ }
+ if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
@@ -7958,10 +7982,10 @@ static int
lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
{
struct lpfc_queue *qdesc;
- int cnt;
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0508 Failed allocate fast-path NVME CQ (%d)\n",
@@ -7970,8 +7994,8 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
}
phba->sli4_hba.nvme_cq[wqidx] = qdesc;
- cnt = LPFC_NVME_WQSIZE;
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0509 Failed allocate fast-path NVME WQ (%d)\n",
@@ -7987,11 +8011,18 @@ static int
lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
{
struct lpfc_queue *qdesc;
- uint32_t wqesize;
/* Create Fast Path FCP CQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ if (phba->fcp_embed_io)
+ /* Increase the CQ size when WQEs contain an embedded cdb */
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
+
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
@@ -8000,9 +8031,15 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
phba->sli4_hba.fcp_cq[wqidx] = qdesc;
/* Create Fast Path FCP WQs */
- wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
+ if (phba->fcp_embed_io)
+ /* Increase the WQ size when WQEs contain an embedded cdb */
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE,
+ LPFC_WQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0503 Failed allocate fast-path FCP WQ (%d)\n",
@@ -8173,7 +8210,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create HBA Event Queues (EQs) */
for (idx = 0; idx < io_channel; idx++) {
/* Create EQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8196,8 +8234,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (phba->nvmet_support) {
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
qdesc = lpfc_sli4_queue_alloc(phba,
- phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3142 Failed allocate NVME "
@@ -8213,7 +8252,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create slow-path Mailbox Command Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8223,7 +8263,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.mbx_cq = qdesc;
/* Create slow-path ELS Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8239,7 +8280,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create Mailbox Command Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.mq_esize,
phba->sli4_hba.mq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8253,7 +8295,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create slow-path ELS Work Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8265,7 +8308,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
/* Create NVME LS Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8275,7 +8319,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.nvmels_cq = qdesc;
/* Create NVME LS Work Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8291,7 +8336,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create Receive Queue for header */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8301,7 +8347,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.hdr_rq = qdesc;
/* Create Receive Queue for data */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8314,6 +8361,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
/* Create NVMET Receive Queue for header */
qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
LPFC_NVMET_RQE_DEF_COUNT);
if (!qdesc) {
@@ -8339,6 +8387,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create NVMET Receive Queue for data */
qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
LPFC_NVMET_RQE_DEF_COUNT);
if (!qdesc) {
@@ -8437,13 +8486,15 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
/* Release NVME CQ mapping array */
lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
- phba->cfg_nvmet_mrq);
+ if (phba->nvmet_support) {
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
+ phba->cfg_nvmet_mrq);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
- phba->cfg_nvmet_mrq);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
- phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
+ phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
+ phba->cfg_nvmet_mrq);
+ }
/* Release mailbox command work queue */
__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
@@ -8514,6 +8565,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
qidx, (uint32_t)rc);
return rc;
}
+ cq->chann = qidx;
if (qtype != LPFC_MBOX) {
/* Setup nvme_cq_map for fast lookup */
@@ -8533,6 +8585,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
/* no need to tear down cq - caller will do so */
return rc;
}
+ wq->chann = qidx;
/* Bind this CQ/WQ to the NVME ring */
pring = wq->pring;
@@ -8773,6 +8826,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
"rc = 0x%x\n", (uint32_t)rc);
goto out_destroy;
}
+ phba->sli4_hba.nvmet_cqset[0]->chann = 0;
+
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"6090 NVMET CQ setup: cq-id=%d, "
"parent eq-id=%d\n",
@@ -8994,19 +9049,22 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
- /* Unset NVMET MRQ queue */
- if (phba->sli4_hba.nvmet_mrq_hdr) {
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
- lpfc_rq_destroy(phba,
+ if (phba->nvmet_support) {
+ /* Unset NVMET MRQ queue */
+ if (phba->sli4_hba.nvmet_mrq_hdr) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_rq_destroy(
+ phba,
phba->sli4_hba.nvmet_mrq_hdr[qidx],
phba->sli4_hba.nvmet_mrq_data[qidx]);
- }
+ }
- /* Unset NVMET CQ Set complete queue */
- if (phba->sli4_hba.nvmet_cqset) {
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
- lpfc_cq_destroy(phba,
- phba->sli4_hba.nvmet_cqset[qidx]);
+ /* Unset NVMET CQ Set complete queue */
+ if (phba->sli4_hba.nvmet_cqset) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_cq_destroy(
+ phba, phba->sli4_hba.nvmet_cqset[qidx]);
+ }
}
/* Unset FCP response complete queue */
@@ -9175,11 +9233,6 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
/* Pending ELS XRI abort events */
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
&cqelist);
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- /* Pending NVME XRI abort events */
- list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
- &cqelist);
- }
/* Pending asynnc events */
list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
&cqelist);
@@ -9421,44 +9474,62 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
lpfc_sli4_bar0_register_memmap(phba, if_type);
}
- if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
- /*
- * Map SLI4 if type 0 HBA Control Register base to a kernel
- * virtual address and setup the registers.
- */
- phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
- bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
- phba->sli4_hba.ctrl_regs_memmap_p =
- ioremap(phba->pci_bar1_map, bar1map_len);
- if (!phba->sli4_hba.ctrl_regs_memmap_p) {
- dev_printk(KERN_ERR, &pdev->dev,
- "ioremap failed for SLI4 HBA control registers.\n");
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
+ /*
+ * Map SLI4 if type 0 HBA Control Register base to a
+ * kernel virtual address and setup the registers.
+ */
+ phba->pci_bar1_map = pci_resource_start(pdev,
+ PCI_64BIT_BAR2);
+ bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
+ phba->sli4_hba.ctrl_regs_memmap_p =
+ ioremap(phba->pci_bar1_map,
+ bar1map_len);
+ if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+ dev_err(&pdev->dev,
+ "ioremap failed for SLI4 HBA "
+ "control registers.\n");
+ error = -ENOMEM;
+ goto out_iounmap_conf;
+ }
+ phba->pci_bar2_memmap_p =
+ phba->sli4_hba.ctrl_regs_memmap_p;
+ lpfc_sli4_bar1_register_memmap(phba);
+ } else {
+ error = -ENOMEM;
goto out_iounmap_conf;
}
- phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
- lpfc_sli4_bar1_register_memmap(phba);
}
- if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
- /*
- * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
- * virtual address and setup the registers.
- */
- phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
- bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
- phba->sli4_hba.drbl_regs_memmap_p =
- ioremap(phba->pci_bar2_map, bar2map_len);
- if (!phba->sli4_hba.drbl_regs_memmap_p) {
- dev_printk(KERN_ERR, &pdev->dev,
- "ioremap failed for SLI4 HBA doorbell registers.\n");
- goto out_iounmap_ctrl;
- }
- phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
- error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
- if (error)
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
+ /*
+ * Map SLI4 if type 0 HBA Doorbell Register base to
+ * a kernel virtual address and setup the registers.
+ */
+ phba->pci_bar2_map = pci_resource_start(pdev,
+ PCI_64BIT_BAR4);
+ bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
+ phba->sli4_hba.drbl_regs_memmap_p =
+ ioremap(phba->pci_bar2_map,
+ bar2map_len);
+ if (!phba->sli4_hba.drbl_regs_memmap_p) {
+ dev_err(&pdev->dev,
+ "ioremap failed for SLI4 HBA"
+ " doorbell registers.\n");
+ error = -ENOMEM;
+ goto out_iounmap_ctrl;
+ }
+ phba->pci_bar4_memmap_p =
+ phba->sli4_hba.drbl_regs_memmap_p;
+ error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+ if (error)
+ goto out_iounmap_all;
+ } else {
+ error = -ENOMEM;
goto out_iounmap_all;
+ }
}
return 0;
@@ -10093,6 +10164,16 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
int fcp_xri_cmpl = 1;
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+ /* Driver just aborted IOs during the hba_unset process. Pause
+ * here to give the HBA time to complete the IO and get entries
+ * into the abts lists.
+ */
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
+
+ /* Wait for NVME pending IO to flush back to transport. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_nvme_wait_for_io_drain(phba);
+
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
fcp_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
@@ -10369,7 +10450,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
!phba->nvme_support) {
phba->nvme_support = 0;
phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvme_io_channel = 0;
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
@@ -11616,6 +11697,10 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
lpfc_sli_flush_fcp_rings(phba);
+ /* Flush the outstanding NVME IOs if fc4 type enabled. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
+
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -11647,6 +11732,10 @@ lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
/* Clean up all driver's outstanding SCSI I/Os */
lpfc_sli_flush_fcp_rings(phba);
+
+ /* Flush the outstanding NVME IOs if fc4 type enabled. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
}
/**
@@ -12138,10 +12227,10 @@ int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
- uint32_t wqesize;
/* Create FOF EQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc)
goto out_error;
@@ -12151,7 +12240,15 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
if (phba->cfg_fof) {
/* Create OAS CQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ if (phba->fcp_embed_io)
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc)
goto out_error;
@@ -12159,11 +12256,16 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.oas_cq = qdesc;
/* Create OAS WQ */
- wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
- phba->sli4_hba.wq_ecount);
-
+ if (phba->fcp_embed_io)
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE,
+ LPFC_WQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc)
goto out_error;
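The recurring change in this file threads an explicit page size through lpfc_sli4_queue_alloc(): queues built from 128-byte WQEs (embedded CDBs) or expanded CQE counts use LPFC_EXPANDED_PAGE_SIZE so the entry count still fits in a describable number of pages. A back-of-the-envelope helper showing the effect; the 16 KiB value used for the expanded page size is an assumption for illustration only:

#include <stdio.h>

#define LPFC_DEFAULT_PAGE_SIZE  4096
#define LPFC_EXPANDED_PAGE_SIZE 16384   /* hypothetical value */

/* Pages needed to hold entry_count fixed-size entries, rounded up. */
static unsigned int pages_needed(unsigned int page_size,
				 unsigned int entry_size,
				 unsigned int entry_count)
{
	unsigned int per_page = page_size / entry_size;

	return (entry_count + per_page - 1) / per_page;
}

int main(void)
{
	/* 128-byte WQEs, 4096 entries: 128 pages vs 32 */
	printf("default pages:  %u\n",
	       pages_needed(LPFC_DEFAULT_PAGE_SIZE, 128, 4096));
	printf("expanded pages: %u\n",
	       pages_needed(LPFC_EXPANDED_PAGE_SIZE, 128, 4096));
	return 0;
}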
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 56faeb049b4a..87c08ff37ddd 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -753,12 +753,12 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
if (rc < 0) {
- (rqbp->rqb_free_buffer)(phba, rqb_entry);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6409 Cannot post to RQ %d: %x %x\n",
rqb_entry->hrq->queue_id,
rqb_entry->hrq->host_index,
rqb_entry->hrq->hba_index);
+ (rqbp->rqb_free_buffer)(phba, rqb_entry);
} else {
list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
rqbp->buffer_count++;
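The lpfc_mem.c hunk is a use-after-free fix by reordering: the error message formats fields reached through rqb_entry, so the buffer must be released only after the message is emitted. Reduced to its essence:

#include <stdio.h>
#include <stdlib.h>

struct rq_buf {
	int queue_id;
};

/* Log first, free second: the format string dereferences the buffer,
 * so freeing it first (as the old code did) reads freed memory. */
static void post_failed(struct rq_buf *buf)
{
	fprintf(stderr, "cannot post to RQ %d\n", buf->queue_id);
	free(buf);
}

int main(void)
{
	struct rq_buf *buf = malloc(sizeof(*buf));

	if (!buf)
		return 1;
	buf->queue_id = 3;
	post_failed(buf);
	return 0;
}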
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b6957d944b9a..d841aa42f607 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -390,6 +390,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
break;
}
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+
/* Check for Nport to NPort pt2pt protocol */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -727,6 +732,41 @@ out:
return 0;
}
+static uint32_t
+lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct ls_rjt stat;
+ uint32_t *payload;
+ uint32_t cmd;
+
+ payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ cmd = *payload;
+ if (vport->phba->nvmet_support) {
+ /* Must be a NVME PRLI */
+ if (cmd == ELS_CMD_PRLI)
+ goto out;
+ } else {
+ /* Initiator mode. */
+ if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI))
+ goto out;
+ }
+ return 1;
+out:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
+ "6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
+ "state x%x flags x%x\n",
+ cmd, ndlp->nlp_rpi, ndlp->nlp_state,
+ ndlp->nlp_flag);
+ memset(&stat, 0, sizeof(struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+ ndlp, NULL);
+ return 0;
+}
+
static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
@@ -742,9 +782,6 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lp = (uint32_t *) pcmd->virt;
npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
- ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- ndlp->nlp_flag &= ~NLP_FIRSTBURST;
if ((npr->prliType == PRLI_FCP_TYPE) ||
(npr->prliType == PRLI_NVME_TYPE)) {
if (npr->initiatorFunc) {
@@ -769,8 +806,12 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* type. Target mode does not issue gft_id so doesn't get
* the fc4 type set until now.
*/
- if ((phba->nvmet_support) && (npr->prliType == PRLI_NVME_TYPE))
+ if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ }
+ if (npr->prliType == PRLI_FCP_TYPE)
+ ndlp->nlp_fc4_type |= NLP_FC4_FCP;
}
if (rport) {
/* We need to update the rport role values */
@@ -1373,7 +1414,8 @@ lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+ if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
@@ -1544,6 +1586,9 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
struct ls_rjt stat;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) {
+ return ndlp->nlp_state;
+ }
if (vport->phba->nvmet_support) {
/* NVME Target mode. Handle and respond to the PRLI and
* transition to UNMAPPED provided the RPI has completed
@@ -1552,28 +1597,22 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
lpfc_rcv_prli(vport, ndlp, cmdiocb);
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
} else {
/* RPI registration has not completed. Reject the PRLI
* to prevent an illegal state transition when the
* rpi registration does complete.
*/
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
- "6115 NVMET ndlp rpi %d state "
- "unknown, state x%x flags x%08x\n",
- ndlp->nlp_rpi, ndlp->nlp_state,
- ndlp->nlp_flag);
memset(&stat, 0, sizeof(struct ls_rjt));
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+ stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
ndlp, NULL);
+ return ndlp->nlp_state;
}
} else {
/* Initiator mode. */
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
}
-
return ndlp->nlp_state;
}
@@ -1819,6 +1858,8 @@ lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
@@ -1922,13 +1963,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp->nlp_state;
}
- /* Check out PRLI rsp */
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
- ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
-
- /* NVME or FCP first burst must be negotiated for each PRLI. */
- ndlp->nlp_flag &= ~NLP_FIRSTBURST;
- ndlp->nvme_fb_size = 0;
if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
(npr->prliType == PRLI_FCP_TYPE)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
@@ -1945,8 +1979,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
- /* PRLI completed. Decrement count. */
- ndlp->fc4_prli_sent--;
} else if (nvpr &&
(bf_get_be32(prli_acc_rsp_code, nvpr) ==
PRLI_REQ_EXECUTED) &&
@@ -1991,8 +2023,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
be32_to_cpu(nvpr->word5),
ndlp->nlp_flag, ndlp->nlp_fcp_info,
ndlp->nlp_type);
- /* PRLI completed. Decrement count. */
- ndlp->fc4_prli_sent--;
}
if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
(vport->port_type == LPFC_NPIV_PORT) &&
@@ -2016,7 +2046,8 @@ out_err:
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
- else
+ else if (ndlp->nlp_type &
+ (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
} else
lpfc_printf_vlog(vport,
@@ -2241,6 +2272,9 @@ lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
+
lpfc_rcv_prli(vport, ndlp, cmdiocb);
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
@@ -2310,6 +2344,8 @@ lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
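Taken together, the lpfc_nportdisc.c hunks give every receive-PRLI state handler the same shape: call lpfc_rcv_prli_support_check() first, and only ACC the PRLI when the FC-4 type is supported; on failure the check sends the LS_RJT itself and returns 0. A condensed, illustrative handler (the function name is hypothetical; the calls are the ones this patch adds):

    static uint32_t
    example_rcv_prli_handler(struct lpfc_vport *vport,
                             struct lpfc_nodelist *ndlp,
                             struct lpfc_iocbq *cmdiocb)
    {
        /* Unsupported FC-4 type: LS_RJT already sent, state unchanged */
        if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
            return ndlp->nlp_state;

        lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
        return ndlp->nlp_state;
    }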
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 517ae570e507..81e3a4f10c3c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -57,11 +57,13 @@
/* NVME initiator-based functions */
static struct lpfc_nvme_buf *
-lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp);
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ int expedite);
static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);
+static struct nvme_fc_port_template lpfc_nvme_template;
/**
* lpfc_nvme_create_queue -
@@ -88,6 +90,9 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_qhandle *qhandle;
char *str;
+ if (!pnvme_lport->private)
+ return -ENOMEM;
+
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
@@ -140,6 +145,9 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
+ if (!pnvme_lport->private)
+ return;
+
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
@@ -154,6 +162,10 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
struct lpfc_nvme_lport *lport = localport->private;
+ lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
+ "6173 localport %p delete complete\n",
+ lport);
+
/* release any threads waiting for the unreg to complete */
complete(&lport->lport_unreg_done);
}
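Localport teardown is a two-sided handshake around a struct completion: the driver arms it, asks the transport to unregister, and blocks until this delete callback fires complete(). Condensed from the create/destroy paths later in this file (error handling omitted; wait_tmo as in lpfc_nvme_lport_unreg_wait() below):

    init_completion(&lport->lport_unreg_done);       /* arm before unreg */
    ret = nvme_fc_unregister_localport(localport);   /* transport starts */
    wait_for_completion_timeout(&lport->lport_unreg_done,
                                wait_tmo);           /* driver side blocks */

    /* ...while the transport side, in lpfc_nvme_localport_delete(): */
    complete(&lport->lport_unreg_done);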
@@ -189,16 +201,19 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6146 remoteport delete complete %p\n",
+ "6146 remoteport delete of remoteport %p\n",
remoteport);
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = NULL;
+ spin_unlock_irq(&vport->phba->hbalock);
+
+ /* Remove original register reference. The host transport
+ * won't reference this rport/remoteport any further.
+ */
lpfc_nlp_put(ndlp);
rport_err:
- /* This call has to execute as long as the rport is valid.
- * Release any threads waiting for the unreg to complete.
- */
- complete(&rport->rport_unreg_done);
+ return;
}
static void
@@ -206,6 +221,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe)
{
struct lpfc_vport *vport = cmdwqe->vport;
+ struct lpfc_nvme_lport *lport;
uint32_t status;
struct nvmefc_ls_req *pnvme_lsreq;
struct lpfc_dmabuf *buf_ptr;
@@ -215,6 +231,13 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+ if (status) {
+ lport = (struct lpfc_nvme_lport *)vport->localport->private;
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_ls_xb);
+ atomic_inc(&lport->cmpl_ls_err);
+ }
+
ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6047 nvme cmpl Enter "
@@ -419,6 +442,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
if (vport->load_flag & FC_UNLOADING)
return -ENODEV;
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
@@ -490,6 +516,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
ndlp, 2, 30, 0);
if (ret != WQE_SUCCESS) {
+ atomic_inc(&lport->xmt_ls_err);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6052 EXIT. issue ls wqe failed lport %p, "
"rport %p lsreq%p Status %x DID %x\n",
@@ -534,6 +561,9 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
phba = vport->phba;
+ if (vport->load_flag & FC_UNLOADING)
+ return;
+
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
@@ -571,6 +601,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
/* Abort the targeted IOs and remove them from the abort list. */
list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
+ atomic_inc(&lport->xmt_ls_abort);
spin_lock_irq(&phba->hbalock);
list_del_init(&wqe->dlist);
lpfc_sli_issue_abort_iotag(phba, pring, wqe);
@@ -774,8 +805,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_nvme_rport *rport;
struct lpfc_nodelist *ndlp;
struct lpfc_nvme_fcpreq_priv *freqpriv;
+ struct lpfc_nvme_lport *lport;
unsigned long flags;
- uint32_t code;
+ uint32_t code, status;
uint16_t cid, sqhd, data;
uint32_t *ptr;
@@ -790,10 +822,17 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd = lpfc_ncmd->nvmeCmd;
rport = lpfc_ncmd->nrport;
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ if (status) {
+ lport = (struct lpfc_nvme_lport *)vport->localport->private;
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_fcp_xb);
+ atomic_inc(&lport->cmpl_fcp_err);
+ }
lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
- bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter);
+ status, wcqe->parameter);
/*
* Catch race where our node has transitioned, but the
* transport is still transitioning.
@@ -851,8 +890,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
nCmd->transferred_length = nCmd->payload_length;
} else {
- lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
- LPFC_IOCB_STATUS_MASK);
+ lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
/* For NVME, the only failure path that results in an
@@ -946,10 +984,13 @@ out_err:
freqpriv->nvme_buf = NULL;
/* NVME targets need completion held off until the abort exchange
- * completes.
+ * completes unless the NVME Rport is getting unregistered.
*/
- if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY))
+
+ if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
nCmd->done(nCmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_ncmd->nrport = NULL;
@@ -1149,7 +1190,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
first_data_sgl = sgl;
lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
- if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt + 1) {
+ if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6058 Too many sg segments from "
"NVME Transport. Max %d, "
@@ -1239,6 +1280,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct nvmefc_fcp_req *pnvme_fcreq)
{
int ret = 0;
+ int expedite = 0;
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
@@ -1246,13 +1288,30 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_buf *lpfc_ncmd;
struct lpfc_nvme_rport *rport;
struct lpfc_nvme_qhandle *lpfc_queue_info;
- struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
+ struct nvme_common_command *sqe;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t start = 0;
#endif
+ /* Validate pointers. Fault handling between the LLDD and the
+ * transport has timing races.
+ */
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ if (unlikely(!lport)) {
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
vport = lport->vport;
+
+ if (unlikely(!hw_queue_handle)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6117 Fail IO, NULL hw_queue_handle\n");
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
phba = vport->phba;
if (vport->load_flag & FC_UNLOADING) {
@@ -1260,16 +1319,17 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_fail;
}
- /* Validate pointers. */
- if (!pnvme_lport || !pnvme_rport || !freqpriv) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR | LOG_NODE,
- "6117 No Send:IO submit ptrs NULL, lport %p, "
- "rport %p fcreq_priv %p\n",
- pnvme_lport, pnvme_rport, freqpriv);
- ret = -ENODEV;
- goto out_fail;
- }
+ freqpriv = pnvme_fcreq->private;
+ if (unlikely(!freqpriv)) {
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on)
start = ktime_get_ns();
@@ -1293,6 +1353,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
"6066 Missing node for DID %x\n",
pnvme_rport->port_id);
+ atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -ENODEV;
goto out_fail;
}
@@ -1306,21 +1367,36 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
"IO. State x%x, Type x%x\n",
rport, pnvme_rport->port_id,
ndlp->nlp_state, ndlp->nlp_type);
+ atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -ENODEV;
goto out_fail;
}
+ /* Currently, only NVME Keep Alive commands should be expedited
+ * if the driver runs out of a resource. These should only be
+ * issued on the admin queue, qidx 0.
+ */
+ if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
+ sqe = &((struct nvme_fc_cmd_iu *)
+ pnvme_fcreq->cmdaddr)->sqe.common;
+ if (sqe->opcode == nvme_admin_keep_alive)
+ expedite = 1;
+ }
+
/* The node is shared with FCP IO, make sure the IO pending count does
* not exceed the programmed depth.
*/
- if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
+ if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
+ !expedite) {
+ atomic_inc(&lport->xmt_fcp_qdepth);
ret = -EBUSY;
goto out_fail;
}
- lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp);
+ lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
if (lpfc_ncmd == NULL) {
+ atomic_inc(&lport->xmt_fcp_noxri);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6065 driver's buffer pool is empty, "
"IO failed\n");
@@ -1373,6 +1449,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
if (ret) {
+ atomic_inc(&lport->xmt_fcp_wqerr);
atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6113 FCP could not issue WQE err %x "
@@ -1473,19 +1550,36 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- struct lpfc_nvme_rport *rport;
struct lpfc_nvme_buf *lpfc_nbuf;
struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
- struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
union lpfc_wqe *abts_wqe;
unsigned long flags;
int ret_val;
+ /* Validate pointers. Fault handling between the LLDD and the
+ * transport has timing races.
+ */
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
- rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+ if (unlikely(!lport))
+ return;
+
vport = lport->vport;
+
+ if (unlikely(!hw_queue_handle)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
+ "6129 Fail Abort, HW Queue Handle NULL.\n");
+ return;
+ }
+
phba = vport->phba;
+ freqpriv = pnvme_fcreq->private;
+
+ if (unlikely(!freqpriv))
+ return;
+ if (vport->load_flag & FC_UNLOADING)
+ return;
/* Announce entry to new IO submit field. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
@@ -1552,6 +1646,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}
+ atomic_inc(&lport->xmt_fcp_abort);
lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
nvmereq_wqe->sli4_xritag,
nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
@@ -1931,6 +2026,8 @@ lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -2067,6 +2164,20 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
return num_posted;
}
+static inline struct lpfc_nvme_buf *
+lpfc_nvme_buf(struct lpfc_hba *phba)
+{
+ struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &phba->lpfc_nvme_buf_list_get, list) {
+ list_del_init(&lpfc_ncmd->list);
+ phba->get_nvme_bufs--;
+ return lpfc_ncmd;
+ }
+ return NULL;
+}
+
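lpfc_nvme_buf() above is the consumer half of a split free list: releases go onto a put list under its own lock, allocations drain a get list, and the two are spliced together only when the get list runs dry. The new get_nvme_bufs/put_nvme_bufs counters make the pool depth cheap to test, which is what lets lpfc_get_nvme_buf() hold back the last LPFC_NVME_EXPEDITE_XRICNT buffers for expedited (keep-alive) requests. A stripped-down sketch of the locking discipline, with generic names (struct nvme_buf here is hypothetical):

    spin_lock_irqsave(&get_lock, flags);
    buf = list_first_entry_or_null(&get_list, struct nvme_buf, list);
    if (!buf) {
        /* get list dry: splice over everything freed since last refill */
        spin_lock(&put_lock);
        list_splice_init(&put_list, &get_list);
        spin_unlock(&put_lock);
        buf = list_first_entry_or_null(&get_list, struct nvme_buf, list);
    }
    if (buf)
        list_del_init(&buf->list);
    spin_unlock_irqrestore(&get_lock, flags);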
/**
* lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
@@ -2079,35 +2190,27 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
* Pointer to lpfc_nvme_buf - Success
**/
static struct lpfc_nvme_buf *
-lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ int expedite)
{
- struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+ struct lpfc_nvme_buf *lpfc_ncmd = NULL;
unsigned long iflag = 0;
- int found = 0;
spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &phba->lpfc_nvme_buf_list_get, list) {
- list_del_init(&lpfc_ncmd->list);
- found = 1;
- break;
- }
- if (!found) {
+ if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+ lpfc_ncmd = lpfc_nvme_buf(phba);
+ if (!lpfc_ncmd) {
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice(&phba->lpfc_nvme_buf_list_put,
&phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs += phba->put_nvme_bufs;
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &phba->lpfc_nvme_buf_list_get, list) {
- list_del_init(&lpfc_ncmd->list);
- found = 1;
- break;
- }
+ if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+ lpfc_ncmd = lpfc_nvme_buf(phba);
}
spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
- if (!found)
- return NULL;
return lpfc_ncmd;
}
@@ -2145,6 +2248,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs++;
spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
}
}
@@ -2221,6 +2325,18 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lport->vport = vport;
vport->nvmei_support = 1;
+ atomic_set(&lport->xmt_fcp_noxri, 0);
+ atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
+ atomic_set(&lport->xmt_fcp_qdepth, 0);
+ atomic_set(&lport->xmt_fcp_wqerr, 0);
+ atomic_set(&lport->xmt_fcp_abort, 0);
+ atomic_set(&lport->xmt_ls_abort, 0);
+ atomic_set(&lport->xmt_ls_err, 0);
+ atomic_set(&lport->cmpl_fcp_xb, 0);
+ atomic_set(&lport->cmpl_fcp_err, 0);
+ atomic_set(&lport->cmpl_ls_xb, 0);
+ atomic_set(&lport->cmpl_ls_err, 0);
+
/* Don't post more new bufs if repost already recovered
* the nvme sgls.
*/
@@ -2234,6 +2350,47 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
return ret;
}
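The new lport counters follow the usual atomic_t lifecycle: zeroed once here at localport create, bumped lock-free on the hot paths earlier in this file, and read back when statistics are reported. The read side is an assumption (this patch only adds the init and the increments); a sketch:

    atomic_set(&lport->xmt_fcp_noxri, 0);    /* localport create */
    atomic_inc(&lport->xmt_fcp_noxri);       /* submit path, no XRI left */
    n = atomic_read(&lport->xmt_fcp_noxri);  /* stats dump (assumed) */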
+/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
+ *
+ * The driver has to wait for the host nvme transport to callback
+ * indicating the localport has successfully unregistered all
+ * resources. Since this is an uninterruptible wait, loop every ten
+ * seconds and print a message indicating no progress.
+ *
+ * An uninterruptible wait is used because of the risk of transport-to-
+ * driver state mismatch.
+ */
+void
+lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
+ struct lpfc_nvme_lport *lport)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ u32 wait_tmo;
+ int ret;
+
+ /* The host transport has to clean up and confirm, which requires an
+ * indefinite wait. Print a message each time a 10 second wait
+ * expires and renew the wait; expiry is unexpected.
+ */
+ wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
+ while (true) {
+ ret = wait_for_completion_timeout(&lport->lport_unreg_done,
+ wait_tmo);
+ if (unlikely(!ret)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ "6176 Lport %p Localport %p wait "
+ "timed out. Renewing.\n",
+ lport, vport->localport);
+ continue;
+ }
+ break;
+ }
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6177 Lport %p Localport %p Complete Success\n",
+ lport, vport->localport);
+#endif
+}
+
/**
* lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
* @pnvme: pointer to lpfc nvme data structure.
@@ -2268,7 +2425,11 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
*/
init_completion(&lport->lport_unreg_done);
ret = nvme_fc_unregister_localport(localport);
- wait_for_completion_timeout(&lport->lport_unreg_done, 5);
+
+ /* Wait for completion. This blocks until the transport
+ * confirms the localport unregister.
+ */
+ lpfc_nvme_lport_unreg_wait(vport, lport);
/* Regardless of the unregister upcall response, clear
* nvmei_support. All rports are unregistered and the
@@ -2365,6 +2526,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ if (!ndlp->nrport)
+ lpfc_nlp_get(ndlp);
+
ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
if (!ret) {
/* If the ndlp already has an nrport, this is just
@@ -2373,23 +2537,33 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
rport = remote_port->private;
if (ndlp->nrport) {
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
- LOG_NVME_DISC,
- "6014 Rebinding lport to "
- "rport wwpn 0x%llx, "
- "Data: x%x x%x x%x x%06x\n",
- remote_port->port_name,
- remote_port->port_id,
- remote_port->port_role,
- ndlp->nlp_type,
- ndlp->nlp_DID);
+ if (ndlp->nrport == remote_port->private) {
+ /* Same remoteport. Just reuse. */
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NVME_DISC,
+ "6014 Rebinding lport to "
+ "remoteport %p wwpn 0x%llx, "
+ "Data: x%x x%x %p x%x x%06x\n",
+ remote_port,
+ remote_port->port_name,
+ remote_port->port_id,
+ remote_port->port_role,
+ ndlp,
+ ndlp->nlp_type,
+ ndlp->nlp_DID);
+ return 0;
+ }
prev_ndlp = rport->ndlp;
- /* Sever the ndlp<->rport connection before dropping
- * the ndlp ref from register.
+ /* Sever the ndlp<->rport association
+ * before dropping the ndlp ref from
+ * register.
*/
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = NULL;
+ spin_unlock_irq(&vport->phba->hbalock);
rport->ndlp = NULL;
+ rport->remoteport = NULL;
if (prev_ndlp)
lpfc_nlp_put(ndlp);
}
@@ -2397,19 +2571,20 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Clean bind the rport to the ndlp. */
rport->remoteport = remote_port;
rport->lport = lport;
- rport->ndlp = lpfc_nlp_get(ndlp);
- if (!rport->ndlp)
- return -1;
+ rport->ndlp = ndlp;
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = rport;
+ spin_unlock_irq(&vport->phba->hbalock);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
"6022 Binding new rport to "
- "lport %p Rport WWNN 0x%llx, "
+ "lport %p Remoteport %p WWNN 0x%llx, "
"Rport WWPN 0x%llx DID "
- "x%06x Role x%x\n",
- lport,
+ "x%06x Role x%x, ndlp %p\n",
+ lport, remote_port,
rpinfo.node_name, rpinfo.port_name,
- rpinfo.port_id, rpinfo.port_role);
+ rpinfo.port_id, rpinfo.port_role,
+ ndlp);
} else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE,
@@ -2473,20 +2648,20 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Sanity check ndlp type. Only call for NVME ports. Don't
* clear any rport state until the transport calls back.
*/
- if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
- init_completion(&rport->rport_unreg_done);
+ if (ndlp->nlp_type & NLP_NVME_TARGET) {
/* No concern about the role change on the nvme remoteport.
* The transport will update it.
*/
+ ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
ret = nvme_fc_unregister_remoteport(remoteport);
if (ret != 0) {
+ lpfc_nlp_put(ndlp);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6167 NVME unregister failed %d "
"port_state x%x\n",
ret, remoteport->port_state);
}
-
}
return;
@@ -2545,8 +2720,11 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
* before the abort exchange command fully completes.
* Once completed, it is available via the put list.
*/
- nvme_cmd = lpfc_ncmd->nvmeCmd;
- nvme_cmd->done(nvme_cmd);
+ if (lpfc_ncmd->nvmeCmd) {
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->done(nvme_cmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
lpfc_release_nvme_buf(phba, lpfc_ncmd);
return;
}
@@ -2558,3 +2736,45 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
"6312 XRI Aborted xri x%x not found\n", xri);
}
+
+/**
+ * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
+ * @phba: Pointer to HBA context object.
+ *
+ * This function flushes all wqes in the nvme rings and frees all resources
+ * in the txcmplq. It does not issue abort wqes for the IO commands in
+ * the txcmplq; they will just be returned with IOERR_SLI_DOWN. It is
+ * invoked by EEH when the device's PCI slot has been permanently
+ * disabled.
+ **/
+void
+lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
+{
+ struct lpfc_sli_ring *pring;
+ u32 i, wait_cnt = 0;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return;
+
+ /* Cycle through all NVME rings and make sure all outstanding
+ * WQEs have been removed from the txcmplqs.
+ */
+ for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+ pring = phba->sli4_hba.nvme_wq[i]->pring;
+
+ /* Wait for everything on the txcmplq to drain */
+ while (!list_empty(&pring->txcmplq)) {
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
+ wait_cnt++;
+
+ /* The sleep is 10 ms. Every ten seconds,
+ * dump a message; something is wrong.
+ */
+ if ((wait_cnt % 1000) == 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6178 NVME IO not empty, "
+ "cnt %d\n", wait_cnt);
+ }
+ }
+ }
+}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index d192bb268f99..e79f8f75758c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -22,10 +22,12 @@
********************************************************************/
#define LPFC_NVME_DEFAULT_SEGS (64 + 1) /* 256K IOs */
-#define LPFC_NVME_WQSIZE 256
#define LPFC_NVME_ERSP_LEN 0x20
+#define LPFC_NVME_WAIT_TMO 10
+#define LPFC_NVME_EXPEDITE_XRICNT 8
+
struct lpfc_nvme_qhandle {
uint32_t index; /* WQ index to use */
uint32_t qidx; /* queue index passed to create */
@@ -36,7 +38,18 @@ struct lpfc_nvme_qhandle {
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
struct completion lport_unreg_done;
- /* Add sttats counters here */
+ /* Add stats counters here */
+ atomic_t xmt_fcp_noxri;
+ atomic_t xmt_fcp_bad_ndlp;
+ atomic_t xmt_fcp_qdepth;
+ atomic_t xmt_fcp_wqerr;
+ atomic_t xmt_fcp_abort;
+ atomic_t xmt_ls_abort;
+ atomic_t xmt_ls_err;
+ atomic_t cmpl_fcp_xb;
+ atomic_t cmpl_fcp_err;
+ atomic_t cmpl_ls_xb;
+ atomic_t cmpl_ls_err;
};
struct lpfc_nvme_rport {
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 84cf1b9079f7..8dbf5c9d51aa 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -38,6 +38,7 @@
#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
@@ -126,10 +127,17 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- if (status)
- atomic_inc(&tgtp->xmt_ls_rsp_error);
- else
- atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
+ if (tgtp) {
+ if (status) {
+ atomic_inc(&tgtp->xmt_ls_rsp_error);
+ if (status == IOERR_ABORT_REQUESTED)
+ atomic_inc(&tgtp->xmt_ls_rsp_aborted);
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
+ } else {
+ atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
+ }
+ }
out:
rsp = &ctxp->ctx.ls_req;
@@ -218,6 +226,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
+ ctxp->rqb_buffer = (void *)nvmebuf;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -253,6 +262,17 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
return;
}
+ /* Processing of FCP command is deferred */
+ if (rc == -EOVERFLOW) {
+ lpfc_nvmeio_data(phba,
+ "NVMET RCV BUSY: xri x%x sz %d "
+ "from %06x\n",
+ oxid, size, sid);
+ /* defer repost rcv buffer till .defer_rcv callback */
+ ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
+ atomic_inc(&tgtp->rcv_fcp_cmd_out);
+ return;
+ }
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
@@ -519,8 +539,11 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if (status) {
rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
rsp->transferred_length = 0;
- if (tgtp)
+ if (tgtp) {
atomic_inc(&tgtp->xmt_fcp_rsp_error);
+ if (status == IOERR_ABORT_REQUESTED)
+ atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
+ }
logerr = LOG_NVME_IOERR;
@@ -528,6 +551,8 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
ctxp->flag |= LPFC_NVMET_XBUSY;
logerr |= LOG_NVME_ABTS;
+ if (tgtp)
+ atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
} else {
ctxp->flag &= ~LPFC_NVMET_XBUSY;
@@ -635,6 +660,9 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return -ENODEV;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
@@ -721,6 +749,11 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
goto aerr;
}
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ rc = -ENODEV;
+ goto aerr;
+ }
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
if (rsp->op == NVMET_FCOP_RSP)
@@ -823,6 +856,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
ctxp->oxid, ctxp->flag, ctxp->state);
@@ -910,7 +946,11 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
tgtp = phba->targetport->private;
atomic_inc(&tgtp->rcv_fcp_cmd_defer);
- lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+ if (ctxp->flag & LPFC_NVMET_DEFER_RCV_REPOST)
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+ else
+ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+ ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
}
static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -1216,6 +1256,8 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
atomic_set(&tgtp->xmt_ls_rsp, 0);
atomic_set(&tgtp->xmt_ls_drop, 0);
atomic_set(&tgtp->xmt_ls_rsp_error, 0);
+ atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
+ atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
@@ -1228,7 +1270,10 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
atomic_set(&tgtp->xmt_fcp_release, 0);
atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
+ atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
atomic_set(&tgtp->xmt_fcp_abort, 0);
atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
atomic_set(&tgtp->xmt_abort_unsol, 0);
@@ -1270,6 +1315,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nodelist *ndlp;
unsigned long iflag = 0;
int rrq_empty = 0;
@@ -1280,6 +1326,12 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
+
+ if (phba->targetport) {
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
+ }
+
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_for_each_entry_safe(ctxp, next_ctxp,
@@ -1682,6 +1734,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
+ ctxp->rqb_buffer = (void *)nvmebuf;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1715,6 +1768,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
/* Process FCP command */
if (rc == 0) {
+ ctxp->rqb_buffer = NULL;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
return;
@@ -1726,10 +1780,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
"NVMET RCV BUSY: xri x%x sz %d from %06x\n",
oxid, size, sid);
/* defer reposting rcv buffer till .defer_rcv callback */
- ctxp->rqb_buffer = nvmebuf;
+ ctxp->flag |= LPFC_NVMET_DEFER_RCV_REPOST;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
return;
}
+ ctxp->rqb_buffer = nvmebuf;
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1992,7 +2047,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
return NULL;
}
- if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
+ if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6109 NVMET prep FCP wqe: seg cnt err: "
"NPORT x%x oxid x%x ste %d cnt %d\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 25a65b0bb7f3..5b32c9e4d4ef 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -25,6 +25,10 @@
#define LPFC_NVMET_RQE_DEF_COUNT 512
#define LPFC_NVMET_SUCCESS_LEN 12
+#define LPFC_NVMET_MRQ_OFF 0xffff
+#define LPFC_NVMET_MRQ_AUTO 0
+#define LPFC_NVMET_MRQ_MAX 16
+
/* Used for NVME Target */
struct lpfc_nvmet_tgtport {
struct lpfc_hba *phba;
@@ -43,6 +47,8 @@ struct lpfc_nvmet_tgtport {
/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
atomic_t xmt_ls_rsp_error;
+ atomic_t xmt_ls_rsp_aborted;
+ atomic_t xmt_ls_rsp_xb_set;
atomic_t xmt_ls_rsp_cmpl;
/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
@@ -60,12 +66,15 @@ struct lpfc_nvmet_tgtport {
atomic_t xmt_fcp_rsp;
/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
+ atomic_t xmt_fcp_rsp_xb_set;
atomic_t xmt_fcp_rsp_cmpl;
atomic_t xmt_fcp_rsp_error;
+ atomic_t xmt_fcp_rsp_aborted;
atomic_t xmt_fcp_rsp_drop;
/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
+ atomic_t xmt_fcp_xri_abort_cqe;
atomic_t xmt_fcp_abort;
atomic_t xmt_fcp_abort_cmpl;
atomic_t xmt_abort_sol;
@@ -122,6 +131,7 @@ struct lpfc_nvmet_rcv_ctx {
#define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */
#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
+#define LPFC_NVMET_DEFER_RCV_REPOST 0x20 /* repost to RQ on defer rcv */
struct rqb_dmabuf *rqb_buffer;
struct lpfc_nvmet_ctxbuf *ctxbuf;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index aecd2399005d..5f5528a12308 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -475,28 +475,30 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *temp_hrqe;
struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
- int put_index;
+ int hq_put_index;
+ int dq_put_index;
/* sanity check on queue memory */
if (unlikely(!hq) || unlikely(!dq))
return -ENOMEM;
- put_index = hq->host_index;
- temp_hrqe = hq->qe[put_index].rqe;
- temp_drqe = dq->qe[dq->host_index].rqe;
+ hq_put_index = hq->host_index;
+ dq_put_index = dq->host_index;
+ temp_hrqe = hq->qe[hq_put_index].rqe;
+ temp_drqe = dq->qe[dq_put_index].rqe;
if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
return -EINVAL;
- if (put_index != dq->host_index)
+ if (hq_put_index != dq_put_index)
return -EINVAL;
/* If the host has not yet processed the next entry then we are done */
- if (((put_index + 1) % hq->entry_count) == hq->hba_index)
+ if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
return -EBUSY;
lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
/* Update the host index to point to the next slot */
- hq->host_index = ((put_index + 1) % hq->entry_count);
- dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+ hq->host_index = ((hq_put_index + 1) % hq->entry_count);
+ dq->host_index = ((dq_put_index + 1) % dq->entry_count);
hq->RQ_buf_posted++;
/* Ring The Header Receive Queue Doorbell */
@@ -517,7 +519,7 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
}
writel(doorbell.word0, hq->db_regaddr);
}
- return put_index;
+ return hq_put_index;
}
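The renamed indices make the invariant in lpfc_sli4_rq_put() easier to see: the header and data RQs form one logical queue, so both put indices must match, and the pair is full when advancing would land on the hardware's get index. The index arithmetic, isolated:

    if (hq_put_index != dq_put_index)
        return -EINVAL;                    /* queues out of lockstep */
    if (((hq_put_index + 1) % entry_count) == hba_index)
        return -EBUSY;                     /* full: keep one slot free */
    hq_put_index = (hq_put_index + 1) % entry_count;   /* advance both */
    dq_put_index = (dq_put_index + 1) % entry_count;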
/**
@@ -12318,41 +12320,6 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked by the worker thread to process all the pending
- * SLI4 NVME abort XRI events.
- **/
-void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
-{
- struct lpfc_cq_event *cq_event;
-
- /* First, declare the fcp xri abort event has been handled */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
- spin_unlock_irq(&phba->hbalock);
- /* Now, handle all the fcp xri abort events */
- while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
- /* Get the first event from the head of the event queue */
- spin_lock_irq(&phba->hbalock);
- list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
- cq_event, struct lpfc_cq_event, list);
- spin_unlock_irq(&phba->hbalock);
- /* Notify aborted XRI for NVME work queue */
- if (phba->nvmet_support) {
- lpfc_sli4_nvmet_xri_aborted(phba,
- &cq_event->cqe.wcqe_axri);
- } else {
- lpfc_sli4_nvme_xri_aborted(phba,
- &cq_event->cqe.wcqe_axri);
- }
- /* Free the event processed back to the free pool */
- lpfc_sli4_cq_event_release(phba, cq_event);
- }
-}
-
-/**
* lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
* @phba: pointer to lpfc hba data structure.
*
@@ -12548,6 +12515,24 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
return irspiocbq;
}
+inline struct lpfc_cq_event *
+lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* Allocate a new internal CQ_EVENT entry */
+ cq_event = lpfc_sli4_cq_event_alloc(phba);
+ if (!cq_event) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0602 Failed to alloc CQ_EVENT entry\n");
+ return NULL;
+ }
+
+ /* Move the CQE into the event */
+ memcpy(&cq_event->cqe, entry, size);
+ return cq_event;
+}
+
/**
* lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
* @phba: Pointer to HBA context object.
@@ -12569,16 +12554,9 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
"word2:x%x, word3:x%x\n", mcqe->word0,
mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
- /* Allocate a new internal CQ_EVENT entry */
- cq_event = lpfc_sli4_cq_event_alloc(phba);
- if (!cq_event) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0394 Failed to allocate CQ_EVENT entry\n");
+ cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
+ if (!cq_event)
return false;
- }
-
- /* Move the CQE into an asynchronous event entry */
- memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
/* Set the async event flag */
@@ -12824,18 +12802,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
struct lpfc_cq_event *cq_event;
unsigned long iflags;
- /* Allocate a new internal CQ_EVENT entry */
- cq_event = lpfc_sli4_cq_event_alloc(phba);
- if (!cq_event) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0602 Failed to allocate CQ_EVENT entry\n");
- return false;
- }
-
- /* Move the CQE into the proper xri abort event list */
- memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
switch (cq->subtype) {
case LPFC_FCP:
+ cq_event = lpfc_cq_event_setup(
+ phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+ if (!cq_event)
+ return false;
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
@@ -12844,7 +12816,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
+ case LPFC_NVME_LS: /* NVME LS uses ELS resources */
case LPFC_ELS:
+ cq_event = lpfc_cq_event_setup(
+ phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+ if (!cq_event)
+ return false;
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_els_xri_aborted_work_queue);
@@ -12854,13 +12831,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
workposted = true;
break;
case LPFC_NVME:
- spin_lock_irqsave(&phba->hbalock, iflags);
- list_add_tail(&cq_event->list,
- &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
- /* Set the nvme xri abort event flag */
- phba->hba_flag |= NVME_XRI_ABORT_EVENT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- workposted = true;
+ /* Notify aborted XRI for NVME work queue */
+ if (phba->nvmet_support)
+ lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
+ else
+ lpfc_sli4_nvme_xri_aborted(phba, wcqe);
+
+ workposted = false;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12868,7 +12845,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
"%08x %08x %08x %08x\n",
cq->subtype, wcqe->word0, wcqe->parameter,
wcqe->word2, wcqe->word3);
- lpfc_sli4_cq_event_release(phba, cq_event);
workposted = false;
break;
}
@@ -12913,8 +12889,8 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2537 Receive Frame Truncated!!\n");
case FC_STATUS_RQ_SUCCESS:
- lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
if (!dma_buf) {
hrq->RQ_no_buf_found++;
@@ -13316,8 +13292,8 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
"6126 Receive Frame Truncated!!\n");
/* Drop thru */
case FC_STATUS_RQ_SUCCESS:
- lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
if (!dma_buf) {
hrq->RQ_no_buf_found++;
@@ -13919,7 +13895,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
while (!list_empty(&queue->page_list)) {
list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
list);
- dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
+ dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
@@ -13938,6 +13914,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
/**
* lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
* @phba: The HBA that this queue is being created on.
+ * @page_size: The size of a queue page
* @entry_size: The size of each queue entry for this queue.
* @entry_count: The number of entries that this queue will handle.
*
@@ -13946,8 +13923,8 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
* queue on the HBA.
**/
struct lpfc_queue *
-lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
- uint32_t entry_count)
+lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
+ uint32_t entry_size, uint32_t entry_count)
{
struct lpfc_queue *queue;
struct lpfc_dmabuf *dmabuf;
@@ -13956,7 +13933,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = page_size;
queue = kzalloc(sizeof(struct lpfc_queue) +
(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
@@ -13973,6 +13950,15 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
INIT_LIST_HEAD(&queue->wq_list);
INIT_LIST_HEAD(&queue->page_list);
INIT_LIST_HEAD(&queue->child_list);
+
+ /* Set queue parameters now. If the system cannot provide memory
+ * resources, the free routine needs to know what was allocated.
+ */
+ queue->entry_size = entry_size;
+ queue->entry_count = entry_count;
+ queue->page_size = hw_page_size;
+ queue->phba = phba;
+
for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!dmabuf)
@@ -13994,9 +13980,6 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
queue->qe[total_qe_count].address = dma_pointer;
}
}
- queue->entry_size = entry_size;
- queue->entry_count = entry_count;
- queue->phba = phba;
INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
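Hoisting the entry_size/entry_count/page_size/phba assignments ahead of the page-allocation loop means a failed allocation can hand the half-built queue straight to lpfc_sli4_queue_free(), which now frees by queue->page_size instead of a hard-coded SLI4_PAGE_SIZE. The shape of that error path, sketched with a hypothetical per-page helper:

    q->entry_size = entry_size;      /* record geometry first ... */
    q->page_size  = page_size;
    q->phba       = phba;
    for (x = 0; x < q->page_count; x++) {
        if (!alloc_queue_page(q, x)) /* hypothetical per-page alloc */
            goto out_fail;
    }
    return q;
    out_fail:
    lpfc_sli4_queue_free(q);         /* ... so teardown frees correctly */
    return NULL;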
@@ -14299,7 +14282,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
if (!cq || !eq)
return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = cq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14318,8 +14301,8 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.cqv);
if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
- /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
- bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
+ bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
+ (cq->page_size / SLI4_PAGE_SIZE));
bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
eq->queue_id);
} else {
@@ -14327,6 +14310,18 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
eq->queue_id);
}
switch (cq->entry_count) {
+ case 2048:
+ case 4096:
+ if (phba->sli4_hba.pc_sli4_params.cqv ==
+ LPFC_Q_CREATE_VERSION_2) {
+ cq_create->u.request.context.lpfc_cq_context_count =
+ cq->entry_count;
+ bf_set(lpfc_cq_context_count,
+ &cq_create->u.request.context,
+ LPFC_CQ_CNT_WORD7);
+ break;
+ }
+ /* Fall Thru */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0361 Unsupported CQ count: "
@@ -14352,7 +14347,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
break;
}
list_for_each_entry(dmabuf, &cq->page_list, list) {
- memset(dmabuf->virt, 0, hw_page_size);
+ memset(dmabuf->virt, 0, cq->page_size);
cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
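With Q_CREATE version 2, the mailbox page-size field is now derived from the queue instead of hard-coded to 1: it encodes cq->page_size in units of SLI4_PAGE_SIZE (4096 bytes). Worked out for the two sizes this patch defines:

    field = LPFC_DEFAULT_PAGE_SIZE  / SLI4_PAGE_SIZE;  /*  4096/4096 = 1 */
    field = LPFC_EXPANDED_PAGE_SIZE / SLI4_PAGE_SIZE;  /* 16384/4096 = 4 */

The default therefore still programs the value 1 that the old hard-coded path used; an expanded 16 KB queue page programs 4, which is what the new 4096-entry CQ and 1024-entry WQ counts appear to rely on.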
@@ -14433,8 +14428,6 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
numcq = phba->cfg_nvmet_mrq;
if (!cqp || !eqp || !numcq)
return -ENODEV;
- if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14465,6 +14458,8 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
status = -ENOMEM;
goto out;
}
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = cq->page_size;
switch (idx) {
case 0:
@@ -14482,6 +14477,19 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
bf_set(lpfc_mbx_cq_create_set_num_cq,
&cq_set->u.request, numcq);
switch (cq->entry_count) {
+ case 2048:
+ case 4096:
+ if (phba->sli4_hba.pc_sli4_params.cqv ==
+ LPFC_Q_CREATE_VERSION_2) {
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request,
+ cq->entry_count);
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request,
+ LPFC_CQ_CNT_WORD7);
+ break;
+ }
+ /* Fall Thru */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3118 Bad CQ count. (%d)\n",
@@ -14578,6 +14586,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
cq->host_index = 0;
cq->hba_index = 0;
cq->entry_repost = LPFC_CQ_REPOST;
+ cq->chann = idx;
rc = 0;
list_for_each_entry(dmabuf, &cq->page_list, list) {
@@ -14872,12 +14881,13 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
void __iomem *bar_memmap_p;
uint32_t db_offset;
uint16_t pci_barset;
+ uint8_t wq_create_version;
/* sanity check on queue memory */
if (!wq || !cq)
return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = wq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14898,7 +14908,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.wqv);
- switch (phba->sli4_hba.pc_sli4_params.wqv) {
+ if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
+ wq_create_version = LPFC_Q_CREATE_VERSION_1;
+ else
+ wq_create_version = LPFC_Q_CREATE_VERSION_0;
+
+ switch (wq_create_version) {
case LPFC_Q_CREATE_VERSION_0:
switch (wq->entry_size) {
default:
@@ -14956,7 +14971,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
}
bf_set(lpfc_mbx_wq_create_page_size,
&wq_create->u.request_1,
- LPFC_WQ_PAGE_SIZE_4096);
+ (wq->page_size / SLI4_PAGE_SIZE));
page = wq_create->u.request_1.page;
break;
default:
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 13b8f4d4da34..81fb58e59e60 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -161,7 +161,6 @@ struct lpfc_queue {
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */
uint32_t queue_id; /* Queue ID assigned by the hardware */
uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
- uint32_t page_count; /* Number of pages allocated for this queue */
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
@@ -169,6 +168,11 @@ struct lpfc_queue {
struct lpfc_rqb *rqbp; /* ptr to RQ buffers */
uint32_t q_mode;
+ uint16_t page_count; /* Number of pages allocated for this queue */
+ uint16_t page_size; /* size of page allocated for this queue */
+#define LPFC_EXPANDED_PAGE_SIZE 16384
+#define LPFC_DEFAULT_PAGE_SIZE 4096
+ uint16_t chann; /* IO channel this queue is associated with */
uint16_t db_format;
#define LPFC_DB_RING_FORMAT 0x01
#define LPFC_DB_LIST_FORMAT 0x02
@@ -366,9 +370,9 @@ struct lpfc_bmbx {
#define LPFC_EQE_DEF_COUNT 1024
#define LPFC_CQE_DEF_COUNT 1024
+#define LPFC_CQE_EXP_COUNT 4096
#define LPFC_WQE_DEF_COUNT 256
-#define LPFC_WQE128_DEF_COUNT 128
-#define LPFC_WQE128_MAX_COUNT 256
+#define LPFC_WQE_EXP_COUNT 1024
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512
@@ -668,7 +672,6 @@ struct lpfc_sli4_hba {
struct list_head sp_asynce_work_queue;
struct list_head sp_fcp_xri_aborted_work_queue;
struct list_head sp_els_xri_aborted_work_queue;
- struct list_head sp_nvme_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
struct lpfc_sli4_lnk_info lnk_info;
@@ -769,7 +772,7 @@ int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
void lpfc_sli4_hba_reset(struct lpfc_hba *);
struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
- uint32_t);
+ uint32_t, uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
@@ -820,7 +823,6 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e0181371af09..c232bf0e8998 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.4.0.4"
+#define LPFC_DRIVER_VERSION "11.4.0.6"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index f5a36ccb8606..ba6503f37756 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.703.05.00-rc1"
-#define MEGASAS_RELDATE "October 5, 2017"
+#define MEGASAS_VERSION "07.704.04.00-rc1"
+#define MEGASAS_RELDATE "December 7, 2017"
/*
* Device IDs
@@ -197,6 +197,7 @@ enum MFI_CMD_OP {
MFI_CMD_ABORT = 0x6,
MFI_CMD_SMP = 0x7,
MFI_CMD_STP = 0x8,
+ MFI_CMD_NVME = 0x9,
MFI_CMD_OP_COUNT,
MFI_CMD_INVALID = 0xff
};
@@ -230,7 +231,7 @@ enum MFI_CMD_OP {
/*
* Global functions
*/
-extern u8 MR_ValidateMapInfo(struct megasas_instance *instance);
+extern u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id);
/*
@@ -1352,7 +1353,13 @@ struct megasas_ctrl_info {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u16 reserved:8;
+ u16 reserved:2;
+ u16 support_nvme_passthru:1;
+ u16 support_pl_debug_info:1;
+ u16 support_flash_comp_info:1;
+ u16 support_host_info:1;
+ u16 support_dual_fw_update:1;
+ u16 support_ssc_rev3:1;
u16 fw_swaps_bbu_vpd_info:1;
u16 support_pd_map_target_id:1;
u16 support_ses_ctrl_in_multipathcfg:1;
@@ -1377,7 +1384,19 @@ struct megasas_ctrl_info {
* provide the data in little endian order
*/
u16 fw_swaps_bbu_vpd_info:1;
- u16 reserved:8;
+ u16 support_ssc_rev3:1;
+ /* FW supports CacheCade 3.0, only one SSCD creation allowed */
+ u16 support_dual_fw_update:1;
+ /* FW supports dual firmware update feature */
+ u16 support_host_info:1;
+ /* FW supports MR_DCMD_CTRL_HOST_INFO_SET/GET */
+ u16 support_flash_comp_info:1;
+ /* FW supports MR_DCMD_CTRL_FLASH_COMP_INFO_GET */
+ u16 support_pl_debug_info:1;
+ /* FW supports retrieval of PL debug information through apps */
+ u16 support_nvme_passthru:1;
+ /* FW supports NVMe passthru commands */
+ u16 reserved:2;
#endif
} adapter_operations4;
u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */
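The mirrored #if blocks exist because bitfield allocation order is implementation-defined and, on the ABIs Linux runs on, tracks byte order: to keep each flag on the same on-wire bit, the declaration order is reversed for big-endian builds, and any bit carved out of reserved must be carved out of both halves, as the support_nvme_passthru additions above do. A two-field illustration:

    struct example_caps {
    #if defined(__BIG_ENDIAN_BITFIELD)
        u32 reserved:31;
        u32 feature_a:1;    /* lowest-order bit on the wire */
    #else
        u32 feature_a:1;    /* lowest-order bit on the wire */
        u32 reserved:31;
    #endif
    };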
@@ -1630,7 +1649,8 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:18;
+ u32 reserved:17;
+ u32 support_nvme_passthru:1;
u32 support_64bit_mode:1;
u32 support_pd_map_target_id:1;
u32 support_qd_throttling:1;
@@ -1660,7 +1680,8 @@ typedef union _MFI_CAPABILITIES {
u32 support_qd_throttling:1;
u32 support_pd_map_target_id:1;
u32 support_64bit_mode:1;
- u32 reserved:18;
+ u32 support_nvme_passthru:1;
+ u32 reserved:17;
#endif
} mfi_capabilities;
__le32 reg;
@@ -2188,7 +2209,6 @@ struct megasas_instance {
struct megasas_evt_detail *evt_detail;
dma_addr_t evt_detail_h;
struct megasas_cmd *aen_cmd;
- struct mutex hba_mutex;
struct semaphore ioctl_sem;
struct Scsi_Host *host;
@@ -2269,6 +2289,7 @@ struct megasas_instance {
u32 nvme_page_size;
u8 adapter_type;
bool consistent_mask_64bit;
+ bool support_nvme_passthru;
};
struct MR_LD_VF_MAP {
u32 size;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index cc54bdb5c712..2791141bd035 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -181,6 +181,7 @@ static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
+static bool support_nvme_encapsulation;
/* define lock for aen poll */
spinlock_t poll_aen_lock;
@@ -1952,7 +1953,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
}
}
- mutex_lock(&instance->hba_mutex);
+ mutex_lock(&instance->reset_mutex);
/* Send DCMD to Firmware and cache the information */
if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
megasas_get_pd_info(instance, sdev);
@@ -1966,7 +1967,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
megasas_set_static_target_properties(sdev, is_target_prop);
- mutex_unlock(&instance->hba_mutex);
+ mutex_unlock(&instance->reset_mutex);
/* This sdev property may change post OCR */
megasas_set_dynamic_target_properties(sdev);
@@ -3122,6 +3123,16 @@ megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr
return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
}
+static ssize_t
+megasas_fw_cmds_outstanding_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
+}
+
static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
@@ -3132,6 +3143,8 @@ static DEVICE_ATTR(page_size, S_IRUGO,
megasas_page_size_show, NULL);
static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
megasas_ldio_outstanding_show, NULL);
+static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO,
+ megasas_fw_cmds_outstanding_show, NULL);
struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_buffer_size,
@@ -3139,6 +3152,7 @@ struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_state,
&dev_attr_page_size,
&dev_attr_ldio_outstanding,
+ &dev_attr_fw_cmds_outstanding,
NULL,
};
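Once listed in megaraid_host_attrs, the new counter shows up under the Scsi_Host sysfs directory. A hedged userspace reader (host0 is a placeholder; list /sys/class/scsi_host/ to find the megaraid_sas host number on a given system):

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/class/scsi_host/host0/fw_cmds_outstanding";
	FILE *f = fopen(path, "r");
	int outstanding;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &outstanding) == 1)
		printf("FW commands outstanding: %d\n", outstanding);
	fclose(f);
	return 0;
}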
@@ -3321,6 +3335,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
case MFI_CMD_SMP:
case MFI_CMD_STP:
+ case MFI_CMD_NVME:
megasas_complete_int_cmd(instance, cmd);
break;
@@ -3331,10 +3346,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
fusion->fast_path_io = 0;
spin_lock_irqsave(instance->host->host_lock, flags);
+ status = cmd->frame->hdr.cmd_status;
instance->map_update_cmd = NULL;
- if (cmd->frame->hdr.cmd_status != 0) {
- if (cmd->frame->hdr.cmd_status !=
- MFI_STAT_NOT_FOUND)
+ if (status != MFI_STAT_OK) {
+ if (status != MFI_STAT_NOT_FOUND)
dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
cmd->frame->hdr.cmd_status);
else {
@@ -3344,8 +3359,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
flags);
break;
}
- } else
- instance->map_id++;
+ }
+
megasas_return_cmd(instance, cmd);
/*
@@ -3353,10 +3368,14 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
* Validate Map will set proper value.
* Meanwhile all IOs will go as LD IO.
*/
- if (MR_ValidateMapInfo(instance))
+ if (status == MFI_STAT_OK &&
+ (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
+ instance->map_id++;
fusion->fast_path_io = 1;
- else
+ } else {
fusion->fast_path_io = 0;
+ }
+
megasas_sync_map_info(instance);
spin_unlock_irqrestore(instance->host->host_lock,
flags);
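After this rework the driver bumps map_id only once both the sync DCMD status and the validation of the staged map agree. A compact standalone sketch of that validate-then-commit ordering (all names are hypothetical stand-ins):

#include <stdint.h>
#include <stdio.h>

static int validate_map(uint64_t generation)
{
	(void)generation;
	return 1;	/* stands in for MR_ValidateMapInfo() */
}

int main(void)
{
	uint64_t map_id = 0;
	int status = 0;		/* 0 stands in for MFI_STAT_OK */
	int fast_path_io;

	if (status == 0 && validate_map(map_id + 1)) {
		map_id++;	/* commit the new map generation */
		fast_path_io = 1;
	} else {
		fast_path_io = 0;	/* keep IOs on the LD path */
	}
	printf("map_id=%llu fast_path=%d\n",
	       (unsigned long long)map_id, fast_path_io);
	return 0;
}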
@@ -4677,10 +4696,12 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
sizeof(struct megasas_ctrl_info));
if ((instance->adapter_type != MFI_SERIES) &&
- !instance->mask_interrupts)
+ !instance->mask_interrupts) {
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
- else
+ } else {
ret = megasas_issue_polled(instance, cmd);
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ }
switch (ret) {
case DCMD_SUCCESS:
@@ -4702,6 +4723,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
ci->adapterOperations3.useSeqNumJbodFP;
instance->support_morethan256jbod =
ci->adapter_operations4.support_pd_map_target_id;
+ instance->support_nvme_passthru =
+ ci->adapter_operations4.support_nvme_passthru;
/*Check whether controller is iMR or MR */
instance->is_imr = (ci->memory_size ? 0 : 1);
@@ -4718,6 +4741,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
instance->secure_jbod_support ? "Yes" : "No");
+ dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
+ instance->support_nvme_passthru ? "Yes" : "No");
break;
case DCMD_TIMEOUT:
@@ -5387,7 +5412,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
}
for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
fusion->stream_detect_by_ld[i] =
- kmalloc(sizeof(struct LD_STREAM_DETECT),
+ kzalloc(sizeof(struct LD_STREAM_DETECT),
GFP_KERNEL);
if (!fusion->stream_detect_by_ld[i]) {
dev_err(&instance->pdev->dev,
@@ -5432,7 +5457,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
ctrl_info->adapterOperations2.supportUnevenSpans;
if (instance->UnevenSpanSupport) {
struct fusion_context *fusion = instance->ctrl_context;
- if (MR_ValidateMapInfo(instance))
+ if (MR_ValidateMapInfo(instance, instance->map_id))
fusion->fast_path_io = 1;
else
fusion->fast_path_io = 0;
@@ -5581,6 +5606,7 @@ megasas_get_seq_num(struct megasas_instance *instance,
struct megasas_dcmd_frame *dcmd;
struct megasas_evt_log_info *el_info;
dma_addr_t el_info_h = 0;
+ int ret;
cmd = megasas_get_cmd(instance);
@@ -5613,26 +5639,29 @@ megasas_get_seq_num(struct megasas_instance *instance,
megasas_set_dma_settings(instance, dcmd, el_info_h,
sizeof(struct megasas_evt_log_info));
- if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
- DCMD_SUCCESS) {
- /*
- * Copy the data back into callers buffer
- */
- eli->newest_seq_num = el_info->newest_seq_num;
- eli->oldest_seq_num = el_info->oldest_seq_num;
- eli->clear_seq_num = el_info->clear_seq_num;
- eli->shutdown_seq_num = el_info->shutdown_seq_num;
- eli->boot_seq_num = el_info->boot_seq_num;
- } else
- dev_err(&instance->pdev->dev, "DCMD failed "
- "from %s\n", __func__);
+ ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
+ if (ret != DCMD_SUCCESS) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ goto dcmd_failed;
+ }
+ /*
+ * Copy the data back into callers buffer
+ */
+ eli->newest_seq_num = el_info->newest_seq_num;
+ eli->oldest_seq_num = el_info->oldest_seq_num;
+ eli->clear_seq_num = el_info->clear_seq_num;
+ eli->shutdown_seq_num = el_info->shutdown_seq_num;
+ eli->boot_seq_num = el_info->boot_seq_num;
+
+dcmd_failed:
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h);
megasas_return_cmd(instance, cmd);
- return 0;
+ return ret;
}
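The rework replaces the nested if/else with the kernel's usual single-exit error pattern: issue, check, jump to one unwind label that frees the buffer on both paths, and propagate the real status instead of an unconditional 0. A minimal hedged sketch of the shape (malloc stands in for the coherent DMA buffer):

#include <stdio.h>
#include <stdlib.h>

static int issue_blocked_cmd(void)
{
	return 0;	/* stands in for DCMD_SUCCESS */
}

static int get_seq_num_shape(void)
{
	char *buf = malloc(64);	/* stands in for the DMA buffer */
	int ret;

	if (!buf)
		return -1;

	ret = issue_blocked_cmd();
	if (ret != 0) {
		fprintf(stderr, "Failed from %s %d\n", __func__, __LINE__);
		goto dcmd_failed;
	}

	/* copy the event log info back to the caller here */

dcmd_failed:
	free(buf);	/* the unwind runs on success and failure alike */
	return ret;	/* propagate the real status instead of 0 */
}

int main(void)
{
	return get_seq_num_shape();
}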
/**
@@ -6346,7 +6375,6 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
spin_lock_init(&instance->stream_lock);
spin_lock_init(&instance->completion_lock);
- mutex_init(&instance->hba_mutex);
mutex_init(&instance->reset_mutex);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
@@ -6704,6 +6732,7 @@ megasas_resume(struct pci_dev *pdev)
*/
atomic_set(&instance->fw_outstanding, 0);
+ atomic_set(&instance->ldio_outstanding, 0);
/* Now re-enable MSI-X */
if (instance->msix_vectors) {
@@ -6822,7 +6851,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
u32 pd_seq_map_sz;
instance = pci_get_drvdata(pdev);
- instance->unload = 1;
host = instance->host;
fusion = instance->ctrl_context;
@@ -6833,6 +6861,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->fw_crash_state != UNAVAILABLE)
megasas_free_host_crash_buffer(instance);
scsi_remove_host(instance->host);
+ instance->unload = 1;
if (megasas_wait_for_adapter_operational(instance))
goto skip_firing_dcmds;
@@ -7004,9 +7033,9 @@ static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
/**
* megasas_mgmt_poll - char node "poll" entry point
* */
-static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
+static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
{
- unsigned int mask;
+ __poll_t mask;
unsigned long flags;
poll_wait(file, &megasas_poll_wait, wait);
@@ -7087,7 +7116,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
return -EINVAL;
}
- if (ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) {
+ if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
+ ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
+ !instance->support_nvme_passthru)) {
dev_err(&instance->pdev->dev,
"Received invalid ioctl command 0x%x\n",
ioc->frame.hdr.cmd);
@@ -7301,9 +7332,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
struct megasas_iocpacket *ioc;
struct megasas_instance *instance;
int error;
- int i;
- unsigned long flags;
- u32 wait_time = MEGASAS_RESET_WAIT_TIME;
ioc = memdup_user(user_ioc, sizeof(*ioc));
if (IS_ERR(ioc))
@@ -7315,10 +7343,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
- /* Adjust ioctl wait time for VF mode */
- if (instance->requestorId)
- wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
-
/* Block ioctls in VF mode */
if (instance->requestorId && !allow_vf_ioctls) {
error = -ENODEV;
@@ -7341,32 +7365,10 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
- for (i = 0; i < wait_time; i++) {
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- break;
- }
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- dev_notice(&instance->pdev->dev, "waiting"
- "for controller reset to finish\n");
- }
-
- msleep(1000);
- }
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
+ if (megasas_wait_for_adapter_operational(instance)) {
error = -ENODEV;
goto out_up;
}
- spin_unlock_irqrestore(&instance->hba_lock, flags);
error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
out_up:
@@ -7382,9 +7384,6 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
struct megasas_instance *instance;
struct megasas_aen aen;
int error;
- int i;
- unsigned long flags;
- u32 wait_time = MEGASAS_RESET_WAIT_TIME;
if (file->private_data != file) {
printk(KERN_DEBUG "megasas: fasync_helper was not "
@@ -7408,32 +7407,8 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
return -ENODEV;
}
- for (i = 0; i < wait_time; i++) {
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock,
- flags);
- break;
- }
-
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- dev_notice(&instance->pdev->dev, "waiting for"
- "controller reset to finish\n");
- }
-
- msleep(1000);
- }
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
+ if (megasas_wait_for_adapter_operational(instance))
return -ENODEV;
- }
- spin_unlock_irqrestore(&instance->hba_lock, flags);
mutex_lock(&instance->reset_mutex);
error = megasas_register_aen(instance, aen.seq_num,
@@ -7613,6 +7588,14 @@ static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
}
static DRIVER_ATTR_RW(dbg_lvl);
+static ssize_t
+support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", support_nvme_encapsulation);
+}
+
+static DRIVER_ATTR_RO(support_nvme_encapsulation);
+
static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
{
sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
@@ -7801,6 +7784,7 @@ static int __init megasas_init(void)
support_poll_for_event = 2;
support_device_change = 1;
+ support_nvme_encapsulation = true;
memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
@@ -7850,8 +7834,17 @@ static int __init megasas_init(void)
if (rval)
goto err_dcf_support_device_change;
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_support_nvme_encapsulation);
+ if (rval)
+ goto err_dcf_support_nvme_encapsulation;
+
return rval;
+err_dcf_support_nvme_encapsulation:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_device_change);
+
err_dcf_support_device_change:
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
@@ -7884,6 +7877,8 @@ static void __exit megasas_exit(void)
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_release_date);
driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_nvme_encapsulation);
pci_unregister_driver(&megasas_pci_driver);
unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index bfad9bfc313f..59ecbb3b53b5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -168,7 +168,7 @@ static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
/*
* This function will Populate Driver Map using firmware raid map
*/
-void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+static int MR_PopulateDrvRaidMap(struct megasas_instance *instance, u64 map_id)
{
struct fusion_context *fusion = instance->ctrl_context;
struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
@@ -181,7 +181,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
struct MR_DRV_RAID_MAP_ALL *drv_map =
- fusion->ld_drv_map[(instance->map_id & 1)];
+ fusion->ld_drv_map[(map_id & 1)];
struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
void *raid_map_data = NULL;
@@ -190,7 +190,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
if (instance->max_raid_mapsize) {
- fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
+ fw_map_dyn = fusion->ld_map[(map_id & 1)];
desc_table =
(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
if (desc_table != fw_map_dyn->raid_map_desc_table)
@@ -255,11 +255,11 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
} else if (instance->supportmax256vd) {
fw_map_ext =
- (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)];
+ (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(map_id & 1)];
ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n");
- return;
+ return 1;
}
pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
@@ -282,9 +282,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
} else {
fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
- fusion->ld_map[(instance->map_id & 1)];
+ fusion->ld_map[(map_id & 1)];
pFwRaidMap = &fw_map_old->raidMap;
ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
+ if (ld_count > MAX_LOGICAL_DRIVES) {
+ dev_dbg(&instance->pdev->dev,
+ "LD count exposed in RAID map in not valid\n");
+ return 1;
+ }
+
pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
@@ -300,12 +306,14 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
sizeof(struct MR_DEV_HANDLE_INFO) *
MAX_RAIDMAP_PHYSICAL_DEVICES);
}
+
+ return 0;
}
/*
* This function will validate Map info data provided by FW
*/
-u8 MR_ValidateMapInfo(struct megasas_instance *instance)
+u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
{
struct fusion_context *fusion;
struct MR_DRV_RAID_MAP_ALL *drv_map;
@@ -317,11 +325,11 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
u16 ld;
u32 expected_size;
-
- MR_PopulateDrvRaidMap(instance);
+ if (MR_PopulateDrvRaidMap(instance, map_id))
+ return 0;
fusion = instance->ctrl_context;
- drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ drv_map = fusion->ld_drv_map[(map_id & 1)];
pDrvRaidMap = &drv_map->raidMap;
lbInfo = fusion->load_balance_info;
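The (map_id & 1) indexing above reflects a two-slot ping-pong buffer: the driver can stage and validate the next map generation while the current one keeps serving IO, and passing map_id explicitly lets callers validate map_id + 1 before committing. A hedged standalone sketch of the scheme (hypothetical types):

#include <stdint.h>
#include <stdio.h>

struct raid_map_sketch { int generation; };

static struct raid_map_sketch maps[2];

static struct raid_map_sketch *map_for(uint64_t map_id)
{
	return &maps[map_id & 1];	/* ping-pong buffer select */
}

int main(void)
{
	uint64_t map_id = 0;

	map_for(map_id + 1)->generation = 1;	/* stage the new map */
	map_id++;				/* commit: readers flip over */
	printf("active generation: %d\n", map_for(map_id)->generation);
	return 0;
}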
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 65dc4fea6352..073ced07e662 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -983,7 +983,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MFI_CAPABILITIES *drv_ops;
u32 scratch_pad_2;
unsigned long flags;
- struct timeval tv;
+ ktime_t time;
bool cur_fw_64bit_dma_capable;
fusion = instance->ctrl_context;
@@ -1042,13 +1042,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
- do_gettimeofday(&tv);
+ time = ktime_get_real();
/* Convert to milliseconds as per FW requirement */
- IOCInitMessage->TimeStamp = cpu_to_le64((tv.tv_sec * 1000) +
- (tv.tv_usec / 1000));
+ IOCInitMessage->TimeStamp = cpu_to_le64(ktime_to_ms(time));
init_frame = (struct megasas_init_frame *)cmd->frame;
- memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+ memset(init_frame, 0, IOC_INIT_FRAME_SIZE);
frame_hdr = &cmd->frame->hdr;
frame_hdr->cmd_status = 0xFF;
@@ -1080,6 +1079,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops->mfi_capabilities.support_qd_throttling = 1;
drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
+ drv_ops->mfi_capabilities.support_nvme_passthru = 1;
if (instance->consistent_mask_64bit)
drv_ops->mfi_capabilities.support_64bit_mode = 1;
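The do_gettimeofday() to ktime_get_real() switch above yields a 64-bit, y2038-safe millisecond timestamp in one call instead of hand-assembling it from seconds and microseconds. A userspace analogue of the same computation, as a sketch:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Rough userspace equivalent of ktime_get_real() + ktime_to_ms(). */
static uint64_t real_time_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

int main(void)
{
	printf("timestamp: %llu ms\n", (unsigned long long)real_time_ms());
	return 0;
}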
@@ -1320,7 +1320,7 @@ megasas_get_map_info(struct megasas_instance *instance)
fusion->fast_path_io = 0;
if (!megasas_get_ld_map_info(instance)) {
- if (MR_ValidateMapInfo(instance)) {
+ if (MR_ValidateMapInfo(instance, instance->map_id)) {
fusion->fast_path_io = 1;
return 0;
}
@@ -1603,7 +1603,7 @@ static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
fusion = instance->ctrl_context;
- cmd = kmalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
if (!cmd) {
dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
@@ -2664,16 +2664,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
praid_context = &io_request->RaidContext;
if (instance->adapter_type == VENTURA_SERIES) {
- spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
- megasas_stream_detect(instance, cmd, &io_info);
- spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
- /* In ventura if stream detected for a read and it is read ahead
- * capable make this IO as LDIO
- */
- if (is_stream_detected(&io_request->RaidContext.raid_context_g35) &&
- io_info.isRead && io_info.ra_capable)
- fp_possible = false;
-
/* FP for Optimal raid level 1.
* All large RAID-1 writes (> 32 KiB, both WT and WB modes)
* are built by the driver as LD I/Os.
@@ -2699,6 +2689,20 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
}
}
+ if (!fp_possible ||
+ (io_info.isRead && io_info.ra_capable)) {
+ spin_lock_irqsave(&instance->stream_lock,
+ spinlock_flags);
+ megasas_stream_detect(instance, cmd, &io_info);
+ spin_unlock_irqrestore(&instance->stream_lock,
+ spinlock_flags);
+ /* On Ventura, if a stream is detected for a read and the LD is
+ * read-ahead capable, treat this IO as an LD IO
+ */
+ if (is_stream_detected(&io_request->RaidContext.raid_context_g35))
+ fp_possible = false;
+ }
+
/* If raid is NULL, set CPU affinity to default CPU0 */
if (raid)
megasas_set_raidflag_cpu_affinity(praid_context,
@@ -3953,6 +3957,8 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u16 smid;
bool refire_cmd = 0;
+ u8 result;
+ u32 opcode = 0;
fusion = instance->ctrl_context;
@@ -3963,29 +3969,53 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
cmd_fusion = fusion->cmd_list[j];
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
smid = le16_to_cpu(cmd_mfi->context.smid);
+ result = REFIRE_CMD;
if (!smid)
continue;
- /* Do not refire shutdown command */
- if (le32_to_cpu(cmd_mfi->frame->dcmd.opcode) ==
- MR_DCMD_CTRL_SHUTDOWN) {
- cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
- megasas_complete_cmd(instance, cmd_mfi, DID_OK);
- continue;
+ req_desc = megasas_get_request_descriptor(instance, smid - 1);
+
+ switch (cmd_mfi->frame->hdr.cmd) {
+ case MFI_CMD_DCMD:
+ opcode = le32_to_cpu(cmd_mfi->frame->dcmd.opcode);
+ /* Do not refire shutdown command */
+ if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
+ cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
+ result = COMPLETE_CMD;
+ break;
+ }
+
+ refire_cmd = ((opcode != MR_DCMD_LD_MAP_GET_INFO)) &&
+ (opcode != MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
+ !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
+
+ if (!refire_cmd)
+ result = RETURN_CMD;
+
+ break;
+ case MFI_CMD_NVME:
+ if (!instance->support_nvme_passthru) {
+ cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD;
+ result = COMPLETE_CMD;
+ }
+
+ break;
+ default:
+ break;
}
- req_desc = megasas_get_request_descriptor
- (instance, smid - 1);
- refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
- cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
- (cmd_mfi->frame->dcmd.opcode !=
- cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
- && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
- if (refire_cmd)
+ switch (result) {
+ case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc);
- else
+ break;
+ case RETURN_CMD:
megasas_return_cmd(instance, cmd_mfi);
+ break;
+ case COMPLETE_CMD:
+ megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+ break;
+ }
}
}
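The refire loop now separates classification from action: each command is mapped to one of the CMD_RET_VALUES dispositions first, and the single switch at the bottom acts on it. A standalone sketch of the split (the predicates are illustrative stand-ins for the opcode checks above):

#include <stdio.h>

enum cmd_disposition { REFIRE = 1, COMPLETE = 2, RETURN_TO_POOL = 3 };

static enum cmd_disposition classify(int is_shutdown_dcmd, int is_map_dcmd,
				     int is_nvme, int nvme_supported)
{
	if (is_shutdown_dcmd)
		return COMPLETE;	/* never refire a shutdown */
	if (is_map_dcmd)
		return RETURN_TO_POOL;	/* map sync is re-issued elsewhere */
	if (is_nvme && !nvme_supported)
		return COMPLETE;	/* fail back: FW lost the feature */
	return REFIRE;
}

int main(void)
{
	switch (classify(0, 0, 1, 0)) {
	case REFIRE:
		printf("refire\n");
		break;
	case RETURN_TO_POOL:
		printf("return\n");
		break;
	case COMPLETE:
		printf("complete\n");
		break;
	}
	return 0;
}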
@@ -4625,8 +4655,6 @@ transition_to_ready:
continue;
}
- megasas_refire_mgmt_cmd(instance);
-
if (megasas_get_ctrl_info(instance)) {
dev_info(&instance->pdev->dev,
"Failed from %s %d\n",
@@ -4635,6 +4663,9 @@ transition_to_ready:
retval = FAILED;
goto out;
}
+
+ megasas_refire_mgmt_cmd(instance);
+
/* Reset load balance info */
if (fusion->load_balance_info)
memset(fusion->load_balance_info, 0,
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 1814d79cb98d..8e5ebee6517f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1344,6 +1344,12 @@ union desc_value {
} u;
};
+enum CMD_RET_VALUES {
+ REFIRE_CMD = 1,
+ COMPLETE_CMD = 2,
+ RETURN_CMD = 3,
+};
+
void megasas_free_cmds_fusion(struct megasas_instance *instance);
int megasas_ioc_init_fusion(struct megasas_instance *instance);
u8 megasas_get_map_info(struct megasas_instance *instance);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8027de465d47..13d6e4ec3022 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -888,6 +888,22 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
return 1;
}
+static struct scsiio_tracker *
+_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct scsi_cmnd *cmd;
+
+ if (WARN_ON(!smid) ||
+ WARN_ON(smid >= ioc->hi_priority_smid))
+ return NULL;
+
+ cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (cmd)
+ return scsi_cmd_priv(cmd);
+
+ return NULL;
+}
+
/**
* _base_get_cb_idx - obtain the callback index
* @ioc: per adapter object
@@ -899,19 +915,25 @@ static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
int i;
- u8 cb_idx;
+ u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
+ u8 cb_idx = 0xFF;
if (smid < ioc->hi_priority_smid) {
- i = smid - 1;
- cb_idx = ioc->scsi_lookup[i].cb_idx;
+ struct scsiio_tracker *st;
+
+ if (smid < ctl_smid) {
+ st = _get_st_from_smid(ioc, smid);
+ if (st)
+ cb_idx = st->cb_idx;
+ } else if (smid == ctl_smid)
+ cb_idx = ioc->ctl_cb_idx;
} else if (smid < ioc->internal_smid) {
i = smid - ioc->hi_priority_smid;
cb_idx = ioc->hpr_lookup[i].cb_idx;
} else if (smid <= ioc->hba_queue_depth) {
i = smid - ioc->internal_smid;
cb_idx = ioc->internal_lookup[i].cb_idx;
- } else
- cb_idx = 0xFF;
+ }
return cb_idx;
}
@@ -1287,14 +1309,16 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
/**
* _base_get_chain_buffer_tracker - obtain chain tracker
* @ioc: per adapter object
- * @smid: smid associated to an IO request
+ * @scmd: SCSI commands of the IO request
*
* Returns chain tracker(from ioc->free_chain_list)
*/
static struct chain_tracker *
-_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd)
{
struct chain_tracker *chain_req;
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
unsigned long flags;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
@@ -1307,8 +1331,7 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
chain_req = list_entry(ioc->free_chain_list.next,
struct chain_tracker, tracker_list);
list_del_init(&chain_req->tracker_list);
- list_add_tail(&chain_req->tracker_list,
- &ioc->scsi_lookup[smid - 1].chain_list);
+ list_add_tail(&chain_req->tracker_list, &st->chain_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return chain_req;
}
@@ -1923,7 +1946,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
/* initializing the chain flags and pointers */
chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -1963,7 +1986,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
sges_in_segment--;
}
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2066,7 +2089,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
}
/* initializing the pointers */
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2097,7 +2120,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sges_in_segment--;
}
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2742,7 +2765,7 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
void *
mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
- return (void *)(ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl);
+ return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
}
/**
@@ -2755,7 +2778,7 @@ mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
dma_addr_t
mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
- return ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl_dma;
+ return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
}
/**
@@ -2822,26 +2845,15 @@ u16
mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
struct scsi_cmnd *scmd)
{
- unsigned long flags;
- struct scsiio_tracker *request;
+ struct scsiio_tracker *request = scsi_cmd_priv(scmd);
+ unsigned int tag = scmd->request->tag;
u16 smid;
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- if (list_empty(&ioc->free_list)) {
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- pr_err(MPT3SAS_FMT "%s: smid not available\n",
- ioc->name, __func__);
- return 0;
- }
-
- request = list_entry(ioc->free_list.next,
- struct scsiio_tracker, tracker_list);
- request->scmd = scmd;
+ smid = tag + 1;
request->cb_idx = cb_idx;
- smid = request->smid;
request->msix_io = _base_get_msix_index(ioc);
- list_del(&request->tracker_list);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ request->smid = smid;
+ INIT_LIST_HEAD(&request->chain_list);
return smid;
}
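The new allocator works because smid is 1-based in the MPI protocol while block-layer tags are 0-based: a fixed offset replaces the whole locked free list. A tiny sketch of the mapping and its inverse:

#include <assert.h>
#include <stdint.h>

static inline uint16_t tag_to_smid(unsigned int tag)
{
	return (uint16_t)(tag + 1);	/* MPI smids start at 1 */
}

static inline unsigned int smid_to_tag(uint16_t smid)
{
	return smid - 1;		/* back to the block-layer tag */
}

int main(void)
{
	unsigned int tag;

	for (tag = 0; tag < 8; tag++)
		assert(smid_to_tag(tag_to_smid(tag)) == tag);
	return 0;
}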
@@ -2874,6 +2886,35 @@ mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
return smid;
}
+static void
+_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
+{
+ /*
+ * See the _wait_for_commands_to_complete() call with regard to this code.
+ */
+ if (ioc->shost_recovery && ioc->pending_io_count) {
+ ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
+ if (ioc->pending_io_count == 0)
+ wake_up(&ioc->reset_wq);
+ }
+}
+
+void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
+ struct scsiio_tracker *st)
+{
+ if (WARN_ON(st->smid == 0))
+ return;
+ st->cb_idx = 0xFF;
+ st->direct_io = 0;
+ if (!list_empty(&st->chain_list)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ list_splice_init(&st->chain_list, &ioc->free_chain_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ }
+}
+
/**
* mpt3sas_base_free_smid - put smid back on free_list
* @ioc: per adapter object
@@ -2886,37 +2927,22 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
unsigned long flags;
int i;
- struct chain_tracker *chain_req, *next;
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (smid < ioc->hi_priority_smid) {
- /* scsiio queue */
- i = smid - 1;
- if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
- list_for_each_entry_safe(chain_req, next,
- &ioc->scsi_lookup[i].chain_list, tracker_list) {
- list_del_init(&chain_req->tracker_list);
- list_add(&chain_req->tracker_list,
- &ioc->free_chain_list);
- }
- }
- ioc->scsi_lookup[i].cb_idx = 0xFF;
- ioc->scsi_lookup[i].scmd = NULL;
- ioc->scsi_lookup[i].direct_io = 0;
- list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ struct scsiio_tracker *st;
- /*
- * See _wait_for_commands_to_complete() call with regards
- * to this code.
- */
- if (ioc->shost_recovery && ioc->pending_io_count) {
- if (ioc->pending_io_count == 1)
- wake_up(&ioc->reset_wq);
- ioc->pending_io_count--;
+ st = _get_st_from_smid(ioc, smid);
+ if (!st) {
+ _base_recovery_check(ioc);
+ return;
}
+ mpt3sas_base_clear_st(ioc, st);
+ _base_recovery_check(ioc);
return;
- } else if (smid < ioc->internal_smid) {
+ }
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (smid < ioc->internal_smid) {
/* hi-priority */
i = smid - ioc->hi_priority_smid;
ioc->hpr_lookup[i].cb_idx = 0xFF;
@@ -3789,13 +3815,12 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
if (ioc->pcie_sgl_dma_pool) {
for (i = 0; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl)
- pci_pool_free(ioc->pcie_sgl_dma_pool,
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl,
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
+ dma_pool_free(ioc->pcie_sgl_dma_pool,
+ ioc->pcie_sg_lookup[i].pcie_sgl,
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma);
}
if (ioc->pcie_sgl_dma_pool)
- pci_pool_destroy(ioc->pcie_sgl_dma_pool);
+ dma_pool_destroy(ioc->pcie_sgl_dma_pool);
}
if (ioc->config_page) {
@@ -3806,10 +3831,6 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->config_page, ioc->config_page_dma);
}
- if (ioc->scsi_lookup) {
- free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
- ioc->scsi_lookup = NULL;
- }
kfree(ioc->hpr_lookup);
kfree(ioc->internal_lookup);
if (ioc->chain_lookup) {
@@ -4110,16 +4131,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name, (unsigned long long) ioc->request_dma));
total_sz += sz;
- sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
- ioc->scsi_lookup_pages = get_order(sz);
- ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
- GFP_KERNEL, ioc->scsi_lookup_pages);
- if (!ioc->scsi_lookup) {
- pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
- ioc->name, (int)sz);
- goto out;
- }
-
dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
ioc->name, ioc->request, ioc->scsiio_depth));
@@ -4202,23 +4213,29 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
nvme_blocks_needed++;
+ sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
+ ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
+ if (!ioc->pcie_sg_lookup) {
+ pr_info(MPT3SAS_FMT
+ "PCIe SGL lookup: kzalloc failed\n", ioc->name);
+ goto out;
+ }
sz = nvme_blocks_needed * ioc->page_size;
ioc->pcie_sgl_dma_pool =
- pci_pool_create("PCIe SGL pool", ioc->pdev, sz, 16, 0);
+ dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
if (!ioc->pcie_sgl_dma_pool) {
pr_info(MPT3SAS_FMT
- "PCIe SGL pool: pci_pool_create failed\n",
+ "PCIe SGL pool: dma_pool_create failed\n",
ioc->name);
goto out;
}
for (i = 0; i < ioc->scsiio_depth; i++) {
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl =
- pci_pool_alloc(ioc->pcie_sgl_dma_pool,
- GFP_KERNEL,
- &ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
- if (!ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl) {
+ ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
+ ioc->pcie_sgl_dma_pool, GFP_KERNEL,
+ &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+ if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
pr_info(MPT3SAS_FMT
- "PCIe SGL pool: pci_pool_alloc failed\n",
+ "PCIe SGL pool: dma_pool_alloc failed\n",
ioc->name);
goto out;
}
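The hunk above is a straight pci_pool to dma_pool rename plus a dedicated lookup array. A hedged kernel-style sketch of the dma_pool lifecycle it switches to (not the driver's exact code; the device pointer and the 4096/16 size/alignment values are placeholders):

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

static int pcie_sgl_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *sgl;

	pool = dma_pool_create("PCIe SGL pool", dev, 4096, 16, 0);
	if (!pool)
		return -ENOMEM;

	sgl = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!sgl) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... program sgl/dma into the HBA here ... */

	dma_pool_free(pool, sgl, dma);
	dma_pool_destroy(pool);
	return 0;
}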
@@ -5766,19 +5783,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
kfree(delayed_event_ack);
}
- /* initialize the scsi lookup free list */
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- INIT_LIST_HEAD(&ioc->free_list);
- smid = 1;
- for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
- INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
- ioc->scsi_lookup[i].cb_idx = 0xFF;
- ioc->scsi_lookup[i].smid = smid;
- ioc->scsi_lookup[i].scmd = NULL;
- ioc->scsi_lookup[i].direct_io = 0;
- list_add_tail(&ioc->scsi_lookup[i].tracker_list,
- &ioc->free_list);
- }
/* hi-priority queue */
INIT_LIST_HEAD(&ioc->hpr_free_list);
@@ -6292,15 +6297,13 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
* _wait_for_commands_to_complete - reset controller
* @ioc: Pointer to MPT_ADAPTER structure
*
- * This function waiting(3s) for all pending commands to complete
+ * This function waits 10s for all pending commands to complete
* prior to putting controller in reset.
*/
static void
_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
u32 ioc_state;
- unsigned long flags;
- u16 i;
ioc->pending_io_count = 0;
@@ -6309,11 +6312,7 @@ _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
return;
/* pending command count */
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- for (i = 0; i < ioc->scsiio_depth; i++)
- if (ioc->scsi_lookup[i].cb_idx != 0xFF)
- ioc->pending_io_count++;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
if (!ioc->pending_io_count)
return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 60f42ca3954f..789bc421424b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -772,20 +772,17 @@ struct chain_tracker {
/**
* struct scsiio_tracker - scsi mf request tracker
* @smid: system message id
- * @scmd: scsi request pointer
* @cb_idx: callback index
* @direct_io: To indicate whether I/O is direct (WARPDRIVE)
- * @tracker_list: list of free request (ioc->free_list)
+ * @chain_list: list of associated firmware chain trackers
* @msix_io: IO's msix
*/
struct scsiio_tracker {
u16 smid;
- struct scsi_cmnd *scmd;
u8 cb_idx;
u8 direct_io;
struct pcie_sg_list pcie_sg_list;
struct list_head chain_list;
- struct list_head tracker_list;
u16 msix_io;
};
@@ -1248,10 +1245,8 @@ struct MPT3SAS_ADAPTER {
u8 *request;
dma_addr_t request_dma;
u32 request_dma_sz;
- struct scsiio_tracker *scsi_lookup;
- ulong scsi_lookup_pages;
+ struct pcie_sg_list *pcie_sg_lookup;
spinlock_t scsi_lookup_lock;
- struct list_head free_list;
int pending_io_count;
wait_queue_head_t reset_wq;
@@ -1270,6 +1265,7 @@ struct MPT3SAS_ADAPTER {
u16 chains_needed_per_io;
u32 chain_depth;
u16 chain_segment_sz;
+ u16 chains_per_prp_buffer;
/* hi-priority queue */
u16 hi_priority_smid;
@@ -1401,7 +1397,9 @@ void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
/* hi-priority queue */
u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
- struct scsi_cmnd *scmd);
+ struct scsi_cmnd *scmd);
+void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
+ struct scsiio_tracker *st);
u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
@@ -1437,16 +1435,16 @@ int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
/* scsih shared API */
+struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid);
u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task,
- ulong timeout);
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task,
- ulong timeout);
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
@@ -1613,14 +1611,9 @@ void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status,
u8 mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
struct _raid_device *raid_device);
-u8
-mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-void
-mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io);
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
- u16 smid);
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
/* NCQ Prio Handling Check */
bool scsih_ncq_prio_supp(struct scsi_device *sdev);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index b4c374b08e5e..9cddc3074cd1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -534,7 +534,7 @@ _ctl_fasync(int fd, struct file *filep, int mode)
* @wait -
*
*/
-static unsigned int
+static __poll_t
_ctl_poll(struct file *filep, poll_table *wait)
{
struct MPT3SAS_ADAPTER *ioc;
@@ -567,11 +567,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
Mpi2SCSITaskManagementRequest_t *tm_request)
{
u8 found = 0;
- u16 i;
+ u16 smid;
u16 handle;
struct scsi_cmnd *scmd;
struct MPT3SAS_DEVICE *priv_data;
- unsigned long flags;
Mpi2SCSITaskManagementReply_t *tm_reply;
u32 sz;
u32 lun;
@@ -587,11 +586,11 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
handle = le16_to_cpu(tm_request->DevHandle);
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- for (i = ioc->scsiio_depth; i && !found; i--) {
- scmd = ioc->scsi_lookup[i - 1].scmd;
- if (scmd == NULL || scmd->device == NULL ||
- scmd->device->hostdata == NULL)
+ for (smid = ioc->scsiio_depth; smid && !found; smid--) {
+ struct scsiio_tracker *st;
+
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
continue;
if (lun != scmd->device->lun)
continue;
@@ -600,10 +599,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
continue;
if (priv_data->sas_target->handle != handle)
continue;
- tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
+ st = scsi_cmd_priv(scmd);
+ tm_request->TaskMID = cpu_to_le16(st->smid);
found = 1;
}
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
if (!found) {
dctlprintk(ioc, pr_info(MPT3SAS_FMT
@@ -724,14 +723,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
goto out;
}
} else {
-
- smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
- if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
- ret = -EAGAIN;
- goto out;
- }
+ /* Use first reserved smid for passthrough ioctls */
+ smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
}
ret = 0;
@@ -1081,8 +1074,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
le16_to_cpu(mpi_request->FunctionDependent1));
mpt3sas_halt_firmware(ioc);
mpt3sas_scsih_issue_locked_tm(ioc,
- le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
- 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
+ le16_to_cpu(mpi_request->FunctionDependent1), 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b258f210120a..74fca184dba9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1445,156 +1445,31 @@ _scsih_is_nvme_device(u32 device_info)
}
/**
- * _scsih_scsi_lookup_get - returns scmd entry
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- */
-static struct scsi_cmnd *
-_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- return ioc->scsi_lookup[smid - 1].scmd;
-}
-
-/**
- * __scsih_scsi_lookup_get_clear - returns scmd entry without
- * holding any lock.
+ * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
* @ioc: per adapter object
* @smid: system request message index
*
* Returns the scmd stored against the smid, or NULL if the command
* is no longer outstanding.
*/
-static inline struct scsi_cmnd *
-__scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc,
- u16 smid)
+struct scsi_cmnd *
+mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
struct scsi_cmnd *scmd = NULL;
+ struct scsiio_tracker *st;
- swap(scmd, ioc->scsi_lookup[smid - 1].scmd);
-
- return scmd;
-}
-
-/**
- * _scsih_scsi_lookup_get_clear - returns scmd entry
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- * Then will derefrence the stored scmd pointer.
- */
-static inline struct scsi_cmnd *
-_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- unsigned long flags;
- struct scsi_cmnd *scmd;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-
- return scmd;
-}
+ if (smid > 0 &&
+ smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
+ u32 unique_tag = smid - 1;
-/**
- * _scsih_scsi_lookup_find_by_scmd - scmd lookup
- * @ioc: per adapter object
- * @smid: system request message index
- * @scmd: pointer to scsi command object
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a scmd pointer in the scsi_lookup array,
- * returning the relevant smid. A returned value of zero means invalid.
- */
-static u16
-_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd
- *scmd)
-{
- u16 smid;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- smid = 0;
- for (i = 0; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd == scmd) {
- smid = ioc->scsi_lookup[i].smid;
- goto out;
- }
- }
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return smid;
-}
-
-/**
- * _scsih_scsi_lookup_find_by_target - search for matching channel:id
- * @ioc: per adapter object
- * @id: target id
- * @channel: channel
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a matching channel:id in the scsi_lookup array,
- * returning 1 if found.
- */
-static u8
-_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
- int channel)
-{
- u8 found;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- found = 0;
- for (i = 0 ; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd &&
- (ioc->scsi_lookup[i].scmd->device->id == id &&
- ioc->scsi_lookup[i].scmd->device->channel == channel)) {
- found = 1;
- goto out;
- }
- }
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return found;
-}
-
-/**
- * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
- * @ioc: per adapter object
- * @id: target id
- * @lun: lun number
- * @channel: channel
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a matching channel:id:lun in the scsi_lookup array,
- * returning 1 if found.
- */
-static u8
-_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
- unsigned int lun, int channel)
-{
- u8 found;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- found = 0;
- for (i = 0 ; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd &&
- (ioc->scsi_lookup[i].scmd->device->id == id &&
- ioc->scsi_lookup[i].scmd->device->channel == channel &&
- ioc->scsi_lookup[i].scmd->device->lun == lun)) {
- found = 1;
- goto out;
+ scmd = scsi_host_find_tag(ioc->shost, unique_tag);
+ if (scmd) {
+ st = scsi_cmd_priv(scmd);
+ if (st->cb_idx == 0xFF)
+ scmd = NULL;
}
}
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return found;
+ return scmd;
}
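The rewritten lookup leans entirely on the block layer: scsi_host_find_tag() resolves the 0-based tag, and scsi_cmd_priv() hands back the per-command tracker the midlayer allocated via .cmd_size. A hedged kernel-style sketch of the pattern (hypothetical struct, not the driver's exact code):

#include <linux/types.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct my_tracker {		/* per-command data, see .cmd_size below */
	u16 smid;
	u8 cb_idx;		/* 0xFF marks an inactive slot */
};

static struct scsi_cmnd *my_lookup(struct Scsi_Host *shost, u16 smid)
{
	struct scsi_cmnd *scmd = scsi_host_find_tag(shost, smid - 1);

	if (scmd) {
		struct my_tracker *st = scsi_cmd_priv(scmd);

		if (st->cb_idx == 0xFF)	/* tag allocated, command done */
			scmd = NULL;
	}
	return scmd;
}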
/**
@@ -2727,32 +2602,30 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/**
* mpt3sas_scsih_issue_tm - main routine for sending tm requests
* @ioc: per adapter struct
- * @device_handle: device handle
- * @channel: the channel assigned by the OS
- * @id: the id assigned by the OS
+ * @handle: device handle
* @lun: lun number
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
+ * @msix_task: MSIX table index supplied by the OS
* @timeout: timeout in seconds
* Context: user
*
* A generic API for sending task management requests to firmware.
*
* The callback index is set inside `ioc->tm_cb_idx`.
+ * It is the caller's responsibility to check for outstanding commands.
*
* Return SUCCESS or FAILED.
*/
int
-mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
- uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
+mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
u16 smid = 0;
u32 ioc_state;
- struct scsiio_tracker *scsi_lookup = NULL;
int rc;
- u16 msix_task = 0;
lockdep_assert_held(&ioc->tm_cmds.mutex);
@@ -2791,9 +2664,6 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
return FAILED;
}
- if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
- scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
-
dtmprintk(ioc, pr_info(MPT3SAS_FMT
"sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n",
ioc->name, handle, type, smid_task));
@@ -2809,11 +2679,6 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
- if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
- (scsi_lookup->msix_io < ioc->reply_queue_count))
- msix_task = scsi_lookup->msix_io;
- else
- msix_task = 0;
ioc->put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2847,35 +2712,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
sizeof(Mpi2SCSITaskManagementRequest_t)/4);
}
}
-
- switch (type) {
- case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
- rc = SUCCESS;
- if (scsi_lookup->scmd == NULL)
- break;
- rc = FAILED;
- break;
-
- case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
- if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
- rc = FAILED;
- else
- rc = SUCCESS;
- break;
- case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
- case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
- if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
- rc = FAILED;
- else
- rc = SUCCESS;
- break;
- case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
- rc = SUCCESS;
- break;
- default:
- rc = FAILED;
- break;
- }
+ rc = SUCCESS;
out:
mpt3sas_scsih_clear_tm_flag(ioc, handle);
@@ -2884,13 +2721,13 @@ out:
}
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
{
int ret;
mutex_lock(&ioc->tm_cmds.mutex);
- ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
- smid_task, timeout);
+ ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
+ msix_task, timeout);
mutex_unlock(&ioc->tm_cmds.mutex);
return ret;
@@ -2989,7 +2826,7 @@ scsih_abort(struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT3SAS_DEVICE *sas_device_priv_data;
- u16 smid;
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
u16 handle;
int r;
@@ -3007,9 +2844,8 @@ scsih_abort(struct scsi_cmnd *scmd)
goto out;
}
- /* search for the command */
- smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd);
- if (!smid) {
+ /* check for completed command */
+ if (st == NULL || st->cb_idx == 0xFF) {
scmd->result = DID_RESET << 16;
r = SUCCESS;
goto out;
@@ -3027,10 +2863,12 @@ scsih_abort(struct scsi_cmnd *scmd)
mpt3sas_halt_firmware(ioc);
handle = sas_device_priv_data->sas_target->handle;
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ st->smid, st->msix_io, 30);
+ /* Command must be cleared after abort */
+ if (r == SUCCESS && st->cb_idx != 0xFF)
+ r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -3086,10 +2924,11 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
goto out;
}
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, 30);
+ /* Check for busy commands after reset */
+ if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
+ r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -3148,10 +2987,11 @@ scsih_target_reset(struct scsi_cmnd *scmd)
goto out;
}
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
- 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
+ /* Check for busy commands after reset */
+ if (r == SUCCESS && atomic_read(&starget->target_busy))
+ r = FAILED;
out:
starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -4600,16 +4440,18 @@ static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
{
struct scsi_cmnd *scmd;
+ struct scsiio_tracker *st;
u16 smid;
- u16 count = 0;
+ int count = 0;
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
- scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
count++;
_scsih_set_satl_pending(scmd, false);
- mpt3sas_base_free_smid(ioc, smid);
+ st = scsi_cmd_priv(scmd);
+ mpt3sas_base_clear_st(ioc, st);
scsi_dma_unmap(scmd);
if (ioc->pci_error_recovery)
scmd->result = DID_NO_CONNECT << 16;
@@ -4758,19 +4600,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return 0;
}
- /*
- * Bug work around for firmware SATL handling. The loop
- * is based on atomic operations and ensures consistency
- * since we're lockless at this point
- */
- do {
- if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
- scmd->result = SAM_STAT_BUSY;
- scmd->scsi_done(scmd);
- return 0;
- }
- } while (_scsih_set_satl_pending(scmd, true));
-
sas_target_priv_data = sas_device_priv_data->sas_target;
/* invalid device handle */
@@ -4796,6 +4625,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
sas_device_priv_data->block)
return SCSI_MLQUEUE_DEVICE_BUSY;
+ /*
+ * Bug workaround for firmware SATL handling. The loop
+ * is based on atomic operations and ensures consistency,
+ * since we're lockless at this point.
+ */
+ do {
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+ scmd->result = SAM_STAT_BUSY;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ } while (_scsih_set_satl_pending(scmd, true));
+
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
mpi_control = MPI2_SCSIIO_CONTROL_READ;
else if (scmd->sc_data_direction == DMA_TO_DEVICE)
@@ -4823,6 +4665,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (!smid) {
pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
@@ -4854,6 +4697,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
pcie_device = sas_target_priv_data->pcie_dev;
if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
mpt3sas_base_free_smid(ioc, smid);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
} else
@@ -4862,7 +4706,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
raid_device = sas_target_priv_data->raid_device;
if (raid_device && raid_device->direct_io_enabled)
mpt3sas_setup_direct_io(ioc, scmd,
- raid_device, mpi_request, smid);
+ raid_device, mpi_request);
if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
@@ -5330,6 +5174,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
Mpi25SCSIIORequest_t *mpi_request;
Mpi2SCSIIOReply_t *mpi_reply;
struct scsi_cmnd *scmd;
+ struct scsiio_tracker *st;
u16 ioc_status;
u32 xfer_cnt;
u8 scsi_state;
@@ -5337,16 +5182,10 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
u32 log_info;
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
- unsigned long flags;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
- if (ioc->broadcast_aen_busy || ioc->pci_error_recovery ||
- ioc->got_task_abort_from_ioctl)
- scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
- else
- scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
-
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (scmd == NULL)
return 1;
@@ -5371,13 +5210,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* WARPDRIVE: If direct_io is set then it is directIO,
* the failed direct I/O should be redirected to volume
*/
- if (mpt3sas_scsi_direct_io_get(ioc, smid) &&
+ st = scsi_cmd_priv(scmd);
+ if (st->direct_io &&
((ioc_status & MPI2_IOCSTATUS_MASK)
!= MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- ioc->scsi_lookup[smid - 1].scmd = scmd;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- mpt3sas_scsi_direct_io_set(ioc, smid, 0);
+ st->direct_io = 0;
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
mpi_request->DevHandle =
cpu_to_le16(sas_device_priv_data->sas_target->handle);
@@ -5555,9 +5392,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
out:
scsi_dma_unmap(scmd);
-
+ mpt3sas_base_free_smid(ioc, smid);
scmd->scsi_done(scmd);
- return 1;
+ return 0;
}
/**
@@ -7211,7 +7048,7 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* Context: user.
*
*/
-static int
+static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
@@ -7221,7 +7058,6 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
u8 link_rate, prev_link_rate;
unsigned long flags;
int rc;
- int requeue_event;
Mpi26EventDataPCIeTopologyChangeList_t *event_data =
(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
struct _pcie_device *pcie_device;
@@ -7231,12 +7067,12 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (ioc->shost_recovery || ioc->remove_host ||
ioc->pci_error_recovery)
- return 0;
+ return;
if (fw_event->ignore) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
ioc->name));
- return 0;
+ return;
}
/* handle siblings events */
@@ -7244,10 +7080,10 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (fw_event->ignore) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"ignoring switch event\n", ioc->name));
- return 0;
+ return;
}
if (ioc->remove_host || ioc->pci_error_recovery)
- return 0;
+ return;
reason_code = event_data->PortEntry[i].PortStatus;
handle =
le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
@@ -7316,7 +7152,6 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
break;
}
}
- return requeue_event;
}
/**
@@ -7502,6 +7337,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
{
struct scsi_cmnd *scmd;
struct scsi_device *sdev;
+ struct scsiio_tracker *st;
u16 smid, handle;
u32 lun;
struct MPT3SAS_DEVICE *sas_device_priv_data;
@@ -7543,9 +7379,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
if (ioc->shost_recovery)
goto out;
- scmd = _scsih_scsi_lookup_get(ioc, smid);
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
+ st = scsi_cmd_priv(scmd);
sdev = scmd->device;
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
@@ -7567,8 +7404,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
goto out;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
+ r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
+ st->msix_io, 30);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: FAILED when sending "
@@ -7607,10 +7445,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
if (ioc->shost_recovery)
goto out_no_lock;
- r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
- sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid,
- 30);
- if (r == FAILED) {
+ r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
+ st->msix_io, 30);
+ if (r == FAILED || st->cb_idx != 0xFF) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
"scmd(%p)\n", scmd);
@@ -10416,6 +10254,7 @@ static struct scsi_host_template mpt2sas_driver_template = {
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct scsiio_tracker),
};
/* raid transport support for SAS 2.0 HBA devices */
@@ -10454,6 +10293,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct scsiio_tracker),
};
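
The one-line template change is what makes scsi_cmd_priv() usable above: .cmd_size asks the midlayer to reserve (and zero) that many extra bytes per command. A hedged fragment of how such a template is wired up (example_queuecommand is a placeholder, not a function from this driver, and the tracker type is the sketch from earlier):

static int example_queuecommand(struct Scsi_Host *shost,
                                struct scsi_cmnd *scmd);

static struct scsi_host_template example_template = {
        .module             = THIS_MODULE,
        .name               = "example",
        .queuecommand       = example_queuecommand,
        /* The midlayer zeroes this area for each new command, so
         * per-I/O flags such as direct_io start out clear with no
         * driver-side memset. */
        .cmd_size           = sizeof(struct scsiio_tracker_sketch),
};
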
/* raid transport support for SAS 3.0 HBA devices */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index ced7d9f6274c..6bfcee4757e0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -261,33 +261,6 @@ out_error:
}
/**
- * mpt3sas_scsi_direct_io_get - returns direct io flag
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- */
-inline u8
-mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- return ioc->scsi_lookup[smid - 1].direct_io;
-}
-
-/**
- * mpt3sas_scsi_direct_io_set - sets direct io flag
- * @ioc: per adapter object
- * @smid: system request message index
- * @direct_io: Zero or non-zero value to set in the direct_io flag
- *
- * Returns Nothing.
- */
-inline void
-mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
-{
- ioc->scsi_lookup[smid - 1].direct_io = direct_io;
-}
-
-/**
* mpt3sas_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O
* @ioc: per adapter object
* @scmd: pointer to scsi command object
@@ -299,12 +272,12 @@ mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
*/
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
- u16 smid)
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request)
{
sector_t v_lba, p_lba, stripe_off, column, io_size;
u32 stripe_sz, stripe_exp;
u8 num_pds, cmd = scmd->cmnd[0];
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
if (cmd != READ_10 && cmd != WRITE_10 &&
cmd != READ_16 && cmd != WRITE_16)
@@ -340,5 +313,5 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
else
put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]);
- mpt3sas_scsi_direct_io_set(ioc, smid, 1);
+ st->direct_io = 1;
}
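
mpt3sas_setup_direct_io, shown losing its smid parameter above, maps a volume LBA onto a member disk of a power-of-two stripe set before flipping the per-command direct_io flag. A sketch of that mapping, reconstructed from the variables visible in the function (treat it as an illustration of the arithmetic, not the driver's verbatim code):

/* stripe_exp = log2(stripe_sz); stripe_off is the offset inside one
 * stripe, column picks the member disk, and the quotient recombined
 * with the offset gives the LBA on that disk. */
static u64 virt_to_phys_lba(u64 v_lba, u32 stripe_sz, u32 stripe_exp,
                            u8 num_pds, u8 *column)
{
        u64 stripe_nr = v_lba >> stripe_exp;

        *column = stripe_nr % num_pds;
        return ((stripe_nr / num_pds) << stripe_exp) +
               (v_lba & (stripe_sz - 1));
}
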
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index a4f28b7e4c65..e18877177f1b 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1576,7 +1576,9 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
return req;
for_each_bio(bio) {
- ret = blk_rq_append_bio(req, bio);
+ struct bio *bounce_bio = bio;
+
+ ret = blk_rq_append_bio(req, &bounce_bio);
if (ret)
return ERR_PTR(ret);
}
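
The osd change tracks blk_rq_append_bio() taking struct bio ** instead of struct bio *: the block layer may substitute a bounce bio, and only the caller's local pointer gets updated. A minimal sketch of the new calling convention:

#include <linux/blkdev.h>

static int append_one_bio(struct request *req, struct bio *bio)
{
        struct bio *bounce_bio = bio;   /* may be swapped by the call */
        int ret = blk_rq_append_bio(req, &bounce_bio);

        if (ret)
                return ret;     /* nothing was queued on @req */
        /* From here on, bounce_bio (not bio) is the bio actually
         * attached to the request. */
        return 0;
}
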
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e58be98430b0..201c8de1853d 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -5216,7 +5216,7 @@ static unsigned short pmcraid_get_minor(void)
{
int minor;
- minor = find_first_zero_bit(pmcraid_minor, sizeof(pmcraid_minor));
+ minor = find_first_zero_bit(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
__set_bit(minor, pmcraid_minor);
return minor;
}
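
The pmcraid fix is a classic bits-versus-bytes bug: find_first_zero_bit() takes the bitmap length in bits, while sizeof() yields bytes, so the old call scanned only a fraction of the map and could hand out duplicate minors. A self-contained sketch of the corrected pattern (the 32 is an assumed size for illustration, not necessarily the driver's PMCRAID_MAX_ADAPTERS value):

#include <linux/bitmap.h>

#define EXAMPLE_MAX_ADAPTERS    32      /* assumed for illustration */

static DECLARE_BITMAP(example_minor, EXAMPLE_MAX_ADAPTERS);

static unsigned short example_get_minor(void)
{
        /* The size argument is in bits; sizeof(example_minor) would
         * be only 4 or 8 bytes and silently truncate the search. */
        int minor = find_first_zero_bit(example_minor,
                                        EXAMPLE_MAX_ADAPTERS);

        __set_bit(minor, example_minor);
        return minor;
}
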
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 7be5823ab036..ee86a0c62dbf 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -724,6 +724,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
cmd->SCp.phase++;
+ /* fall through */
case 3: /* Phase 3 - Ready to accept a command */
w_ctr(ppb, 0x0c);
@@ -733,6 +734,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (!ppa_send_command(cmd))
return 0;
cmd->SCp.phase++;
+ /* fall through */
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
@@ -746,6 +748,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
}
cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.phase++;
+ /* fall through */
case 5: /* Phase 5 - Data transfer stage */
w_ctr(ppb, 0x0c);
@@ -758,6 +761,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (retv == 0)
return 1;
cmd->SCp.phase++;
+ /* fall through */
case 6: /* Phase 6 - Read status/message */
cmd->result = DID_OK << 16;
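
The ppa hunks annotate deliberate case fall-through so compilers building with -Wimplicit-fallthrough treat the cascading phase machine as intentional rather than as a missing break. The shape of the pattern, reduced to a sketch:

static void phase_step(int *phase)
{
        switch (*phase) {
        case 2:
                /* ...do phase-2 work... */
                (*phase)++;
                /* fall through */
        case 3:
                /* ...phase 3 runs immediately after phase 2... */
                break;
        }
}
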
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
index 7d91e53562f8..a980ef756a67 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -25,15 +25,17 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
u32 task_retry_id,
u8 fcp_cmd_payload[32])
{
- struct fcoe_task_context *ctx = task_params->context;
+ struct e4_fcoe_task_context *ctx = task_params->context;
+ const u8 val_byte = ctx->ystorm_ag_context.byte0;
+ struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
- struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
u32 io_size, val;
bool slow_sgl;
memset(ctx, 0, sizeof(*(ctx)));
+ ctx->ystorm_ag_context.byte0 = val_byte;
slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
sgl_task_params->small_mid_sge);
io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
@@ -43,20 +45,20 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
y_st_ctx = &ctx->ystorm_st_context;
y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
- y_st_ctx->task_type = task_params->task_type;
+ y_st_ctx->task_type = (u8)task_params->task_type;
memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
/* Tstorm ctx */
t_st_ctx = &ctx->tstorm_st_context;
- t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
- FCOE_TASK_DEV_TYPE_TAPE :
- FCOE_TASK_DEV_TYPE_DISK);
+ t_st_ctx->read_only.dev_type = (u8)(task_params->is_tape_device == 1 ?
+ FCOE_TASK_DEV_TYPE_TAPE :
+ FCOE_TASK_DEV_TYPE_DISK);
t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
val = cpu_to_le32(task_params->cq_rss_number);
t_st_ctx->read_only.glbl_q_num = val;
t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
- t_st_ctx->read_only.task_type = task_params->task_type;
+ t_st_ctx->read_only.task_type = (u8)task_params->task_type;
SET_FIELD(t_st_ctx->read_write.flags,
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
@@ -88,6 +90,8 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
SET_FIELD(m_st_ctx->flags,
MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
(slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+ m_st_ctx->sgl_params.sgl_num_sges =
+ cpu_to_le16(sgl_task_params->num_sges);
} else {
/* Tstorm ctx */
SET_FIELD(t_st_ctx->read_write.flags,
@@ -101,7 +105,9 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
sgl_task_params);
}
+ /* Init Sqe */
init_common_sqe(task_params, SEND_FCOE_CMD);
+
return 0;
}
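
Both qedf init paths now capture ystorm_ag_context.byte0 before the memset() and restore it afterwards; the byte carries firmware-owned validation state that a plain wipe would destroy. The save/restore pattern in isolation (the context type follows the patch; the comment reflects the apparent intent rather than documented HSI semantics):

static void init_ctx_keeping_validation(struct e4_fcoe_task_context *ctx)
{
        const u8 val_byte = ctx->ystorm_ag_context.byte0; /* fw-owned */

        memset(ctx, 0, sizeof(*ctx));
        ctx->ystorm_ag_context.byte0 = val_byte;
        /* ...normal task-context initialization continues here... */
}
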
@@ -112,14 +118,16 @@ int init_initiator_midpath_unsolicited_fcoe_task(
struct scsi_sgl_task_params *rx_sgl_task_params,
u8 fw_to_place_fc_header)
{
- struct fcoe_task_context *ctx = task_params->context;
+ struct e4_fcoe_task_context *ctx = task_params->context;
+ const u8 val_byte = ctx->ystorm_ag_context.byte0;
+ struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
- struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
u32 val;
memset(ctx, 0, sizeof(*(ctx)));
+ ctx->ystorm_ag_context.byte0 = val_byte;
/* Init Ystorm */
y_st_ctx = &ctx->ystorm_st_context;
@@ -129,7 +137,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
SET_FIELD(y_st_ctx->sgl_mode,
YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
- y_st_ctx->task_type = task_params->task_type;
+ y_st_ctx->task_type = (u8)task_params->task_type;
memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
@@ -148,7 +156,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
val = cpu_to_le32(task_params->cq_rss_number);
t_st_ctx->read_only.glbl_q_num = val;
- t_st_ctx->read_only.task_type = task_params->task_type;
+ t_st_ctx->read_only.task_type = (u8)task_params->task_type;
SET_FIELD(t_st_ctx->read_write.flags,
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
@@ -182,9 +190,10 @@ int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
}
int init_initiator_sequence_recovery_fcoe_task(
- struct fcoe_task_params *task_params, u32 off)
+ struct fcoe_task_params *task_params, u32 desired_offset)
{
init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
- task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
+ task_params->sqe->additional_info_union.seq_rec_updated_offset =
+ desired_offset;
return 0;
}
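
The sweeping RDIF_/TDIF_ renames below (HOSTGUARDTYPE to HOST_GUARD_TYPE and so on) have to land in one patch because SET_FIELD() token-pastes the field name into its _MASK and _SHIFT companions. Roughly the shape of that macro (the real one lives in the qed common HSI headers; this sketch is not a verbatim copy):

#define EXAMPLE_FLAG_MASK       0x3
#define EXAMPLE_FLAG_SHIFT      4

#define SET_FIELD_SKETCH(value, name, flag)                            \
        do {                                                           \
                (value) &= ~((name##_MASK) << (name##_SHIFT));         \
                (value) |= ((flag) & (name##_MASK)) << (name##_SHIFT); \
        } while (0)

/* SET_FIELD_SKETCH(flags, EXAMPLE_FLAG, 2) expands the pasted tokens
 * EXAMPLE_FLAG_MASK / EXAMPLE_FLAG_SHIFT, which is why every caller
 * of a renamed field must change in the same commit. */
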
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
index f9c50faa748e..b5c236efd465 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -13,7 +13,7 @@
struct fcoe_task_params {
/* Output parameter [set/filled by the HSI function] */
- struct fcoe_task_context *context;
+ struct e4_fcoe_task_context *context;
/* Output parameter [set/filled by the HSI function] */
struct fcoe_wqe *sqe;
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 9bf7b227e69a..c105a2e48ac1 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -129,7 +129,7 @@ struct qedf_ioreq {
struct delayed_work timeout_work;
struct completion tm_done;
struct completion abts_done;
- struct fcoe_task_context *task;
+ struct e4_fcoe_task_context *task;
struct fcoe_task_params *task_params;
struct scsi_sgl_task_params *sgl_task_params;
int idx;
@@ -465,7 +465,7 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
+ struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 59c18ca4cda9..aa22b11436ba 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -19,7 +19,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
struct qedf_ioreq *els_req;
struct qedf_mp_req *mp_req;
struct fc_frame_header *fc_hdr;
- struct fcoe_task_context *task;
+ struct e4_fcoe_task_context *task;
int rc = 0;
uint32_t did, sid;
uint16_t xid;
diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h
index 7faef80c5f7a..503c1ae3ccd0 100644
--- a/drivers/scsi/qedf/qedf_hsi.h
+++ b/drivers/scsi/qedf/qedf_hsi.h
@@ -225,19 +225,6 @@ enum fcoe_cqe_type {
MAX_FCOE_CQE_TYPE
};
-
-/*
- * FCoE device type
- */
-enum fcoe_device_type {
- FCOE_TASK_DEV_TYPE_DISK,
- FCOE_TASK_DEV_TYPE_TAPE,
- MAX_FCOE_DEVICE_TYPE
-};
-
-
-
-
/*
* FCoE fast path error codes
*/
@@ -332,31 +319,6 @@ enum fcoe_sp_error_code {
MAX_FCOE_SP_ERROR_CODE
};
-
-/*
- * FCoE SQE request type
- */
-enum fcoe_sqe_request_type {
- SEND_FCOE_CMD,
- SEND_FCOE_MIDPATH,
- SEND_FCOE_ABTS_REQUEST,
- FCOE_EXCHANGE_CLEANUP,
- FCOE_SEQUENCE_RECOVERY,
- SEND_FCOE_XFER_RDY,
- SEND_FCOE_RSP,
- SEND_FCOE_RSP_WITH_SENSE_DATA,
- SEND_FCOE_TARGET_DATA,
- SEND_FCOE_INITIATOR_DATA,
- /*
- * Xfer Continuation (==1) ready to be sent. Previous XFERs data
- * received successfully.
- */
- SEND_FCOE_XFER_CONTINUATION_RDY,
- SEND_FCOE_TARGET_ABTS_RSP,
- MAX_FCOE_SQE_REQUEST_TYPE
-};
-
-
/*
* FCoE task TX state
*/
@@ -389,34 +351,4 @@ enum fcoe_task_tx_state {
MAX_FCOE_TASK_TX_STATE
};
-
-/*
- * FCoE task type
- */
-enum fcoe_task_type {
- FCOE_TASK_TYPE_WRITE_INITIATOR,
- FCOE_TASK_TYPE_READ_INITIATOR,
- FCOE_TASK_TYPE_MIDPATH,
- FCOE_TASK_TYPE_UNSOLICITED,
- FCOE_TASK_TYPE_ABTS,
- FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
- FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
- FCOE_TASK_TYPE_WRITE_TARGET,
- FCOE_TASK_TYPE_READ_TARGET,
- FCOE_TASK_TYPE_RSP,
- FCOE_TASK_TYPE_RSP_SENSE_DATA,
- FCOE_TASK_TYPE_ABTS_TARGET,
- FCOE_TASK_TYPE_ENUM_SIZE,
- MAX_FCOE_TASK_TYPE
-};
-
-struct scsi_glbl_queue_entry {
- /* Start physical address for the RQ (receive queue) PBL. */
- struct regpair rq_pbl_addr;
- /* Start physical address for the CQ (completion queue) PBL. */
- struct regpair cq_pbl_addr;
- /* Start physical address for the CMDQ (command queue) PBL. */
- struct regpair cmdq_pbl_addr;
-};
-
#endif /* __QEDF_HSI__ */
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index ded386036c27..b15e69586a36 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -579,7 +579,7 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
}
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
- struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
+ struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
struct fcoe_wqe *sqe)
{
enum fcoe_task_type task_type;
@@ -597,7 +597,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
/* Note init_initiator_rw_fcoe_task memsets the task context */
io_req->task = task_ctx;
- memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
@@ -673,7 +673,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
}
void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
+ struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_rport *fcport = io_req->fcport;
@@ -691,7 +691,7 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req,
memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
- memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
/* Setup the task from io_req for easy reference */
@@ -844,7 +844,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
struct Scsi_Host *host = sc_cmd->device->host;
struct fc_lport *lport = shost_priv(host);
struct qedf_ctx *qedf = lport_priv(lport);
- struct fcoe_task_context *task_ctx;
+ struct e4_fcoe_task_context *task_ctx;
u16 xid;
enum fcoe_task_type req_type = 0;
struct fcoe_wqe *sqe;
@@ -1065,7 +1065,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req)
{
u16 xid, rval;
- struct fcoe_task_context *task_ctx;
+ struct e4_fcoe_task_context *task_ctx;
struct scsi_cmnd *sc_cmd;
struct fcoe_cqe_rsp_info *fcp_rsp;
struct qedf_rport *fcport;
@@ -1722,7 +1722,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
struct qedf_rport *fcport;
struct qedf_ctx *qedf;
uint16_t xid;
- struct fcoe_task_context *task;
+ struct e4_fcoe_task_context *task;
int tmo = 0;
int rc = SUCCESS;
unsigned long flags;
@@ -1835,7 +1835,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
uint8_t tm_flags)
{
struct qedf_ioreq *io_req;
- struct fcoe_task_context *task;
+ struct e4_fcoe_task_context *task;
struct qedf_ctx *qedf = fcport->qedf;
struct fc_lport *lport = qedf->lport;
int rc = 0;
@@ -2005,17 +2005,18 @@ void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
struct qedf_io_work *io_work;
u32 bdq_idx;
void *bdq_addr;
+ struct scsi_bd *p_bd_info;
+ p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
- "address.hi=%x address.lo=%x opaque_data.hi=%x "
- "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
- le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
- le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
- le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
- le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
- qedf->bdq_prod_idx, pktlen);
-
- bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
+ "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
+ le32_to_cpu(p_bd_info->address.hi),
+ le32_to_cpu(p_bd_info->address.lo),
+ le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
+ le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
+ qedf->bdq_prod_idx, pktlen);
+
+ bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
if (bdq_idx >= QEDF_BDQ_SIZE) {
QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
bdq_idx);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 7c0064500cc5..ccd9a08ea030 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1860,7 +1860,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp)
struct qedf_ctx *qedf = fp->qedf;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block *sb = sb_info->sb_virt;
+ struct status_block_e4 *sb = sb_info->sb_virt;
u16 prod_idx;
/* Get the pointer to the global CQ this completion is on */
@@ -1887,7 +1887,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
{
struct qedf_ctx *qedf = fp->qedf;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block *sb = sb_info->sb_virt;
+ struct status_block_e4 *sb = sb_info->sb_virt;
struct global_queue *que;
u16 prod_idx;
struct fcoe_cqe *cqe;
@@ -2352,12 +2352,12 @@ void qedf_fp_io_handler(struct work_struct *work)
static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
- sizeof(struct status_block), &sb_phys, GFP_KERNEL);
+ sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
if (!sb_virt) {
QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
@@ -2623,9 +2623,9 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
for (i = 0; i < QEDF_BDQ_SIZE; i++) {
pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
- pbl->opaque.hi = 0;
+ pbl->opaque.fcoe_opaque.hi = 0;
/* Opaque lo data is an index into the BDQ array */
- pbl->opaque.lo = cpu_to_le32(i);
+ pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
pbl++;
}
@@ -3126,6 +3126,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
if (!qedf->cmd_mgr) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
+ rc = -ENOMEM;
goto err5;
}
@@ -3149,6 +3150,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
create_workqueue(host_buf);
if (!qedf->ll2_recv_wq) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n");
+ rc = -ENOMEM;
goto err7;
}
@@ -3192,6 +3194,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
if (!qedf->timer_work_queue) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
"workqueue.\n");
+ rc = -ENOMEM;
goto err7;
}
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index 397b3b8ee51a..c2478056356a 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -7,9 +7,9 @@
* this source tree.
*/
-#define QEDF_VERSION "8.20.5.0"
+#define QEDF_VERSION "8.33.0.20"
#define QEDF_DRIVER_MAJOR_VER 8
-#define QEDF_DRIVER_MINOR_VER 20
-#define QEDF_DRIVER_REV_VER 5
-#define QEDF_DRIVER_ENG_VER 0
+#define QEDF_DRIVER_MINOR_VER 33
+#define QEDF_DRIVER_REV_VER 0
+#define QEDF_DRIVER_ENG_VER 20
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 39d77818a677..fd8a1eea3163 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -152,7 +152,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
{
struct qedi_fastpath *fp = NULL;
struct qed_sb_info *sb_info = NULL;
- struct status_block *sb = NULL;
+ struct status_block_e4 *sb = NULL;
struct global_queue *que = NULL;
int id;
u16 prod_idx;
@@ -168,7 +168,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
sb_info = fp->sb_info;
sb = sb_info->sb_virt;
prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
- STATUS_BLOCK_PROD_INDEX_MASK);
+ STATUS_BLOCK_E4_PROD_INDEX_MASK);
seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
que = qedi->global_queues[fp->sb_id];
seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index bd302d3cb9af..667d7697ba01 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -87,7 +87,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct iscsi_task_context *task_ctx;
+ struct e4_iscsi_task_context *task_ctx;
struct iscsi_text_rsp *resp_hdr_ptr;
struct iscsi_text_response_hdr *cqe_text_response;
struct qedi_cmd *cmd;
@@ -198,7 +198,7 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
qedi_cmd = task->dd_data;
- qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
+ qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_ATOMIC);
if (!qedi_cmd->tmf_resp_buf) {
QEDI_ERR(&qedi->dbg_ctx,
"Failed to allocate resp buf, cid=0x%x\n",
@@ -260,7 +260,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct iscsi_task_context *task_ctx;
+ struct e4_iscsi_task_context *task_ctx;
struct iscsi_login_rsp *resp_hdr_ptr;
struct iscsi_login_response_hdr *cqe_login_response;
struct qedi_cmd *cmd;
@@ -326,7 +326,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
(qedi->bdq_prod_idx % qedi->rq_num_entries));
/* Obtain buffer address from rqe_opaque */
- idx = cqe->rqe_opaque.lo;
+ idx = cqe->rqe_opaque;
if (idx > (QEDI_BDQ_NUM - 1)) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
@@ -335,8 +335,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
}
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
- "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
- cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
+ "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
@@ -363,7 +362,7 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
struct scsi_bd *pbl;
/* Obtain buffer address from rqe_opaque */
- idx = cqe->rqe_opaque.lo;
+ idx = cqe->rqe_opaque;
if (idx > (QEDI_BDQ_NUM - 1)) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
@@ -378,8 +377,10 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
pbl, pbl->address.hi, pbl->address.lo, idx);
- pbl->opaque.hi = 0;
- pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
+ pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+ pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);
/* Increment producer to let f/w know we've handled the frame */
qedi->bdq_prod_idx += count;
@@ -1017,7 +1018,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_login_req *login_hdr;
struct scsi_sge *resp_sge = NULL;
@@ -1037,8 +1038,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1119,7 +1121,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_logout *logout_hdr = NULL;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct qedi_cmd *qedi_cmd;
@@ -1137,8 +1139,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1467,7 +1470,7 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
struct iscsi_tmf_request_hdr tmf_pdu_header;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_task *ctask;
struct iscsi_tm *tmf_hdr;
@@ -1490,8 +1493,9 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1605,7 +1609,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_text *text_hdr;
struct scsi_sge *req_sge = NULL;
@@ -1627,8 +1631,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1705,7 +1710,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_nopout *nopout_hdr;
struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
@@ -1725,8 +1730,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -2046,7 +2052,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
struct iscsi_task_params task_params;
struct iscsi_conn_params conn_params;
struct scsi_initiator_cmd_params cmd_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_cls_conn *cls_conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
@@ -2069,8 +2075,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
cmd->task_id = tid;
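
The qedi BDQ hunks stop treating the buffer descriptor's opaque field as a 64-bit hi/lo pair and instead write a small index next to explicitly zeroed reserved words, then bounds-check the index the firmware echoes back. The round trip as a sketch (field widths here are illustrative; the authoritative layout is struct scsi_bd in the qed headers):

static void post_bdq_buf(struct scsi_bd *pbl, u16 idx)
{
        pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
        pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
        pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
        pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(idx);
}

static bool bdq_idx_valid(u16 idx, u16 bdq_num)
{
        return idx < bdq_num;   /* otherwise drop the unsolicited pkt */
}
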
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index 7df32a68bd54..a269da1a6c75 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -203,12 +203,15 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
struct data_hdr *pdu_header,
enum iscsi_task_type task_type)
{
- struct iscsi_task_context *context;
- u16 index;
+ struct e4_iscsi_task_context *context;
u32 val;
+ u16 index;
+ u8 val_byte;
context = task_params->context;
+ val_byte = context->mstorm_ag_context.cdu_validation;
memset(context, 0, sizeof(*context));
+ context->mstorm_ag_context.cdu_validation = val_byte;
for (index = 0; index <
ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
@@ -222,7 +225,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
cpu_to_le16(task_params->conn_icid);
SET_FIELD(context->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
context->ustorm_st_context.task_type = task_type;
context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
@@ -252,10 +255,9 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
static
void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
- struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
- u32 remaining_recv_len,
- u32 expected_data_transfer_len,
- u8 num_sges, bool tx_dif_conn_err_en)
+ struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+ u32 remaining_recv_len, u32 expected_data_transfer_len,
+ u8 num_sges, bool tx_dif_conn_err_en)
{
u32 val;
@@ -265,12 +267,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
ustorm_st_cxt->exp_data_transfer_len = val;
SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
SET_FIELD(ustorm_ag_cxt->flags2,
- USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+ E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
tx_dif_conn_err_en ? 1 : 0);
}
static
-void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
+void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
struct iscsi_conn_params *conn_params,
enum iscsi_task_type task_type,
u32 task_size,
@@ -342,56 +344,57 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
cpu_to_le16(dif_task_params->application_tag_mask);
SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
dif_task_params->crc_seed ? 1 : 0);
- SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+ SET_FIELD(rdif_context->flags0,
+ RDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
dif_task_params->host_guard_type);
SET_FIELD(rdif_context->flags0,
- RDIF_TASK_CONTEXT_PROTECTIONTYPE,
+ RDIF_TASK_CONTEXT_PROTECTION_TYPE,
dif_task_params->protection_type);
SET_FIELD(rdif_context->flags0,
- RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
+ RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1);
SET_FIELD(rdif_context->flags0,
- RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+ RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
dif_task_params->keep_ref_tag_const ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+ RDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
(dif_task_params->validate_app_tag &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_VALIDATEGUARD,
+ RDIF_TASK_CONTEXT_VALIDATE_GUARD,
(dif_task_params->validate_guard &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_VALIDATEREFTAG,
+ RDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
(dif_task_params->validate_ref_tag &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_HOSTINTERFACE,
+ RDIF_TASK_CONTEXT_HOST_INTERFACE,
dif_task_params->dif_on_host ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_NETWORKINTERFACE,
+ RDIF_TASK_CONTEXT_NETWORK_INTERFACE,
dif_task_params->dif_on_network ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDGUARD,
+ RDIF_TASK_CONTEXT_FORWARD_GUARD,
dif_task_params->forward_guard ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDAPPTAG,
+ RDIF_TASK_CONTEXT_FORWARD_APP_TAG,
dif_task_params->forward_app_tag ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDREFTAG,
+ RDIF_TASK_CONTEXT_FORWARD_REF_TAG,
dif_task_params->forward_ref_tag ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+ RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
dif_task_params->forward_app_tag_with_mask ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+ RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
SET_FIELD(rdif_context->flags1,
- RDIF_TASK_CONTEXT_INTERVALSIZE,
+ RDIF_TASK_CONTEXT_INTERVAL_SIZE,
dif_task_params->dif_block_size_log - 9);
SET_FIELD(rdif_context->state,
- RDIF_TASK_CONTEXT_REFTAGMASK,
+ RDIF_TASK_CONTEXT_REF_TAG_MASK,
dif_task_params->ref_tag_mask);
- SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
+ SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG,
dif_task_params->ignore_app_tag);
}
@@ -399,7 +402,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
tdif_context->app_tag_value =
cpu_to_le16(dif_task_params->application_tag);
- tdif_context->partial_crc_valueB =
+ tdif_context->partial_crc_value_b =
cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
tdif_context->partial_crc_value_a =
cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
@@ -407,64 +410,68 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
dif_task_params->crc_seed ? 1 : 0);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_SETERRORWITHEOP,
+ TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP,
dif_task_params->tx_dif_conn_err_en ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD,
dif_task_params->forward_guard ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_FORWARD_APP_TAG,
dif_task_params->forward_app_tag ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_FORWARD_REF_TAG,
dif_task_params->forward_ref_tag ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE,
dif_task_params->dif_block_size_log - 9);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_HOST_INTERFACE,
dif_task_params->dif_on_host ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_NETWORKINTERFACE,
+ TDIF_TASK_CONTEXT_NETWORK_INTERFACE,
dif_task_params->dif_on_network ? 1 : 0);
val = cpu_to_le32(dif_task_params->initial_ref_tag);
tdif_context->initial_ref_tag = val;
tdif_context->app_tag_mask =
cpu_to_le16(dif_task_params->application_tag_mask);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+ TDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
dif_task_params->host_guard_type);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_PROTECTIONTYPE,
+ TDIF_TASK_CONTEXT_PROTECTION_TYPE,
dif_task_params->protection_type);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
+ TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID,
dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+ TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
dif_task_params->keep_ref_tag_const ? 1 : 0);
- SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_VALIDATE_GUARD,
(dif_task_params->validate_guard &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+ TDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
(dif_task_params->validate_app_tag &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_VALIDATEREFTAG,
+ TDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
(dif_task_params->validate_ref_tag &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+ TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
dif_task_params->forward_app_tag_with_mask ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+ TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
SET_FIELD(tdif_context->flags1,
- TDIF_TASK_CONTEXT_REFTAGMASK,
+ TDIF_TASK_CONTEXT_REF_TAG_MASK,
dif_task_params->ref_tag_mask);
SET_FIELD(tdif_context->flags0,
- TDIF_TASK_CONTEXT_IGNOREAPPTAG,
+ TDIF_TASK_CONTEXT_IGNORE_APP_TAG,
dif_task_params->ignore_app_tag ? 1 : 0);
}
}
-static void set_local_completion_context(struct iscsi_task_context *context)
+static void set_local_completion_context(struct e4_iscsi_task_context *context)
{
SET_FIELD(context->ystorm_st_context.state.flags,
YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
@@ -481,7 +488,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
struct scsi_dif_task_params *dif_task_params)
{
u32 exp_data_transfer_len = conn_params->max_burst_length;
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
bool slow_io = false;
u32 task_size, val;
u8 num_sges = 0;
@@ -494,19 +501,33 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
cxt = task_params->context;
- val = cpu_to_le32(task_size);
- cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
- init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
- cmd_params);
- val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
- cxt->mstorm_st_context.sense_db.lo = val;
- val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
- cxt->mstorm_st_context.sense_db.hi = val;
+ if (task_type == ISCSI_TASK_TYPE_TARGET_READ) {
+ set_local_completion_context(cxt);
+ } else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) {
+ val = cpu_to_le32(task_size +
+ ((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset);
+ cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val;
+ cxt->mstorm_st_context.expected_itt =
+ cpu_to_le32(pdu_header->itt);
+ } else {
+ val = cpu_to_le32(task_size);
+ cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length =
+ val;
+ init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
+ cmd_params);
+ val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
+ cxt->mstorm_st_context.sense_db.lo = val;
+
+ val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
+ cxt->mstorm_st_context.sense_db.hi = val;
+ }
if (task_params->tx_io_size) {
init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
dif_task_params);
+ init_dif_context_flags(&cxt->ustorm_st_context.dif_flags,
+ dif_task_params);
init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
&cxt->ystorm_st_context.state.data_desc,
sgl_task_params);
@@ -595,7 +616,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
cxt = task_params->context;
@@ -637,7 +658,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_sgl_task_params,
struct scsi_sgl_task_params *rx_sgl_task_params)
{
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
cxt = task_params->context;
@@ -683,7 +704,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
cxt = task_params->context;
@@ -738,7 +759,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct iscsi_task_context *cxt;
+ struct e4_iscsi_task_context *cxt;
cxt = task_params->context;
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
index b6f24f91849d..c3deb77ac388 100644
--- a/drivers/scsi/qedi/qedi_fw_iscsi.h
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -13,7 +13,7 @@
#include "qedi_fw_scsi.h"
struct iscsi_task_params {
- struct iscsi_task_context *context;
+ struct e4_iscsi_task_context *context;
struct iscsi_wqe *sqe;
u32 tx_io_size;
u32 rx_io_size;
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 63d793f46064..f5b5a31999aa 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -52,11 +52,12 @@ void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
void qedi_process_iscsi_error(struct qedi_endpoint *ep,
- struct async_data *data);
+ struct iscsi_eqe_data *data);
void qedi_start_conn_recovery(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn);
struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
-void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
+void qedi_process_tcp_error(struct qedi_endpoint *ep,
+ struct iscsi_eqe_data *data);
void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index a02b34ea5cab..7ec7f6e00fb8 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -539,7 +539,6 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
conn_info->dup_ack_theshold = 3;
conn_info->rcv_wnd = 65535;
- conn_info->cwnd = DEF_MAX_CWND;
conn_info->ss_thresh = 65535;
conn_info->srtt = 300;
@@ -557,8 +556,8 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
(qedi_ep->ip_type == TCP_IPV6),
1, (qedi_ep->vlan_id != 0));
+ conn_info->cwnd = DEF_MAX_CWND * conn_info->mss;
conn_info->rcv_wnd_scale = 4;
- conn_info->ts_ticks_per_second = 1000;
conn_info->da_timeout_value = 200;
conn_info->ack_frequency = 2;
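
Moving the cwnd assignment below the MSS calculation suggests the E4 interface takes the congestion window in bytes: DEF_MAX_CWND reads as a segment count that must be scaled by the negotiated MSS. A sketch under that interpretation (offload_params_sketch stands in for the real qed_iscsi_params_offload, and the constant value is assumed):

#define DEF_MAX_CWND_SKETCH     2       /* assumed segment count */

struct offload_params_sketch {
        u16 mss;
        u32 cwnd;
};

static void set_offload_window(struct offload_params_sketch *p, u16 mss)
{
        p->mss = mss;
        p->cwnd = DEF_MAX_CWND_SKETCH * p->mss; /* segments -> bytes */
}
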
@@ -1557,7 +1556,8 @@ char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
return msg;
}
-void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
+void qedi_process_iscsi_error(struct qedi_endpoint *ep,
+ struct iscsi_eqe_data *data)
{
struct qedi_conn *qedi_conn;
struct qedi_ctx *qedi;
@@ -1603,7 +1603,8 @@ void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
}
-void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
+void qedi_process_tcp_error(struct qedi_endpoint *ep,
+ struct iscsi_eqe_data *data)
{
struct qedi_conn *qedi_conn;
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 3247287cb0e7..ea1315189922 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -182,7 +182,7 @@ struct qedi_cmd {
struct scsi_cmnd *scsi_cmd;
struct scatterlist *sg;
struct qedi_io_bdt io_tbl;
- struct iscsi_task_context request;
+ struct e4_iscsi_task_context request;
unsigned char *sense_buffer;
dma_addr_t sense_buffer_dma;
u16 task_id;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cccc34adc0e0..029e2e69b29f 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -60,7 +60,7 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
struct qedi_ctx *qedi;
struct qedi_endpoint *qedi_ep;
- struct async_data *data;
+ struct iscsi_eqe_data *data;
int rval = 0;
if (!context || !fw_handle) {
@@ -72,18 +72,18 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
- data = (struct async_data *)fw_handle;
+ data = (struct iscsi_eqe_data *)fw_handle;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
- "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
- data->cid, data->itid, data->error_code,
- data->fw_debug_param);
+ "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n",
+ data->icid, data->conn_id, data->error_code,
+ data->error_pdu_opcode_reserved);
- qedi_ep = qedi->ep_tbl[data->cid];
+ qedi_ep = qedi->ep_tbl[data->icid];
if (!qedi_ep) {
QEDI_WARN(&qedi->dbg_ctx,
"Cannot process event, ep already disconnected, cid=0x%x\n",
- data->cid);
+ data->icid);
WARN_ON(1);
return -ENODEV;
}
@@ -339,12 +339,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi)
static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
- sizeof(struct status_block), &sb_phys,
+ sizeof(struct status_block_e4), &sb_phys,
GFP_KERNEL);
if (!sb_virt) {
QEDI_ERR(&qedi->dbg_ctx,
@@ -858,7 +858,6 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
- qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
err_alloc_mem:
return rval;
@@ -961,7 +960,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
{
struct qedi_ctx *qedi = fp->qedi;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block *sb = sb_info->sb_virt;
+ struct status_block_e4 *sb = sb_info->sb_virt;
struct qedi_percpu_s *p = NULL;
struct global_queue *que;
u16 prod_idx;
@@ -998,7 +997,9 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
if (ret)
- continue;
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Dropping CQE 0x%x for cid=0x%x.\n",
+ que->cq_cons_idx, cqe->cqe_common.conn_id);
que->cq_cons_idx++;
if (que->cq_cons_idx == QEDI_CQ_SIZE)
@@ -1015,7 +1016,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp)
struct qedi_ctx *qedi = fp->qedi;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block *sb = sb_info->sb_virt;
+ struct status_block_e4 *sb = sb_info->sb_virt;
u16 prod_idx;
barrier();
@@ -1262,22 +1263,22 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
pbl, pbl->address.hi, pbl->address.lo, i);
- pbl->opaque.hi = 0;
- pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
+ pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+ pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);
pbl++;
}
/* Allocate list of PBL pages */
- qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
- PAGE_SIZE,
- &qedi->bdq_pbl_list_dma,
- GFP_KERNEL);
+ qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, PAGE_SIZE,
+ &qedi->bdq_pbl_list_dma,
+ GFP_KERNEL);
if (!qedi->bdq_pbl_list) {
QEDI_ERR(&qedi->dbg_ctx,
"Could not allocate list of PBL pages.\n");
return -ENOMEM;
}
- memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);
/*
* Now populate PBL list with pages that contain pointers to the
@@ -1367,11 +1368,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
(qedi->global_queues[i]->cq_pbl_size +
(QEDI_PAGE_SIZE - 1));
- qedi->global_queues[i]->cq =
- dma_alloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_mem_size,
- &qedi->global_queues[i]->cq_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_mem_size,
+ &qedi->global_queues[i]->cq_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1379,14 +1379,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
status = -ENOMEM;
goto mem_alloc_failure;
}
- memset(qedi->global_queues[i]->cq, 0,
- qedi->global_queues[i]->cq_mem_size);
-
- qedi->global_queues[i]->cq_pbl =
- dma_alloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_pbl_size,
- &qedi->global_queues[i]->cq_pbl_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_pbl_size,
+ &qedi->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1394,8 +1390,6 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
status = -ENOMEM;
goto mem_alloc_failure;
}
- memset(qedi->global_queues[i]->cq_pbl, 0,
- qedi->global_queues[i]->cq_pbl_size);
/* Create PBL */
num_pages = qedi->global_queues[i]->cq_mem_size /
@@ -1456,25 +1450,22 @@ int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
- ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
- &ep->sq_dma, GFP_KERNEL);
+ ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+ &ep->sq_dma, GFP_KERNEL);
if (!ep->sq) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue.\n");
rval = -ENOMEM;
goto out;
}
- memset(ep->sq, 0, ep->sq_mem_size);
-
- ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
- &ep->sq_pbl_dma, GFP_KERNEL);
+ ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+ &ep->sq_pbl_dma, GFP_KERNEL);
if (!ep->sq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue PBL.\n");
rval = -ENOMEM;
goto out_free_sq;
}
- memset(ep->sq_pbl, 0, ep->sq_pbl_size);
/* Create PBL */
num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
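
All of the qedi allocation changes above (and the qla2xxx hunks below) follow one mechanical pattern: dma_alloc_coherent() plus memset() collapses into dma_zalloc_coherent(), which returned zeroed coherent memory in kernels of this vintage. The before/after in miniature:

static void *alloc_zeroed_dma(struct device *dev, size_t size,
                              dma_addr_t *dma)
{
        /* Replaces dma_alloc_coherent() followed by
         * memset(buf, 0, size); dma_zalloc_coherent() exists at this
         * point in the tree and was later folded back into
         * dma_alloc_coherent() itself. */
        return dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
}
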
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
index d61e3ac22e67..8a0e523fc089 100644
--- a/drivers/scsi/qedi/qedi_version.h
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -7,8 +7,8 @@
* this source tree.
*/
-#define QEDI_MODULE_VERSION "8.10.4.0"
+#define QEDI_MODULE_VERSION "8.33.0.20"
#define QEDI_DRIVER_MAJOR_VER 8
-#define QEDI_DRIVER_MINOR_VER 10
-#define QEDI_DRIVER_REV_VER 4
-#define QEDI_DRIVER_ENG_VER 0
+#define QEDI_DRIVER_MINOR_VER 33
+#define QEDI_DRIVER_REV_VER 0
+#define QEDI_DRIVER_ENG_VER 20
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 9ce28c4f9812..89a4999fa631 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1843,14 +1843,13 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (qla2x00_reset_active(vha))
goto done;
- stats = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(*stats), &stats_dma, GFP_KERNEL);
+ stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
+ &stats_dma, GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x707d,
"Failed to allocate memory for stats.\n");
goto done;
}
- memset(stats, 0, sizeof(*stats));
rval = QLA_FUNCTION_FAILED;
if (IS_FWI2_CAPABLE(ha)) {
@@ -2170,6 +2169,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
vha->gnl.ldma);
+ vfree(vha->scan.l);
+
if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index e3ac7078d2aa..e2d5d3ca0f57 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1435,7 +1435,7 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
ha->optrom_state = QLA_SREADING;
}
- ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+ ha->optrom_buffer = vzalloc(ha->optrom_region_size);
if (!ha->optrom_buffer) {
ql_log(ql_log_warn, vha, 0x7059,
"Read: Unable to allocate memory for optrom retrieval "
@@ -1445,7 +1445,6 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
return -ENOMEM;
}
- memset(ha->optrom_buffer, 0, ha->optrom_region_size);
return 0;
}
@@ -2314,16 +2313,14 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job)
if (!IS_FWI2_CAPABLE(ha))
return -EPERM;
- stats = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(*stats), &stats_dma, GFP_KERNEL);
+ stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
+ &stats_dma, GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x70e2,
"Failed to allocate memory for stats.\n");
return -ENOMEM;
}
- memset(stats, 0, sizeof(*stats));
-
rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
if (rval == QLA_SUCCESS) {
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 01a9b8971e88..be7d6824581a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -246,8 +246,8 @@
* There is no correspondence between an N-PORT id and an AL_PA. Therefore the
* valid range of an N-PORT id is 0 through 0x7ef.
*/
-#define NPH_LAST_HANDLE 0x7ef
-#define NPH_MGMT_SERVER 0x7fa /* FFFFFA */
+#define NPH_LAST_HANDLE 0x7ee
+#define NPH_MGMT_SERVER 0x7ef /* FFFFEF */
#define NPH_SNS 0x7fc /* FFFFFC */
#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */
#define NPH_F_PORT 0x7fe /* FFFFFE */
@@ -288,6 +288,8 @@ struct name_list_extended {
#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
#define FW_DEF_EXCHANGES_CNT 2048
+#define FW_MAX_EXCHANGES_CNT (32 * 1024)
+#define REDUCE_EXCHANGES_CNT (8 * 1024)
struct req_que;
struct qla_tgt_sess;
@@ -315,6 +317,29 @@ struct srb_cmd {
/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
+/*
+ * 24 bit port ID type definition.
+ */
+typedef union {
+ uint32_t b24 : 24;
+
+ struct {
+#ifdef __BIG_ENDIAN
+ uint8_t domain;
+ uint8_t area;
+ uint8_t al_pa;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t al_pa;
+ uint8_t area;
+ uint8_t domain;
+#else
+#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
+#endif
+ uint8_t rsvd_1;
+ } b;
+} port_id_t;
+#define INVALID_PORT_ID 0xFFFFFF
+
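
port_id_t moves to the top of qla_def.h because earlier declarations (struct ct_arg's new id member, for one) now embed it. The union lets a 24-bit FC_ID be read either as one integer or as its domain/area/AL_PA bytes, with endianness handled by the #ifdef above; two trivial accessors sketch the intended use:

static bool same_port(port_id_t a, port_id_t b)
{
        return a.b24 == b.b24;          /* whole 24-bit N_Port ID */
}

static u8 port_domain(port_id_t id)
{
        return id.b.domain;             /* byte view via the union */
}
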
struct els_logo_payload {
uint8_t opcode;
uint8_t rsvd[3];
@@ -338,6 +363,7 @@ struct ct_arg {
u32 rsp_size;
void *req;
void *rsp;
+ port_id_t id;
};
/*
@@ -416,6 +442,7 @@ struct srb_iocb {
struct {
uint32_t cmd_hndl;
__le16 comp_status;
+ __le16 req_que_no;
struct completion comp;
} abt;
struct ct_arg ctarg;
@@ -448,6 +475,10 @@ struct srb_iocb {
uint32_t timeout_sec;
struct list_head entry;
} nvme;
+ struct {
+ u16 cmd;
+ u16 vp_index;
+ } ctrlvp;
} u;
struct timer_list timer;
@@ -476,6 +507,8 @@ struct srb_iocb {
#define SRB_NVME_CMD 19
#define SRB_NVME_LS 20
#define SRB_PRLI_CMD 21
+#define SRB_CTRL_VP 22
+#define SRB_PRLO_CMD 23
enum {
TYPE_SRB,
@@ -499,8 +532,12 @@ typedef struct srb {
const char *name;
int iocbs;
struct qla_qpair *qpair;
+ struct list_head elem;
u32 gen1; /* scratch */
u32 gen2; /* scratch */
+ int rc;
+ int retry_count;
+ struct completion comp;
union {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
@@ -2164,28 +2201,6 @@ struct imm_ntfy_from_isp {
#define REQUEST_ENTRY_SIZE (sizeof(request_t))
-/*
- * 24 bit port ID type definition.
- */
-typedef union {
- uint32_t b24 : 24;
-
- struct {
-#ifdef __BIG_ENDIAN
- uint8_t domain;
- uint8_t area;
- uint8_t al_pa;
-#elif defined(__LITTLE_ENDIAN)
- uint8_t al_pa;
- uint8_t area;
- uint8_t domain;
-#else
-#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
-#endif
- uint8_t rsvd_1;
- } b;
-} port_id_t;
-#define INVALID_PORT_ID 0xFFFFFF
/*
* Switch info gathering structure.
@@ -2257,14 +2272,17 @@ struct ct_sns_desc {
enum discovery_state {
DSC_DELETED,
+ DSC_GNN_ID,
DSC_GID_PN,
DSC_GNL,
DSC_LOGIN_PEND,
DSC_LOGIN_FAILED,
DSC_GPDB,
+ DSC_GFPN_ID,
DSC_GPSC,
DSC_UPD_FCPORT,
DSC_LOGIN_COMPLETE,
+ DSC_ADISC,
DSC_DELETE_PEND,
};
@@ -2290,7 +2308,9 @@ enum fcport_mgt_event {
FCME_GPDB_DONE,
FCME_GPNID_DONE,
FCME_GFFID_DONE,
- FCME_DELETE_DONE,
+ FCME_ADISC_DONE,
+ FCME_GNNID_DONE,
+ FCME_GFPNID_DONE,
};
enum rscn_addr_format {
@@ -2315,6 +2335,7 @@ typedef struct fc_port {
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
+ unsigned int free_pending:1;
unsigned int local:1;
unsigned int logout_on_delete:1;
unsigned int logo_ack_needed:1;
@@ -2323,6 +2344,7 @@ typedef struct fc_port {
unsigned int login_pause:1;
unsigned int login_succ:1;
unsigned int query:1;
+ unsigned int id_changed:1;
struct work_struct nvme_del_work;
struct completion nvme_del_done;
@@ -2434,6 +2456,7 @@ static const char * const port_state_str[] = {
#define FCF_FCP2_DEVICE BIT_2
#define FCF_ASYNC_SENT BIT_3
#define FCF_CONF_COMP_SUPPORTED BIT_4
+#define FCF_ASYNC_ACTIVE BIT_5
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@@ -2470,6 +2493,11 @@ static const char * const port_state_str[] = {
#define GA_NXT_REQ_SIZE (16 + 4)
#define GA_NXT_RSP_SIZE (16 + 620)
+#define GPN_FT_CMD 0x172
+#define GPN_FT_REQ_SIZE (16 + 4)
+#define GNN_FT_CMD 0x173
+#define GNN_FT_REQ_SIZE (16 + 4)
+
#define GID_PT_CMD 0x1A1
#define GID_PT_REQ_SIZE (16 + 4)
@@ -2725,6 +2753,13 @@ struct ct_sns_req {
} port_id;
struct {
+ uint8_t reserved;
+ uint8_t domain;
+ uint8_t area;
+ uint8_t port_type;
+ } gpn_ft;
+
+ struct {
uint8_t port_type;
uint8_t domain;
uint8_t area;
@@ -2837,6 +2872,27 @@ struct ct_sns_gid_pt_data {
uint8_t port_id[3];
};
+/* The response format is the same for both GPN_FT and GNN_FT */
+struct ct_sns_gpnft_rsp {
+ struct {
+ struct ct_cmd_hdr header;
+ uint16_t response;
+ uint16_t residual;
+ uint8_t fragment_id;
+ uint8_t reason_code;
+ uint8_t explanation_code;
+ uint8_t vendor_unique;
+ };
+ /* Assume the largest number of targets for the union */
+ struct ct_sns_gpn_ft_data {
+ u8 control_byte;
+ u8 port_id[3];
+ u32 reserved;
+ u8 port_name[8];
+ } entries[1];
+};
+
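Because the response struct ends in a one-element entries[] array, the buffer for N switch entries is sized as the header plus N-1 extra entries; this mirrors the rspsz computation in qla24xx_async_gpnft() later in this patch. A hedged sketch:

	static size_t gpnft_rsp_size(u32 max_fibre_devices)
	{
		return sizeof(struct ct_sns_gpnft_rsp) +
		       (max_fibre_devices - 1) *
		       sizeof(struct ct_sns_gpn_ft_data);
	}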
+/* CT command response */
struct ct_sns_rsp {
struct ct_rsp_hdr header;
@@ -2912,6 +2968,33 @@ struct ct_sns_pkt {
} p;
};
+struct ct_sns_gpnft_pkt {
+ union {
+ struct ct_sns_req req;
+ struct ct_sns_gpnft_rsp rsp;
+ } p;
+};
+
+enum scan_flags_t {
+ SF_SCANNING = BIT_0,
+ SF_QUEUED = BIT_1,
+};
+
+struct fab_scan_rp {
+ port_id_t id;
+ u8 port_name[8];
+ u8 node_name[8];
+};
+
+struct fab_scan {
+ struct fab_scan_rp *l;
+ u32 size;
+ u16 scan_retry;
+#define MAX_SCAN_RETRIES 5
+ enum scan_flags_t scan_flags;
+ struct delayed_work scan_work;
+};
+
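This section of the patch does not show the allocation of scan.l, but the vport-delete hunk above releases it with vfree(), which implies a vmalloc-family allocation of one fab_scan_rp per possible fabric device. A hedged sketch of what such setup and teardown could look like:

	#include <linux/vmalloc.h>

	static int scan_list_init(struct fab_scan *s, u32 max_fibre_devices)
	{
		s->size = max_fibre_devices * sizeof(struct fab_scan_rp);
		s->l = vzalloc(s->size);
		return s->l ? 0 : -ENOMEM;
	}

	static void scan_list_free(struct fab_scan *s)
	{
		vfree(s->l);
		s->l = NULL;
	}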
/*
* SNS command structures -- for 2200 compatibility.
*/
@@ -3117,7 +3200,7 @@ enum qla_work_type {
QLA_EVT_AENFX,
QLA_EVT_GIDPN,
QLA_EVT_GPNID,
- QLA_EVT_GPNID_DONE,
+ QLA_EVT_UNMAP,
QLA_EVT_NEW_SESS,
QLA_EVT_GPDB,
QLA_EVT_PRLI,
@@ -3125,6 +3208,15 @@ enum qla_work_type {
QLA_EVT_UPD_FCPORT,
QLA_EVT_GNL,
QLA_EVT_NACK,
+ QLA_EVT_RELOGIN,
+ QLA_EVT_ASYNC_PRLO,
+ QLA_EVT_ASYNC_PRLO_DONE,
+ QLA_EVT_GPNFT,
+ QLA_EVT_GPNFT_DONE,
+ QLA_EVT_GNNFT_DONE,
+ QLA_EVT_GNNID,
+ QLA_EVT_GFPNID,
+ QLA_EVT_SP_RETRY,
};
@@ -3166,7 +3258,9 @@ struct qla_work_evt {
struct {
port_id_t id;
u8 port_name[8];
+ u8 node_name[8];
void *pla;
+ u8 fc4_type;
} new_sess;
struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */
fc_port_t *fcport;
@@ -3177,6 +3271,9 @@ struct qla_work_evt {
u8 iocb[IOCB_SIZE];
int type;
} nack;
+ struct {
+ u8 fc4_type;
+ } gpnft;
} u;
};
@@ -3433,10 +3530,6 @@ struct qlt_hw_data {
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
-#define QLA_EARLY_LINKUP(_ha) \
- ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
- _ha->flags.fw_started && !_ha->flags.fw_init_done)
-
/*
* Qlogic host adapter specific data structure.
*/
@@ -3494,8 +3587,10 @@ struct qla_hw_data {
uint32_t detected_lr_sfp:1;
uint32_t using_lr_setting:1;
+ uint32_t rida_fmt2:1;
} flags;
+ uint16_t max_exchg;
uint16_t long_range_distance; /* 32G & above */
#define LR_DISTANCE_5K 1
#define LR_DISTANCE_10K 0
@@ -3713,6 +3808,8 @@ struct qla_hw_data {
(IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_EXLOGIN_OFFLD_CAPABLE(ha) \
(IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\
+ IS_QLA83XX(ha) || IS_QLA27XX(ha))
/* HBA serial number */
uint8_t serial0;
@@ -3795,7 +3892,7 @@ struct qla_hw_data {
int exchoffld_size;
int exchoffld_count;
- void *swl;
+ void *swl;
/* These are used by mailbox operations. */
uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
@@ -4107,6 +4204,7 @@ typedef struct scsi_qla_host {
#define LOOP_READY 5
#define LOOP_DEAD 6
+ unsigned long relogin_jif;
unsigned long dpc_flags;
#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
#define RESET_ACTIVE 1
@@ -4139,6 +4237,7 @@ typedef struct scsi_qla_host {
#define SET_ZIO_THRESHOLD_NEEDED 28
#define DETECT_SFP_CHANGE 29
#define N2N_LOGIN_NEEDED 30
+#define IOCB_WORK_ACTIVE 31
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -4252,6 +4351,8 @@ typedef struct scsi_qla_host {
uint8_t n2n_node_name[WWN_SIZE];
uint8_t n2n_port_name[WWN_SIZE];
uint16_t n2n_id;
+ struct list_head gpnid_list;
+ struct fab_scan scan;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -4511,6 +4612,16 @@ struct sff_8247_a0 {
#define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
(IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
+#define SAVE_TOPO(_ha) { \
+ if (_ha->current_topology) \
+ _ha->prev_topology = _ha->current_topology; \
+}
+
+#define N2N_TOPO(ha) \
+ ((ha->prev_topology == ISP_CFG_N && !ha->current_topology) || \
+ ha->current_topology == ISP_CFG_N || \
+ !ha->current_topology)
+
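Restated as plain inline functions for readability (equivalent to the macros above, not a replacement for them): SAVE_TOPO latches the last non-zero topology, and N2N_TOPO treats the port as point-to-point when the current topology is ISP_CFG_N, when no topology is reported at all, or when the previous topology was ISP_CFG_N and the current one has gone away:

	static inline void save_topo(struct qla_hw_data *ha)
	{
		if (ha->current_topology)
			ha->prev_topology = ha->current_topology;
	}

	static inline bool n2n_topo(const struct qla_hw_data *ha)
	{
		return (ha->prev_topology == ISP_CFG_N &&
			!ha->current_topology) ||
		       ha->current_topology == ISP_CFG_N ||
		       !ha->current_topology;
	}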
#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index d231e7156134..0b190082aa8d 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -127,21 +127,32 @@ static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
struct scsi_qla_host *vha = s->private;
- struct qla_hw_data *ha = vha->hw;
+ uint16_t mb[MAX_IOCB_MB_REG];
+ int rc;
+
+ rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
+ if (rc != QLA_SUCCESS) {
+ seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
+ } else {
+ seq_puts(s, "FW Resource count\n\n");
+ seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
+ seq_printf(s, "current TGT exchg count[%d]\n", mb[2]);
+ seq_printf(s, "original Initiator Exchange count[%d]\n", mb[3]);
+ seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[6]);
+ seq_printf(s, "Original IOCB count[%d]\n", mb[7]);
+ seq_printf(s, "Current IOCB count[%d]\n", mb[10]);
+ seq_printf(s, "MAX VP count[%d]\n", mb[11]);
+ seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
+ seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
+ mb[20]);
+ seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
+ mb[21]);
+ seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
+ mb[22]);
+ seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
+ mb[23]);
- seq_puts(s, "FW Resource count\n\n");
- seq_printf(s, "Original TGT exchg count[%d]\n",
- ha->orig_fw_tgt_xcb_count);
- seq_printf(s, "current TGT exchg count[%d]\n",
- ha->cur_fw_tgt_xcb_count);
- seq_printf(s, "original Initiator Exchange count[%d]\n",
- ha->orig_fw_xcb_count);
- seq_printf(s, "Current Initiator Exchange count[%d]\n",
- ha->cur_fw_xcb_count);
- seq_printf(s, "Original IOCB count[%d]\n", ha->orig_fw_iocb_count);
- seq_printf(s, "Current IOCB count[%d]\n", ha->cur_fw_iocb_count);
- seq_printf(s, "MAX VP count[%d]\n", ha->max_npiv_vports);
- seq_printf(s, "MAX FCF count[%d]\n", ha->fw_max_fcf_count);
+ }
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d5cef0727e72..5d8688e5bc7c 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1392,7 +1392,7 @@ struct vp_rpt_id_entry_24xx {
uint8_t port_name[8];
uint8_t node_name[8];
- uint32_t remote_nport_id;
+ uint8_t remote_nport_id[4];
uint32_t reserved_5;
} f2;
} u;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index fa115c7433e5..e9295398050c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -66,6 +66,7 @@ extern void qla84xx_put_chip(struct scsi_qla_host *);
extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
+extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
@@ -104,11 +105,18 @@ int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8);
int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
struct imm_ntfy_from_isp *, int);
-int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *,
- void *);
+int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
+ void *, u8);
int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
int qla24xx_detect_sfp(scsi_qla_host_t *vha);
int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
+ fc_port_t *, uint16_t *);
+
/*
* Global Data in qla_os.c source file.
*/
@@ -148,6 +156,7 @@ extern int ql2xuctrlirq;
extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
extern int ql2xenablemsix;
+extern int qla2xuseresexchforels;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -203,6 +212,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_abort_cmd(srb_t *);
+int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
/*
* Global Functions in qla_mid.c source file.
@@ -494,6 +504,7 @@ int qla24xx_get_port_login_templ(scsi_qla_host_t *, dma_addr_t,
extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
+int qla24xx_res_count_wait(struct scsi_qla_host *, uint16_t *, int);
/*
* Global Function Prototypes in qla_isr.c source file.
@@ -639,14 +650,26 @@ extern void qla2x00_free_fcport(fc_port_t *);
extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *);
extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *);
-void qla24xx_async_gpnid_done(scsi_qla_host_t *, srb_t*);
void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport);
+int qla24xx_async_gpnft(scsi_qla_host_t *, u8);
+void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
+void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
+int qla24xx_async_gnnid(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gnnid_event(scsi_qla_host_t *, struct event_arg *);
+int qla24xx_post_gnnid_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);
+void qla24xx_sp_unmap(scsi_qla_host_t *, srb_t *);
+void qla_scan_work_fn(struct work_struct *);
+
/*
* Global Function Prototypes in qla_attr.c source file.
*/
@@ -864,8 +887,7 @@ void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *);
void qlt_plogi_ack_link(struct scsi_qla_host *, struct qlt_plogi_ack_t *,
struct fc_port *, enum qlt_plogi_link_t);
void qlt_plogi_ack_unref(struct scsi_qla_host *, struct qlt_plogi_ack_t *);
-extern void qlt_schedule_sess_for_deletion(struct fc_port *, bool);
-extern void qlt_schedule_sess_for_deletion_lock(struct fc_port *);
+extern void qlt_schedule_sess_for_deletion(struct fc_port *);
extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
void qla24xx_delete_sess_fn(struct work_struct *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index bc3db6abc9a0..5bf9a59432f6 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -14,6 +14,10 @@ static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_rft_id(scsi_qla_host_t *);
static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
+static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
+static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
+static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
+static int qla_async_rsnn_nn(scsi_qla_host_t *);
/**
* qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
@@ -175,6 +179,9 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
}
break;
+ case CS_TIMEOUT:
+ rval = QLA_FUNCTION_TIMEOUT;
+ /* fall through */
default:
ql_dbg(ql_dbg_disc, vha, 0x2033,
"%s failed, completion status (%x) on port_id: "
@@ -508,6 +515,72 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
return (rval);
}
+static void qla2x00_async_sns_sp_done(void *s, int rc)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct ct_sns_pkt *ct_sns;
+ struct qla_work_evt *e;
+
+ sp->rc = rc;
+ if (rc == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s exiting normally.\n",
+ sp->name);
+ } else if (rc == QLA_FUNCTION_TIMEOUT) {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s timeout\n", sp->name);
+ } else {
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ sp->retry_count++;
+ if (sp->retry_count > 3)
+ goto err;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s fail rc %x. Retry count %d\n",
+ sp->name, rc, sp->retry_count);
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
+ if (!e)
+ goto err2;
+
+ del_timer(&sp->u.iocb_cmd.timer);
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+ return;
+ }
+
+err:
+ e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
+err2:
+ if (!e) {
+ /* Free the DMA buffers now (may WARN here) rather than leak them. */
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return;
+ }
+
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+}
+
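The completion handler above retries only ordinary failures, and only a bounded number of times; success and timeout both fall through to the QLA_EVT_UNMAP event, which frees the DMA buffers from process context. The decision can be summarized as follows (a hedged sketch; QLA_FUNCTION_TIMEOUT is the driver's timeout status):

	static bool sns_should_retry(int rc, int retry_count)
	{
		/* Success and timeout both finish; only other failures
		 * are re-issued, and at most three times. */
		return rc != QLA_SUCCESS && rc != QLA_FUNCTION_TIMEOUT &&
		       retry_count <= 3;
	}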
/**
* qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
* @ha: HA context
@@ -517,57 +590,87 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
int
qla2x00_rft_id(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_rft_id(vha);
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RFT_ID_REQ_SIZE;
- arg.rsp_size = RFT_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rftid(vha, &vha->d_id);
+}
- /* Issue RFT_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ if (!vha->flags.online)
+ goto done;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rft_id";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFT_ID_CMD,
- RFT_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
/* Prepare CT arguments -- port_id, FC-4 types */
ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
-
ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
if (vha->flags.nvme_enabled)
ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+
+ sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2043,
"RFT_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2044,
- "RFT_ID exiting normally.\n");
+ goto done_free_sp;
}
-
- return (rval);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x.\n",
+ sp->name, sp->handle, d_id->b24);
+ return rval;
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
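qla_async_rffid(), qla_async_rnnid() and qla_async_rsnn_nn() below all follow the same skeleton as qla_async_rftid(). A condensed outline (error unwinding elided; a sketch, not a fifth registration command):

	static int async_sns_skeleton(scsi_qla_host_t *vha)
	{
		srb_t *sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);

		if (!sp)
			return QLA_MEMORY_ALLOC_FAILED;

		sp->type = SRB_CT_PTHRU_CMD;
		qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

		/* 1. DMA-allocate separate request and response ct_sns_pkt. */
		/* 2. qla2x00_prep_ct_req() fills the CT_IU preamble; the
		 *    command-specific fields are set by the caller.         */
		/* 3. Set ctarg req/rsp sizes, NPH_SNS as the nport handle,
		 *    the async timeout, and qla2x00_async_sns_sp_done().    */
		/* 4. qla2x00_start_sp() queues the IOCB to the firmware.    */
		return qla2x00_start_sp(sp);
	}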
/**
@@ -579,12 +682,7 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
int
qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
ql_dbg(ql_dbg_disc, vha, 0x2046,
@@ -592,47 +690,81 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
return (QLA_SUCCESS);
}
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RFF_ID_REQ_SIZE;
- arg.rsp_size = RFF_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
+ FC4_TYPE_FCP_SCSI);
+}
- /* Issue RFF_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
+ u8 fc4feature, u8 fc4type)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
- /* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFF_ID_CMD,
- RFF_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
- /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
- ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain;
- ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
- ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rff_id";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
- qlt_rff_id(vha, ct_req);
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
- ct_req->req.rff_id.fc4_type = type; /* SCSI - FCP */
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
+ ct_req->req.rff_id.port_id[0] = d_id->b.domain;
+ ct_req->req.rff_id.port_id[1] = d_id->b.area;
+ ct_req->req.rff_id.port_id[2] = d_id->b.al_pa;
+ ct_req->req.rff_id.fc4_feature = fc4feature;
+ ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
+
+ sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2047,
"RFF_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2048,
- "RFF_ID exiting normally.\n");
+ goto done_free_sp;
}
- return (rval);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
+ sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
/**
@@ -644,54 +776,85 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
int
qla2x00_rnn_id(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_rnn_id(vha);
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RNN_ID_REQ_SIZE;
- arg.rsp_size = RNN_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
+}
- /* Issue RNN_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
+ u8 *node_name)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rnid";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
/* Prepare CT arguments -- port_id, node_name */
ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
-
memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x204d,
"RNN_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x204e,
- "RNN_ID exiting normally.\n");
+ goto done_free_sp;
}
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x\n",
+ sp->name, sp->handle, d_id->b24);
- return (rval);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
void
@@ -718,12 +881,7 @@ qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
int
qla2x00_rsnn_nn(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
ql_dbg(ql_dbg_disc, vha, 0x2050,
@@ -731,22 +889,49 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
return (QLA_SUCCESS);
}
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = 0;
- arg.rsp_size = RSNN_NN_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rsnn_nn(vha);
+}
- /* Issue RSNN_NN */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rsnn_nn";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RSNN_NN_CMD,
- RSNN_NN_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
/* Prepare CT arguments -- node_name, symbolic node_name, size */
memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
@@ -754,32 +939,33 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
/* Prepare the Symbolic Node Name */
qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
sizeof(ct_req->req.rsnn_nn.sym_node_name));
-
- /* Calculate SNN length */
ct_req->req.rsnn_nn.name_len =
(uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
- /* Update MS IOCB request */
- ms_pkt->req_bytecount =
- cpu_to_le32(24 + 1 + ct_req->req.rsnn_nn.name_len);
- ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2051,
- "RSNN_NN issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2052,
- "RSNN_NN exiting normally.\n");
+ ql_dbg(ql_dbg_disc, vha, 0x2051,
+ "RSNN_NN issue IOCB failed (%d).\n", rval);
+ goto done_free_sp;
}
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x.\n",
+ sp->name, sp->handle);
- return (rval);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
/**
@@ -2790,15 +2976,20 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
fc_port_t *fcport = ea->fcport;
ql_dbg(ql_dbg_disc, vha, 0x201d,
- "%s %8phC login state %d\n",
- __func__, fcport->port_name, fcport->fw_login_state);
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
if (ea->sp->gen2 != fcport->login_gen) {
/* PLOGI/PRLI/LOGO came in while cmd was out.*/
ql_dbg(ql_dbg_disc, vha, 0x201e,
- "%s %8phC generation changed rscn %d|%d login %d|%d \n",
+ "%s %8phC generation changed rscn %d|%d n",
__func__, fcport->port_name, fcport->last_rscn_gen,
- fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen);
+ fcport->rscn_gen);
return;
}
@@ -2811,7 +3002,21 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
/* cable plugged into the same place */
switch (vha->host->active_mode) {
case MODE_TARGET:
- /* NOOP. let the other guy login to us.*/
+ if (fcport->fw_login_state ==
+ DSC_LS_PRLI_COMP) {
+ u16 data[2];
+ /*
+ * Late RSCN was delivered.
+ * Remote port already login'ed.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0x201f,
+ "%s %d %8phC post adisc\n",
+ __func__, __LINE__,
+ fcport->port_name);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(
+ vha, fcport, data);
+ }
break;
case MODE_INITIATOR:
case MODE_DUAL:
@@ -2820,24 +3025,29 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
"%s %d %8phC post %s\n", __func__,
__LINE__, fcport->port_name,
(atomic_read(&fcport->state) ==
- FCS_ONLINE) ? "gpdb" : "gnl");
+ FCS_ONLINE) ? "adisc" : "gnl");
if (atomic_read(&fcport->state) ==
- FCS_ONLINE)
- qla24xx_post_gpdb_work(vha,
- fcport, PDO_FORCE_ADISC);
- else
+ FCS_ONLINE) {
+ u16 data[2];
+
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(
+ vha, fcport, data);
+ } else {
qla24xx_post_gnl_work(vha,
fcport);
+ }
break;
}
} else { /* fcport->d_id.b24 != ea->id.b24 */
fcport->d_id.b24 = ea->id.b24;
- if (fcport->deleted == QLA_SESS_DELETED) {
+ fcport->id_changed = 1;
+ if (fcport->deleted != QLA_SESS_DELETED) {
ql_dbg(ql_dbg_disc, vha, 0x2021,
"%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
}
} else { /* ea->sp->gen1 != fcport->rscn_gen */
@@ -2854,7 +3064,7 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x2042,
"%s %d %8phC post del sess\n", __func__,
__LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x2045,
"%s %d %8phC login\n", __func__, __LINE__,
@@ -2878,7 +3088,7 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
struct event_arg ea;
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
@@ -2889,9 +3099,22 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
ea.rc = res;
ea.event = FCME_GIDPN_DONE;
- ql_dbg(ql_dbg_disc, vha, 0x204f,
- "Async done-%s res %x, WWPN %8phC ID %3phC \n",
- sp->name, res, fcport->port_name, id);
+ if (res == QLA_FUNCTION_TIMEOUT) {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s WWPN %8phC timed out.\n",
+ sp->name, fcport->port_name);
+ qla24xx_post_gidpn_work(sp->vha, fcport);
+ sp->free(sp);
+ return;
+ } else if (res) {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s fail res %x, WWPN %8phC\n",
+ sp->name, res, fcport->port_name);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s good WWPN %8phC ID %3phC\n",
+ sp->name, fcport->port_name, id);
+ }
qla2x00_fcport_event_handler(vha, &ea);
@@ -2904,16 +3127,16 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GID_PN;
fcport->scan_state = QLA_FCPORT_SCAN;
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gidpn";
sp->gen1 = fcport->rscn_gen;
@@ -2954,8 +3177,8 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -2974,6 +3197,7 @@ int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -2986,9 +3210,39 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
+void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d8,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
+
+ qla24xx_post_upd_fcport_work(vha, ea->fcport);
+}
+
static void qla24xx_async_gpsc_sp_done(void *s, int res)
{
struct srb *sp = s;
@@ -3004,7 +3258,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
"Async done-%s res %x, WWPN %8phC \n",
sp->name, res, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (res == (DID_ERROR << 16)) {
/* entry status error */
@@ -3055,6 +3309,7 @@ done:
ea.event = FCME_GPSC_DONE;
ea.rc = res;
ea.fcport = fcport;
+ ea.sp = sp;
qla2x00_fcport_event_handler(vha, &ea);
sp->free(sp);
@@ -3066,14 +3321,14 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gpsc";
sp->gen1 = fcport->rscn_gen;
@@ -3113,8 +3368,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -3133,7 +3388,7 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
return qla2x00_post_work(vha, e);
}
-void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
+void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
{
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
@@ -3155,43 +3410,137 @@ void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
- fc_port_t *fcport;
- unsigned long flags;
+ fc_port_t *fcport, *conflict, *t;
+ u16 data[2];
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d port_id: %06x\n",
+ __func__, __LINE__, ea->id.b24);
- if (fcport) {
- /* cable moved. just plugged in */
- fcport->rscn_gen++;
- fcport->d_id = ea->id;
- fcport->scan_state = QLA_FCPORT_FOUND;
- fcport->flags |= FCF_FABRIC_DEVICE;
-
- switch (fcport->disc_state) {
- case DSC_DELETED:
- ql_dbg(ql_dbg_disc, vha, 0x210d,
- "%s %d %8phC login\n", __func__, __LINE__,
- fcport->port_name);
- qla24xx_fcport_handle_login(vha, fcport);
- break;
- case DSC_DELETE_PEND:
- break;
- default:
- ql_dbg(ql_dbg_disc, vha, 0x2064,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
- break;
+ if (ea->rc) {
+ /* cable is disconnected */
+ list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
+ if (fcport->d_id.b24 == ea->id.b24) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->disc_state);
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ }
+ }
}
} else {
- /* create new fcport */
- ql_dbg(ql_dbg_disc, vha, 0x2065,
- "%s %d %8phC post new sess\n",
- __func__, __LINE__, ea->port_name);
+ /* cable is connected */
+ fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
+ if (fcport) {
+ list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
+ list) {
+ if ((conflict->d_id.b24 == ea->id.b24) &&
+ (fcport != conflict)) {
+ /* 2 fcports with conflict Nport ID or
+ * an existing fcport is having nport ID
+ * conflict with new fcport.
+ */
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ conflict->port_name,
+ conflict->disc_state);
+ conflict->scan_state = QLA_FCPORT_SCAN;
+ switch (conflict->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict);
+ break;
+ }
+ }
+ }
+
+ fcport->rscn_gen++;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->flags |= FCF_FABRIC_DEVICE;
+ switch (fcport->disc_state) {
+ case DSC_LOGIN_COMPLETE:
+ /* recheck session is still intact. */
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC revalidate session with ADISC\n",
+ __func__, __LINE__, fcport->port_name);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport,
+ data);
+ break;
+ case DSC_DELETED:
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC login\n", __func__, __LINE__,
+ fcport->port_name);
+ fcport->d_id = ea->id;
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case DSC_DELETE_PEND:
+ fcport->d_id = ea->id;
+ break;
+ default:
+ fcport->d_id = ea->id;
+ break;
+ }
+ } else {
+ list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
+ list) {
+ if (conflict->d_id.b24 == ea->id.b24) {
+ /* 2 fcports with conflict Nport ID or
+ * an existing fcport is having nport ID
+ * conflict with new fcport.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ conflict->port_name,
+ conflict->disc_state);
+
+ conflict->scan_state = QLA_FCPORT_SCAN;
+ switch (conflict->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict);
+ break;
+ }
+ }
+ }
- qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL);
+ /* create new fcport */
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, ea->port_name);
+ qla24xx_post_newsess_work(vha, &ea->id,
+ ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
+ }
}
}
@@ -3205,11 +3554,18 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
(struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
struct event_arg ea;
struct qla_work_evt *e;
+ unsigned long flags;
- ql_dbg(ql_dbg_disc, vha, 0x2066,
- "Async done-%s res %x ID %3phC. %8phC\n",
- sp->name, res, ct_req->req.port_id.port_id,
- ct_rsp->rsp.gpn_id.port_name);
+ if (res)
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
+ sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
+ ct_rsp->rsp.gpn_id.port_name);
+ else
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
+ sp->name, sp->gen1, ct_req->req.port_id.port_id,
+ ct_rsp->rsp.gpn_id.port_name);
memset(&ea, 0, sizeof(ea));
memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
@@ -3220,9 +3576,26 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
ea.rc = res;
ea.event = FCME_GPNID_DONE;
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_del(&sp->elem);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ if (res) {
+ if (res == QLA_FUNCTION_TIMEOUT) {
+ qla24xx_post_gpnid_work(sp->vha, &ea.id);
+ sp->free(sp);
+ return;
+ }
+ } else if (sp->gen1) {
+ /* There was another RSCN for this Nport ID */
+ qla24xx_post_gpnid_work(sp->vha, &ea.id);
+ sp->free(sp);
+ return;
+ }
+
qla2x00_fcport_event_handler(vha, &ea);
- e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE);
+ e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
if (!e) {
/* Free the DMA buffers now (may WARN here) rather than leak them. */
if (sp->u.iocb_cmd.u.ctarg.req) {
@@ -3253,8 +3626,9 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
{
int rval = QLA_FUNCTION_FAILED;
struct ct_sns_req *ct_req;
- srb_t *sp;
+ srb_t *sp, *tsp;
struct ct_sns_pkt *ct_sns;
+ unsigned long flags;
if (!vha->flags.online)
goto done;
@@ -3265,8 +3639,22 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gpnid";
+ sp->u.iocb_cmd.u.ctarg.id = *id;
+ sp->gen1 = 0;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_for_each_entry(tsp, &vha->gpnid_list, elem) {
+ if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
+ tsp->gen1++;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ sp->free(sp);
+ goto done;
+ }
+ }
+ list_add_tail(&sp->elem, &vha->gpnid_list);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
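The list walk above coalesces duplicate queries: if a GPN_ID for the same port ID is already in flight, the new SRB is freed and the in-flight one has gen1 bumped, so its completion handler re-issues the query exactly once. The idea, restated without the locking (illustrative only):

	static bool gpnid_coalesce(struct list_head *gpnid_list, srb_t *new_sp)
	{
		srb_t *tsp;

		list_for_each_entry(tsp, gpnid_list, elem) {
			if (tsp->u.iocb_cmd.u.ctarg.id.b24 ==
			    new_sp->u.iocb_cmd.u.ctarg.id.b24) {
				tsp->gen1++;	/* completion re-queues once */
				return true;	/* caller frees new_sp */
			}
		}
		list_add_tail(&new_sp->elem, gpnid_list);
		return false;
	}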
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
@@ -3393,7 +3781,7 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
@@ -3441,3 +3829,720 @@ done_free_sp:
fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
+
+/* GPN_FT + GNN_FT*/
+static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
+{
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *vp;
+ unsigned long flags;
+ u64 twwn;
+ int rc = 0;
+
+ if (!ha->num_vhosts)
+ return 0;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ twwn = wwn_to_u64(vp->port_name);
+ if (wwn == twwn) {
+ rc = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return rc;
+}
+
+void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+{
+ fc_port_t *fcport;
+ u32 i, rc;
+ bool found;
+ u8 fc4type = sp->gen2;
+ struct fab_scan_rp *rp;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s enter\n", __func__);
+
+ if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s scan stop due to chip reset %x/%x\n",
+ sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
+ goto out;
+ }
+
+ rc = sp->rc;
+ if (rc) {
+ vha->scan.scan_retry++;
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Fabric scan failed on all retries.\n");
+ }
+ goto out;
+ }
+ vha->scan.scan_retry = 0;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->scan_state = QLA_FCPORT_SCAN;
+
+ for (i = 0; i < vha->hw->max_fibre_devices; i++) {
+ u64 wwn;
+
+ rp = &vha->scan.l[i];
+ found = false;
+
+ wwn = wwn_to_u64(rp->port_name);
+ if (wwn == 0)
+ continue;
+
+ if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
+ continue;
+
+ /* Bypass reserved domain fields. */
+ if ((rp->id.b.domain & 0xf0) == 0xf0)
+ continue;
+
+ /* Bypass virtual ports of the same host. */
+ if (qla2x00_is_a_vp(vha, wwn))
+ continue;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
+ continue;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->d_id.b24 = rp->id.b24;
+ found = true;
+ /*
+ * The device was not a fabric device before: clear any stale
+ * loop ID and mark it as a fabric device.
+ */
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
+ qla2x00_clear_loop_id(fcport);
+ fcport->flags |= FCF_FABRIC_DEVICE;
+ }
+ break;
+ }
+
+ if (!found) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, rp->port_name);
+ qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
+ rp->node_name, NULL, fc4type);
+ }
+ }
+
+ /*
+ * Logout all previous fabric dev marked lost, except FCP2 devices.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
+ continue;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND) {
+ if ((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+
+ qlt_schedule_sess_for_deletion(fcport);
+ continue;
+ }
+ }
+ } else
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+
+out:
+ qla24xx_sp_unmap(vha, sp);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+}
+
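qla24xx_async_gnnft_done() is a mark-and-sweep over the port list; condensed (a hedged paraphrase of the function above, with the matching pass elided):

	/* 1. Presume every known port is gone. */
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->scan_state = QLA_FCPORT_SCAN;

	/* 2. Each scan-list WWPN that matches a known port flips it to
	 *    QLA_FCPORT_FOUND; unmatched WWPNs post a new-session work
	 *    item. */

	/* 3. Fabric ports still in SCAN state are marked lost and, unless
	 *    they are FCP2 devices, scheduled for deletion; FOUND ports
	 *    re-enter the login state machine. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}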
+static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_work_evt *e;
+ struct ct_sns_req *ct_req =
+ (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
+ struct ct_sns_gpnft_rsp *ct_rsp =
+ (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
+ struct ct_sns_gpn_ft_data *d;
+ struct fab_scan_rp *rp;
+ int i, j, k;
+ u16 cmd = be16_to_cpu(ct_req->command);
+
+ /* gen2 field is holding the fc4type */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x FC4Type %x\n",
+ sp->name, res, sp->gen2);
+
+ if (res) {
+ unsigned long flags;
+
+ sp->free(sp);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s rescan failed on all retries\n",
+ sp->name);
+ }
+ return;
+ }
+
+ if (!res) {
+ port_id_t id;
+ u64 wwn;
+
+ j = 0;
+ for (i = 0; i < vha->hw->max_fibre_devices; i++) {
+ d = &ct_rsp->entries[i];
+
+ id.b.rsvd_1 = 0;
+ id.b.domain = d->port_id[0];
+ id.b.area = d->port_id[1];
+ id.b.al_pa = d->port_id[2];
+ wwn = wwn_to_u64(d->port_name);
+
+ if (id.b24 == 0 || wwn == 0)
+ continue;
+
+ if (cmd == GPN_FT_CMD) {
+ rp = &vha->scan.l[j];
+ rp->id = id;
+ memcpy(rp->port_name, d->port_name, 8);
+ j++;
+ } else {/* GNN_FT_CMD */
+ for (k = 0; k < vha->hw->max_fibre_devices;
+ k++) {
+ rp = &vha->scan.l[k];
+ if (id.b24 == rp->id.b24) {
+ memcpy(rp->node_name,
+ d->port_name, 8);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ if (cmd == GPN_FT_CMD)
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
+ else
+ e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
+ if (!e) {
+ /* Free the DMA buffers now (may WARN here) rather than leak them. */
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s unable to alloc work element\n",
+ sp->name);
+ sp->free(sp);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
+ sp->rc = res;
+ e->u.iosb.sp = sp;
+
+ qla2x00_post_work(vha, e);
+}
+
+/*
+ * Get WWNN list for fc4_type
+ *
+ * The SRB from the preceding GPN_FT command is re-used, so the
+ * DMA request/response buffers need not be freed and re-allocated
+ */
+static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
+ u8 fc4_type)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_pkt *ct_sns;
+
+ if (!vha->flags.online) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: req %p rsp %p are not setup\n",
+ __func__, sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.rsp);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ WARN_ON(1);
+ goto done_free_sp;
+ }
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gnnft";
+ sp->gen1 = vha->hw->base_qpair->chip_reset;
+ sp->gen2 = fc4_type;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
+ memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
+ sp->u.iocb_cmd.u.ctarg.rsp_size);
+
+ /* GNN_FT req */
+ ct_req->req.gpn_ft.port_type = fc4_type;
+
+ sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gpnft_gnnft_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s hdl=%x FC4Type %x.\n", sp->name,
+ sp->handle, ct_req->req.gpn_ft.port_type);
+ return rval;
+
+done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return rval;
+} /* GNNFT */
+
+void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
+{
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s enter\n", __func__);
+ del_timer(&sp->u.iocb_cmd.timer);
+ qla24xx_async_gnnft(vha, sp, sp->gen2);
+}
+
+/* Get WWPN list for certain fc4_type */
+int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+ u32 rspsz;
+ unsigned long flags;
+
+ if (!vha->flags.online)
+ return rval;
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (vha->scan.scan_flags & SF_SCANNING) {
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
+ return rval;
+ }
+ vha->scan.scan_flags |= SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ return rval;
+ }
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gpnft";
+ sp->gen1 = vha->hw->base_qpair->chip_reset;
+ sp->gen2 = fc4_type;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ rspsz = sizeof(struct ct_sns_gpnft_rsp) +
+ ((vha->hw->max_fibre_devices - 1) *
+ sizeof(struct ct_sns_gpn_ft_data));
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(&vha->hw->pdev->dev,
+ rspsz, &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ memset(vha->scan.l, 0, vha->scan.size);
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
+
+ /* GPN_FT req */
+ ct_req->req.gpn_ft.port_type = fc4_type;
+
+ sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gpnft_gnnft_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s hdl=%x FC4Type %x.\n", sp->name,
+ sp->handle, ct_req->req.gpn_ft.port_type);
+ return rval;
+
+done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return rval;
+}
+
+void qla_scan_work_fn(struct work_struct *work)
+{
+ struct fab_scan *s = container_of(to_delayed_work(work),
+ struct fab_scan, scan_work);
+ struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
+ scan);
+ unsigned long flags;
+
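+ /* Delayed RSCN work fired: flag a loop resync and let DPC drive rediscovery. */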
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: schedule loop resync\n", __func__);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_QUEUED;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+}
+
+/* GNN_ID */
+void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ qla24xx_post_gnl_work(vha, ea->fcport);
+}
+
+static void qla2x00_async_gnnid_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
+ struct event_arg ea;
+ u64 wwnn;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ wwnn = wwn_to_u64(node_name);
+ if (wwnn)
+ memcpy(fcport->node_name, node_name, WWN_SIZE);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.sp = sp;
+ ea.rc = res;
+ ea.event = FCME_GNNID_DONE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s res %x, WWPN %8phC %8phC\n",
+ sp->name, res, fcport->port_name, fcport->node_name);
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
+
+ fcport->disc_state = DSC_GNN_ID;
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gnnid";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
+ GNN_ID_RSP_SIZE);
+
+ /* GNN_ID req */
+ ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
+ ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+
+ /* req & rsp use the same buffer */
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gnnid_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
+ sp->name, fcport->port_name,
+ sp->handle, fcport->loop_id, fcport->d_id.b24);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ return rval;
+}
+
+int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+ int ls;
+
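+ /* Skip the query when the link is down or the driver is unloading. */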
+ ls = atomic_read(&vha->loop_state);
+ if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
+ test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
+
+/* GPFN_ID */
+void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
+
+ qla24xx_post_gpsc_work(vha, fcport);
+}
+
+static void qla2x00_async_gfpnid_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
+ struct event_arg ea;
+ u64 wwn;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ wwn = wwn_to_u64(fpn);
+ if (wwn)
+ memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.sp = sp;
+ ea.rc = res;
+ ea.event = FCME_GFPNID_DONE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s res %x, WWPN %8phC %8phC\n",
+ sp->name, res, fcport->port_name, fcport->fabric_port_name);
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
+
+ fcport->disc_state = DSC_GFPN_ID;
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gfpnid";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
+ GFPN_ID_RSP_SIZE);
+
+ /* GFPN_ID req */
+ ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
+ ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+
+ /* req & rsp use the same buffer */
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gfpnid_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
+ sp->name, fcport->port_name,
+ sp->handle, fcport->loop_id, fcport->d_id.b24);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ return rval;
+}
+
+int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+ int ls;
+
+ ls = atomic_read(&vha->loop_state);
+ if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
+ test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1bafa043f9f1..aececf664654 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -41,6 +41,7 @@ static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
struct event_arg *);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
struct event_arg *);
+static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
/* SRB Extensions ---------------------------------------------------------- */
@@ -58,7 +59,8 @@ qla2x00_sp_timeout(struct timer_list *t)
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
- sp->free(sp);
+ if (sp->type != SRB_ELS_DCMD)
+ sp->free(sp);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
@@ -102,14 +104,21 @@ qla2x00_async_iocb_timeout(void *data)
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct event_arg ea;
- ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
- "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
- sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
+ if (fcport) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
+ "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
+ sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ } else {
+ pr_info("Async-%s timeout - hdl=%x.\n",
+ sp->name, sp->handle);
+ }
switch (sp->type) {
case SRB_LOGIN_CMD:
+ if (!fcport)
+ break;
/* Retry as needed. */
lio->u.logio.data[0] = MBS_COMMAND_ERROR;
lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
@@ -123,6 +132,8 @@ qla2x00_async_iocb_timeout(void *data)
qla24xx_handle_plogi_done_event(fcport->vha, &ea);
break;
case SRB_LOGOUT_CMD:
+ if (!fcport)
+ break;
qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
break;
case SRB_CT_PTHRU_CMD:
@@ -130,6 +141,7 @@ qla2x00_async_iocb_timeout(void *data)
case SRB_NACK_PLOGI:
case SRB_NACK_PRLI:
case SRB_NACK_LOGO:
+ case SRB_CTRL_VP:
sp->done(sp, QLA_FUNCTION_TIMEOUT);
break;
}
@@ -146,7 +158,8 @@ qla2x00_async_login_sp_done(void *ptr, int res)
ql_dbg(ql_dbg_disc, vha, 0x20dd,
"%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
if (!test_bit(UNLOADING, &vha->dpc_flags)) {
memset(&ea, 0, sizeof(ea));
ea.event = FCME_PLOGI_DONE;
@@ -173,11 +186,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
if (!vha->flags.online)
goto done;
- if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
- (fcport->fw_login_state == DSC_LS_PRLI_PEND))
- goto done;
-
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -185,8 +193,11 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->flags |= FCF_ASYNC_SENT;
fcport->logout_completed = 0;
+ fcport->disc_state = DSC_LOGIN_PEND;
sp->type = SRB_LOGIN_CMD;
sp->name = "login";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
@@ -201,7 +212,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- fcport->flags &= ~FCF_ASYNC_SENT;
fcport->flags |= FCF_LOGIN_NEEDED;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
goto done_free_sp;
@@ -216,8 +226,8 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -227,7 +237,7 @@ qla2x00_async_logout_sp_done(void *ptr, int res)
srb_t *sp = ptr;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
lio->u.logio.data);
@@ -239,9 +249,11 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
struct srb_iocb *lio;
- int rval;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- rval = QLA_FUNCTION_FAILED;
fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
@@ -272,16 +284,126 @@ done:
return rval;
}
+void
+qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+{
+ /* Don't re-login in target mode */
+ if (!fcport->tgt_session)
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ qlt_logo_completion_handler(fcport, data[0]);
+}
+
+static void
+qla2x00_async_prlo_sp_done(void *s, int res)
+{
+ srb_t *sp = s;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = sp->vha;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ sp->free(sp);
+}
+
+int
+qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ srb_t *sp;
+ struct srb_iocb *lio;
+ int rval;
+
+ rval = QLA_FUNCTION_FAILED;
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_PRLO_CMD;
+ sp->name = "prlo";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_prlo_sp_done;
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2070,
+ "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
+}
+
+static
+void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+ if (ea->data[0] != MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "%s %8phC: adisc fail: post delete\n",
+ __func__, ea->fcport->port_name);
+ qlt_schedule_sess_for_deletion(ea->fcport);
+ return;
+ }
+
+ if (ea->fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != ea->fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, ea->fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ qla24xx_post_gidpn_work(vha, ea->fcport);
+ return;
+ }
+
+ __qla24xx_handle_gpdb_event(vha, ea);
+}
+
static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
srb_t *sp = ptr;
struct scsi_qla_host *vha = sp->vha;
+ struct event_arg ea;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- if (!test_bit(UNLOADING, &vha->dpc_flags))
- qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport,
- lio->u.logio.data);
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s res %x %8phC\n",
+ sp->name, res, sp->fcport->port_name);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_ADISC_DONE;
+ ea.rc = res;
+ ea.data[0] = lio->u.logio.data[0];
+ ea.data[1] = lio->u.logio.data[1];
+ ea.iop[0] = lio->u.logio.iop[0];
+ ea.iop[1] = lio->u.logio.iop[1];
+ ea.fcport = sp->fcport;
+ ea.sp = sp;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
sp->free(sp);
}
@@ -313,15 +435,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x206f,
- "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
- sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
return rval;
done_free_sp:
sp->free(sp);
done:
fcport->flags &= ~FCF_ASYNC_SENT;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
return rval;
}
@@ -333,9 +455,19 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
u16 i, n, found = 0, loop_id;
port_id_t id;
u64 wwn;
- u8 opt = 0, current_login_state;
+ u16 data[2];
+ u8 current_login_state;
fcport = ea->fcport;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc,
+ fcport->login_gen, fcport->last_login_gen,
+ fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
if (ea->rc) { /* rval */
if (fcport->login_retry == 0) {
@@ -356,9 +488,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
return;
} else if (fcport->last_login_gen != fcport->login_gen) {
ql_dbg(ql_dbg_disc, vha, 0x20e0,
- "%s %8phC login gen changed login %d|%d\n",
- __func__, fcport->port_name,
- fcport->last_login_gen, fcport->login_gen);
+ "%s %8phC login gen changed\n",
+ __func__, fcport->port_name);
return;
}
@@ -400,7 +531,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e3,
"%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport, 1);
+ qlt_schedule_sess_for_deletion(fcport);
return;
}
@@ -430,8 +561,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e4,
"%s %d %8phC post gpdb\n",
__func__, __LINE__, fcport->port_name);
- opt = PDO_FORCE_ADISC;
- qla24xx_post_gpdb_work(vha, fcport, opt);
+
+ if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
break;
case DSC_LS_PORT_UNAVAIL:
default:
@@ -449,36 +586,29 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
if (!found) {
/* fw has no record of this port */
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- qla2x00_find_new_loop_id(vha, fcport);
- fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
- } else {
- for (i = 0; i < n; i++) {
- e = &vha->gnl.l[i];
- id.b.domain = e->port_id[0];
- id.b.area = e->port_id[1];
- id.b.al_pa = e->port_id[2];
- id.b.rsvd_1 = 0;
- loop_id = le16_to_cpu(e->nport_handle);
-
- if (fcport->d_id.b24 == id.b24) {
- conflict_fcport =
- qla2x00_find_fcport_by_wwpn(vha,
- e->port_name, 0);
-
- ql_dbg(ql_dbg_disc, vha, 0x20e6,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict_fcport->port_name);
- qlt_schedule_sess_for_deletion
- (conflict_fcport, 1);
- }
-
- if (fcport->loop_id == loop_id) {
- /* FW already picked this loop id for another fcport */
- qla2x00_find_new_loop_id(vha, fcport);
- }
+ for (i = 0; i < n; i++) {
+ e = &vha->gnl.l[i];
+ id.b.domain = e->port_id[0];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[2];
+ id.b.rsvd_1 = 0;
+ loop_id = le16_to_cpu(e->nport_handle);
+
+ if (fcport->d_id.b24 == id.b24) {
+ conflict_fcport =
+ qla2x00_find_fcport_by_wwpn(vha,
+ e->port_name, 0);
+ ql_dbg(ql_dbg_disc, vha, 0x20e6,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict_fcport->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict_fcport);
}
+
+ /* FW already picked this loop id for another fcport */
+ if (fcport->loop_id == loop_id)
+ fcport->loop_id = FC_NO_LOOP_ID;
}
qla24xx_fcport_handle_login(vha, fcport);
}
@@ -496,6 +626,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
struct get_name_list_extended *e;
u64 wwn;
struct list_head h;
+ bool found = false;
ql_dbg(ql_dbg_disc, vha, 0x20e7,
"Async done-%s res %x mb[1]=%x mb[2]=%x \n",
@@ -539,12 +670,44 @@ qla24xx_async_gnl_sp_done(void *s, int res)
list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
list_del_init(&fcport->gnl_entry);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
}
+ /* create new fcport if fw has knowledge of new sessions */
+ for (i = 0; i < n; i++) {
+ port_id_t id;
+ u64 wwnn;
+
+ e = &vha->gnl.l[i];
+ wwn = wwn_to_u64(e->port_name);
+
+ found = false;
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (!memcmp((u8 *)&wwn, fcport->port_name,
+ WWN_SIZE)) {
+ found = true;
+ break;
+ }
+ }
+
+ id.b.domain = e->port_id[2];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[0];
+ id.b.rsvd_1 = 0;
+
+ if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "%s %d %8phC %06x post new sess\n",
+ __func__, __LINE__, (u8 *)&wwn, id.b24);
+ wwnn = wwn_to_u64(e->node_name);
+ qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
+ (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
+ }
+ }
+
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp->free(sp);
@@ -558,14 +721,13 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
unsigned long flags;
u16 *mb;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GNL;
fcport->last_rscn_gen = fcport->rscn_gen;
fcport->last_login_gen = fcport->login_gen;
@@ -573,8 +735,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
if (vha->gnl.sent) {
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- rval = QLA_SUCCESS;
- goto done;
+ return QLA_SUCCESS;
}
vha->gnl.sent = 1;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
@@ -582,6 +743,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gnlist";
sp->gen1 = fcport->rscn_gen;
@@ -616,8 +779,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -630,6 +793,7 @@ int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -639,31 +803,18 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
- struct port_database_24xx *pd;
fc_port_t *fcport = sp->fcport;
u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
- int rval = QLA_SUCCESS;
struct event_arg ea;
ql_dbg(ql_dbg_disc, vha, 0x20db,
"Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
sp->name, res, fcport->port_name, mb[1], mb[2]);
- fcport->flags &= ~FCF_ASYNC_SENT;
-
- if (res) {
- rval = res;
- goto gpd_error_out;
- }
-
- pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
-
- rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-gpd_error_out:
memset(&ea, 0, sizeof(ea));
ea.event = FCME_GPDB_DONE;
- ea.rc = rval;
ea.fcport = fcport;
ea.sp = sp;
@@ -754,7 +905,6 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- fcport->flags &= ~FCF_ASYNC_SENT;
fcport->flags |= FCF_LOGIN_NEEDED;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
goto done_free_sp;
@@ -783,6 +933,7 @@ int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
e->u.fcport.fcport = fcport;
e->u.fcport.opt = opt;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -796,16 +947,16 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
struct port_database_24xx *pd;
struct qla_hw_data *ha = vha->hw;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GPDB;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gpdb";
sp->gen1 = fcport->rscn_gen;
@@ -851,47 +1002,17 @@ done_free_sp:
dma_pool_free(ha->s_dma_pool, pd, pd_dma);
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
qla24xx_post_gpdb_work(vha, fcport, opt);
return rval;
}
static
-void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
- int rval = ea->rc;
- fc_port_t *fcport = ea->fcport;
unsigned long flags;
- fcport->flags &= ~FCF_ASYNC_SENT;
-
- ql_dbg(ql_dbg_disc, vha, 0x20d2,
- "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
- fcport->disc_state, fcport->fw_login_state, rval);
-
- if (ea->sp->gen2 != fcport->login_gen) {
- /* target side must have changed it. */
- ql_dbg(ql_dbg_disc, vha, 0x20d3,
- "%s %8phC generation changed rscn %d|%d login %d|%d \n",
- __func__, fcport->port_name, fcport->last_rscn_gen,
- fcport->rscn_gen, fcport->last_login_gen,
- fcport->login_gen);
- return;
- } else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
- return;
- }
-
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
- return;
- }
-
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
ea->fcport->login_gen++;
ea->fcport->deleted = 0;
@@ -905,47 +1026,157 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
!vha->hw->flags.gpsc_supported) {
ql_dbg(ql_dbg_disc, vha, 0x20d6,
"%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__, fcport->port_name,
+ __func__, __LINE__, ea->fcport->port_name,
vha->fcport_count);
- qla24xx_post_upd_fcport_work(vha, fcport);
+ qla24xx_post_upd_fcport_work(vha, ea->fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0x20d7,
- "%s %d %8phC post gpsc fcp_cnt %d\n",
- __func__, __LINE__, fcport->port_name,
- vha->fcport_count);
-
- qla24xx_post_gpsc_work(vha, fcport);
+ if (ea->fcport->id_changed) {
+ ea->fcport->id_changed = 0;
+ ql_dbg(ql_dbg_disc, vha, 0x20d7,
+ "%s %d %8phC post gfpnid fcp_cnt %d\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ vha->fcport_count);
+ qla24xx_post_gfpnid_work(vha, ea->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20d7,
+ "%s %d %8phC post gpsc fcp_cnt %d\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ vha->fcport_count);
+ qla24xx_post_gpsc_work(vha, ea->fcport);
+ }
}
+ } else if (ea->fcport->login_succ) {
+ /*
+ * We have an existing session. A late RSCN delivery
+ * must have triggered the session to be re-validated.
+ * Session is still valid.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0x20d6,
+ "%s %d %8phC session revalidate success\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+}
+
+static
+void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+ struct port_database_24xx *pd;
+ struct srb *sp = ea->sp;
+
+ pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ "%s %8phC DS %d LS %d rc %d\n", __func__, fcport->port_name,
+ fcport->disc_state, pd->current_login_state, ea->rc);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
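+ /* Dispatch on the firmware's view of the login state. */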
+ switch (pd->current_login_state) {
+ case PDS_PRLI_COMPLETE:
+ __qla24xx_parse_gpdb(vha, fcport, pd);
+ break;
+ case PDS_PLOGI_PENDING:
+ case PDS_PLOGI_COMPLETE:
+ case PDS_PRLI_PENDING:
+ case PDS_PRLI2_PENDING:
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n",
+ __func__, __LINE__, fcport->port_name);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return;
+ case PDS_LOGO_PENDING:
+ case PDS_PORT_UNAVAILABLE:
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+ __qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */
-int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
- if (fcport->login_retry == 0)
- return 0;
+ u8 login = 0;
+ int rc;
- if (fcport->scan_state != QLA_FCPORT_FOUND)
- return 0;
+ if (qla_tgt_mode_enabled(vha))
+ return;
+
+ if (qla_dual_mode_enabled(vha)) {
+ if (N2N_TOPO(vha->hw)) {
+ u64 mywwn, wwn;
+
+ mywwn = wwn_to_u64(vha->port_name);
+ wwn = wwn_to_u64(fcport->port_name);
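+ /* In N2N the side with the higher WWPN initiates the login. */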
+ if (mywwn > wwn)
+ login = 1;
+ else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
+ && time_after_eq(jiffies,
+ fcport->plogi_nack_done_deadline))
+ login = 1;
+ } else {
+ login = 1;
+ }
+ } else {
+ /* initiator mode */
+ login = 1;
+ }
+
+ if (login) {
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ rc = qla2x00_find_new_loop_id(vha, fcport);
+ if (rc) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e6,
+ "%s %d %8phC post del sess - out of loopid\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->scan_state = 0;
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+ }
+ ql_dbg(ql_dbg_disc, vha, 0x20bf,
+ "%s %d %8phC post login\n",
+ __func__, __LINE__, fcport->port_name);
+ qla2x00_post_async_login_work(vha, fcport, NULL);
+ }
+}
+
+int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ u16 data[2];
+ u64 wwn;
ql_dbg(ql_dbg_disc, vha, 0x20d8,
- "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
+ "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, fcport->login_pause, fcport->flags,
fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
- fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
- fcport->loop_id);
+ fcport->login_gen, fcport->login_retry,
+ fcport->loop_id, fcport->scan_state);
- fcport->login_retry--;
+ if (fcport->login_retry == 0)
+ return 0;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ return 0;
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
return 0;
if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
- if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return 0;
+ }
}
/* for pure Target Mode. Login will not be initiated */
@@ -957,19 +1188,23 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
return 0;
}
+ fcport->login_retry--;
+
switch (fcport->disc_state) {
case DSC_DELETED:
- if (fcport->loop_id == FC_NO_LOOP_ID) {
+ wwn = wwn_to_u64(fcport->node_name);
+ if (wwn == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post GNNID\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gnnid_work(vha, fcport);
+ } else if (fcport->loop_id == FC_NO_LOOP_ID) {
ql_dbg(ql_dbg_disc, vha, 0x20bd,
"%s %d %8phC post gnl\n",
__func__, __LINE__, fcport->port_name);
- qla24xx_async_gnl(vha, fcport);
+ qla24xx_post_gnl_work(vha, fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0x20bf,
- "%s %d %8phC post login\n",
- __func__, __LINE__, fcport->port_name);
- fcport->disc_state = DSC_LOGIN_PEND;
- qla2x00_post_async_login_work(vha, fcport, NULL);
+ qla_chk_n2n_b4_login(vha, fcport);
}
break;
@@ -981,40 +1216,26 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
}
- if (fcport->flags & FCF_FCP2_DEVICE) {
- u8 opt = PDO_FORCE_ADISC;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c9,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, fcport->port_name);
-
- fcport->disc_state = DSC_GPDB;
- qla24xx_post_gpdb_work(vha, fcport, opt);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20cf,
- "%s %d %8phC post login\n",
- __func__, __LINE__, fcport->port_name);
- fcport->disc_state = DSC_LOGIN_PEND;
- qla2x00_post_async_login_work(vha, fcport, NULL);
- }
-
+ qla_chk_n2n_b4_login(vha, fcport);
break;
case DSC_LOGIN_FAILED:
ql_dbg(ql_dbg_disc, vha, 0x20d0,
"%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
-
- qla24xx_post_gidpn_work(vha, fcport);
+ if (N2N_TOPO(vha->hw))
+ qla_chk_n2n_b4_login(vha, fcport);
+ else
+ qla24xx_post_gidpn_work(vha, fcport);
break;
case DSC_LOGIN_COMPLETE:
/* recheck login state */
ql_dbg(ql_dbg_disc, vha, 0x20d1,
- "%s %d %8phC post gpdb\n",
+ "%s %d %8phC post adisc\n",
__func__, __LINE__, fcport->port_name);
-
- qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
break;
default:
@@ -1040,16 +1261,15 @@ void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
switch (fcport->disc_state) {
case DSC_DELETED:
case DSC_LOGIN_COMPLETE:
- qla24xx_post_gidpn_work(fcport->vha, fcport);
+ qla24xx_post_gpnid_work(fcport->vha, &ea->id);
break;
-
default:
break;
}
}
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
- u8 *port_name, void *pla)
+ u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
struct qla_work_evt *e;
e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
@@ -1058,47 +1278,20 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
e->u.new_sess.id = *id;
e->u.new_sess.pla = pla;
+ e->u.new_sess.fc4_type = fc4_type;
memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
+ if (node_name)
+ memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
return qla2x00_post_work(vha, e);
}
static
-int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
- struct event_arg *ea)
-{
- fc_port_t *fcport = ea->fcport;
-
- if (test_bit(UNLOADING, &vha->dpc_flags))
- return 0;
-
- switch (vha->host->active_mode) {
- case MODE_INITIATOR:
- case MODE_DUAL:
- if (fcport->scan_state == QLA_FCPORT_FOUND)
- qla24xx_fcport_handle_login(vha, fcport);
- break;
-
- case MODE_TARGET:
- default:
- /* no-op */
- break;
- }
-
- return 0;
-}
-
-static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
struct event_arg *ea)
{
fc_port_t *fcport = ea->fcport;
- if (fcport->scan_state != QLA_FCPORT_FOUND) {
- fcport->login_retry++;
- return;
- }
-
ql_dbg(ql_dbg_disc, vha, 0x2102,
"%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
__func__, fcport->port_name, fcport->disc_state,
@@ -1113,8 +1306,10 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
return;
if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
- if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return;
+ }
}
if (fcport->flags & FCF_ASYNC_SENT) {
@@ -1132,7 +1327,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
- qla24xx_async_gidpn(vha, fcport);
+ qla24xx_post_gidpn_work(vha, fcport);
return;
}
@@ -1141,16 +1336,16 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
- fc_port_t *fcport, *f, *tf;
+ fc_port_t *f, *tf;
uint32_t id = 0, mask, rid;
- int rc;
+ unsigned long flags;
switch (ea->event) {
- case FCME_RELOGIN:
case FCME_RSCN:
case FCME_GIDPN_DONE:
case FCME_GPSC_DONE:
case FCME_GPNID_DONE:
+ case FCME_GNNID_DONE:
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
return;
@@ -1171,20 +1366,15 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
return;
switch (ea->id.b.rsvd_1) {
case RSCN_PORT_ADDR:
- fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
- if (!fcport) {
- /* cable moved */
- rc = qla24xx_post_gpnid_work(vha, &ea->id);
- if (rc) {
- ql_log(ql_log_warn, vha, 0xd044,
- "RSCN GPNID work failed %02x%02x%02x\n",
- ea->id.b.domain, ea->id.b.area,
- ea->id.b.al_pa);
- }
- } else {
- ea->fcport = fcport;
- qla24xx_handle_rscn_event(fcport, ea);
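+ /* Coalesce RSCNs: queue a single delayed fabric scan unless one is pending. */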
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (vha->scan.scan_flags == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: schedule\n", __func__);
+ vha->scan.scan_flags |= SF_QUEUED;
+ schedule_delayed_work(&vha->scan.scan_work, 5);
}
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
break;
case RSCN_AREA_ADDR:
case RSCN_DOM_ADDR:
@@ -1227,7 +1417,7 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
qla24xx_handle_gnl_done_event(vha, ea);
break;
case FCME_GPSC_DONE:
- qla24xx_post_upd_fcport_work(vha, ea->fcport);
+ qla24xx_handle_gpsc_event(vha, ea);
break;
case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
qla24xx_handle_plogi_done_event(vha, ea);
@@ -1244,8 +1434,14 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
case FCME_GFFID_DONE:
qla24xx_handle_gffid_event(vha, ea);
break;
- case FCME_DELETE_DONE:
- qla24xx_handle_delete_done_event(vha, ea);
+ case FCME_ADISC_DONE:
+ qla24xx_handle_adisc_event(vha, ea);
+ break;
+ case FCME_GNNID_DONE:
+ qla24xx_handle_gnnid_event(vha, ea);
+ break;
+ case FCME_GFPNID_DONE:
+ qla24xx_handle_gfpnid_event(vha, ea);
break;
default:
BUG_ON(1);
@@ -1327,6 +1523,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
done_free_sp:
sp->free(sp);
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
}
@@ -1368,6 +1565,13 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
sp->name = "abort";
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+
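+ /* Record which request queue issued the command so the abort is routed there. */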
+ if (vha->flags.qpairs_available && cmd_sp->qpair)
+ abt_iocb->u.abt.req_que_no =
+ cpu_to_le16(cmd_sp->qpair->req->id);
+ else
+ abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);
+
sp->done = qla24xx_abort_sp_done;
abt_iocb->timeout = qla24xx_abort_iocb_timeout;
init_completion(&abt_iocb->u.abt.comp);
@@ -1402,6 +1606,9 @@ qla24xx_async_abort_command(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
+ if (vha->flags.qpairs_available && sp->qpair)
+ req = sp->qpair->req;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
@@ -1452,6 +1659,42 @@ static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
port_id_t cid; /* conflict Nport id */
+ u16 lid;
+ struct fc_port *conflict_fcport;
+ unsigned long flags;
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
+ ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1,
+ ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
+
+ if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+ (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
+ ql_dbg(ql_dbg_disc, vha, 0x20ea,
+ "%s %d %8phC Remote is trying to login\n",
+ __func__, __LINE__, fcport->port_name);
+ return;
+ }
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
switch (ea->data[0]) {
case MBS_COMMAND_COMPLETE:
@@ -1467,11 +1710,19 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
qla24xx_post_prli_work(vha, ea->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20ea,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, ea->fcport->port_name);
+ "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->loop_id, ea->fcport->d_id.b24);
+
+ set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ ea->fcport->loop_id = FC_NO_LOOP_ID;
ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
ea->fcport->logout_on_delete = 1;
ea->fcport->send_els_logo = 0;
+ ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
}
break;
@@ -1513,8 +1764,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
ea->fcport->d_id.b.al_pa);
- qla2x00_clear_loop_id(ea->fcport);
- qla24xx_post_gidpn_work(vha, ea->fcport);
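+ /* Firmware says the loop id is taken; find who owns it before retrying. */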
+ lid = ea->iop[1] & 0xffff;
+ qlt_find_sess_invalidate_other(vha,
+ wwn_to_u64(ea->fcport->port_name),
+ ea->fcport->d_id, lid, &conflict_fcport);
+
+ if (conflict_fcport) {
+ /*
+ * Another fcport share the same loop_id/nport id.
+ * Conflict fcport needs to finish cleanup before this
+ * fcport can proceed to login.
+ */
+ conflict_fcport->conflict = ea->fcport;
+ ea->fcport->login_pause = 1;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->d_id.b24, lid);
+ qla2x00_clear_loop_id(ea->fcport);
+ qla24xx_post_gidpn_work(vha, ea->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->d_id.b24, lid);
+
+ qla2x00_clear_loop_id(ea->fcport);
+ set_bit(lid, vha->hw->loop_id_map);
+ ea->fcport->loop_id = lid;
+ ea->fcport->keep_nport_handle = 0;
+ qlt_schedule_sess_for_deletion(ea->fcport);
+ }
break;
}
return;
@@ -2540,70 +2821,27 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
return rval;
}
-void
-qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+static void
+qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
int rval;
- uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
- eft_size, fce_size, mq_size;
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = ha->req_q_map[0];
- struct rsp_que *rsp = ha->rsp_q_map[0];
- if (ha->fw_dump) {
+ if (ha->eft) {
ql_dbg(ql_dbg_init, vha, 0x00bd,
- "Firmware dump already allocated.\n");
+ "%s: Offload Mem is already allocated.\n",
+ __func__);
return;
}
- ha->fw_dumped = 0;
- ha->fw_dump_cap_flags = 0;
- dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
- req_q_size = rsp_q_size = 0;
-
- if (IS_QLA27XX(ha))
- goto try_fce;
-
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- fixed_size = sizeof(struct qla2100_fw_dump);
- } else if (IS_QLA23XX(ha)) {
- fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
- mem_size = (ha->fw_memory_size - 0x11000 + 1) *
- sizeof(uint16_t);
- } else if (IS_FWI2_CAPABLE(ha)) {
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
- fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
- else if (IS_QLA81XX(ha))
- fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
- else if (IS_QLA25XX(ha))
- fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
- else
- fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
-
- mem_size = (ha->fw_memory_size - 0x100000 + 1) *
- sizeof(uint32_t);
- if (ha->mqenable) {
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
- mq_size = sizeof(struct qla2xxx_mq_chain);
- /*
- * Allocate maximum buffer size for all queues.
- * Resizing must be done at end-of-dump processing.
- */
- mq_size += ha->max_req_queues *
- (req->length * sizeof(request_t));
- mq_size += ha->max_rsp_queues *
- (rsp->length * sizeof(response_t));
- }
- if (ha->tgt.atio_ring)
- mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ if (IS_FWI2_CAPABLE(ha)) {
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
!IS_QLA27XX(ha))
goto try_eft;
-try_fce:
if (ha->fce)
dma_free_coherent(&ha->pdev->dev,
FCE_SIZE, ha->fce, ha->fce_dma);
@@ -2631,7 +2869,6 @@ try_fce:
ql_dbg(ql_dbg_init, vha, 0x00c0,
"Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
- fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
ha->flags.fce_enabled = 1;
ha->fce_dma = tc_dma;
ha->fce = tc;
@@ -2648,7 +2885,7 @@ try_eft:
ql_log(ql_log_warn, vha, 0x00c1,
"Unable to allocate (%d KB) for EFT.\n",
EFT_SIZE / 1024);
- goto cont_alloc;
+ goto eft_err;
}
rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
@@ -2657,17 +2894,76 @@ try_eft:
"Unable to initialize EFT (%d).\n", rval);
dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
tc_dma);
- goto cont_alloc;
+ goto eft_err;
}
ql_dbg(ql_dbg_init, vha, 0x00c3,
"Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
- eft_size = EFT_SIZE;
ha->eft_dma = tc_dma;
ha->eft = tc;
}
-cont_alloc:
+eft_err:
+ return;
+}
+
+void
+qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+{
+ uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
+ eft_size, fce_size, mq_size;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+ struct qla2xxx_fw_dump *fw_dump;
+
+ dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+ req_q_size = rsp_q_size = 0;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ fixed_size = sizeof(struct qla2100_fw_dump);
+ } else if (IS_QLA23XX(ha)) {
+ fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
+ mem_size = (ha->fw_memory_size - 0x11000 + 1) *
+ sizeof(uint16_t);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
+ else if (IS_QLA81XX(ha))
+ fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
+ else if (IS_QLA25XX(ha))
+ fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
+ else
+ fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+
+ mem_size = (ha->fw_memory_size - 0x100000 + 1) *
+ sizeof(uint32_t);
+ if (ha->mqenable) {
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ mq_size = sizeof(struct qla2xxx_mq_chain);
+ /*
+ * Allocate maximum buffer size for all queues.
+ * Resizing must be done at end-of-dump processing.
+ */
+ mq_size += ha->max_req_queues *
+ (req->length * sizeof(request_t));
+ mq_size += ha->max_rsp_queues *
+ (rsp->length * sizeof(response_t));
+ }
+ if (ha->tgt.atio_ring)
+ mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ /* Allocate memory for Fibre Channel Event Buffer. */
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha))
+ goto try_eft;
+
+ fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+try_eft:
+ ql_dbg(ql_dbg_init, vha, 0x00c3,
+ "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+ eft_size = EFT_SIZE;
+ }
+
if (IS_QLA27XX(ha)) {
if (!ha->fw_dump_template) {
ql_log(ql_log_warn, vha, 0x00ba,
@@ -2695,51 +2991,44 @@ cont_alloc:
ha->exlogin_size;
allocate:
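+ /* Reallocate the dump buffer only if none exists or the required size changed. */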
- ha->fw_dump = vmalloc(dump_size);
- if (!ha->fw_dump) {
- ql_log(ql_log_warn, vha, 0x00c4,
- "Unable to allocate (%d KB) for firmware dump.\n",
- dump_size / 1024);
-
- if (ha->fce) {
- dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
- ha->fce_dma);
- ha->fce = NULL;
- ha->fce_dma = 0;
- }
-
- if (ha->eft) {
- dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
- ha->eft_dma);
- ha->eft = NULL;
- ha->eft_dma = 0;
+ if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
+ fw_dump = vmalloc(dump_size);
+ if (!fw_dump) {
+ ql_log(ql_log_warn, vha, 0x00c4,
+ "Unable to allocate (%d KB) for firmware dump.\n",
+ dump_size / 1024);
+ } else {
+ if (ha->fw_dump)
+ vfree(ha->fw_dump);
+ ha->fw_dump = fw_dump;
+
+ ha->fw_dump_len = dump_size;
+ ql_dbg(ql_dbg_init, vha, 0x00c5,
+ "Allocated (%d KB) for firmware dump.\n",
+ dump_size / 1024);
+
+ if (IS_QLA27XX(ha))
+ return;
+
+ ha->fw_dump->signature[0] = 'Q';
+ ha->fw_dump->signature[1] = 'L';
+ ha->fw_dump->signature[2] = 'G';
+ ha->fw_dump->signature[3] = 'C';
+ ha->fw_dump->version = htonl(1);
+
+ ha->fw_dump->fixed_size = htonl(fixed_size);
+ ha->fw_dump->mem_size = htonl(mem_size);
+ ha->fw_dump->req_q_size = htonl(req_q_size);
+ ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
+
+ ha->fw_dump->eft_size = htonl(eft_size);
+ ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
+ ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
+
+ ha->fw_dump->header_size =
+ htonl(offsetof(struct qla2xxx_fw_dump, isp));
}
- return;
}
- ha->fw_dump_len = dump_size;
- ql_dbg(ql_dbg_init, vha, 0x00c5,
- "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
-
- if (IS_QLA27XX(ha))
- return;
-
- ha->fw_dump->signature[0] = 'Q';
- ha->fw_dump->signature[1] = 'L';
- ha->fw_dump->signature[2] = 'G';
- ha->fw_dump->signature[3] = 'C';
- ha->fw_dump->version = htonl(1);
-
- ha->fw_dump->fixed_size = htonl(fixed_size);
- ha->fw_dump->mem_size = htonl(mem_size);
- ha->fw_dump->req_q_size = htonl(req_q_size);
- ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
-
- ha->fw_dump->eft_size = htonl(eft_size);
- ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
- ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
-
- ha->fw_dump->header_size =
- htonl(offsetof(struct qla2xxx_fw_dump, isp));
}
static int
@@ -3065,9 +3354,12 @@ enable_82xx_npiv:
if (rval != QLA_SUCCESS)
goto failed;
- if (!fw_major_version && ql2xallocfwdump
- && !(IS_P3P_TYPE(ha)))
+ if (!fw_major_version && !(IS_P3P_TYPE(ha)))
+ qla2x00_alloc_offload_mem(vha);
+
+ if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
qla2x00_alloc_fw_dump(vha);
+
} else {
goto failed;
}
@@ -3278,6 +3570,12 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] |= BIT_4;
else
ha->fw_options[2] &= ~BIT_4;
+
+ /* Reserve 1/2 of emergency exchanges for ELS. */
+ if (qla2xuseresexchforels)
+ ha->fw_options[2] |= BIT_8;
+ else
+ ha->fw_options[2] &= ~BIT_8;
}
ql_dbg(ql_dbg_init, vha, 0x00e8,
@@ -3671,6 +3969,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
port_id_t id;
+ unsigned long flags;
/* Get host addresses. */
rval = qla2x00_get_adapter_id(vha,
@@ -3752,7 +4051,9 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
id.b.area = area;
id.b.al_pa = al_pa;
id.b.rsvd_1 = 0;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_update_host_map(vha, id);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!vha->flags.init_done)
ql_log(ql_log_info, vha, 0x2010,
@@ -4293,6 +4594,21 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
} else if (ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
+ if (ha->flags.rida_fmt2) {
+ /* With Rida Format 2, the login is already triggered.
+ * We know who is on the other side of the wire.
+ * No need to do a login to find out, or to drop into
+ * qla2x00_configure_local_loop().
+ */
+ clear_bit(LOCAL_LOOP_UPDATE, &flags);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ } else {
+ if (qla_tgt_mode_enabled(vha)) {
+ /* allow the other side to start the login */
+ clear_bit(LOCAL_LOOP_UPDATE, &flags);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
+ }
} else if (ha->current_topology == ISP_CFG_NL) {
clear_bit(RSCN_UPDATE, &flags);
set_bit(LOCAL_LOOP_UPDATE, &flags);
@@ -4521,6 +4837,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
(uint8_t *)ha->gid_list,
entries * sizeof(struct gid_list_info));
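+ /* Mark every known port missing; the scan below restores ports still present. */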
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
/* Allocate temporary fcport for any new fcports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
@@ -4531,22 +4851,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
}
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
- /*
- * Mark local devices that were present with FCF_DEVICE_LOST for now.
- */
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (atomic_read(&fcport->state) == FCS_ONLINE &&
- fcport->port_type != FCT_BROADCAST &&
- (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
-
- ql_dbg(ql_dbg_disc, vha, 0x2096,
- "Marking port lost loop_id=0x%04x.\n",
- fcport->loop_id);
-
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
- }
- }
-
/* Initiate N2N login. */
if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
rval = qla24xx_n2n_handle_login(vha, new_fcport);
@@ -4589,6 +4893,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport->d_id.b.area = area;
new_fcport->d_id.b.al_pa = al_pa;
new_fcport->loop_id = loop_id;
+ new_fcport->scan_state = QLA_FCPORT_FOUND;
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
@@ -4620,13 +4925,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
fcport->d_id.b24 = new_fcport->d_id.b24;
memcpy(fcport->node_name, new_fcport->node_name,
WWN_SIZE);
-
- if (!fcport->login_succ) {
- vha->fcport_count++;
- fcport->login_succ = 1;
- fcport->disc_state = DSC_LOGIN_COMPLETE;
- }
-
+ fcport->scan_state = QLA_FCPORT_FOUND;
found++;
break;
}
@@ -4637,11 +4936,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
/* Allocate a new replacement fcport. */
fcport = new_fcport;
- if (!fcport->login_succ) {
- vha->fcport_count++;
- fcport->login_succ = 1;
- fcport->disc_state = DSC_LOGIN_COMPLETE;
- }
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
@@ -4662,11 +4956,38 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
/* Base iIDMA settings on HBA port speed. */
fcport->fp_speed = ha->link_data_rate;
- qla2x00_update_fcport(vha, fcport);
-
found_devs++;
}
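+ /* Log out ports that vanished from the loop; start login for ports still present. */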
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if (fcport->scan_state == QLA_FCPORT_SCAN) {
+ if ((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_BROADCAST) {
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+
+ qlt_schedule_sess_for_deletion(fcport);
+ continue;
+ }
+ }
+ }
+
+ if (fcport->scan_state == QLA_FCPORT_FOUND)
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+
cleanup_allocation:
kfree(new_fcport);
@@ -4920,9 +5241,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- fcport->scan_state = QLA_FCPORT_SCAN;
- }
/* Mark the time right before querying FW for connected ports.
* This process is long, asynchronous and by the time it's done,
@@ -4932,7 +5250,17 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
* will be newer than discovery_gen. */
qlt_do_generation_tick(vha, &discovery_gen);
- rval = qla2x00_find_all_fabric_devs(vha);
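+ /* Prefer the asynchronous GPN_FT scan; otherwise fall back to the blocking fabric walk. */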
+ if (USE_ASYNC_SCAN(ha)) {
+ rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI);
+ if (rval)
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ } else {
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->scan_state = QLA_FCPORT_SCAN;
+
+ rval = qla2x00_find_all_fabric_devs(vha);
+ }
if (rval != QLA_SUCCESS)
break;
} while (0);
@@ -5237,9 +5565,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
"%s %d %8phC post del sess\n",
__func__, __LINE__,
fcport->port_name);
-
- qlt_schedule_sess_for_deletion_lock
- (fcport);
+ qlt_schedule_sess_for_deletion(fcport);
continue;
}
}
@@ -5974,6 +6300,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
+ SAVE_TOPO(ha);
+ ha->flags.rida_fmt2 = 0;
ha->flags.n2n_ae = 0;
ha->flags.lip_ae = 0;
ha->current_topology = 0;
@@ -8173,9 +8501,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
int ret = QLA_FUNCTION_FAILED;
struct qla_hw_data *ha = qpair->hw;
- if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created)
- goto fail;
-
qpair->delete_in_progress = 1;
while (atomic_read(&qpair->ref_count))
msleep(500);
@@ -8183,6 +8508,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
ret = qla25xx_delete_req_que(vha, qpair->req);
if (ret != QLA_SUCCESS)
goto fail;
+
ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
if (ret != QLA_SUCCESS)
goto fail;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 17d2c20f1f75..4d32426393c7 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -273,6 +273,7 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
add_timer(&sp->u.iocb_cmd.timer);
sp->free = qla2x00_sp_free;
+ init_completion(&sp->comp);
if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
if (sp->type == SRB_ELS_DCMD)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d810a447cb4a..1b62e943ec49 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2158,7 +2158,9 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
skip_cmd_array:
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
- if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ if (qpair->use_shadow_reg)
+ cnt = *req->out_ptr;
+ else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha))
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -2392,26 +2394,13 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = sp->vha;
- struct qla_hw_data *ha = vha->hw;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- unsigned long flags = 0;
ql_dbg(ql_dbg_io, vha, 0x3069,
"%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- /* Abort the exchange */
- spin_lock_irqsave(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
- ql_dbg(ql_dbg_io, vha, 0x3070,
- "mbx abort_command failed.\n");
- } else {
- ql_dbg(ql_dbg_io, vha, 0x3071,
- "mbx abort_command success.\n");
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
complete(&lio->u.els_logo.comp);
}
@@ -2631,7 +2620,7 @@ qla2x00_els_dcmd2_sp_done(void *ptr, int res)
struct scsi_qla_host *vha = sp->vha;
ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
- "%s ELS hdl=%x, portid=%06x done %8pC\n",
+ "%s ELS hdl=%x, portid=%06x done %8phC\n",
sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
complete(&lio->u.els_plogi.comp);
@@ -3286,7 +3275,9 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
- abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ abt_iocb->handle =
+ cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
+ aio->u.abt.cmd_hndl));
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->handle_to_abort =
cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
@@ -3294,7 +3285,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
abt_iocb->vp_index = vha->vp_idx;
- abt_iocb->req_que_no = cpu_to_le16(req->id);
+ abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
/* Send the command to the firmware */
wmb();
}
@@ -3381,6 +3372,40 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
return rval;
}
+static void
+qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
+{
+ int map, pos;
+
+ vce->entry_type = VP_CTRL_IOCB_TYPE;
+ vce->handle = sp->handle;
+ vce->entry_count = 1;
+ vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
+ vce->vp_count = cpu_to_le16(1);
+
+ /*
+ * The index map in firmware starts at 1, so decrement the
+ * index; this is OK since index 0 is never used.
+ */
+ map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
+ pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
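+ /* e.g. vp_index 10 -> map = (10-1)/8 = 1, pos = (10-1)&7 = 1 */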
+ vce->vp_idx_map[map] |= 1 << pos;
+}
+
+static void
+qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+ logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags =
+ cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
+
+ logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+ logio->port_id[1] = sp->fcport->d_id.b.area;
+ logio->port_id[2] = sp->fcport->d_id.b.domain;
+ logio->vp_index = sp->fcport->vha->vp_idx;
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -3459,6 +3484,12 @@ qla2x00_start_sp(srb_t *sp)
case SRB_NACK_LOGO:
qla2x00_send_notify_ack_iocb(sp, pkt);
break;
+ case SRB_CTRL_VP:
+ qla25xx_ctrlvp_iocb(sp, pkt);
+ break;
+ case SRB_PRLO_CMD:
+ qla24xx_prlo_iocb(sp, pkt);
+ break;
default:
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2fd79129bb2a..14109d86c3f6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -809,6 +809,7 @@ skip_rio:
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
+ SAVE_TOPO(ha);
ha->flags.n2n_ae = 0;
ha->flags.lip_ae = 0;
ha->current_topology = 0;
@@ -922,7 +923,6 @@ skip_rio:
set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
- ha->flags.gpsc_supported = 1;
vha->flags.management_server_logged_in = 0;
break;
@@ -1009,7 +1009,7 @@ skip_rio:
if (qla_ini_mode_enabled(vha)) {
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
fcport->logout_on_delete = 0;
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
break;
@@ -1059,8 +1059,7 @@ global_port_update:
* Mark all devices as missing so we will login again.
*/
atomic_set(&vha->loop_state, LOOP_UP);
-
- qla2x00_mark_all_devices_lost(vha, 1);
+ vha->scan.scan_retry = 0;
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
@@ -1202,6 +1201,7 @@ global_port_update:
qla2xxx_wake_dpc(vha);
}
}
+ /* fall through */
case MBA_IDC_COMPLETE:
if (ha->notify_lb_portup_comp && !vha->vp_idx)
complete(&ha->lb_portup_comp);
@@ -1574,7 +1574,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
/* borrowing sts_entry_24xx.comp_status.
same location as ct_entry_24xx.comp_status
*/
- res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
+ res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
(struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
sp->name);
sp->done(sp, res);
@@ -1769,7 +1769,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- /* drop through */
+ /* fall through */
default:
data[0] = MBS_COMMAND_ERROR;
break;
@@ -1936,6 +1936,37 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
sp->done(sp, ret);
}
+static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
+ struct vp_ctrl_entry_24xx *vce)
+{
+ const char func[] = "CTRLVP-IOCB";
+ srb_t *sp;
+ int rval = QLA_SUCCESS;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
+ if (!sp)
+ return;
+
+ if (vce->entry_status != 0) {
+ ql_dbg(ql_dbg_vport, vha, 0x10c4,
+ "%s: Failed to complete IOCB -- error status (%x)\n",
+ sp->name, vce->entry_status);
+ rval = QLA_FUNCTION_FAILED;
+ } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
+ ql_dbg(ql_dbg_vport, vha, 0x10c5,
+ "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
+ sp->name, le16_to_cpu(vce->comp_status),
+ le16_to_cpu(vce->vp_idx_failed));
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_vport, vha, 0x10c6,
+ "Done %s.\n", __func__);
+ }
+
+ sp->rc = rval;
+ sp->done(sp, rval);
+}
+
/**
* qla2x00_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
@@ -2369,7 +2400,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
int res = 0;
uint16_t state_flags = 0;
uint16_t retry_delay = 0;
- uint8_t no_logout = 0;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -2640,7 +2670,6 @@ check_scsi_status:
break;
case CS_PORT_LOGGED_OUT:
- no_logout = 1;
case CS_PORT_CONFIG_CHG:
case CS_PORT_BUSY:
case CS_INCOMPLETE:
@@ -2671,11 +2700,8 @@ check_scsi_status:
port_state_str[atomic_read(&fcport->state)],
comp_status);
- if (no_logout)
- fcport->logout_on_delete = 0;
-
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
break;
@@ -2972,9 +2998,9 @@ process_err:
(response_t *)pkt);
break;
} else {
- /* drop through */
qlt_24xx_process_atio_queue(vha, 1);
}
+ /* fall through */
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
@@ -3005,6 +3031,10 @@ process_err:
qla24xx_mbx_iocb_entry(vha, rsp->req,
(struct mbx_24xx_entry *)pkt);
break;
+ case VP_CTRL_IOCB_TYPE:
+ qla_ctrlvp_completed(vha, rsp->req,
+ (struct vp_ctrl_entry_24xx *)pkt);
+ break;
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index cb717d47339f..7397aeddd96c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -17,6 +17,7 @@ static struct mb_cmd_name {
{MBC_GET_PORT_DATABASE, "GPDB"},
{MBC_GET_ID_LIST, "GIDList"},
{MBC_GET_LINK_PRIV_STATS, "Stats"},
+ {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
};
static const char *mb_to_str(uint16_t cmd)
@@ -3731,6 +3732,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
unsigned long flags;
int found;
port_id_t id;
+ struct fc_port *fcport;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
"Entered %s.\n", __func__);
@@ -3753,7 +3755,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
"Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
-
+ ha->current_topology = ISP_CFG_NL;
qlt_update_host_map(vha, id);
} else if (rptid_entry->format == 1) {
@@ -3797,6 +3799,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
return;
}
+ ha->flags.gpsc_supported = 1;
+ ha->current_topology = ISP_CFG_F;
/* buffer to buffer credit flag */
vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
@@ -3862,6 +3866,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->u.f2.port_name);
/* N2N. direct connect */
+ ha->current_topology = ISP_CFG_N;
+ ha->flags.rida_fmt2 = 1;
vha->d_id.b.domain = rptid_entry->port_id[2];
vha->d_id.b.area = rptid_entry->port_id[1];
vha->d_id.b.al_pa = rptid_entry->port_id[0];
@@ -3869,6 +3875,40 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
spin_lock_irqsave(&ha->vport_slock, flags);
qlt_update_vp_map(vha, SET_AL_PA);
spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ rptid_entry->u.f2.port_name, 1);
+
+ if (fcport) {
+ fcport->plogi_nack_done_deadline = jiffies + HZ;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC login\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ }
+ } else {
+ id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0];
+ id.b.area = rptid_entry->u.f2.remote_nport_id[1];
+ id.b.domain = rptid_entry->u.f2.remote_nport_id[2];
+ qla24xx_post_newsess_work(vha, &id,
+ rptid_entry->u.f2.port_name,
+ rptid_entry->u.f2.node_name,
+ NULL,
+ FC4_TYPE_UNKNOWN);
+ }
}
}
@@ -3945,83 +3985,6 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
}
/*
- * qla24xx_control_vp
- * Enable a virtual port for given host
- *
- * Input:
- * ha = adapter block pointer.
- * vhba = virtual adapter (unused)
- * index = index number for enabled VP
- *
- * Returns:
- * qla2xxx local function return status code.
- *
- * Context:
- * Kernel context.
- */
-int
-qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
-{
- int rval;
- int map, pos;
- struct vp_ctrl_entry_24xx *vce;
- dma_addr_t vce_dma;
- struct qla_hw_data *ha = vha->hw;
- int vp_index = vha->vp_idx;
- struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
-
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
- "Entered %s enabling index %d.\n", __func__, vp_index);
-
- if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
- return QLA_PARAMETER_ERROR;
-
- vce = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
- if (!vce) {
- ql_log(ql_log_warn, vha, 0x10c2,
- "Failed to allocate VP control IOCB.\n");
- return QLA_MEMORY_ALLOC_FAILED;
- }
-
- vce->entry_type = VP_CTRL_IOCB_TYPE;
- vce->entry_count = 1;
- vce->command = cpu_to_le16(cmd);
- vce->vp_count = cpu_to_le16(1);
-
- /* index map in firmware starts with 1; decrement index
- * this is ok as we never use index 0
- */
- map = (vp_index - 1) / 8;
- pos = (vp_index - 1) & 7;
- mutex_lock(&ha->vport_lock);
- vce->vp_idx_map[map] |= 1 << pos;
- mutex_unlock(&ha->vport_lock);
-
- rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c3,
- "Failed to issue VP control IOCB (%x).\n", rval);
- } else if (vce->entry_status != 0) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c4,
- "Failed to complete IOCB -- error status (%x).\n",
- vce->entry_status);
- rval = QLA_FUNCTION_FAILED;
- } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c5,
- "Failed to complete IOCB -- completion status (%x).\n",
- le16_to_cpu(vce->comp_status));
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
- "Done %s.\n", __func__);
- }
-
- dma_pool_free(ha->s_dma_pool, vce, vce_dma);
-
- return rval;
-}
-
-/*
* qla2x00_send_change_request
* Receive or disable RSCN request from fabric controller
*
@@ -6160,8 +6123,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
}
/* Check for logged in state. */
- if (current_login_state != PDS_PRLI_COMPLETE &&
- last_login_state != PDS_PRLI_COMPLETE) {
+ if (current_login_state != PDS_PRLI_COMPLETE) {
ql_dbg(ql_dbg_mbx, vha, 0x119a,
"Unable to verify login-state (%x/%x) for loop_id %x.\n",
current_login_state, last_login_state, fcport->loop_id);
@@ -6350,3 +6312,32 @@ qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
return rval;
}
+
+int qla24xx_res_count_wait(struct scsi_qla_host *vha,
+ uint16_t *out_mb, int out_mb_sz)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ mbx_cmd_t mc;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: fail\n", __func__);
+ } else {
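+ /* copy at most SIZEOF_IOCB_MB_REG bytes, i.e. min(out_mb_sz, SIZEOF_IOCB_MB_REG) */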
+ if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
+ memcpy(out_mb, mc.mb, out_mb_sz);
+ else
+ memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
+
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: done\n", __func__);
+ }
+done:
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index bd9f14bf7ac2..e965b16f21e3 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -50,10 +50,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
list_add_tail(&vha->list, &ha->vp_list);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_update_vp_map(vha, SET_VP_IDX);
-
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
mutex_unlock(&ha->vport_lock);
return vp_id;
@@ -158,9 +159,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
/* Remove port id from vp target map */
- spin_lock_irqsave(&vha->hw->vport_slock, flags);
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
qlt_update_vp_map(vha, RESET_AL_PA);
- spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
qla2x00_mark_vp_devices_dead(vha);
atomic_set(&vha->vp_state, VP_FAILED);
@@ -264,13 +265,20 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
case MBA_LIP_RESET:
case MBA_POINT_TO_POINT:
case MBA_CHG_IN_CONNECTION:
- case MBA_PORT_UPDATE:
- case MBA_RSCN_UPDATE:
ql_dbg(ql_dbg_async, vha, 0x5024,
"Async_event for VP[%d], mb=0x%x vha=%p.\n",
i, *mb, vha);
qla2x00_async_event(vha, rsp, mb);
break;
+ case MBA_PORT_UPDATE:
+ case MBA_RSCN_UPDATE:
+ if ((mb[3] & 0xff) == vha->vp_idx) {
+ ql_dbg(ql_dbg_async, vha, 0x5024,
+ "Async_event for VP[%d], mb=0x%x vha=%p\n",
+ i, *mb, vha);
+ qla2x00_async_event(vha, rsp, mb);
+ }
+ break;
}
spin_lock_irqsave(&ha->vport_slock, flags);
@@ -319,8 +327,6 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
"Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
- qla2x00_do_work(vha);
-
/* Check if Fw is ready to configure VP first */
if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
@@ -343,15 +349,19 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
"FCPort update end.\n");
}
- if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
- !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
- atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
+ !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
+ atomic_read(&vha->loop_state) != LOOP_DOWN) {
- ql_dbg(ql_dbg_dpc, vha, 0x4018,
- "Relogin needed scheduled.\n");
- qla2x00_relogin(vha);
- ql_dbg(ql_dbg_dpc, vha, 0x4019,
- "Relogin needed end.\n");
+ if (!vha->relogin_jif ||
+ time_after_eq(jiffies, vha->relogin_jif)) {
+ vha->relogin_jif = jiffies + HZ;
+ clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
+ ql_dbg(ql_dbg_dpc, vha, 0x4018,
+ "Relogin needed scheduled.\n");
+ qla24xx_post_relogin_work(vha);
+ }
}
if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
@@ -475,7 +485,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
"Couldn't allocate vp_id.\n");
goto create_vhost_failed;
}
- vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
+ vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
vha->dpc_flags = 0L;
@@ -569,14 +579,16 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
- int ret = -1;
+ int ret = QLA_SUCCESS;
- if (req) {
+ if (req && vha->flags.qpairs_req_created) {
req->options |= BIT_0;
ret = qla25xx_init_req_que(vha, req);
- }
- if (ret == QLA_SUCCESS)
+ if (ret != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+
qla25xx_free_req_que(vha, req);
+ }
return ret;
}
@@ -584,14 +596,16 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
- int ret = -1;
+ int ret = QLA_SUCCESS;
- if (rsp) {
+ if (rsp && vha->flags.qpairs_rsp_created) {
rsp->options |= BIT_0;
ret = qla25xx_init_rsp_que(vha, rsp);
- }
- if (ret == QLA_SUCCESS)
+ if (ret != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+
qla25xx_free_rsp_que(vha, rsp);
+ }
return ret;
}
@@ -884,3 +898,79 @@ que_failed:
failed:
return 0;
}
+
+static void qla_ctrlvp_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+
+ complete(&sp->comp);
+ /* Don't free sp here; let the caller do the free. */
+}
+
+/**
+ * qla24xx_control_vp() - Enable a virtual port for a given host
+ * @vha: adapter block pointer
+ * @cmd: command type to be sent to enable the virtual port
+ *
+ * Return: qla2xxx local function return status code.
+ */
+int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct qla_hw_data *ha = vha->hw;
+ int vp_index = vha->vp_idx;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ srb_t *sp;
+
+ ql_dbg(ql_dbg_vport, vha, 0x10c1,
+ "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);
+
+ if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
+ return QLA_PARAMETER_ERROR;
+
+ sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CTRL_VP;
+ sp->name = "ctrl_vp";
+ sp->done = qla_ctrlvp_sp_done;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
+ sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "%s: %s Failed submission. %x.\n",
+ __func__, sp->name, rval);
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
+ sp->name, sp->handle);
+
+ wait_for_completion(&sp->comp);
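+ /* sp->rc was set by qla_ctrlvp_completed() before it signaled sp->comp */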
+ rval = sp->rc;
+ switch (rval) {
+ case QLA_FUNCTION_TIMEOUT:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
+ __func__, sp->name, rval);
+ break;
+ case QLA_SUCCESS:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
+ __func__, sp->name);
+ goto done_free_sp;
+ default:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
+ __func__, sp->name, rval);
+ goto done_free_sp;
+ }
+done:
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 0aa9c38bf347..525ac35a757b 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -11,8 +11,6 @@
#include "qla_def.h"
#include "qla_gbl.h"
-#include <linux/delay.h>
-
#define TIMEOUT_100_MS 100
static const uint32_t qla8044_reg_tbl[] = {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 46f2d0cf7c0d..12ee6e02d146 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -277,6 +277,12 @@ MODULE_PARM_DESC(ql2xenablemsix,
" 1 -- enable MSI-X interrupt mechanism.\n"
" 2 -- enable MSI interrupt mechanism.\n");
+int qla2xuseresexchforels;
+module_param(qla2xuseresexchforels, int, 0444);
+MODULE_PARM_DESC(qla2xuseresexchforels,
+ "Reserve 1/2 of emergency exchanges for ELS.\n"
+ " 0 (default): disabled");
+
/*
* SCSI host template entry points
*/
@@ -294,7 +300,6 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
-static void qla83xx_disable_laser(scsi_qla_host_t *vha);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
@@ -1705,93 +1710,103 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
return QLA_SUCCESS;
}
-void
-qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+static void
+__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
- int que, cnt, status;
+ int cnt, status;
unsigned long flags;
srb_t *sp;
+ scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_cmd *cmd;
uint8_t trace = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (que = 0; que < ha->max_req_queues; que++) {
- req = ha->req_q_map[que];
- if (!req)
- continue;
- if (!req->outstanding_cmds)
- continue;
- for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
- sp = req->outstanding_cmds[cnt];
- if (sp) {
- req->outstanding_cmds[cnt] = NULL;
- if (sp->cmd_type == TYPE_SRB) {
- if (sp->type == SRB_NVME_CMD ||
- sp->type == SRB_NVME_LS) {
- sp_get(sp);
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- qla_nvme_abort(ha, sp);
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- } else if (GET_CMD_SP(sp) &&
- !ha->flags.eeh_busy &&
- (!test_bit(ABORT_ISP_ACTIVE,
- &vha->dpc_flags)) &&
- (sp->type == SRB_SCSI_CMD)) {
- /*
- * Don't abort commands in
- * adapter during EEH
- * recovery as it's not
- * accessible/responding.
- *
- * Get a reference to the sp
- * and drop the lock. The
- * reference ensures this
- * sp->done() call and not the
- * call in qla2xxx_eh_abort()
- * ends the SCSI command (with
- * result 'res').
- */
- sp_get(sp);
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- status = qla2xxx_eh_abort(
- GET_CMD_SP(sp));
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- /*
- * Get rid of extra reference
- * if immediate exit from
- * ql2xxx_eh_abort
- */
- if (status == FAILED &&
- (qla2x00_isp_reg_stat(ha)))
- atomic_dec(
- &sp->ref_count);
- }
- sp->done(sp, res);
- } else {
- if (!vha->hw->tgt.tgt_ops || !tgt ||
- qla_ini_mode_enabled(vha)) {
- if (!trace)
- ql_dbg(ql_dbg_tgt_mgt,
- vha, 0xf003,
- "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
- vha->dpc_flags);
- continue;
- }
- cmd = (struct qla_tgt_cmd *)sp;
- qlt_abort_cmd_on_host_reset(cmd->vha,
- cmd);
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ req = qp->req;
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ req->outstanding_cmds[cnt] = NULL;
+ if (sp->cmd_type == TYPE_SRB) {
+ if (sp->type == SRB_NVME_CMD ||
+ sp->type == SRB_NVME_LS) {
+ sp_get(sp);
+ spin_unlock_irqrestore(qp->qp_lock_ptr,
+ flags);
+ qla_nvme_abort(ha, sp);
+ spin_lock_irqsave(qp->qp_lock_ptr,
+ flags);
+ } else if (GET_CMD_SP(sp) &&
+ !ha->flags.eeh_busy &&
+ (!test_bit(ABORT_ISP_ACTIVE,
+ &vha->dpc_flags)) &&
+ (sp->type == SRB_SCSI_CMD)) {
+ /*
+ * Don't abort commands in
+ * adapter during EEH
+ * recovery as it's not
+ * accessible/responding.
+ *
+ * Get a reference to the sp
+ * and drop the lock. The
+ * reference ensures this
+ * sp->done() call and not the
+ * call in qla2xxx_eh_abort()
+ * ends the SCSI command (with
+ * result 'res').
+ */
+ sp_get(sp);
+ spin_unlock_irqrestore(qp->qp_lock_ptr,
+ flags);
+ status = qla2xxx_eh_abort(
+ GET_CMD_SP(sp));
+ spin_lock_irqsave(qp->qp_lock_ptr,
+ flags);
+ /*
+ * Get rid of extra reference
+ * if immediate exit from
+ * ql2xxx_eh_abort
+ */
+ if (status == FAILED &&
+ (qla2x00_isp_reg_stat(ha)))
+ atomic_dec(
+ &sp->ref_count);
}
+ sp->done(sp, res);
+ } else {
+ if (!vha->hw->tgt.tgt_ops || !tgt ||
+ qla_ini_mode_enabled(vha)) {
+ if (!trace)
+ ql_dbg(ql_dbg_tgt_mgt,
+ vha, 0xf003,
+ "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
+ vha->dpc_flags);
+ continue;
+ }
+ cmd = (struct qla_tgt_cmd *)sp;
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
}
}
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+}
+
+void
+qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+{
+ int que;
+ struct qla_hw_data *ha = vha->hw;
+
+ __qla2x00_abort_all_cmds(ha->base_qpair, res);
+
+ for (que = 0; que < ha->max_qpairs; que++) {
+ if (!ha->queue_pair_map[que])
+ continue;
+
+ __qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
+ }
}
static int
@@ -2689,14 +2704,22 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
{
struct scsi_qla_host *vha = container_of(work,
struct scsi_qla_host, iocb_work);
- int cnt = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ int i = 20;
+ unsigned long flags;
- while (!list_empty(&vha->work_list)) {
+ if (test_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
+
+ while (!list_empty(&vha->work_list) && i > 0) {
qla2x00_do_work(vha);
- cnt++;
- if (cnt > 10)
- break;
+ i--;
}
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags);
+ spin_unlock_irqrestore(&vha->work_lock, flags);
}
/*
@@ -2790,6 +2813,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
+ ha->max_exchg = FW_MAX_EXCHANGES_CNT;
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -3011,9 +3035,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
base_vha = qla2x00_create_host(sht, ha);
if (!base_vha) {
ret = -ENOMEM;
- qla2x00_mem_free(ha);
- qla2x00_free_req_que(ha, req);
- qla2x00_free_rsp_que(ha, rsp);
goto probe_hw_failed;
}
@@ -3023,7 +3044,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host = base_vha->host;
base_vha->req = req;
if (IS_QLA2XXX_MIDTYPE(ha))
- base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
+ base_vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
else
base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
base_vha->vp_idx;
@@ -3074,7 +3095,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
- goto probe_init_failed;
+ goto probe_hw_failed;
/* Alloc arrays of request and response ring ptrs */
if (!qla2x00_alloc_queues(ha, req, rsp)) {
@@ -3193,10 +3214,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+ ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+
if (ha->mqenable) {
bool mq = false;
bool startit = false;
- ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
if (QLA_TGT_MODE_ENABLED()) {
mq = true;
@@ -3390,6 +3412,9 @@ probe_failed:
scsi_host_put(base_vha->host);
probe_hw_failed:
+ qla2x00_mem_free(ha);
+ qla2x00_free_req_que(ha, req);
+ qla2x00_free_rsp_que(ha, rsp);
qla2x00_clear_drv_active(ha);
iospace_config_failed:
@@ -3448,8 +3473,13 @@ qla2x00_shutdown(struct pci_dev *pdev)
if (ha->eft)
qla2x00_disable_eft_trace(vha);
- /* Stop currently executing firmware. */
- qla2x00_try_to_stop_firmware(vha);
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (ha->flags.fw_started)
+ qla2x00_abort_isp_cleanup(vha);
+ } else {
+ /* Stop currently executing firmware. */
+ qla2x00_try_to_stop_firmware(vha);
+ }
/* Turn adapter off line */
vha->flags.online = 0;
@@ -3609,6 +3639,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
dma_free_coherent(&ha->pdev->dev,
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
+ vfree(base_vha->scan.l);
+
if (IS_QLAFX00(ha))
qlafx00_driver_shutdown(base_vha, 20);
@@ -3628,10 +3660,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla84xx_put_chip(base_vha);
- /* Laser should be disabled only for ISP2031 */
- if (IS_QLA2031(ha))
- qla83xx_disable_laser(base_vha);
-
/* Disable timer */
if (base_vha->timer_active)
qla2x00_stop_timer(base_vha);
@@ -3692,8 +3720,16 @@ qla2x00_free_device(scsi_qla_host_t *vha)
if (ha->eft)
qla2x00_disable_eft_trace(vha);
- /* Stop currently executing firmware. */
- qla2x00_try_to_stop_firmware(vha);
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (ha->flags.fw_started)
+ qla2x00_abort_isp_cleanup(vha);
+ } else {
+ if (ha->flags.fw_started) {
+ /* Stop currently executing firmware. */
+ qla2x00_try_to_stop_firmware(vha);
+ ha->flags.fw_started = 0;
+ }
+ }
vha->flags.online = 0;
@@ -3833,7 +3869,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = 0;
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
continue;
@@ -4221,6 +4257,9 @@ qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
u32 temp;
*ret_cnt = FW_DEF_EXCHANGES_CNT;
+ if (max_cnt > vha->hw->max_exchg)
+ max_cnt = vha->hw->max_exchg;
+
if (qla_ini_mode_enabled(vha)) {
if (ql2xiniexchg > max_cnt)
ql2xiniexchg = max_cnt;
@@ -4250,8 +4289,8 @@ int
qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
{
int rval;
- u16 size, max_cnt;
- u32 temp;
+ u16 size, max_cnt;
+ u32 actual_cnt, totsz;
struct qla_hw_data *ha = vha->hw;
if (!ha->flags.exchoffld_enabled)
@@ -4268,16 +4307,19 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
return rval;
}
- qla2x00_number_of_exch(vha, &temp, max_cnt);
- temp *= size;
+ qla2x00_number_of_exch(vha, &actual_cnt, max_cnt);
+ ql_log(ql_log_info, vha, 0xd014,
+ "Actual exchange offload count: %d.\n", actual_cnt);
+
+ totsz = actual_cnt * size;
- if (temp != ha->exchoffld_size) {
+ if (totsz != ha->exchoffld_size) {
qla2x00_free_exchoffld_buffer(ha);
- ha->exchoffld_size = temp;
+ ha->exchoffld_size = totsz;
ql_log(ql_log_info, vha, 0xd016,
- "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
- max_cnt, size, temp);
+ "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n",
+ max_cnt, actual_cnt, size, totsz);
ql_log(ql_log_info, vha, 0xd017,
"Exchange Buffers requested size = 0x%x\n",
@@ -4288,7 +4330,21 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
if (!ha->exchoffld_buf) {
ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
- "Failed to allocate memory for exchoffld_buf_dma.\n");
+ "Failed to allocate memory for Exchange Offload.\n");
+
+ if (ha->max_exchg >
+ (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) {
+ ha->max_exchg -= REDUCE_EXCHANGES_CNT;
+ } else if (ha->max_exchg >
+ (FW_DEF_EXCHANGES_CNT + 512)) {
+ ha->max_exchg -= 512;
+ } else {
+ ha->flags.exchoffld_enabled = 0;
+ ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
+ "Disabling Exchange offload due to lack of memory\n");
+ }
+ ha->exchoffld_size = 0;
+
return -ENOMEM;
}
}
@@ -4514,6 +4570,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->qp_list);
INIT_LIST_HEAD(&vha->gnl.fcports);
INIT_LIST_HEAD(&vha->nvme_rport_list);
+ INIT_LIST_HEAD(&vha->gpnid_list);
+ INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
@@ -4531,6 +4589,19 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
return NULL;
}
+ /* todo: what about ext login? */
+ vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp);
+ vha->scan.l = vmalloc(vha->scan.size);
+ if (!vha->scan.l) {
+ ql_log(ql_log_fatal, vha, 0xd04a,
+ "Alloc failed for scan database.\n");
+ dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
+ vha->gnl.l, vha->gnl.ldma);
+ scsi_remove_host(vha->host);
+ return NULL;
+ }
+ INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
+
sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
ql_dbg(ql_dbg_init, vha, 0x0041,
"Allocated the host=%p hw=%p vha=%p dev_name=%s",
@@ -4566,15 +4637,18 @@ int
qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
unsigned long flags;
+ bool q = false;
spin_lock_irqsave(&vha->work_lock, flags);
list_add_tail(&e->list, &vha->work_list);
+
+ if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
+ q = true;
+
spin_unlock_irqrestore(&vha->work_lock, flags);
- if (QLA_EARLY_LINKUP(vha->hw))
- schedule_work(&vha->iocb_work);
- else
- qla2xxx_wake_dpc(vha);
+ if (q)
+ queue_work(vha->hw->wq, &vha->iocb_work);
return QLA_SUCCESS;
}
@@ -4623,6 +4697,7 @@ int qla2x00_post_async_##name##_work( \
e->u.logio.data[0] = data[0]; \
e->u.logio.data[1] = data[1]; \
} \
+ fcport->flags |= FCF_ASYNC_ACTIVE; \
return qla2x00_post_work(vha, e); \
}
@@ -4631,6 +4706,8 @@ qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
+qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
+qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
int
qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
@@ -4699,6 +4776,11 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
struct qlt_plogi_ack_t *pla =
(struct qlt_plogi_ack_t *)e->u.new_sess.pla;
uint8_t free_fcport = 0;
+ u64 wwn;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC enter\n",
+ __func__, __LINE__, e->u.new_sess.port_name);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
@@ -4706,6 +4788,9 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
if (pla) {
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ memcpy(fcport->node_name,
+ pla->iocb.u.isp24.u.plogi.node_name,
+ WWN_SIZE);
qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
/* we took an extra ref_count to prevent PLOGI ACK when
* fcport/sess has not been created.
@@ -4717,9 +4802,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (fcport) {
fcport->d_id = e->u.new_sess.id;
- fcport->scan_state = QLA_FCPORT_FOUND;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI)
+ fcport->fc4_type = FC4_TYPE_FCP_SCSI;
memcpy(fcport->port_name, e->u.new_sess.port_name,
WWN_SIZE);
@@ -4734,7 +4820,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
}
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- /* search again to make sure one else got ahead */
+ /* search again to make sure no one else got ahead */
tfcp = qla2x00_find_fcport_by_wwpn(vha,
e->u.new_sess.port_name, 1);
if (tfcp) {
@@ -4748,20 +4834,82 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
} else {
list_add_tail(&fcport->list, &vha->vp_fcports);
- if (pla) {
- qlt_plogi_ack_link(vha, pla, fcport,
- QLT_PLOGI_LINK_SAME_WWN);
- pla->ref_count--;
- }
+ }
+ if (pla) {
+ qlt_plogi_ack_link(vha, pla, fcport,
+ QLT_PLOGI_LINK_SAME_WWN);
+ pla->ref_count--;
}
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
if (fcport) {
- if (pla)
+ if (N2N_TOPO(vha->hw))
+ fcport->flags &= ~FCF_FABRIC_DEVICE;
+
+ fcport->id_changed = 1;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
+
+ if (pla) {
+ if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) {
+ u16 wd3_lo;
+
+ fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ fcport->local = 0;
+ fcport->loop_id =
+ le16_to_cpu(
+ pla->iocb.u.isp24.nport_handle);
+ fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ wd3_lo =
+ le16_to_cpu(
+ pla->iocb.u.isp24.u.prli.wd3_lo);
+
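+ /* PRLI service params: BIT_7 = confirmed completion allowed; BIT_4 set = target, clear = initiator */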
+ if (wd3_lo & BIT_7)
+ fcport->conf_compl_supported = 1;
+
+ if ((wd3_lo & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+ }
qlt_plogi_ack_unref(vha, pla);
- else
- qla24xx_async_gffid(vha, fcport);
+ } else {
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ tfcp = qla2x00_find_fcport_by_nportid(vha,
+ &e->u.new_sess.id, 1);
+ if (tfcp && (tfcp != fcport)) {
+ /*
+ * We have a conflicting fcport with the same NportID.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC found conflict b4 add. DS %d LS %d\n",
+ __func__, tfcp->port_name, tfcp->disc_state,
+ tfcp->fw_login_state);
+
+ switch (tfcp->disc_state) {
+ case DSC_DELETED:
+ break;
+ case DSC_DELETE_PEND:
+ fcport->login_pause = 1;
+ tfcp->conflict = fcport;
+ break;
+ default:
+ fcport->login_pause = 1;
+ tfcp->conflict = fcport;
+ qlt_schedule_sess_for_deletion(tfcp);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ wwn = wwn_to_u64(fcport->node_name);
+
+ if (!wwn)
+ qla24xx_async_gnnid(vha, fcport);
+ else
+ qla24xx_async_gnl(vha, fcport);
+ }
}
if (free_fcport) {
@@ -4771,6 +4919,20 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
}
}
+static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e)
+{
+ struct srb *sp = e->u.iosb.sp;
+ int rval;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x2043,
+ "%s: %s: Re-issue IOCB failed (%d).\n",
+ __func__, sp->name, rval);
+ qla24xx_sp_unmap(vha, sp);
+ }
+}
+
void
qla2x00_do_work(struct scsi_qla_host *vha)
{
@@ -4824,8 +4986,11 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_GPNID:
qla24xx_async_gpnid(vha, &e->u.gpnid.id);
break;
- case QLA_EVT_GPNID_DONE:
- qla24xx_async_gpnid_done(vha, e->u.iosb.sp);
+ case QLA_EVT_UNMAP:
+ qla24xx_sp_unmap(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_RELOGIN:
+ qla2x00_relogin(vha);
break;
case QLA_EVT_NEW_SESS:
qla24xx_create_new_sess(vha, e);
@@ -4849,6 +5014,30 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_NACK:
qla24xx_do_nack_work(vha, e);
break;
+ case QLA_EVT_ASYNC_PRLO:
+ qla2x00_async_prlo(vha, e->u.logio.fcport);
+ break;
+ case QLA_EVT_ASYNC_PRLO_DONE:
+ qla2x00_async_prlo_done(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
+ case QLA_EVT_GPNFT:
+ qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type);
+ break;
+ case QLA_EVT_GPNFT_DONE:
+ qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_GNNFT_DONE:
+ qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_GNNID:
+ qla24xx_async_gnnid(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_GFPNID:
+ qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_SP_RETRY:
+ qla_sp_retry(vha, e);
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
@@ -4858,6 +5047,20 @@ qla2x00_do_work(struct scsi_qla_host *vha)
}
}
+int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);
+
+ if (!e) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ return qla2x00_post_work(vha, e);
+}
+
/* Relogins all the fcports of a vport
* Context: dpc thread
*/
@@ -4868,14 +5071,14 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
struct event_arg ea;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- /*
- * If the port is not ONLINE then try to login
- * to it if we haven't run out of retries.
- */
+ /*
+ * If the port is not ONLINE then try to login
+ * to it if we haven't run out of retries.
+ */
if (atomic_read(&fcport->state) != FCS_ONLINE &&
- fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
- fcport->login_retry--;
- if (fcport->flags & FCF_FABRIC_DEVICE) {
+ fcport->login_retry &&
+ !(fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE))) {
+ if (vha->hw->current_topology != ISP_CFG_NL) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
"%s %8phC DS %d LS %d\n", __func__,
fcport->port_name, fcport->disc_state,
@@ -4884,7 +5087,8 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
ea.event = FCME_RELOGIN;
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
- } else {
+ } else if (vha->hw->current_topology == ISP_CFG_NL) {
+ fcport->login_retry--;
status = qla2x00_local_device_login(vha,
fcport);
if (status == QLA_SUCCESS) {
@@ -4912,6 +5116,9 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
}
+
+ ql_dbg(ql_dbg_disc, vha, 0x400e,
+ "Relogin end.\n");
}
/* Schedule work on any of the dpc-workqueues */
@@ -5687,8 +5894,6 @@ qla2x00_do_dpc(void *data)
if (test_bit(UNLOADING, &base_vha->dpc_flags))
break;
- qla2x00_do_work(base_vha);
-
if (IS_P3P_TYPE(ha)) {
if (IS_QLA8044(ha)) {
if (test_and_clear_bit(ISP_UNRECOVERABLE,
@@ -5867,16 +6072,19 @@ qla2x00_do_dpc(void *data)
}
/* Retry each device up to login retry count */
- if ((test_and_clear_bit(RELOGIN_NEEDED,
- &base_vha->dpc_flags)) &&
+ if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
!test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
- ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
- "Relogin scheduled.\n");
- qla2x00_relogin(base_vha);
- ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
- "Relogin end.\n");
+ if (!base_vha->relogin_jif ||
+ time_after_eq(jiffies, base_vha->relogin_jif)) {
+ base_vha->relogin_jif = jiffies + HZ;
+ clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
+
+ ql_dbg(ql_dbg_disc, base_vha, 0x400d,
+ "Relogin scheduled.\n");
+ qla24xx_post_relogin_work(base_vha);
+ }
}
loop_resync_check:
if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
@@ -6135,8 +6343,17 @@ qla2x00_timer(struct timer_list *t)
}
/* Process any deferred work. */
- if (!list_empty(&vha->work_list))
- start_dpc++;
+ if (!list_empty(&vha->work_list)) {
+ unsigned long flags;
+ bool q = false;
+
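+ /* IOCB_WORK_ACTIVE prevents double-queuing; qla2x00_iocb_work_fn() clears it when done */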
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
+ q = true;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ if (q)
+ queue_work(vha->hw->wq, &vha->iocb_work);
+ }
/*
* FC-NVME
@@ -6580,37 +6797,16 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
ha->flags.eeh_busy = 0;
}
-static void
-qla83xx_disable_laser(scsi_qla_host_t *vha)
-{
- uint32_t reg, data, fn;
- struct qla_hw_data *ha = vha->hw;
- struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24;
-
- /* pci func #/port # */
- ql_dbg(ql_dbg_init, vha, 0x004b,
- "Disabling Laser for hba: %p\n", vha);
-
- fn = (RD_REG_DWORD(&isp_reg->ctrl_status) &
- (BIT_15|BIT_14|BIT_13|BIT_12));
-
- fn = (fn >> 12);
-
- if (fn & 1)
- reg = PORT_1_2031;
- else
- reg = PORT_0_2031;
-
- data = LASER_OFF_2031;
-
- qla83xx_wr_reg(vha, reg, data);
-}
-
static int qla2xxx_map_queues(struct Scsi_Host *shost)
{
+ int rc;
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
- return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+ if (USER_CTRL_IRQ(vha->hw))
+ rc = blk_mq_map_queues(&shost->tag_set);
+ else
+ rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+ return rc;
}
static const struct pci_error_handlers qla2xxx_err_handler = {
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index b4336e0cd85f..d2db86ea06b2 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2461,6 +2461,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
sec_mask = 0x1e000;
break;
}
+ /* fall through */
default:
/* Default to 16 kb sector size. */
rest_addr = 0x3fff;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 18069edd4773..fc89af8fe256 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -75,7 +75,8 @@ MODULE_PARM_DESC(ql2xuctrlirq,
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
-static int temp_sam_status = SAM_STAT_BUSY;
+static int qla_sam_status = SAM_STAT_BUSY;
+static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */
/*
* From scsi/fc/fc_fcp.h
@@ -208,7 +209,7 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
host = btree_lookup32(&vha->hw->tgt.host_map, key);
if (!host)
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+ ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
"Unable to find host %06x\n", key);
return host;
@@ -309,17 +310,17 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
if (host != NULL) {
- ql_dbg(ql_dbg_async, vha, 0x502f,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
"Requeuing unknown ATIO_TYPE7 %p\n", u);
qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
} else if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_async, vha, 0x503a,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
"Freeing unknown %s %p, because tgt is being stopped\n",
"ATIO_TYPE7", u);
qlt_send_term_exchange(vha->hw->base_qpair, NULL,
&u->atio, ha_locked, 0);
} else {
- ql_dbg(ql_dbg_async, vha, 0x503d,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
"Reschedule u %p, vha %p, host %p\n", u, vha, host);
if (!queued) {
queued = 1;
@@ -450,6 +451,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe073,
"qla_target(%d):%s: CRC2 Response pkt\n",
vha->vp_idx, __func__);
+ /* fall through */
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
@@ -606,7 +608,7 @@ void qla2x00_async_nack_sp_done(void *s, int res)
__func__, __LINE__,
sp->fcport->port_name,
vha->fcport_count);
-
+ sp->fcport->disc_state = DSC_UPD_FCPORT;
qla24xx_post_upd_fcport_work(vha, sp->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20f5,
@@ -665,7 +667,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
sp->u.iocb_cmd.u.nack.ntfy = ntfy;
-
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_nack_sp_done;
rval = qla2x00_start_sp(sp);
@@ -861,7 +863,10 @@ void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
fcport->loop_id = loop_id;
fcport->d_id = port_id;
- qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
+ if (iocb->u.isp24.status_subcode == ELS_PLOGI)
+ qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
+ else
+ qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
@@ -890,6 +895,17 @@ qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
pla->ref_count, pla, link);
+ if (link == QLT_PLOGI_LINK_CONFLICT) {
+ switch (sess->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ pla->ref_count--;
+ return;
+ default:
+ break;
+ }
+ }
+
if (sess->plogi_link[link])
qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
@@ -954,8 +970,9 @@ static void qlt_free_session_done(struct work_struct *work)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
bool logout_started = false;
- struct event_arg ea;
scsi_qla_host_t *base_vha;
+ struct qlt_plogi_ack_t *own =
+ sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
@@ -971,19 +988,35 @@ static void qlt_free_session_done(struct work_struct *work)
logo.id = sess->d_id;
logo.cmd_count = 0;
+ sess->send_els_logo = 0;
qlt_send_first_logo(vha, &logo);
}
- if (sess->logout_on_delete) {
+ if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
int rc;
- rc = qla2x00_post_async_logout_work(vha, sess, NULL);
- if (rc != QLA_SUCCESS)
- ql_log(ql_log_warn, vha, 0xf085,
- "Schedule logo failed sess %p rc %d\n",
- sess, rc);
- else
- logout_started = true;
+ if (!own ||
+ (own &&
+ (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
+ rc = qla2x00_post_async_logout_work(vha, sess,
+ NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule logo failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ } else if (own && (own->iocb.u.isp24.status_subcode ==
+ ELS_PRLI) && ha->flags.rida_fmt2) {
+ rc = qla2x00_post_async_prlo_work(vha, sess,
+ NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule PRLO failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ }
}
}
@@ -1007,7 +1040,7 @@ static void qlt_free_session_done(struct work_struct *work)
}
ql_dbg(ql_dbg_disc, vha, 0xf087,
- "%s: sess %p logout completed\n",__func__, sess);
+ "%s: sess %p logout completed\n", __func__, sess);
}
if (sess->logo_ack_needed) {
@@ -1033,8 +1066,7 @@ static void qlt_free_session_done(struct work_struct *work)
sess->login_succ = 0;
}
- if (sess->chip_reset != ha->base_qpair->chip_reset)
- qla2x00_clear_loop_id(sess);
+ qla2x00_clear_loop_id(sess);
if (sess->conflict) {
sess->conflict->login_pause = 0;
@@ -1044,8 +1076,6 @@ static void qlt_free_session_done(struct work_struct *work)
}
{
- struct qlt_plogi_ack_t *own =
- sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
struct qlt_plogi_ack_t *con =
sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
struct imm_ntfy_from_isp *iocb;
@@ -1076,6 +1106,7 @@ static void qlt_free_session_done(struct work_struct *work)
sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
}
}
+
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
@@ -1089,14 +1120,24 @@ static void qlt_free_session_done(struct work_struct *work)
wake_up_all(&vha->fcport_waitQ);
base_vha = pci_get_drvdata(ha->pdev);
+
+ sess->free_pending = 0;
+
if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
return;
- if (!tgt || !tgt->tgt_stop) {
- memset(&ea, 0, sizeof(ea));
- ea.event = FCME_DELETE_DONE;
- ea.fcport = sess;
- qla2x00_fcport_event_handler(vha, &ea);
+ if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
+ switch (vha->host->active_mode) {
+ case MODE_INITIATOR:
+ case MODE_DUAL:
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ break;
+ case MODE_TARGET:
+ default:
+ /* no-op */
+ break;
+ }
}
}
@@ -1104,11 +1145,20 @@ static void qlt_free_session_done(struct work_struct *work)
void qlt_unreg_sess(struct fc_port *sess)
{
struct scsi_qla_host *vha = sess->vha;
+ unsigned long flags;
ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
"%s sess %p for deletion %8phC\n",
__func__, sess, sess->port_name);
+ spin_lock_irqsave(&sess->vha->work_lock, flags);
+ if (sess->free_pending) {
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ return;
+ }
+ sess->free_pending = 1;
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
@@ -1175,10 +1225,10 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
}
/* ha->tgt.sess_lock supposed to be held on entry */
-void qlt_schedule_sess_for_deletion(struct fc_port *sess,
- bool immediate)
+void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
struct qla_tgt *tgt = sess->tgt;
+ unsigned long flags;
if (sess->disc_state == DSC_DELETE_PEND)
return;
@@ -1194,27 +1244,28 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess,
return;
}
- sess->disc_state = DSC_DELETE_PEND;
-
if (sess->deleted == QLA_SESS_DELETED)
sess->logout_on_delete = 0;
+ spin_lock_irqsave(&sess->vha->work_lock, flags);
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ return;
+ }
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+
+ sess->disc_state = DSC_DELETE_PEND;
+
qla24xx_chk_fcp_state(sess);
ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
"Scheduling sess %p for deletion\n", sess);
- schedule_work(&sess->del_work);
-}
-
-void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
-{
- unsigned long flags;
- struct qla_hw_data *ha = sess->vha->hw;
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- qlt_schedule_sess_for_deletion(sess, 1);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ /* use cancel_work_sync() to flush any prior work element before re-queueing */
+ cancel_work_sync(&sess->del_work);
+ INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
+ queue_work(sess->vha->hw->wq, &sess->del_work);
}
/* ha->tgt.sess_lock supposed to be held on entry */
@@ -1225,7 +1276,7 @@ static void qlt_clear_tgt_db(struct qla_tgt *tgt)
list_for_each_entry(sess, &vha->vp_fcports, list) {
if (sess->se_sess)
- qlt_schedule_sess_for_deletion(sess, 1);
+ qlt_schedule_sess_for_deletion(sess);
}
/* At this point tgt could be already dead */
@@ -1400,7 +1451,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
sess->local = 1;
- qlt_schedule_sess_for_deletion(sess, false);
+ qlt_schedule_sess_for_deletion(sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
@@ -1560,8 +1611,11 @@ static void qlt_release(struct qla_tgt *tgt)
btree_destroy64(&tgt->lun_qpair_map);
- if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target)
- ha->tgt.tgt_ops->remove_target(vha);
+ if (vha->vp_idx)
+ if (ha->tgt.tgt_ops &&
+ ha->tgt.tgt_ops->remove_target &&
+ vha->vha_tgt.target_lport_ptr)
+ ha->tgt.tgt_ops->remove_target(vha);
vha->vha_tgt.qla_tgt = NULL;
@@ -1976,15 +2030,10 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
"qla_target(%d): task abort for non-existant session\n",
vha->vp_idx);
- rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
- QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
-
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- if (rc != 0) {
- qlt_24xx_send_abts_resp(ha->base_qpair, abts,
- FCP_TMF_REJECTED, false);
- }
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+ false);
return;
}
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -2174,7 +2223,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
"TM response logo %phC status %#x state %#x",
mcmd->sess->port_name, mcmd->fc_tm_rsp,
mcmd->flags);
- qlt_schedule_sess_for_deletion_lock(mcmd->sess);
+ qlt_schedule_sess_for_deletion(mcmd->sess);
} else {
qlt_send_notify_ack(vha->hw->base_qpair,
&mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
@@ -3708,7 +3757,7 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
term = 1;
if (term)
- qlt_term_ctio_exchange(qpair, ctio, cmd, status);
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
return term;
}
@@ -3869,7 +3918,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
"%s %d %8phC post del sess\n",
__func__, __LINE__, cmd->sess->port_name);
- qlt_schedule_sess_for_deletion_lock(cmd->sess);
+ qlt_schedule_sess_for_deletion(cmd->sess);
}
break;
}
@@ -4204,76 +4253,6 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
return cmd;
}
-static void qlt_create_sess_from_atio(struct work_struct *work)
-{
- struct qla_tgt_sess_op *op = container_of(work,
- struct qla_tgt_sess_op, work);
- scsi_qla_host_t *vha = op->vha;
- struct qla_hw_data *ha = vha->hw;
- struct fc_port *sess;
- struct qla_tgt_cmd *cmd;
- unsigned long flags;
- uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
-
- spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_del(&op->cmd_list);
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
-
- if (op->aborted) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
- "sess_op with tag %u is aborted\n",
- op->atio.u.isp24.exchange_addr);
- goto out_term;
- }
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
- "qla_target(%d): Unable to find wwn login"
- " (s_id %x:%x:%x), trying to create it manually\n",
- vha->vp_idx, s_id[0], s_id[1], s_id[2]);
-
- if (op->atio.u.raw.entry_count > 1) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
- "Dropping multy entry atio %p\n", &op->atio);
- goto out_term;
- }
-
- sess = qlt_make_local_sess(vha, s_id);
- /* sess has an extra creation ref. */
-
- if (!sess)
- goto out_term;
- /*
- * Now obtain a pre-allocated session tag using the original op->atio
- * packet header, and dispatch into __qlt_do_work() using the existing
- * process context.
- */
- cmd = qlt_get_tag(vha, sess, &op->atio);
- if (!cmd) {
- struct qla_qpair *qpair = ha->base_qpair;
-
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- qlt_send_busy(qpair, &op->atio, SAM_STAT_BUSY);
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- kfree(op);
- return;
- }
-
- /*
- * __qlt_do_work() will call qlt_put_sess() to release
- * the extra reference taken above by qlt_make_local_sess()
- */
- __qlt_do_work(cmd);
- kfree(op);
- return;
-out_term:
- qlt_send_term_exchange(vha->hw->base_qpair, NULL, &op->atio, 0, 0);
- kfree(op);
-}
-
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
@@ -4283,31 +4262,23 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct fc_port *sess;
struct qla_tgt_cmd *cmd;
unsigned long flags;
+ port_id_t id;
if (unlikely(tgt->tgt_stop)) {
ql_dbg(ql_dbg_io, vha, 0x3061,
"New command while device %p is shutting down\n", tgt);
- return -EFAULT;
+ return -ENODEV;
}
- sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
- if (unlikely(!sess)) {
- struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
- GFP_ATOMIC);
- if (!op)
- return -ENOMEM;
-
- memcpy(&op->atio, atio, sizeof(*atio));
- op->vha = vha;
-
- spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+ id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
+ id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
+ id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
+ if (IS_SW_RESV_ADDR(id))
+ return -EBUSY;
- INIT_WORK(&op->work, qlt_create_sess_from_atio);
- queue_work(qla_tgt_wq, &op->work);
- return 0;
- }
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
+ if (unlikely(!sess))
+ return -EFAULT;
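
Two notes on the block above: the FCP header carries the 24-bit source address in wire order (s_id[0] = domain, s_id[1] = area, s_id[2] = al_pa), which is why the assignments run in reverse, and commands from switch-reserved addresses are now rejected up front instead of triggering on-demand session creation. A hedged sketch of the unpack, assuming the driver's port_id_t layout:

	/* Rebuild a 24-bit FC address from the big-endian wire bytes. */
	static port_id_t s_id_to_port_id(const uint8_t *s_id)
	{
		port_id_t id = { };

		id.b.domain = s_id[0];	/* most significant wire byte */
		id.b.area   = s_id[1];
		id.b.al_pa  = s_id[2];	/* least significant wire byte */
		return id;
	}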
/* Another WWN used to have our s_id. Our PLOGI scheduled its
* session deletion, but it's still in sess_del_work wq */
@@ -4336,7 +4307,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- return -ENOMEM;
+ return -EBUSY;
}
cmd->cmd_in_wq = 1;
@@ -4417,14 +4388,11 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt;
struct fc_port *sess;
u64 unpacked_lun;
int fn;
unsigned long flags;
- tgt = vha->vha_tgt.qla_tgt;
-
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4435,15 +4403,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
unpacked_lun =
scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
- if (!sess) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
- "qla_target(%d): task mgmt fn 0x%x for "
- "non-existant session\n", vha->vp_idx, fn);
- return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
- sizeof(struct atio_from_isp));
- }
-
- if (sess->deleted)
+ if (sess == NULL || sess->deleted)
return -EFAULT;
return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
@@ -4574,7 +4534,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
* might have cleared it when requested this session
* deletion, so don't touch it
*/
- qlt_schedule_sess_for_deletion(other_sess, true);
+ qlt_schedule_sess_for_deletion(other_sess);
} else {
/*
* Another wwn used to have our s_id/loop_id
@@ -4584,11 +4544,10 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
-
other_sess->keep_nport_handle = 1;
- *conflict_sess = other_sess;
- qlt_schedule_sess_for_deletion(other_sess,
- true);
+ if (other_sess->disc_state != DSC_DELETED)
+ *conflict_sess = other_sess;
+ qlt_schedule_sess_for_deletion(other_sess);
}
continue;
}
@@ -4602,7 +4561,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
/* Same loop_id but different s_id
* Ok to kill and logout */
- qlt_schedule_sess_for_deletion(other_sess, true);
+ qlt_schedule_sess_for_deletion(other_sess);
}
}
@@ -4652,6 +4611,138 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
return count;
}
+static int qlt_handle_login(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct fc_port *sess = NULL, *conflict_sess = NULL;
+ uint64_t wwn;
+ port_id_t port_id;
+ uint16_t loop_id, wd3_lo;
+ int res = 0;
+ struct qlt_plogi_ack_t *pla;
+ unsigned long flags;
+
+ wwn = wwn_to_u64(iocb->u.isp24.port_name);
+
+ port_id.b.domain = iocb->u.isp24.port_id[2];
+ port_id.b.area = iocb->u.isp24.port_id[1];
+ port_id.b.al_pa = iocb->u.isp24.port_id[0];
+ port_id.b.rsvd_1 = 0;
+
+ loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
+ /* Mark all stale commands sitting in qla_tgt_wq for deletion */
+ abort_cmds_for_s_id(vha, &port_id);
+
+ if (wwn) {
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ sess = qlt_find_sess_invalidate_other(vha, wwn,
+ port_id, loop_id, &conflict_sess);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ }
+
+ if (IS_SW_RESV_ADDR(port_id)) {
+ res = 1;
+ goto out;
+ }
+
+ pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
+ if (!pla) {
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+
+ if (conflict_sess) {
+ conflict_sess->login_gen++;
+ qlt_plogi_ack_link(vha, pla, conflict_sess,
+ QLT_PLOGI_LINK_CONFLICT);
+ }
+
+ if (!sess) {
+ pla->ref_count++;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, iocb->u.isp24.port_name);
+ if (iocb->u.isp24.status_subcode == ELS_PLOGI)
+ qla24xx_post_newsess_work(vha, &port_id,
+ iocb->u.isp24.port_name,
+ iocb->u.isp24.u.plogi.node_name,
+ pla, FC4_TYPE_UNKNOWN);
+ else
+ qla24xx_post_newsess_work(vha, &port_id,
+ iocb->u.isp24.port_name, NULL,
+ pla, FC4_TYPE_UNKNOWN);
+
+ goto out;
+ }
+
+ qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
+ sess->d_id = port_id;
+ sess->login_gen++;
+
+ if (iocb->u.isp24.status_subcode == ELS_PRLI) {
+ sess->fw_login_state = DSC_LS_PRLI_PEND;
+ sess->local = 0;
+ sess->loop_id = loop_id;
+ sess->d_id = port_id;
+ sess->fw_login_state = DSC_LS_PRLI_PEND;
+ wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
+
+ if (wd3_lo & BIT_7)
+ sess->conf_compl_supported = 1;
+
+ if ((wd3_lo & BIT_4) == 0)
+ sess->port_type = FCT_INITIATOR;
+ else
+ sess->port_type = FCT_TARGET;
+
+ } else
+ sess->fw_login_state = DSC_LS_PLOGI_PEND;
+
+
+ ql_dbg(ql_dbg_disc, vha, 0x20f9,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__, sess->port_name, sess->disc_state);
+
+ switch (sess->disc_state) {
+ case DSC_DELETED:
+ qlt_plogi_ack_unref(vha, pla);
+ break;
+
+ default:
+ /*
+ * Under normal circumstances we want to release nport handle
+ * during LOGO process to avoid nport handle leaks inside FW.
+ * The exception is when LOGO is done while another PLOGI with
+ * the same nport handle is waiting as might be the case here.
+	 * Note: there is always a possibility of a race where session
+ * deletion has already started for other reasons (e.g. ACL
+ * removal) and now PLOGI arrives:
+ * 1. if PLOGI arrived in FW after nport handle has been freed,
+ * FW must have assigned this PLOGI a new/same handle and we
+ * can proceed ACK'ing it as usual when session deletion
+ * completes.
+ * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
+ * bit reached it, the handle has now been released. We'll
+ * get an error when we ACK this PLOGI. Nothing will be sent
+ * back to initiator. Initiator should eventually retry
+ * PLOGI and situation will correct itself.
+ */
+ sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
+ (sess->d_id.b24 == port_id.b24));
+
+ ql_dbg(ql_dbg_disc, vha, 0x20f9,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, sess->port_name);
+
+
+ qlt_schedule_sess_for_deletion(sess);
+ break;
+ }
+out:
+ return res;
+}
+
/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire
*/
@@ -4666,7 +4757,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
uint16_t loop_id;
uint16_t wd3_lo;
int res = 0;
- struct qlt_plogi_ack_t *pla;
unsigned long flags;
wwn = wwn_to_u64(iocb->u.isp24.port_name);
@@ -4690,88 +4780,32 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
*/
switch (iocb->u.isp24.status_subcode) {
case ELS_PLOGI:
+ res = qlt_handle_login(vha, iocb);
+ break;
- /* Mark all stale commands in qla_tgt_wq for deletion */
- abort_cmds_for_s_id(vha, &port_id);
-
- if (wwn) {
- spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
- sess = qlt_find_sess_invalidate_other(vha, wwn,
- port_id, loop_id, &conflict_sess);
- spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
- }
-
- if (IS_SW_RESV_ADDR(port_id)) {
- res = 1;
- break;
- }
-
- pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
- if (!pla) {
- qlt_send_term_imm_notif(vha, iocb, 1);
- break;
- }
-
- res = 0;
+ case ELS_PRLI:
+ if (N2N_TOPO(ha)) {
+ sess = qla2x00_find_fcport_by_wwpn(vha,
+ iocb->u.isp24.port_name, 1);
- if (conflict_sess) {
- conflict_sess->login_gen++;
- qlt_plogi_ack_link(vha, pla, conflict_sess,
- QLT_PLOGI_LINK_CONFLICT);
- }
+ if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
+ __func__, __LINE__,
+ iocb->u.isp24.port_name);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ break;
+ }
- if (!sess) {
- pla->ref_count++;
- qla24xx_post_newsess_work(vha, &port_id,
- iocb->u.isp24.port_name, pla);
- res = 0;
+ res = qlt_handle_login(vha, iocb);
break;
}
- qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
- sess->fw_login_state = DSC_LS_PLOGI_PEND;
- sess->d_id = port_id;
- sess->login_gen++;
-
- switch (sess->disc_state) {
- case DSC_DELETED:
- qlt_plogi_ack_unref(vha, pla);
- break;
-
- default:
- /*
- * Under normal circumstances we want to release nport handle
- * during LOGO process to avoid nport handle leaks inside FW.
- * The exception is when LOGO is done while another PLOGI with
- * the same nport handle is waiting as might be the case here.
- * Note: there is always a possibily of a race where session
- * deletion has already started for other reasons (e.g. ACL
- * removal) and now PLOGI arrives:
- * 1. if PLOGI arrived in FW after nport handle has been freed,
- * FW must have assigned this PLOGI a new/same handle and we
- * can proceed ACK'ing it as usual when session deletion
- * completes.
- * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
- * bit reached it, the handle has now been released. We'll
- * get an error when we ACK this PLOGI. Nothing will be sent
- * back to initiator. Initiator should eventually retry
- * PLOGI and situation will correct itself.
- */
- sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
- (sess->d_id.b24 == port_id.b24));
-
- ql_dbg(ql_dbg_disc, vha, 0x20f9,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, sess->port_name);
-
-
- qlt_schedule_sess_for_deletion_lock(sess);
+ if (IS_SW_RESV_ADDR(port_id)) {
+ res = 1;
break;
}
- break;
-
- case ELS_PRLI:
wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
if (wwn) {
@@ -4782,17 +4816,51 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
if (conflict_sess) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
- "PRLI with conflicting sess %p port %8phC\n",
- conflict_sess, conflict_sess->port_name);
- qlt_send_term_imm_notif(vha, iocb, 1);
- res = 0;
- break;
+ switch (conflict_sess->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
+ "PRLI with conflicting sess %p port %8phC\n",
+ conflict_sess, conflict_sess->port_name);
+ conflict_sess->fw_login_state =
+ DSC_LS_PORT_UNAVAIL;
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ res = 0;
+ break;
+ }
}
if (sess != NULL) {
- if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
- sess->fw_login_state != DSC_LS_PLOGI_COMP) {
+ bool delete = false;
+ spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
+ switch (sess->fw_login_state) {
+ case DSC_LS_PLOGI_PEND:
+ case DSC_LS_PLOGI_COMP:
+ case DSC_LS_PRLI_COMP:
+ break;
+ default:
+ delete = true;
+ break;
+ }
+
+ switch (sess->disc_state) {
+ case DSC_LOGIN_PEND:
+ case DSC_GPDB:
+ case DSC_GPSC:
+ case DSC_UPD_FCPORT:
+ case DSC_LOGIN_COMPLETE:
+ case DSC_ADISC:
+ delete = false;
+ break;
+ default:
+ break;
+ }
+
+ if (delete) {
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
+ flags);
/*
* Impatient initiator sent PRLI before last
* PLOGI could finish. Will force him to re-try,
@@ -4803,6 +4871,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess);
qlt_send_term_imm_notif(vha, iocb, 1);
res = 0;
+ spin_lock_irqsave(&tgt->ha->tgt.sess_lock,
+ flags);
break;
}
@@ -4826,6 +4896,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess->port_type = FCT_INITIATOR;
else
sess->port_type = FCT_TARGET;
+
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
}
res = 1; /* send notify ack */
@@ -4863,7 +4935,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
res = 1;
break;
}
- /* drop through */
+ /* fall through */
case ELS_LOGO:
case ELS_PRLO:
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4892,7 +4964,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
} else {
/* cmd did not go to upper layer. */
if (sess) {
- qlt_schedule_sess_for_deletion_lock(sess);
+ qlt_schedule_sess_for_deletion(sess);
res = 0;
}
/* else logo will be ack */
@@ -4930,6 +5002,10 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
break;
}
+ ql_dbg(ql_dbg_disc, vha, 0xf026,
+ "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
+ vha->vp_idx, iocb->u.isp24.status_subcode, res);
+
return res;
}
@@ -5320,7 +5396,6 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
struct atio_from_isp *atio, uint8_t ha_locked)
{
struct qla_hw_data *ha = vha->hw;
- uint16_t status;
unsigned long flags;
if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
@@ -5328,8 +5403,7 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
- status = temp_sam_status;
- qlt_send_busy(qpair, atio, status);
+ qlt_send_busy(qpair, atio, qla_sam_status);
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -5344,7 +5418,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int rc;
- unsigned long flags;
+ unsigned long flags = 0;
if (unlikely(tgt == NULL)) {
ql_dbg(ql_dbg_tgt, vha, 0x3064,
@@ -5368,8 +5442,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
"sending QUEUE_FULL\n", vha->vp_idx);
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_busy(ha->base_qpair, atio,
- SAM_STAT_TASK_SET_FULL);
+ qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
@@ -5388,42 +5461,37 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
rc = qlt_handle_task_mgmt(vha, atio);
}
if (unlikely(rc != 0)) {
- if (rc == -ESRCH) {
- if (!ha_locked)
- spin_lock_irqsave(&ha->hardware_lock,
- flags);
-
-#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
- qlt_send_busy(ha->base_qpair, atio,
- SAM_STAT_BUSY);
-#else
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ switch (rc) {
+ case -ENODEV:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target\n");
+ break;
+ case -EBADF:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
qlt_send_term_exchange(ha->base_qpair, NULL,
atio, 1, 0);
-#endif
- if (!ha_locked)
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- } else {
- if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_tgt, vha, 0xe059,
- "qla_target: Unable to send "
- "command to target for req, "
- "ignoring.\n");
- } else {
- ql_dbg(ql_dbg_tgt, vha, 0xe05a,
- "qla_target(%d): Unable to send "
- "command to target, sending BUSY "
- "status.\n", vha->vp_idx);
- if (!ha_locked)
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- qlt_send_busy(ha->base_qpair,
- atio, SAM_STAT_BUSY);
- if (!ha_locked)
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- }
+ break;
+ case -EBUSY:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(ha->base_qpair, atio,
+ tc_sam_status);
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(ha->base_qpair, atio,
+ qla_sam_status);
+ break;
}
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
}
break;
@@ -5506,27 +5574,31 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
rc = qlt_handle_cmd_for_atio(vha, atio);
if (unlikely(rc != 0)) {
- if (rc == -ESRCH) {
-#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
- qlt_send_busy(rsp->qpair, atio, 0);
-#else
- qlt_send_term_exchange(rsp->qpair, NULL, atio, 1, 0);
-#endif
- } else {
- if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_tgt, vha, 0xe05f,
- "qla_target: Unable to send "
- "command to target, sending TERM "
- "EXCHANGE for rsp\n");
- qlt_send_term_exchange(rsp->qpair, NULL,
- atio, 1, 0);
- } else {
- ql_dbg(ql_dbg_tgt, vha, 0xe060,
- "qla_target(%d): Unable to send "
- "command to target, sending BUSY "
- "status\n", vha->vp_idx);
- qlt_send_busy(rsp->qpair, atio, 0);
- }
+ switch (rc) {
+ case -ENODEV:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target\n");
+ break;
+ case -EBADF:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
+ qlt_send_term_exchange(rsp->qpair, NULL,
+ atio, 1, 0);
+ break;
+ case -EBUSY:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(rsp->qpair, atio,
+ tc_sam_status);
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(rsp->qpair, atio,
+ qla_sam_status);
+ break;
}
}
}
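
Both rewritten error paths (qlt_24xx_atio_pkt above and qlt_response_pkt here) replace the old -ESRCH special case and its #if 1 fallback with one response per return code. An assumed summary of the mapping, reconstructed only from the two switch statements; the -EBADF case is produced elsewhere in the command path:

	/*
	 * -ENODEV  target is stopping          -> log only, drop the ATIO
	 * -EBADF   stale/aborted exchange      -> send TERM EXCHANGE
	 * -EBUSY   reserved s_id / no free tag -> BUSY with tc_sam_status
	 * default  e.g. -EFAULT, no session    -> BUSY with qla_sam_status
	 */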
@@ -5755,7 +5827,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
unsigned long flags;
u8 newfcport = 0;
- fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
"qla_target(%d): Allocation of tmp FC port failed",
@@ -5784,6 +5856,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
tfcp->port_type = fcport->port_type;
tfcp->supported_classes = fcport->supported_classes;
tfcp->flags |= fcport->flags;
+ tfcp->scan_state = QLA_FCPORT_FOUND;
del = fcport;
fcport = tfcp;
@@ -6445,18 +6518,21 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
qlt_add_target(ha, vha);
}
-void
-qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
+u8
+qlt_rff_id(struct scsi_qla_host *vha)
{
+ u8 fc4_feature = 0;
/*
* FC-4 Feature bit 0 indicates target functionality to the name server.
*/
if (qla_tgt_mode_enabled(vha)) {
- ct_req->req.rff_id.fc4_feature = BIT_0;
+ fc4_feature = BIT_0;
} else if (qla_ini_mode_enabled(vha)) {
- ct_req->req.rff_id.fc4_feature = BIT_1;
+ fc4_feature = BIT_1;
} else if (qla_dual_mode_enabled(vha))
- ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
+ fc4_feature = BIT_0 | BIT_1;
+
+ return fc4_feature;
}
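
qlt_rff_id() now only reports the FC-4 feature bits (BIT_0 for target mode, BIT_1 for initiator mode, both for dual mode) and leaves the CT request layout to the name-server code. A hedged sketch of how a caller might consume it; the field names and the FC4_TYPE_FCP_SCSI constant are assumptions from the driver headers:

	/* Name-server side fills its own RFF_ID request (assumed layout). */
	ct_req->req.rff_id.fc4_feature = qlt_rff_id(vha);
	ct_req->req.rff_id.fc4_type = FC4_TYPE_FCP_SCSI;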
/*
@@ -6546,7 +6622,9 @@ void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb;
+ struct qla_msix_entry *msix = &ha->msix_entries[2];
+ struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
+
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6554,19 +6632,28 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
- icb = (struct init_cb_24xx *)ha->init_cb;
-
- if ((ql2xenablemsix != 0) && IS_ATIO_MSIX_CAPABLE(ha)) {
- struct qla_msix_entry *msix = &ha->msix_entries[2];
-
- icb->msix_atio = cpu_to_le16(msix->entry);
- ql_dbg(ql_dbg_init, vha, 0xf072,
- "Registering ICB vector 0x%x for atio que.\n",
- msix->entry);
- } else if (ql2xenablemsix == 0) {
- icb->firmware_options_2 |= cpu_to_le32(BIT_26);
- ql_dbg(ql_dbg_init, vha, 0xf07f,
- "Registering INTx vector for ATIO.\n");
+ if (ha->flags.msix_enabled) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA2071(ha)) {
+ /* 4 ports Baker: Enable Interrupt Handshake */
+ icb->msix_atio = 0;
+ icb->firmware_options_2 |= BIT_26;
+ } else {
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ icb->firmware_options_2 &= ~BIT_26;
+ }
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "Registering ICB vector 0x%x for atio que.\n",
+ msix->entry);
+ }
+ } else {
+ /* INTx|MSI */
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ icb->msix_atio = 0;
+ icb->firmware_options_2 |= BIT_26;
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "%s: Use INTx for ATIOQ.\n", __func__);
+ }
}
}
@@ -6574,6 +6661,7 @@ void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
struct qla_hw_data *ha = vha->hw;
+ u32 tmp;
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6625,6 +6713,14 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
/* Enable target PRLI control */
nv->firmware_options_2 |= cpu_to_le32(BIT_14);
+
+ if (IS_QLA25XX(ha)) {
+ /* Change Loop-prefer to Pt-Pt */
+ tmp = ~(BIT_4|BIT_5|BIT_6);
+ nv->firmware_options_2 &= cpu_to_le32(tmp);
+ tmp = P2P << 4;
+ nv->firmware_options_2 |= cpu_to_le32(tmp);
+ }
} else {
if (ha->tgt.saved_set) {
nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -6679,6 +6775,7 @@ void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
struct qla_hw_data *ha = vha->hw;
+ u32 tmp;
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6729,6 +6826,12 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
nv->host_p &= cpu_to_le32(~BIT_10);
/* Enable target PRLI control */
nv->firmware_options_2 |= cpu_to_le32(BIT_14);
+
+ /* Change Loop-prefer to Pt-Pt */
+ tmp = ~(BIT_4|BIT_5|BIT_6);
+ nv->firmware_options_2 &= cpu_to_le32(tmp);
+ tmp = P2P << 4;
+ nv->firmware_options_2 |= cpu_to_le32(tmp);
} else {
if (ha->tgt.saved_set) {
nv->exchange_count = ha->tgt.saved_exchange_count;
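
In both nvram_stage1 helpers the added block rewrites the three-bit connection-option field (bits 4-6 of firmware_options_2) so the firmware prefers point-to-point over loop. The same arithmetic on a host-order copy, assuming P2P is the point-to-point code defined in the driver headers:

	u32 opts = le32_to_cpu(nv->firmware_options_2);

	opts &= ~(BIT_4 | BIT_5 | BIT_6);	/* clear topology field */
	opts |= P2P << 4;			/* prefer point-to-point */
	nv->firmware_options_2 = cpu_to_le32(opts);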
@@ -6991,20 +7094,14 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
- unsigned long flags;
- struct qla_hw_data *ha = vha->hw;
if (!vha->d_id.b24) {
- spin_lock_irqsave(&ha->vport_slock, flags);
vha->d_id = id;
qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
} else if (vha->d_id.b24 != id.b24) {
- spin_lock_irqsave(&ha->vport_slock, flags);
qlt_update_vp_map(vha, RESET_AL_PA);
vha->d_id = id;
qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index aba58d3848a6..bb67b5a284a8 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -993,7 +993,7 @@ struct qla_tgt_prm {
/* Check for Switch reserved address */
#define IS_SW_RESV_ADDR(_s_id) \
- ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
+ ((_s_id.b.domain == 0xff) && ((_s_id.b.area & 0xf0) == 0xf0))
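
The old test matched only the domain-controller block 0xFFFCxx; masking the area with 0xf0 widens the check to the whole reserved range 0xFFF0xx-0xFFFFxx, which also covers the well-known fabric addresses. An illustration, assuming the driver's b24 packing (al_pa in the low byte):

	port_id_t id;

	id.b24 = 0xfffc01;	/* domain controller */
	/* IS_SW_RESV_ADDR(id): true with both the old and the new test */

	id.b24 = 0xfffffe;	/* well-known fabric F_Port address */
	/* IS_SW_RESV_ADDR(id): true now; the old area == 0xfc test missed it */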
#define QLA_TGT_XMIT_DATA 1
#define QLA_TGT_XMIT_STATUS 2
@@ -1072,7 +1072,7 @@ extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
extern void qlt_enable_vha(struct scsi_qla_host *);
extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
-extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
+extern u8 qlt_rff_id(struct scsi_qla_host *);
extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *, uint8_t);
extern void qlt_24xx_config_rings(struct scsi_qla_host *);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 733e8dcccf5c..731ca0d8520a 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -526,7 +526,8 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
{
ql_dbg(ql_dbg_misc, vha, 0xd20c,
"%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
- if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
+ switch (ent->t268.buf_type) {
+ case T268_BUF_TYPE_EXTD_TRACE:
if (vha->hw->eft) {
if (buf) {
ent->t268.buf_size = EFT_SIZE;
@@ -538,10 +539,43 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
"%s: missing eft\n", __func__);
qla27xx_skip_entry(ent, buf);
}
- } else {
- ql_dbg(ql_dbg_misc, vha, 0xd02b,
+ break;
+ case T268_BUF_TYPE_EXCH_BUFOFF:
+ if (vha->hw->exchoffld_buf) {
+ if (buf) {
+ ent->t268.buf_size = vha->hw->exchoffld_size;
+ ent->t268.start_addr =
+ vha->hw->exchoffld_buf_dma;
+ }
+ qla27xx_insertbuf(vha->hw->exchoffld_buf,
+ vha->hw->exchoffld_size, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd028,
+ "%s: missing exch offld\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+ break;
+ case T268_BUF_TYPE_EXTD_LOGIN:
+ if (vha->hw->exlogin_buf) {
+ if (buf) {
+ ent->t268.buf_size = vha->hw->exlogin_size;
+ ent->t268.start_addr =
+ vha->hw->exlogin_buf_dma;
+ }
+ qla27xx_insertbuf(vha->hw->exlogin_buf,
+ vha->hw->exlogin_size, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd028,
+ "%s: missing ext login\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+ break;
+
+ default:
+ ql_dbg(ql_dbg_async, vha, 0xd02b,
"%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
qla27xx_skip_entry(ent, buf);
+ break;
}
return false;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index b6ec02b96d3d..549bef9afddd 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.02-k"
+#define QLA2XXX_VERSION "10.00.00.05-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3f82ea1b72dc..aadfeaac3898 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1635,16 +1635,13 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
return rc;
}
- lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
- 65536);
+ lport->lport_loopid_map = vzalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
if (!lport->lport_loopid_map) {
pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
btree_destroy32(&lport->lport_fcport_map);
return -ENOMEM;
}
- memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
- * 65536);
pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
return 0;
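
This hunk, and the dma_zalloc_coherent() conversions in the qla4xxx files below, are the same mechanical cleanup: a zeroing allocator replaces an allocate-plus-memset pair, so the zeroing can no longer be lost on a later refactor or error path. In sketch form:

	/* before: two steps */
	p = vmalloc(size);
	if (p)
		memset(p, 0, size);

	/* after: one call with identical semantics */
	p = vzalloc(size);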
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 5d6d158bbfd6..52b1a0bc93c9 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -153,15 +153,14 @@ int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
goto exit_get_sys_info_no_free;
}
- memset(sys_info, 0, sizeof(*sys_info));
/* Get flash sys info */
if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO,
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 1da04f323d38..bda2e64ee5ca 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -625,15 +625,14 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
ha->host_no, __func__));
goto exit_init_fw_cb_no_free;
}
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
/* Get Initialize Firmware Control Block. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
@@ -710,9 +709,9 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
__func__);
@@ -720,7 +719,6 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
}
/* Get Initialize Firmware Control Block. */
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
@@ -1342,16 +1340,15 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha)
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- about_fw = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct about_fw_info),
- &about_fw_dma, GFP_KERNEL);
+ about_fw = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct about_fw_info),
+ &about_fw_dma, GFP_KERNEL);
if (!about_fw) {
DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
"for about_fw\n", __func__));
return status;
}
- memset(about_fw, 0, sizeof(struct about_fw_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e91abb327745..968bd85610f8 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -4050,15 +4050,14 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
return status;
}
- memset(sys_info, 0, sizeof(*sys_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2b8a8ce2a431..82e889bbe0ed 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2689,16 +2689,15 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
uint32_t rem = len;
struct nlattr *attr;
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (!init_fw_cb) {
ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
__func__);
return -ENOMEM;
}
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
@@ -4196,15 +4195,14 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
sizeof(struct shadow_regs) +
MEM_ALIGN_VALUE +
(PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
- ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
- &ha->queues_dma, GFP_KERNEL);
+ ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len,
+ &ha->queues_dma, GFP_KERNEL);
if (ha->queues == NULL) {
ql4_printk(KERN_WARNING, ha,
"Memory Allocation failed - queues.\n");
goto mem_alloc_error_exit;
}
- memset(ha->queues, 0, ha->queues_len);
/*
* As per RISC alignment requirements -- the bus-address must be a
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index 40bc616cf8ab..90349498f686 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -12,7 +12,7 @@
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
* You may not alter any existing entry (although adding new ones is
- * encouraged once assigned by ANSI/INCITS T10
+ * encouraged once assigned by ANSI/INCITS T10).
*/
static const char *const scsi_device_types[] = {
"Direct-Access ",
@@ -39,7 +39,7 @@ static const char *const scsi_device_types[] = {
};
/**
- * scsi_device_type - Return 17 char string indicating device type.
+ * scsi_device_type - Return 17-char string indicating device type.
* @type: type number to look up
*/
const char *scsi_device_type(unsigned type)
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(scsi_device_type);
* @scsilun: struct scsi_lun to be converted.
*
* Description:
- * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered
+ * Convert @scsilun from a struct scsi_lun to a four-byte host byte-ordered
* integer, and return the result. The caller must check for
* truncation before using this function.
*
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(scsilun_to_int);
* back into the lun value.
*
* Notes:
- * Given an integer : 0x0b03d204, this function returns a
+ * Given an integer : 0x0b03d204, this function returns a
* struct scsi_lun of: d2 04 0b 03 00 00 00 00
*
*/
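
The worked example above (bytes d2 04 0b 03 -> 0x0b03d204) follows from the SAM LUN encoding: each two-byte level is big-endian, and level 0 lands in the low 16 bits of the result. A self-contained sketch that reproduces the documented result; it is not the kernel's exact implementation:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t lun_to_int(const uint8_t lun[8])
	{
		uint64_t v = 0;
		int i;

		/* level i/2 (a big-endian pair) lands at bit offset 8*i */
		for (i = 0; i < 8; i += 2)
			v |= (uint64_t)(lun[i] << 8 | lun[i + 1]) << (8 * i);
		return v;
	}

	int main(void)
	{
		const uint8_t lun[8] = { 0xd2, 0x04, 0x0b, 0x03, 0, 0, 0, 0 };

		printf("0x%08llx\n", (unsigned long long)lun_to_int(lun));
		return 0;	/* prints 0x0b03d204 */
	}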
@@ -221,7 +221,7 @@ EXPORT_SYMBOL(scsi_sense_desc_find);
/**
* scsi_build_sense_buffer - build sense data in a buffer
- * @desc: Sense format (non zero == descriptor format,
+ * @desc: Sense format (non-zero == descriptor format,
* 0 == fixed format)
* @buf: Where to build sense data
* @key: Sense key
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(scsi_build_sense_buffer);
* @info: 64-bit information value to be set
*
* Return value:
- * 0 on success or EINVAL for invalid sense buffer length
+ * 0 on success or -EINVAL for invalid sense buffer length
**/
int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
{
@@ -305,7 +305,7 @@ EXPORT_SYMBOL(scsi_set_sense_information);
* @cd: command/data bit
*
* Return value:
- * 0 on success or EINVAL for invalid sense buffer length
+ * 0 on success or -EINVAL for invalid sense buffer length
*/
int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd)
{
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index e4f037f0f38b..a5986dae9020 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6,7 +6,7 @@
* anything out of the ordinary is seen.
* ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
*
- * Copyright (C) 2001 - 2016 Douglas Gilbert
+ * Copyright (C) 2001 - 2017 Douglas Gilbert
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -61,8 +61,8 @@
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
-#define SDEBUG_VERSION "1.86"
-static const char *sdebug_version_date = "20160430";
+#define SDEBUG_VERSION "0187" /* format to fit INQUIRY revision field */
+static const char *sdebug_version_date = "20171202";
#define MY_NAME "scsi_debug"
@@ -93,6 +93,7 @@ static const char *sdebug_version_date = "20160430";
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
+#define WRITE_ERROR_ASC 0xc
/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
@@ -105,6 +106,7 @@ static const char *sdebug_version_date = "20160430";
* (id 0) containing 1 logical unit (lun 0). That is 1 device.
*/
#define DEF_ATO 1
+#define DEF_CDB_LEN 10
#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_MB 8
#define DEF_DIF 0
@@ -161,12 +163,14 @@ static const char *sdebug_version_date = "20160430";
#define SDEBUG_OPT_N_WCE 0x1000
#define SDEBUG_OPT_RESET_NOISE 0x2000
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
+#define SDEBUG_OPT_HOST_BUSY 0x8000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
SDEBUG_OPT_TRANSPORT_ERR | \
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
- SDEBUG_OPT_SHORT_TRANSFER)
+ SDEBUG_OPT_SHORT_TRANSFER | \
+ SDEBUG_OPT_HOST_BUSY)
/* When "every_nth" > 0 then modulo "every_nth" commands:
* - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
* - a RECOVERED_ERROR is simulated on successful read and write
@@ -232,7 +236,7 @@ static const char *sdebug_version_date = "20160430";
#define F_M_ACCESS 0x800 /* media access */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
-#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
+#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define SDEBUG_MAX_PARTS 4
@@ -263,12 +267,18 @@ struct sdebug_host_info {
#define to_sdebug_host(d) \
container_of(d, struct sdebug_host_info, dev)
+enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
+ SDEB_DEFER_WQ = 2};
+
struct sdebug_defer {
struct hrtimer hrt;
struct execute_work ew;
int sqa_idx; /* index of sdebug_queue array */
int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
int issuing_cpu;
+ bool init_hrt;
+ bool init_wq;
+ enum sdeb_defer_type defer_t;
};
struct sdebug_queued_cmd {
@@ -282,6 +292,7 @@ struct sdebug_queued_cmd {
unsigned int inj_dif:1;
unsigned int inj_dix:1;
unsigned int inj_short:1;
+ unsigned int inj_host_busy:1;
};
struct sdebug_queue {
@@ -304,8 +315,8 @@ struct opcode_info_t {
u32 flags; /* OR-ed set of SDEB_F_* */
int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
const struct opcode_info_t *arrp; /* num_attached elements or NULL */
- u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
- /* ignore cdb bytes after position 15 */
+ u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
+ /* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
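
Per the comment above, len_mask[0] holds the cdb length and bytes 1..min(cdb_len, 15) are per-byte masks of the bits the driver accepts; a set bit outside the mask means the initiator used a reserved field. A hedged sketch of the check this table drives when a cdb is parsed:

	/* Reject any cdb byte that sets bits outside its allowed mask. */
	static bool cdb_bits_ok(const struct opcode_info_t *oip, const u8 *cdb)
	{
		int k, len = oip->len_mask[0];

		for (k = 1; k < len && k < 16; ++k)
			if (cdb[k] & ~oip->len_mask[k])
				return false;	/* reserved bit set */
		return true;
	}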
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
@@ -322,12 +333,12 @@ enum sdeb_opcode_index {
SDEB_I_READ = 9, /* 6, 10, 12, 16 */
SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
SDEB_I_START_STOP = 11,
- SDEB_I_SERV_ACT_IN = 12, /* 12, 16 */
- SDEB_I_SERV_ACT_OUT = 13, /* 12, 16 */
+ SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
+ SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
SDEB_I_MAINT_IN = 14,
SDEB_I_MAINT_OUT = 15,
SDEB_I_VERIFY = 16, /* 10 only */
- SDEB_I_VARIABLE_LEN = 17,
+ SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
SDEB_I_RESERVE = 18, /* 6, 10 */
SDEB_I_RELEASE = 19, /* 6, 10 */
SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
@@ -340,7 +351,7 @@ enum sdeb_opcode_index {
SDEB_I_WRITE_SAME = 27, /* 10, 16 */
SDEB_I_SYNC_CACHE = 28, /* 10 only */
SDEB_I_COMP_WRITE = 29,
- SDEB_I_LAST_ELEMENT = 30, /* keep this last */
+ SDEB_I_LAST_ELEMENT = 30, /* keep this last (previous + 1) */
};
@@ -372,12 +383,12 @@ static const unsigned char opcode_ind_arr[256] = {
0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
+ 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
SDEB_I_MAINT_OUT, 0, 0, 0,
- SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
- 0, 0, 0, 0,
+ SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
+ 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
@@ -396,6 +407,7 @@ static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -409,72 +421,81 @@ static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
-static const struct opcode_info_t msense_iarr[1] = {
+/*
+ * The following are overflow arrays for cdbs that "hit" the same index in
+ * the opcode_info_arr array. The most time-sensitive (or commonly used) cdb
+ * should be placed in opcode_info_arr[], the others should be placed here.
+ */
+static const struct opcode_info_t msense_iarr[] = {
{0, 0x1a, 0, F_D_IN, NULL, NULL,
{6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t mselect_iarr[1] = {
+static const struct opcode_info_t mselect_iarr[] = {
{0, 0x15, 0, F_D_OUT, NULL, NULL,
{6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t read_iarr[3] = {
- {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
- {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
+static const struct opcode_info_t read_iarr[] = {
+ {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
+ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} },
- {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
+ {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
{6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
- {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
+ {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
+ {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
0xc7, 0, 0, 0, 0} },
};
-static const struct opcode_info_t write_iarr[3] = {
- {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */
- {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
- 0, 0, 0, 0} },
- {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */
- {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */
- {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
- 0xc7, 0, 0, 0, 0} },
+static const struct opcode_info_t write_iarr[] = {
+ {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
+ NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} },
+ {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
+ NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0} },
+ {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
+ NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xbf, 0xc7, 0, 0, 0, 0} },
};
-static const struct opcode_info_t sa_in_iarr[1] = {
+static const struct opcode_info_t sa_in_16_iarr[] = {
{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0, 0xc7} },
+ 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
};
-static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */
- {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
- NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
+static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
+ {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
+ NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
+ {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
+ NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
+ 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
};
-static const struct opcode_info_t maint_in_iarr[2] = {
+static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
{12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
- 0xc7, 0, 0, 0, 0} },
+ 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
{12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
- 0, 0} },
+	    0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
-static const struct opcode_info_t write_same_iarr[1] = {
- {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
+static const struct opcode_info_t write_same_iarr[] = {
+ {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x1f, 0xc7} },
+ 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
};
-static const struct opcode_info_t reserve_iarr[1] = {
- {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
+static const struct opcode_info_t reserve_iarr[] = {
+ {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
{6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t release_iarr[1] = {
- {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
+static const struct opcode_info_t release_iarr[] = {
+ {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
{6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
@@ -484,57 +505,67 @@ static const struct opcode_info_t release_iarr[1] = {
* REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
/* 0 */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
+ {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
+ {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
{6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
{12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
- 0, 0} },
+ 0, 0} }, /* REPORT LUNS */
{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
{6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
{6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
- {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
- 0} },
- {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
- {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
+/* 5 */
+ {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
+ resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
+ 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+ {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
+ resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
+ 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+ {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
{10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
0, 0, 0} },
- {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
+ {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
{10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
0, 0} },
- {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
- {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
+ {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
+ resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
- {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
- {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
+ {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
+ resp_write_dt0, write_iarr, /* WRITE(16) */
+ {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* WRITE(16) */
{0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */
{6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
- {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
- {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
- 0} },
+ {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
+ resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
+ {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
+ {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
+ NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
+ {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
+ resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
+ maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
+ 0xff, 0, 0xc7, 0, 0, 0, 0} },
+/* 15 */
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
+ {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
0, 0, 0, 0, 0, 0} },
- {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
- vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
- 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
- {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
+ {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
+ resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
+ {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
+ 0xff, 0xff} },
+ {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
+ NULL, reserve_iarr, /* RESERVE(10) <no response function> */
{10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
- {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
+ {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
+ NULL, release_iarr, /* RELEASE(10) <no response function> */
{10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
/* 20 */
@@ -546,23 +577,25 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
{6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
- {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
- NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
- 0, 0, 0, 0, 0, 0} },
+ {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
+ {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+/* 25 */
+ {0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
+ NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} }, /* XDWRITEREAD(10) */
{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* WRITE_BUFFER */
- {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
- write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
- 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
- {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
+ {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
+ resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
+ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
+ 0, 0, 0, 0, 0} },
+ {0, 0x35, 0, F_DELAY_OVERR | FF_MEDIA_IO, NULL, NULL, /* SYNC_CACHE */
+ {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} },
- {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
+ {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
{16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
- 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */
+ 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
/* 30 */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
@@ -571,6 +604,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
static int sdebug_add_host = DEF_NUM_HOST;
static int sdebug_ato = DEF_ATO;
+static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
static int sdebug_dif = DEF_DIF;
@@ -797,6 +831,61 @@ static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
/* return -ENOTTY; // correct return but upsets fdisk */
}
+static void config_cdb_len(struct scsi_device *sdev)
+{
+ switch (sdebug_cdb_len) {
+ case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ break;
+ case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ break;
+ case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = true;
+ break;
+ case 16:
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = true;
+ sdev->use_10_for_ms = true;
+ break;
+ case 32: /* No knobs to suggest this so same as 16 for now */
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = true;
+ sdev->use_10_for_ms = true;
+ break;
+ default:
+ pr_warn("unexpected cdb_len=%d, force to 10\n",
+ sdebug_cdb_len);
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ sdebug_cdb_len = 10;
+ break;
+ }
+}
+
+static void all_config_cdb_len(void)
+{
+ struct sdebug_host_info *sdbg_host;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
+ shost = sdbg_host->shost;
+ shost_for_each_device(sdev, shost) {
+ config_cdb_len(sdev);
+ }
+ }
+ spin_unlock(&sdebug_host_list_lock);
+}
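
config_cdb_len() maps the module-wide sdebug_cdb_len onto the per-device use_10_for_rw/use_16_for_rw/use_10_for_ms hints that the midlayer consults when building READ/WRITE and MODE SENSE/SELECT cdbs, and all_config_cdb_len() walks every emulated host to reapply them. A hedged sketch of the writable-parameter handler that would call it; the name and wiring are assumptions, not scsi_debug's exact code:

	static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
				     size_t count)
	{
		int n;

		if (kstrtoint(buf, 10, &n))
			return -EINVAL;
		sdebug_cdb_len = n;	/* vetted/clamped by config_cdb_len() */
		all_config_cdb_len();	/* push the hints to every sdev */
		return count;
	}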
+
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
struct sdebug_host_info *sdhp;
@@ -955,7 +1044,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
static char sdebug_inq_vendor_id[9] = "Linux ";
static char sdebug_inq_product_id[17] = "scsi_debug ";
-static char sdebug_inq_product_rev[5] = "0186"; /* version less '.' */
+static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
@@ -1411,6 +1500,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
memcpy(&arr[8], sdebug_inq_vendor_id, 8);
memcpy(&arr[16], sdebug_inq_product_id, 16);
memcpy(&arr[32], sdebug_inq_product_rev, 4);
+ /* Use Vendor Specific area to place driver date in ASCII hex */
+ memcpy(&arr[36], sdebug_version_date, 8);
/* version descriptors (2 bytes each) follow */
put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
@@ -1900,7 +1991,7 @@ static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
{ /* Control mode page for mode_sense */
unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
- 0, 0, 0, 0};
+ 0, 0, 0, 0};
unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
0, 0, 0x2, 0x4b};
@@ -2077,13 +2168,13 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
len = resp_disconnect_pg(ap, pcontrol, target);
offset += len;
break;
- case 0x3: /* Format device page, direct access */
+ case 0x3: /* Format device page, direct access */
if (is_disk) {
len = resp_format_pg(ap, pcontrol, target);
offset += len;
} else
bad_pcode = true;
- break;
+ break;
case 0x8: /* Caching page, direct access */
if (is_disk) {
len = resp_caching_pg(ap, pcontrol, target);
@@ -2099,7 +2190,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
if ((subpcode > 0x2) && (subpcode < 0xff)) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
- }
+ }
len = 0;
if ((0x0 == subpcode) || (0xff == subpcode))
len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
@@ -2136,7 +2227,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
- }
+ }
break;
default:
bad_pcode = true;
@@ -2172,8 +2263,8 @@ static int resp_mode_select(struct scsi_cmnd *scp,
mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
return check_condition_result;
}
- res = fetch_to_dev_buffer(scp, arr, param_len);
- if (-1 == res)
+ res = fetch_to_dev_buffer(scp, arr, param_len);
+ if (-1 == res)
return DID_ERROR << 16;
else if (sdebug_verbose && (res < param_len))
sdev_printk(KERN_INFO, scp->device,
@@ -2239,8 +2330,8 @@ static int resp_temp_l_pg(unsigned char * arr)
0x0, 0x1, 0x3, 0x2, 0x0, 65,
};
- memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
- return sizeof(temp_l_pg);
+ memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
+ return sizeof(temp_l_pg);
}
static int resp_ie_l_pg(unsigned char * arr)
@@ -2248,18 +2339,18 @@ static int resp_ie_l_pg(unsigned char * arr)
unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
};
- memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
+ memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
if (iec_m_pg[2] & 0x4) { /* TEST bit set */
arr[4] = THRESHOLD_EXCEEDED;
arr[5] = 0xff;
}
- return sizeof(ie_l_pg);
+ return sizeof(ie_l_pg);
}
#define SDEBUG_MAX_LSENSE_SZ 512
-static int resp_log_sense(struct scsi_cmnd * scp,
- struct sdebug_dev_info * devip)
+static int resp_log_sense(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
int ppc, sp, pcode, subpcode, alloc_len, len, n;
unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
@@ -2353,8 +2444,8 @@ static int check_device_access_params(struct scsi_cmnd *scp,
}
/* Returns number of bytes copied or -1 if error. */
-static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
- bool do_write)
+static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
+ u32 num, bool do_write)
{
int ret;
u64 block, rest = 0;
@@ -2380,14 +2471,15 @@ static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
fake_storep + (block * sdebug_sector_size),
- (num - rest) * sdebug_sector_size, 0, do_write);
+ (num - rest) * sdebug_sector_size, sg_skip, do_write);
if (ret != (num - rest) * sdebug_sector_size)
return ret;
if (rest) {
ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
fake_storep, rest * sdebug_sector_size,
- (num - rest) * sdebug_sector_size, do_write);
+ sg_skip + ((num - rest) * sdebug_sector_size),
+ do_write);
}
return ret;
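
A standalone, memcpy-based sketch (an assumption, not the driver code) of
what the new sg_skip parameter buys above: the destination offset advances
per call while reads of the fake store wrap at the end, mirroring the
two-part copy:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define SECT	4	/* toy sector size in bytes */
	#define NSECT	8	/* toy store size in sectors */

	static uint8_t store[NSECT * SECT];

	/* Copy num sectors starting at lba into buf + skip, wrapping at NSECT. */
	static size_t toy_device_access(uint8_t *buf, size_t skip,
					unsigned int lba, unsigned int num)
	{
		unsigned int block = lba % NSECT;
		unsigned int rest = 0;

		if (block + num > NSECT)
			rest = block + num - NSECT;
		memcpy(buf + skip, store + block * SECT, (num - rest) * SECT);
		if (rest)
			memcpy(buf + skip + (num - rest) * SECT, store,
			       rest * SECT);
		return (size_t)num * SECT;
	}

	int main(void)
	{
		uint8_t buf[64] = { 0 };

		printf("copied %zu bytes\n", toy_device_access(buf, 8, 6, 4));
		return 0;
	}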
@@ -2648,7 +2740,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(scp, lba, num, false);
+ ret = do_device_access(scp, 0, lba, num, false);
read_unlock_irqrestore(&atomic_rw, iflags);
if (unlikely(ret == -1))
return DID_ERROR << 16;
@@ -2936,7 +3028,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(scp, lba, num, true);
+ ret = do_device_access(scp, 0, lba, num, true);
if (unlikely(scsi_debug_lbp()))
map_region(lba, num);
write_unlock_irqrestore(&atomic_rw, iflags);
@@ -2970,6 +3062,173 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return 0;
}
+/*
+ * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
+ * No READ GATHERED yet (requires bidi or long cdb holding gather list).
+ */
+static int resp_write_scat(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ u8 *lrdp = NULL;
+ u8 *up;
+ u8 wrprotect;
+ u16 lbdof, num_lrd, k;
+ u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
+ u32 lb_size = sdebug_sector_size;
+ u32 ei_lba;
+ u64 lba;
+ unsigned long iflags;
+ int ret, res;
+ bool is_16;
+ static const u32 lrd_size = 32; /* + parameter list header size */
+
+ if (cmd[0] == VARIABLE_LENGTH_CMD) {
+ is_16 = false;
+ wrprotect = (cmd[10] >> 5) & 0x7;
+ lbdof = get_unaligned_be16(cmd + 12);
+ num_lrd = get_unaligned_be16(cmd + 16);
+ bt_len = get_unaligned_be32(cmd + 28);
+ } else { /* that leaves WRITE SCATTERED(16) */
+ is_16 = true;
+ wrprotect = (cmd[2] >> 5) & 0x7;
+ lbdof = get_unaligned_be16(cmd + 4);
+ num_lrd = get_unaligned_be16(cmd + 8);
+ bt_len = get_unaligned_be32(cmd + 10);
+ if (unlikely(have_dif_prot)) {
+ if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
+ wrprotect) {
+ mk_sense_invalid_opcode(scp);
+ return illegal_condition_result;
+ }
+ if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
+ sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
+ wrprotect == 0)
+ sdev_printk(KERN_ERR, scp->device,
+ "Unprotected WR to DIF device\n");
+ }
+ }
+ if ((num_lrd == 0) || (bt_len == 0))
+ return 0; /* T10 says these do-nothings are not errors */
+ if (lbdof == 0) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: LB Data Offset field bad\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return illegal_condition_result;
+ }
+ lbdof_blen = lbdof * lb_size;
+ if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: LBA range descriptors don't fit\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return illegal_condition_result;
+ }
+ lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
+ if (lrdp == NULL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
+ my_name, __func__, lbdof_blen);
+ res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
+ if (res == -1) {
+ ret = DID_ERROR << 16;
+ goto err_out;
+ }
+
+ write_lock_irqsave(&atomic_rw, iflags);
+ sg_off = lbdof_blen;
+	/* Spec says the Buffer Transfer Length field counts LBs in the data-out buffer */
+ cum_lb = 0;
+ for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
+ lba = get_unaligned_be64(up + 0);
+ num = get_unaligned_be32(up + 8);
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
+ my_name, __func__, k, lba, num, sg_off);
+ if (num == 0)
+ continue;
+ ret = check_device_access_params(scp, lba, num);
+ if (ret)
+ goto err_out_unlock;
+ num_by = num * lb_size;
+ ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
+
+ if ((cum_lb + num) > bt_len) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: sum of blocks > data provided\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
+ 0);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+
+ /* DIX + T10 DIF */
+ if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
+ int prot_ret = prot_verify_write(scp, lba, num,
+ ei_lba);
+
+ if (prot_ret) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
+ prot_ret);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+ }
+
+ ret = do_device_access(scp, sg_off, lba, num, true);
+ if (unlikely(scsi_debug_lbp()))
+ map_region(lba, num);
+ if (unlikely(-1 == ret)) {
+ ret = DID_ERROR << 16;
+ goto err_out_unlock;
+ } else if (unlikely(sdebug_verbose && (ret < num_by)))
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
+ my_name, num_by, ret);
+
+ if (unlikely(sdebug_any_injecting_opt)) {
+ struct sdebug_queued_cmd *sqcp =
+ (struct sdebug_queued_cmd *)scp->host_scribble;
+
+ if (sqcp) {
+ if (sqcp->inj_recovered) {
+ mk_sense_buffer(scp, RECOVERED_ERROR,
+ THRESHOLD_EXCEEDED, 0);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ } else if (sqcp->inj_dif) {
+ /* Logical block guard check failed */
+ mk_sense_buffer(scp, ABORTED_COMMAND,
+ 0x10, 1);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ } else if (sqcp->inj_dix) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ 0x10, 1);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+ }
+ }
+ sg_off += num_by;
+ cum_lb += num;
+ }
+ ret = 0;
+err_out_unlock:
+ write_unlock_irqrestore(&atomic_rw, iflags);
+err_out:
+ kfree(lrdp);
+ return ret;
+}
+
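
As a sanity check on the descriptor parsing loop above, a small userspace
sketch with hand-rolled big-endian helpers standing in for
get_unaligned_be64() and get_unaligned_be32() (an illustration, not driver
code):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t be64(const uint8_t *p)
	{
		uint64_t v = 0;

		for (int i = 0; i < 8; i++)
			v = (v << 8) | p[i];
		return v;
	}

	static uint32_t be32(const uint8_t *p)
	{
		uint32_t v = 0;

		for (int i = 0; i < 4; i++)
			v = (v << 8) | p[i];
		return v;
	}

	int main(void)
	{
		uint8_t lrd[32] = { 0 };	/* one 32-byte LBA range descriptor */

		lrd[7] = 0x10;	/* LBA = 0x10 in bytes 0..7 */
		lrd[11] = 8;	/* number of blocks = 8 in bytes 8..11 */
		printf("lba=0x%llx num=%u\n",
		       (unsigned long long)be64(lrd), be32(lrd + 8));
		return 0;
	}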
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
u32 ei_lba, bool unmap, bool ndob)
{
@@ -3177,7 +3436,7 @@ static int resp_comp_write(struct scsi_cmnd *scp,
* from data-in into arr. Safe (atomic) since write_lock held. */
fake_storep_hold = fake_storep;
fake_storep = arr;
- ret = do_device_access(scp, 0, dnum, true);
+ ret = do_device_access(scp, 0, 0, dnum, true);
fake_storep = fake_storep_hold;
if (ret == -1) {
retval = DID_ERROR << 16;
@@ -3495,6 +3754,7 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
struct scsi_cmnd *scp;
struct sdebug_dev_info *devip;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
qc_idx = sd_dp->qc_idx;
sqp = sdebug_q_arr + sd_dp->sqa_idx;
if (sdebug_statistics) {
@@ -3603,12 +3863,12 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
if (!sdbg_host) {
pr_err("Host info NULL\n");
return NULL;
- }
+ }
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if ((devip->used) && (devip->channel == sdev->channel) &&
- (devip->target == sdev->id) &&
- (devip->lun == sdev->lun))
- return devip;
+ (devip->target == sdev->id) &&
+ (devip->lun == sdev->lun))
+ return devip;
else {
if ((!devip->used) && (!open_devip))
open_devip = devip;
@@ -3660,6 +3920,7 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
blk_queue_max_segment_size(sdp->request_queue, -1U);
if (sdebug_no_uld)
sdp->no_uld_attach = 1;
+ config_cdb_len(sdp);
return 0;
}
@@ -3678,13 +3939,14 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
}
}
-static void stop_qc_helper(struct sdebug_defer *sd_dp)
+static void stop_qc_helper(struct sdebug_defer *sd_dp,
+ enum sdeb_defer_type defer_t)
{
if (!sd_dp)
return;
- if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
+ if (defer_t == SDEB_DEFER_HRT)
hrtimer_cancel(&sd_dp->hrt);
- else if (sdebug_jdelay < 0)
+ else if (defer_t == SDEB_DEFER_WQ)
cancel_work_sync(&sd_dp->ew.work);
}
@@ -3694,6 +3956,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
unsigned long iflags;
int j, k, qmax, r_qmax;
+ enum sdeb_defer_type l_defer_t;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
@@ -3717,8 +3980,13 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
atomic_dec(&devip->num_in_q);
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
+ if (sd_dp) {
+ l_defer_t = sd_dp->defer_t;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
+ } else
+ l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp);
+ stop_qc_helper(sd_dp, l_defer_t);
clear_bit(k, sqp->in_use_bm);
return true;
}
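
The stop path above snapshots sd_dp->defer_t under the queue lock and only
then cancels outside it. A minimal pthread sketch of the same read-and-clear
pattern (an illustration, not the driver code):

	#include <pthread.h>
	#include <stdio.h>

	enum defer_type { DEFER_NONE, DEFER_HRT, DEFER_WQ };

	static pthread_mutex_t qc_lock = PTHREAD_MUTEX_INITIALIZER;
	static enum defer_type defer_t = DEFER_HRT;

	/* Read and reset the deferral type while locked; the caller cancels
	 * with the lock dropped, acting on a stable snapshot. */
	static enum defer_type snapshot_and_clear(void)
	{
		enum defer_type t;

		pthread_mutex_lock(&qc_lock);
		t = defer_t;
		defer_t = DEFER_NONE;
		pthread_mutex_unlock(&qc_lock);
		return t;
	}

	int main(void)
	{
		printf("cancel path for defer_t=%d\n", snapshot_and_clear());
		return 0;
	}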
@@ -3733,6 +4001,7 @@ static void stop_all_queued(void)
{
unsigned long iflags;
int j, k;
+ enum sdeb_defer_type l_defer_t;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
@@ -3751,8 +4020,13 @@ static void stop_all_queued(void)
atomic_dec(&devip->num_in_q);
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
+ if (sd_dp) {
+ l_defer_t = sd_dp->defer_t;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
+ } else
+ l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp);
+ stop_qc_helper(sd_dp, l_defer_t);
clear_bit(k, sqp->in_use_bm);
spin_lock_irqsave(&sqp->qc_lock, iflags);
}
@@ -3848,8 +4122,8 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
{
struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *devip;
- struct scsi_device * sdp;
- struct Scsi_Host * hp;
+ struct scsi_device *sdp;
+ struct Scsi_Host *hp;
int k = 0;
++num_bus_resets;
@@ -3863,7 +4137,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
if (sdbg_host) {
list_for_each_entry(devip,
- &sdbg_host->dev_info_list,
+ &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
++k;
@@ -3886,15 +4160,15 @@ static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
++num_host_resets;
if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
- spin_lock(&sdebug_host_list_lock);
- list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
list_for_each_entry(devip, &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
++k;
}
- }
- spin_unlock(&sdebug_host_list_lock);
+ }
+ spin_unlock(&sdebug_host_list_lock);
stop_all_queued();
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
@@ -3921,7 +4195,7 @@ static void __init sdebug_build_parts(unsigned char *ramp,
sectors_per_part = (num_sectors - sdebug_sectors_per)
/ sdebug_num_parts;
heads_by_sects = sdebug_heads * sdebug_sectors_per;
- starts[0] = sdebug_sectors_per;
+ starts[0] = sdebug_sectors_per;
for (k = 1; k < sdebug_num_parts; ++k)
starts[k] = ((k * sectors_per_part) / heads_by_sects)
* heads_by_sects;
@@ -3995,6 +4269,7 @@ static void setup_inject(struct sdebug_queue *sqp,
sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
+ sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
}
/* Complete the processing of the thread that queued a SCSI command to this
@@ -4003,7 +4278,7 @@ static void setup_inject(struct sdebug_queue *sqp,
* SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
*/
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
- int scsi_result, int delta_jiff)
+ int scsi_result, int delta_jiff, int ndelay)
{
unsigned long iflags;
int k, num_in_q, qdepth, inject;
@@ -4081,20 +4356,20 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
setup_inject(sqp, sqcp);
- if (delta_jiff > 0 || sdebug_ndelay > 0) {
+ if (sd_dp == NULL) {
+ sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
+ if (sd_dp == NULL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ if (delta_jiff > 0 || ndelay > 0) {
ktime_t kt;
if (delta_jiff > 0) {
- struct timespec ts;
-
- jiffies_to_timespec(delta_jiff, &ts);
- kt = ktime_set(ts.tv_sec, ts.tv_nsec);
+ kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
} else
- kt = sdebug_ndelay;
- if (NULL == sd_dp) {
- sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
- if (NULL == sd_dp)
- return SCSI_MLQUEUE_HOST_BUSY;
+ kt = ndelay;
+ if (!sd_dp->init_hrt) {
+ sd_dp->init_hrt = true;
sqcp->sd_dp = sd_dp;
hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
@@ -4104,12 +4379,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
+ sd_dp->defer_t = SDEB_DEFER_HRT;
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
} else { /* jdelay < 0, use work queue */
- if (NULL == sd_dp) {
- sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
- if (NULL == sd_dp)
- return SCSI_MLQUEUE_HOST_BUSY;
+ if (!sd_dp->init_wq) {
+ sd_dp->init_wq = true;
sqcp->sd_dp = sd_dp;
sd_dp->sqa_idx = sqp - sdebug_q_arr;
sd_dp->qc_idx = k;
@@ -4117,6 +4391,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
+ sd_dp->defer_t = SDEB_DEFER_WQ;
schedule_work(&sd_dp->ew.work);
}
if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
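
The timespec round trip removed above reduces to a single multiplication; a
quick userspace check of the arithmetic (HZ=250 is only an example value):

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC	1000000000ULL
	#define HZ		250	/* example kernel tick rate */

	int main(void)
	{
		unsigned int delta_jiff = 5;
		uint64_t ns = (uint64_t)delta_jiff * (NSEC_PER_SEC / HZ);

		printf("%u jiffies at HZ=%d -> %llu ns\n",
		       delta_jiff, HZ, (unsigned long long)ns);
		return 0;
	}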
@@ -4141,6 +4416,7 @@ respond_in_thread: /* call back to mid-layer using invocation thread */
*/
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
+module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
@@ -4198,6 +4474,7 @@ MODULE_VERSION(SDEBUG_VERSION);
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
@@ -4210,7 +4487,8 @@ MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
-MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\"0186\")");
+MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
+ SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
@@ -4360,9 +4638,6 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
}
}
if (res > 0) {
- /* make sure sdebug_defer instances get
- * re-allocated for new delay variant */
- free_all_queued();
sdebug_jdelay = jdelay;
sdebug_ndelay = 0;
}
@@ -4403,9 +4678,6 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
}
}
if (res > 0) {
- /* make sure sdebug_defer instances get
- * re-allocated for new delay variant */
- free_all_queued();
sdebug_ndelay = ndelay;
sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
: DEF_JDELAY;
@@ -4426,15 +4698,15 @@ static ssize_t opts_show(struct device_driver *ddp, char *buf)
static ssize_t opts_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int opts;
+ int opts;
char work[20];
- if (1 == sscanf(buf, "%10s", work)) {
- if (0 == strncasecmp(work,"0x", 2)) {
- if (1 == sscanf(&work[2], "%x", &opts))
+ if (sscanf(buf, "%10s", work) == 1) {
+ if (strncasecmp(work, "0x", 2) == 0) {
+ if (kstrtoint(work + 2, 16, &opts) == 0)
goto opts_done;
} else {
- if (1 == sscanf(work, "%d", &opts))
+ if (kstrtoint(work, 10, &opts) == 0)
goto opts_done;
}
}
@@ -4455,7 +4727,7 @@ static ssize_t ptype_show(struct device_driver *ddp, char *buf)
static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_ptype = n;
@@ -4472,7 +4744,7 @@ static ssize_t dsense_show(struct device_driver *ddp, char *buf)
static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_dsense = n;
@@ -4489,7 +4761,7 @@ static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
n = (n > 0);
@@ -4522,7 +4794,7 @@ static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_no_lun_0 = n;
@@ -4539,7 +4811,7 @@ static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_num_tgts = n;
@@ -4569,7 +4841,7 @@ static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int nth;
+ int nth;
if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
sdebug_every_nth = nth;
@@ -4591,7 +4863,7 @@ static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
bool changed;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
@@ -4678,7 +4950,7 @@ static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
bool changed;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
@@ -4884,6 +5156,24 @@ static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(uuid_ctl);
+static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
+}
+static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int ret, n;
+
+ ret = kstrtoint(buf, 0, &n);
+ if (ret)
+ return ret;
+ sdebug_cdb_len = n;
+ all_config_cdb_len();
+ return count;
+}
+static DRIVER_ATTR_RW(cdb_len);
+
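
cdb_len_store() above parses with kstrtoint(buf, 0, &n); base 0 auto-detects
the radix the same way strtol() does in userspace, so decimal, hex and octal
spellings all work (a quick illustration, not driver code):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		printf("%ld %ld %ld\n",
		       strtol("16", NULL, 0),
		       strtol("0x10", NULL, 0),
		       strtol("020", NULL, 0));	/* prints: 16 16 16 */
		return 0;
	}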
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -4923,6 +5213,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_ndelay.attr,
&driver_attr_strict.attr,
&driver_attr_uuid_ctl.attr,
+ &driver_attr_cdb_len.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
@@ -5113,12 +5404,12 @@ static int __init scsi_debug_init(void)
host_to_add = sdebug_add_host;
sdebug_add_host = 0;
- for (k = 0; k < host_to_add; k++) {
- if (sdebug_add_adapter()) {
+ for (k = 0; k < host_to_add; k++) {
+ if (sdebug_add_adapter()) {
pr_err("sdebug_add_adapter failed k=%d\n", k);
- break;
- }
- }
+ break;
+ }
+ }
if (sdebug_verbose)
pr_info("built %d host(s)\n", sdebug_add_host);
@@ -5161,53 +5452,53 @@ module_exit(scsi_debug_exit);
static void sdebug_release_adapter(struct device * dev)
{
- struct sdebug_host_info *sdbg_host;
+ struct sdebug_host_info *sdbg_host;
sdbg_host = to_sdebug_host(dev);
- kfree(sdbg_host);
+ kfree(sdbg_host);
}
static int sdebug_add_adapter(void)
{
int k, devs_per_host;
- int error = 0;
- struct sdebug_host_info *sdbg_host;
+ int error = 0;
+ struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
- sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
- if (NULL == sdbg_host) {
+ sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
+ if (sdbg_host == NULL) {
pr_err("out of memory at line %d\n", __LINE__);
- return -ENOMEM;
- }
+ return -ENOMEM;
+ }
- INIT_LIST_HEAD(&sdbg_host->dev_info_list);
+ INIT_LIST_HEAD(&sdbg_host->dev_info_list);
devs_per_host = sdebug_num_tgts * sdebug_max_luns;
- for (k = 0; k < devs_per_host; k++) {
+ for (k = 0; k < devs_per_host; k++) {
sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
if (!sdbg_devinfo) {
pr_err("out of memory at line %d\n", __LINE__);
- error = -ENOMEM;
+ error = -ENOMEM;
goto clean;
- }
- }
+ }
+ }
- spin_lock(&sdebug_host_list_lock);
- list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
- spin_unlock(&sdebug_host_list_lock);
+ spin_lock(&sdebug_host_list_lock);
+ list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
+ spin_unlock(&sdebug_host_list_lock);
- sdbg_host->dev.bus = &pseudo_lld_bus;
- sdbg_host->dev.parent = pseudo_primary;
- sdbg_host->dev.release = &sdebug_release_adapter;
+ sdbg_host->dev.bus = &pseudo_lld_bus;
+ sdbg_host->dev.parent = pseudo_primary;
+ sdbg_host->dev.release = &sdebug_release_adapter;
dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
- error = device_register(&sdbg_host->dev);
+ error = device_register(&sdbg_host->dev);
- if (error)
+ if (error)
goto clean;
++sdebug_add_host;
- return error;
+ return error;
clean:
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
@@ -5217,20 +5508,20 @@ clean:
}
kfree(sdbg_host);
- return error;
+ return error;
}
static void sdebug_remove_adapter(void)
{
- struct sdebug_host_info * sdbg_host = NULL;
+ struct sdebug_host_info *sdbg_host = NULL;
- spin_lock(&sdebug_host_list_lock);
- if (!list_empty(&sdebug_host_list)) {
- sdbg_host = list_entry(sdebug_host_list.prev,
- struct sdebug_host_info, host_list);
+ spin_lock(&sdebug_host_list_lock);
+ if (!list_empty(&sdebug_host_list)) {
+ sdbg_host = list_entry(sdebug_host_list.prev,
+ struct sdebug_host_info, host_list);
list_del(&sdbg_host->host_list);
}
- spin_unlock(&sdebug_host_list_lock);
+ spin_unlock(&sdebug_host_list_lock);
if (!sdbg_host)
return;
@@ -5281,6 +5572,12 @@ static bool fake_timeout(struct scsi_cmnd *scp)
return false;
}
+static bool fake_host_busy(struct scsi_cmnd *scp)
+{
+ return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
+ (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
+}
+
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scp)
{
@@ -5323,6 +5620,8 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
b);
}
+ if (fake_host_busy(scp))
+ return SCSI_MLQUEUE_HOST_BUSY;
has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
goto err_out;
@@ -5420,12 +5719,15 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
errsts = r_pfp(scp, devip);
fini:
- return schedule_resp(scp, devip, errsts,
- ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
+ if (F_DELAY_OVERR & flags)
+ return schedule_resp(scp, devip, errsts, 0, 0);
+ else
+ return schedule_resp(scp, devip, errsts, sdebug_jdelay,
+ sdebug_ndelay);
check_cond:
- return schedule_resp(scp, devip, check_condition_result, 0);
+ return schedule_resp(scp, devip, check_condition_result, 0, 0);
err_out:
- return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
+ return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0, 0);
}
static struct scsi_host_template sdebug_driver_template = {
@@ -5484,7 +5786,7 @@ static int sdebug_driver_probe(struct device * dev)
if (sdebug_mq_active)
hpnt->nr_hw_queues = submit_queues;
- sdbg_host->shost = hpnt;
+ sdbg_host->shost = hpnt;
*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
hpnt->max_id = sdebug_num_tgts + 1;
@@ -5542,12 +5844,12 @@ static int sdebug_driver_probe(struct device * dev)
sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
if (sdebug_every_nth) /* need stats counters for every_nth */
sdebug_statistics = true;
- error = scsi_add_host(hpnt, &sdbg_host->dev);
- if (error) {
+ error = scsi_add_host(hpnt, &sdbg_host->dev);
+ if (error) {
pr_err("scsi_add_host failed\n");
- error = -ENODEV;
+ error = -ENODEV;
scsi_host_put(hpnt);
- } else
+ } else
scsi_scan_host(hpnt);
return error;
@@ -5555,7 +5857,7 @@ static int sdebug_driver_probe(struct device * dev)
static int sdebug_driver_remove(struct device * dev)
{
- struct sdebug_host_info *sdbg_host;
+ struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
sdbg_host = to_sdebug_host(dev);
@@ -5565,16 +5867,16 @@ static int sdebug_driver_remove(struct device * dev)
return -ENODEV;
}
- scsi_remove_host(sdbg_host->shost);
+ scsi_remove_host(sdbg_host->shost);
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
dev_list) {
- list_del(&sdbg_devinfo->dev_list);
- kfree(sdbg_devinfo);
- }
+ list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo);
+ }
- scsi_host_put(sdbg_host->shost);
- return 0;
+ scsi_host_put(sdbg_host->shost);
+ return 0;
}
static int pseudo_lld_bus_match(struct device *dev,
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index 01f08c03f2c1..c3765d29fd3f 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -8,9 +8,11 @@ void scsi_show_rq(struct seq_file *m, struct request *rq)
{
struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
- char buf[80];
+ const u8 *const cdb = READ_ONCE(cmd->cmnd);
+ char buf[80] = "(?)";
- __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
+ if (cdb)
+ __scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len);
seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf,
cmd->retries, msecs / 1000, msecs % 1000);
}
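
The fix above snapshots cmd->cmnd exactly once, so a concurrent
sd_uninit_command() storing NULL cannot be seen between the check and the
use. A userspace sketch of the idea with C11 atomics standing in for
READ_ONCE() (an illustration, not the driver code):

	#include <stdatomic.h>
	#include <stdio.h>

	static _Atomic(const char *) cmnd = "28 00 00 00 10 00 00 00 08 00";

	int main(void)
	{
		/* one load; later uses of cdb cannot change underfoot */
		const char *cdb = atomic_load_explicit(&cmnd,
						       memory_order_relaxed);
		char buf[80] = "(?)";

		if (cdb)
			snprintf(buf, sizeof(buf), "%s", cdb);
		printf(".cmd=%s\n", buf);
		return 0;
	}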
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 78d4aa8df675..f3b117246d47 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -34,7 +34,6 @@ struct scsi_dev_info_list_table {
};
-static const char spaces[] = " "; /* 16 of them */
static blist_flags_t scsi_default_dev_flags;
static LIST_HEAD(scsi_dev_info_list);
static char scsi_dev_flags[256];
@@ -109,8 +108,8 @@ static struct {
* seagate controller, which causes SCSI code to reset bus.
*/
{"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */
- {"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */
- {"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */
+ {"HP", "C1790A", NULL, BLIST_NOLUN}, /* scanjet iip */
+ {"HP", "C2500A", NULL, BLIST_NOLUN}, /* scanjet iicx */
{"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN}, /* locks up */
{"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN}, /* responds to all lun */
{"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* locks up */
@@ -120,7 +119,7 @@ static struct {
{"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN}, /* locks up */
{"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */
{"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
- {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN},
+ {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN | BLIST_BORKEN},
{"transtec", "T5008", "0001", BLIST_NOREPORTLUN },
{"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */
{"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
@@ -159,8 +158,8 @@ static struct {
{"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */
{"DELL", "PV530F", NULL, BLIST_SPARSELUN},
{"DELL", "PERCRAID", NULL, BLIST_FORCELUN},
- {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */
- {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */
+ {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, storage on LUN 0 */
+ {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, no storage on LUN 0 */
{"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
{"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
@@ -182,15 +181,14 @@ static struct {
{"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */
- {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */
+ {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */
{"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
{"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
{"HP", "C1557A", NULL, BLIST_FORCELUN},
{"HP", "C3323-300", "4269", BLIST_NOTQ},
{"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
- {"HP", "DF400", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"HP", "DF500", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"HP", "DF600", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "DF400", "*", BLIST_REPORTLUN2},
+ {"HP", "DF500", "*", BLIST_REPORTLUN2},
{"HP", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
@@ -256,7 +254,6 @@ static struct {
{"ST650211", "CF", NULL, BLIST_RETRY_HWERROR},
{"SUN", "T300", "*", BLIST_SPARSELUN},
{"SUN", "T4", "*", BLIST_SPARSELUN},
- {"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN},
{"Tornado-", "F4", "*", BLIST_NOREPORTLUN},
{"TOSHIBA", "CDROM", NULL, BLIST_ISROM},
{"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
@@ -298,20 +295,13 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
size_t from_length;
from_length = strlen(from);
- strncpy(to, from, min(to_length, from_length));
- if (from_length < to_length) {
- if (compatible) {
- /*
- * NUL terminate the string if it is short.
- */
- to[from_length] = '\0';
- } else {
- /*
- * space pad the string if it is short.
- */
- strncpy(&to[from_length], spaces,
- to_length - from_length);
- }
+ /* This zero-pads the destination */
+ strncpy(to, from, to_length);
+ if (from_length < to_length && !compatible) {
+ /*
+ * space pad the string if it is short.
+ */
+ memset(&to[from_length], ' ', to_length - from_length);
}
if (from_length > to_length)
printk(KERN_WARNING "%s: %s string '%s' is too long\n",
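
A userspace check (illustration only) of the padding rule adopted above:
strncpy() zero-fills the unused tail, and non-compatible entries then
overwrite that tail with spaces:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char to[8];

		strncpy(to, "HP", sizeof(to));	/* "HP" plus six NUL bytes */
		memset(&to[strlen("HP")], ' ', sizeof(to) - strlen("HP"));
		printf("[%.*s]\n", (int)sizeof(to), to);	/* [HP      ] */
		return 0;
	}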
@@ -361,7 +351,8 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
* Returns: 0 OK, -error on failure.
**/
int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
- char *strflags, blist_flags_t flags, int key)
+ char *strflags, blist_flags_t flags,
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
@@ -382,10 +373,8 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
model, compatible);
if (strflags)
- devinfo->flags = simple_strtoul(strflags, NULL, 0);
- else
- devinfo->flags = flags;
-
+ flags = (__force blist_flags_t)simple_strtoul(strflags, NULL, 0);
+ devinfo->flags = flags;
devinfo->compatible = compatible;
if (compatible)
@@ -412,7 +401,7 @@ EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
* Returns: pointer to matching entry, or ERR_PTR on failure.
**/
static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
- const char *model, int key)
+ const char *model, enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
@@ -458,7 +447,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
/*
* vendor strings must be an exact match
*/
- if (vmax != strlen(devinfo->vendor) ||
+ if (vmax != strnlen(devinfo->vendor,
+ sizeof(devinfo->vendor)) ||
memcmp(devinfo->vendor, vskip, vmax))
continue;
@@ -466,7 +456,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
* @model specifies the full string, and
* must be larger or equal to devinfo->model
*/
- mlen = strlen(devinfo->model);
+ mlen = strnlen(devinfo->model, sizeof(devinfo->model));
if (mmax < mlen || memcmp(devinfo->model, mskip, mlen))
continue;
return devinfo;
@@ -494,7 +484,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
*
* Returns: 0 OK, -error on failure.
**/
-int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
+int scsi_dev_info_list_del_keyed(char *vendor, char *model,
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *found;
@@ -596,20 +587,15 @@ blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model,
- int key)
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
- int err;
devinfo = scsi_dev_info_list_find(vendor, model, key);
if (!IS_ERR(devinfo))
return devinfo->flags;
- err = PTR_ERR(devinfo);
- if (err != -ENOENT)
- return err;
-
- /* nothing found, return nothing */
+ /* key or device not found: return nothing */
if (key != SCSI_DEVINFO_GLOBAL)
return 0;
@@ -783,7 +769,7 @@ void scsi_exit_devinfo(void)
* Adds the requested list, returns zero on success, -EEXIST if the
* key is already registered to a list, or other error on failure.
*/
-int scsi_dev_info_add_list(int key, const char *name)
+int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name)
{
struct scsi_dev_info_list_table *devinfo_table =
scsi_devinfo_lookup_by_key(key);
@@ -815,7 +801,7 @@ EXPORT_SYMBOL(scsi_dev_info_add_list);
* frees the list itself. Returns 0 on success or -EINVAL if the key
* can't be found.
*/
-int scsi_dev_info_remove_list(int key)
+int scsi_dev_info_remove_list(enum scsi_devinfo_key key)
{
struct list_head *lh, *lh_next;
struct scsi_dev_info_list_table *devinfo_table =
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 2b785d09d5bd..b88b5dbbc444 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -56,10 +56,13 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"IBM", "1815", "rdac", },
{"IBM", "1818", "rdac", },
{"IBM", "3526", "rdac", },
+ {"IBM", "3542", "rdac", },
+ {"IBM", "3552", "rdac", },
{"SGI", "TP9", "rdac", },
{"SGI", "IS", "rdac", },
- {"STK", "OPENstorage D280", "rdac", },
+ {"STK", "OPENstorage", "rdac", },
{"STK", "FLEXLINE 380", "rdac", },
+ {"STK", "BladeCtlr", "rdac", },
{"SUN", "CSM", "rdac", },
{"SUN", "LCSM100", "rdac", },
{"SUN", "STK6580_6780", "rdac", },
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 62b56de38ae8..d042915ce895 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -61,9 +61,10 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
static int scsi_try_to_abort_cmd(struct scsi_host_template *,
struct scsi_cmnd *);
-/* called with shost->host_lock held */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
+ lockdep_assert_held(shost->host_lock);
+
if (atomic_read(&shost->host_busy) == shost->host_failed) {
trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
@@ -220,6 +221,17 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd)
}
}
+static void scsi_eh_inc_host_failed(struct rcu_head *head)
+{
+ struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ shost->host_failed++;
+ scsi_eh_wakeup(shost);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
/**
* scsi_eh_scmd_add - add scsi cmd to error handling.
* @scmd: scmd to run eh on.
@@ -242,9 +254,12 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
scsi_eh_reset(scmd);
list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
- shost->host_failed++;
- scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
+ /*
+ * Ensure that all tasks observe the host state change before the
+ * host_failed change.
+ */
+ call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
}
/**
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 00742c50cd44..976c936029cb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -164,7 +164,7 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
* for a requeue after completion, which should only occur in this
* file.
*/
-static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
+static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
struct scsi_device *device = cmd->device;
struct request_queue *q = device->request_queue;
@@ -220,7 +220,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
*/
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
- __scsi_queue_insert(cmd, reason, 1);
+ __scsi_queue_insert(cmd, reason, true);
}
@@ -318,22 +318,39 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
-void scsi_device_unbusy(struct scsi_device *sdev)
+/*
+ * Decrement the host_busy counter and wake up the error handler if necessary.
+ * To avoid missing a wakeup when shost->host_busy == shost->host_failed,
+ * scsi_eh_scmd_add() uses call_rcu() and this function holds an RCU read
+ * lock, so that this function either finishes in its entirety before
+ * scsi_eh_scmd_add() increments the host_failed counter, or else observes
+ * the shost state change made by scsi_eh_scmd_add().
+ */
+static void scsi_dec_host_busy(struct Scsi_Host *shost)
{
- struct Scsi_Host *shost = sdev->host;
- struct scsi_target *starget = scsi_target(sdev);
unsigned long flags;
+ rcu_read_lock();
atomic_dec(&shost->host_busy);
- if (starget->can_queue > 0)
- atomic_dec(&starget->target_busy);
-
- if (unlikely(scsi_host_in_recovery(shost) &&
- (shost->host_failed || shost->host_eh_scheduled))) {
+ if (unlikely(scsi_host_in_recovery(shost))) {
spin_lock_irqsave(shost->host_lock, flags);
- scsi_eh_wakeup(shost);
+ if (shost->host_failed || shost->host_eh_scheduled)
+ scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
}
+ rcu_read_unlock();
+}
+
+void scsi_device_unbusy(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct scsi_target *starget = scsi_target(sdev);
+
+ scsi_dec_host_busy(shost);
+
+ if (starget->can_queue > 0)
+ atomic_dec(&starget->target_busy);
atomic_dec(&sdev->device_busy);
}
@@ -998,11 +1015,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
break;
case ACTION_RETRY:
/* Retry the same command immediately */
- __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
+ __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
break;
case ACTION_DELAYED_RETRY:
/* Retry the same command after a delay */
- __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
+ __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
break;
}
}
@@ -1128,7 +1145,7 @@ EXPORT_SYMBOL(scsi_init_io);
* Called from inside blk_get_request() for pass-through requests and from
* inside scsi_init_command() for filesystem requests.
*/
-void scsi_initialize_rq(struct request *rq)
+static void scsi_initialize_rq(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
@@ -1136,7 +1153,6 @@ void scsi_initialize_rq(struct request *rq)
cmd->jiffies_at_alloc = jiffies;
cmd->retries = 0;
}
-EXPORT_SYMBOL(scsi_initialize_rq);
/* Add a command to the list used by the aacraid and dpt_i2o drivers */
void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
@@ -1532,7 +1548,7 @@ starved:
list_add_tail(&sdev->starved_entry, &shost->starved_list);
spin_unlock_irq(shost->host_lock);
out_dec:
- atomic_dec(&shost->host_busy);
+ scsi_dec_host_busy(shost);
return 0;
}
@@ -1967,6 +1983,8 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
out_put_device:
put_device(&sdev->sdev_gendev);
out:
+ if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
+ blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
return false;
}
@@ -2018,7 +2036,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
out_dec_host_busy:
- atomic_dec(&shost->host_busy);
+ scsi_dec_host_busy(shost);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index a5946cd64caa..99f1db5e467e 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -45,7 +45,7 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
/* scsi_devinfo.c */
/* list of keys for the lists */
-enum {
+enum scsi_devinfo_key {
SCSI_DEVINFO_GLOBAL = 0,
SCSI_DEVINFO_SPI,
};
@@ -56,13 +56,15 @@ extern blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
extern blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model,
- int key);
+ enum scsi_devinfo_key key);
extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
- blist_flags_t flags, int key);
-extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
-extern int scsi_dev_info_add_list(int key, const char *name);
-extern int scsi_dev_info_remove_list(int key);
+ blist_flags_t flags,
+ enum scsi_devinfo_key key);
+extern int scsi_dev_info_list_del_keyed(char *vendor, char *model,
+ enum scsi_devinfo_key key);
+extern int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name);
+extern int scsi_dev_info_remove_list(enum scsi_devinfo_key key);
extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);
@@ -184,7 +186,6 @@ void scsi_dh_release_device(struct scsi_device *sdev);
static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
#endif
-static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
/*
* internal scsi timeout functions: for use by mid-layer and transport
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index be5e919db0e8..0880d975eed3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -770,7 +770,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
* SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
**/
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
- int *bflags, int async)
+ blist_flags_t *bflags, int async)
{
int ret;
@@ -1049,14 +1049,15 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
* - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
**/
static int scsi_probe_and_add_lun(struct scsi_target *starget,
- u64 lun, int *bflagsp,
+ u64 lun, blist_flags_t *bflagsp,
struct scsi_device **sdevp,
enum scsi_scan_mode rescan,
void *hostdata)
{
struct scsi_device *sdev;
unsigned char *result;
- int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
+ blist_flags_t bflags;
+ int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
/*
@@ -1201,7 +1202,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* Modifies sdevscan->lun.
**/
static void scsi_sequential_lun_scan(struct scsi_target *starget,
- int bflags, int scsi_level,
+ blist_flags_t bflags, int scsi_level,
enum scsi_scan_mode rescan)
{
uint max_dev_lun;
@@ -1292,7 +1293,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
* 0: scan completed (or no memory, so further scanning is futile)
* 1: could not scan with REPORT LUN
**/
-static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
+static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
enum scsi_scan_mode rescan)
{
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -1538,7 +1539,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
struct Scsi_Host *shost = dev_to_shost(parent);
- int bflags = 0;
+ blist_flags_t bflags = 0;
int res;
struct scsi_target *starget;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 50e7d7e4a861..91b90f672d23 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -967,7 +967,8 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
-#define BLIST_FLAG_NAME(name) [ilog2(BLIST_##name)] = #name
+#define BLIST_FLAG_NAME(name) \
+ [ilog2((__force unsigned int)BLIST_##name)] = #name
static const char *const sdev_bflags_name[] = {
#include "scsi_devinfo_tbl.c"
};
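
A toy lookup (two illustrative flags only; not the real table) showing how
the ilog2-indexed name array built above maps a single blacklist bit to its
name:

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	#define BLIST_NOLUN	(1u << 0)
	#define BLIST_FORCELUN	(1u << 1)

	static const char *const names[] = { "NOLUN", "FORCELUN" };

	int main(void)
	{
		unsigned int flag = BLIST_FORCELUN;
		int idx = ffs((int)flag) - 1;	/* ilog2() of a single set bit */

		printf("bit %d -> %s\n", idx, names[idx]);
		return 0;
	}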
@@ -984,7 +985,7 @@ sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) {
const char *name = NULL;
- if (!(sdev->sdev_bflags & BIT(i)))
+ if (!(sdev->sdev_bflags & (__force blist_flags_t)BIT(i)))
continue;
if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i])
name = sdev_bflags_name[i];
@@ -1277,7 +1278,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
if (error) {
sdev_printk(KERN_INFO, sdev,
"failed to add device: %d\n", error);
- scsi_dh_remove_device(sdev);
return error;
}
@@ -1286,7 +1286,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
if (error) {
sdev_printk(KERN_INFO, sdev,
"failed to add class device: %d\n", error);
- scsi_dh_remove_device(sdev);
device_del(&sdev->sdev_gendev);
return error;
}
@@ -1353,7 +1352,6 @@ void __scsi_remove_device(struct scsi_device *sdev)
bsg_unregister_queue(sdev->request_queue);
device_unregister(&sdev->sdev_dev);
transport_remove_device(dev);
- scsi_dh_remove_device(sdev);
device_del(dev);
} else
put_device(&sdev->sdev_dev);
@@ -1414,7 +1412,10 @@ static void __scsi_remove_target(struct scsi_target *starget)
* check.
*/
if (sdev->channel != starget->channel ||
- sdev->id != starget->id ||
+ sdev->id != starget->id)
+ continue;
+ if (sdev->sdev_state == SDEV_DEL ||
+ sdev->sdev_state == SDEV_CANCEL ||
!get_device(&sdev->sdev_gendev))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 4664024bd5d3..be3be0f9cb2d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -267,8 +267,8 @@ static const struct {
{ FC_PORTSPEED_50GBIT, "50 Gbit" },
{ FC_PORTSPEED_100GBIT, "100 Gbit" },
{ FC_PORTSPEED_25GBIT, "25 Gbit" },
- { FC_PORTSPEED_64BIT, "64 Gbit" },
- { FC_PORTSPEED_128BIT, "128 Gbit" },
+ { FC_PORTSPEED_64GBIT, "64 Gbit" },
+ { FC_PORTSPEED_128GBIT, "128 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index d0219e36080c..871ea582029e 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -26,6 +26,7 @@
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_device.h>
@@ -50,14 +51,14 @@
/* Our blacklist flags */
enum {
- SPI_BLIST_NOIUS = 0x1,
+ SPI_BLIST_NOIUS = (__force blist_flags_t)0x1,
};
/* blacklist table, modelled on scsi_devinfo.c */
static struct {
char *vendor;
char *model;
- unsigned flags;
+ blist_flags_t flags;
} spi_static_device_list[] __initdata = {
{"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
{"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS },
@@ -221,9 +222,11 @@ static int spi_device_configure(struct transport_container *tc,
{
struct scsi_device *sdev = to_scsi_device(dev);
struct scsi_target *starget = sdev->sdev_target;
- unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
- &sdev->inquiry[16],
- SCSI_DEVINFO_SPI);
+ blist_flags_t bflags;
+
+ bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
+ &sdev->inquiry[16],
+ SCSI_DEVINFO_SPI);
/* Populate the target capability fields with the values
* gleaned from the device inquiry */
@@ -1007,11 +1010,20 @@ spi_dv_device(struct scsi_device *sdev)
u8 *buffer;
const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
+ /*
+ * Because this function and the power management code both call
+ * scsi_device_quiesce(), it is not safe to perform domain validation
+ * while suspend or resume is in progress. Hence the
+ * lock/unlock_system_sleep() calls.
+ */
+ lock_system_sleep();
+
if (unlikely(spi_dv_in_progress(starget)))
- return;
+ goto unlock;
if (unlikely(scsi_device_get(sdev)))
- return;
+ goto unlock;
+
spi_dv_in_progress(starget) = 1;
buffer = kzalloc(len, GFP_KERNEL);
@@ -1047,6 +1059,8 @@ spi_dv_device(struct scsi_device *sdev)
out_put:
spi_dv_in_progress(starget) = 0;
scsi_device_put(sdev);
+unlock:
+ unlock_system_sleep();
}
EXPORT_SYMBOL(spi_dv_device);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 24fe68522716..ce756d575aff 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1312,6 +1312,7 @@ static int sd_init_command(struct scsi_cmnd *cmd)
static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
struct request *rq = SCpnt->request;
+ u8 *cmnd;
if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
sd_zbc_write_unlock_zone(SCpnt);
@@ -1320,9 +1321,10 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
__free_page(rq->special_vec.bv_page);
if (SCpnt->cmnd != scsi_req(rq)->cmd) {
- mempool_free(SCpnt->cmnd, sd_cdb_pool);
+ cmnd = SCpnt->cmnd;
SCpnt->cmnd = NULL;
SCpnt->cmd_len = 0;
+ mempool_free(cmnd, sd_cdb_pool);
}
}
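
The reorder above detaches the pointer before freeing it, closing the window
in which a concurrent reader could follow SCpnt->cmnd into freed memory. The
same clear-then-free ordering in a standalone sketch (illustration only):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct cmd { unsigned char *cmnd; int cmd_len; };

	static void uninit(struct cmd *c)
	{
		unsigned char *tmp = c->cmnd;

		c->cmnd = NULL;		/* detach first ... */
		c->cmd_len = 0;
		free(tmp);		/* ... free last */
	}

	int main(void)
	{
		struct cmd c = { malloc(16), 16 };

		memset(c.cmnd, 0, 16);
		uninit(&c);
		printf("cmnd=%p len=%d\n", (void *)c.cmnd, c.cmd_len);
		return 0;
	}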
@@ -2170,7 +2172,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
}
/* Wait 1 second for next try */
msleep(1000);
- printk(".");
+ printk(KERN_CONT ".");
/*
* Wait for USB flash devices with slow firmware.
@@ -2200,9 +2202,9 @@ sd_spinup_disk(struct scsi_disk *sdkp)
if (spintime) {
if (scsi_status_is_good(the_result))
- printk("ready\n");
+ printk(KERN_CONT "ready\n");
else
- printk("not responding...\n");
+ printk(KERN_CONT "not responding...\n");
}
}
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 11826c5c2dd4..62f04c0511cf 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -615,13 +615,16 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
}
static void ses_match_to_enclosure(struct enclosure_device *edev,
- struct scsi_device *sdev)
+ struct scsi_device *sdev,
+ int refresh)
{
+ struct scsi_device *edev_sdev = to_scsi_device(edev->edev.parent);
struct efd efd = {
.addr = 0,
};
- ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
+ if (refresh)
+ ses_enclosure_data_process(edev, edev_sdev, 0);
if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
efd.addr = sas_get_address(sdev);
@@ -652,7 +655,7 @@ static int ses_intf_add(struct device *cdev,
struct enclosure_device *prev = NULL;
while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
- ses_match_to_enclosure(edev, sdev);
+ ses_match_to_enclosure(edev, sdev, 1);
prev = edev;
}
return -ENODEV;
@@ -768,7 +771,7 @@ page2_not_supported:
shost_for_each_device(tmp_sdev, sdev->host) {
if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev))
continue;
- ses_match_to_enclosure(edev, tmp_sdev);
+ ses_match_to_enclosure(edev, tmp_sdev, 0);
}
return 0;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index f098877eed4a..0c434453aab3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1140,10 +1140,10 @@ static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned lon
}
#endif
-static unsigned int
+static __poll_t
sg_poll(struct file *filp, poll_table * wait)
{
- unsigned int res = 0;
+ __poll_t res = 0;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
@@ -1174,7 +1174,7 @@ sg_poll(struct file *filp, poll_table * wait)
} else if (count < SG_MAX_QUEUE)
res |= POLLOUT | POLLWRNORM;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
- "sg_poll: res=0x%x\n", (int) res));
+ "sg_poll: res=0x%x\n", (__force u32) res));
return res;
}
diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
index 0f42a225a664..e6b779930230 100644
--- a/drivers/scsi/smartpqi/Makefile
+++ b/drivers/scsi/smartpqi/Makefile
@@ -1,3 +1,3 @@
ccflags-y += -I.
-obj-m += smartpqi.o
+obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index b141d7641a2e..6c399480783d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4712,7 +4712,7 @@ static ssize_t read_byte_cnt_show(struct device *dev,
static DEVICE_ATTR_RO(read_byte_cnt);
/**
- * read_us_show - return read us - overall time spent waiting on reads in ns.
+ * read_ns_show - return read ns - overall time spent waiting on reads in ns.
* @dev: struct device
* @attr: attribute structure
* @buf: buffer to return formatted data in
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 1b06cf0375dc..40fc7a590e81 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -953,10 +953,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
case TEST_UNIT_READY:
break;
default:
- set_host_byte(scmnd, DID_TARGET_FAILURE);
+ set_host_byte(scmnd, DID_ERROR);
}
break;
case SRB_STATUS_INVALID_LUN:
+ set_host_byte(scmnd, DID_NO_CONNECT);
do_work = true;
process_err_fn = storvsc_remove_lun;
break;
@@ -1833,8 +1834,10 @@ static int storvsc_probe(struct hv_device *device,
fc_host_node_name(host) = stor_device->node_name;
fc_host_port_name(host) = stor_device->port_name;
stor_device->rport = fc_remote_port_add(host, 0, &ids);
- if (!stor_device->rport)
+ if (!stor_device->rport) {
+ ret = -ENOMEM;
goto err_out4;
+ }
}
#endif
return 0;
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 277752b0fc6f..1a1b5d9fe514 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -157,6 +157,8 @@ enum {
#define UTP_TRANSFER_REQ_LIST_READY 0x2
#define UTP_TASK_REQ_LIST_READY 0x4
#define UIC_COMMAND_READY 0x8
+#define HOST_ERROR_INDICATOR 0x10
+#define DEVICE_ERROR_INDICATOR 0x20
#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
@@ -185,6 +187,10 @@ enum {
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
+#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
+#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
+#define UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF 0x20
#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
@@ -192,10 +198,20 @@ enum {
/* UECN - Host UIC Error Code Network Layer 40h */
#define UIC_NETWORK_LAYER_ERROR 0x80000000
#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
+#define UIC_NETWORK_UNSUPPORTED_HEADER_TYPE 0x1
+#define UIC_NETWORK_BAD_DEVICEID_ENC 0x2
+#define UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING 0x4
/* UECT - Host UIC Error Code Transport Layer 44h */
#define UIC_TRANSPORT_LAYER_ERROR 0x80000000
#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
+#define UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE 0x1
+#define UIC_TRANSPORT_UNKNOWN_CPORTID 0x2
+#define UIC_TRANSPORT_NO_CONNECTION_RX 0x4
+#define UIC_TRANSPORT_CONTROLLED_SEGMENT_DROPPING 0x8
+#define UIC_TRANSPORT_BAD_TC 0x10
+#define UIC_TRANSPORT_E2E_CREDIT_OVERFOW 0x20
+#define UIC_TRANSPORT_SAFETY_VALUE_DROPPING 0x40
/* UECDME - Host UIC Error Code DME 48h */
#define UIC_DME_ERROR 0x80000000
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 2a9da2e0ea6b..2ba2b7b47f41 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -803,7 +803,9 @@ static enum wd719x_card_type wd719x_detect_type(struct wd719x *wd)
static int wd719x_board_found(struct Scsi_Host *sh)
{
struct wd719x *wd = shost_priv(sh);
- char *card_types[] = { "Unknown card", "WD7193", "WD7197", "WD7296" };
+ static const char * const card_types[] = {
+ "Unknown card", "WD7193", "WD7197", "WD7296"
+ };
int ret;
INIT_LIST_HEAD(&wd->active_scbs);
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 77fe55ce790c..7dcb14d303eb 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -27,6 +27,8 @@
#define DRIVER_NAME "armada_3700_spi"
+#define A3700_SPI_MAX_SPEED_HZ 100000000
+#define A3700_SPI_MAX_PRESCALE 30
#define A3700_SPI_TIMEOUT 10
/* SPI Register Offsets */
@@ -79,6 +81,7 @@
#define A3700_SPI_BYTE_LEN BIT(5)
#define A3700_SPI_CLK_PRESCALE BIT(0)
#define A3700_SPI_CLK_PRESCALE_MASK (0x1f)
+#define A3700_SPI_CLK_EVEN_OFFS (0x10)
#define A3700_SPI_WFIFO_THRS_BIT 28
#define A3700_SPI_RFIFO_THRS_BIT 24
@@ -183,12 +186,15 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
return 0;
}
-static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi)
+static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi, bool enable)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
- val |= A3700_SPI_FIFO_MODE;
+ if (enable)
+ val |= A3700_SPI_FIFO_MODE;
+ else
+ val &= ~A3700_SPI_FIFO_MODE;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
@@ -220,6 +226,13 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
+ /* Prescaler values over 15 can only be set in steps of 2. Starting
+ * from A3700_SPI_CLK_EVEN_OFFS, the field encodes values from 0 up
+ * to 30; we only use the 16-to-30 part of that range.
+ */
+ if (prescale > 15)
+ prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
+
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
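
For concreteness, a worked example of the even-prescaler encoding introduced above, with hypothetical rates:

/*
 * parent clk = 200 MHz, requested speed = 10 MHz:
 *
 *   prescale = DIV_ROUND_UP(200000000, 10000000) = 20    (> 15)
 *   prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(20, 2)
 *            = 0x10 + 10 = 26 (0x1a)
 *
 * Bit 4 (A3700_SPI_CLK_EVEN_OFFS) selects the even-divider encoding and
 * the low bits carry half the divider, so the value still fits in
 * A3700_SPI_CLK_PRESCALE_MASK (0x1f) and yields a divide-by-20.
 */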
@@ -289,7 +302,7 @@ static int a3700_spi_init(struct a3700_spi *a3700_spi)
a3700_spi_deactivate_cs(a3700_spi, i);
/* Enable FIFO mode */
- a3700_spi_fifo_mode_set(a3700_spi);
+ a3700_spi_fifo_mode_set(a3700_spi, true);
/* Set SPI mode */
a3700_spi_mode_set(a3700_spi, master->mode_bits);
@@ -408,15 +421,20 @@ static void a3700_spi_transfer_setup(struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi;
- unsigned int byte_len;
a3700_spi = spi_master_get_devdata(spi->master);
a3700_spi_clock_set(a3700_spi, xfer->speed_hz);
- byte_len = xfer->bits_per_word >> 3;
+ /* Use 4-byte transfers. Each transfer method has its own way of
+ * handling the remaining bytes of non-4-byte-aligned transfers.
+ */
+ a3700_spi_bytelen_set(a3700_spi, 4);
- a3700_spi_fifo_thres_set(a3700_spi, byte_len);
+ /* Initialize the working buffers */
+ a3700_spi->tx_buf = xfer->tx_buf;
+ a3700_spi->rx_buf = xfer->rx_buf;
+ a3700_spi->buf_len = xfer->len;
}
static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
@@ -483,7 +501,7 @@ static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
u32 val;
while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
- val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
+ val = *(u32 *)a3700_spi->tx_buf;
spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
a3700_spi->buf_len -= 4;
a3700_spi->tx_buf += 4;
@@ -506,9 +524,8 @@ static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi)
while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) {
val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
if (a3700_spi->buf_len >= 4) {
- u32 data = le32_to_cpu(val);
- memcpy(a3700_spi->rx_buf, &data, 4);
+ memcpy(a3700_spi->rx_buf, &val, 4);
a3700_spi->buf_len -= 4;
a3700_spi->rx_buf += 4;
@@ -571,27 +588,26 @@ static int a3700_spi_prepare_message(struct spi_master *master,
if (ret)
return ret;
- a3700_spi_bytelen_set(a3700_spi, 4);
-
a3700_spi_mode_set(a3700_spi, spi->mode);
return 0;
}
-static int a3700_spi_transfer_one(struct spi_master *master,
+static int a3700_spi_transfer_one_fifo(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
int ret = 0, timeout = A3700_SPI_TIMEOUT;
- unsigned int nbits = 0;
+ unsigned int nbits = 0, byte_len;
u32 val;
- a3700_spi_transfer_setup(spi, xfer);
+ /* Make sure we use FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi, true);
- a3700_spi->tx_buf = xfer->tx_buf;
- a3700_spi->rx_buf = xfer->rx_buf;
- a3700_spi->buf_len = xfer->len;
+ /* Configure FIFO thresholds */
+ byte_len = xfer->bits_per_word >> 3;
+ a3700_spi_fifo_thres_set(a3700_spi, byte_len);
if (xfer->tx_buf)
nbits = xfer->tx_nbits;
@@ -607,6 +623,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
a3700_spi_header_set(a3700_spi);
if (xfer->rx_buf) {
+ /* Clear WFIFO, since its last 2 bytes are shifted out during
+ * a read operation
+ */
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, 0);
+
/* Set read data length */
spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG,
a3700_spi->buf_len);
@@ -721,6 +742,63 @@ out:
return ret;
}
+static int a3700_spi_transfer_one_full_duplex(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ u32 val;
+
+ /* Disable FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi, false);
+
+ while (a3700_spi->buf_len) {
+
+ /* When fewer than 4 bytes remain, switch to 1-byte mode; the
+ * byte length is reset after each transfer.
+ */
+ if (a3700_spi->buf_len < 4)
+ a3700_spi_bytelen_set(a3700_spi, 1);
+
+ if (a3700_spi->byte_len == 1)
+ val = *a3700_spi->tx_buf;
+ else
+ val = *(u32 *)a3700_spi->tx_buf;
+
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+
+ /* Wait for all the data to be shifted in / out */
+ while (!(spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG) &
+ A3700_SPI_XFER_DONE))
+ cpu_relax();
+
+ val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
+
+ memcpy(a3700_spi->rx_buf, &val, a3700_spi->byte_len);
+
+ a3700_spi->buf_len -= a3700_spi->byte_len;
+ a3700_spi->tx_buf += a3700_spi->byte_len;
+ a3700_spi->rx_buf += a3700_spi->byte_len;
+
+ }
+
+ spi_finalize_current_transfer(master);
+
+ return 0;
+}
+
+static int a3700_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ a3700_spi_transfer_setup(spi, xfer);
+
+ if (xfer->tx_buf && xfer->rx_buf)
+ return a3700_spi_transfer_one_full_duplex(master, spi, xfer);
+
+ return a3700_spi_transfer_one_fifo(master, spi, xfer);
+}
+
static int a3700_spi_unprepare_message(struct spi_master *master,
struct spi_message *message)
{
@@ -770,7 +848,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
master->transfer_one = a3700_spi_transfer_one;
master->unprepare_message = a3700_spi_unprepare_message;
master->set_cs = a3700_spi_set_cs;
- master->flags = SPI_MASTER_HALF_DUPLEX;
master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD);
@@ -810,6 +887,11 @@ static int a3700_spi_probe(struct platform_device *pdev)
goto error;
}
+ master->max_speed_hz = min_t(unsigned long, A3700_SPI_MAX_SPEED_HZ,
+ clk_get_rate(spi->clk));
+ master->min_speed_hz = DIV_ROUND_UP(clk_get_rate(spi->clk),
+ A3700_SPI_MAX_PRESCALE);
+
ret = a3700_spi_init(spi);
if (ret)
goto error_clk;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index f95da364c283..4a11fc0d4136 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -291,6 +291,10 @@ struct atmel_spi {
struct spi_transfer *current_transfer;
int current_remaining_bytes;
int done_status;
+ dma_addr_t dma_addr_rx_bbuf;
+ dma_addr_t dma_addr_tx_bbuf;
+ void *addr_rx_bbuf;
+ void *addr_tx_bbuf;
struct completion xfer_completion;
@@ -436,6 +440,11 @@ static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
spin_unlock_irqrestore(&as->lock, as->flags);
}
+static inline bool atmel_spi_is_vmalloc_xfer(struct spi_transfer *xfer)
+{
+ return is_vmalloc_addr(xfer->tx_buf) || is_vmalloc_addr(xfer->rx_buf);
+}
+
static inline bool atmel_spi_use_dma(struct atmel_spi *as,
struct spi_transfer *xfer)
{
@@ -448,7 +457,12 @@ static bool atmel_spi_can_dma(struct spi_master *master,
{
struct atmel_spi *as = spi_master_get_devdata(master);
- return atmel_spi_use_dma(as, xfer);
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
+ return atmel_spi_use_dma(as, xfer) &&
+ !atmel_spi_is_vmalloc_xfer(xfer);
+ else
+ return atmel_spi_use_dma(as, xfer);
+
}
static int atmel_spi_dma_slave_config(struct atmel_spi *as,
@@ -594,6 +608,11 @@ static void dma_callback(void *data)
struct spi_master *master = data;
struct atmel_spi *as = spi_master_get_devdata(master);
+ if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ memcpy(as->current_transfer->rx_buf, as->addr_rx_bbuf,
+ as->current_transfer->len);
+ }
complete(&as->xfer_completion);
}
@@ -744,17 +763,41 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
goto err_exit;
/* Send both scatterlists */
- rxdesc = dmaengine_prep_slave_sg(rxchan,
- xfer->rx_sg.sgl, xfer->rx_sg.nents,
- DMA_FROM_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (atmel_spi_is_vmalloc_xfer(xfer) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ rxdesc = dmaengine_prep_slave_single(rxchan,
+ as->dma_addr_rx_bbuf,
+ xfer->len,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ } else {
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
+ xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ }
if (!rxdesc)
goto err_dma;
- txdesc = dmaengine_prep_slave_sg(txchan,
- xfer->tx_sg.sgl, xfer->tx_sg.nents,
- DMA_TO_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (atmel_spi_is_vmalloc_xfer(xfer) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ memcpy(as->addr_tx_bbuf, xfer->tx_buf, xfer->len);
+ txdesc = dmaengine_prep_slave_single(txchan,
+ as->dma_addr_tx_bbuf,
+ xfer->len, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ } else {
+ txdesc = dmaengine_prep_slave_sg(txchan,
+ xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ }
if (!txdesc)
goto err_dma;
@@ -1426,27 +1469,7 @@ static void atmel_get_caps(struct atmel_spi *as)
as->caps.is_spi2 = version > 0x121;
as->caps.has_wdrbt = version >= 0x210;
-#ifdef CONFIG_SOC_SAM_V4_V5
- /*
- * Atmel SoCs based on ARM9 (SAM9x) cores should not use spi_map_buf()
- * since this later function tries to map buffers with dma_map_sg()
- * even if they have not been allocated inside DMA-safe areas.
- * On SoCs based on Cortex A5 (SAMA5Dx), it works anyway because for
- * those ARM cores, the data cache follows the PIPT model.
- * Also the L2 cache controller of SAMA5D2 uses the PIPT model too.
- * In case of PIPT caches, there cannot be cache aliases.
- * However on ARM9 cores, the data cache follows the VIVT model, hence
- * the cache aliases issue can occur when buffers are allocated from
- * DMA-unsafe areas, by vmalloc() for instance, where cache coherency is
- * not taken into account or at least not handled completely (cache
- * lines of aliases are not invalidated).
- * This is not a theorical issue: it was reproduced when trying to mount
- * a UBI file-system on a at91sam9g35ek board.
- */
- as->caps.has_dma_support = false;
-#else
as->caps.has_dma_support = version >= 0x212;
-#endif
as->caps.has_pdc_support = version < 0x212;
}
@@ -1592,6 +1615,30 @@ static int atmel_spi_probe(struct platform_device *pdev)
as->use_pdc = true;
}
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ as->addr_rx_bbuf = dma_alloc_coherent(&pdev->dev,
+ SPI_MAX_DMA_XFER,
+ &as->dma_addr_rx_bbuf,
+ GFP_KERNEL | GFP_DMA);
+ if (!as->addr_rx_bbuf) {
+ as->use_dma = false;
+ } else {
+ as->addr_tx_bbuf = dma_alloc_coherent(&pdev->dev,
+ SPI_MAX_DMA_XFER,
+ &as->dma_addr_tx_bbuf,
+ GFP_KERNEL | GFP_DMA);
+ if (!as->addr_tx_bbuf) {
+ as->use_dma = false;
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_rx_bbuf,
+ as->dma_addr_rx_bbuf);
+ }
+ }
+ if (!as->use_dma)
+ dev_info(master->dev.parent,
+ " can not allocate dma coherent memory\n");
+ }
+
if (as->caps.has_dma_support && !as->use_dma)
dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
@@ -1661,12 +1708,20 @@ static int atmel_spi_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
/* reset the hardware and block queue progress */
- spin_lock_irq(&as->lock);
if (as->use_dma) {
atmel_spi_stop_dma(master);
atmel_spi_release_dma(master);
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_tx_bbuf,
+ as->dma_addr_tx_bbuf);
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_rx_bbuf,
+ as->dma_addr_rx_bbuf);
+ }
}
+ spin_lock_irq(&as->lock);
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
spi_readl(as, SR);
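
The spi-atmel change above re-enables DMA on ARM9 (VIVT cache) parts by bouncing vmalloc'd buffers through dma_alloc_coherent() memory rather than mapping them directly. A condensed sketch of the TX side of that idea — prep_tx and its parameters are illustrative, not the driver's actual helpers:

#include <linux/dmaengine.h>
#include <linux/mm.h>		/* is_vmalloc_addr() */
#include <linux/spi/spi.h>
#include <linux/string.h>

static struct dma_async_tx_descriptor *
prep_tx(struct dma_chan *chan, void *bounce_cpu, dma_addr_t bounce_dma,
	struct spi_transfer *xfer)
{
	if (is_vmalloc_addr(xfer->tx_buf)) {
		/* Stage through the coherent bounce buffer: no cache
		 * aliasing, at the cost of one memcpy.
		 */
		memcpy(bounce_cpu, xfer->tx_buf, xfer->len);
		return dmaengine_prep_slave_single(chan, bounce_dma,
				xfer->len, DMA_TO_DEVICE,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}
	/* DMA-safe buffer: map the scatterlist directly, as before */
	return dmaengine_prep_slave_sg(chan, xfer->tx_sg.sgl,
			xfer->tx_sg.nents, DMA_TO_DEVICE,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

The RX side mirrors this, copying out of the bounce buffer in the DMA completion callback.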
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c
index 6e409eabe1c9..d02ceb7a29d1 100644
--- a/drivers/spi/spi-bcm53xx.c
+++ b/drivers/spi/spi-bcm53xx.c
@@ -27,8 +27,6 @@ struct bcm53xxspi {
struct bcma_device *core;
struct spi_master *master;
void __iomem *mmio_base;
-
- size_t read_offset;
bool bspi; /* Boot SPI mode with memory mapping */
};
@@ -172,8 +170,6 @@ static void bcm53xxspi_buf_write(struct bcm53xxspi *b53spi, u8 *w_buf,
if (!cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
-
- b53spi->read_offset = len;
}
static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
@@ -182,10 +178,10 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
u32 tmp;
int i;
- for (i = 0; i < b53spi->read_offset + len; i++) {
+ for (i = 0; i < len; i++) {
tmp = B53SPI_CDRAM_CONT | B53SPI_CDRAM_PCS_DISABLE_ALL |
B53SPI_CDRAM_PCS_DSCK;
- if (!cont && i == b53spi->read_offset + len - 1)
+ if (!cont && i == len - 1)
tmp &= ~B53SPI_CDRAM_CONT;
tmp &= ~0x1;
/* Command Register File */
@@ -194,8 +190,7 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
/* Set queue pointers */
bcm53xxspi_write(b53spi, B53SPI_MSPI_NEWQP, 0);
- bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP,
- b53spi->read_offset + len - 1);
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP, len - 1);
if (cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 1);
@@ -214,13 +209,11 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
for (i = 0; i < len; ++i) {
- int offset = b53spi->read_offset + i;
+ u16 reg = B53SPI_MSPI_RXRAM + 4 * (1 + i * 2);
/* Data stored in the transmit register file LSB */
- r_buf[i] = (u8)bcm53xxspi_read(b53spi, B53SPI_MSPI_RXRAM + 4 * (1 + offset * 2));
+ r_buf[i] = (u8)bcm53xxspi_read(b53spi, reg);
}
-
- b53spi->read_offset = 0;
}
static int bcm53xxspi_transfer_one(struct spi_master *master,
@@ -238,7 +231,8 @@ static int bcm53xxspi_transfer_one(struct spi_master *master,
left = t->len;
while (left) {
size_t to_write = min_t(size_t, 16, left);
- bool cont = left - to_write > 0;
+ bool cont = !spi_transfer_is_last(master, t) ||
+ left - to_write > 0;
bcm53xxspi_buf_write(b53spi, buf, to_write, cont);
left -= to_write;
@@ -250,9 +244,9 @@ static int bcm53xxspi_transfer_one(struct spi_master *master,
buf = (u8 *)t->rx_buf;
left = t->len;
while (left) {
- size_t to_read = min_t(size_t, 16 - b53spi->read_offset,
- left);
- bool cont = left - to_read > 0;
+ size_t to_read = min_t(size_t, 16, left);
+ bool cont = !spi_transfer_is_last(master, t) ||
+ left - to_read > 0;
bcm53xxspi_buf_read(b53spi, buf, to_read, cont);
left -= to_read;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 6ddb6ef1fda4..60d59b003aa4 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -945,6 +945,8 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto free_master;
}
+ init_completion(&dspi->done);
+
ret = platform_get_irq(pdev, 0);
if (ret == 0)
ret = -EINVAL;
@@ -1021,8 +1023,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
dspi->get_rx = davinci_spi_rx_buf_u8;
dspi->get_tx = davinci_spi_tx_buf_u8;
- init_completion(&dspi->done);
-
/* Reset In/OUT SPI module */
iowrite32(0, dspi->base + SPIGCR0);
udelay(100);
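
The spi-davinci fix above is the classic request-IRQ ordering bug: the handler may run the moment the IRQ is requested, so everything it touches must be initialized first. A minimal sketch under that assumption — my_spi, my_irq, and my_attach_irq are hypothetical:

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct my_spi {
	struct completion done;
};

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_spi *s = data;

	complete(&s->done);	/* valid only after init_completion() */
	return IRQ_HANDLED;
}

static int my_attach_irq(struct platform_device *pdev, struct my_spi *s,
			 int irq)
{
	init_completion(&s->done);	/* before the handler can fire */
	return devm_request_irq(&pdev->dev, irq, my_irq, 0,
				dev_name(&pdev->dev), s);
}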
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index b217c22ff72f..211cc7d75bf8 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -30,13 +30,11 @@
/* Slave spi_dev related */
struct chip_data {
- u8 cs; /* chip select pin */
u8 tmode; /* TR/TO/RO/EEPROM */
u8 type; /* SPI/SSP/MicroWire */
u8 poll_mode; /* 1 means use poll mode */
- u8 enable_dma;
u16 clk_div; /* baud rate divider */
u32 speed_hz; /* baud rate */
void (*cs_control)(u32 command);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index f652f70cb8db..0630962ce442 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -903,10 +903,9 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
}
static const struct of_device_id fsl_dspi_dt_ids[] = {
- { .compatible = "fsl,vf610-dspi", .data = (void *)&vf610_data, },
- { .compatible = "fsl,ls1021a-v1.0-dspi",
- .data = (void *)&ls1021a_v1_data, },
- { .compatible = "fsl,ls2085a-dspi", .data = (void *)&ls2085a_data, },
+ { .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
+ { .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
+ { .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
@@ -980,7 +979,7 @@ static int dspi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
master->cleanup = dspi_cleanup;
- master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
SPI_BPW_MASK(16);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 79ddefe4180d..6f57592a7f95 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1622,6 +1622,11 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->devtype_data->intctrl(spi_imx, 0);
master->dev.of_node = pdev->dev.of_node;
+ ret = spi_bitbang_start(&spi_imx->bitbang);
+ if (ret) {
+ dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
+ goto out_clk_put;
+ }
/* Request GPIO CS lines, if any */
if (!spi_imx->slave_mode && master->cs_gpios) {
@@ -1640,12 +1645,6 @@ static int spi_imx_probe(struct platform_device *pdev)
}
}
- ret = spi_bitbang_start(&spi_imx->bitbang);
- if (ret) {
- dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
- goto out_clk_put;
- }
-
dev_info(&pdev->dev, "probed\n");
clk_disable(spi_imx->clk_ipg);
@@ -1668,12 +1667,23 @@ static int spi_imx_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ int ret;
spi_bitbang_stop(&spi_imx->bitbang);
+ ret = clk_enable(spi_imx->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(spi_imx->clk_ipg);
+ if (ret) {
+ clk_disable(spi_imx->clk_per);
+ return ret;
+ }
+
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_unprepare(spi_imx->clk_ipg);
- clk_unprepare(spi_imx->clk_per);
+ clk_disable_unprepare(spi_imx->clk_ipg);
+ clk_disable_unprepare(spi_imx->clk_per);
spi_imx_sdma_exit(spi_imx);
spi_master_put(master);
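
The spi-imx remove() fix follows from probe() leaving the clocks prepared but disabled: the final MXC_CSPICTRL write needs them running again, and the closing calls must drop both the enable and the prepare counts. A sketch of that balance — quiesce_controller is a hypothetical helper:

#include <linux/clk.h>
#include <linux/io.h>

static int quiesce_controller(void __iomem *ctrl_reg, struct clk *per,
			      struct clk *ipg)
{
	int ret;

	ret = clk_enable(per);		/* clocks were left prepared */
	if (ret)
		return ret;
	ret = clk_enable(ipg);
	if (ret) {
		clk_disable(per);
		return ret;
	}

	writel(0, ctrl_reg);		/* register access now legal */

	clk_disable_unprepare(ipg);	/* drop enable + prepare counts */
	clk_disable_unprepare(per);
	return 0;
}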
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
index cebfea5faa4b..dafed6280df3 100644
--- a/drivers/spi/spi-jcore.c
+++ b/drivers/spi/spi-jcore.c
@@ -198,8 +198,10 @@ static int jcore_spi_probe(struct platform_device *pdev)
/* Register our spi controller */
err = devm_spi_register_master(&pdev->dev, master);
- if (err)
+ if (err) {
+ clk_disable(clk);
goto exit;
+ }
return 0;
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 7f8429635502..5c82910e3480 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -599,6 +599,7 @@ static int meson_spicc_remove(struct platform_device *pdev)
static const struct of_device_id meson_spicc_of_match[] = {
{ .compatible = "amlogic,meson-gx-spicc", },
+ { .compatible = "amlogic,meson-axg-spicc", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_spicc_of_match);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 8974bb340b3a..deca63e82ff6 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -94,6 +94,7 @@ struct orion_spi {
struct spi_master *master;
void __iomem *base;
struct clk *clk;
+ struct clk *axi_clk;
const struct orion_spi_dev *devdata;
struct orion_direct_acc direct_access[ORION_NUM_CHIPSELECTS];
@@ -634,6 +635,16 @@ static int orion_spi_probe(struct platform_device *pdev)
if (status)
goto out;
+ /* The following clock is only used by some SoCs */
+ spi->axi_clk = devm_clk_get(&pdev->dev, "axi");
+ if (IS_ERR(spi->axi_clk) &&
+ PTR_ERR(spi->axi_clk) == -EPROBE_DEFER) {
+ status = -EPROBE_DEFER;
+ goto out_rel_clk;
+ }
+ if (!IS_ERR(spi->axi_clk))
+ clk_prepare_enable(spi->axi_clk);
+
tclk_hz = clk_get_rate(spi->clk);
/*
@@ -658,7 +669,7 @@ static int orion_spi_probe(struct platform_device *pdev)
spi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(spi->base)) {
status = PTR_ERR(spi->base);
- goto out_rel_clk;
+ goto out_rel_axi_clk;
}
/* Scan all SPI devices of this controller for direct mapped devices */
@@ -696,7 +707,7 @@ static int orion_spi_probe(struct platform_device *pdev)
PAGE_SIZE);
if (!spi->direct_access[cs].vaddr) {
status = -ENOMEM;
- goto out_rel_clk;
+ goto out_rel_axi_clk;
}
spi->direct_access[cs].size = PAGE_SIZE;
@@ -724,6 +735,8 @@ static int orion_spi_probe(struct platform_device *pdev)
out_rel_pm:
pm_runtime_disable(&pdev->dev);
+out_rel_axi_clk:
+ clk_disable_unprepare(spi->axi_clk);
out_rel_clk:
clk_disable_unprepare(spi->clk);
out:
@@ -738,6 +751,7 @@ static int orion_spi_remove(struct platform_device *pdev)
struct orion_spi *spi = spi_master_get_devdata(master);
pm_runtime_get_sync(&pdev->dev);
+ clk_disable_unprepare(spi->axi_clk);
clk_disable_unprepare(spi->clk);
spi_unregister_master(master);
@@ -754,6 +768,7 @@ static int orion_spi_runtime_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct orion_spi *spi = spi_master_get_devdata(master);
+ clk_disable_unprepare(spi->axi_clk);
clk_disable_unprepare(spi->clk);
return 0;
}
@@ -763,6 +778,8 @@ static int orion_spi_runtime_resume(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct orion_spi *spi = spi_master_get_devdata(master);
+ if (!IS_ERR(spi->axi_clk))
+ clk_prepare_enable(spi->axi_clk);
return clk_prepare_enable(spi->clk);
}
#endif
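
The "axi" clock handling above is the pre-devm_clk_get_optional() idiom for an optional clock: a missing clock is tolerated, but -EPROBE_DEFER from the provider must still be propagated. A sketch — get_optional_clk is illustrative:

#include <linux/clk.h>
#include <linux/err.h>

static int get_optional_clk(struct device *dev, const char *id,
			    struct clk **clk)
{
	*clk = devm_clk_get(dev, id);
	if (PTR_ERR(*clk) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* provider not ready yet */
	if (IS_ERR(*clk))
		return 0;		/* genuinely absent: ignore */
	return clk_prepare_enable(*clk);
}

The later unconditional clk_disable_unprepare() calls are safe either way, since the clk API treats error and NULL cookies as no-ops.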
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 4cb515a3104c..b0822d1dba29 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1237,7 +1237,7 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
* different chip_info, release previously requested GPIO
*/
if (chip->gpiod_cs) {
- gpio_free(desc_to_gpio(chip->gpiod_cs));
+ gpiod_put(chip->gpiod_cs);
chip->gpiod_cs = NULL;
}
@@ -1417,7 +1417,7 @@ static void cleanup(struct spi_device *spi)
if (drv_data->ssp_type != CE4100_SSP && !drv_data->cs_gpiods &&
chip->gpiod_cs)
- gpio_free(desc_to_gpio(chip->gpiod_cs));
+ gpiod_put(chip->gpiod_cs);
kfree(chip);
}
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 2ce875764ca6..0835a8d88fb8 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -377,8 +377,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
/* Sets SPCMD */
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
- /* Enables SPI function in master mode */
- rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
return 0;
}
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index de7df20f8712..baa3a9fa2638 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1,17 +1,7 @@
-/*
- * Copyright (C) 2009 Samsung Electronics Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2009 Samsung Electronics Co., Ltd.
+// Jaswinder Singh <jassi.brar@samsung.com>
#include <linux/init.h>
#include <linux/module.h>
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index fcd261f98b9f..c5dcfb434a49 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -19,6 +19,7 @@
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -55,9 +56,14 @@ struct sh_msiof_spi_priv {
void *rx_dma_page;
dma_addr_t tx_dma_addr;
dma_addr_t rx_dma_addr;
+ unsigned short unused_ss;
+ bool native_cs_inited;
+ bool native_cs_high;
bool slave_aborted;
};
+#define MAX_SS 3 /* Maximum number of native chip selects */
+
#define TMDR1 0x00 /* Transmit Mode Register 1 */
#define TMDR2 0x04 /* Transmit Mode Register 2 */
#define TMDR3 0x08 /* Transmit Mode Register 3 */
@@ -91,6 +97,8 @@ struct sh_msiof_spi_priv {
#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
/* TMDR1 */
#define TMDR1_PCON 0x40000000 /* Transfer Signal Connection */
+#define TMDR1_SYNCCH_MASK 0xc000000 /* Synchronization Signal Channel Select */
+#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
/* TMDR2 and RMDR2 */
#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
@@ -324,7 +332,7 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
return val;
}
-static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
+static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
u32 cpol, u32 cpha,
u32 tx_hi_z, u32 lsb_first, u32 cs_high)
{
@@ -342,10 +350,13 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
tmp |= lsb_first << MDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
- if (spi_controller_is_slave(p->master))
+ if (spi_controller_is_slave(p->master)) {
sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
- else
- sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
+ } else {
+ sh_msiof_write(p, TMDR1,
+ tmp | MDR1_TRMD | TMDR1_PCON |
+ (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
+ }
if (p->master->flags & SPI_MASTER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
@@ -528,8 +539,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
{
struct device_node *np = spi->master->dev.of_node;
struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
-
- pm_runtime_get_sync(&p->pdev->dev);
+ u32 clr, set, tmp;
if (!np) {
/*
@@ -539,19 +549,31 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
spi->cs_gpio = (uintptr_t)spi->controller_data;
}
- /* Configure pins before deasserting CS */
- sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
- !!(spi->mode & SPI_CPHA),
- !!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST),
- !!(spi->mode & SPI_CS_HIGH));
+ if (gpio_is_valid(spi->cs_gpio)) {
+ gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ return 0;
+ }
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ if (spi_controller_is_slave(p->master))
+ return 0;
+ if (p->native_cs_inited &&
+ (p->native_cs_high == !!(spi->mode & SPI_CS_HIGH)))
+ return 0;
+ /* Configure native chip select mode/polarity early */
+ clr = MDR1_SYNCMD_MASK;
+ set = MDR1_TRMD | TMDR1_PCON | MDR1_SYNCMD_SPI;
+ if (spi->mode & SPI_CS_HIGH)
+ clr |= BIT(MDR1_SYNCAC_SHIFT);
+ else
+ set |= BIT(MDR1_SYNCAC_SHIFT);
+ pm_runtime_get_sync(&p->pdev->dev);
+ tmp = sh_msiof_read(p, TMDR1) & ~clr;
+ sh_msiof_write(p, TMDR1, tmp | set);
pm_runtime_put(&p->pdev->dev);
-
+ p->native_cs_high = spi->mode & SPI_CS_HIGH;
+ p->native_cs_inited = true;
return 0;
}
@@ -560,13 +582,20 @@ static int sh_msiof_prepare_message(struct spi_master *master,
{
struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
const struct spi_device *spi = msg->spi;
+ u32 ss, cs_high;
/* Configure pins before asserting CS */
- sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ if (gpio_is_valid(spi->cs_gpio)) {
+ ss = p->unused_ss;
+ cs_high = p->native_cs_high;
+ } else {
+ ss = spi->chip_select;
+ cs_high = !!(spi->mode & SPI_CS_HIGH);
+ }
+ sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL),
!!(spi->mode & SPI_CPHA),
!!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST),
- !!(spi->mode & SPI_CS_HIGH));
+ !!(spi->mode & SPI_LSB_FIRST), cs_high);
return 0;
}
@@ -784,11 +813,21 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
goto stop_dma;
}
- /* wait for tx fifo to be emptied / rx fifo to be filled */
+ /* wait for tx/rx DMA completion */
ret = sh_msiof_wait_for_completion(p);
if (ret)
goto stop_reset;
+ if (!rx) {
+ reinit_completion(&p->done);
+ sh_msiof_write(p, IER, IER_TEOFE);
+
+ /* wait for tx fifo to be emptied */
+ ret = sh_msiof_wait_for_completion(p);
+ if (ret)
+ goto stop_reset;
+ }
+
/* clear status bits */
sh_msiof_reset_str(p);
@@ -912,9 +951,8 @@ static int sh_msiof_transfer_one(struct spi_master *master,
ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
if (ret == -EAGAIN) {
- pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
- dev_driver_string(&p->pdev->dev),
- dev_name(&p->pdev->dev));
+ dev_warn_once(&p->pdev->dev,
+ "DMA not available, falling back to PIO\n");
break;
}
if (ret)
@@ -1071,6 +1109,45 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
}
#endif
+static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
+{
+ struct device *dev = &p->pdev->dev;
+ unsigned int used_ss_mask = 0;
+ unsigned int cs_gpios = 0;
+ unsigned int num_cs, i;
+ int ret;
+
+ ret = gpiod_count(dev, "cs");
+ if (ret <= 0)
+ return 0;
+
+ num_cs = max_t(unsigned int, ret, p->master->num_chipselect);
+ for (i = 0; i < num_cs; i++) {
+ struct gpio_desc *gpiod;
+
+ gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
+ if (!IS_ERR(gpiod)) {
+ cs_gpios++;
+ continue;
+ }
+
+ if (PTR_ERR(gpiod) != -ENOENT)
+ return PTR_ERR(gpiod);
+
+ if (i >= MAX_SS) {
+ dev_err(dev, "Invalid native chip select %d\n", i);
+ return -EINVAL;
+ }
+ used_ss_mask |= BIT(i);
+ }
+ p->unused_ss = ffz(used_ss_mask);
+ if (cs_gpios && p->unused_ss >= MAX_SS) {
+ dev_err(dev, "No unused native chip select available\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
{
@@ -1284,13 +1361,18 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;
+ /* Setup GPIO chip selects */
+ master->num_chipselect = p->info->num_chipselect;
+ ret = sh_msiof_get_cs_gpios(p);
+ if (ret)
+ goto err1;
+
/* init master code */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
master->flags = chipdata->master_flags;
master->bus_num = pdev->id;
master->dev.of_node = pdev->dev.of_node;
- master->num_chipselect = p->info->num_chipselect;
master->setup = sh_msiof_spi_setup;
master->prepare_message = sh_msiof_prepare_message;
master->slave_abort = sh_msiof_slave_abort;
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index bbb1a275f718..f009d76f96b1 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -1072,7 +1072,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
struct sirfsoc_spi *sspi;
struct spi_master *master;
struct resource *mem_res;
- struct sirf_spi_comp_data *spi_comp_data;
+ const struct sirf_spi_comp_data *spi_comp_data;
int irq;
int ret;
const struct of_device_id *match;
@@ -1092,7 +1092,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
sspi->fifo_full_offset = ilog2(sspi->fifo_size);
- spi_comp_data = (struct sirf_spi_comp_data *)match->data;
+ spi_comp_data = match->data;
sspi->regs = spi_comp_data->regs;
sspi->type = spi_comp_data->type;
sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index c5cd635c28f3..41410031f8e9 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -525,7 +525,7 @@ err_free_master:
static int sun4i_spi_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
return 0;
}
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index fb38234249a8..8533f4edd00a 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -541,7 +541,7 @@ err_free_master:
static int sun6i_spi_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
return 0;
}
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index bc7100b93dfc..63fedc49ae9c 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
while (remaining_words) {
int n_words, tx_words, rx_words;
u32 sr;
+ int stalled;
n_words = min(remaining_words, xspi->buffer_size);
@@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
/* Read out all the data from the Rx FIFO */
rx_words = n_words;
+ stalled = 10;
while (rx_words) {
+ if (rx_words == n_words && !(stalled--) &&
+ !(sr & XSPI_SR_TX_EMPTY_MASK) &&
+ (sr & XSPI_SR_RX_EMPTY_MASK)) {
+ dev_err(&spi->dev,
+ "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
+ xspi_init_hw(xspi);
+ return -EIO;
+ }
+
if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
xilinx_spi_rx(xspi);
rx_words--;
@@ -370,6 +381,7 @@ static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
}
static const struct of_device_id xilinx_spi_of_match[] = {
+ { .compatible = "xlnx,axi-quad-spi-1.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.b", },
{}
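
The xilinx stall check above bounds an otherwise unbounded RX FIFO poll loop. The shape of the pattern, reduced to its essentials — ready and recover are placeholder callbacks, not driver functions:

#include <linux/errno.h>
#include <linux/types.h>

#define MY_STALL_RETRIES 10

static int poll_with_stall_check(bool (*ready)(void *ctx),
				 void (*recover)(void *ctx), void *ctx)
{
	int tries = MY_STALL_RETRIES;

	while (!ready(ctx)) {
		if (tries-- <= 0) {
			recover(ctx);	/* e.g. reinitialize the core */
			return -EIO;
		}
	}
	return 0;
}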
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index d8e4219c2324..ee18428a051f 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -3,10 +3,7 @@ config SSB_POSSIBLE
depends on HAS_IOMEM && HAS_DMA
default y
-menu "Sonics Silicon Backplane"
- depends on SSB_POSSIBLE
-
-config SSB
+menuconfig SSB
tristate "Sonics Silicon Backplane support"
depends on SSB_POSSIBLE
help
@@ -21,6 +18,8 @@ config SSB
If unsure, say N.
+if SSB
+
# Common SPROM support routines
config SSB_SPROM
bool
@@ -32,7 +31,7 @@ config SSB_BLOCKIO
config SSB_PCIHOST_POSSIBLE
bool
- depends on SSB && (PCI = y || PCI = SSB)
+ depends on SSB && (PCI = y || PCI = SSB) && PCI_DRIVERS_LEGACY
default y
config SSB_PCIHOST
@@ -185,4 +184,4 @@ config SSB_DRIVER_GPIO
If unsure, say N
-endmenu
+endif # SSB
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 0f695df14c9d..372ce9913e6d 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -765,10 +765,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case ASHMEM_SET_SIZE:
ret = -EINVAL;
+ mutex_lock(&ashmem_mutex);
if (!asma->file) {
ret = 0;
asma->size = (size_t)arg;
}
+ mutex_unlock(&ashmem_mutex);
break;
case ASHMEM_GET_SIZE:
ret = asma->size;
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index a517b2d29f1b..8f6494158d3d 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -37,7 +37,7 @@ config ION_CHUNK_HEAP
config ION_CMA_HEAP
bool "Ion CMA heap support"
- depends on ION && CMA
+ depends on ION && DMA_CMA
help
Choose this option to enable CMA heaps with Ion. This heap is backed
by the Contiguous Memory Allocator (CMA). If your system has these
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index a7d9b0e98572..f480885e346b 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -346,7 +346,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
mutex_lock(&buffer->lock);
list_for_each_entry(a, &buffer->attachments, list) {
dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
- DMA_BIDIRECTIONAL);
+ direction);
}
mutex_unlock(&buffer->lock);
@@ -368,7 +368,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
mutex_lock(&buffer->lock);
list_for_each_entry(a, &buffer->attachments, list) {
dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
- DMA_BIDIRECTIONAL);
+ direction);
}
mutex_unlock(&buffer->lock);
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index dd5545d9990a..86196ffd2faf 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -39,9 +39,15 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
struct ion_cma_heap *cma_heap = to_cma_heap(heap);
struct sg_table *table;
struct page *pages;
+ unsigned long size = PAGE_ALIGN(len);
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned long align = get_order(size);
int ret;
- pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL);
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+ pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
if (!pages)
return -ENOMEM;
@@ -53,7 +59,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
if (ret)
goto free_mem;
- sg_set_page(table->sgl, pages, len, 0);
+ sg_set_page(table->sgl, pages, size, 0);
buffer->priv_virt = pages;
buffer->sg_table = table;
@@ -62,7 +68,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
free_mem:
kfree(table);
err:
- cma_release(cma_heap->cma, pages, buffer->size);
+ cma_release(cma_heap->cma, pages, nr_pages);
return -ENOMEM;
}
@@ -70,9 +76,10 @@ static void ion_cma_free(struct ion_buffer *buffer)
{
struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
struct page *pages = buffer->priv_virt;
+ unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
/* release memory */
- cma_release(cma_heap->cma, pages, buffer->size);
+ cma_release(cma_heap->cma, pages, nr_pages);
/* release sg table */
sg_free_table(buffer->sg_table);
kfree(buffer->sg_table);
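
The ion_cma_heap change corrects a unit bug: cma_alloc() and cma_release() take page counts, not byte counts, and the alignment argument is a page order. A worked example with a hypothetical 3 MiB buffer, 4 KiB pages, and CONFIG_CMA_ALIGNMENT=8:

/*
 *   size     = PAGE_ALIGN(3 << 20)           = 0x300000
 *   nr_pages = size >> PAGE_SHIFT            = 768
 *   align    = get_order(size)               = 10  (1024 pages)
 *   align    = min(align, CMA_ALIGNMENT)     = 8   (256-page alignment)
 *
 * The old code passed `len` (bytes) where pages were expected, in
 * effect requesting PAGE_SIZE times too many pages.
 */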
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index 1799d3f26a9e..2035835b62dc 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -1769,7 +1769,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ahash_req_ctx *state = ahash_request_ctx(req);
u32 tmp;
- int rc;
+ int rc = 0;
memcpy(&tmp, in, sizeof(u32));
if (tmp != CC_EXPORT_MAGIC) {
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index e19e395b0e44..491b54d986eb 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2276,9 +2276,9 @@ done:
return retval;
}
-static unsigned int comedi_poll(struct file *file, poll_table *wait)
+static __poll_t comedi_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct comedi_file *cfp = file->private_data;
struct comedi_device *dev = cfp->dev;
struct comedi_subdevice *s, *s_read;
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index cc18e25103ca..a557be8a5076 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -119,7 +119,7 @@ static void serial2002_tty_read_poll_wait(struct file *f, int timeout)
poll_initwait(&table);
while (1) {
long elapsed;
- int mask;
+ __poll_t mask;
mask = f->f_op->poll(f, &table.pt);
if (mask & (POLLRDNORM | POLLRDBAND | POLLIN |
diff --git a/drivers/staging/irda/net/af_irda.c b/drivers/staging/irda/net/af_irda.c
index b82a47b9ef0b..f1d128b2dae9 100644
--- a/drivers/staging/irda/net/af_irda.c
+++ b/drivers/staging/irda/net/af_irda.c
@@ -1737,12 +1737,12 @@ static int irda_shutdown(struct socket *sock, int how)
/*
* Function irda_poll (file, sock, wait)
*/
-static unsigned int irda_poll(struct file * file, struct socket *sock,
+static __poll_t irda_poll(struct file * file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, sk_sleep(sk), wait);
mask = 0;
diff --git a/drivers/staging/irda/net/irnet/irnet_ppp.c b/drivers/staging/irda/net/irnet/irnet_ppp.c
index 7025dcb853d0..75bf9e34311d 100644
--- a/drivers/staging/irda/net/irnet/irnet_ppp.c
+++ b/drivers/staging/irda/net/irnet/irnet_ppp.c
@@ -419,12 +419,12 @@ irnet_ctrl_read(irnet_socket * ap,
* Poll : called when someone does a select on /dev/irnet.
* Just check if there are new events...
*/
-static inline unsigned int
+static inline __poll_t
irnet_ctrl_poll(irnet_socket * ap,
struct file * file,
poll_table * wait)
{
- unsigned int mask;
+ __poll_t mask;
DENTER(CTRL_TRACE, "(ap=0x%p)\n", ap);
@@ -608,12 +608,12 @@ dev_irnet_read(struct file * file,
/*
* Poll : called when someone does a select on /dev/irnet
*/
-static unsigned int
+static __poll_t
dev_irnet_poll(struct file * file,
poll_table * wait)
{
irnet_socket * ap = file->private_data;
- unsigned int mask;
+ __poll_t mask;
DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n",
file, ap);
diff --git a/drivers/staging/irda/net/irnet/irnet_ppp.h b/drivers/staging/irda/net/irnet/irnet_ppp.h
index 32061442cc8e..e6d5aa2a8aac 100644
--- a/drivers/staging/irda/net/irnet/irnet_ppp.h
+++ b/drivers/staging/irda/net/irnet/irnet_ppp.h
@@ -70,7 +70,7 @@ static ssize_t
char __user *,
size_t,
loff_t *);
-static unsigned int
+static __poll_t
dev_irnet_poll(struct file *,
poll_table *);
static long
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 986c2a40d978..8267119ccc8e 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -487,21 +487,18 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
ksocknal_nid2peerlist(id.nid));
}
- route2 = NULL;
list_for_each_entry(route2, &peer->ksnp_routes, ksnr_list) {
- if (route2->ksnr_ipaddr == ipaddr)
- break;
-
- route2 = NULL;
- }
- if (!route2) {
- ksocknal_add_route_locked(peer, route);
- route->ksnr_share_count++;
- } else {
- ksocknal_route_decref(route);
- route2->ksnr_share_count++;
+ if (route2->ksnr_ipaddr == ipaddr) {
+ /* Route already exists, use the old one */
+ ksocknal_route_decref(route);
+ route2->ksnr_share_count++;
+ goto out;
+ }
}
-
+ /* Route doesn't already exist, add the new one */
+ ksocknal_add_route_locked(peer, route);
+ route->ksnr_share_count++;
+out:
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
return 0;
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
index 2e5d311d2438..db81ed527452 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
@@ -120,6 +120,7 @@ static struct shash_alg alg = {
.cra_name = "adler32",
.cra_driver_name = "adler32-zlib",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index 7d49d4865298..ed46aaca0ba3 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -314,19 +314,20 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
unsigned long then;
struct timeval tv;
+ struct kvec iov = {
+ .iov_base = buffer,
+ .iov_len = nob
+ };
+ struct msghdr msg = {
+ .msg_flags = 0
+ };
LASSERT(nob > 0);
LASSERT(jiffies_left > 0);
- for (;;) {
- struct kvec iov = {
- .iov_base = buffer,
- .iov_len = nob
- };
- struct msghdr msg = {
- .msg_flags = 0
- };
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, nob);
+ for (;;) {
/* Set receive timeout to remaining time */
jiffies_to_timeval(jiffies_left, &tv);
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
@@ -338,7 +339,7 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
}
then = jiffies;
- rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0);
+ rc = sock_recvmsg(sock, &msg, 0);
jiffies_left -= jiffies - then;
if (rc < 0)
@@ -347,10 +348,7 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
if (!rc)
return -ECONNRESET;
- buffer = ((char *)buffer) + rc;
- nob -= rc;
-
- if (!nob)
+ if (!msg_data_left(&msg))
return 0;
if (jiffies_left <= 0)
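
The lib-socket rewrite above moves the read loop onto the iov_iter machinery: the iterator is set up once, sock_recvmsg() advances it, and msg_data_left() replaces the hand-maintained buffer/nob arithmetic. A trimmed sketch of that shape — read_exact is illustrative, and the timeout handling of the real function is omitted:

#include <linux/net.h>
#include <linux/uio.h>
#include <net/sock.h>

static int read_exact(struct socket *sock, void *buf, int nob)
{
	struct kvec iov = { .iov_base = buf, .iov_len = nob };
	struct msghdr msg = { .msg_flags = 0 };
	int rc;

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, nob);

	while (msg_data_left(&msg)) {		/* bytes still wanted */
		rc = sock_recvmsg(sock, &msg, 0);
		if (rc < 0)
			return rc;
		if (!rc)
			return -ECONNRESET;	/* peer closed early */
	}
	return 0;
}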
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 5b2e47c246f3..6f59045be0f9 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -369,8 +369,6 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
}
ctx->pos = pos;
ll_finish_md_op_data(op_data);
- filp->f_version = inode->i_version;
-
out:
if (!rc)
ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1);
@@ -1678,7 +1676,6 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
else
fd->lfd_pos = offset;
file->f_pos = offset;
- file->f_version = 0;
}
ret = offset;
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index b133fd00c08c..0d62fcf016dc 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -1296,15 +1296,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
spin_lock_nested(&dentry->d_lock,
nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
ll_d2d(dentry)->lld_invalid = 1;
- /*
- * We should be careful about dentries created by d_obtain_alias().
- * These dentries are not put in the dentry tree, instead they are
- * linked to sb->s_anon through dentry->d_hash.
- * shrink_dcache_for_umount() shrinks the tree and sb->s_anon list.
- * If we unhashed such a dentry, unmount would not be able to find
- * it and busy inodes would be reported.
- */
- if (d_count(dentry) == 0 && !(dentry->d_flags & DCACHE_DISCONNECTED))
+ if (d_count(dentry) == 0)
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
index dd7596d8763d..6657ebbe068a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
@@ -1255,7 +1255,7 @@ static int atomisp_file_mmap(struct file *file, struct vm_area_struct *vma)
return videobuf_mmap_mapper(&pipe->outq, vma);
}
-static unsigned int atomisp_poll(struct file *file,
+static __poll_t atomisp_poll(struct file *file,
struct poll_table_struct *pt)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 5d3b0e5a1283..4ffff6f8b809 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2174,11 +2174,11 @@ static int bcm2048_fops_release(struct file *file)
return 0;
}
-static unsigned int bcm2048_fops_poll(struct file *file,
+static __poll_t bcm2048_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct bcm2048_device *bdev = video_drvdata(file);
- int retval = 0;
+ __poll_t retval = 0;
poll_wait(file, &bdev->read_queue, pts);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 155e8c758e4b..588743a6fd8a 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -573,7 +573,7 @@ static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
/*
* vpfe_poll() - used for the select/poll system calls
*/
-static unsigned int vpfe_poll(struct file *file, poll_table *wait)
+static __poll_t vpfe_poll(struct file *file, poll_table *wait)
{
struct vpfe_video_device *video = video_drvdata(file);
struct vpfe_device *vpfe_dev = video->vpfe_dev;
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 6bd0717bf76e..a003603af725 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -1197,12 +1197,12 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
}
/* copied from lirc_dev */
-static unsigned int poll(struct file *filep, poll_table *wait)
+static __poll_t poll(struct file *filep, poll_table *wait)
{
struct IR *ir = lirc_get_pdata(filep);
struct IR_rx *rx;
struct lirc_buffer *rbuf = ir->l->buf;
- unsigned int ret;
+ __poll_t ret;
dev_dbg(ir->dev, "%s called\n", __func__);
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 9e2f0421a01e..a3a83424a926 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -1185,7 +1185,7 @@ static int iss_video_release(struct file *file)
return 0;
}
-static unsigned int iss_video_poll(struct file *file, poll_table *wait)
+static __poll_t iss_video_poll(struct file *file, poll_table *wait)
{
struct iss_video_fh *vfh = to_iss_video_fh(file->private_data);
diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/aim-cdev/cdev.c
index 1e5cbc893496..69f530972273 100644
--- a/drivers/staging/most/aim-cdev/cdev.c
+++ b/drivers/staging/most/aim-cdev/cdev.c
@@ -287,10 +287,10 @@ aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
return copied;
}
-static unsigned int aim_poll(struct file *filp, poll_table *wait)
+static __poll_t aim_poll(struct file *filp, poll_table *wait)
{
struct aim_channel *c = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &c->wq, wait);
diff --git a/drivers/staging/most/aim-v4l2/video.c b/drivers/staging/most/aim-v4l2/video.c
index e0748416aee5..7783bc2dd9f5 100644
--- a/drivers/staging/most/aim-v4l2/video.c
+++ b/drivers/staging/most/aim-v4l2/video.c
@@ -209,11 +209,11 @@ static ssize_t aim_vdev_read(struct file *filp, char __user *buf,
return ret;
}
-static unsigned int aim_vdev_poll(struct file *filp, poll_table *wait)
+static __poll_t aim_vdev_poll(struct file *filp, poll_table *wait)
{
struct aim_fh *fh = filp->private_data;
struct most_video_dev *mdev = fh->mdev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
/* only wait if no data is available */
if (!data_ready(mdev))
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 87595c594b12..264ad362d858 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -637,8 +637,7 @@ static int spinand_write_page_hwecc(struct mtd_info *mtd,
int eccsteps = chip->ecc.steps;
enable_hw_ecc = 1;
- chip->write_buf(mtd, p, eccsize * eccsteps);
- return 0;
+ return nand_prog_page_op(chip, page, 0, p, eccsize * eccsteps);
}
static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
@@ -653,7 +652,7 @@ static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
enable_read_hw_ecc = 1;
- chip->read_buf(mtd, p, eccsize * eccsteps);
+ nand_read_page_op(chip, page, 0, p, eccsize * eccsteps);
if (oob_required)
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index e69a2153c999..12c9df9cddde 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -102,7 +102,7 @@ enum modulation rf69_get_modulation(struct spi_device *spi)
currentValue = READ_REG(REG_DATAMODUL);
- switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE >> 3) { // TODO improvement: change 3 to define
+ switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE) {
case DATAMODUL_MODULATION_TYPE_OOK: return OOK;
case DATAMODUL_MODULATION_TYPE_FSK: return FSK;
default: return undefined;
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index d99daf69e501..585c6aa124cd 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -326,10 +326,10 @@ static ssize_t softsynth_write(struct file *fp, const char __user *buf,
return count;
}
-static unsigned int softsynth_poll(struct file *fp, struct poll_table_struct *wait)
+static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
{
unsigned long flags;
- int ret = 0;
+ __poll_t ret = 0;
poll_wait(fp, &speakup_event, wait);
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index a3d4610fbdbe..4c8c6fa0a79f 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -134,7 +134,7 @@ static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
if (copied < 0)
return (int)copied;
- if (__copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
+ if (copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
return -EFAULT;
return copied;
@@ -146,7 +146,7 @@ static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
if (count > image[minor].size_buf)
count = image[minor].size_buf;
- if (__copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
+ if (copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
return -EFAULT;
return vme_master_write(image[minor].resource, image[minor].kern_buf,
@@ -159,7 +159,7 @@ static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
void *image_ptr;
image_ptr = image[minor].kern_buf + *ppos;
- if (__copy_to_user(buf, image_ptr, (unsigned long)count))
+ if (copy_to_user(buf, image_ptr, (unsigned long)count))
return -EFAULT;
return count;
@@ -171,7 +171,7 @@ static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
void *image_ptr;
image_ptr = image[minor].kern_buf + *ppos;
- if (__copy_from_user(image_ptr, buf, (unsigned long)count))
+ if (copy_from_user(image_ptr, buf, (unsigned long)count))
return -EFAULT;
return count;
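The vme_user hunks replace the double-underscore copy helpers with the checked ones; unlike __copy_to_user(), copy_to_user() performs the access_ok() range validation itself, so callers that take the user pointer straight from a read()/write() argument need no separate check. A small sketch under that assumption (mydev_read and kbuf are illustrative):

#include <linux/kernel.h>
#include <linux/uaccess.h>

static char kbuf[64];	/* illustrative kernel-side buffer */

static ssize_t mydev_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	count = min_t(size_t, count, sizeof(kbuf));
	/* copy_to_user() validates the user range and returns the
	 * number of bytes it could NOT copy. */
	if (copy_to_user(buf, kbuf, count))
		return -EFAULT;
	return count;
}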
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index e2bc99980f75..4c44d7bed01a 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -5,6 +5,7 @@ menuconfig TARGET_CORE
select CONFIGFS_FS
select CRC_T10DIF
select BLK_SCSI_REQUEST # only for scsi_command_size_tbl..
+ select SGL_ALLOC
default n
help
Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7c69b4a9694d..0d99b242e82e 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -920,7 +920,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);
- rc = blk_rq_append_bio(req, bio);
+ rc = blk_rq_append_bio(req, &bio);
if (rc) {
pr_err("pSCSI: failed to append bio\n");
goto fail;
@@ -938,7 +938,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
if (bio) {
- rc = blk_rq_append_bio(req, bio);
+ rc = blk_rq_append_bio(req, &bio);
if (rc) {
pr_err("pSCSI: failed to append bio\n");
goto fail;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 58caacd54a3b..c03a78ee26cd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2300,13 +2300,7 @@ queue_full:
void target_free_sgl(struct scatterlist *sgl, int nents)
{
- struct scatterlist *sg;
- int count;
-
- for_each_sg(sgl, sg, nents, count)
- __free_page(sg_page(sg));
-
- kfree(sgl);
+ sgl_free_n_order(sgl, nents, 0);
}
EXPORT_SYMBOL(target_free_sgl);
@@ -2414,42 +2408,10 @@ int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
bool zero_page, bool chainable)
{
- struct scatterlist *sg;
- struct page *page;
- gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
- unsigned int nalloc, nent;
- int i = 0;
-
- nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
- if (chainable)
- nalloc++;
- sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
- if (!sg)
- return -ENOMEM;
+ gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
- sg_init_table(sg, nalloc);
-
- while (length) {
- u32 page_len = min_t(u32, length, PAGE_SIZE);
- page = alloc_page(GFP_KERNEL | zero_flag);
- if (!page)
- goto out;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
- *sgl = sg;
- *nents = nent;
- return 0;
-
-out:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
- return -ENOMEM;
+ *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
+ return *sgl ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(target_alloc_sgl);
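Both helpers now delegate to the generic scatterlist allocator pulled in by the SGL_ALLOC select above. As used here, sgl_alloc_order() takes the total length, the page order, a chainable flag, the gfp mask, and an out-pointer for the entry count, and sgl_free_n_order() is its mirror. A minimal round-trip sketch under those assumptions:

#include <linux/scatterlist.h>

static int demo_sgl_roundtrip(u32 len)	/* illustrative helper */
{
	struct scatterlist *sgl;
	unsigned int nents;

	/* order 0, not chainable, zeroed pages */
	sgl = sgl_alloc_order(len, 0, false, GFP_KERNEL | __GFP_ZERO, &nents);
	if (!sgl)
		return -ENOMEM;

	/* ... dma_map_sg() and use the list ... */

	sgl_free_n_order(sgl, nents, 0);
	return 0;
}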
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index dc63aba092e4..dfd23245f778 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -88,7 +88,6 @@ struct time_in_idle {
* @policy: cpufreq policy.
* @node: list_head to link all cpufreq_cooling_device together.
* @idle_time: idle time stats
- * @plat_get_static_power: callback to calculate the static power
*
* This structure is required for keeping information of each registered
* cpufreq_cooling_device.
@@ -104,7 +103,6 @@ struct cpufreq_cooling_device {
struct cpufreq_policy *policy;
struct list_head node;
struct time_in_idle *idle_time;
- get_static_t plat_get_static_power;
};
static DEFINE_IDA(cpufreq_ida);
@@ -319,60 +317,6 @@ static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
}
/**
- * get_static_power() - calculate the static power consumed by the cpus
- * @cpufreq_cdev: struct &cpufreq_cooling_device for this cpu cdev
- * @tz: thermal zone device in which we're operating
- * @freq: frequency in KHz
- * @power: pointer in which to store the calculated static power
- *
- * Calculate the static power consumed by the cpus described by
- * @cpu_actor running at frequency @freq. This function relies on a
- * platform specific function that should have been provided when the
- * actor was registered. If it wasn't, the static power is assumed to
- * be negligible. The calculated static power is stored in @power.
- *
- * Return: 0 on success, -E* on failure.
- */
-static int get_static_power(struct cpufreq_cooling_device *cpufreq_cdev,
- struct thermal_zone_device *tz, unsigned long freq,
- u32 *power)
-{
- struct dev_pm_opp *opp;
- unsigned long voltage;
- struct cpufreq_policy *policy = cpufreq_cdev->policy;
- struct cpumask *cpumask = policy->related_cpus;
- unsigned long freq_hz = freq * 1000;
- struct device *dev;
-
- if (!cpufreq_cdev->plat_get_static_power) {
- *power = 0;
- return 0;
- }
-
- dev = get_cpu_device(policy->cpu);
- WARN_ON(!dev);
-
- opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
- if (IS_ERR(opp)) {
- dev_warn_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
- freq_hz, PTR_ERR(opp));
- return -EINVAL;
- }
-
- voltage = dev_pm_opp_get_voltage(opp);
- dev_pm_opp_put(opp);
-
- if (voltage == 0) {
- dev_err_ratelimited(dev, "Failed to get voltage for frequency %lu\n",
- freq_hz);
- return -EINVAL;
- }
-
- return cpufreq_cdev->plat_get_static_power(cpumask, tz->passive_delay,
- voltage, power);
-}
-
-/**
* get_dynamic_power() - calculate the dynamic power
* @cpufreq_cdev: &cpufreq_cooling_device for this cdev
* @freq: current frequency
@@ -491,8 +435,8 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
u32 *power)
{
unsigned long freq;
- int i = 0, cpu, ret;
- u32 static_power, dynamic_power, total_load = 0;
+ int i = 0, cpu;
+ u32 total_load = 0;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
u32 *load_cpu = NULL;
@@ -522,22 +466,15 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
cpufreq_cdev->last_load = total_load;
- dynamic_power = get_dynamic_power(cpufreq_cdev, freq);
- ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
- if (ret) {
- kfree(load_cpu);
- return ret;
- }
+ *power = get_dynamic_power(cpufreq_cdev, freq);
if (load_cpu) {
trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
- load_cpu, i, dynamic_power,
- static_power);
+ load_cpu, i, *power);
kfree(load_cpu);
}
- *power = static_power + dynamic_power;
return 0;
}
@@ -561,8 +498,6 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
unsigned long state, u32 *power)
{
unsigned int freq, num_cpus;
- u32 static_power, dynamic_power;
- int ret;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
/* Request state should be less than max_level */
@@ -572,13 +507,9 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
freq = cpufreq_cdev->freq_table[state].frequency;
- dynamic_power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
- ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
- if (ret)
- return ret;
+ *power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
- *power = static_power + dynamic_power;
- return ret;
+ return 0;
}
/**
@@ -606,21 +537,14 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
unsigned int cur_freq, target_freq;
- int ret;
- s32 dyn_power;
- u32 last_load, normalised_power, static_power;
+ u32 last_load, normalised_power;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
cur_freq = cpufreq_quick_get(policy->cpu);
- ret = get_static_power(cpufreq_cdev, tz, cur_freq, &static_power);
- if (ret)
- return ret;
-
- dyn_power = power - static_power;
- dyn_power = dyn_power > 0 ? dyn_power : 0;
+ power = power > 0 ? power : 0;
last_load = cpufreq_cdev->last_load ?: 1;
- normalised_power = (dyn_power * 100) / last_load;
+ normalised_power = (power * 100) / last_load;
target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);
*state = get_level(cpufreq_cdev, target_freq);
@@ -671,8 +595,6 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
* @policy: cpufreq policy
* Normally this should be the same as cpufreq policy->related_cpus.
* @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -684,8 +606,7 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
*/
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy, u32 capacitance,
- get_static_t plat_static_func)
+ struct cpufreq_policy *policy, u32 capacitance)
{
struct thermal_cooling_device *cdev;
struct cpufreq_cooling_device *cpufreq_cdev;
@@ -755,8 +676,6 @@ __cpufreq_cooling_register(struct device_node *np,
}
if (capacitance) {
- cpufreq_cdev->plat_get_static_power = plat_static_func;
-
ret = update_freq_table(cpufreq_cdev, capacitance);
if (ret) {
cdev = ERR_PTR(ret);
@@ -813,13 +732,12 @@ free_cdev:
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
- return __cpufreq_cooling_register(NULL, policy, 0, NULL);
+ return __cpufreq_cooling_register(NULL, policy, 0);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
/**
* of_cpufreq_cooling_register - function to create cpufreq cooling device.
- * @np: a valid struct device_node to the cooling device device tree node
* @policy: cpufreq policy
*
* This interface function registers the cpufreq cooling device with the name
@@ -827,86 +745,45 @@ EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
* cooling devices. Using this API, the cpufreq cooling device will be
* linked to the device tree node provided.
*
- * Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
- */
-struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
-{
- if (!np)
- return ERR_PTR(-EINVAL);
-
- return __cpufreq_cooling_register(np, policy, 0, NULL);
-}
-EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
-
-/**
- * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
- * @policy: cpufreq policy
- * @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
- *
- * This interface function registers the cpufreq cooling device with
- * the name "thermal-cpufreq-%x". This api can support multiple
- * instances of cpufreq cooling devices. Using this function, the
- * cooling device will implement the power extensions by using a
- * simple cpu power model. The cpus must have registered their OPPs
- * using the OPP library.
- *
- * An optional @plat_static_func may be provided to calculate the
- * static power consumed by these cpus. If the platform's static
- * power consumption is unknown or negligible, make it NULL.
- *
- * Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
- */
-struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy, u32 capacitance,
- get_static_t plat_static_func)
-{
- return __cpufreq_cooling_register(NULL, policy, capacitance,
- plat_static_func);
-}
-EXPORT_SYMBOL(cpufreq_power_cooling_register);
-
-/**
- * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
- * @np: a valid struct device_node to the cooling device device tree node
- * @policy: cpufreq policy
- * @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
- *
- * This interface function registers the cpufreq cooling device with
- * the name "thermal-cpufreq-%x". This api can support multiple
- * instances of cpufreq cooling devices. Using this API, the cpufreq
- * cooling device will be linked to the device tree node provided.
* Using this function, the cooling device will implement the power
* extensions by using a simple cpu power model. The cpus must have
* registered their OPPs using the OPP library.
*
- * An optional @plat_static_func may be provided to calculate the
- * static power consumed by these cpus. If the platform's static
- * power consumption is unknown or negligible, make it NULL.
+ * It also takes into account, if the property is present in the policy
+ * CPU node, the static power consumed by the cpu.
*
* Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
+ * and NULL on failure.
*/
struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
+of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
- if (!np)
- return ERR_PTR(-EINVAL);
+ struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
+ struct thermal_cooling_device *cdev = NULL;
+ u32 capacitance = 0;
+
+ if (!np) {
+ pr_err("cpu_cooling: OF node not available for cpu%d\n",
+ policy->cpu);
+ return NULL;
+ }
+
+ if (of_find_property(np, "#cooling-cells", NULL)) {
+ of_property_read_u32(np, "dynamic-power-coefficient",
+ &capacitance);
- return __cpufreq_cooling_register(np, policy, capacitance,
- plat_static_func);
+ cdev = __cpufreq_cooling_register(np, policy, capacitance);
+ if (IS_ERR(cdev)) {
+ pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n",
+ policy->cpu, PTR_ERR(cdev));
+ cdev = NULL;
+ }
+ }
+
+ of_node_put(np);
+ return cdev;
}
-EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
+EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
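With the static-power callback gone, callers pass only the policy; the capacitance is read from the "dynamic-power-coefficient" DT property inside the helper, and failure now yields NULL rather than an ERR_PTR(). A hedged sketch of how a cpufreq driver's ->ready() hook might use the new signature (the driver name and the driver_data stash are illustrative):

static void mydrv_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct thermal_cooling_device *cdev;

	cdev = of_cpufreq_cooling_register(policy);
	if (!cdev)
		return;	/* no #cooling-cells, or registration failed */

	policy->driver_data = cdev;	/* kept for later unregister */
}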
/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 419a7a90bce0..f45bcbc63738 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -339,7 +339,7 @@ static void __ring_interrupt(struct tb_ring *ring)
return;
if (ring->start_poll) {
- __ring_interrupt_mask(ring, false);
+ __ring_interrupt_mask(ring, true);
ring->start_poll(ring->poll_data);
} else {
schedule_work(&ring->work);
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 5131bdc9e765..0edf4fcfea23 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2457,10 +2457,10 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
* Called without the kernel lock held - fine
*/
-static unsigned int gsmld_poll(struct tty_struct *tty, struct file *file,
+static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct gsm_mux *gsm = tty->disc_data;
poll_wait(file, &tty->read_wait, wait);
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index eea7b6cb3cc4..929434ebee50 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -180,7 +180,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr);
static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
-static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait);
static int n_hdlc_tty_open(struct tty_struct *tty);
static void n_hdlc_tty_close(struct tty_struct *tty);
@@ -796,11 +796,11 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
* to caller.
* Returns a bit mask containing info on which ops will not block.
*/
-static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait)
{
struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d)n_hdlc_tty_poll() called\n",__FILE__,__LINE__);
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 30bb0900cd2f..e81d3db8ad63 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -135,7 +135,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
static int r3964_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
-static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
+static __poll_t r3964_poll(struct tty_struct *tty, struct file *file,
struct poll_table_struct *wait);
static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count);
@@ -1216,14 +1216,14 @@ static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old)
}
/* Called without the kernel lock held - fine */
-static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
+static __poll_t r3964_poll(struct tty_struct *tty, struct file *file,
struct poll_table_struct *wait)
{
struct r3964_info *pInfo = tty->disc_data;
struct r3964_client_info *pClient;
struct r3964_message *pMsg = NULL;
unsigned long flags;
- int result = POLLOUT;
+ __poll_t result = POLLOUT;
TRACE_L("POLL");
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 427e0d5d8f13..478a9b40fd03 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1762,7 +1762,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct n_tty_data *ldata = tty->disc_data;
- if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
+ if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) {
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
ldata->line_start = ldata->read_tail;
if (!L_ICANON(tty) || !read_cnt(ldata)) {
@@ -2368,10 +2368,10 @@ break_out:
* Called without the kernel lock held - fine
*/
-static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
+static __poll_t n_tty_poll(struct tty_struct *tty, struct file *file,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
@@ -2425,7 +2425,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
case TIOCINQ:
down_write(&tty->termios_rwsem);
- if (L_ICANON(tty))
+ if (L_ICANON(tty) && !L_EXTPROC(tty))
retval = inq_canon(ldata);
else
retval = read_cnt(ldata);
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 1bef39828ca7..5a135ed46159 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -132,6 +132,33 @@ void serdev_device_close(struct serdev_device *serdev)
}
EXPORT_SYMBOL_GPL(serdev_device_close);
+static void devm_serdev_device_release(struct device *dev, void *dr)
+{
+ serdev_device_close(*(struct serdev_device **)dr);
+}
+
+int devm_serdev_device_open(struct device *dev, struct serdev_device *serdev)
+{
+ struct serdev_device **dr;
+ int ret;
+
+ dr = devres_alloc(devm_serdev_device_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ ret = serdev_device_open(serdev);
+ if (ret) {
+ devres_free(dr);
+ return ret;
+ }
+
+ *dr = serdev;
+ devres_add(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_serdev_device_open);
+
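devm_serdev_device_open() is the textbook devres shape: allocate a release record, attempt the operation, free the record if the operation fails, otherwise register it so serdev_device_close() runs automatically when the owning device is unbound. A sketch of a serdev client probe relying on it (mydrv_probe is hypothetical):

static int mydrv_probe(struct serdev_device *serdev)
{
	int ret;

	/* No matching close needed in remove(): devres handles it. */
	ret = devm_serdev_device_open(&serdev->dev, serdev);
	if (ret)
		return ret;

	serdev_device_set_baudrate(serdev, 115200);
	return 0;
}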
void serdev_device_write_wakeup(struct serdev_device *serdev)
{
complete(&serdev->write_comp);
@@ -225,6 +252,18 @@ void serdev_device_set_flow_control(struct serdev_device *serdev, bool enable)
}
EXPORT_SYMBOL_GPL(serdev_device_set_flow_control);
+int serdev_device_set_parity(struct serdev_device *serdev,
+ enum serdev_parity parity)
+{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
+ if (!ctrl || !ctrl->ops->set_parity)
+ return -ENOTSUPP;
+
+ return ctrl->ops->set_parity(ctrl, parity);
+}
+EXPORT_SYMBOL_GPL(serdev_device_set_parity);
+
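The new call simply forwards to the controller's ->set_parity() op and reports -ENOTSUPP when the controller lacks one, so clients should treat that error as "feature absent" rather than fatal. A sketch, assuming the enum also provides SERDEV_PARITY_EVEN alongside the NONE and ODD values visible in the ttyport implementation below:

	ret = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
	if (ret && ret != -ENOTSUPP)
		return ret;	/* real failure; -ENOTSUPP just means unsupported */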
void serdev_device_wait_until_sent(struct serdev_device *serdev, long timeout)
{
struct serdev_controller *ctrl = serdev->ctrl;
@@ -268,8 +307,8 @@ static int serdev_drv_probe(struct device *dev)
static int serdev_drv_remove(struct device *dev)
{
const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
-
- sdrv->remove(to_serdev_device(dev));
+ if (sdrv->remove)
+ sdrv->remove(to_serdev_device(dev));
return 0;
}
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index 247788a16f0b..1bdf2959ac5c 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -190,6 +190,29 @@ static void ttyport_set_flow_control(struct serdev_controller *ctrl, bool enable
tty_set_termios(tty, &ktermios);
}
+static int ttyport_set_parity(struct serdev_controller *ctrl,
+ enum serdev_parity parity)
+{
+ struct serport *serport = serdev_controller_get_drvdata(ctrl);
+ struct tty_struct *tty = serport->tty;
+ struct ktermios ktermios = tty->termios;
+
+ ktermios.c_cflag &= ~(PARENB | PARODD | CMSPAR);
+ if (parity != SERDEV_PARITY_NONE) {
+ ktermios.c_cflag |= PARENB;
+ if (parity == SERDEV_PARITY_ODD)
+ ktermios.c_cflag |= PARODD;
+ }
+
+ tty_set_termios(tty, &ktermios);
+
+ if ((tty->termios.c_cflag & (PARENB | PARODD | CMSPAR)) !=
+ (ktermios.c_cflag & (PARENB | PARODD | CMSPAR)))
+ return -EINVAL;
+
+ return 0;
+}
+
static void ttyport_wait_until_sent(struct serdev_controller *ctrl, long timeout)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
@@ -227,6 +250,7 @@ static const struct serdev_controller_ops ctrl_ops = {
.open = ttyport_open,
.close = ttyport_close,
.set_flow_control = ttyport_set_flow_control,
+ .set_parity = ttyport_set_parity,
.set_baudrate = ttyport_set_baudrate,
.wait_until_sent = ttyport_wait_until_sent,
.get_tiocm = ttyport_get_tiocm,
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index dc60aeea87d8..00d14d6a76bb 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -144,7 +144,7 @@ static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
ssize_t redirected_tty_write(struct file *, const char __user *,
size_t, loff_t *);
-static unsigned int tty_poll(struct file *, poll_table *);
+static __poll_t tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
@@ -443,7 +443,7 @@ static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
}
/* No kernel lock held - none needed ;) */
-static unsigned int hung_up_tty_poll(struct file *filp, poll_table *wait)
+static __poll_t hung_up_tty_poll(struct file *filp, poll_table *wait)
{
return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
}
@@ -2055,11 +2055,11 @@ retry_open:
* may be re-entered freely by other callers.
*/
-static unsigned int tty_poll(struct file *filp, poll_table *wait)
+static __poll_t tty_poll(struct file *filp, poll_table *wait)
{
struct tty_struct *tty = file_tty(filp);
struct tty_ldisc *ld;
- int ret = 0;
+ __poll_t ret = 0;
if (tty_paranoia_check(tty, file_inode(filp), "tty_poll"))
return 0;
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 85b6634f518a..3e64ccd0040f 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -559,11 +559,11 @@ unlock_out:
return ret;
}
-static unsigned int
+static __poll_t
vcs_poll(struct file *file, poll_table *wait)
{
struct vcs_poll_data *poll = vcs_poll_data_get(file);
- int ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
+ __poll_t ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
if (poll) {
poll_wait(file, &poll->waitq, wait);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index ff04b7f8549f..85bc1aaea4a4 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -496,7 +496,7 @@ static int uio_release(struct inode *inode, struct file *filep)
return ret;
}
-static unsigned int uio_poll(struct file *filep, poll_table *wait)
+static __poll_t uio_poll(struct file *filep, poll_table *wait)
{
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index 3593ce0ec641..880009987460 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -247,7 +247,7 @@ static int ci_hdrc_msm_probe(struct platform_device *pdev)
if (ret)
goto err_mux;
- ulpi_node = of_find_node_by_name(of_node_get(pdev->dev.of_node), "ulpi");
+ ulpi_node = of_get_child_by_name(pdev->dev.of_node, "ulpi");
if (ulpi_node) {
phy_node = of_get_next_available_child(ulpi_node, NULL);
ci->hsic = of_device_is_compatible(phy_node, "qcom,usb-hsic-phy");
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 6c181a625daf..9627ea6ec3ae 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -595,11 +595,11 @@ static int wdm_flush(struct file *file, fl_owner_t id)
return usb_translate_errors(desc->werr);
}
-static unsigned int wdm_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
{
struct wdm_device *desc = file->private_data;
unsigned long flags;
- unsigned int mask = 0;
+ __poll_t mask = 0;
spin_lock_irqsave(&desc->iuspin, flags);
if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index c454885ef4a0..f45e8877771a 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -469,9 +469,9 @@ static int usblp_release(struct inode *inode, struct file *file)
}
/* No kernel lock - fine */
-static unsigned int usblp_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t usblp_poll(struct file *file, struct poll_table_struct *wait)
{
- int ret;
+ __poll_t ret;
unsigned long flags;
struct usblp *usblp = file->private_data;
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 0b8b0f3bdd2f..7ea67a55be10 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1257,10 +1257,10 @@ static int usbtmc_fasync(int fd, struct file *file, int on)
return fasync_helper(fd, file, on, &data->fasync);
}
-static unsigned int usbtmc_poll(struct file *file, poll_table *wait)
+static __poll_t usbtmc_poll(struct file *file, poll_table *wait)
{
struct usbtmc_device_data *data = file->private_data;
- unsigned int mask;
+ __poll_t mask;
mutex_lock(&data->io_mutex);
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 55b198ba629b..c821b4b9647e 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -555,6 +555,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
unsigned iad_num = 0;
memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
+ nintf = nintf_orig = config->desc.bNumInterfaces;
+ config->desc.bNumInterfaces = 0; // Adjusted later
+
if (config->desc.bDescriptorType != USB_DT_CONFIG ||
config->desc.bLength < USB_DT_CONFIG_SIZE ||
config->desc.bLength > size) {
@@ -568,7 +571,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
buffer += config->desc.bLength;
size -= config->desc.bLength;
- nintf = nintf_orig = config->desc.bNumInterfaces;
if (nintf > USB_MAXINTERFACES) {
dev_warn(ddev, "config %d has too many interfaces: %d, "
"using maximum allowed: %d\n",
@@ -1005,7 +1007,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
case USB_SSP_CAP_TYPE:
ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
- USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
+ USB_SSP_SUBLINK_SPEED_ATTRIBS);
if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
dev->bos->ssp_cap = ssp_cap;
break;
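The config.c reordering is a hardening pattern: capture the device-supplied interface count, zero the stored copy immediately, and only write back a validated value later, so no intermediate error path can leave an unchecked count visible to the rest of the stack. In miniature (names match the hunk above; the clamp was already present):

	nintf = nintf_orig = config->desc.bNumInterfaces;
	config->desc.bNumInterfaces = 0;	/* republished after validation */
	if (nintf > USB_MAXINTERFACES)
		nintf = USB_MAXINTERFACES;	/* clamp device-supplied count */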
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index c2cf62b7043a..e2cec448779e 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -622,7 +622,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
}
/* Kernel lock for "lastev" protection */
-static unsigned int usb_device_poll(struct file *file,
+static __poll_t usb_device_poll(struct file *file,
struct poll_table_struct *wait)
{
unsigned int event_count;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index a3fad4ec9870..5e72bf36aca4 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -595,7 +595,7 @@ static void async_completed(struct urb *urb)
as->status = urb->status;
signr = as->signr;
if (signr) {
- memset(&sinfo, 0, sizeof(sinfo));
+ clear_siginfo(&sinfo);
sinfo.si_signo = as->signr;
sinfo.si_errno = as->status;
sinfo.si_code = SI_ASYNCIO;
@@ -2572,11 +2572,11 @@ static long usbdev_compat_ioctl(struct file *file, unsigned int cmd,
#endif
/* No kernel lock - fine */
-static unsigned int usbdev_poll(struct file *file,
+static __poll_t usbdev_poll(struct file *file,
struct poll_table_struct *wait)
{
struct usb_dev_state *ps = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &ps->wait, wait);
if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
@@ -2613,7 +2613,7 @@ static void usbdev_remove(struct usb_device *udev)
wake_up_all(&ps->wait);
list_del_init(&ps->list);
if (ps->discsignr) {
- memset(&sinfo, 0, sizeof(sinfo));
+ clear_siginfo(&sinfo);
sinfo.si_signo = ps->discsignr;
sinfo.si_errno = EPIPE;
sinfo.si_code = SI_ASYNCIO;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index a10b346b9777..4024926c1d68 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -52,10 +52,11 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
- /* Logitech HD Pro Webcams C920, C920-C and C930e */
+ /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech ConferenceCam CC3000e */
{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -149,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
{ USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
+ /* ELSA MicroLink 56K */
+ { USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index f66c94130cac..31749c79045f 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -537,6 +537,7 @@ struct dwc2_core_params {
* 2 - Internal DMA
* @power_optimized Are power optimizations enabled?
* @num_dev_ep Number of device endpoints available
+ * @num_dev_in_eps Number of device IN endpoints available
* @num_dev_perio_in_ep Number of device periodic IN endpoints
* available
* @dev_token_q_depth Device Mode IN Token Sequence Learning Queue
@@ -565,6 +566,7 @@ struct dwc2_core_params {
* 2 - 8 or 16 bits
* @snpsid: Value from SNPSID register
* @dev_ep_dirs: Direction of device endpoints (GHWCFG1)
+ * @g_tx_fifo_size[] Power-on values of TxFIFO sizes
*/
struct dwc2_hw_params {
unsigned op_mode:3;
@@ -586,12 +588,14 @@ struct dwc2_hw_params {
unsigned fs_phy_type:2;
unsigned i2c_enable:1;
unsigned num_dev_ep:4;
+ unsigned num_dev_in_eps:4;
unsigned num_dev_perio_in_ep:4;
unsigned total_fifo_size:16;
unsigned power_optimized:1;
unsigned utmi_phy_data_width:2;
u32 snpsid;
u32 dev_ep_dirs;
+ u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
};
/* Size of control and EP0 buffers */
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 88529d092503..e4c3ce0de5de 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -195,55 +195,18 @@ int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
{
if (hsotg->hw_params.en_multiple_tx_fifo)
/* In dedicated FIFO mode we need count of IN EPs */
- return (dwc2_readl(hsotg->regs + GHWCFG4) &
- GHWCFG4_NUM_IN_EPS_MASK) >> GHWCFG4_NUM_IN_EPS_SHIFT;
+ return hsotg->hw_params.num_dev_in_eps;
else
/* In shared FIFO mode we need count of Periodic IN EPs */
return hsotg->hw_params.num_dev_perio_in_ep;
}
/**
- * dwc2_hsotg_ep_info_size - return Endpoint Info Control block size in DWORDs
- */
-static int dwc2_hsotg_ep_info_size(struct dwc2_hsotg *hsotg)
-{
- int val = 0;
- int i;
- u32 ep_dirs;
-
- /*
- * Don't need additional space for ep info control registers in
- * slave mode.
- */
- if (!using_dma(hsotg)) {
- dev_dbg(hsotg->dev, "Buffer DMA ep info size 0\n");
- return 0;
- }
-
- /*
- * Buffer DMA mode - 1 location per endpoint
- * Descriptor DMA mode - 4 locations per endpoint
- */
- ep_dirs = hsotg->hw_params.dev_ep_dirs;
-
- for (i = 0; i <= hsotg->hw_params.num_dev_ep; i++) {
- val += ep_dirs & 3 ? 1 : 2;
- ep_dirs >>= 2;
- }
-
- if (using_desc_dma(hsotg))
- val = val * 4;
-
- return val;
-}
-
-/**
* dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
* device mode TX FIFOs
*/
int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
{
- int ep_info_size;
int addr;
int tx_addr_max;
u32 np_tx_fifo_size;
@@ -252,8 +215,7 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
hsotg->params.g_np_tx_fifo_size);
/* Get Endpoint Info Control block size in DWORDs. */
- ep_info_size = dwc2_hsotg_ep_info_size(hsotg);
- tx_addr_max = hsotg->hw_params.total_fifo_size - ep_info_size;
+ tx_addr_max = hsotg->hw_params.total_fifo_size;
addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
if (tx_addr_max <= addr)
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index ef73af6e03a9..03fd20f0b496 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -484,8 +484,7 @@ static void dwc2_check_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
}
for (fifo = 1; fifo <= fifo_count; fifo++) {
- dptxfszn = (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) &
- FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
+ dptxfszn = hsotg->hw_params.g_tx_fifo_size[fifo];
if (hsotg->params.g_tx_fifo_size[fifo] < min ||
hsotg->params.g_tx_fifo_size[fifo] > dptxfszn) {
@@ -609,6 +608,7 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
struct dwc2_hw_params *hw = &hsotg->hw_params;
bool forced;
u32 gnptxfsiz;
+ int fifo, fifo_count;
if (hsotg->dr_mode == USB_DR_MODE_HOST)
return;
@@ -617,6 +617,14 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
+ fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
+
+ for (fifo = 1; fifo <= fifo_count; fifo++) {
+ hw->g_tx_fifo_size[fifo] =
+ (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) &
+ FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
+ }
+
if (forced)
dwc2_clear_force_mode(hsotg);
@@ -661,14 +669,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
- /*
- * Host specific hardware parameters. Reading these parameters
- * requires the controller to be in host mode. The mode will
- * be forced, if necessary, to read these values.
- */
- dwc2_get_host_hwparams(hsotg);
- dwc2_get_dev_hwparams(hsotg);
-
/* hwcfg1 */
hw->dev_ep_dirs = hwcfg1;
@@ -711,6 +711,8 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
+ hw->num_dev_in_eps = (hwcfg4 & GHWCFG4_NUM_IN_EPS_MASK) >>
+ GHWCFG4_NUM_IN_EPS_SHIFT;
hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
@@ -719,6 +721,13 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
/* fifo sizes */
hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
GRXFSIZ_DEPTH_SHIFT;
+ /*
+ * Host specific hardware parameters. Reading these parameters
+ * requires the controller to be in host mode. The mode will
+ * be forced, if necessary, to read these values.
+ */
+ dwc2_get_host_hwparams(hsotg);
+ dwc2_get_dev_hwparams(hsotg);
return 0;
}
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index c4a4d7bd2766..7ae0eefc7cc7 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -51,8 +51,10 @@ static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
clk = of_clk_get(np, i);
if (IS_ERR(clk)) {
- while (--i >= 0)
+ while (--i >= 0) {
+ clk_disable_unprepare(simple->clks[i]);
clk_put(simple->clks[i]);
+ }
return PTR_ERR(clk);
}
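The fix makes the unwind mirror the setup exactly: clocks already stored in the array were both prepared/enabled and gotten, so the error path must undo both, newest first. The general shape, with hypothetical names:

	/* on failure at index i, release everything acquired before it */
	while (--i >= 0) {
		clk_disable_unprepare(clks[i]);	/* undo clk_prepare_enable() */
		clk_put(clks[i]);		/* undo of_clk_get() */
	}
	return PTR_ERR(clk);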
@@ -203,6 +205,7 @@ static struct platform_driver dwc3_of_simple_driver = {
.driver = {
.name = "dwc3-of-simple",
.of_match_table = of_dwc3_simple_match,
+ .pm = &dwc3_of_simple_dev_pm_ops,
},
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 981fd986cf82..639dd1b163a0 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -259,7 +259,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
{
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
- u32 timeout = 500;
+ u32 timeout = 1000;
u32 reg;
int cmd_status = 0;
@@ -912,7 +912,7 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
*/
if (speed == USB_SPEED_HIGH) {
struct usb_ep *ep = &dep->endpoint;
- unsigned int mult = ep->mult - 1;
+ unsigned int mult = 2;
unsigned int maxp = usb_endpoint_maxp(ep->desc);
if (length <= (2 * maxp))
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 0a19a76645ad..31cce7805eb2 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -508,8 +508,8 @@ choice
controller, and the relevant drivers for each function declared
by the device.
-endchoice
-
source "drivers/usb/gadget/legacy/Kconfig"
+endchoice
+
endif # USB_GADGET
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index b6cf5ab5a0a1..b540935891af 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -638,10 +638,10 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
return ret;
}
-static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
+static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
{
struct ffs_data *ffs = file->private_data;
- unsigned int mask = POLLWRNORM;
+ __poll_t mask = POLLWRNORM;
int ret;
poll_wait(file, &ffs->ev.waitq, wait);
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index daae35318a3a..a73efb1c47d0 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -413,10 +413,10 @@ release_write_pending_unlocked:
return status;
}
-static unsigned int f_hidg_poll(struct file *file, poll_table *wait)
+static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
{
struct f_hidg *hidg = file->private_data;
- unsigned int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &hidg->read_queue, wait);
poll_wait(file, &hidg->write_queue, wait);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index c5bce8e22983..5780fba620ab 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -73,9 +73,7 @@ struct f_ncm {
struct sk_buff *skb_tx_ndp;
u16 ndp_dgram_count;
bool timer_force_tx;
- struct tasklet_struct tx_tasklet;
struct hrtimer task_timer;
-
bool timer_stopping;
};
@@ -1104,7 +1102,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
/* Delay the timer. */
hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
- HRTIMER_MODE_REL);
+ HRTIMER_MODE_REL_SOFT);
/* Add the datagram position entries */
ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
@@ -1148,17 +1146,15 @@ err:
}
/*
- * This transmits the NTB if there are frames waiting.
+ * The transmit should only be run if no skb data has been sent
+ * for a certain duration.
*/
-static void ncm_tx_tasklet(unsigned long data)
+static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
{
- struct f_ncm *ncm = (void *)data;
-
- if (ncm->timer_stopping)
- return;
+ struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
/* Only send if data is available. */
- if (ncm->skb_tx_data) {
+ if (!ncm->timer_stopping && ncm->skb_tx_data) {
ncm->timer_force_tx = true;
/* XXX This allowance of a NULL skb argument to ndo_start_xmit
@@ -1171,16 +1167,6 @@ static void ncm_tx_tasklet(unsigned long data)
ncm->timer_force_tx = false;
}
-}
-
-/*
- * The transmit should only be run if no skb data has been sent
- * for a certain duration.
- */
-static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
-{
- struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
- tasklet_schedule(&ncm->tx_tasklet);
return HRTIMER_NORESTART;
}
@@ -1513,8 +1499,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm->port.open = ncm_open;
ncm->port.close = ncm_close;
- tasklet_init(&ncm->tx_tasklet, ncm_tx_tasklet, (unsigned long) ncm);
- hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
ncm->task_timer.function = ncm_tx_timeout;
DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
@@ -1623,7 +1608,6 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(c->cdev, "ncm unbind\n");
hrtimer_cancel(&ncm->task_timer);
- tasklet_kill(&ncm->tx_tasklet);
ncm_string_defs[0].id = 0;
usb_free_all_descriptors(f);
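The f_ncm rework removes the tasklet bounce entirely: with HRTIMER_MODE_REL_SOFT the timer callback already runs in softirq context, where the ndo_start_xmit() call is safe, so the work can be done in the callback itself. A minimal sketch of the pattern with hypothetical names:

#include <linux/hrtimer.h>

static struct hrtimer demo_timer;

/* Runs in softirq context because the timer was initialized _SOFT. */
static enum hrtimer_restart demo_timeout(struct hrtimer *t)
{
	/* do the deferred work directly; no tasklet_schedule() hop */
	return HRTIMER_NORESTART;
}

static void demo_arm(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	demo_timer.function = demo_timeout;
	hrtimer_start(&demo_timer, ms_to_ktime(1), HRTIMER_MODE_REL_SOFT);
}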
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index dd607b99eb1d..453578c4af69 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -680,12 +680,12 @@ printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync)
return 0;
}
-static unsigned int
+static __poll_t
printer_poll(struct file *fd, poll_table *wait)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
- int status = 0;
+ __poll_t status = 0;
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 278d50ff1eea..9e33d5206d54 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -193,7 +193,7 @@ int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
* This function implements video queue polling and is intended to be used by
* the device poll handler.
*/
-unsigned int uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file,
+__poll_t uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_table *wait)
{
return vb2_poll(&queue->queue, file, wait);
diff --git a/drivers/usb/gadget/function/uvc_queue.h b/drivers/usb/gadget/function/uvc_queue.h
index 51ee94e5cf2b..f9f65b5c1062 100644
--- a/drivers/usb/gadget/function/uvc_queue.h
+++ b/drivers/usb/gadget/function/uvc_queue.h
@@ -72,7 +72,7 @@ int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);
int uvcg_dequeue_buffer(struct uvc_video_queue *queue,
struct v4l2_buffer *buf, int nonblocking);
-unsigned int uvcg_queue_poll(struct uvc_video_queue *queue,
+__poll_t uvcg_queue_poll(struct uvc_video_queue *queue,
struct file *file, poll_table *wait);
int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma);
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index f3069db6f08e..9a9019625496 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -329,7 +329,7 @@ uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
return uvcg_queue_mmap(&uvc->video.queue, vma);
}
-static unsigned int
+static __poll_t
uvc_v4l2_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 9570bbeced4f..784bf86dad4f 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -13,14 +13,6 @@
# both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG).
#
-menuconfig USB_GADGET_LEGACY
- bool "Legacy USB Gadget Support"
- help
- Legacy USB gadgets are USB gadgets that do not use the USB gadget
- configfs interface.
-
-if USB_GADGET_LEGACY
-
config USB_ZERO
tristate "Gadget Zero (DEVELOPMENT)"
select USB_LIBCOMPOSITE
@@ -487,7 +479,7 @@ endif
# or video class gadget drivers), or specific hardware, here.
config USB_G_WEBCAM
tristate "USB Webcam Gadget"
- depends on VIDEO_DEV
+ depends on VIDEO_V4L2
select USB_LIBCOMPOSITE
select VIDEOBUF2_VMALLOC
select USB_F_UVC
@@ -498,5 +490,3 @@ config USB_G_WEBCAM
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "g_webcam".
-
-endif
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 9343ec436485..05691254d473 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1209,11 +1209,11 @@ dev_release (struct inode *inode, struct file *fd)
return 0;
}
-static unsigned int
+static __poll_t
ep0_poll (struct file *fd, poll_table *wait)
{
struct dev_data *dev = fd->private_data;
- int mask = 0;
+ __poll_t mask = 0;
if (dev->state <= STATE_DEV_OPENED)
return DEFAULT_POLLMASK;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 93eff7dec2f5..1b3efb14aec7 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1147,11 +1147,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
udc = kzalloc(sizeof(*udc), GFP_KERNEL);
if (!udc)
- goto err1;
-
- ret = device_add(&gadget->dev);
- if (ret)
- goto err2;
+ goto err_put_gadget;
device_initialize(&udc->dev);
udc->dev.release = usb_udc_release;
@@ -1160,7 +1156,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
udc->dev.parent = parent;
ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
if (ret)
- goto err3;
+ goto err_put_udc;
+
+ ret = device_add(&gadget->dev);
+ if (ret)
+ goto err_put_udc;
udc->gadget = gadget;
gadget->udc = udc;
@@ -1170,7 +1170,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
ret = device_add(&udc->dev);
if (ret)
- goto err4;
+ goto err_unlist_udc;
usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
udc->vbus = true;
@@ -1178,27 +1178,25 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
/* pick up one of pending gadget drivers */
ret = check_pending_gadget_drivers(udc);
if (ret)
- goto err5;
+ goto err_del_udc;
mutex_unlock(&udc_lock);
return 0;
-err5:
+ err_del_udc:
device_del(&udc->dev);
-err4:
+ err_unlist_udc:
list_del(&udc->list);
mutex_unlock(&udc_lock);
-err3:
- put_device(&udc->dev);
device_del(&gadget->dev);
-err2:
- kfree(udc);
+ err_put_udc:
+ put_device(&udc->dev);
-err1:
+ err_put_gadget:
put_device(&gadget->dev);
return ret;
}
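Beyond reordering the unwind, the rename from err1..err5 to descriptive labels is the preferred kernel idiom: each label names the single step it undoes, so inserting a new step never forces renumbering. The generic shape, with hypothetical step functions:

static int step_a(void);	/* hypothetical setup/teardown steps */
static int step_b(void);
static void undo_a(void);

static int demo_setup(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		goto err_undo_a;

	return 0;

err_undo_a:
	undo_a();		/* undoes exactly step_a() */
	return ret;
}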
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 4f7895dbcf88..e26e685d8a57 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -162,7 +162,7 @@ static void xhci_debugfs_extcap_regset(struct xhci_hcd *xhci, int cap_id,
static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
{
dma_addr_t dma;
- struct xhci_ring *ring = s->private;
+ struct xhci_ring *ring = *(struct xhci_ring **)s->private;
dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
seq_printf(s, "%pad\n", &dma);
@@ -173,7 +173,7 @@ static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
{
dma_addr_t dma;
- struct xhci_ring *ring = s->private;
+ struct xhci_ring *ring = *(struct xhci_ring **)s->private;
dma = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
seq_printf(s, "%pad\n", &dma);
@@ -183,7 +183,7 @@ static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
static int xhci_ring_cycle_show(struct seq_file *s, void *unused)
{
- struct xhci_ring *ring = s->private;
+ struct xhci_ring *ring = *(struct xhci_ring **)s->private;
seq_printf(s, "%d\n", ring->cycle_state);
@@ -346,7 +346,7 @@ static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
}
static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci,
- struct xhci_ring *ring,
+ struct xhci_ring **ring,
const char *name,
struct dentry *parent)
{
@@ -387,7 +387,7 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
epriv->root = xhci_debugfs_create_ring_dir(xhci,
- dev->eps[ep_index].new_ring,
+ &dev->eps[ep_index].new_ring,
epriv->name,
spriv->root);
spriv->eps[ep_index] = epriv;
@@ -423,7 +423,7 @@ void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id)
priv->dev = dev;
dev->debugfs_private = priv;
- xhci_debugfs_create_ring_dir(xhci, dev->eps[0].ring,
+ xhci_debugfs_create_ring_dir(xhci, &dev->eps[0].ring,
"ep00", priv->root);
xhci_debugfs_create_context_files(xhci, priv->root, slot_id);
@@ -488,11 +488,11 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
ARRAY_SIZE(xhci_extcap_dbc),
"reg-ext-dbc");
- xhci_debugfs_create_ring_dir(xhci, xhci->cmd_ring,
+ xhci_debugfs_create_ring_dir(xhci, &xhci->cmd_ring,
"command-ring",
xhci->debugfs_root);
- xhci_debugfs_create_ring_dir(xhci, xhci->event_ring,
+ xhci_debugfs_create_ring_dir(xhci, &xhci->event_ring,
"event-ring",
xhci->debugfs_root);
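Switching the debugfs helpers to struct xhci_ring ** means every show() dereferences the ring slot at read time, so a ring that has been reallocated since the file was created (endpoint reconfiguration, for instance) is still reported correctly. The same indirection in miniature, with illustrative names:

#include <linux/seq_file.h>

struct demo_ring { int cycle_state; };

static int demo_ring_show(struct seq_file *s, void *unused)
{
	/* s->private stores the ADDRESS of the ring pointer, so the
	 * current ring is fetched on every read. */
	struct demo_ring *ring = *(struct demo_ring **)s->private;

	seq_printf(s, "%d\n", ring->cycle_state);
	return 0;
}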
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 15f7d422885f..3a29b32a3bd0 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -971,10 +971,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
return 0;
}
- xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
- if (!xhci->devs[slot_id])
+ dev = kzalloc(sizeof(*dev), flags);
+ if (!dev)
return 0;
- dev = xhci->devs[slot_id];
/* Allocate the (output) device context that will be used in the HC. */
dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
@@ -1015,9 +1014,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
trace_xhci_alloc_virt_device(dev);
+ xhci->devs[slot_id] = dev;
+
return 1;
fail:
- xhci_free_virt_device(xhci, slot_id);
+
+ if (dev->in_ctx)
+ xhci_free_container_ctx(xhci, dev->in_ctx);
+ if (dev->out_ctx)
+ xhci_free_container_ctx(xhci, dev->out_ctx);
+ kfree(dev);
+
return 0;
}
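The xhci-mem change is the publish-after-init pattern: the virt device is built through a local pointer and only stored in the globally visible xhci->devs[] array once fully constructed, and the failure path now tears down the partial object directly instead of calling the generic free routine on a half-initialized slot. In outline, with hypothetical names:

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return 0;
	if (setup_parts(obj))		/* hypothetical init step */
		goto fail;
	table[slot] = obj;		/* visible to others only now */
	return 1;
fail:
	free_parts(obj);		/* frees only what setup allocated */
	kfree(obj);
	return 0;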
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7ef1274ef7f7..1aad89b8aba0 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -178,6 +178,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_BROKEN_STREAMS;
}
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ pdev->device == 0x0014)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0015)
xhci->quirks |= XHCI_RESET_ON_RESUME;
if (pdev->vendor == PCI_VENDOR_ID_VIA)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6eb87c6e4d24..c5cbc685c691 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3112,7 +3112,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
{
u32 maxp, total_packet_count;
- /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
+ /* MTK xHCI 0.96 contains some features from 1.0 */
if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
return ((td_total_len - transferred) >> 10);
@@ -3121,8 +3121,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
trb_buff_len == td_total_len)
return 0;
- /* for MTK xHCI, TD size doesn't include this TRB */
- if (xhci->quirks & XHCI_MTK_HOST)
+ /* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
+ if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
trb_buff_len = 0;
maxp = usb_endpoint_maxp(&urb->ep->desc);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2424d3020ca3..da6dbe3ebd8b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3525,8 +3525,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_slot_ctx *slot_ctx;
int i, ret;
- xhci_debugfs_remove_slot(xhci, udev->slot_id);
-
#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
* We called pm_runtime_get_noresume when the device was attached.
@@ -3555,8 +3553,10 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
}
ret = xhci_disable_slot(xhci, udev->slot_id);
- if (ret)
+ if (ret) {
+ xhci_debugfs_remove_slot(xhci, udev->slot_id);
xhci_free_virt_device(xhci, udev->slot_id);
+ }
}
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index ad3109490c0f..1fa00b35f4ad 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -677,10 +677,10 @@ static int iowarrior_release(struct inode *inode, struct file *file)
return retval;
}
-static unsigned iowarrior_poll(struct file *file, poll_table * wait)
+static __poll_t iowarrior_poll(struct file *file, poll_table * wait)
{
struct iowarrior *dev = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!dev->present)
return POLLERR | POLLHUP;
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 5c1a3b852453..074398c1e410 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -409,10 +409,10 @@ exit:
/**
* ld_usb_poll
*/
-static unsigned int ld_usb_poll(struct file *file, poll_table *wait)
+static __poll_t ld_usb_poll(struct file *file, poll_table *wait)
{
struct ld_usb *dev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dev = file->private_data;
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index c5be6e9e24a5..941c45028828 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -224,7 +224,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
static inline void tower_delete (struct lego_usb_tower *dev);
static int tower_open (struct inode *inode, struct file *file);
static int tower_release (struct inode *inode, struct file *file);
-static unsigned int tower_poll (struct file *file, poll_table *wait);
+static __poll_t tower_poll (struct file *file, poll_table *wait);
static loff_t tower_llseek (struct file *file, loff_t off, int whence);
static void tower_abort_transfers (struct lego_usb_tower *dev);
@@ -509,10 +509,10 @@ static void tower_check_for_read_packet (struct lego_usb_tower *dev)
/**
* tower_poll
*/
-static unsigned int tower_poll (struct file *file, poll_table *wait)
+static __poll_t tower_poll (struct file *file, poll_table *wait)
{
struct lego_usb_tower *dev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dev = file->private_data;
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 465dbf68b463..f723f7b8c9ac 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -279,6 +279,8 @@ static int usb3503_probe(struct usb3503 *hub)
if (gpio_is_valid(hub->gpio_reset)) {
err = devm_gpio_request_one(dev, hub->gpio_reset,
GPIOF_OUT_INIT_LOW, "usb3503 reset");
+ /* Datasheet defines a hardware reset to be at least 100us */
+ usleep_range(100, 10000);
if (err) {
dev_err(dev,
"unable to request GPIO %d as reset pin (%d)\n",
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f6ae753ab99b..cc5b296bff3f 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1004,7 +1004,9 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
break;
case MON_IOCQ_RING_SIZE:
+ mutex_lock(&rp->fetch_lock);
ret = rp->b_size;
+ mutex_unlock(&rp->fetch_lock);
break;
case MON_IOCT_RING_SIZE:
@@ -1189,11 +1191,11 @@ static long mon_bin_compat_ioctl(struct file *file,
}
#endif /* CONFIG_COMPAT */
-static unsigned int
+static __poll_t
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
struct mon_reader_bin *rp = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
if (file->f_mode & FMODE_READ)
@@ -1231,12 +1233,16 @@ static int mon_bin_vma_fault(struct vm_fault *vmf)
unsigned long offset, chunk_idx;
struct page *pageptr;
+ mutex_lock(&rp->fetch_lock);
offset = vmf->pgoff << PAGE_SHIFT;
- if (offset >= rp->b_size)
+ if (offset >= rp->b_size) {
+ mutex_unlock(&rp->fetch_lock);
return VM_FAULT_SIGBUS;
+ }
chunk_idx = offset / CHUNK_SIZE;
pageptr = rp->b_vec[chunk_idx].pg;
get_page(pageptr);
+ mutex_unlock(&rp->fetch_lock);
vmf->page = pageptr;
return 0;
}
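
Both mon_bin hunks above take rp->fetch_lock around readers of the ring geometry, so neither the ioctl nor the fault handler can observe a half-completed resize. A condensed sketch of the pattern, with hypothetical names:

#include <linux/mutex.h>

struct ring {
	struct mutex fetch_lock;	/* also held by the resize path */
	unsigned int b_size;
};

static unsigned int ring_size_read(struct ring *rp)
{
	unsigned int size;

	mutex_lock(&rp->fetch_lock);
	size = rp->b_size;		/* consistent with the buffer vector */
	mutex_unlock(&rp->fetch_lock);

	return size;
}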
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 0397606a211b..6c036de63272 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -284,7 +284,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
del_timer(&musb->dev_timer);
- } else {
+ } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) {
+ /*
+	 * When a babble condition happens, a drvvbus interrupt
+	 * is also generated. Ignore this drvvbus interrupt
+	 * and let the babble interrupt handler recover the
+	 * controller; otherwise, the host-mode flag is lost
+	 * due to the MUSB_DEV_MODE() call below and the babble
+	 * recovery logic will not be called.
+ */
musb->is_active = 0;
MUSB_DEV_MODE(musb);
otg->default_a = 0;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7c6273bf5beb..06d502b3e913 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+ { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -174,6 +175,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ { USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */
{ USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1aba9105b369..fc68952c994a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1013,6 +1013,7 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
+ { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4faa09fe308c..8b4ecd2bd297 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -915,6 +915,12 @@
#define ICPDAS_I7563U_PID 0x0105
/*
+ * Airbus Defence and Space
+ */
+#define AIRBUS_DS_VID 0x1e8e /* Vendor ID */
+#define AIRBUS_DS_P8GR 0x6001 /* Tetra P8GR */
+
+/*
* RT Systems programming cables for various ham radios
*/
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3b3513874cfd..b6320e3be429 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -233,6 +233,8 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Qualcomm's vendor ID */
#define QUECTEL_PRODUCT_UC20 0x9003
#define QUECTEL_PRODUCT_UC15 0x9090
+/* These Yuga products use Qualcomm's vendor ID */
+#define YUGA_PRODUCT_CLM920_NC5 0x9625
#define QUECTEL_VENDOR_ID 0x2c7c
/* These Quectel products use Quectel's vendor ID */
@@ -280,6 +282,7 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_ME910 0x1100
+#define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
@@ -645,6 +648,11 @@ static const struct option_blacklist_info telit_me910_blacklist = {
.reserved = BIT(1) | BIT(3),
};
+static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(3),
+};
+
static const struct option_blacklist_info telit_le910_blacklist = {
.sendsetup = BIT(0),
.reserved = BIT(1) | BIT(2),
@@ -674,6 +682,10 @@ static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
.reserved = BIT(4) | BIT(5),
};
+static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
+ .reserved = BIT(1) | BIT(4),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1178,6 +1190,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ /* Yuga products use Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
+ .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@@ -1244,6 +1259,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = (kernel_ulong_t)&telit_me910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+ .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
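
The new option.c entries rely on the driver's blacklist mechanism: driver_info carries a pointer to an option_blacklist_info whose reserved bits name interface numbers the driver must not claim and whose sendsetup bits name interfaces that need setup messages. A hedged sketch with placeholder IDs (option_blacklist_info is the driver's own structure, used here exactly as in the entries above):

static const struct option_blacklist_info example_blacklist = {
	.sendsetup = BIT(0),		/* interface 0 needs send-setup */
	.reserved  = BIT(1) | BIT(4),	/* never bind interfaces 1 and 4 */
};

static const struct usb_device_id example_ids[] = {
	{ USB_DEVICE(0x1234, 0x5678),	/* hypothetical modem */
	  .driver_info = (kernel_ulong_t)&example_blacklist },
	{ }				/* terminating entry */
};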
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index e3892541a489..613f91add03d 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
{DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */
{DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */
+ {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
+ {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -342,6 +344,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
break;
case 2:
dev_dbg(dev, "NMEA GPS interface found\n");
+ sendsetup = true;
break;
case 3:
dev_dbg(dev, "Modem port found\n");
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 2968046e7c05..f72d045ee9ef 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2100,6 +2100,13 @@ UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA ),
+/* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+ "JMicron",
+ "JMS567",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA),
+
/*
* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
* JMicron responds to USN and several other SCSI ioctls with a
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index d520374a824e..a7d08ae0adad 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -129,6 +129,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
+/* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+ "JMicron",
+ "JMS567",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
@@ -136,6 +143,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Icenowy Zheng <icenowy@aosc.io> */
+UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
+ "Norelsys",
+ "NS1068X",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
"JMicron",
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index a3df8ee82faf..e31a6f204397 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -149,8 +149,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
* step 1?
*/
if (ud->tcp_socket) {
- dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n",
- ud->tcp_socket);
+ dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd);
kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
}
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 4f48b306713f..c31c8402a0c5 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -237,11 +237,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
struct stub_priv *priv;
struct urb *urb;
- dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev);
+ dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
while ((priv = stub_priv_pop(sdev))) {
urb = priv->urb;
- dev_dbg(&sdev->udev->dev, "free urb %p\n", urb);
+ dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
+ priv->seqnum);
usb_kill_urb(urb);
kmem_cache_free(stub_priv_cache, priv);
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 536e037f541f..6c5a59313999 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -211,9 +211,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
continue;
- dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
- priv->urb);
-
/*
* This matched urb is not completed yet (i.e., be in
* flight in usb hcd hardware/driver). Now we are
@@ -252,8 +249,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
ret = usb_unlink_urb(priv->urb);
if (ret != -EINPROGRESS)
dev_err(&priv->urb->dev->dev,
- "failed to unlink a urb %p, ret %d\n",
- priv->urb, ret);
+ "failed to unlink a urb # %lu, ret %d\n",
+ priv->seqnum, ret);
return 0;
}
@@ -322,23 +319,26 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
return priv;
}
-static int get_pipe(struct stub_device *sdev, int epnum, int dir)
+static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
{
struct usb_device *udev = sdev->udev;
struct usb_host_endpoint *ep;
struct usb_endpoint_descriptor *epd = NULL;
+ int epnum = pdu->base.ep;
+ int dir = pdu->base.direction;
+
+ if (epnum < 0 || epnum > 15)
+ goto err_ret;
if (dir == USBIP_DIR_IN)
ep = udev->ep_in[epnum & 0x7f];
else
ep = udev->ep_out[epnum & 0x7f];
- if (!ep) {
- dev_err(&sdev->udev->dev, "no such endpoint?, %d\n",
- epnum);
- BUG();
- }
+ if (!ep)
+ goto err_ret;
epd = &ep->desc;
+
if (usb_endpoint_xfer_control(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndctrlpipe(udev, epnum);
@@ -361,15 +361,31 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
}
if (usb_endpoint_xfer_isoc(epd)) {
+ /* validate packet size and number of packets */
+ unsigned int maxp, packets, bytes;
+
+ maxp = usb_endpoint_maxp(epd);
+ maxp *= usb_endpoint_maxp_mult(epd);
+ bytes = pdu->u.cmd_submit.transfer_buffer_length;
+ packets = DIV_ROUND_UP(bytes, maxp);
+
+ if (pdu->u.cmd_submit.number_of_packets < 0 ||
+ pdu->u.cmd_submit.number_of_packets > packets) {
+ dev_err(&sdev->udev->dev,
+ "CMD_SUBMIT: isoc invalid num packets %d\n",
+ pdu->u.cmd_submit.number_of_packets);
+ return -1;
+ }
if (dir == USBIP_DIR_OUT)
return usb_sndisocpipe(udev, epnum);
else
return usb_rcvisocpipe(udev, epnum);
}
+err_ret:
/* NOT REACHED */
- dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum);
- return 0;
+ dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
+ return -1;
}
static void masking_bogus_flags(struct urb *urb)
@@ -433,7 +449,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
struct stub_priv *priv;
struct usbip_device *ud = &sdev->ud;
struct usb_device *udev = sdev->udev;
- int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
+ int pipe = get_pipe(sdev, pdu);
+
+ if (pipe == -1)
+ return;
priv = stub_priv_alloc(sdev, pdu);
if (!priv)
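
The isochronous check added to get_pipe() bounds the client-supplied number_of_packets by the smallest packet count that could carry the submitted payload. The arithmetic, extracted into a free-standing helper for illustration (the helper itself is not part of the patch):

#include <linux/kernel.h>
#include <linux/usb/ch9.h>

static bool isoc_packets_valid(const struct usb_endpoint_descriptor *epd,
			       unsigned int bytes, int number_of_packets)
{
	unsigned int maxp = usb_endpoint_maxp(epd) *
			    usb_endpoint_maxp_mult(epd);

	/* A payload of "bytes" never needs more than this many packets. */
	return number_of_packets >= 0 &&
	       number_of_packets <= DIV_ROUND_UP(bytes, maxp);
}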
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index b18bce96c212..f0ec41a50cbc 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -88,7 +88,7 @@ void stub_complete(struct urb *urb)
/* link a urb to the queue of tx. */
spin_lock_irqsave(&sdev->priv_lock, flags);
if (sdev->ud.tcp_socket == NULL) {
- usbip_dbg_stub_tx("ignore urb for closed connection %p", urb);
+ usbip_dbg_stub_tx("ignore urb for closed connection\n");
/* It will be freed in stub_device_cleanup_urbs(). */
} else if (priv->unlinking) {
stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
@@ -167,6 +167,13 @@ static int stub_send_ret_submit(struct stub_device *sdev)
memset(&pdu_header, 0, sizeof(pdu_header));
memset(&msg, 0, sizeof(msg));
+ if (urb->actual_length > 0 && !urb->transfer_buffer) {
+ dev_err(&sdev->udev->dev,
+ "urb: actual_length %d transfer_buffer null\n",
+ urb->actual_length);
+ return -1;
+ }
+
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
iovnum = 2 + urb->number_of_packets;
else
@@ -183,8 +190,8 @@ static int stub_send_ret_submit(struct stub_device *sdev)
/* 1. setup usbip_header */
setup_ret_submit_pdu(&pdu_header, urb);
- usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
- pdu_header.base.seqnum, urb);
+ usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+ pdu_header.base.seqnum);
usbip_header_correct_endian(&pdu_header, 1);
iov[iovnum].iov_base = &pdu_header;
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index f7978933b402..ee2bbce24584 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -91,7 +91,7 @@ static void usbip_dump_usb_device(struct usb_device *udev)
dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)",
udev->devnum, udev->devpath, usb_speed_string(udev->speed));
- pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport);
+ pr_debug("tt hub ttport %d\n", udev->ttport);
dev_dbg(dev, " ");
for (i = 0; i < 16; i++)
@@ -124,12 +124,8 @@ static void usbip_dump_usb_device(struct usb_device *udev)
}
pr_debug("\n");
- dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus);
-
- dev_dbg(dev,
- "descriptor %p, config %p, actconfig %p, rawdescriptors %p\n",
- &udev->descriptor, udev->config,
- udev->actconfig, udev->rawdescriptors);
+ dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev),
+ udev->bus->bus_name);
dev_dbg(dev, "have_langid %d, string_langid %d\n",
udev->have_langid, udev->string_langid);
@@ -237,9 +233,6 @@ void usbip_dump_urb(struct urb *urb)
dev = &urb->dev->dev;
- dev_dbg(dev, " urb :%p\n", urb);
- dev_dbg(dev, " dev :%p\n", urb->dev);
-
usbip_dump_usb_device(urb->dev);
dev_dbg(dev, " pipe :%08x ", urb->pipe);
@@ -248,11 +241,9 @@ void usbip_dump_urb(struct urb *urb)
dev_dbg(dev, " status :%d\n", urb->status);
dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags);
- dev_dbg(dev, " transfer_buffer :%p\n", urb->transfer_buffer);
dev_dbg(dev, " transfer_buffer_length:%d\n",
urb->transfer_buffer_length);
dev_dbg(dev, " actual_length :%d\n", urb->actual_length);
- dev_dbg(dev, " setup_packet :%p\n", urb->setup_packet);
if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL)
usbip_dump_usb_ctrlrequest(
@@ -262,8 +253,6 @@ void usbip_dump_urb(struct urb *urb)
dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets);
dev_dbg(dev, " interval :%d\n", urb->interval);
dev_dbg(dev, " error_count :%d\n", urb->error_count);
- dev_dbg(dev, " context :%p\n", urb->context);
- dev_dbg(dev, " complete :%p\n", urb->complete);
}
EXPORT_SYMBOL_GPL(usbip_dump_urb);
@@ -317,26 +306,20 @@ int usbip_recv(struct socket *sock, void *buf, int size)
struct msghdr msg = {.msg_flags = MSG_NOSIGNAL};
int total = 0;
+ if (!sock || !buf || !size)
+ return -EINVAL;
+
iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size);
usbip_dbg_xmit("enter\n");
- if (!sock || !buf || !size) {
- pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
- size);
- return -EINVAL;
- }
-
do {
- int sz = msg_data_left(&msg);
+ msg_data_left(&msg);
sock->sk->sk_allocation = GFP_NOIO;
result = sock_recvmsg(sock, &msg, MSG_WAITALL);
- if (result <= 0) {
- pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
- sock, buf + total, sz, result, total);
+ if (result <= 0)
goto err;
- }
total += result;
} while (msg_data_left(&msg));
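
The usbip_recv() hunk reorders the function so that the NULL/size check runs before the arguments are handed to iov_iter setup, and the failure paths no longer print pointer values. Schematically, with a simplified stand-in function:

#include <linux/net.h>

static int recv_checked(struct socket *sock, void *buf, int size)
{
	/* Validate before use; log no %p values on the error path. */
	if (!sock || !buf || size <= 0)
		return -EINVAL;

	/* Only now is it safe to build the iov_iter and receive. */
	return 0;
}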
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index e5de35c8c505..473fb8a87289 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -256,6 +256,7 @@ struct usbip_device {
/* lock for status */
spinlock_t lock;
+ int sockfd;
struct socket *tcp_socket;
struct task_struct *tcp_rx;
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 6b3278c4b72a..c3e1008aa491 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -656,9 +656,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
struct vhci_device *vdev;
unsigned long flags;
- usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
- hcd, urb, mem_flags);
-
if (portnum > VHCI_HC_PORTS) {
pr_err("invalid port number %d\n", portnum);
return -ENODEV;
@@ -822,8 +819,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
struct vhci_device *vdev;
unsigned long flags;
- pr_info("dequeue a urb %p\n", urb);
-
spin_lock_irqsave(&vhci->lock, flags);
priv = urb->hcpriv;
@@ -851,7 +846,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
/* tcp connection is closed */
spin_lock(&vdev->priv_lock);
- pr_info("device %p seems to be disconnected\n", vdev);
list_del(&priv->list);
kfree(priv);
urb->hcpriv = NULL;
@@ -863,8 +857,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
* vhci_rx will receive RET_UNLINK and give back the URB.
* Otherwise, we give back it here.
*/
- pr_info("gives back urb %p\n", urb);
-
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&vhci->lock, flags);
@@ -892,8 +884,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
unlink->unlink_seqnum = priv->seqnum;
- pr_info("device %p seems to be still connected\n", vdev);
-
/* send cmd_unlink and try to cancel the pending URB in the
* peer */
list_add_tail(&unlink->list, &vdev->unlink_tx);
@@ -975,7 +965,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
/* need this? see stub_dev.c */
if (ud->tcp_socket) {
- pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket);
+ pr_debug("shutdown tcp_socket %d\n", ud->sockfd);
kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
}
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 90577e8b2282..112ebb90d8c9 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -23,24 +23,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
urb = priv->urb;
status = urb->status;
- usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n",
- urb, priv, seqnum);
+ usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);
switch (status) {
case -ENOENT:
/* fall through */
case -ECONNRESET:
- dev_info(&urb->dev->dev,
- "urb %p was unlinked %ssynchronuously.\n", urb,
- status == -ENOENT ? "" : "a");
+ dev_dbg(&urb->dev->dev,
+			"urb seq# %u was unlinked %ssynchronously\n",
+ seqnum, status == -ENOENT ? "" : "a");
break;
case -EINPROGRESS:
/* no info output */
break;
default:
- dev_info(&urb->dev->dev,
- "urb %p may be in a error, status %d\n", urb,
- status);
+ dev_dbg(&urb->dev->dev,
+			"urb seq# %u may be in an error state, status %d\n",
+ seqnum, status);
}
list_del(&priv->list);
@@ -67,8 +66,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
spin_unlock_irqrestore(&vdev->priv_lock, flags);
if (!urb) {
- pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
- pr_info("max seqnum %d\n",
+ pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
+ pdu->base.seqnum,
atomic_read(&vhci_hcd->seqnum));
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return;
@@ -91,7 +90,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
if (usbip_dbg_flag_vhci_rx)
usbip_dump_urb(urb);
- usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
+ usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
spin_lock_irqsave(&vhci->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb);
@@ -158,7 +157,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
pr_info("the urb (seqnum %d) was already given back\n",
pdu->base.seqnum);
} else {
- usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
+ usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
/* If unlink is successful, status is -ECONNRESET */
urb->status = pdu->u.ret_unlink.status;
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index e78f7472cac4..091f76b7196d 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -17,15 +17,20 @@
/*
* output example:
- * hub port sta spd dev socket local_busid
- * hs 0000 004 000 00000000 c5a7bb80 1-2.3
+ * hub port sta spd dev sockfd local_busid
+ * hs 0000 004 000 00000000 3 1-2.3
* ................................................
- * ss 0008 004 000 00000000 d8cee980 2-3.4
+ * ss 0008 004 000 00000000 4 2-3.4
* ................................................
*
- * IP address can be retrieved from a socket pointer address by looking
- * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
- * port number and its peer IP address.
+ * Output includes the socket fd instead of the socket pointer address to
+ * avoid leaking a kernel memory address in
+ *	/sys/devices/platform/vhci_hcd.0/status and in debug output.
+ * The socket pointer address is not used at the moment; it was made
+ * visible as a convenient way to find the IP address from a socket
+ * pointer address by looking up /proc/net/{tcp,tcp6}. As this opens a
+ * security hole, the change is made to use sockfd instead.
+ *
*/
static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev)
{
@@ -39,8 +44,8 @@ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vd
if (vdev->ud.status == VDEV_ST_USED) {
*out += sprintf(*out, "%03u %08x ",
vdev->speed, vdev->devid);
- *out += sprintf(*out, "%16p %s",
- vdev->ud.tcp_socket,
+ *out += sprintf(*out, "%u %s",
+ vdev->ud.sockfd,
dev_name(&vdev->udev->dev));
} else {
@@ -160,7 +165,8 @@ static ssize_t nports_show(struct device *dev, struct device_attribute *attr,
char *s = out;
/*
- * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, thus the * 2.
+ * Half the ports are for SPEED_HIGH and half for SPEED_SUPER,
+ * thus the * 2.
*/
out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers);
return out - s;
@@ -366,6 +372,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
vdev->devid = devid;
vdev->speed = speed;
+ vdev->ud.sockfd = sockfd;
vdev->ud.tcp_socket = socket;
vdev->ud.status = VDEV_ST_NOTASSIGNED;
diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
index d625a2ff4b71..9aed15a358b7 100644
--- a/drivers/usb/usbip/vhci_tx.c
+++ b/drivers/usb/usbip/vhci_tx.c
@@ -69,7 +69,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
memset(&msg, 0, sizeof(msg));
memset(&iov, 0, sizeof(iov));
- usbip_dbg_vhci_tx("setup txdata urb %p\n", urb);
+ usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
+ priv->seqnum);
/* 1. setup usbip_header */
setup_cmd_submit_pdu(&pdu_header, urb);
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
index df1e30989148..1e8a23d92cb4 100644
--- a/drivers/usb/usbip/vudc_rx.c
+++ b/drivers/usb/usbip/vudc_rx.c
@@ -120,6 +120,25 @@ static int v_recv_cmd_submit(struct vudc *udc,
urb_p->new = 1;
urb_p->seqnum = pdu->base.seqnum;
+ if (urb_p->ep->type == USB_ENDPOINT_XFER_ISOC) {
+ /* validate packet size and number of packets */
+ unsigned int maxp, packets, bytes;
+
+ maxp = usb_endpoint_maxp(urb_p->ep->desc);
+ maxp *= usb_endpoint_maxp_mult(urb_p->ep->desc);
+ bytes = pdu->u.cmd_submit.transfer_buffer_length;
+ packets = DIV_ROUND_UP(bytes, maxp);
+
+ if (pdu->u.cmd_submit.number_of_packets < 0 ||
+ pdu->u.cmd_submit.number_of_packets > packets) {
+ dev_err(&udc->gadget.dev,
+ "CMD_SUBMIT: isoc invalid num packets %d\n",
+ pdu->u.cmd_submit.number_of_packets);
+ ret = -EMSGSIZE;
+ goto free_urbp;
+ }
+ }
+
ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type);
if (ret) {
usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c
index 1440ae0919ec..3ccb17c3e840 100644
--- a/drivers/usb/usbip/vudc_tx.c
+++ b/drivers/usb/usbip/vudc_tx.c
@@ -85,6 +85,13 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
memset(&pdu_header, 0, sizeof(pdu_header));
memset(&msg, 0, sizeof(msg));
+ if (urb->actual_length > 0 && !urb->transfer_buffer) {
+ dev_err(&udc->gadget.dev,
+ "urb: actual_length %d transfer_buffer null\n",
+ urb->actual_length);
+ return -1;
+ }
+
if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
iovnum = 2 + urb->number_of_packets;
else
@@ -100,8 +107,8 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
/* 1. setup usbip_header */
setup_ret_submit_pdu(&pdu_header, urb_p);
- usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
- pdu_header.base.seqnum, urb);
+ usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+ pdu_header.base.seqnum);
usbip_header_correct_endian(&pdu_header, 1);
iov[iovnum].iov_base = &pdu_header;
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
index 4797217e5e72..8cc4b48ff127 100644
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -46,7 +46,7 @@ static void virqfd_deactivate(struct virqfd *virqfd)
static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
- unsigned long flags = (unsigned long)key;
+ __poll_t flags = key_to_poll(key);
if (flags & POLLIN) {
/* An event has been signaled, call function */
@@ -113,7 +113,7 @@ int vfio_virqfd_enable(void *opaque,
struct eventfd_ctx *ctx;
struct virqfd *virqfd;
int ret = 0;
- unsigned int events;
+ __poll_t events;
virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
if (!virqfd)
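
The virqfd change is the callback-side counterpart of the __poll_t conversions: a waitqueue wakeup function receives the triggering events in its key argument and now decodes them with key_to_poll(). A minimal sketch, with a hypothetical callback:

#include <linux/poll.h>
#include <linux/wait.h>

static int my_wakeup(wait_queue_entry_t *wait, unsigned int mode,
		     int sync, void *key)
{
	__poll_t flags = key_to_poll(key);

	if (flags & POLLIN) {
		/* an event was signaled: dispatch the handler */
	}
	if (flags & POLLHUP) {
		/* the eventfd was closed: schedule teardown */
	}

	return 0;
}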
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c7bdeb655646..9c3f8160ef24 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -89,7 +89,7 @@ struct vhost_net_ubuf_ref {
#define VHOST_RX_BATCH 64
struct vhost_net_buf {
- struct sk_buff **queue;
+ void **queue;
int tail;
int head;
};
@@ -108,7 +108,7 @@ struct vhost_net_virtqueue {
/* Reference counting for outstanding ubufs.
* Protected by vq mutex. Writers must also take device mutex. */
struct vhost_net_ubuf_ref *ubufs;
- struct skb_array *rx_array;
+ struct ptr_ring *rx_ring;
struct vhost_net_buf rxq;
};
@@ -158,7 +158,7 @@ static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
struct vhost_net_buf *rxq = &nvq->rxq;
rxq->head = 0;
- rxq->tail = skb_array_consume_batched(nvq->rx_array, rxq->queue,
+ rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
VHOST_RX_BATCH);
return rxq->tail;
}
@@ -167,13 +167,25 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
struct vhost_net_buf *rxq = &nvq->rxq;
- if (nvq->rx_array && !vhost_net_buf_is_empty(rxq)) {
- skb_array_unconsume(nvq->rx_array, rxq->queue + rxq->head,
- vhost_net_buf_get_size(rxq));
+ if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
+ ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
+ vhost_net_buf_get_size(rxq),
+ __skb_array_destroy_skb);
rxq->head = rxq->tail = 0;
}
}
+static int vhost_net_buf_peek_len(void *ptr)
+{
+ if (tun_is_xdp_buff(ptr)) {
+ struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+ return xdp->data_end - xdp->data;
+ }
+
+ return __skb_array_len_with_tag(ptr);
+}
+
static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
struct vhost_net_buf *rxq = &nvq->rxq;
@@ -185,7 +197,7 @@ static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
return 0;
out:
- return __skb_array_len_with_tag(vhost_net_buf_get_ptr(rxq));
+ return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}
static void vhost_net_buf_init(struct vhost_net_buf *rxq)
@@ -583,7 +595,7 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
int len = 0;
unsigned long flags;
- if (rvq->rx_array)
+ if (rvq->rx_ring)
return vhost_net_buf_peek(rvq);
spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
@@ -744,7 +756,7 @@ static void handle_rx(struct vhost_net *net)
};
size_t total_len = 0;
int err, mergeable;
- s16 headcount;
+ s16 headcount, nheads = 0;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
struct socket *sock;
@@ -772,7 +784,7 @@ static void handle_rx(struct vhost_net *net)
while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
- headcount = get_rx_bufs(vq, vq->heads, vhost_len,
+ headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
&in, vq_log, &log,
likely(mergeable) ? UIO_MAXIOV : 1);
/* On error, stop handling until the next kick. */
@@ -790,7 +802,7 @@ static void handle_rx(struct vhost_net *net)
* they refilled. */
goto out;
}
- if (nvq->rx_array)
+ if (nvq->rx_ring)
msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
@@ -844,8 +856,12 @@ static void handle_rx(struct vhost_net *net)
vhost_discard_vq_desc(vq, headcount);
goto out;
}
- vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
- headcount);
+ nheads += headcount;
+ if (nheads > VHOST_RX_BATCH) {
+ vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+ nheads);
+ nheads = 0;
+ }
if (unlikely(vq_log))
vhost_log_write(vq, vq_log, log, vhost_len);
total_len += vhost_len;
@@ -856,6 +872,9 @@ static void handle_rx(struct vhost_net *net)
}
vhost_net_enable_vq(net, vq);
out:
+ if (nheads)
+ vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+ nheads);
mutex_unlock(&vq->mutex);
}
@@ -896,7 +915,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
struct vhost_net *n;
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
- struct sk_buff **queue;
+ void **queue;
int i;
n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
@@ -908,7 +927,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
return -ENOMEM;
}
- queue = kmalloc_array(VHOST_RX_BATCH, sizeof(struct sk_buff *),
+ queue = kmalloc_array(VHOST_RX_BATCH, sizeof(void *),
GFP_KERNEL);
if (!queue) {
kfree(vqs);
@@ -1046,23 +1065,23 @@ err:
return ERR_PTR(r);
}
-static struct skb_array *get_tap_skb_array(int fd)
+static struct ptr_ring *get_tap_ptr_ring(int fd)
{
- struct skb_array *array;
+ struct ptr_ring *ring;
struct file *file = fget(fd);
if (!file)
return NULL;
- array = tun_get_skb_array(file);
- if (!IS_ERR(array))
+ ring = tun_get_tx_ring(file);
+ if (!IS_ERR(ring))
goto out;
- array = tap_get_skb_array(file);
- if (!IS_ERR(array))
+ ring = tap_get_ptr_ring(file);
+ if (!IS_ERR(ring))
goto out;
- array = NULL;
+ ring = NULL;
out:
fput(file);
- return array;
+ return ring;
}
static struct socket *get_tap_socket(int fd)
@@ -1143,7 +1162,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
vq->private_data = sock;
vhost_net_buf_unproduce(nvq);
if (index == VHOST_NET_VQ_RX)
- nvq->rx_array = get_tap_skb_array(fd);
+ nvq->rx_ring = get_tap_ptr_ring(fd);
r = vhost_vq_init_access(vq);
if (r)
goto err_used;
@@ -1208,6 +1227,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
}
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
+ vhost_dev_stop(&n->dev);
vhost_dev_reset_owner(&n->dev, umem);
vhost_net_vq_reset(n);
done:
@@ -1353,7 +1373,7 @@ static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
return vhost_chr_write_iter(dev, from);
}
-static unsigned int vhost_net_chr_poll(struct file *file, poll_table *wait)
+static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{
struct vhost_net *n = file->private_data;
struct vhost_dev *dev = &n->dev;
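
Besides the skb_array-to-ptr_ring switch, handle_rx() now batches used-ring updates: completed heads accumulate in vq->heads and vhost_add_used_and_signal_n() runs once per batch rather than once per received buffer. The pattern in isolation, as self-contained C with hypothetical names:

#define BATCH 64			/* stands in for VHOST_RX_BATCH */

struct head { unsigned int id, len; };

/* Stands in for vhost_add_used_and_signal_n(). */
static void flush_used(struct head *heads, int n)
{
}

static void rx_loop(struct head *heads, int (*get_bufs)(struct head *))
{
	int nheads = 0;

	for (;;) {
		int got = get_bufs(heads + nheads);

		if (got <= 0)
			break;
		nheads += got;
		if (nheads > BATCH) {	/* signal once per full batch */
			flush_used(heads, nheads);
			nheads = 0;
		}
	}
	if (nheads)			/* flush the remainder on exit */
		flush_used(heads, nheads);
}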
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 33ac2b186b85..8d4374606756 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -170,7 +170,7 @@ static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
{
struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
- if (!((unsigned long)key & poll->mask))
+ if (!(key_to_poll(key) & poll->mask))
return 0;
vhost_poll_queue(poll);
@@ -187,7 +187,7 @@ EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- unsigned long mask, struct vhost_dev *dev)
+ __poll_t mask, struct vhost_dev *dev)
{
init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
init_poll_funcptr(&poll->table, vhost_poll_func);
@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(vhost_poll_init);
* keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
- unsigned long mask;
+ __poll_t mask;
int ret = 0;
if (poll->wqh)
@@ -211,7 +211,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
mask = file->f_op->poll(file, &poll->table);
if (mask)
- vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+ vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
if (mask & POLLERR) {
if (poll->wqh)
remove_wait_queue(poll->wqh, &poll->wait);
@@ -904,7 +904,7 @@ static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
int i = 0;
for (i = 0; i < d->nvqs; ++i)
- mutex_lock(&d->vqs[i]->mutex);
+ mutex_lock_nested(&d->vqs[i]->mutex, i);
}
static void vhost_dev_unlock_vqs(struct vhost_dev *d)
@@ -1015,6 +1015,10 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
vhost_iotlb_notify_vq(dev, msg);
break;
case VHOST_IOTLB_INVALIDATE:
+ if (!dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
vhost_vq_meta_reset(dev);
vhost_del_umem_range(dev->iotlb, msg->iova,
msg->iova + msg->size - 1);
@@ -1057,10 +1061,10 @@ done:
}
EXPORT_SYMBOL(vhost_chr_write_iter);
-unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &dev->wait, wait);
@@ -1877,12 +1881,7 @@ static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
return -1U;
/* Check they're not leading us off end of descriptors. */
- next = vhost16_to_cpu(vq, desc->next);
- /* Make sure compiler knows to grab that: we don't want it changing! */
- /* We will use the result as an index in an array, so most
- * architectures only need a compiler barrier here. */
- read_barrier_depends();
-
+ next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
return next;
}
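
The next_desc() hunk replaces the read_barrier_depends() sequence with a single READ_ONCE(): the marked load stops the compiler from re-reading the shared descriptor field, and the address dependency it feeds provides the ordering the later array indexing needs. A reduced sketch with hypothetical types:

#include <linux/compiler.h>
#include <linux/types.h>

struct entry {
	u16 next;
};

static u16 next_index(const struct entry *entries, const struct entry *desc)
{
	/* One stable load of the shared field ... */
	u16 next = READ_ONCE(desc->next);

	/* ... which is then safe to use (after bounds checking) as an index. */
	return entries[next].next;
}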
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 79c6e7a60a5e..7876a3d7d1b3 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -34,7 +34,7 @@ struct vhost_poll {
wait_queue_head_t *wqh;
wait_queue_entry_t wait;
struct vhost_work work;
- unsigned long mask;
+ __poll_t mask;
struct vhost_dev *dev;
};
@@ -43,7 +43,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- unsigned long mask, struct vhost_dev *dev);
+ __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
@@ -217,7 +217,7 @@ void vhost_enqueue_msg(struct vhost_dev *dev,
struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
struct list_head *head);
-unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
int noblock);
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
index d84329676689..6a34ab936726 100644
--- a/drivers/video/backlight/apple_bl.c
+++ b/drivers/video/backlight/apple_bl.c
@@ -143,7 +143,7 @@ static int apple_bl_add(struct acpi_device *dev)
struct pci_dev *host;
int intensity;
- host = pci_get_bus_and_slot(0, 0);
+ host = pci_get_domain_bus_and_slot(0, 0, 0);
if (!host) {
pr_err("unable to find PCI host\n");
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index d7c239ea3d09..f5574060f9c8 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -177,7 +177,7 @@ static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int adrs, uint8_t data)
struct spi_message msg;
struct spi_transfer xfer = {
.len = 1,
- .cs_change = 1,
+ .cs_change = 0,
.tx_buf = lcd->buf,
};
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index eab1f842f9c0..e4bd63e9db6b 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -369,7 +369,7 @@ static int tdo24m_probe(struct spi_device *spi)
spi_message_init(m);
- x->cs_change = 1;
+ x->cs_change = 0;
x->tx_buf = &lcd->buf[0];
spi_message_add_tail(x, m);
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 6a41ea92737a..4dc5ee8debeb 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -49,7 +49,7 @@ static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
struct spi_message msg;
struct spi_transfer xfer = {
.len = 1,
- .cs_change = 1,
+ .cs_change = 0,
.tx_buf = buf,
};
diff --git a/drivers/video/fbdev/macfb.c b/drivers/video/fbdev/macfb.c
index cda7587cbc86..e707e617bf1c 100644
--- a/drivers/video/fbdev/macfb.c
+++ b/drivers/video/fbdev/macfb.c
@@ -556,7 +556,7 @@ static void __init iounmap_macfb(void)
static int __init macfb_init(void)
{
int video_cmap_len, video_is_nubus = 0;
- struct nubus_dev* ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
char *option = NULL;
int err;
@@ -670,15 +670,17 @@ static int __init macfb_init(void)
* code is really broken :-)
*/
- while ((ndev = nubus_find_type(NUBUS_CAT_DISPLAY,
- NUBUS_TYPE_VIDEO, ndev)))
- {
+ for_each_func_rsrc(ndev) {
unsigned long base = ndev->board->slot_addr;
if (mac_bi_data.videoaddr < base ||
mac_bi_data.videoaddr - base > 0xFFFFFF)
continue;
+ if (ndev->category != NUBUS_CAT_DISPLAY ||
+ ndev->type != NUBUS_TYPE_VIDEO)
+ continue;
+
video_is_nubus = 1;
slot_addr = (unsigned char *)base;
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index d70ad6d38879..b0597bef4555 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -565,11 +565,11 @@ static irqreturn_t fsl_hv_state_change_isr(int irq, void *data)
/*
* Returns a bitmask indicating whether a read will block
*/
-static unsigned int fsl_hv_poll(struct file *filp, struct poll_table_struct *p)
+static __poll_t fsl_hv_poll(struct file *filp, struct poll_table_struct *p)
{
struct doorbell_queue *dbq = filp->private_data;
unsigned long flags;
- unsigned int mask;
+ __poll_t mask;
spin_lock_irqsave(&dbq->lock, flags);
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index a9192fe4f345..c92131edfaba 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -522,10 +522,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
return -EBUSY;
vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
- if (!vm_dev) {
- rc = -ENOMEM;
- goto free_mem;
- }
+ if (!vm_dev)
+ return -ENOMEM;
vm_dev->vdev.dev.parent = &pdev->dev;
vm_dev->vdev.dev.release = virtio_mmio_release_dev;
@@ -535,17 +533,14 @@ static int virtio_mmio_probe(struct platform_device *pdev)
spin_lock_init(&vm_dev->lock);
vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
- if (vm_dev->base == NULL) {
- rc = -EFAULT;
- goto free_vmdev;
- }
+ if (vm_dev->base == NULL)
+ return -EFAULT;
/* Check magic value */
magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
- rc = -ENODEV;
- goto unmap;
+ return -ENODEV;
}
/* Check device version */
@@ -553,8 +548,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
if (vm_dev->version < 1 || vm_dev->version > 2) {
dev_err(&pdev->dev, "Version %ld not supported!\n",
vm_dev->version);
- rc = -ENXIO;
- goto unmap;
+ return -ENXIO;
}
vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
@@ -563,8 +557,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
* virtio-mmio device with an ID 0 is a (dummy) placeholder
* with no function. End probing now with no error reported.
*/
- rc = -ENODEV;
- goto unmap;
+ return -ENODEV;
}
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
@@ -590,33 +583,15 @@ static int virtio_mmio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vm_dev);
rc = register_virtio_device(&vm_dev->vdev);
- if (rc) {
- iounmap(vm_dev->base);
- devm_release_mem_region(&pdev->dev, mem->start,
- resource_size(mem));
+ if (rc)
put_device(&vm_dev->vdev.dev);
- }
- return rc;
-unmap:
- iounmap(vm_dev->base);
-free_mem:
- devm_release_mem_region(&pdev->dev, mem->start,
- resource_size(mem));
-free_vmdev:
- devm_kfree(&pdev->dev, vm_dev);
+
return rc;
}
static int virtio_mmio_remove(struct platform_device *pdev)
{
struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
- struct resource *mem;
-
- iounmap(vm_dev->base);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem)
- devm_release_mem_region(&pdev->dev, mem->start,
- resource_size(mem));
unregister_virtio_device(&vm_dev->vdev);
return 0;
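
These virtio_mmio hunks finish a conversion to managed (devm) resources: once the region and mapping are devm-allocated, every early return in probe and the whole of remove need no manual iounmap/release, so the goto-based unwind ladder disappears. A schematic probe under those assumptions (names hypothetical):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *base;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, mem->start,
				     resource_size(mem), "example"))
		return -EBUSY;

	base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (!base)
		return -EFAULT;

	/* Any failure from here on can simply return: the device core
	 * unmaps and releases the devm resources automatically. */
	if (readl(base) != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24))
		return -ENODEV;

	return 0;
}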
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index a90728ceec5a..55e11bf8ebaf 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -13,9 +13,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/w1-gpio.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/delay.h>
@@ -30,11 +29,17 @@ static u8 w1_gpio_set_pullup(void *data, int delay)
pdata->pullup_duration = delay;
} else {
if (pdata->pullup_duration) {
- gpio_direction_output(pdata->pin, 1);
-
+ /*
+ * This will OVERRIDE open drain emulation and force-pull
+ * the line high for some time.
+ */
+ gpiod_set_raw_value(pdata->gpiod, 1);
msleep(pdata->pullup_duration);
-
- gpio_direction_input(pdata->pin);
+ /*
+ * This will simply set the line as input since we are doing
+ * open drain emulation in the GPIO library.
+ */
+ gpiod_set_value(pdata->gpiod, 1);
}
pdata->pullup_duration = 0;
}
@@ -42,28 +47,18 @@ static u8 w1_gpio_set_pullup(void *data, int delay)
return 0;
}
-static void w1_gpio_write_bit_dir(void *data, u8 bit)
-{
- struct w1_gpio_platform_data *pdata = data;
-
- if (bit)
- gpio_direction_input(pdata->pin);
- else
- gpio_direction_output(pdata->pin, 0);
-}
-
-static void w1_gpio_write_bit_val(void *data, u8 bit)
+static void w1_gpio_write_bit(void *data, u8 bit)
{
struct w1_gpio_platform_data *pdata = data;
- gpio_set_value(pdata->pin, bit);
+ gpiod_set_value(pdata->gpiod, bit);
}
static u8 w1_gpio_read_bit(void *data)
{
struct w1_gpio_platform_data *pdata = data;
- return gpio_get_value(pdata->pin) ? 1 : 0;
+ return gpiod_get_value(pdata->gpiod) ? 1 : 0;
}
#if defined(CONFIG_OF)
@@ -74,107 +69,85 @@ static const struct of_device_id w1_gpio_dt_ids[] = {
MODULE_DEVICE_TABLE(of, w1_gpio_dt_ids);
#endif
-static int w1_gpio_probe_dt(struct platform_device *pdev)
-{
- struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct device_node *np = pdev->dev.of_node;
- int gpio;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- if (of_get_property(np, "linux,open-drain", NULL))
- pdata->is_open_drain = 1;
-
- gpio = of_get_gpio(np, 0);
- if (gpio < 0) {
- if (gpio != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Failed to parse gpio property for data pin (%d)\n",
- gpio);
-
- return gpio;
- }
- pdata->pin = gpio;
-
- gpio = of_get_gpio(np, 1);
- if (gpio == -EPROBE_DEFER)
- return gpio;
- /* ignore other errors as the pullup gpio is optional */
- pdata->ext_pullup_enable_pin = gpio;
-
- pdev->dev.platform_data = pdata;
-
- return 0;
-}
-
static int w1_gpio_probe(struct platform_device *pdev)
{
struct w1_bus_master *master;
struct w1_gpio_platform_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ /* Enforce open drain mode by default */
+ enum gpiod_flags gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
int err;
if (of_have_populated_dt()) {
- err = w1_gpio_probe_dt(pdev);
- if (err < 0)
- return err;
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /*
+		 * This property means that something other than the gpiolib
+		 * has already set the line into open drain mode, so we should
+		 * just drive it high/low as if we were in full control of the
+		 * line, and open drain will happen transparently.
+ */
+ if (of_get_property(np, "linux,open-drain", NULL))
+ gflags = GPIOD_OUT_LOW;
+
+ pdev->dev.platform_data = pdata;
}
-
- pdata = dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(dev);
if (!pdata) {
- dev_err(&pdev->dev, "No configuration data\n");
+ dev_err(dev, "No configuration data\n");
return -ENXIO;
}
- master = devm_kzalloc(&pdev->dev, sizeof(struct w1_bus_master),
+ master = devm_kzalloc(dev, sizeof(struct w1_bus_master),
GFP_KERNEL);
if (!master) {
- dev_err(&pdev->dev, "Out of memory\n");
+ dev_err(dev, "Out of memory\n");
return -ENOMEM;
}
- err = devm_gpio_request(&pdev->dev, pdata->pin, "w1");
- if (err) {
- dev_err(&pdev->dev, "gpio_request (pin) failed\n");
- return err;
+ pdata->gpiod = devm_gpiod_get_index(dev, NULL, 0, gflags);
+ if (IS_ERR(pdata->gpiod)) {
+ dev_err(dev, "gpio_request (pin) failed\n");
+ return PTR_ERR(pdata->gpiod);
}
- if (gpio_is_valid(pdata->ext_pullup_enable_pin)) {
- err = devm_gpio_request_one(&pdev->dev,
- pdata->ext_pullup_enable_pin, GPIOF_INIT_LOW,
- "w1 pullup");
- if (err < 0) {
- dev_err(&pdev->dev, "gpio_request_one "
- "(ext_pullup_enable_pin) failed\n");
- return err;
- }
+ pdata->pullup_gpiod =
+ devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_LOW);
+ if (IS_ERR(pdata->pullup_gpiod)) {
+ dev_err(dev, "gpio_request_one "
+ "(ext_pullup_enable_pin) failed\n");
+ return PTR_ERR(pdata->pullup_gpiod);
}
master->data = pdata;
master->read_bit = w1_gpio_read_bit;
-
- if (pdata->is_open_drain) {
- gpio_direction_output(pdata->pin, 1);
- master->write_bit = w1_gpio_write_bit_val;
- } else {
- gpio_direction_input(pdata->pin);
- master->write_bit = w1_gpio_write_bit_dir;
+ gpiod_direction_output(pdata->gpiod, 1);
+ master->write_bit = w1_gpio_write_bit;
+
+ /*
+ * If we are using open drain emulation from the GPIO library,
+ * we need to use this pullup function that hammers the line
+ * high using a raw accessor to provide pull-up for the w1
+ * line.
+ */
+ if (gflags == GPIOD_OUT_LOW_OPEN_DRAIN)
master->set_pullup = w1_gpio_set_pullup;
- }
err = w1_add_master_device(master);
if (err) {
- dev_err(&pdev->dev, "w1_add_master device failed\n");
+ dev_err(dev, "w1_add_master device failed\n");
return err;
}
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(1);
- if (gpio_is_valid(pdata->ext_pullup_enable_pin))
- gpio_set_value(pdata->ext_pullup_enable_pin, 1);
+ if (pdata->pullup_gpiod)
+ gpiod_set_value(pdata->pullup_gpiod, 1);
platform_set_drvdata(pdev, master);
@@ -189,8 +162,8 @@ static int w1_gpio_remove(struct platform_device *pdev)
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(0);
- if (gpio_is_valid(pdata->ext_pullup_enable_pin))
- gpio_set_value(pdata->ext_pullup_enable_pin, 0);
+ if (pdata->pullup_gpiod)
+ gpiod_set_value(pdata->pullup_gpiod, 0);
w1_remove_master_device(master);
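
The rewritten w1_gpio_set_pullup() encodes the strong pull-up trick used with gpiolib open-drain emulation: drive the line high through the raw accessor (bypassing the emulation) for the pull-up window, then write a 1 through the normal accessor, which in emulation turns the line back into an input. In isolation, with hypothetical parameters:

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

static void w1_strong_pullup(struct gpio_desc *desc, unsigned int duration_ms)
{
	gpiod_set_raw_value(desc, 1);	/* force-drive the line high */
	msleep(duration_ms);
	gpiod_set_value(desc, 1);	/* back to emulated open drain */
}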
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index a36661cd1f05..f876772c0fb4 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -59,7 +59,11 @@ enum w1_netlink_message_types {
* @type: one of enum w1_netlink_message_types
* @status: kernel feedback for success 0 or errno failure value
* @len: length of data following w1_netlink_msg
- * @id: union holding master bus id (msg.id) and slave device id (id[8]).
+ * @id: union holding bus master id (msg.id) and slave device id (id[8]).
+ * @id.id: Slave ID (8 bytes)
+ * @id.mst: bus master identification
+ * @id.mst.id: bus master ID
+ * @id.mst.res: bus master reserved
* @data: start address of any following data
*
* The base message structure for w1 messages over netlink.
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ca200d1f310a..5bf613d3b7d6 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -223,6 +223,13 @@ config ZIIRAVE_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called ziirave_wdt.
+config RAVE_SP_WATCHDOG
+ tristate "RAVE SP Watchdog timer"
+ depends on RAVE_SP_CORE
+ select WATCHDOG_CORE
+ help
+	  Support for the watchdog on the RAVE SP device.
+
# ALPHA Architecture
# ARM Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 715a21078e0c..135c5e81f25e 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -224,3 +224,4 @@ obj-$(CONFIG_MAX77620_WATCHDOG) += max77620_wdt.o
obj-$(CONFIG_ZIIRAVE_WATCHDOG) += ziirave_wdt.o
obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o
obj-$(CONFIG_MENF21BMC_WATCHDOG) += menf21bmc_wdt.o
+obj-$(CONFIG_RAVE_SP_WATCHDOG) += rave-sp-wdt.o
diff --git a/drivers/watchdog/rave-sp-wdt.c b/drivers/watchdog/rave-sp-wdt.c
new file mode 100644
index 000000000000..35db173252f9
--- /dev/null
+++ b/drivers/watchdog/rave-sp-wdt.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Driver for the watchdog aspect of the Zodiac Inflight Innovations RAVE
+ * Supervisory Processor (SP) MCU
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovation
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rave-sp.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/watchdog.h>
+
+enum {
+ RAVE_SP_RESET_BYTE = 1,
+ RAVE_SP_RESET_REASON_NORMAL = 0,
+ RAVE_SP_RESET_DELAY_MS = 500,
+};
+
+/**
+ * struct rave_sp_wdt_variant - RAVE SP watchdog variant
+ *
+ * @max_timeout: Largest possible watchdog timeout setting
+ * @min_timeout: Smallest possible watchdog timeout setting
+ *
+ * @configure: Function to send configuration command
+ * @restart: Function to send "restart" command
+ */
+struct rave_sp_wdt_variant {
+ unsigned int max_timeout;
+ unsigned int min_timeout;
+
+ int (*configure)(struct watchdog_device *, bool);
+ int (*restart)(struct watchdog_device *);
+};
+
+/**
+ * struct rave_sp_wdt - RAVE SP watchdog
+ *
+ * @wdd: Underlying watchdog device
+ * @sp: Pointer to parent RAVE SP device
+ * @variant: Device specific variant information
+ * @reboot_notifier: Reboot notifier implementing machine reset
+ */
+struct rave_sp_wdt {
+ struct watchdog_device wdd;
+ struct rave_sp *sp;
+ const struct rave_sp_wdt_variant *variant;
+ struct notifier_block reboot_notifier;
+};
+
+static struct rave_sp_wdt *to_rave_sp_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct rave_sp_wdt, wdd);
+}
+
+static int rave_sp_wdt_exec(struct watchdog_device *wdd, void *data,
+ size_t data_size)
+{
+ return rave_sp_exec(to_rave_sp_wdt(wdd)->sp,
+ data, data_size, NULL, 0);
+}
+
+static int rave_sp_wdt_legacy_configure(struct watchdog_device *wdd, bool on)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_SW_WDT,
+ [1] = 0,
+ [2] = 0,
+ [3] = on,
+ [4] = on ? wdd->timeout : 0,
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_rdu_configure(struct watchdog_device *wdd, bool on)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_SW_WDT,
+ [1] = 0,
+ [2] = on,
+ [3] = (u8)wdd->timeout,
+ [4] = (u8)(wdd->timeout >> 8),
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+/**
+ * rave_sp_wdt_configure - Configure watchdog device
+ *
+ * @wdd: Device to configure
+ * @on: Desired state of the watchdog timer (ON/OFF)
+ *
+ * This function configures two aspects of the watchdog timer:
+ *
+ * - Whether it is ON or OFF
+ * - Its timeout duration
+ *
+ * with the first aspect specified via the function argument and the
+ * second via the value of 'wdd->timeout'.
+ */
+static int rave_sp_wdt_configure(struct watchdog_device *wdd, bool on)
+{
+ return to_rave_sp_wdt(wdd)->variant->configure(wdd, on);
+}
+
+static int rave_sp_wdt_legacy_restart(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_RESET,
+ [1] = 0,
+ [2] = RAVE_SP_RESET_BYTE
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_rdu_restart(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_RESET,
+ [1] = 0,
+ [2] = RAVE_SP_RESET_BYTE,
+ [3] = RAVE_SP_RESET_REASON_NORMAL
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ /*
+	 * The restart handler is called in atomic context, which means
+	 * we can't communicate with the SP via UART. Luckily for us the
+	 * SP will wait 500 ms before actually resetting us, so we ask
+	 * it to do so here and let the rest of the system go on
+	 * wrapping things up.
+ */
+ if (action == SYS_DOWN || action == SYS_HALT) {
+ struct rave_sp_wdt *sp_wd =
+ container_of(nb, struct rave_sp_wdt, reboot_notifier);
+
+ const int ret = sp_wd->variant->restart(&sp_wd->wdd);
+
+ if (ret < 0)
+ dev_err(sp_wd->wdd.parent,
+ "Failed to issue restart command (%d)", ret);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int rave_sp_wdt_restart(struct watchdog_device *wdd,
+ unsigned long action, void *data)
+{
+ /*
+	 * The actual work was done by the reboot notifier above. SP
+	 * firmware waits 500 ms before issuing the reset, so let's hang
+	 * here for twice that delay and hopefully never reach the
+	 * return statement.
+ */
+ mdelay(2 * RAVE_SP_RESET_DELAY_MS);
+
+ return -EIO;
+}
+
+static int rave_sp_wdt_start(struct watchdog_device *wdd)
+{
+ int ret;
+
+ ret = rave_sp_wdt_configure(wdd, true);
+ if (!ret)
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+
+ return ret;
+}
+
+static int rave_sp_wdt_stop(struct watchdog_device *wdd)
+{
+ return rave_sp_wdt_configure(wdd, false);
+}
+
+static int rave_sp_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ wdd->timeout = timeout;
+
+ return rave_sp_wdt_configure(wdd, watchdog_active(wdd));
+}
+
+static int rave_sp_wdt_ping(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_PET_WDT,
+ [1] = 0,
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static const struct watchdog_info rave_sp_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "RAVE SP Watchdog",
+};
+
+static const struct watchdog_ops rave_sp_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rave_sp_wdt_start,
+ .stop = rave_sp_wdt_stop,
+ .ping = rave_sp_wdt_ping,
+ .set_timeout = rave_sp_wdt_set_timeout,
+ .restart = rave_sp_wdt_restart,
+};
+
+static const struct rave_sp_wdt_variant rave_sp_wdt_legacy = {
+ .max_timeout = 255,
+ .min_timeout = 1,
+ .configure = rave_sp_wdt_legacy_configure,
+ .restart = rave_sp_wdt_legacy_restart,
+};
+
+static const struct rave_sp_wdt_variant rave_sp_wdt_rdu = {
+ .max_timeout = 180,
+ .min_timeout = 60,
+ .configure = rave_sp_wdt_rdu_configure,
+ .restart = rave_sp_wdt_rdu_restart,
+};
+
+static const struct of_device_id rave_sp_wdt_of_match[] = {
+ {
+ .compatible = "zii,rave-sp-watchdog-legacy",
+ .data = &rave_sp_wdt_legacy,
+ },
+ {
+ .compatible = "zii,rave-sp-watchdog",
+ .data = &rave_sp_wdt_rdu,
+ },
+ { /* sentinel */ }
+};
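+/*
+ * Hypothetical devicetree usage sketch (assumes the node sits under
+ * the parent rave-sp MFD node and that an nvmem cell named
+ * "wdt-timeout" is wired up, matching the lookup in probe() below;
+ * the &wdt_timeout label is made up for illustration):
+ *
+ * watchdog {
+ * compatible = "zii,rave-sp-watchdog";
+ * nvmem-cells = <&wdt_timeout>;
+ * nvmem-cell-names = "wdt-timeout";
+ * };
+ */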
+
+static int rave_sp_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct watchdog_device *wdd;
+ struct rave_sp_wdt *sp_wd;
+ struct nvmem_cell *cell;
+ __le16 timeout = 0;
+ int ret;
+
+ sp_wd = devm_kzalloc(dev, sizeof(*sp_wd), GFP_KERNEL);
+ if (!sp_wd)
+ return -ENOMEM;
+
+ sp_wd->variant = of_device_get_match_data(dev);
+ sp_wd->sp = dev_get_drvdata(dev->parent);
+
+ wdd = &sp_wd->wdd;
+ wdd->parent = dev;
+ wdd->info = &rave_sp_wdt_info;
+ wdd->ops = &rave_sp_wdt_ops;
+ wdd->min_timeout = sp_wd->variant->min_timeout;
+ wdd->max_timeout = sp_wd->variant->max_timeout;
+ wdd->status = WATCHDOG_NOWAYOUT_INIT_STATUS;
+ wdd->timeout = 60;
+
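+ /*
+ * An nvmem cell may hold a stored timeout; if it reads back a
+ * valid value, watchdog_init_timeout() below uses it instead of
+ * the 60 second default.
+ */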
+ cell = nvmem_cell_get(dev, "wdt-timeout");
+ if (!IS_ERR(cell)) {
+ size_t len;
+ void *value = nvmem_cell_read(cell, &len);
+
+ if (!IS_ERR(value)) {
+ memcpy(&timeout, value, min(len, sizeof(timeout)));
+ kfree(value);
+ }
+ nvmem_cell_put(cell);
+ }
+ watchdog_init_timeout(wdd, le16_to_cpu(timeout), dev);
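+ /* Priority 255 makes this the preferred restart handler */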
+ watchdog_set_restart_priority(wdd, 255);
+ watchdog_stop_on_unregister(wdd);
+
+ sp_wd->reboot_notifier.notifier_call = rave_sp_wdt_reboot_notifier;
+ ret = devm_register_reboot_notifier(dev, &sp_wd->reboot_notifier);
+ if (ret) {
+ dev_err(dev, "Failed to register reboot notifier\n");
+ return ret;
+ }
+
+ /*
+ * We don't know whether the watchdog is currently running. To be
+ * sure, start it and depend on the watchdog core to ping it.
+ */
+ wdd->max_hw_heartbeat_ms = wdd->max_timeout * 1000;
+ ret = rave_sp_wdt_start(wdd);
+ if (ret) {
+ dev_err(dev, "Watchdog didn't start\n");
+ return ret;
+ }
+
+ ret = devm_watchdog_register_device(dev, wdd);
+ if (ret) {
+ dev_err(dev, "Failed to register watchdog device\n");
+ rave_sp_wdt_stop(wdd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver rave_sp_wdt_driver = {
+ .probe = rave_sp_wdt_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = rave_sp_wdt_of_match,
+ },
+};
+
+module_platform_driver(rave_sp_wdt_driver);
+
+MODULE_DEVICE_TABLE(of, rave_sp_wdt_of_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
+MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("RAVE SP Watchdog driver");
+MODULE_ALIAS("platform:rave-sp-watchdog");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index d8dd54678ab7..e5d0c28372ea 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -269,7 +269,7 @@ config XEN_ACPI_HOTPLUG_CPU
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
- depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
+ depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
default m
help
This ACPI processor uploads Power Management information to the Xen
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index f77e499afddd..065f0b607373 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -257,10 +257,25 @@ static void release_memory_resource(struct resource *resource)
kfree(resource);
}
+/*
+ * Host memory not allocated to dom0. We can use this range for hotplug-based
+ * ballooning.
+ *
+ * It's a type-less resource. Setting IORESOURCE_MEM would make resource
+ * management algorithms (arch_remove_reservations()) look into the guest
+ * e820, which we don't want.
+ */
+static struct resource hostmem_resource = {
+ .name = "Host RAM",
+};
+
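+/*
+ * Weak stub; the architecture can override this to populate @res with
+ * the host memory ranges it finds unallocated (see the call from
+ * balloon_init() below).
+ */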
+void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
+{}
+
static struct resource *additional_memory_resource(phys_addr_t size)
{
- struct resource *res;
- int ret;
+ struct resource *res, *res_hostmem;
+ int ret = -ENOMEM;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
@@ -269,13 +284,42 @@ static struct resource *additional_memory_resource(phys_addr_t size)
res->name = "System RAM";
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
- ret = allocate_resource(&iomem_resource, res,
- size, 0, -1,
- PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
- if (ret < 0) {
- pr_err("Cannot allocate new System RAM resource\n");
- kfree(res);
- return NULL;
+ res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (res_hostmem) {
+ /* Try to grab a range from hostmem */
+ res_hostmem->name = "Host memory";
+ ret = allocate_resource(&hostmem_resource, res_hostmem,
+ size, 0, -1,
+ PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+ }
+
+ if (!ret) {
+ /*
+ * Insert this resource into iomem. Because hostmem_resource tracks
+ * the portion of the guest e820 marked as UNUSABLE, no one else
+ * should try to use it.
+ */
+ res->start = res_hostmem->start;
+ res->end = res_hostmem->end;
+ ret = insert_resource(&iomem_resource, res);
+ if (ret < 0) {
+ pr_err("Can't insert iomem_resource [%llx - %llx]\n",
+ res->start, res->end);
+ release_memory_resource(res_hostmem);
+ res_hostmem = NULL;
+ res->start = res->end = 0;
+ }
+ }
+
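+ /* No usable hostmem range: fall back to a plain iomem allocation */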
+ if (ret) {
+ ret = allocate_resource(&iomem_resource, res,
+ size, 0, -1,
+ PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+ if (ret < 0) {
+ pr_err("Cannot allocate new System RAM resource\n");
+ kfree(res);
+ return NULL;
+ }
}
#ifdef CONFIG_SPARSEMEM
@@ -287,6 +331,7 @@ static struct resource *additional_memory_resource(phys_addr_t size)
pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
pfn, limit);
release_memory_resource(res);
+ release_memory_resource(res_hostmem);
return NULL;
}
}
@@ -765,6 +810,8 @@ static int __init balloon_init(void)
set_online_page_callback(&xen_online_page);
register_memory_notifier(&xen_memory_nb);
register_sysctl_table(xen_root);
+
+ arch_xen_balloon_init(&hostmem_resource);
#endif
#ifdef CONFIG_XEN_PV
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 9729a64ea1a9..72c0416a01cc 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -621,9 +621,9 @@ static long evtchn_ioctl(struct file *file,
return rc;
}
-static unsigned int evtchn_poll(struct file *file, poll_table *wait)
+static __poll_t evtchn_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = POLLOUT | POLLWRNORM;
struct per_user_data *u = file->private_data;
poll_wait(file, &u->evtchn_wait, wait);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 57efbd3b053b..bd56653b9bbc 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -380,10 +380,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
}
range = 0;
while (range < pages) {
- if (map->unmap_ops[offset+range].handle == -1) {
- range--;
+ if (map->unmap_ops[offset+range].handle == -1)
break;
- }
range++;
}
err = __unmap_grant_pages(map, offset, range);
@@ -1073,8 +1071,10 @@ unlock_out:
out_unlock_put:
mutex_unlock(&priv->lock);
out_put_map:
- if (use_ptemod)
+ if (use_ptemod) {
map->vma = NULL;
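+ /* Tear down any grant mappings established before the failure */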
+ unmap_grant_pages(map, 0, map->count);
+ }
gntdev_put_map(priv, map);
return err;
}
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
index 6cc1c15bcd84..9ade533d9e40 100644
--- a/drivers/xen/mcelog.c
+++ b/drivers/xen/mcelog.c
@@ -139,7 +139,7 @@ out:
return err ? err : buf - ubuf;
}
-static unsigned int xen_mce_chrdev_poll(struct file *file, poll_table *wait)
+static __poll_t xen_mce_chrdev_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &xen_mce_chrdev_wait, wait);
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index d1e1d8d2b9d5..78804e71f9a6 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -805,7 +805,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
pvcalls_exit();
return ret;
}
- map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+ map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
if (map2 == NULL) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
@@ -878,7 +878,7 @@ received:
return ret;
}
-static unsigned int pvcalls_front_poll_passive(struct file *file,
+static __poll_t pvcalls_front_poll_passive(struct file *file,
struct pvcalls_bedata *bedata,
struct sock_mapping *map,
poll_table *wait)
@@ -935,12 +935,12 @@ static unsigned int pvcalls_front_poll_passive(struct file *file,
return 0;
}
-static unsigned int pvcalls_front_poll_active(struct file *file,
+static __poll_t pvcalls_front_poll_active(struct file *file,
struct pvcalls_bedata *bedata,
struct sock_mapping *map,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
int32_t in_error, out_error;
struct pvcalls_data_intf *intf = map->active.ring;
@@ -958,12 +958,12 @@ static unsigned int pvcalls_front_poll_active(struct file *file,
return mask;
}
-unsigned int pvcalls_front_poll(struct file *file, struct socket *sock,
+__poll_t pvcalls_front_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map;
- int ret;
+ __poll_t ret;
pvcalls_enter();
if (!pvcalls_front_dev) {
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 82fc54f8eb77..5bb72d3f8337 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -36,7 +36,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/bootmem.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index f3b089b7c0b6..e17ec3fce590 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -645,7 +645,7 @@ static int xenbus_file_release(struct inode *inode, struct file *filp)
return 0;
}
-static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
+static __poll_t xenbus_file_poll(struct file *file, poll_table *wait)
{
struct xenbus_file_priv *u = file->private_data;